From 2e109b1ba62bf9db04fee1016852a5df929a66a6 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 23 Jan 2008 20:55:42 +0000 Subject: [PATCH] update to upstream version 4.0-5.0 --- crash.patch | 119340 +++++++++++++++++++++++++++++++------------------ crash.spec | 5 +- 2 files changed, 75017 insertions(+), 44328 deletions(-) diff --git a/crash.patch b/crash.patch index 5923be3..fe24c91 100644 --- a/crash.patch +++ b/crash.patch @@ -1,15924 +1,5 @@ ---- crash/extensions/Makefile.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/extensions/Makefile 2007-05-29 10:16:56.000000000 -0400 -@@ -0,0 +1,43 @@ -+# -+# Makefile for building crash shared object extensions -+# -+# Copyright (C) 2005 David Anderson -+# Copyright (C) 2005 Red Hat, Inc. All rights reserved. -+# -+# This program is free software; you can redistribute it and/or modify -+# it under the terms of the GNU General Public License as published by -+# the Free Software Foundation; either version 2 of the License, or -+# (at your option) any later version. -+# -+# This program is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# To build the extension shared objects in this directory, run -+# "make extensions" from the top-level directory. -+# -+# To add a new extension object: -+# -+# - add the new source file to the EXTENSION_SOURCE_FILES list -+# in the top-level Makefile -+# - add the object file name to the EXTENSION_OBJECT_FILES list -+# in the top-level Makefile -+# - create a compile stanza below, typically using "echo.so" as -+# a base template. -+# -+ -+all: link_defs $(OBJECTS) -+ -+link_defs: -+ @if [ ! -f defs.h ]; then \ -+ ln -s ../defs.h; fi -+ -+echo.so: ../defs.h echo.c -+ gcc -nostartfiles -shared -rdynamic -o echo.so echo.c -fPIC \ -+ -D$(TARGET) $(TARGET_CFLAGS) -+ -+dminfo.so: ../defs.h dminfo.c -+ gcc -nostartfiles -shared -rdynamic -o dminfo.so dminfo.c -fPIC \ -+ -D$(TARGET) $(TARGET_CFLAGS) -+ ---- crash/extensions/echo.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/extensions/echo.c 2005-11-08 10:37:53.000000000 -0500 -@@ -0,0 +1,105 @@ -+/* echo.c - simple example of a crash extension -+ * -+ * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. -+ * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ */ -+ -+#include "defs.h" /* From the crash source top-level directory */ -+ -+void cmd_echo(); /* Declare the commands and their help data. */ -+char *help_echo[]; -+ -+static struct command_table_entry command_table[] = { -+ "echo", cmd_echo, help_echo, 0, /* One or more commands, */ -+ NULL, /* terminated by NULL, */ -+}; -+ -+ -+_init() /* Register the command set. */ -+{ -+ register_extension(command_table); -+} -+ -+/* -+ * The _fini() function is called if the shared object is unloaded. 
-+ * If desired, perform any cleanups here. -+ */ -+_fini() { } -+ -+ -+/* -+ * Arguments are passed to the command functions in the global args[argcnt] -+ * array. See getopt(3) for info on dash arguments. Check out defs.h and -+ * other crash commands for usage of the myriad of utility routines available -+ * to accomplish what your task. -+ */ -+void -+cmd_echo() -+{ -+ int c; -+ -+ while ((c = getopt(argcnt, args, "")) != EOF) { -+ switch(c) -+ { -+ default: -+ argerrs++; -+ break; -+ } -+ } -+ -+ if (argerrs) -+ cmd_usage(pc->curcmd, SYNOPSIS); -+ -+ while (args[optind]) -+ fprintf(fp, "%s ", args[optind++]); -+ -+ fprintf(fp, "\n"); -+} -+ -+/* -+ * The optional help data is simply an array of strings in a defined format. -+ * For example, the "help echo" command will use the help_echo[] string -+ * array below to create a help page that looks like this: -+ * -+ * NAME -+ * echo - echoes back its arguments -+ * -+ * SYNOPSIS -+ * echo arg ... -+ * -+ * DESCRIPTION -+ * This command simply echoes back its arguments. -+ * -+ * EXAMPLE -+ * Echo back all command arguments: -+ * -+ * crash> echo hello, world -+ * hello, world -+ * -+ */ -+ -+char *help_echo[] = { -+ "echo", /* command name */ -+ "echoes back its arguments", /* short description */ -+ "arg ...", /* argument synopsis, or " " if none */ -+ -+ " This command simply echoes back its arguments.", -+ "\nEXAMPLE", -+ " Echo back all command arguments:\n", -+ " crash> echo hello, world", -+ " hello, world", -+ NULL -+}; -+ -+ ---- crash/extensions/dminfo.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/extensions/dminfo.c 2005-11-08 10:37:53.000000000 -0500 -@@ -0,0 +1,1531 @@ -+/* dminfo.c - crash extension module for device-mapper analysis -+ * -+ * Copyright (C) 2005 NEC Corporation -+ * Copyright (C) 2005 Red Hat, Inc. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ */ -+ -+#include "defs.h" /* From the crash source top-level directory */ -+ -+/* -+ * Indices of size-offset array (Used by GET_xxx macros) -+ * -+ * DM__ -+ */ -+enum { -+ DM_hash_cell_name_list = 0, -+ DM_hash_cell_name, -+ DM_hash_cell_md, -+ -+ DM_mapped_device_disk, -+ DM_mapped_device_map, -+ -+ DM_gendisk_major, -+ DM_gendisk_first_minor, -+ DM_gendisk_disk_name, -+ -+ DM_dm_table_num_targets, -+ DM_dm_table_targets, -+ DM_dm_table_devices, -+ -+ DM_dm_target_type, -+ DM_dm_target_begin, -+ DM_dm_target_len, -+ DM_dm_target_private, -+ -+ DM_dm_dev_count, -+ DM_dm_dev_bdev, -+ DM_dm_dev_name, -+ -+ DM_dm_io_md, -+ DM_dm_io_bio, -+ -+ DM_target_type_name, -+ -+ DM_target_io_io, -+ -+ DM_block_device_bd_disk, -+ -+ DM_bio_bi_private, -+ -+ DM_bio_list_head, -+ -+ DM_linear_c_dev, -+ DM_linear_c_start, -+ -+ DM_multipath_hw_handler, -+ DM_multipath_nr_priority_groups, -+ DM_multipath_priority_groups, -+ DM_multipath_nr_valid_paths, -+ DM_multipath_current_pg, -+ DM_multipath_queue_if_no_path, -+ DM_multipath_queue_size, -+ -+ DM_hw_handler_type, -+ DM_hw_handler_type_name, -+ -+ DM_priority_group_ps, -+ DM_priority_group_pg_num, -+ DM_priority_group_bypassed, -+ DM_priority_group_nr_pgpaths, -+ DM_priority_group_pgpaths, -+ -+ DM_path_selector_type, -+ DM_path_selector_type_name, -+ -+ DM_pgpath_fail_count, -+ DM_pgpath_path, -+ -+ DM_path_dev, -+ DM_path_is_active, -+ -+ DM_mirror_set_rh, -+ DM_mirror_set_reads, -+ DM_mirror_set_writes, -+ DM_mirror_set_in_sync, -+ DM_mirror_set_nr_mirrors, -+ DM_mirror_set_mirror, -+ -+ DM_region_hash_log, -+ DM_region_hash_quiesced_regions, -+ DM_region_hash_recovered_regions, -+ -+ DM_dirty_log_type, -+ DM_dirty_log_type_name, -+ -+ DM_mirror_error_count, -+ DM_mirror_dev, -+ DM_mirror_offset, -+ -+ DM_crypt_config_dev, -+ DM_crypt_config_iv_mode, -+ DM_crypt_config_tfm, -+ DM_crypt_config_key_size, -+ DM_crypt_config_key, -+ -+ DM_crypto_tfm_crt_u, -+ DM_crypto_tfm___crt_alg, -+ -+ DM_crypto_alg_cra_name, -+ -+ DM_cipher_tfm_cit_mode, -+ -+ DM_stripe_c_stripes, -+ DM_stripe_c_chunk_mask, -+ DM_stripe_c_stripe, -+ -+ DM_stripe_dev, -+ -+ DM_dm_snapshot_origin, -+ DM_dm_snapshot_cow, -+ DM_dm_snapshot_chunk_size, -+ DM_dm_snapshot_valid, -+ DM_dm_snapshot_type, -+ -+ NR_DMINFO_MEMBER_TABLE_ENTRY -+}; -+ -+/* Size-offset array for structure's member */ -+static struct dminfo_member_entry { -+ unsigned long offset; -+ unsigned long size; -+} mbr_ary[NR_DMINFO_MEMBER_TABLE_ENTRY]; -+ -+/* -+ * Macros to retrieve data of given structure's member -+ * -+ * Macros except for the MSG assume 'struct s' is at 'addr' -+ */ -+#define MSG(msg, s, m) msg ": " s "." m -+ -+/* Initialize the size-offset array */ -+#define INIT_MBR_TABLE(s, m) \ -+ do { \ -+ if (!mbr_ary[DM_##s##_##m].size) { \ -+ mbr_ary[DM_##s##_##m].offset = MEMBER_OFFSET("struct " #s, #m); \ -+ mbr_ary[DM_##s##_##m].size = MEMBER_SIZE("struct " #s, #m); \ -+ } \ -+ } while (0) -+ -+/* -+ * Store the data of member m in ret. -+ * Initialize the size-offset array for the member m if needed. -+ */ -+#define GET_VALUE(addr, s, m, ret) \ -+ do { \ -+ INIT_MBR_TABLE(s, m); \ -+ if (sizeof(ret) < mbr_ary[DM_##s##_##m].size) \ -+ fprintf(fp, "%s\n", \ -+ MSG("ERROR: GET_VALUE size_check", #s, #m)); \ -+ readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &ret, \ -+ mbr_ary[DM_##s##_##m].size, MSG("GET_VALUE", #s, #m), \ -+ FAULT_ON_ERROR);\ -+ } while (0) -+ -+/* -+ * Store the address of member m in ret. -+ * Initialize the size-offset array for the member m if needed. 
-+ */ -+#define GET_ADDR(addr, s, m, ret) \ -+ do { \ -+ INIT_MBR_TABLE(s, m); \ -+ ret = addr + mbr_ary[DM_##s##_##m].offset; \ -+ } while (0) -+ -+/* -+ * Store the string data of member m in ret. -+ * Initialize the size-offset array for the member m if needed. -+ */ -+#define GET_STR(addr, s, m, ret, len) \ -+ do { \ -+ INIT_MBR_TABLE(s, m); \ -+ if (!read_string(addr + mbr_ary[DM_##s##_##m].offset, ret, len - 1)) \ -+ fprintf(fp, "%s\n", MSG("ERROR: GET_STR", #s, #m)); \ -+ } while (0) -+ -+/* -+ * Store the string data pointed by member m in ret. -+ * Initialize the size-offset array for the member m if needed. -+ */ -+#define GET_PTR_STR(addr, s, m, ret, len) \ -+ do { \ -+ unsigned long tmp; \ -+ INIT_MBR_TABLE(s, m); \ -+ readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &tmp, \ -+ mbr_ary[DM_##s##_##m].size, MSG("GET_PTR_STR", #s, #m),\ -+ FAULT_ON_ERROR);\ -+ if (!read_string(tmp, ret, len - 1)) \ -+ fprintf(fp, "%s\n", MSG("ERROR: GET_PTR_STR", #s, #m));\ -+ } while (0) -+ -+/* -+ * Utility function/macro to walk the list -+ */ -+static unsigned long -+get_next_from_list_head(unsigned long addr) -+{ -+ unsigned long ret; -+ -+ readmem(addr + OFFSET(list_head_next), KVADDR, &ret, sizeof(void *), -+ MSG("get_next_from_list_head", "list_head", "next"), -+ FAULT_ON_ERROR); -+ -+ return ret; -+} -+ -+#define list_for_each(next, head, last) \ -+ for (next = get_next_from_list_head(head), last = 0UL; \ -+ next && next != head && next != last; \ -+ last = next, next = get_next_from_list_head(next)) -+ -+/* -+ * device-mapper target analyzer -+ * -+ * device-mapper has various target driver: linear, mirror, multipath, etc. -+ * Information specific to target is stored in its own way. -+ * Target-specific analyzer is provided for each target driver for this reason. 
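Before the analyzers, a note on the accessor macros: the GET_VALUE()/INIT_MBR_TABLE() helpers defined above boil down to a cached MEMBER_OFFSET()/MEMBER_SIZE() lookup followed by a readmem() of the dump. A minimal hand-expanded sketch for one member, assuming only the crash defs.h interfaces already used in this file (the helper name below is made up):

static unsigned long long
dm_target_begin(unsigned long target)
{
	static unsigned long offset, size;	/* cached once, as INIT_MBR_TABLE does */
	unsigned long long begin = 0;

	if (!size) {
		offset = MEMBER_OFFSET("struct dm_target", "begin");
		size = MEMBER_SIZE("struct dm_target", "begin");
	}
	if (sizeof(begin) < size)	/* mirrors the GET_VALUE size check */
		fprintf(fp, "ERROR: GET_VALUE size_check: dm_target.begin\n");
	readmem(target + offset, KVADDR, &begin, size,
		"GET_VALUE: dm_target.begin", FAULT_ON_ERROR);
	return begin;
}

Caching the offset/size pair per member is why the macros test mbr_ary[...].size first: repeated walks over dm structures then avoid re-querying the debuginfo on every access.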
-+ */ -+static struct dminfo_target_analyzer { -+ struct dminfo_target_analyzer *next; -+ char *target_name; -+ int (*ready) (void); /* returns true if analyzer is available */ -+ void (*show_table) (unsigned long); /* display table info */ -+ void (*show_status) (unsigned long); /* display status info */ -+ void (*show_queue) (unsigned long); /* display queued I/O info */ -+} analyzers_head; -+ -+static void -+dminfo_register_target_analyzer(struct dminfo_target_analyzer *ta) -+{ -+ ta->next = analyzers_head.next; -+ analyzers_head.next = ta; -+} -+ -+static struct -+dminfo_target_analyzer *find_target_analyzer(char *target_type) -+{ -+ struct dminfo_target_analyzer *ta; -+ -+ for (ta = analyzers_head.next; ta; ta = ta->next) -+ if (!strcmp(ta->target_name, target_type)) -+ return ta; -+ -+ return NULL; -+} -+ -+/* -+ * zero target -+ */ -+static int -+zero_ready(void) -+{ -+ return 1; -+} -+ -+static void -+zero_show_table(unsigned long target) -+{ -+ unsigned long long start, len; -+ -+ /* Get target information */ -+ GET_VALUE(target, dm_target, begin, start); -+ GET_VALUE(target, dm_target, len, len); -+ -+ fprintf(fp, " begin:%llu len:%llu", start, len); -+} -+ -+static void -+zero_show_status(unsigned long target) -+{ -+ /* zero target has no status */ -+ fprintf(fp, " No status info"); -+} -+ -+static void -+zero_show_queue(unsigned long target) -+{ -+ /* zero target has no queue */ -+ fprintf(fp, " No queue info"); -+} -+ -+static struct dminfo_target_analyzer zero_analyzer = { -+ .target_name = "zero", -+ .ready = zero_ready, -+ .show_table = zero_show_table, -+ .show_status = zero_show_status, -+ .show_queue = zero_show_queue -+}; -+ -+/* -+ * error target -+ */ -+static int -+error_ready(void) -+{ -+ return 1; -+} -+ -+static void -+error_show_table(unsigned long target) -+{ -+ unsigned long long start, len; -+ -+ /* Get target information */ -+ GET_VALUE(target, dm_target, begin, start); -+ GET_VALUE(target, dm_target, len, len); -+ -+ fprintf(fp, " begin:%llu len:%llu", start, len); -+} -+ -+static void -+error_show_status(unsigned long target) -+{ -+ /* error target has no status */ -+ fprintf(fp, " No status info"); -+} -+ -+static void -+error_show_queue(unsigned long target) -+{ -+ /* error target has no queue */ -+ fprintf(fp, " No queue info"); -+} -+ -+static struct dminfo_target_analyzer error_analyzer = { -+ .target_name = "error", -+ .ready = error_ready, -+ .show_table = error_show_table, -+ .show_status = error_show_status, -+ .show_queue = error_show_queue -+}; -+ -+/* -+ * linear target -+ */ -+static int -+linear_ready(void) -+{ -+ static int debuginfo = 0; -+ -+ if (debuginfo) -+ return 1; -+ -+ if (STRUCT_EXISTS("struct linear_c")) { -+ debuginfo = 1; -+ return 1; -+ } else -+ fprintf(fp, "No such struct info: linear_c"); -+ -+ return 0; -+} -+ -+static void -+linear_show_table(unsigned long target) -+{ -+ unsigned long lc, dm_dev; -+ unsigned long long start, len, offset; -+ char devt[BUFSIZE]; -+ -+ /* Get target information */ -+ GET_VALUE(target, dm_target, begin, start); -+ GET_VALUE(target, dm_target, len, len); -+ GET_VALUE(target, dm_target, private, lc); -+ GET_VALUE(lc, linear_c, dev, dm_dev); -+ GET_STR(dm_dev, dm_dev, name, devt, BUFSIZE); -+ GET_VALUE(lc, linear_c, start, offset); -+ -+ fprintf(fp, " begin:%llu len:%llu dev:%s offset:%llu", -+ start, len, devt, offset); -+} -+ -+static void -+linear_show_status(unsigned long target) -+{ -+ /* linear target has no status */ -+ fprintf(fp, " No status info"); -+} -+ -+static void 
-+linear_show_queue(unsigned long target) -+{ -+ /* linear target has no I/O queue */ -+ fprintf(fp, " No queue info"); -+} -+ -+static struct dminfo_target_analyzer linear_analyzer = { -+ .target_name = "linear", -+ .ready = linear_ready, -+ .show_table = linear_show_table, -+ .show_status = linear_show_status, -+ .show_queue = linear_show_queue -+}; -+ -+/* -+ * mirror target -+ */ -+static int -+mirror_ready(void) -+{ -+ static int debuginfo = 0; -+ -+ if (debuginfo) -+ return 1; -+ -+ if (STRUCT_EXISTS("struct mirror_set")) { -+ debuginfo = 1; -+ return 1; -+ } else -+ fprintf(fp, "No such struct info: mirror_set"); -+ -+ return 0; -+} -+ -+static void -+mirror_show_table(unsigned long target) -+{ -+ unsigned int i, nr_mir; -+ unsigned long ms, rh, log, log_type, mir_size, mir_head, mir, dm_dev; -+ unsigned long long offset; -+ char buf[BUFSIZE]; -+ -+ /* Get the address of struct mirror_set */ -+ GET_VALUE(target, dm_target, private, ms); -+ -+ /* Get the log-type name of the mirror_set */ -+ GET_ADDR(ms, mirror_set, rh, rh); -+ GET_VALUE(rh, region_hash, log, log); -+ GET_VALUE(log, dirty_log, type, log_type); -+ GET_PTR_STR(log_type, dirty_log_type, name, buf, BUFSIZE); -+ fprintf(fp, " log:%s", buf); -+ -+ /* -+ * Display information for each mirror disks. -+ * -+ * mir_head = mirror_set.mirror. -+ * This is the head of struct mirror array. -+ */ -+ fprintf(fp, " dev:"); -+ mir_size = STRUCT_SIZE("struct mirror"); -+ GET_ADDR(ms, mirror_set, mirror, mir_head); -+ GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir); -+ for (i = 0; i < nr_mir; i++) { -+ mir = mir_head + mir_size * i; /* Get next mirror */ -+ -+ /* Get the devt of the mirror disk */ -+ GET_VALUE(mir, mirror, dev, dm_dev); -+ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); -+ -+ /* Get the offset of the mirror disk */ -+ GET_VALUE(mir, mirror, offset, offset); -+ -+ fprintf(fp, "%s(%llu)%s", buf, offset, -+ i == nr_mir - 1 ? "" : ","); -+ } -+ if (i != nr_mir) -+ fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir); -+} -+ -+static void -+mirror_show_status(unsigned long target) -+{ -+ unsigned int i, nr_mir, synced, nr_error; -+ unsigned long ms, mir_size, mir_head, mir, dm_dev; -+ char buf[BUFSIZE]; -+ -+ /* Get the address of struct mirror_set */ -+ GET_VALUE(target, dm_target, private, ms); -+ -+ /* Get the status info of the mirror_set */ -+ GET_VALUE(ms, mirror_set, in_sync, synced); -+ fprintf(fp, " in_sync:%d", synced); -+ -+ /* -+ * Display information for each mirror disks. -+ * -+ * mir_head = mirror_set.mirror. -+ * This is the head of struct mirror array. -+ */ -+ fprintf(fp, " dev:"); -+ mir_size = STRUCT_SIZE("struct mirror"); -+ GET_ADDR(ms, mirror_set, mirror, mir_head); -+ GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir); -+ for (i = 0; i < nr_mir; i++) { -+ mir = mir_head + mir_size * i; /* Get next mirror */ -+ -+ /* Get the devt of the mirror disk */ -+ GET_VALUE(mir, mirror, dev, dm_dev); -+ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); -+ -+ /* Get the offset of the mirror disk */ -+ GET_VALUE(mir, mirror, error_count, nr_error); -+ -+ fprintf(fp, "%s(%c,%d)%s", buf, nr_error ? 'D' : 'A', nr_error, -+ i == nr_mir - 1 ? 
"" : ","); -+ } -+ if (i != nr_mir) -+ fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir); -+} -+ -+static void -+mirror_show_queue(unsigned long target) -+{ -+ unsigned long ms, rlist, wlist, rhead, whead; -+ unsigned long rh, quis_head, rcov_head, quis_next, rcov_next; -+ -+ /* Get the address of struct mirror_set */ -+ GET_VALUE(target, dm_target, private, ms); -+ -+ /* Get the address of queued I/O lists in struct mirror_set */ -+ GET_ADDR(ms, mirror_set, reads, rlist); -+ GET_ADDR(ms, mirror_set, writes, wlist); -+ -+ /* Get the head of queued I/O lists */ -+ GET_VALUE(rlist, bio_list, head, rhead); -+ GET_VALUE(wlist, bio_list, head, whead); -+ fprintf(fp, " %s", rhead ? "reads" : "(reads)"); -+ fprintf(fp, " %s", whead ? "writes" : "(writes)"); -+ -+ /* Get the address of the struct region_hash */ -+ GET_ADDR(ms, mirror_set, rh, rh); -+ -+ /* Get the address of recover region lists in struct region_hash */ -+ GET_ADDR(rh, region_hash, quiesced_regions, quis_head); -+ GET_ADDR(rh, region_hash, recovered_regions, rcov_head); -+ -+ /* Get the head of recover region lists */ -+ quis_next = get_next_from_list_head(quis_head); -+ rcov_next = get_next_from_list_head(rcov_head); -+ -+ fprintf(fp, " %s", quis_next != quis_head ? "quiesced" : "(quiesced)"); -+ fprintf(fp, " %s", rcov_next != rcov_head ? "recovered" : "(recovered)"); -+} -+ -+static struct dminfo_target_analyzer mirror_analyzer = { -+ .target_name = "mirror", -+ .ready = mirror_ready, -+ .show_table = mirror_show_table, -+ .show_status = mirror_show_status, -+ .show_queue = mirror_show_queue -+}; -+ -+/* -+ * multipath target -+ */ -+static int -+multipath_ready(void) -+{ -+ static int debuginfo = 0; -+ -+ if (debuginfo) -+ return 1; -+ -+ if (STRUCT_EXISTS("struct multipath")) { -+ debuginfo = 1; -+ return 1; -+ } else -+ fprintf(fp, "No such struct info: multipath"); -+ -+ return 0; -+} -+ -+static void -+multipath_show_table(unsigned long target) -+{ -+ int i, j; -+ unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths; -+ unsigned long mp, hwh, hwh_type, ps, ps_type, path, dm_dev; -+ unsigned long pg_head, pg_next, pg_last; -+ unsigned long path_head, path_next, path_last; -+ char name[BUFSIZE]; -+ -+ /* Get the address of struct multipath */ -+ GET_VALUE(target, dm_target, private, mp); -+ -+ /* Get features information */ -+ GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path); -+ -+ /* Get the hardware-handler information */ -+ GET_ADDR(mp, multipath, hw_handler, hwh); -+ GET_VALUE(hwh, hw_handler, type, hwh_type); -+ if (hwh_type) -+ GET_PTR_STR(hwh_type, hw_handler_type, name, name, BUFSIZE); -+ else -+ strcpy(name, "none"); -+ -+ /* Get the number of priority groups */ -+ GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs); -+ -+ fprintf(fp, " queue_if_no_path:%d hwh:%s nr_pgs:%d\n", -+ queue_if_no_path, name, nr_pgs); -+ -+ /* Display information for each priority group */ -+ fprintf(fp, " %-2s %-13s %-8s %s", -+ "PG", "PATH_SELECTOR", "NR_PATHS", "PATHS"); -+ GET_ADDR(mp, multipath, priority_groups, pg_head); -+ i = 0; -+ list_for_each (pg_next, pg_head, pg_last) { -+ /* pg_next == struct priority_group */ -+ -+ /* Get the index of the priority group */ -+ GET_VALUE(pg_next, priority_group, pg_num, pg_id); -+ -+ /* Get the name of path selector */ -+ GET_ADDR(pg_next, priority_group, ps, ps); -+ GET_VALUE(ps, path_selector, type, ps_type); -+ GET_PTR_STR(ps_type, path_selector_type, name, name, BUFSIZE); -+ -+ /* Get the number of paths in the priority group */ -+ GET_VALUE(pg_next, 
priority_group, nr_pgpaths, nr_paths); -+ -+ fprintf(fp, "\n %-2d %-13s %-8d ", pg_id, name, nr_paths); -+ -+ /* Display information for each path */ -+ GET_ADDR(pg_next, priority_group, pgpaths, path_head); -+ j = 0; -+ list_for_each (path_next, path_head, path_last) { -+ /* path_next == struct pgpath */ -+ -+ /* Get the devt of the pgpath */ -+ GET_ADDR(path_next, pgpath, path, path); -+ GET_VALUE(path, path, dev, dm_dev); -+ GET_STR(dm_dev, dm_dev, name, name, BUFSIZE); -+ -+ fprintf(fp, " %s", name); -+ j++; -+ } -+ if (j != nr_paths) -+ fprintf(fp, " ERROR: paths are less than nr_paths:%d", -+ nr_paths); -+ i++; -+ } -+ if (i != nr_pgs) -+ fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs); -+} -+ -+static void -+multipath_show_status(unsigned long target) -+{ -+ int i, j; -+ unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths; -+ unsigned int bypassed_pg, path_active, nr_fails; -+ unsigned long mp, hwh, hwh_type, cur_pg, path, dm_dev; -+ unsigned long pg_head, pg_next, pg_last; -+ unsigned long path_head, path_next, path_last; -+ char buf[BUFSIZE], path_status; -+ -+ /* Get the address of struct multipath */ -+ GET_VALUE(target, dm_target, private, mp); -+ -+ /* Get features information */ -+ GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path); -+ -+ /* Get the hardware-handler information */ -+ GET_ADDR(mp, multipath, hw_handler, hwh); -+ GET_VALUE(hwh, hw_handler, type, hwh_type); -+ if (hwh_type) -+ GET_PTR_STR(hwh_type, hw_handler_type, name, buf, BUFSIZE); -+ else -+ strcpy(buf, "none"); -+ -+ /* Get the number of priority groups */ -+ GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs); -+ -+ fprintf(fp, " queue_if_no_path:%d hwh:%s nr_pgs:%d\n", -+ queue_if_no_path, buf, nr_pgs); -+ -+ /* Display information for each priority group */ -+ fprintf(fp, " %-2s %-9s %-8s %s", -+ "PG", "PG_STATUS", "NR_PATHS", "PATHS"); -+ GET_ADDR(mp, multipath, priority_groups, pg_head); -+ i = 0; -+ list_for_each (pg_next, pg_head, pg_last) { -+ /* pg_next == struct priority_group */ -+ -+ /* Get the index of the priority group */ -+ GET_VALUE(pg_next, priority_group, pg_num, pg_id); -+ -+ /* Get the status of the priority group */ -+ GET_VALUE(pg_next, priority_group, bypassed, bypassed_pg); -+ if (bypassed_pg) -+ strcpy(buf, "disabled"); -+ else { -+ GET_VALUE(mp, multipath, current_pg, cur_pg); -+ if (pg_next == cur_pg) -+ strcpy(buf, "active"); -+ else -+ strcpy(buf, "enabled"); -+ } -+ -+ /* Get the number of paths in the priority group */ -+ GET_VALUE(pg_next, priority_group, nr_pgpaths, nr_paths); -+ -+ fprintf(fp, "\n %-2d %-9s %-8d ", pg_id, buf, nr_paths); -+ -+ /* Display information for each path */ -+ GET_ADDR(pg_next, priority_group, pgpaths, path_head); -+ j = 0; -+ list_for_each (path_next, path_head, path_last) { -+ /* path_next == struct pgpath */ -+ -+ /* Get the devt of the pgpath */ -+ GET_ADDR(path_next, pgpath, path, path); -+ GET_VALUE(path, path, dev, dm_dev); -+ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); -+ -+ /* Get the status of the path */ -+ GET_VALUE(path, path, is_active, path_active); -+ GET_VALUE(path_next, pgpath, fail_count, nr_fails); -+ path_status = path_active ? 
'A' : 'F'; -+ -+ fprintf(fp, " %s(%c,%u)", buf, path_status, nr_fails); -+ j++; -+ } -+ if (j != nr_paths) -+ fprintf(fp, " ERROR: paths are less than nr_paths:%d", -+ nr_paths); -+ i++; -+ } -+ if (i != nr_pgs) -+ fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs); -+} -+ -+static void -+multipath_show_queue(unsigned long target) -+{ -+ unsigned int queue_size; -+ unsigned long mp; -+ -+ /* Get the address of struct multipath */ -+ GET_VALUE(target, dm_target, private, mp); -+ -+ /* Get the size of queued I/Os in this 'target' */ -+ GET_VALUE(mp, multipath, queue_size, queue_size); -+ -+ fprintf(fp, " queue_size:%d", queue_size); -+} -+ -+static struct dminfo_target_analyzer multipath_analyzer = { -+ .target_name = "multipath", -+ .ready = multipath_ready, -+ .show_table = multipath_show_table, -+ .show_status = multipath_show_status, -+ .show_queue = multipath_show_queue -+}; -+ -+/* -+ * crypt target -+ */ -+static int -+crypt_ready(void) -+{ -+ static int debuginfo = 0; -+ -+ if (debuginfo) -+ return 1; -+ -+ if (STRUCT_EXISTS("struct crypt_config")) { -+ debuginfo = 1; -+ return 1; -+ } else -+ fprintf(fp, "No such struct info: crypt_config"); -+ -+ return 0; -+} -+ -+#define DMINFO_CRYPTO_TFM_MODE_ECB 0x00000001 -+#define DMINFO_CRYPTO_TFM_MODE_CBC 0x00000002 -+ -+static void -+crypt_show_table(unsigned long target) -+{ -+ int i, cit_mode, key_size; -+ unsigned long cc, tfm, crt_alg, cipher, iv_mode, dm_dev; -+ char buf[BUFSIZE], *chainmode; -+ -+ /* Get the address of struct crypt_config */ -+ GET_VALUE(target, dm_target, private, cc); -+ -+ /* Get the cipher name of the crypt_tfm */ -+ GET_VALUE(cc, crypt_config, tfm, tfm); -+ GET_VALUE(tfm, crypto_tfm, __crt_alg, crt_alg); -+ GET_STR(crt_alg, crypto_alg, cra_name, buf, BUFSIZE); -+ fprintf(fp, " type:%s", buf); -+ -+ /* Get the cit_mode of the crypt_tfm */ -+ GET_ADDR(tfm, crypto_tfm, crt_u, cipher); -+ GET_VALUE(cipher, cipher_tfm, cit_mode, cit_mode); -+ -+ if (MEMBER_EXISTS("struct crypt_config", "iv_mode")) { -+ if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC) -+ chainmode = "cbc"; -+ else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) -+ chainmode = "ecb"; -+ else -+ chainmode = "unknown"; -+ -+ /* Get the iv_mode of the crypt_config */ -+ GET_VALUE(cc, crypt_config, iv_mode, iv_mode); -+ if (iv_mode) { -+ GET_PTR_STR(cc, crypt_config, iv_mode, buf, BUFSIZE); -+ fprintf(fp, "-%s-%s", chainmode, buf); -+ } else -+ fprintf(fp, "-%s", chainmode); -+ -+ } else { -+ /* Compatibility mode for old dm-crypt cipher strings */ -+ if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC) -+ chainmode = "plain"; -+ else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) -+ chainmode = "ecb"; -+ else -+ chainmode = "unknown"; -+ -+ fprintf(fp, "-%s", chainmode); -+ } -+ -+ /* Get the devt of the crypt_config */ -+ GET_VALUE(cc, crypt_config, dev, dm_dev); -+ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); -+ fprintf(fp, " dev:%s", buf); -+ -+ /* -+ * Get the key of the crypt_config. 
-+ */ -+ GET_VALUE(cc, crypt_config, key_size, key_size); -+ GET_STR(cc, crypt_config, key, buf, MIN(key_size + 1, BUFSIZE)); -+ fprintf(fp, " key:"); -+ for (i = 0; i < key_size; i++) -+ fprintf(fp, "%02x", (unsigned char)buf[i]); -+} -+ -+static void -+crypt_show_status(unsigned long target) -+{ -+ /* crypt target has no status */ -+ fprintf(fp, " No status info"); -+} -+ -+static void -+crypt_show_queue(unsigned long target) -+{ -+ /* crypt target has no queue */ -+ fprintf(fp, " No queue info"); -+} -+ -+static struct dminfo_target_analyzer crypt_analyzer = { -+ .target_name = "crypt", -+ .ready = crypt_ready, -+ .show_table = crypt_show_table, -+ .show_status = crypt_show_status, -+ .show_queue = crypt_show_queue -+}; -+ -+/* -+ * stripe target -+ */ -+static int -+stripe_ready(void) -+{ -+ static int debuginfo = 0; -+ -+ if (debuginfo) -+ return 1; -+ -+ if (STRUCT_EXISTS("struct stripe_c")) { -+ debuginfo = 1; -+ return 1; -+ } else -+ fprintf(fp, "No such struct info: stripe_c"); -+ -+ return 0; -+} -+ -+static void -+stripe_show_table(unsigned long target) -+{ -+ unsigned int i, n_stripe; -+ unsigned long sc, stripe_size, s, head, dm_dev; -+ unsigned long long mask; -+ char buf[BUFSIZE]; -+ -+ /* Get the address of struct stripe_c */ -+ GET_VALUE(target, dm_target, private, sc); -+ -+ /* Get the chunk_size of the stripe_c */ -+ GET_VALUE(sc, stripe_c, chunk_mask, mask); -+ fprintf(fp, " chunk_size:%llu", mask + 1); -+ -+ /* -+ * Display the information of each stripe disks. -+ * -+ * head = stripe_c.stripe. -+ * This is the head of struct stripe array. -+ */ -+ stripe_size = STRUCT_SIZE("struct stripe"); -+ GET_ADDR(sc, stripe_c, stripe, head); -+ GET_VALUE(sc, stripe_c, stripes, n_stripe); -+ fprintf(fp, " dev:"); -+ for (i = 0; i < n_stripe; i++) { -+ s = head + stripe_size * i; /* Get next stripe */ -+ -+ /* Get the devt of the stripe disk */ -+ GET_VALUE(s, stripe, dev, dm_dev); -+ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); -+ -+ fprintf(fp, "%s%s", buf, i == n_stripe - 1 ? 
"" : ","); -+ } -+ if (i != n_stripe) -+ fprintf(fp, " ERROR: dev are less than n_stripe:%d", n_stripe); -+} -+ -+static void -+stripe_show_status(unsigned long target) -+{ -+ /* stripe target has no status */ -+ fprintf(fp, " No status info"); -+} -+ -+static void -+stripe_show_queue(unsigned long target) -+{ -+ /* stripe target has no queue */ -+ fprintf(fp, " No queue info"); -+} -+ -+static struct dminfo_target_analyzer stripe_analyzer = { -+ .target_name = "striped", -+ .ready = stripe_ready, -+ .show_table = stripe_show_table, -+ .show_status = stripe_show_status, -+ .show_queue = stripe_show_queue -+}; -+ -+/* -+ * snapshot target -+ */ -+static int -+snapshot_ready(void) -+{ -+ static int debuginfo = 0; -+ -+ if (debuginfo) -+ return 1; -+ -+ if (STRUCT_EXISTS("struct dm_snapshot")) { -+ debuginfo = 1; -+ return 1; -+ } else -+ fprintf(fp, "No such struct info: dm_snapshot"); -+ -+ return 0; -+} -+ -+static void -+snapshot_show_table(unsigned long target) -+{ -+ unsigned long snap, orig_dev, cow_dev; -+ unsigned long long chunk_size; -+ char orig_name[BUFSIZE], cow_name[BUFSIZE], type; -+ -+ /* Get the address of struct dm_snapshot */ -+ GET_VALUE(target, dm_target, private, snap); -+ -+ /* Get snapshot parameters of the dm_snapshot */ -+ GET_VALUE(snap, dm_snapshot, origin, orig_dev); -+ GET_STR(orig_dev, dm_dev, name, orig_name, BUFSIZE); -+ GET_VALUE(snap, dm_snapshot, cow, cow_dev); -+ GET_STR(cow_dev, dm_dev, name, cow_name, BUFSIZE); -+ GET_VALUE(snap, dm_snapshot, type, type); -+ GET_VALUE(snap, dm_snapshot, chunk_size, chunk_size); -+ -+ fprintf(fp, " orig:%s cow:%s type:%c chunk_size:%llu", -+ orig_name, cow_name, type, chunk_size); -+} -+ -+static void -+snapshot_show_status(unsigned long target) -+{ -+ int valid; -+ unsigned long snap; -+ -+ /* Get the address of struct dm_snapshot */ -+ GET_VALUE(target, dm_target, private, snap); -+ -+ /* Get snapshot parameters of the dm_snapshot */ -+ GET_VALUE(snap, dm_snapshot, valid, valid); -+ -+ fprintf(fp, " vaild:%d", valid); -+} -+ -+static void -+snapshot_show_queue(unsigned long target) -+{ -+ fprintf(fp, " No queue info"); -+} -+ -+static struct dminfo_target_analyzer snapshot_analyzer = { -+ .target_name = "snapshot", -+ .ready = snapshot_ready, -+ .show_table = snapshot_show_table, -+ .show_status = snapshot_show_status, -+ .show_queue = snapshot_show_queue -+}; -+ -+/* -+ * snapshot-origin target -+ */ -+static int -+origin_ready(void) -+{ -+ return 1; -+} -+ -+static void -+origin_show_table(unsigned long target) -+{ -+ unsigned long dm_dev; -+ char buf[BUFSIZE]; -+ -+ /* Get the name of the struct dm_dev */ -+ GET_VALUE(target, dm_target, private, dm_dev); -+ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); -+ -+ fprintf(fp, " orig_dev:%s", buf); -+} -+ -+static void -+origin_show_status(unsigned long target) -+{ -+ /* snapshot-origin target has no status */ -+ fprintf(fp, " No status info"); -+} -+ -+static void -+origin_show_queue(unsigned long target) -+{ -+ /* snapshot-origin target has no queue */ -+ fprintf(fp, " No queue info"); -+} -+ -+static struct dminfo_target_analyzer snapshot_origin_analyzer = { -+ .target_name = "snapshot-origin", -+ .ready = origin_ready, -+ .show_table = origin_show_table, -+ .show_status = origin_show_status, -+ .show_queue = origin_show_queue -+}; -+ -+/* -+ * Core part of dminfo -+ */ -+#define DMINFO_LIST 0 -+#define DMINFO_DEPS 1 -+#define DMINFO_TABLE 2 -+#define DMINFO_STATUS 3 -+#define DMINFO_QUEUE 4 -+ -+static int -+dm_core_ready(void) -+{ -+ static int debuginfo = 0; -+ -+ if 
(debuginfo) -+ return 1; -+ -+ if (STRUCT_EXISTS("struct hash_cell")) { -+ debuginfo = 1; -+ return 1; -+ } else -+ fprintf(fp, "No such struct info: hash_cell\n"); -+ -+ return 0; -+} -+ -+/* Display dependency information of the 'table' */ -+static void -+dminfo_show_deps(unsigned long table) -+{ -+ int major, minor, count; -+ unsigned long head, next, last, dev, bdev; -+ char buf[BUFSIZE]; -+ -+ /* head = dm_table.devices */ -+ GET_ADDR(table, dm_table, devices, head); -+ -+ fprintf(fp, " %-3s %-3s %-16s %-5s %s\n", -+ "MAJ", "MIN", "GENDISK", "COUNT", "DEVNAME"); -+ -+ list_for_each (next, head, last) { -+ /* Get dependency information. (next == struct *dm_dev) */ -+ GET_VALUE(next, dm_dev, count, count); -+ GET_VALUE(next, dm_dev, bdev, bdev); -+ GET_VALUE(bdev, block_device, bd_disk, dev); -+ GET_VALUE(dev, gendisk, major, major); -+ GET_VALUE(dev, gendisk, first_minor, minor); -+ GET_STR(dev, gendisk, disk_name, buf, BUFSIZE); -+ -+ fprintf(fp, " %-3d %-3d %-16lx %-5d %s\n", -+ major, minor, dev, count, buf); -+ } -+} -+ -+/* -+ * Display target specific information in the 'table', if the target -+ * analyzer is registered and available. -+ */ -+static void -+dminfo_show_details(unsigned long table, unsigned int num_targets, int info_type) -+{ -+ unsigned int i; -+ unsigned long head, target_size, target, target_type; -+ struct dminfo_target_analyzer *ta; -+ char buf[BUFSIZE]; -+ -+ /* -+ * head = dm_table.targets. -+ * This is the head of struct dm_target array. -+ */ -+ GET_VALUE(table, dm_table, targets, head); -+ target_size = STRUCT_SIZE("struct dm_target"); -+ -+ fprintf(fp, " %-16s %-11s %s\n", -+ "TARGET", "TARGET_TYPE", "PRIVATE_DATA"); -+ -+ for (i = 0; i < num_targets; i++, fprintf(fp, "\n")) { -+ target = head + target_size * i; /* Get next target */ -+ -+ /* Get target information */ -+ GET_VALUE(target, dm_target, type, target_type); -+ GET_PTR_STR(target_type, target_type, name, buf, BUFSIZE); -+ -+ fprintf(fp, " %-16lx %-11s", target, buf); -+ -+ if (!(ta = find_target_analyzer(buf)) || !ta->ready -+ || !ta->ready()) -+ continue; -+ -+ switch (info_type) { -+ case DMINFO_TABLE: -+ if (ta->show_table) -+ ta->show_table(target); -+ break; -+ case DMINFO_STATUS: -+ if (ta->show_status) -+ ta->show_status(target); -+ break; -+ case DMINFO_QUEUE: -+ if (ta->show_queue) -+ ta->show_queue(target); -+ break; -+ default: -+ break; -+ } -+ } -+ -+ if (i != num_targets) -+ fprintf(fp, " ERROR: targets are less than num_targets:%d", -+ num_targets); -+} -+ -+/* -+ * Display lists (and detail information if specified) of existing -+ * dm devices. 
-+ */ -+static void -+dminfo_show_list(int additional_info) -+{ -+ int i, major, minor, array_len; -+ unsigned int num_targets; -+ unsigned long _name_buckets, head, next, last, md, dev, table; -+ char buf[BUFSIZE]; -+ -+ _name_buckets = symbol_value("_name_buckets"); -+ array_len = get_array_length("_name_buckets", NULL, 0); -+ -+ if (additional_info == DMINFO_LIST) -+ fprintf(fp, "%-3s %-3s %-16s %-16s %-7s %s\n", -+ "MAJ", "MIN", "MAP_DEV", "DM_TABLE", -+ "TARGETS", "MAPNAME"); -+ -+ for (i = 0; i < array_len; i++) { -+ /* head = _name_buckets[i] */ -+ head = _name_buckets + (i * SIZE(list_head)); -+ -+ list_for_each (next, head, last) { /* next == hash_cell */ -+ /* Get device and table information */ -+ GET_PTR_STR(next, hash_cell, name, buf, BUFSIZE); -+ GET_VALUE(next, hash_cell, md, md); -+ GET_VALUE(md, mapped_device, disk, dev); -+ GET_VALUE(dev, gendisk, major, major); -+ GET_VALUE(dev, gendisk, first_minor, minor); -+ GET_VALUE(md, mapped_device, map, table); -+ GET_VALUE(table, dm_table, num_targets, num_targets); -+ -+ if (additional_info != DMINFO_LIST) -+ fprintf(fp, "%-3s %-3s %-16s %-16s %-7s %s\n", -+ "MAJ", "MIN", "MAP_DEV", "DM_TABLE", -+ "TARGETS", "MAPNAME"); -+ -+ fprintf(fp, "%-3d %-3d %-16lx %-16lx %-7d %s\n", -+ major, minor, md, table, num_targets, buf); -+ -+ switch(additional_info) { -+ case DMINFO_DEPS: -+ dminfo_show_deps(table); -+ break; -+ case DMINFO_TABLE: -+ case DMINFO_STATUS: -+ case DMINFO_QUEUE: -+ dminfo_show_details(table, num_targets, -+ additional_info); -+ break; -+ default: -+ break; -+ } -+ -+ if (additional_info != DMINFO_LIST) -+ fprintf(fp, "\n"); -+ } -+ } -+} -+ -+/* -+ * Display the original bio information for the 'bio'. -+ * If the 'bio' is for dm devices, the original bio information is pointed -+ * by bio.bi_private as struct target_io. 
-+ */ -+static void -+dminfo_show_bio(unsigned long bio) -+{ -+ int major, minor; -+ unsigned long target_io, dm_io, dm_bio, md, dev; -+ char buf[BUFSIZE]; -+ -+ /* Get original bio and device information */ -+ GET_VALUE(bio, bio, bi_private, target_io); -+ GET_VALUE(target_io, target_io, io, dm_io); -+ GET_VALUE(dm_io, dm_io, bio, dm_bio); -+ GET_VALUE(dm_io, dm_io, md, md); -+ GET_VALUE(md, mapped_device, disk, dev); -+ GET_VALUE(dev, gendisk, major, major); -+ GET_VALUE(dev, gendisk, first_minor, minor); -+ GET_STR(dev, gendisk, disk_name, buf, BUFSIZE); -+ -+ fprintf(fp, "%-16s %-3s %-3s %-16s %s\n", -+ "DM_BIO_ADDRESS", "MAJ", "MIN", "MAP_DEV", "DEVNAME"); -+ fprintf(fp, "%-16lx %-3d %-3d %-16lx %s\n", -+ dm_bio, major, minor, md, buf); -+} -+ -+static void -+cmd_dminfo(void) -+{ -+ int c, additional_info = DMINFO_LIST; -+ unsigned long bio; -+ -+ if (!dm_core_ready()) -+ return; -+ -+ /* Parse command line option */ -+ while ((c = getopt(argcnt, args, "b:dlqst")) != EOF) { -+ switch(c) -+ { -+ case 'b': -+ bio = stol(optarg, FAULT_ON_ERROR, NULL); -+ dminfo_show_bio(bio); -+ return; -+ case 'd': -+ additional_info = DMINFO_DEPS; -+ break; -+ case 'l': -+ additional_info = DMINFO_LIST; -+ break; -+ case 'q': -+ additional_info = DMINFO_QUEUE; -+ break; -+ case 's': -+ additional_info = DMINFO_STATUS; -+ break; -+ case 't': -+ additional_info = DMINFO_TABLE; -+ break; -+ default: -+ argerrs++; -+ break; -+ } -+ } -+ -+ if (argerrs) -+ cmd_usage(pc->curcmd, SYNOPSIS); -+ -+ dminfo_show_list(additional_info); -+} -+ -+/* -+ * dminfo help -+ */ -+static char *help_dminfo[] = { -+ "dminfo", /* command name */ -+ "device mapper (dm) information", /* short description */ -+ "[-b bio | -d | -l | -q | -s | -t]", /* argument synopsis */ -+ " This command displays information about device-mapper mapped ", -+ " devices (dm devices).", -+ " If no argument is entered, displays lists of existing dm devices.", -+ " It's same as -l option.", -+ "", -+ " -b bio displays the information of the dm device which the bio", -+ " is submitted in. If the bio isn't for dm devices,", -+ " results will be error.", -+ " -d displays dependency information for existing dm devices.", -+ " -l displays lists of existing dm devices.", -+ " -q displays queued I/O information for each target of", -+ " existing dm devices.", -+ " -s displays status information for each target of existing", -+ " dm devices.", -+ " -t displays table information for each target of existing", -+ " dm devices.", -+ "", -+ "EXAMPLE", -+ " Display lists of dm devices. \"MAP_DEV\" is the address of the", -+ " struct mapped_device. \"DM_TABLE\" is the address of the struct", -+ " dm_table. \"TARGETS\" is the number of targets which are in", -+ " the struct dm_table.", -+ "", -+ " %s> dminfo", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 8 c4866c80 c4866280 1 vg0-snap0", -+ " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", -+ " 253 0 c4840380 c4841880 1 mp0", -+ " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", -+ " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", -+ " 253 4 d441e280 c919ed80 1 dummy1", -+ " 253 3 f5dc4280 cba81d80 1 dummy0", -+ " 253 2 f7c53180 c4866180 1 vg0-lv0", -+ " 253 1 f746d280 f746cd80 1 mp0p1", -+ "", -+ " Display the dm device information which the bio is submitted in.", -+ " The bio (ceacee80) is a clone of the bio (ceacee00) which is", -+ " submitted in the dm-3 (dummy0). 
And the bio (ceacee00) is a clone", -+ " of the bio (ceaced80) which is submitted in the dm-4 (dummy1), too.", -+ " The bio (ceaced80) is the original bio.", -+ "", -+ " %s> dminfo -b ceacee80", -+ " DM_BIO_ADDRESS MAJ MIN MAP_DEV DEVNAME", -+ " ceacee00 253 3 f5dc4280 dm-3", -+ " crash> dminfo -b ceacee00", -+ " DM_BIO_ADDRESS MAJ MIN MAP_DEV DEVNAME", -+ " ceaced80 253 4 d441e280 dm-4", -+ " crash> dminfo -b ceaced80", -+ " dminfo: invalid kernel virtual address: 64 type: \"GET_VALUE: dm_io.bio\"", -+ "", -+ " Display dependency information for each target.", -+ " The vg0-snap0 depends on thd dm-6 (vg0-lv0-real) and the dm-7", -+ " (vg0-snap0-cow)", -+ "", -+ " %s> dminfo -d", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 8 c4866c80 c4866280 1 vg0-snap0", -+ " MAJ MIN GENDISK COUNT DEVNAME", -+ " 253 7 c4866980 1 dm-7", -+ " 253 6 f6a04280 1 dm-6", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", -+ " MAJ MIN GENDISK COUNT DEVNAME", -+ " 8 0 f7f24c80 1 sda", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", -+ " MAJ MIN GENDISK COUNT DEVNAME", -+ " 8 0 f7f24c80 1 sda", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 2 f7c53180 c4866180 1 vg0-lv0", -+ " MAJ MIN GENDISK COUNT DEVNAME", -+ " 253 6 f6a04280 1 dm-6", -+ "", -+ " Display queued I/O information for each target.", -+ " The information is displayed under the \"PRIVATE_DATA\" column.", -+ "", -+ " %s> dminfo -q", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8961080 mirror (reads) (writes) (quiesced) (recovered)", -+ "", -+ " --------------------------------------------------------------", -+ " \"reads/writes\" are members of the struct mirror_set, and", -+ " \"quiesced/recovered\" are members of the struct region_hash.", -+ " If the list is empty, the member is bracketed by \"()\".", -+ " --------------------------------------------------------------", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 0 c4840380 c4841880 1 mp0", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8802080 multipath queue_size:0", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 1 f746d280 f746cd80 1 mp0p1", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8821080 linear No queue info", -+ "", -+ " Display status information for each target.", -+ " The information is displayed under the \"PRIVATE_DATA\" column.", -+ "", -+ " %s> dminfo -s", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 0 c4840380 c4841880 1 mp0", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8802080 multipath queue_if_no_path:0 hwh:none nr_pgs:1", -+ " PG PG_STATUS NR_PATHS PATHS", -+ " 1 active 2 8:16(A,0) 8:32(A,0)", -+ "", -+ " --------------------------------------------------------------", -+ " Format of \"PATHS\": :(,)", -+ " Status: A:active, F:faulty", -+ " Fail_count: the value of the struct pgpath.fail_count", -+ " --------------------------------------------------------------", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8961080 mirror in_sync:1 dev:8:16(A,0),8:32(A,0)", -+ "", -+ " --------------------------------------------------------------", -+ " Format of \"dev\": :(,)", -+ " Status: A:active, D:degraded", -+ " Error_count: the value of the struct mirror.error_count", -+ " 
--------------------------------------------------------------", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 1 f746d280 f746cd80 1 mp0p1", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8821080 linear No status info", -+ "", -+ " Display table information for each target.", -+ " The information is displayed under the \"PRIVATE_DATA\" column.", -+ "", -+ " %s> dminfo -t", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 8 c4866c80 c4866280 1 vg0-snap0", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f89b4080 snapshot orig:253:6 cow:253:7 type:P chunk_size:16", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f890f080 linear begin:0 len:204800 dev:8:5 offset:384", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 0 c4840380 c4841880 1 mp0", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8802080 multipath queue_if_no_path:0 hwh:none nr_pgs:1", -+ " PG PATH_SELECTOR NR_PATHS PATHS", -+ " 1 round-robin 2 8:16 8:32", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8961080 mirror log:core dev:8:16(0),8:32(0)", -+ "", -+ " --------------------------------------------------------------", -+ " Format of \"dev\": :()", -+ " Offset: the value of the struct mirror.offset", -+ " --------------------------------------------------------------", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f899d080 linear begin:0 len:8192 dev:8:5 offset:205184", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 2 f7c53180 c4866180 1 vg0-lv0", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8bbc080 snapshot-origin orig_dev:253:6", -+ "", -+ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", -+ " 253 1 f746d280 f746cd80 1 mp0p1", -+ " TARGET TARGET_TYPE PRIVATE_DATA", -+ " f8821080 linear begin:0 len:2040192 dev:253:0 offset:63", -+ NULL -+}; -+ -+/* -+ * Registering command extension -+ */ -+ -+static struct command_table_entry command_table[] = { -+ {"dminfo", cmd_dminfo, help_dminfo, 0}, -+ {NULL, NULL, NULL, 0}, -+}; -+ -+int _init() -+{ -+ register_extension(command_table); -+ -+ dminfo_register_target_analyzer(&zero_analyzer); -+ dminfo_register_target_analyzer(&error_analyzer); -+ dminfo_register_target_analyzer(&linear_analyzer); -+ dminfo_register_target_analyzer(&mirror_analyzer); -+ dminfo_register_target_analyzer(&multipath_analyzer); -+ dminfo_register_target_analyzer(&crypt_analyzer); -+ dminfo_register_target_analyzer(&stripe_analyzer); -+ dminfo_register_target_analyzer(&snapshot_analyzer); -+ dminfo_register_target_analyzer(&snapshot_origin_analyzer); -+ -+ return 0; -+} -+ -+int _fini() -+{ -+ return 0; -+} ---- crash/gdb-6.1/gdb/symtab.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/gdb-6.1/gdb/symtab.c 2007-01-23 17:11:34.000000000 -0500 -@@ -4,7 +4,7 @@ - 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 - Free Software Foundation, Inc. - Portions Copyright (C) 2001, 2002 Mission Critical Linux, Inc. -- Copyright (c) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ Copyright (c) 2002, 2003, 2004, 2005, 2007 Red Hat, Inc. All rights reserved. - - This file is part of GDB. 
- -@@ -4523,14 +4523,54 @@ - struct symbol *sym; - struct expression *expr; - struct cleanup *old_chain; -- -+ int i; -+ int allsect = 0; -+ char *secname; -+ char buf[80]; -+ - gdb_current_load_module = lm = (struct load_module *)req->addr; - - req->name = lm->mod_namelist; - gdb_delete_symbol_file(req); - -- sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist, -- lm->mod_text_start); -+ for (i = 0 ; i < lm->mod_sections; i++) { -+ if (STREQ(lm->mod_section_data[i].name, ".text") && -+ (lm->mod_section_data[i].flags & SEC_FOUND)) -+ allsect = 1; -+ } -+ -+ if (!allsect) { -+ sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist, -+ lm->mod_text_start ? lm->mod_text_start : lm->mod_base); -+ if (lm->mod_data_start) { -+ sprintf(buf, " -s .data 0x%lx", lm->mod_data_start); -+ strcat(req->buf, buf); -+ } -+ if (lm->mod_bss_start) { -+ sprintf(buf, " -s .bss 0x%lx", lm->mod_bss_start); -+ strcat(req->buf, buf); -+ } -+ if (lm->mod_rodata_start) { -+ sprintf(buf, " -s .rodata 0x%lx", lm->mod_rodata_start); -+ strcat(req->buf, buf); -+ } -+ } else { -+ sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist, -+ lm->mod_text_start); -+ for (i = 0; i < lm->mod_sections; i++) { -+ secname = lm->mod_section_data[i].name; -+ if ((lm->mod_section_data[i].flags & SEC_FOUND) && -+ !STREQ(secname, ".text")) { -+ sprintf(buf, " -s %s 0x%lx", secname, -+ lm->mod_section_data[i].offset + lm->mod_base); -+ strcat(req->buf, buf); -+ } -+ } -+ } -+ -+ if (gdb_CRASHDEBUG(1)) { -+ fprintf_filtered(gdb_stdout, "gdb_add_symbol_file: %s\n", req->buf); -+ } - - execute_command(req->buf, FALSE); - ---- crash/gdb-6.1/gdb/symfile.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/gdb-6.1/gdb/symfile.c 2007-01-23 15:15:36.000000000 -0500 -@@ -3,7 +3,7 @@ - Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. - Portions Copyright (C) 2001, 2002 Mission Critical Linux, Inc. -- Copyright (c) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - - Contributed by Cygnus Support, using pieces from other GDB modules. - -@@ -1678,7 +1678,11 @@ - to load the program. */ - sect_opts[section_index].name = ".text"; - sect_opts[section_index].value = arg; -+#ifdef CRASH_MERGE -+ if (++section_index >= num_sect_opts) -+#else - if (++section_index > num_sect_opts) -+#endif - { - num_sect_opts *= 2; - sect_opts = ((struct sect_opt *) -@@ -1714,7 +1718,11 @@ - { - sect_opts[section_index].value = arg; - expecting_sec_addr = 0; -+#ifdef CRASH_MERGE -+ if (++section_index >= num_sect_opts) -+#else - if (++section_index > num_sect_opts) -+#endif - { - num_sect_opts *= 2; - sect_opts = ((struct sect_opt *) -@@ -3510,6 +3518,13 @@ - bfd_byte * - symfile_relocate_debug_section (bfd *abfd, asection *sectp, bfd_byte *buf) - { -+#ifdef CRASH_MERGE -+ /* Executable files have all the relocations already resolved. -+ * Handle files linked with --emit-relocs. -+ * http://sources.redhat.com/ml/gdb/2006-08/msg00137.html */ -+ if ((abfd->flags & EXEC_P) != 0) -+ return NULL; -+#endif - /* We're only interested in debugging sections with relocation - information. */ - if ((sectp->flags & SEC_RELOC) == 0) ---- crash/gdb-6.1/gdb/ppc-linux-tdep.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/gdb-6.1/gdb/ppc-linux-tdep.c 2005-07-14 11:08:17.000000000 -0400 -@@ -0,0 +1,1116 @@ -+/* Target-dependent code for GDB, the GNU debugger. 
-+ -+ Copyright 1986, 1987, 1989, 1991, 1992, 1993, 1994, 1995, 1996, -+ 1997, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. -+ Copyright (c) 2004, 2005 Red Hat, Inc. All rights reserved. -+ -+ This file is part of GDB. -+ -+ This program is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 2 of the License, or -+ (at your option) any later version. -+ -+ This program is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this program; if not, write to the Free Software -+ Foundation, Inc., 59 Temple Place - Suite 330, -+ Boston, MA 02111-1307, USA. */ -+ -+#include "defs.h" -+#include "frame.h" -+#include "inferior.h" -+#include "symtab.h" -+#include "target.h" -+#include "gdbcore.h" -+#include "gdbcmd.h" -+#include "symfile.h" -+#include "objfiles.h" -+#include "regcache.h" -+#include "value.h" -+#include "osabi.h" -+ -+#include "solib-svr4.h" -+#include "ppc-tdep.h" -+ -+/* The following instructions are used in the signal trampoline code -+ on GNU/Linux PPC. The kernel used to use magic syscalls 0x6666 and -+ 0x7777 but now uses the sigreturn syscalls. We check for both. */ -+#define INSTR_LI_R0_0x6666 0x38006666 -+#define INSTR_LI_R0_0x7777 0x38007777 -+#define INSTR_LI_R0_NR_sigreturn 0x38000077 -+#define INSTR_LI_R0_NR_rt_sigreturn 0x380000AC -+ -+#define INSTR_SC 0x44000002 -+ -+/* Since the *-tdep.c files are platform independent (i.e, they may be -+ used to build cross platform debuggers), we can't include system -+ headers. Therefore, details concerning the sigcontext structure -+ must be painstakingly rerecorded. What's worse, if these details -+ ever change in the header files, they'll have to be changed here -+ as well. 
*/ -+ -+/* __SIGNAL_FRAMESIZE from */ -+#define PPC_LINUX_SIGNAL_FRAMESIZE 64 -+ -+/* From , offsetof(struct sigcontext_struct, regs) == 0x1c */ -+#define PPC_LINUX_REGS_PTR_OFFSET (PPC_LINUX_SIGNAL_FRAMESIZE + 0x1c) -+ -+/* From , -+ offsetof(struct sigcontext_struct, handler) == 0x14 */ -+#define PPC_LINUX_HANDLER_PTR_OFFSET (PPC_LINUX_SIGNAL_FRAMESIZE + 0x14) -+ -+/* From , values for PT_NIP, PT_R1, and PT_LNK */ -+#define PPC_LINUX_PT_R0 0 -+#define PPC_LINUX_PT_R1 1 -+#define PPC_LINUX_PT_R2 2 -+#define PPC_LINUX_PT_R3 3 -+#define PPC_LINUX_PT_R4 4 -+#define PPC_LINUX_PT_R5 5 -+#define PPC_LINUX_PT_R6 6 -+#define PPC_LINUX_PT_R7 7 -+#define PPC_LINUX_PT_R8 8 -+#define PPC_LINUX_PT_R9 9 -+#define PPC_LINUX_PT_R10 10 -+#define PPC_LINUX_PT_R11 11 -+#define PPC_LINUX_PT_R12 12 -+#define PPC_LINUX_PT_R13 13 -+#define PPC_LINUX_PT_R14 14 -+#define PPC_LINUX_PT_R15 15 -+#define PPC_LINUX_PT_R16 16 -+#define PPC_LINUX_PT_R17 17 -+#define PPC_LINUX_PT_R18 18 -+#define PPC_LINUX_PT_R19 19 -+#define PPC_LINUX_PT_R20 20 -+#define PPC_LINUX_PT_R21 21 -+#define PPC_LINUX_PT_R22 22 -+#define PPC_LINUX_PT_R23 23 -+#define PPC_LINUX_PT_R24 24 -+#define PPC_LINUX_PT_R25 25 -+#define PPC_LINUX_PT_R26 26 -+#define PPC_LINUX_PT_R27 27 -+#define PPC_LINUX_PT_R28 28 -+#define PPC_LINUX_PT_R29 29 -+#define PPC_LINUX_PT_R30 30 -+#define PPC_LINUX_PT_R31 31 -+#define PPC_LINUX_PT_NIP 32 -+#define PPC_LINUX_PT_MSR 33 -+#define PPC_LINUX_PT_CTR 35 -+#define PPC_LINUX_PT_LNK 36 -+#define PPC_LINUX_PT_XER 37 -+#define PPC_LINUX_PT_CCR 38 -+#define PPC_LINUX_PT_MQ 39 -+#define PPC_LINUX_PT_FPR0 48 /* each FP reg occupies 2 slots in this space */ -+#define PPC_LINUX_PT_FPR31 (PPC_LINUX_PT_FPR0 + 2*31) -+#define PPC_LINUX_PT_FPSCR (PPC_LINUX_PT_FPR0 + 2*32 + 1) -+ -+static int ppc_linux_at_sigtramp_return_path (CORE_ADDR pc); -+ -+/* Determine if pc is in a signal trampoline... -+ -+ Ha! That's not what this does at all. wait_for_inferior in -+ infrun.c calls PC_IN_SIGTRAMP in order to detect entry into a -+ signal trampoline just after delivery of a signal. But on -+ GNU/Linux, signal trampolines are used for the return path only. -+ The kernel sets things up so that the signal handler is called -+ directly. -+ -+ If we use in_sigtramp2() in place of in_sigtramp() (see below) -+ we'll (often) end up with stop_pc in the trampoline and prev_pc in -+ the (now exited) handler. The code there will cause a temporary -+ breakpoint to be set on prev_pc which is not very likely to get hit -+ again. -+ -+ If this is confusing, think of it this way... the code in -+ wait_for_inferior() needs to be able to detect entry into a signal -+ trampoline just after a signal is delivered, not after the handler -+ has been run. -+ -+ So, we define in_sigtramp() below to return 1 if the following is -+ true: -+ -+ 1) The previous frame is a real signal trampoline. -+ -+ - and - -+ -+ 2) pc is at the first or second instruction of the corresponding -+ handler. -+ -+ Why the second instruction? It seems that wait_for_inferior() -+ never sees the first instruction when single stepping. When a -+ signal is delivered while stepping, the next instruction that -+ would've been stepped over isn't, instead a signal is delivered and -+ the first instruction of the handler is stepped over instead. That -+ puts us on the second instruction. (I added the test for the -+ first instruction long after the fact, just in case the observed -+ behavior is ever fixed.) 
-+ -+ PC_IN_SIGTRAMP is called from blockframe.c as well in order to set -+ the frame's type (if a SIGTRAMP_FRAME). Because of our strange -+ definition of in_sigtramp below, we can't rely on the frame's type -+ getting set correctly from within blockframe.c. This is why we -+ take pains to set it in init_extra_frame_info(). -+ -+ NOTE: cagney/2002-11-10: I suspect the real problem here is that -+ the get_prev_frame() only initializes the frame's type after the -+ call to INIT_FRAME_INFO. get_prev_frame() should be fixed, this -+ code shouldn't be working its way around a bug :-(. */ -+ -+int -+ppc_linux_in_sigtramp (CORE_ADDR pc, char *func_name) -+{ -+ CORE_ADDR lr; -+ CORE_ADDR sp; -+ CORE_ADDR tramp_sp; -+ char buf[4]; -+ CORE_ADDR handler; -+ -+ lr = read_register (gdbarch_tdep (current_gdbarch)->ppc_lr_regnum); -+ if (!ppc_linux_at_sigtramp_return_path (lr)) -+ return 0; -+ -+ sp = read_register (SP_REGNUM); -+ -+ if (target_read_memory (sp, buf, sizeof (buf)) != 0) -+ return 0; -+ -+ tramp_sp = extract_unsigned_integer (buf, 4); -+ -+ if (target_read_memory (tramp_sp + PPC_LINUX_HANDLER_PTR_OFFSET, buf, -+ sizeof (buf)) != 0) -+ return 0; -+ -+ handler = extract_unsigned_integer (buf, 4); -+ -+ return (pc == handler || pc == handler + 4); -+} -+ -+static int -+insn_is_sigreturn (unsigned long pcinsn) -+{ -+ switch(pcinsn) -+ { -+ case INSTR_LI_R0_0x6666: -+ case INSTR_LI_R0_0x7777: -+ case INSTR_LI_R0_NR_sigreturn: -+ case INSTR_LI_R0_NR_rt_sigreturn: -+ return 1; -+ default: -+ return 0; -+ } -+} -+ -+/* -+ * The signal handler trampoline is on the stack and consists of exactly -+ * two instructions. The easiest and most accurate way of determining -+ * whether the pc is in one of these trampolines is by inspecting the -+ * instructions. It'd be faster though if we could find a way to do this -+ * via some simple address comparisons. -+ */ -+static int -+ppc_linux_at_sigtramp_return_path (CORE_ADDR pc) -+{ -+ char buf[12]; -+ unsigned long pcinsn; -+ if (target_read_memory (pc - 4, buf, sizeof (buf)) != 0) -+ return 0; -+ -+ /* extract the instruction at the pc */ -+ pcinsn = extract_unsigned_integer (buf + 4, 4); -+ -+ return ( -+ (insn_is_sigreturn (pcinsn) -+ && extract_unsigned_integer (buf + 8, 4) == INSTR_SC) -+ || -+ (pcinsn == INSTR_SC -+ && insn_is_sigreturn (extract_unsigned_integer (buf, 4)))); -+} -+ -+static CORE_ADDR -+ppc_linux_skip_trampoline_code (CORE_ADDR pc) -+{ -+ char buf[4]; -+ struct obj_section *sect; -+ struct objfile *objfile; -+ unsigned long insn; -+ CORE_ADDR plt_start = 0; -+ CORE_ADDR symtab = 0; -+ CORE_ADDR strtab = 0; -+ int num_slots = -1; -+ int reloc_index = -1; -+ CORE_ADDR plt_table; -+ CORE_ADDR reloc; -+ CORE_ADDR sym; -+ long symidx; -+ char symname[1024]; -+ struct minimal_symbol *msymbol; -+ -+ /* Find the section pc is in; return if not in .plt */ -+ sect = find_pc_section (pc); -+ if (!sect || strcmp (sect->the_bfd_section->name, ".plt") != 0) -+ return 0; -+ -+ objfile = sect->objfile; -+ -+ /* Pick up the instruction at pc. It had better be of the -+ form -+ li r11, IDX -+ -+ where IDX is an index into the plt_table. */ -+ -+ if (target_read_memory (pc, buf, 4) != 0) -+ return 0; -+ insn = extract_unsigned_integer (buf, 4); -+ -+ if ((insn & 0xffff0000) != 0x39600000 /* li r11, VAL */ ) -+ return 0; -+ -+ reloc_index = (insn << 16) >> 16; -+ -+ /* Find the objfile that pc is in and obtain the information -+ necessary for finding the symbol name. 
*/ -+ for (sect = objfile->sections; sect < objfile->sections_end; ++sect) -+ { -+ const char *secname = sect->the_bfd_section->name; -+ if (strcmp (secname, ".plt") == 0) -+ plt_start = sect->addr; -+ else if (strcmp (secname, ".rela.plt") == 0) -+ num_slots = ((int) sect->endaddr - (int) sect->addr) / 12; -+ else if (strcmp (secname, ".dynsym") == 0) -+ symtab = sect->addr; -+ else if (strcmp (secname, ".dynstr") == 0) -+ strtab = sect->addr; -+ } -+ -+ /* Make sure we have all the information we need. */ -+ if (plt_start == 0 || num_slots == -1 || symtab == 0 || strtab == 0) -+ return 0; -+ -+ /* Compute the value of the plt table */ -+ plt_table = plt_start + 72 + 8 * num_slots; -+ -+ /* Get address of the relocation entry (Elf32_Rela) */ -+ if (target_read_memory (plt_table + reloc_index, buf, 4) != 0) -+ return 0; -+ reloc = extract_unsigned_integer (buf, 4); -+ -+ sect = find_pc_section (reloc); -+ if (!sect) -+ return 0; -+ -+ if (strcmp (sect->the_bfd_section->name, ".text") == 0) -+ return reloc; -+ -+ /* Now get the r_info field which is the relocation type and symbol -+ index. */ -+ if (target_read_memory (reloc + 4, buf, 4) != 0) -+ return 0; -+ symidx = extract_unsigned_integer (buf, 4); -+ -+ /* Shift out the relocation type leaving just the symbol index */ -+ /* symidx = ELF32_R_SYM(symidx); */ -+ symidx = symidx >> 8; -+ -+ /* compute the address of the symbol */ -+ sym = symtab + symidx * 4; -+ -+ /* Fetch the string table index */ -+ if (target_read_memory (sym, buf, 4) != 0) -+ return 0; -+ symidx = extract_unsigned_integer (buf, 4); -+ -+ /* Fetch the string; we don't know how long it is. Is it possible -+ that the following will fail because we're trying to fetch too -+ much? */ -+ if (target_read_memory (strtab + symidx, symname, sizeof (symname)) != 0) -+ return 0; -+ -+ /* This might not work right if we have multiple symbols with the -+ same name; the only way to really get it right is to perform -+ the same sort of lookup as the dynamic linker. */ -+ msymbol = lookup_minimal_symbol_text (symname, NULL); -+ if (!msymbol) -+ return 0; -+ -+ return SYMBOL_VALUE_ADDRESS (msymbol); -+} -+ -+/* The rs6000 version of FRAME_SAVED_PC will almost work for us. The -+ signal handler details are different, so we'll handle those here -+ and call the rs6000 version to do the rest. 
*/ -+CORE_ADDR -+ppc_linux_frame_saved_pc (struct frame_info *fi) -+{ -+ if ((get_frame_type (fi) == SIGTRAMP_FRAME)) -+ { -+ CORE_ADDR regs_addr = -+ read_memory_integer (get_frame_base (fi) -+ + PPC_LINUX_REGS_PTR_OFFSET, 4); -+ /* return the NIP in the regs array */ -+ return read_memory_integer (regs_addr + 4 * PPC_LINUX_PT_NIP, 4); -+ } -+ else if (get_next_frame (fi) -+ && (get_frame_type (get_next_frame (fi)) == SIGTRAMP_FRAME)) -+ { -+ CORE_ADDR regs_addr = -+ read_memory_integer (get_frame_base (get_next_frame (fi)) -+ + PPC_LINUX_REGS_PTR_OFFSET, 4); -+ /* return LNK in the regs array */ -+ return read_memory_integer (regs_addr + 4 * PPC_LINUX_PT_LNK, 4); -+ } -+ else -+ return rs6000_frame_saved_pc (fi); -+} -+ -+void -+ppc_linux_init_extra_frame_info (int fromleaf, struct frame_info *fi) -+{ -+ rs6000_init_extra_frame_info (fromleaf, fi); -+ -+ if (get_next_frame (fi) != 0) -+ { -+ /* We're called from get_prev_frame_info; check to see if -+ this is a signal frame by looking to see if the pc points -+ at trampoline code */ -+ if (ppc_linux_at_sigtramp_return_path (get_frame_pc (fi))) -+ deprecated_set_frame_type (fi, SIGTRAMP_FRAME); -+ else -+ /* FIXME: cagney/2002-11-10: Is this double bogus? What -+ happens if the frame has previously been marked as a dummy? */ -+ deprecated_set_frame_type (fi, NORMAL_FRAME); -+ } -+} -+ -+int -+ppc_linux_frameless_function_invocation (struct frame_info *fi) -+{ -+ /* We'll find the wrong thing if we let -+ rs6000_frameless_function_invocation () search for a signal trampoline */ -+ if (ppc_linux_at_sigtramp_return_path (get_frame_pc (fi))) -+ return 0; -+ else -+ return rs6000_frameless_function_invocation (fi); -+} -+ -+void -+ppc_linux_frame_init_saved_regs (struct frame_info *fi) -+{ -+ if ((get_frame_type (fi) == SIGTRAMP_FRAME)) -+ { -+ CORE_ADDR regs_addr; -+ int i; -+ if (deprecated_get_frame_saved_regs (fi)) -+ return; -+ -+ frame_saved_regs_zalloc (fi); -+ -+ regs_addr = -+ read_memory_integer (get_frame_base (fi) -+ + PPC_LINUX_REGS_PTR_OFFSET, 4); -+ deprecated_get_frame_saved_regs (fi)[PC_REGNUM] = regs_addr + 4 * PPC_LINUX_PT_NIP; -+ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_ps_regnum] = -+ regs_addr + 4 * PPC_LINUX_PT_MSR; -+ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_cr_regnum] = -+ regs_addr + 4 * PPC_LINUX_PT_CCR; -+ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_lr_regnum] = -+ regs_addr + 4 * PPC_LINUX_PT_LNK; -+ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_ctr_regnum] = -+ regs_addr + 4 * PPC_LINUX_PT_CTR; -+ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_xer_regnum] = -+ regs_addr + 4 * PPC_LINUX_PT_XER; -+ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_mq_regnum] = -+ regs_addr + 4 * PPC_LINUX_PT_MQ; -+ for (i = 0; i < 32; i++) -+ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_gp0_regnum + i] = -+ regs_addr + 4 * PPC_LINUX_PT_R0 + 4 * i; -+ for (i = 0; i < 32; i++) -+ deprecated_get_frame_saved_regs (fi)[FP0_REGNUM + i] = regs_addr + 4 * PPC_LINUX_PT_FPR0 + 8 * i; -+ } -+ else -+ rs6000_frame_init_saved_regs (fi); -+} -+ -+CORE_ADDR -+ppc_linux_frame_chain (struct frame_info *thisframe) -+{ -+ /* Kernel properly constructs the frame chain for the handler */ -+ if ((get_frame_type (thisframe) == SIGTRAMP_FRAME)) -+ return read_memory_integer (get_frame_base (thisframe), 4); -+ else -+ return rs6000_frame_chain (thisframe); -+} -+ 
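The SIGTRAMP handling above reduces to two dependent memory reads: the frame base plus __SIGNAL_FRAMESIZE + 0x1c yields the kernel's saved pt_regs pointer, and the PT_NIP slot of that array (or PT_LNK, when looking one frame further out) yields the program counter to report. A minimal standalone sketch of that arithmetic follows; it is illustrative only, not part of the patch, and read_u32() is a hypothetical callback standing in for GDB's read_memory_integer(), with the constants mirroring the 32-bit layout defined earlier in this file.

/* Illustrative sketch, not from the patch.  read_u32() is a hypothetical
   stand-in for GDB's read_memory_integer(); the constants mirror the
   32-bit definitions above (__SIGNAL_FRAMESIZE == 64, 4-byte pt_regs
   slots, PT_NIP == 32, PT_LNK == 36).  */
#include <stdint.h>

typedef uint32_t (*read_u32_fn)(uint32_t addr);

static uint32_t
ppc32_sigframe_saved_pc(uint32_t frame_base, read_u32_fn read_u32, int use_lnk)
{
	/* The word at frame_base + 64 + 0x1c is the pointer to the
	   pt_regs array the kernel pushed when delivering the signal.  */
	uint32_t regs = read_u32(frame_base + 64 + 0x1c);

	/* Slot 32 (PT_NIP) holds the interrupted PC; slot 36 (PT_LNK)
	   is used instead when resolving the previous frame's PC.  */
	return read_u32(regs + 4 * (use_lnk ? 36 : 32));
}

The same two-step walk, with the per-register slot addresses recorded rather than read, is what ppc_linux_frame_init_saved_regs() performs above for the full register set.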
-+/* ppc_linux_memory_remove_breakpoints attempts to remove a breakpoint -+ in much the same fashion as memory_remove_breakpoint in mem-break.c, -+ but is careful not to write back the previous contents if the code -+ in question has changed in between inserting the breakpoint and -+ removing it. -+ -+ Here is the problem that we're trying to solve... -+ -+ Once upon a time, before introducing this function to remove -+ breakpoints from the inferior, setting a breakpoint on a shared -+ library function prior to running the program would not work -+ properly. In order to understand the problem, it is first -+ necessary to understand a little bit about dynamic linking on -+ this platform. -+ -+ A call to a shared library function is accomplished via a bl -+ (branch-and-link) instruction whose branch target is an entry -+ in the procedure linkage table (PLT). The PLT in the object -+ file is uninitialized. To gdb, prior to running the program, the -+ entries in the PLT are all zeros. -+ -+ Once the program starts running, the shared libraries are loaded -+ and the procedure linkage table is initialized, but the entries in -+ the table are not (necessarily) resolved. Once a function is -+ actually called, the code in the PLT is hit and the function is -+ resolved. In order to better illustrate this, an example is in -+ order; the following example is from the gdb testsuite. -+ -+ We start the program shmain. -+ -+ [kev@arroyo testsuite]$ ../gdb gdb.base/shmain -+ [...] -+ -+ We place two breakpoints, one on shr1 and the other on main. -+ -+ (gdb) b shr1 -+ Breakpoint 1 at 0x100409d4 -+ (gdb) b main -+ Breakpoint 2 at 0x100006a0: file gdb.base/shmain.c, line 44. -+ -+ Examine the instruction (and the immediatly following instruction) -+ upon which the breakpoint was placed. Note that the PLT entry -+ for shr1 contains zeros. -+ -+ (gdb) x/2i 0x100409d4 -+ 0x100409d4 : .long 0x0 -+ 0x100409d8 : .long 0x0 -+ -+ Now run 'til main. -+ -+ (gdb) r -+ Starting program: gdb.base/shmain -+ Breakpoint 1 at 0xffaf790: file gdb.base/shr1.c, line 19. -+ -+ Breakpoint 2, main () -+ at gdb.base/shmain.c:44 -+ 44 g = 1; -+ -+ Examine the PLT again. Note that the loading of the shared -+ library has initialized the PLT to code which loads a constant -+ (which I think is an index into the GOT) into r11 and then -+ branchs a short distance to the code which actually does the -+ resolving. -+ -+ (gdb) x/2i 0x100409d4 -+ 0x100409d4 : li r11,4 -+ 0x100409d8 : b 0x10040984 -+ (gdb) c -+ Continuing. -+ -+ Breakpoint 1, shr1 (x=1) -+ at gdb.base/shr1.c:19 -+ 19 l = 1; -+ -+ Now we've hit the breakpoint at shr1. (The breakpoint was -+ reset from the PLT entry to the actual shr1 function after the -+ shared library was loaded.) Note that the PLT entry has been -+ resolved to contain a branch that takes us directly to shr1. -+ (The real one, not the PLT entry.) -+ -+ (gdb) x/2i 0x100409d4 -+ 0x100409d4 : b 0xffaf76c -+ 0x100409d8 : b 0x10040984 -+ -+ The thing to note here is that the PLT entry for shr1 has been -+ changed twice. -+ -+ Now the problem should be obvious. GDB places a breakpoint (a -+ trap instruction) on the zero value of the PLT entry for shr1. -+ Later on, after the shared library had been loaded and the PLT -+ initialized, GDB gets a signal indicating this fact and attempts -+ (as it always does when it stops) to remove all the breakpoints. 
-+ -+ The breakpoint removal was causing the former contents (a zero -+ word) to be written back to the now initialized PLT entry thus -+ destroying a portion of the initialization that had occurred only a -+ short time ago. When execution continued, the zero word would be -+ executed as an instruction an an illegal instruction trap was -+ generated instead. (0 is not a legal instruction.) -+ -+ The fix for this problem was fairly straightforward. The function -+ memory_remove_breakpoint from mem-break.c was copied to this file, -+ modified slightly, and renamed to ppc_linux_memory_remove_breakpoint. -+ In tm-linux.h, MEMORY_REMOVE_BREAKPOINT is defined to call this new -+ function. -+ -+ The differences between ppc_linux_memory_remove_breakpoint () and -+ memory_remove_breakpoint () are minor. All that the former does -+ that the latter does not is check to make sure that the breakpoint -+ location actually contains a breakpoint (trap instruction) prior -+ to attempting to write back the old contents. If it does contain -+ a trap instruction, we allow the old contents to be written back. -+ Otherwise, we silently do nothing. -+ -+ The big question is whether memory_remove_breakpoint () should be -+ changed to have the same functionality. The downside is that more -+ traffic is generated for remote targets since we'll have an extra -+ fetch of a memory word each time a breakpoint is removed. -+ -+ For the time being, we'll leave this self-modifying-code-friendly -+ version in ppc-linux-tdep.c, but it ought to be migrated somewhere -+ else in the event that some other platform has similar needs with -+ regard to removing breakpoints in some potentially self modifying -+ code. */ -+int -+ppc_linux_memory_remove_breakpoint (CORE_ADDR addr, char *contents_cache) -+{ -+ const unsigned char *bp; -+ int val; -+ int bplen; -+ char old_contents[BREAKPOINT_MAX]; -+ -+ /* Determine appropriate breakpoint contents and size for this address. */ -+ bp = BREAKPOINT_FROM_PC (&addr, &bplen); -+ if (bp == NULL) -+ error ("Software breakpoints not implemented for this target."); -+ -+ val = target_read_memory (addr, old_contents, bplen); -+ -+ /* If our breakpoint is no longer at the address, this means that the -+ program modified the code on us, so it is wrong to put back the -+ old value */ -+ if (val == 0 && memcmp (bp, old_contents, bplen) == 0) -+ val = target_write_memory (addr, contents_cache, bplen); -+ -+ return val; -+} -+ -+/* For historic reasons, PPC 32 GNU/Linux follows PowerOpen rather -+ than the 32 bit SYSV R4 ABI structure return convention - all -+ structures, no matter their size, are put in memory. Vectors, -+ which were added later, do get returned in a register though. */ -+ -+static enum return_value_convention -+ppc_linux_return_value (struct gdbarch *gdbarch, struct type *valtype, -+ struct regcache *regcache, void *readbuf, -+ const void *writebuf) -+{ -+ if ((TYPE_CODE (valtype) == TYPE_CODE_STRUCT -+ || TYPE_CODE (valtype) == TYPE_CODE_UNION) -+ && !((TYPE_LENGTH (valtype) == 16 || TYPE_LENGTH (valtype) == 8) -+ && TYPE_VECTOR (valtype))) -+ return RETURN_VALUE_STRUCT_CONVENTION; -+ else -+ return ppc_sysv_abi_return_value (gdbarch, valtype, regcache, readbuf, -+ writebuf); -+} -+ -+/* Fetch (and possibly build) an appropriate link_map_offsets -+ structure for GNU/Linux PPC targets using the struct offsets -+ defined in link.h (but without actual reference to that file). 
-+ -+ This makes it possible to access GNU/Linux PPC shared libraries -+ from a GDB that was not built on an GNU/Linux PPC host (for cross -+ debugging). */ -+ -+struct link_map_offsets * -+ppc_linux_svr4_fetch_link_map_offsets (void) -+{ -+ static struct link_map_offsets lmo; -+ static struct link_map_offsets *lmp = NULL; -+ -+ if (lmp == NULL) -+ { -+ lmp = &lmo; -+ -+ lmo.r_debug_size = 8; /* The actual size is 20 bytes, but -+ this is all we need. */ -+ lmo.r_map_offset = 4; -+ lmo.r_map_size = 4; -+ -+ lmo.link_map_size = 20; /* The actual size is 560 bytes, but -+ this is all we need. */ -+ lmo.l_addr_offset = 0; -+ lmo.l_addr_size = 4; -+ -+ lmo.l_name_offset = 4; -+ lmo.l_name_size = 4; -+ -+ lmo.l_next_offset = 12; -+ lmo.l_next_size = 4; -+ -+ lmo.l_prev_offset = 16; -+ lmo.l_prev_size = 4; -+ } -+ -+ return lmp; -+} -+ -+ -+/* Macros for matching instructions. Note that, since all the -+ operands are masked off before they're or-ed into the instruction, -+ you can use -1 to make masks. */ -+ -+#define insn_d(opcd, rts, ra, d) \ -+ ((((opcd) & 0x3f) << 26) \ -+ | (((rts) & 0x1f) << 21) \ -+ | (((ra) & 0x1f) << 16) \ -+ | ((d) & 0xffff)) -+ -+#define insn_ds(opcd, rts, ra, d, xo) \ -+ ((((opcd) & 0x3f) << 26) \ -+ | (((rts) & 0x1f) << 21) \ -+ | (((ra) & 0x1f) << 16) \ -+ | ((d) & 0xfffc) \ -+ | ((xo) & 0x3)) -+ -+#define insn_xfx(opcd, rts, spr, xo) \ -+ ((((opcd) & 0x3f) << 26) \ -+ | (((rts) & 0x1f) << 21) \ -+ | (((spr) & 0x1f) << 16) \ -+ | (((spr) & 0x3e0) << 6) \ -+ | (((xo) & 0x3ff) << 1)) -+ -+/* Read a PPC instruction from memory. PPC instructions are always -+ big-endian, no matter what endianness the program is running in, so -+ we can't use read_memory_integer or one of its friends here. */ -+static unsigned int -+read_insn (CORE_ADDR pc) -+{ -+ unsigned char buf[4]; -+ -+ read_memory (pc, buf, 4); -+ return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; -+} -+ -+ -+/* An instruction to match. */ -+struct insn_pattern -+{ -+ unsigned int mask; /* mask the insn with this... */ -+ unsigned int data; /* ...and see if it matches this. */ -+ int optional; /* If non-zero, this insn may be absent. */ -+}; -+ -+/* Return non-zero if the instructions at PC match the series -+ described in PATTERN, or zero otherwise. PATTERN is an array of -+ 'struct insn_pattern' objects, terminated by an entry whose mask is -+ zero. -+ -+ When the match is successful, fill INSN[i] with what PATTERN[i] -+ matched. If PATTERN[i] is optional, and the instruction wasn't -+ present, set INSN[i] to 0 (which is not a valid PPC instruction). -+ INSN should have as many elements as PATTERN. Note that, if -+ PATTERN contains optional instructions which aren't present in -+ memory, then INSN will have holes, so INSN[i] isn't necessarily the -+ i'th instruction in memory. */ -+static int -+insns_match_pattern (CORE_ADDR pc, -+ struct insn_pattern *pattern, -+ unsigned int *insn) -+{ -+ int i; -+ -+ for (i = 0; pattern[i].mask; i++) -+ { -+ insn[i] = read_insn (pc); -+ if ((insn[i] & pattern[i].mask) == pattern[i].data) -+ pc += 4; -+ else if (pattern[i].optional) -+ insn[i] = 0; -+ else -+ return 0; -+ } -+ -+ return 1; -+} -+ -+ -+/* Return the 'd' field of the d-form instruction INSN, properly -+ sign-extended. */ -+static CORE_ADDR -+insn_d_field (unsigned int insn) -+{ -+ return ((((CORE_ADDR) insn & 0xffff) ^ 0x8000) - 0x8000); -+} -+ -+ -+/* Return the 'ds' field of the ds-form instruction INSN, with the two -+ zero bits concatenated at the right, and properly -+ sign-extended. 
*/ -+static CORE_ADDR -+insn_ds_field (unsigned int insn) -+{ -+ return ((((CORE_ADDR) insn & 0xfffc) ^ 0x8000) - 0x8000); -+} -+ -+ -+/* If DESC is the address of a 64-bit PowerPC GNU/Linux function -+ descriptor, return the descriptor's entry point. */ -+static CORE_ADDR -+ppc64_desc_entry_point (CORE_ADDR desc) -+{ -+ /* The first word of the descriptor is the entry point. */ -+ return (CORE_ADDR) read_memory_unsigned_integer (desc, 8); -+} -+ -+ -+/* Pattern for the standard linkage function. These are built by -+ build_plt_stub in elf64-ppc.c, whose GLINK argument is always -+ zero. */ -+static struct insn_pattern ppc64_standard_linkage[] = -+ { -+ /* addis r12, r2, */ -+ { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 }, -+ -+ /* std r2, 40(r1) */ -+ { -1, insn_ds (62, 2, 1, 40, 0), 0 }, -+ -+ /* ld r11, (r12) */ -+ { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 }, -+ -+ /* addis r12, r12, 1 */ -+ { insn_d (-1, -1, -1, -1), insn_d (15, 12, 2, 1), 1 }, -+ -+ /* ld r2, (r12) */ -+ { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 }, -+ -+ /* addis r12, r12, 1 */ -+ { insn_d (-1, -1, -1, -1), insn_d (15, 12, 2, 1), 1 }, -+ -+ /* mtctr r11 */ -+ { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), -+ 0 }, -+ -+ /* ld r11, (r12) */ -+ { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 }, -+ -+ /* bctr */ -+ { -1, 0x4e800420, 0 }, -+ -+ { 0, 0, 0 } -+ }; -+#define PPC64_STANDARD_LINKAGE_LEN \ -+ (sizeof (ppc64_standard_linkage) / sizeof (ppc64_standard_linkage[0])) -+ -+ -+/* Recognize a 64-bit PowerPC GNU/Linux linkage function --- what GDB -+ calls a "solib trampoline". */ -+static int -+ppc64_in_solib_call_trampoline (CORE_ADDR pc, char *name) -+{ -+ /* Detecting solib call trampolines on PPC64 GNU/Linux is a pain. -+ -+ It's not specifically solib call trampolines that are the issue. -+ Any call from one function to another function that uses a -+ different TOC requires a trampoline, to save the caller's TOC -+ pointer and then load the callee's TOC. An executable or shared -+ library may have more than one TOC, so even intra-object calls -+ may require a trampoline. Since executable and shared libraries -+ will all have their own distinct TOCs, every inter-object call is -+ also an inter-TOC call, and requires a trampoline --- so "solib -+ call trampolines" are just a special case. -+ -+ The 64-bit PowerPC GNU/Linux ABI calls these call trampolines -+ "linkage functions". Since they need to be near the functions -+ that call them, they all appear in .text, not in any special -+ section. The .plt section just contains an array of function -+ descriptors, from which the linkage functions load the callee's -+ entry point, TOC value, and environment pointer. So -+ in_plt_section is useless. The linkage functions don't have any -+ special linker symbols to name them, either. -+ -+ The only way I can see to recognize them is to actually look at -+ their code. They're generated by ppc_build_one_stub and some -+ other functions in bfd/elf64-ppc.c, so that should show us all -+ the instruction sequences we need to recognize. 
*/ -+ unsigned int insn[PPC64_STANDARD_LINKAGE_LEN]; -+ -+ return insns_match_pattern (pc, ppc64_standard_linkage, insn); -+} -+ -+ -+/* When the dynamic linker is doing lazy symbol resolution, the first -+ call to a function in another object will go like this: -+ -+ - The user's function calls the linkage function: -+ -+ 100007c4: 4b ff fc d5 bl 10000498 -+ 100007c8: e8 41 00 28 ld r2,40(r1) -+ -+ - The linkage function loads the entry point (and other stuff) from -+ the function descriptor in the PLT, and jumps to it: -+ -+ 10000498: 3d 82 00 00 addis r12,r2,0 -+ 1000049c: f8 41 00 28 std r2,40(r1) -+ 100004a0: e9 6c 80 98 ld r11,-32616(r12) -+ 100004a4: e8 4c 80 a0 ld r2,-32608(r12) -+ 100004a8: 7d 69 03 a6 mtctr r11 -+ 100004ac: e9 6c 80 a8 ld r11,-32600(r12) -+ 100004b0: 4e 80 04 20 bctr -+ -+ - But since this is the first time that PLT entry has been used, it -+ sends control to its glink entry. That loads the number of the -+ PLT entry and jumps to the common glink0 code: -+ -+ 10000c98: 38 00 00 00 li r0,0 -+ 10000c9c: 4b ff ff dc b 10000c78 -+ -+ - The common glink0 code then transfers control to the dynamic -+ linker's fixup code: -+ -+ 10000c78: e8 41 00 28 ld r2,40(r1) -+ 10000c7c: 3d 82 00 00 addis r12,r2,0 -+ 10000c80: e9 6c 80 80 ld r11,-32640(r12) -+ 10000c84: e8 4c 80 88 ld r2,-32632(r12) -+ 10000c88: 7d 69 03 a6 mtctr r11 -+ 10000c8c: e9 6c 80 90 ld r11,-32624(r12) -+ 10000c90: 4e 80 04 20 bctr -+ -+ Eventually, this code will figure out how to skip all of this, -+ including the dynamic linker. At the moment, we just get through -+ the linkage function. */ -+ -+/* If the current thread is about to execute a series of instructions -+ at PC matching the ppc64_standard_linkage pattern, and INSN is the result -+ from that pattern match, return the code address to which the -+ standard linkage function will send them. (This doesn't deal with -+ dynamic linker lazy symbol resolution stubs.) */ -+static CORE_ADDR -+ppc64_standard_linkage_target (CORE_ADDR pc, unsigned int *insn) -+{ -+ struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch); -+ -+ /* The address of the function descriptor this linkage function -+ references. */ -+ CORE_ADDR desc -+ = ((CORE_ADDR) read_register (tdep->ppc_gp0_regnum + 2) -+ + (insn_d_field (insn[0]) << 16) -+ + insn_ds_field (insn[2])); -+ -+ /* The first word of the descriptor is the entry point. Return that. */ -+ return ppc64_desc_entry_point (desc); -+} -+ -+ -+/* Given that we've begun executing a call trampoline at PC, return -+ the entry point of the function the trampoline will go to. */ -+static CORE_ADDR -+ppc64_skip_trampoline_code (CORE_ADDR pc) -+{ -+ unsigned int ppc64_standard_linkage_insn[PPC64_STANDARD_LINKAGE_LEN]; -+ -+ if (insns_match_pattern (pc, ppc64_standard_linkage, -+ ppc64_standard_linkage_insn)) -+ return ppc64_standard_linkage_target (pc, ppc64_standard_linkage_insn); -+ else -+ return 0; -+} -+ -+ -+/* Support for CONVERT_FROM_FUNC_PTR_ADDR (ARCH, ADDR, TARG) on PPC64 -+ GNU/Linux. -+ -+ Usually a function pointer's representation is simply the address -+ of the function. On GNU/Linux on the 64-bit PowerPC however, a -+ function pointer is represented by a pointer to a TOC entry. This -+ TOC entry contains three words, the first word is the address of -+ the function, the second word is the TOC pointer (r2), and the -+ third word is the static chain value. Throughout GDB it is -+ currently assumed that a function pointer contains the address of -+ the function, which is not easy to fix. 
In addition, the -+ conversion of a function address to a function pointer would -+ require allocation of a TOC entry in the inferior's memory space, -+ with all its drawbacks. To be able to call C++ virtual methods in -+ the inferior (which are called via function pointers), -+ find_function_addr uses this function to get the function address -+ from a function pointer. */ -+ -+/* If ADDR points at what is clearly a function descriptor, transform -+ it into the address of the corresponding function. Be -+ conservative, otherwize GDB will do the transformation on any -+ random addresses such as occures when there is no symbol table. */ -+ -+static CORE_ADDR -+ppc64_linux_convert_from_func_ptr_addr (struct gdbarch *gdbarch, -+ CORE_ADDR addr, -+ struct target_ops *targ) -+{ -+ struct section_table *s = target_section_by_addr (targ, addr); -+ -+ /* Check if ADDR points to a function descriptor. */ -+ if (s && strcmp (s->the_bfd_section->name, ".opd") == 0) -+ return get_target_memory_unsigned (targ, addr, 8); -+ -+ return addr; -+} -+ -+#ifdef CRASH_MERGE -+enum { -+ PPC_ELF_NGREG = 48, -+ PPC_ELF_NFPREG = 33, -+ PPC_ELF_NVRREG = 33 -+}; -+ -+enum { -+ ELF_GREGSET_SIZE = (PPC_ELF_NGREG * 4), -+ ELF_FPREGSET_SIZE = (PPC_ELF_NFPREG * 8) -+}; -+#else -+enum { -+ ELF_NGREG = 48, -+ ELF_NFPREG = 33, -+ ELF_NVRREG = 33 -+}; -+ -+enum { -+ ELF_GREGSET_SIZE = (ELF_NGREG * 4), -+ ELF_FPREGSET_SIZE = (ELF_NFPREG * 8) -+}; -+#endif -+ -+void -+ppc_linux_supply_gregset (char *buf) -+{ -+ int regi; -+ struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch); -+ -+ for (regi = 0; regi < 32; regi++) -+ supply_register (regi, buf + 4 * regi); -+ -+ supply_register (PC_REGNUM, buf + 4 * PPC_LINUX_PT_NIP); -+ supply_register (tdep->ppc_lr_regnum, buf + 4 * PPC_LINUX_PT_LNK); -+ supply_register (tdep->ppc_cr_regnum, buf + 4 * PPC_LINUX_PT_CCR); -+ supply_register (tdep->ppc_xer_regnum, buf + 4 * PPC_LINUX_PT_XER); -+ supply_register (tdep->ppc_ctr_regnum, buf + 4 * PPC_LINUX_PT_CTR); -+ if (tdep->ppc_mq_regnum != -1) -+ supply_register (tdep->ppc_mq_regnum, buf + 4 * PPC_LINUX_PT_MQ); -+ supply_register (tdep->ppc_ps_regnum, buf + 4 * PPC_LINUX_PT_MSR); -+} -+ -+void -+ppc_linux_supply_fpregset (char *buf) -+{ -+ int regi; -+ struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch); -+ -+ for (regi = 0; regi < 32; regi++) -+ supply_register (FP0_REGNUM + regi, buf + 8 * regi); -+ -+ /* The FPSCR is stored in the low order word of the last doubleword in the -+ fpregset. */ -+ supply_register (tdep->ppc_fpscr_regnum, buf + 8 * 32 + 4); -+} -+ -+/* -+ Use a local version of this function to get the correct types for regsets. -+*/ -+ -+static void -+fetch_core_registers (char *core_reg_sect, -+ unsigned core_reg_size, -+ int which, -+ CORE_ADDR reg_addr) -+{ -+ if (which == 0) -+ { -+ if (core_reg_size == ELF_GREGSET_SIZE) -+ ppc_linux_supply_gregset (core_reg_sect); -+ else -+ warning ("wrong size gregset struct in core file"); -+ } -+ else if (which == 2) -+ { -+ if (core_reg_size == ELF_FPREGSET_SIZE) -+ ppc_linux_supply_fpregset (core_reg_sect); -+ else -+ warning ("wrong size fpregset struct in core file"); -+ } -+} -+ -+/* Register that we are able to handle ELF file formats using standard -+ procfs "regset" structures. 
*/ -+ -+static struct core_fns ppc_linux_regset_core_fns = -+{ -+ bfd_target_elf_flavour, /* core_flavour */ -+ default_check_format, /* check_format */ -+ default_core_sniffer, /* core_sniffer */ -+ fetch_core_registers, /* core_read_registers */ -+ NULL /* next */ -+}; -+ -+static void -+ppc_linux_init_abi (struct gdbarch_info info, -+ struct gdbarch *gdbarch) -+{ -+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); -+ -+ if (tdep->wordsize == 4) -+ { -+ /* Until November 2001, gcc did not comply with the 32 bit SysV -+ R4 ABI requirement that structures less than or equal to 8 -+ bytes should be returned in registers. Instead GCC was using -+ the the AIX/PowerOpen ABI - everything returned in memory -+ (well ignoring vectors that is). When this was corrected, it -+ wasn't fixed for GNU/Linux native platform. Use the -+ PowerOpen struct convention. */ -+ set_gdbarch_return_value (gdbarch, ppc_linux_return_value); -+ -+ /* Note: kevinb/2002-04-12: See note in rs6000_gdbarch_init regarding -+ *_push_arguments(). The same remarks hold for the methods below. */ -+ set_gdbarch_deprecated_frameless_function_invocation (gdbarch, ppc_linux_frameless_function_invocation); -+ set_gdbarch_deprecated_frame_chain (gdbarch, ppc_linux_frame_chain); -+ set_gdbarch_deprecated_frame_saved_pc (gdbarch, ppc_linux_frame_saved_pc); -+ -+ set_gdbarch_deprecated_frame_init_saved_regs (gdbarch, -+ ppc_linux_frame_init_saved_regs); -+ set_gdbarch_deprecated_init_extra_frame_info (gdbarch, -+ ppc_linux_init_extra_frame_info); -+ -+ set_gdbarch_memory_remove_breakpoint (gdbarch, -+ ppc_linux_memory_remove_breakpoint); -+ /* Shared library handling. */ -+ set_gdbarch_in_solib_call_trampoline (gdbarch, in_plt_section); -+ set_gdbarch_skip_trampoline_code (gdbarch, -+ ppc_linux_skip_trampoline_code); -+ set_solib_svr4_fetch_link_map_offsets -+ (gdbarch, ppc_linux_svr4_fetch_link_map_offsets); -+ } -+ -+ if (tdep->wordsize == 8) -+ { -+ /* Handle PPC64 GNU/Linux function pointers (which are really -+ function descriptors). */ -+ set_gdbarch_convert_from_func_ptr_addr -+ (gdbarch, ppc64_linux_convert_from_func_ptr_addr); -+ -+ set_gdbarch_in_solib_call_trampoline -+ (gdbarch, ppc64_in_solib_call_trampoline); -+ set_gdbarch_skip_trampoline_code (gdbarch, ppc64_skip_trampoline_code); -+ -+ /* PPC64 malloc's entry-point is called ".malloc". */ -+ set_gdbarch_name_of_malloc (gdbarch, ".malloc"); -+ } -+} -+ -+void -+_initialize_ppc_linux_tdep (void) -+{ -+ /* Register for all sub-familes of the POWER/PowerPC: 32-bit and -+ 64-bit PowerPC, and the older rs6k. */ -+ gdbarch_register_osabi (bfd_arch_powerpc, bfd_mach_ppc, GDB_OSABI_LINUX, -+ ppc_linux_init_abi); -+ gdbarch_register_osabi (bfd_arch_powerpc, bfd_mach_ppc64, GDB_OSABI_LINUX, -+ ppc_linux_init_abi); -+ gdbarch_register_osabi (bfd_arch_rs6000, bfd_mach_rs6k, GDB_OSABI_LINUX, -+ ppc_linux_init_abi); -+ add_core_fns (&ppc_linux_regset_core_fns); -+} ---- crash/main.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/main.c 2007-08-21 16:02:46.000000000 -0400 -@@ -1,8 +1,8 @@ - /* main.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -16,6 +16,7 @@ - */ - - #include "defs.h" -+#include "xen_hyper_defs.h" - #include - #include - -@@ -23,23 +24,37 @@ - static int is_external_command(void); - static int is_builtin_command(void); - static int is_input_file(void); -+static void check_xen_hyper(void); - - static struct option long_options[] = { -- {"memory_module", 1, 0, 0}, -- {"memory_device", 1, 0, 0}, -+ {"memory_module", required_argument, 0, 0}, -+ {"memory_device", required_argument, 0, 0}, - {"no_kallsyms", 0, 0, 0}, - {"no_modules", 0, 0, 0}, - {"no_namelist_gzip", 0, 0, 0}, -- {"help", 0, 0, 0}, -+ {"help", optional_argument, 0, 'h'}, - {"data_debug", 0, 0, 0}, - {"no_data_debug", 0, 0, 0}, - {"no_crashrc", 0, 0, 0}, - {"no_kmem_cache", 0, 0, 0}, -+ {"kmem_cache_delay", 0, 0, 0}, - {"readnow", 0, 0, 0}, - {"smp", 0, 0, 0}, -- {"machdep", 1, 0, 0}, -+ {"machdep", required_argument, 0, 0}, - {"version", 0, 0, 0}, - {"buildinfo", 0, 0, 0}, -+ {"shadow_page_tables", 0, 0, 0}, -+ {"cpus", required_argument, 0, 0}, -+ {"no_ikconfig", 0, 0, 0}, -+ {"hyper", 0, 0, 0}, -+ {"p2m_mfn", required_argument, 0, 0}, -+ {"zero_excluded", 0, 0, 0}, -+ {"no_panic", 0, 0, 0}, -+ {"more", 0, 0, 0}, -+ {"less", 0, 0, 0}, -+ {"CRASHPAGER", 0, 0, 0}, -+ {"no_scroll", 0, 0, 0}, -+ {"reloc", required_argument, 0, 0}, - {0, 0, 0, 0} - }; - -@@ -55,7 +70,7 @@ - */ - opterr = 0; - optind = 0; -- while((c = getopt_long(argc, argv, "LgH:h:e:i:sSvc:d:tf", -+ while((c = getopt_long(argc, argv, "Lkgh::e:i:sSvc:d:tfp:m:", - long_options, &option_index)) != -1) { - switch (c) - { -@@ -64,52 +79,55 @@ - "memory_module")) - pc->memory_module = optarg; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "memory_device")) - pc->memory_device = optarg; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "no_kallsyms")) - kt->flags |= NO_KALLSYMS; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "no_modules")) - kt->flags |= NO_MODULE_ACCESS; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, -+ "no_ikconfig")) -+ kt->flags |= NO_IKCONFIG; -+ -+ else if (STREQ(long_options[option_index].name, - "no_namelist_gzip")) - pc->flags |= NAMELIST_NO_GZIP; - -- if (STREQ(long_options[option_index].name, "help")) { -- program_usage(LONG_FORM); -- clean_exit(0); -- } -- -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "data_debug")) - pc->flags |= DATADEBUG; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "no_data_debug")) - pc->flags &= ~DATADEBUG; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "no_kmem_cache")) - vt->flags |= KMEM_CACHE_UNAVAIL; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, -+ "kmem_cache_delay")) -+ vt->flags |= KMEM_CACHE_DELAY; -+ -+ else if (STREQ(long_options[option_index].name, - "readnow")) - pc->flags |= READNOW; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "smp")) - kt->flags |= SMP; - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "machdep")) - machdep->cmdline_arg = optarg; - -- 
if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "version")) { - pc->flags |= VERSION_QUERY; - display_version(); -@@ -117,12 +135,69 @@ - clean_exit(0); - } - -- if (STREQ(long_options[option_index].name, -+ else if (STREQ(long_options[option_index].name, - "buildinfo")) { - dump_build_data(); - clean_exit(0); - } - -+ else if (STREQ(long_options[option_index].name, -+ "shadow_page_tables")) -+ kt->xen_flags |= SHADOW_PAGE_TABLES; -+ -+ else if (STREQ(long_options[option_index].name, "cpus")) -+ kt->cpus_override = optarg; -+ -+ else if (STREQ(long_options[option_index].name, "hyper")) -+ pc->flags |= XEN_HYPER; -+ -+ else if (STREQ(long_options[option_index].name, "p2m_mfn")) -+ xen_kdump_p2m_mfn(optarg); -+ -+ else if (STREQ(long_options[option_index].name, "zero_excluded")) -+ *diskdump_flags |= ZERO_EXCLUDED; -+ -+ else if (STREQ(long_options[option_index].name, "no_panic")) -+ tt->flags |= PANIC_TASK_NOT_FOUND; -+ -+ else if (STREQ(long_options[option_index].name, "more")) { -+ if ((pc->scroll_command != SCROLL_NONE) && -+ file_exists("/bin/more", NULL)) -+ pc->scroll_command = SCROLL_MORE; -+ } -+ -+ else if (STREQ(long_options[option_index].name, "less")) { -+ if ((pc->scroll_command != SCROLL_NONE) && -+ file_exists("/usr/bin/less", NULL)) -+ pc->scroll_command = SCROLL_LESS; -+ } -+ -+ else if (STREQ(long_options[option_index].name, "CRASHPAGER")) { -+ if ((pc->scroll_command != SCROLL_NONE) && -+ CRASHPAGER_valid()) -+ pc->scroll_command = SCROLL_CRASHPAGER; -+ } -+ -+ else if (STREQ(long_options[option_index].name, "no_scroll")) -+ pc->flags &= ~SCROLL; -+ -+ else if (STREQ(long_options[option_index].name, "no_crashrc")) -+ pc->flags |= NOCRASHRC; -+ -+ else if (STREQ(long_options[option_index].name, "reloc")) { -+ if (!calculate(optarg, &kt->relocate, NULL, 0)) { -+ error(INFO, "invalid --reloc argument: %s\n", -+ optarg); -+ program_usage(SHORT_FORM); -+ } -+ kt->flags |= RELOC_SET; -+ } -+ -+ else { -+ error(INFO, "internal error: option %s unhandled\n", -+ long_options[option_index].name); -+ program_usage(SHORT_FORM); -+ } - break; - - case 'f': -@@ -133,14 +208,25 @@ - pc->flags |= KERNEL_DEBUG_QUERY; - break; - -- case 'H': -- cmd_usage(optarg, COMPLETE_HELP); -- clean_exit(0); -- - case 'h': -- cmd_usage(optarg, COMPLETE_HELP|PIPE_TO_LESS); -+ /* note: long_getopt's handling of optional arguments is weak. -+ * To it, an optional argument must be part of the same argument -+ * as the flag itself (eg. --help=commands or -hcommands). -+ * We want to accept "--help commands" or "-h commands". -+ * So we must do that part ourselves. -+ */ -+ if (optarg != NULL) -+ cmd_usage(optarg, COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP); -+ else if (argv[optind] != NULL && argv[optind][0] != '-') -+ cmd_usage(argv[optind++], COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP); -+ else -+ program_usage(LONG_FORM); - clean_exit(0); - -+ case 'k': -+ pc->flags |= KERNTYPES; -+ break; -+ - case 'e': - if (STREQ(optarg, "vi")) - pc->editing_mode = "vi"; -@@ -168,7 +254,7 @@ - case 's': - pc->flags |= SILENT; - pc->flags &= ~SCROLL; -- pc->scroll_command = SCROLL_NONE; -+// pc->scroll_command = SCROLL_NONE; (why?) 
- break; - - case 'L': -@@ -193,14 +279,18 @@ - set_vas_debug(pc->debug); - break; - -+ case 'p': -+ force_page_size(optarg); -+ break; -+ -+ case 'm': -+ machdep->cmdline_arg = optarg; -+ break; -+ - default: -- if (STREQ(argv[optind-1], "-h")) -- program_usage(LONG_FORM); -- else { -- error(INFO, "invalid option: %s\n", -- argv[optind-1]); -- program_usage(SHORT_FORM); -- } -+ error(INFO, "invalid option: %s\n", -+ argv[optind-1]); -+ program_usage(SHORT_FORM); - } - } - opterr = 1; -@@ -261,8 +351,36 @@ - } - pc->flags |= NETDUMP; - pc->dumpfile = argv[optind]; -- pc->readmem = read_netdump; -- pc->writemem = write_netdump; -+ -+ if (is_sadump_xen()) { -+ pc->readmem = read_kdump; -+ pc->writemem = write_kdump; -+ } else { -+ pc->readmem = read_netdump; -+ pc->writemem = write_netdump; -+ } -+ -+ } else if (is_kdump(argv[optind], KDUMP_LOCAL)) { -+ if (pc->flags & MEMORY_SOURCES) { -+ error(INFO, -+ "too many dumpfile arguments\n"); -+ program_usage(SHORT_FORM); -+ } -+ pc->flags |= KDUMP; -+ pc->dumpfile = argv[optind]; -+ pc->readmem = read_kdump; -+ pc->writemem = write_kdump; -+ -+ } else if (is_xendump(argv[optind])) { -+ if (pc->flags & MEMORY_SOURCES) { -+ error(INFO, -+ "too many dumpfile arguments\n"); -+ program_usage(SHORT_FORM); -+ } -+ pc->flags |= XENDUMP; -+ pc->dumpfile = argv[optind]; -+ pc->readmem = read_xendump; -+ pc->writemem = write_xendump; - - } else if (is_diskdump(argv[optind])) { - if (pc->flags & MEMORY_SOURCES) { -@@ -322,6 +440,8 @@ - optind++; - } - -+ check_xen_hyper(); -+ - if (setjmp(pc->main_loop_env)) - clean_exit(1); - -@@ -332,11 +452,10 @@ - buf_init(); - cmdline_init(); - mem_init(); -+ hq_init(); - machdep_init(PRE_SYMTAB); - symtab_init(); - machdep_init(PRE_GDB); -- kernel_init(PRE_GDB); -- verify_version(); - datatype_init(); - - /* -@@ -361,17 +480,28 @@ - { - if (!(pc->flags & GDB_INIT)) { - gdb_session_init(); -- kernel_init(POST_GDB); -- machdep_init(POST_GDB); -- vm_init(); -- hq_init(); -- module_init(); -- help_init(); -- task_init(); -- vfs_init(); -- net_init(); -- dev_init(); -- machdep_init(POST_INIT); -+ if (XEN_HYPER_MODE()) { -+#ifdef XEN_HYPERVISOR_ARCH -+ machdep_init(POST_GDB); -+ xen_hyper_init(); -+ machdep_init(POST_INIT); -+#else -+ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); -+#endif -+ } else { -+ read_in_kernel_config(IKCFG_INIT); -+ kernel_init(); -+ machdep_init(POST_GDB); -+ vm_init(); -+ machdep_init(POST_VM); -+ module_init(); -+ help_init(); -+ task_init(); -+ vfs_init(); -+ net_init(); -+ dev_init(); -+ machdep_init(POST_INIT); -+ } - } else - SIGACTION(SIGINT, restart, &pc->sigaction, NULL); - -@@ -379,8 +509,17 @@ - * Display system statistics and current context. 
- */ - if (!(pc->flags & SILENT) && !(pc->flags & RUNTIME)) { -- display_sys_stats(); -- show_context(CURRENT_CONTEXT()); -+ if (XEN_HYPER_MODE()) { -+#ifdef XEN_HYPERVISOR_ARCH -+ xen_hyper_display_sys_stats(); -+ xen_hyper_show_vcpu_context(XEN_HYPER_VCPU_LAST_CONTEXT()); -+#else -+ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); -+#endif -+ } else { -+ display_sys_stats(); -+ show_context(CURRENT_CONTEXT()); -+ } - fprintf(fp, "\n"); - } - -@@ -426,8 +565,17 @@ - - if ((ct = get_command_table_entry(args[0]))) { - if (ct->flags & REFRESH_TASK_TABLE) { -- tt->refresh_task_table(); -- sort_context_array(); -+ if (XEN_HYPER_MODE()) { -+#ifdef XEN_HYPERVISOR_ARCH -+ xen_hyper_refresh_domain_context_space(); -+ xen_hyper_refresh_vcpu_context_space(); -+#else -+ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); -+#endif -+ } else { -+ tt->refresh_task_table(); -+ sort_context_array(); -+ } - } - if (!STREQ(pc->curcmd, pc->program_name)) - pc->lastcmd = pc->curcmd; -@@ -459,6 +607,9 @@ - - pc->curcmd = pc->program_name; - error(INFO, "command not found: %s\n", args[0]); -+ -+ if (pc->curcmd_flags & REPEAT) -+ pc->curcmd_flags &= ~REPEAT; - } - - -@@ -471,7 +622,7 @@ - struct command_table_entry *cp; - struct extension_table *ext; - -- for (cp = &base_command_table[0]; cp->name; cp++) { -+ for (cp = pc->cmd_table; cp->name; cp++) { - if (STREQ(cp->name, name)) - return cp; - } -@@ -591,6 +742,8 @@ - int i; - char *p1; - char buf[BUFSIZE]; -+ char homerc[BUFSIZE]; -+ char localrc[BUFSIZE]; - FILE *afp; - char *program; - -@@ -625,7 +778,8 @@ - machdep->verify_paddr = generic_verify_paddr; - pc->redhat_debug_loc = DEFAULT_REDHAT_DEBUG_LOCATION; - pc->cmdgencur = 0; -- pc->cmdgenspec = ~pc->cmdgencur; -+ pc->cmd_table = linux_command_table; -+ kt->BUG_bytes = -1; - - /* - * Get gdb version before initializing it since this might be one -@@ -637,7 +791,10 @@ - * Set up the default scrolling behavior for terminal output. 
- */ - if (isatty(fileno(stdout))) { -- if (file_exists("/usr/bin/less", NULL)) { -+ if (CRASHPAGER_valid()) { -+ pc->flags |= SCROLL; -+ pc->scroll_command = SCROLL_CRASHPAGER; -+ } else if (file_exists("/usr/bin/less", NULL)) { - pc->flags |= SCROLL; - pc->scroll_command = SCROLL_LESS; - } else if (file_exists("/bin/more", NULL)) { -@@ -685,11 +842,11 @@ - pc->home = "(unknown)"; - } else - strcpy(pc->home, p1); -- sprintf(buf, "%s/.%src", pc->home, pc->program_name); -- if (!(pc->flags & NOCRASHRC) && file_exists(buf, NULL)) { -- if ((afp = fopen(buf, "r")) == NULL) -+ sprintf(homerc, "%s/.%src", pc->home, pc->program_name); -+ if (!(pc->flags & NOCRASHRC) && file_exists(homerc, NULL)) { -+ if ((afp = fopen(homerc, "r")) == NULL) - error(INFO, "cannot open %s: %s\n", -- buf, strerror(errno)); -+ homerc, strerror(errno)); - else { - while (fgets(buf, BUFSIZE, afp)) - resolve_rc_cmd(buf, ALIAS_RCHOME); -@@ -698,11 +855,12 @@ - } - } - -- sprintf(buf, ".%src", pc->program_name); -- if (!(pc->flags & NOCRASHRC) && file_exists(buf, NULL)) { -- if ((afp = fopen(buf, "r")) == NULL) -+ sprintf(localrc, ".%src", pc->program_name); -+ if (!same_file(homerc, localrc) && -+ !(pc->flags & NOCRASHRC) && file_exists(localrc, NULL)) { -+ if ((afp = fopen(localrc, "r")) == NULL) - error(INFO, "cannot open %s: %s\n", -- buf, strerror(errno)); -+ localrc, strerror(errno)); - else { - while (fgets(buf, BUFSIZE, afp)) - resolve_rc_cmd(buf, ALIAS_RCLOCAL); -@@ -712,6 +870,8 @@ - - if (STREQ(pc->editing_mode, "no_mode")) - pc->editing_mode = "vi"; -+ -+ machdep_init(SETUP_ENV); - } - - -@@ -840,13 +1000,22 @@ - if (pc->flags & REM_S390D) - sprintf(&buf[strlen(buf)], - "%sREM_S390D", others++ ? "|" : ""); -- if (pc->flags & NETDUMP) -+ if (pc->flags & NETDUMP) - sprintf(&buf[strlen(buf)], - "%sNETDUMP", others++ ? "|" : ""); -+ if (pc->flags & XENDUMP) -+ sprintf(&buf[strlen(buf)], -+ "%sXENDUMP", others++ ? "|" : ""); -+ if (pc->flags & KDUMP) -+ sprintf(&buf[strlen(buf)], -+ "%sKDUMP", others++ ? "|" : ""); -+ if (pc->flags & SYSRQ) -+ sprintf(&buf[strlen(buf)], -+ "%sSYSRQ", others++ ? "|" : ""); - if (pc->flags & REM_NETDUMP) - sprintf(&buf[strlen(buf)], - "%sREM_NETDUMP", others++ ? "|" : ""); -- if (pc->flags & DISKDUMP) -+ if (pc->flags & DISKDUMP) - sprintf(&buf[strlen(buf)], - "%sDISKDUMP", others++ ? "|" : ""); - if (pc->flags & SYSMAP) -@@ -855,21 +1024,36 @@ - if (pc->flags & SYSMAP_ARG) - sprintf(&buf[strlen(buf)], - "%sSYSMAP_ARG", others++ ? "|" : ""); -- if (pc->flags & DATADEBUG) -+ if (pc->flags & DATADEBUG) - sprintf(&buf[strlen(buf)], - "%sDATADEBUG", others++ ? "|" : ""); -- if (pc->flags & FINDKERNEL) -+ if (pc->flags & FINDKERNEL) - sprintf(&buf[strlen(buf)], - "%sFINDKERNEL", others++ ? "|" : ""); -- if (pc->flags & VERSION_QUERY) -+ if (pc->flags & VERSION_QUERY) - sprintf(&buf[strlen(buf)], - "%sVERSION_QUERY", others++ ? "|" : ""); -- if (pc->flags & READNOW) -+ if (pc->flags & READNOW) - sprintf(&buf[strlen(buf)], - "%sREADNOW", others++ ? "|" : ""); -- if (pc->flags & NOCRASHRC) -+ if (pc->flags & NOCRASHRC) - sprintf(&buf[strlen(buf)], - "%sNOCRASHRC", others++ ? "|" : ""); -+ if (pc->flags & INIT_IFILE) -+ sprintf(&buf[strlen(buf)], -+ "%sINIT_IFILE", others++ ? "|" : ""); -+ if (pc->flags & XEN_HYPER) -+ sprintf(&buf[strlen(buf)], -+ "%sXEN_HYPER", others++ ? "|" : ""); -+ if (pc->flags & XEN_CORE) -+ sprintf(&buf[strlen(buf)], -+ "%sXEN_CORE", others++ ? "|" : ""); -+ if (pc->flags & PLEASE_WAIT) -+ sprintf(&buf[strlen(buf)], -+ "%sPLEASE_WAIT", others++ ? 
"|" : ""); -+ if (pc->flags & IFILE_ERROR) -+ sprintf(&buf[strlen(buf)], -+ "%sIFILE_ERROR", others++ ? "|" : ""); - - if (pc->flags) - strcat(buf, ")"); -@@ -933,10 +1117,36 @@ - fprintf(fp, " ifile_pipe: %lx\n", (ulong)pc->ifile_pipe); - fprintf(fp, " ifile_ofile: %lx\n", (ulong)pc->ifile_ofile); - fprintf(fp, " input_file: %s\n", pc->input_file); -- fprintf(fp, " scroll_command: %s\n", -- pc->scroll_command == SCROLL_NONE ? "(none)" : -- pc->scroll_command == SCROLL_LESS ? -- "/usr/bin/less" : "/bin/more"); -+ fprintf(fp, "ifile_in_progress: %lx (", pc->ifile_in_progress); -+ others = 0; -+ if (pc->ifile_in_progress & RCHOME_IFILE) -+ fprintf(fp, "%sRCHOME_IFILE", others++ ? "|" : ""); -+ if (pc->ifile_in_progress & RCLOCAL_IFILE) -+ fprintf(fp, "%sRCLOCAL_IFILE", others++ ? "|" : ""); -+ if (pc->ifile_in_progress & CMDLINE_IFILE) -+ fprintf(fp, "%sCMDLINE_IFILE", others++ ? "|" : ""); -+ if (pc->ifile_in_progress & RUNTIME_IFILE) -+ fprintf(fp, "%sRUNTIME_IFILE", others++ ? "|" : ""); -+ fprintf(fp, ")\n"); -+ fprintf(fp, " ifile_offset: %lld\n", (ulonglong)pc->ifile_offset); -+ fprintf(fp, "runtime_ifile_cmd: %s\n", pc->runtime_ifile_cmd ? -+ pc->runtime_ifile_cmd : "(unused)"); -+ fprintf(fp, " scroll_command: "); -+ switch (pc->scroll_command) -+ { -+ case SCROLL_NONE: -+ fprintf(fp, "SCROLL_NONE\n"); -+ break; -+ case SCROLL_LESS: -+ fprintf(fp, "SCROLL_LESS\n"); -+ break; -+ case SCROLL_MORE: -+ fprintf(fp, "SCROLL_MORE\n"); -+ break; -+ case SCROLL_CRASHPAGER: -+ fprintf(fp, "SCROLL_CRASHPAGER (%s)\n", getenv("CRASHPAGER")); -+ break; -+ } - - buf[0] = NULLCHAR; - fprintf(fp, " redirect: %lx ", pc->redirect); -@@ -1008,6 +1218,8 @@ - fprintf(fp, " tmp_fp: %lx\n", (ulong)pc->tmp_fp); - fprintf(fp, " tmpfile2: %lx\n", (ulong)pc->tmpfile2); - -+ fprintf(fp, " cmd_table: %s\n", XEN_HYPER_MODE() ? -+ "xen_hyper_command_table" : "linux_command_table"); - fprintf(fp, " curcmd: %s\n", pc->curcmd); - fprintf(fp, " lastcmd: %s\n", pc->lastcmd); - fprintf(fp, " cur_gdb_cmd: %d %s\n", pc->cur_gdb_cmd, -@@ -1016,7 +1228,30 @@ - gdb_command_string(pc->last_gdb_cmd, buf, FALSE)); - fprintf(fp, " cur_req: %lx\n", (ulong)pc->cur_req); - fprintf(fp, " cmdgencur: %ld\n", pc->cmdgencur); -- fprintf(fp, " cmdgenspec: %ld\n", pc->cmdgenspec); -+ fprintf(fp, " curcmd_flags: %lx (", pc->curcmd_flags); -+ others = 0; -+ if (pc->curcmd_flags & XEN_MACHINE_ADDR) -+ fprintf(fp, "%sXEN_MACHINE_ADDR", others ? "|" : ""); -+ if (pc->curcmd_flags & REPEAT) -+ fprintf(fp, "%sREPEAT", others ? "|" : ""); -+ if (pc->curcmd_flags & IDLE_TASK_SHOWN) -+ fprintf(fp, "%sIDLE_TASK_SHOWN", others ? "|" : ""); -+ if (pc->curcmd_flags & TASK_SPECIFIED) -+ fprintf(fp, "%sTASK_SPECIFIED", others ? "|" : ""); -+ if (pc->curcmd_flags & MEMTYPE_UVADDR) -+ fprintf(fp, "%sMEMTYPE_UVADDR", others ? "|" : ""); -+ if (pc->curcmd_flags & MEMTYPE_FILEADDR) -+ fprintf(fp, "%sMEMTYPE_FILEADDR", others ? "|" : ""); -+ if (pc->curcmd_flags & HEADER_PRINTED) -+ fprintf(fp, "%sHEADER_PRINTED", others ? "|" : ""); -+ if (pc->curcmd_flags & BAD_INSTRUCTION) -+ fprintf(fp, "%sBAD_INSTRUCTION", others ? "|" : ""); -+ if (pc->curcmd_flags & UD2A_INSTRUCTION) -+ fprintf(fp, "%sUD2A_INSTRUCTION", others ? "|" : ""); -+ if (pc->curcmd_flags & IRQ_IN_USE) -+ fprintf(fp, "%sIRQ_IN_USE", others ? 
"|" : ""); -+ fprintf(fp, ")\n"); -+ fprintf(fp, " curcmd_private: %llx\n", pc->curcmd_private); - fprintf(fp, " sigint_cnt: %d\n", pc->sigint_cnt); - fprintf(fp, " sigaction: %lx\n", (ulong)&pc->sigaction); - fprintf(fp, " gdb_sigaction: %lx\n", (ulong)&pc->gdb_sigaction); -@@ -1051,8 +1286,16 @@ - fprintf(fp, " readmem: read_daemon()\n"); - else if (pc->readmem == read_netdump) - fprintf(fp, " readmem: read_netdump()\n"); -+ else if (pc->readmem == read_xendump) -+ fprintf(fp, " readmem: read_xendump()\n"); -+ else if (pc->readmem == read_kdump) -+ fprintf(fp, " readmem: read_kdump()\n"); - else if (pc->readmem == read_memory_device) - fprintf(fp, " readmem: read_memory_device()\n"); -+ else if (pc->readmem == read_xendump_hyper) -+ fprintf(fp, " readmem: read_xendump_hyper()\n"); -+ else if (pc->readmem == read_diskdump) -+ fprintf(fp, " readmem: read_diskdump()\n"); - else - fprintf(fp, " readmem: %lx\n", (ulong)pc->readmem); - if (pc->writemem == write_dev_mem) -@@ -1065,8 +1308,14 @@ - fprintf(fp, " writemem: write_daemon()\n"); - else if (pc->writemem == write_netdump) - fprintf(fp, " writemem: write_netdump()\n"); -+ else if (pc->writemem == write_xendump) -+ fprintf(fp, " writemem: write_xendump()\n"); -+ else if (pc->writemem == write_kdump) -+ fprintf(fp, " writemem: write_kdump()\n"); - else if (pc->writemem == write_memory_device) - fprintf(fp, " writemem: write_memory_device()\n"); -+ else if (pc->writemem == write_diskdump) -+ fprintf(fp, " writemem: write_diskdump()\n"); - else - fprintf(fp, " writemem: %lx\n", (ulong)pc->writemem); - -@@ -1100,3 +1349,28 @@ - - exit(status); - } -+ -+/* -+ * Check whether this session is for xen hypervisor analysis. -+ */ -+static void -+check_xen_hyper(void) -+{ -+ if (!pc->namelist) -+ return; -+ -+ if (!XEN_HYPER_MODE()) { -+ if (STRNEQ(basename(pc->namelist), "xen-syms")) -+ pc->flags |= XEN_HYPER; -+ else -+ return; -+ } -+ -+#ifdef XEN_HYPERVISOR_ARCH -+ pc->cmd_table = xen_hyper_command_table; -+ if (pc->flags & XENDUMP) -+ pc->readmem = read_xendump_hyper; -+#else -+ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); -+#endif -+} ---- crash/tools.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/tools.c 2007-07-26 14:11:50.000000000 -0400 -@@ -1,8 +1,8 @@ - /* tools.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -18,12 +18,12 @@ - #include "defs.h" - #include - --static int calculate(char *, ulong *, ulonglong *, ulong); - static void print_number(struct number_option *, int, int); - static long alloc_hq_entry(void); - struct hq_entry; - static void dealloc_hq_entry(struct hq_entry *); - static void show_options(void); -+static void dump_struct_members(struct list_data *, int, ulong); - - /* - * General purpose error reporting routine. Type INFO prints the message -@@ -63,6 +63,8 @@ - - if ((new_line = (buf[0] == '\n'))) - shift_string_left(buf, 1); -+ else if (pc->flags & PLEASE_WAIT) -+ new_line = TRUE; - - if (pc->stdpipe) { - fprintf(pc->stdpipe, "%s%s: %s%s", -@@ -1770,6 +1772,42 @@ - pc->flags & HASH ? 
"on" : "off"); - return; - -+ } else if (STREQ(args[optind], "unwind")) { -+ if (args[optind+1]) { -+ optind++; -+ if (STREQ(args[optind], "on")) { -+ if ((kt->flags & DWARF_UNWIND_CAPABLE) || -+ !runtime) { -+ kt->flags |= DWARF_UNWIND; -+ kt->flags &= ~NO_DWARF_UNWIND; -+ } -+ } else if (STREQ(args[optind], "off")) { -+ kt->flags &= ~DWARF_UNWIND; -+ if (!runtime) -+ kt->flags |= NO_DWARF_UNWIND; -+ } else if (IS_A_NUMBER(args[optind])) { -+ value = stol(args[optind], -+ FAULT_ON_ERROR, NULL); -+ if (value) { -+ if ((kt->flags & DWARF_UNWIND_CAPABLE) || -+ !runtime) { -+ kt->flags |= DWARF_UNWIND; -+ kt->flags &= ~NO_DWARF_UNWIND; -+ } -+ } else { -+ kt->flags &= ~DWARF_UNWIND; -+ if (!runtime) -+ kt->flags |= NO_DWARF_UNWIND; -+ } -+ } else -+ goto invalid_set_command; -+ } -+ -+ if (runtime) -+ fprintf(fp, "unwind: %s\n", -+ kt->flags & DWARF_UNWIND ? "on" : "off"); -+ return; -+ - } else if (STREQ(args[optind], "refresh")) { - if (args[optind+1]) { - optind++; -@@ -1806,7 +1844,14 @@ - pc->flags |= SCROLL; - else if (STREQ(args[optind], "off")) - pc->flags &= ~SCROLL; -- else if (IS_A_NUMBER(args[optind])) { -+ else if (STREQ(args[optind], "more")) -+ pc->scroll_command = SCROLL_MORE; -+ else if (STREQ(args[optind], "less")) -+ pc->scroll_command = SCROLL_LESS; -+ else if (STREQ(args[optind], "CRASHPAGER")) { -+ if (CRASHPAGER_valid()) -+ pc->scroll_command = SCROLL_CRASHPAGER; -+ } else if (IS_A_NUMBER(args[optind])) { - value = stol(args[optind], - FAULT_ON_ERROR, NULL); - if (value) -@@ -1817,9 +1862,25 @@ - goto invalid_set_command; - } - -- if (runtime) -- fprintf(fp, "scroll: %s\n", -- pc->flags & SCROLL ? "on" : "off"); -+ if (runtime) { -+ fprintf(fp, "scroll: %s ", -+ pc->flags & SCROLL ? "on" : "off"); -+ switch (pc->scroll_command) -+ { -+ case SCROLL_LESS: -+ fprintf(fp, "(/usr/bin/less)\n"); -+ break; -+ case SCROLL_MORE: -+ fprintf(fp, "(/bin/more)\n"); -+ break; -+ case SCROLL_NONE: -+ fprintf(fp, "(none)\n"); -+ break; -+ case SCROLL_CRASHPAGER: -+ fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); -+ break; -+ } -+ } - - return; - -@@ -2004,6 +2065,10 @@ - pc->flags &= ~(DUMPFILE_TYPES); - if (is_netdump(args[optind], NETDUMP_LOCAL)) - pc->flags |= NETDUMP; -+ else if (is_kdump(args[optind], KDUMP_LOCAL)) -+ pc->flags |= KDUMP; -+ else if (is_xendump(args[optind])) -+ pc->flags |= XENDUMP; - else if (is_diskdump(args[optind])) - pc->flags |= DISKDUMP; - else if (is_lkcd_compressed_dump(args[optind])) -@@ -2054,6 +2119,31 @@ - pc->flags |= DATADEBUG; - return; - -+ } else if (STREQ(args[optind], "zero_excluded")) { -+ -+ if (args[optind+1]) { -+ optind++; -+ if (STREQ(args[optind], "on")) -+ *diskdump_flags |= ZERO_EXCLUDED; -+ else if (STREQ(args[optind], "off")) -+ *diskdump_flags &= ~ZERO_EXCLUDED; -+ else if (IS_A_NUMBER(args[optind])) { -+ value = stol(args[optind], -+ FAULT_ON_ERROR, NULL); -+ if (value) -+ *diskdump_flags |= ZERO_EXCLUDED; -+ else -+ *diskdump_flags &= ~ZERO_EXCLUDED; -+ } else -+ goto invalid_set_command; -+ } -+ -+ if (runtime) -+ fprintf(fp, "zero_excluded: %s\n", -+ *diskdump_flags & ZERO_EXCLUDED ? -+ "on" : "off"); -+ return; -+ - } else if (runtime) { - ulong pid, task; - -@@ -2106,7 +2196,23 @@ - static void - show_options(void) - { -- fprintf(fp, " scroll: %s\n", pc->flags & SCROLL ? "on" : "off"); -+ fprintf(fp, " scroll: %s ", -+ pc->flags & SCROLL ? 
"on" : "off"); -+ switch (pc->scroll_command) -+ { -+ case SCROLL_LESS: -+ fprintf(fp, "(/usr/bin/less)\n"); -+ break; -+ case SCROLL_MORE: -+ fprintf(fp, "(/bin/more)\n"); -+ break; -+ case SCROLL_NONE: -+ fprintf(fp, "(none)\n"); -+ break; -+ case SCROLL_CRASHPAGER: -+ fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); -+ break; -+ } - fprintf(fp, " radix: %d (%s)\n", pc->output_radix, - pc->output_radix == 10 ? "decimal" : - pc->output_radix == 16 ? "hexadecimal" : "unknown"); -@@ -2121,6 +2227,8 @@ - fprintf(fp, " edit: %s\n", pc->editing_mode); - fprintf(fp, " namelist: %s\n", pc->namelist); - fprintf(fp, " dumpfile: %s\n", pc->dumpfile); -+ fprintf(fp, " unwind: %s\n", kt->flags & DWARF_UNWIND ? "on" : "off"); -+ fprintf(fp, " zero_excluded: %s\n", *diskdump_flags & ZERO_EXCLUDED ? "on" : "off"); - } - - -@@ -2336,6 +2444,7 @@ - char *element2; - struct syment *sp; - -+ opcode = 0; - value1 = value2 = 0; - ll_value1 = ll_value2 = 0; - -@@ -2550,7 +2659,7 @@ - * its real value. The allowable multipliers are k, K, m, M, g and G, for - * kilobytes, megabytes and gigabytes. - */ --static int -+int - calculate(char *s, ulong *value, ulonglong *llvalue, ulong flags) - { - ulong factor, bias; -@@ -2832,7 +2941,9 @@ - break; - - case 's': -- ld->structname = optarg; -+ if (ld->structname_args++ == 0) -+ hq_open(); -+ hq_enter((ulong)optarg); - break; - - case 'o': -@@ -2871,6 +2982,12 @@ - cmd_usage(pc->curcmd, SYNOPSIS); - } - -+ if (ld->structname_args) { -+ ld->structname = (char **)GETBUF(sizeof(char *) * ld->structname_args); -+ retrieve_list((ulong *)ld->structname, ld->structname_args); -+ hq_close(); -+ } -+ - while (args[optind]) { - if (strstr(args[optind], ".") && - arg_to_datatype(args[optind], sm, RETURN_ON_ERROR) > 1) { -@@ -2896,11 +3013,25 @@ - } - - /* -- * If it's not a symbol nor a number, bail out. -+ * If it's not a symbol nor a number, bail out if it -+ * cannot be evaluated as a start address. - */ -- if (!IS_A_NUMBER(args[optind])) -+ if (!IS_A_NUMBER(args[optind])) { -+ if (can_eval(args[optind])) { -+ value = eval(args[optind], FAULT_ON_ERROR, NULL); -+ if (IS_KVADDR(value)) { -+ if (ld->flags & LIST_START_ENTERED) -+ error(FATAL, -+ "list start already entered\n"); -+ ld->start = value; -+ ld->flags |= LIST_START_ENTERED; -+ goto next_arg; -+ } -+ } -+ - error(FATAL, "invalid argument: %s\n", - args[optind]); -+ } - - /* - * If the start is known, it's got to be an offset. -@@ -2941,7 +3072,8 @@ - ld->member_offset = value; - ld->flags |= LIST_OFFSET_ENTERED; - goto next_arg; -- } else if (!IS_A_NUMBER(args[optind+1]) && -+ } else if ((!IS_A_NUMBER(args[optind+1]) && -+ !can_eval(args[optind+1])) && - !strstr(args[optind+1], ".")) - error(FATAL, "symbol not found: %s\n", - args[optind+1]); -@@ -3002,8 +3134,12 @@ - hq_open(); - c = do_list(ld); - hq_close(); -+ -+ if (ld->structname_args) -+ FREEBUF(ld->structname); - } - -+ - /* - * Does the work for cmd_list() and any other function that requires the - * contents of a linked list. See cmd_list description above for details. 
-@@ -3013,7 +3149,7 @@ - { - ulong next, last, first; - ulong searchfor, readflag; -- int count, others; -+ int i, count, others; - - if (CRASHDEBUG(1)) { - others = 0; -@@ -3038,7 +3174,11 @@ - console("list_head_offset: %ld\n", ld->list_head_offset); - console(" end: %lx\n", ld->end); - console(" searchfor: %lx\n", ld->searchfor); -- console(" structname: %s\n", ld->structname); -+ console(" structname_args: %lx\n", ld->structname_args); -+ if (!ld->structname_args) -+ console(" structname: (unused)\n"); -+ for (i = 0; i < ld->structname_args; i++) -+ console(" structname[%d]: %s\n", i, ld->structname[i]); - console(" header: %s\n", ld->header); - } - -@@ -3065,20 +3205,21 @@ - fprintf(fp, "%lx\n", next - ld->list_head_offset); - - if (ld->structname) { -- switch (count_chars(ld->structname, '.')) -- { -- case 0: -- dump_struct(ld->structname, -- next - ld->list_head_offset, 0); -- break; -- case 1: -- dump_struct_member(ld->structname, -- next - ld->list_head_offset, 0); -- break; -- default: -- error(FATAL, -- "invalid structure reference: %s\n", -- ld->structname); -+ for (i = 0; i < ld->structname_args; i++) { -+ switch (count_chars(ld->structname[i], '.')) -+ { -+ case 0: -+ dump_struct(ld->structname[i], -+ next - ld->list_head_offset, 0); -+ break; -+ case 1: -+ dump_struct_members(ld, i, next); -+ break; -+ default: -+ error(FATAL, -+ "invalid structure reference: %s\n", -+ ld->structname[i]); -+ } - } - } - } -@@ -3148,6 +3289,42 @@ - } - - /* -+ * Issue a dump_struct_member() call for one or more structure -+ * members. Multiple members are passed in a comma-separated -+ * list using the the format: -+ * -+ * struct.member1,member2,member3 -+ */ -+void -+dump_struct_members(struct list_data *ld, int idx, ulong next) -+{ -+ int i, argc; -+ char *p1, *p2; -+ char *structname, *members; -+ char *arglist[MAXARGS]; -+ -+ structname = GETBUF(strlen(ld->structname[idx])+1); -+ members = GETBUF(strlen(ld->structname[idx])+1); -+ -+ strcpy(structname, ld->structname[idx]); -+ p1 = strstr(structname, ".") + 1; -+ -+ p2 = strstr(ld->structname[idx], ".") + 1; -+ strcpy(members, p2); -+ replace_string(members, ",", ' '); -+ argc = parse_line(members, arglist); -+ -+ for (i = 0; i < argc; i++) { -+ *p1 = NULLCHAR; -+ strcat(structname, arglist[i]); -+ dump_struct_member(structname, next - ld->list_head_offset, 0); -+ } -+ -+ FREEBUF(structname); -+ FREEBUF(members); -+} -+ -+/* - * The next set of functions are a general purpose hashing tool used to - * identify duplicate entries in a set of passed-in data, and if found, - * to fail the entry attempt. When a command wishes to verify a list -@@ -3552,6 +3729,52 @@ - return(-1); - } - -+/* -+ * For a given value, check to see if a hash queue entry exists. If an -+ * entry is found, return TRUE; for all other possibilities return FALSE. 
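dump_struct_members() above takes a "struct.member1,member2,member3" specification and issues one dump_struct_member() call per member. A standalone sketch of the same splitting idea using only libc, with hypothetical structure and member names:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char spec[] = "mount.mnt_devname,mnt_dirname";     /* hypothetical input */
            char prefix[64], *members, *tok;

            members = strchr(spec, '.') + 1;                   /* member list after the dot */
            snprintf(prefix, sizeof(prefix), "%.*s", (int)(members - spec), spec);

            for (tok = strtok(members, ","); tok; tok = strtok(NULL, ","))
                    printf("%s%s\n", prefix, tok);             /* mount.mnt_devname, then mount.mnt_dirname */
            return 0;
    }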
-+ */ -+int -+hq_entry_exists(ulong value) -+{ -+ struct hash_table *ht; -+ struct hq_entry *list_entry; -+ long hqi; -+ -+ if (!(pc->flags & HASH)) -+ return FALSE; -+ -+ ht = &hash_table; -+ -+ if (ht->flags & (HASH_QUEUE_NONE)) -+ return FALSE; -+ -+ if (!(ht->flags & HASH_QUEUE_OPEN)) -+ return FALSE; -+ -+ hqi = HQ_INDEX(value); -+ list_entry = ht->memptr + ht->queue_heads[hqi].next; -+ -+ while (TRUE) { -+ if (list_entry->value == value) -+ return TRUE; -+ -+ if (list_entry->next >= ht->count) { -+ error(INFO, corrupt_hq, -+ list_entry->value, -+ list_entry->next, -+ list_entry->order); -+ ht->flags |= HASH_QUEUE_NONE; -+ return FALSE; -+ } -+ -+ if (list_entry->next == 0) -+ break; -+ -+ list_entry = ht->memptr + list_entry->next; -+ } -+ -+ return FALSE; -+} - - /* - * K&R power function for integers -@@ -4210,6 +4433,9 @@ - { - ulonglong total, days, hours, minutes, seconds; - -+ if (CRASHDEBUG(2)) -+ error(INFO, "convert_time: %lld (%llx)\n", count, count); -+ - total = (count)/(ulonglong)machdep->hz; - - days = total / SEC_DAYS; -@@ -4300,12 +4526,58 @@ - void - command_not_supported() - { -- error(FATAL, "command not supported on this architecture\n"); -+ error(FATAL, "command not supported on this architecture or kernel\n"); - } - - void - option_not_supported(int c) - { -- error(FATAL, "-%c option not supported on this architecture\n", -+ error(FATAL, "-%c option not supported on this architecture or kernel\n", - (char)c); - } -+ -+void -+please_wait(char *s) -+{ -+ if ((pc->flags & SILENT) || !(pc->flags & TTY) || -+ !DUMPFILE() || (pc->flags & RUNTIME)) -+ return; -+ -+ pc->flags |= PLEASE_WAIT; -+ -+ fprintf(fp, "\rplease wait... (%s)", s); -+ fflush(fp); -+} -+ -+void -+please_wait_done(void) -+{ -+ if ((pc->flags & SILENT) || !(pc->flags & TTY) || -+ !DUMPFILE() || (pc->flags & RUNTIME)) -+ return; -+ -+ pc->flags &= ~PLEASE_WAIT; -+ -+ fprintf(fp, "\r \r"); -+ fflush(fp); -+} -+ -+/* -+ * Compare two pathnames. -+ */ -+int -+pathcmp(char *p1, char *p2) -+{ -+ char c1, c2; -+ -+ do { -+ if ((c1 = *p1++) == '/') -+ while (*p1 == '/') { p1++; } -+ if ((c2 = *p2++) == '/') -+ while (*p2 == '/') { p2++; } -+ if (c1 == '\0') -+ return ((c2 == '/') && (*p2 == '\0')) ? 0 : c1 - c2; -+ } while (c1 == c2); -+ -+ return ((c2 == '\0') && (c1 == '/') && (*p1 == '\0')) ? 0 : c1 - c2; -+} ---- crash/global_data.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/global_data.c 2006-11-21 11:19:51.000000000 -0500 -@@ -68,7 +68,7 @@ - * To add a new command, declare it in defs.h and enter it in this table. - */ - --struct command_table_entry base_command_table[] = { -+struct command_table_entry linux_command_table[] = { - {"*", cmd_pointer, help_pointer, 0}, - {"alias", cmd_alias, help_alias, 0}, - {"ascii", cmd_ascii, help_ascii, 0}, -@@ -117,6 +117,9 @@ - {"waitq", cmd_waitq, help_waitq, REFRESH_TASK_TABLE}, - {"whatis", cmd_whatis, help_whatis, 0}, - {"wr", cmd_wr, help_wr, 0}, -+#if defined(S390) || defined(S390X) -+ {"s390dbf", cmd_s390dbf, help_s390dbf, 0}, -+#endif - {(char *)NULL} - }; - ---- crash/memory.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/memory.c 2007-07-27 12:03:17.000000000 -0400 -@@ -1,8 +1,8 @@ - /* memory.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. 
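please_wait() and please_wait_done() above implement a transient status line: a carriage return, the message, then blanks and another carriage return to erase it. A standalone miniature of the same trick (it writes to stderr, whereas the patch writes to fp):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            fprintf(stderr, "\rplease wait... (gathering data)");
            fflush(stderr);
            sleep(2);                            /* stand-in for the slow work */
            fprintf(stderr, "\r%*s\r", 40, "");  /* blank the line, return to column 0 */
            return 0;
    }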
-+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * Copyright (C) 2002 Silicon Graphics, Inc. - * - * This program is free software; you can redistribute it and/or modify -@@ -45,6 +45,8 @@ - ulong *addrlist; - int *kmem_bufctl; - ulong *cpudata[NR_CPUS]; -+ ulong *shared_array_cache; -+ int current_cache_index; - ulong found; - ulong retval; - char *ignore; -@@ -57,12 +59,17 @@ - ulong get_slabs; - char *slab_buf; - char *cache_buf; -+ struct vmlist { -+ ulong addr; -+ ulong size; -+ } *vmlist; - }; - - static char *memtype_string(int, int); - static char *error_handle_string(ulong); - static void dump_mem_map(struct meminfo *); --static void fill_mem_map_cache(ulong, char *); -+static void dump_mem_map_SPARSEMEM(struct meminfo *); -+static void fill_mem_map_cache(ulong, ulong, char *); - static void dump_free_pages(struct meminfo *); - static int dump_zone_page_usage(void); - static void dump_multidimensional_free_pages(struct meminfo *); -@@ -85,6 +92,7 @@ - static void do_slab_chain(int, struct meminfo *); - static void do_slab_chain_percpu_v1(long, struct meminfo *); - static void do_slab_chain_percpu_v2(long, struct meminfo *); -+static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *); - static void save_slab_data(struct meminfo *); - static int slab_data_saved(struct meminfo *); - static void dump_saved_slab_data(void); -@@ -97,7 +105,9 @@ - static void gather_slab_free_list_percpu(struct meminfo *); - static void gather_cpudata_list_v1(struct meminfo *); - static void gather_cpudata_list_v2(struct meminfo *); -+static void gather_cpudata_list_v2_nodes(struct meminfo *, int); - static int check_cpudata_list(struct meminfo *, ulong); -+static int check_shared_list(struct meminfo *, ulong); - static void gather_slab_cached_count(struct meminfo *); - static void dump_slab_objects(struct meminfo *); - static void dump_slab_objects_percpu(struct meminfo *); -@@ -110,6 +120,7 @@ - static void search(ulong, ulong, ulong, int, ulong *, int); - static int next_upage(struct task_context *, ulong, ulong *); - static int next_kpage(ulong, ulong *); -+static ulong next_vmlist_vaddr(struct meminfo *, ulong); - static int vm_area_page_dump(ulong, ulong, ulong, ulong, void *, - struct reference *); - static int dump_swap_info(ulong, ulong *, ulong *); -@@ -124,9 +135,26 @@ - static int compare_node_data(const void *, const void *); - static void do_vm_flags(ulong); - static void PG_reserved_flag_init(void); -+static void PG_slab_flag_init(void); - static ulong nr_blockdev_pages(void); -- -- -+void sparse_mem_init(void); -+void dump_mem_sections(void); -+void list_mem_sections(void); -+ulong sparse_decode_mem_map(ulong, ulong); -+char *read_mem_section(ulong); -+ulong nr_to_section(ulong); -+int valid_section(ulong); -+int section_has_mem_map(ulong); -+ulong section_mem_map_addr(ulong); -+ulong valid_section_nr(ulong); -+ulong pfn_to_map(ulong); -+static int get_nodes_online(void); -+static int next_online_node(int); -+static ulong next_online_pgdat(int); -+static int vm_stat_init(void); -+static int dump_vm_stat(char *, long *); -+static int generic_read_dumpfile(ulonglong, void *, long, char *, ulong); -+static int generic_write_dumpfile(ulonglong, void *, long, char *, ulong); - - /* - * Memory display modes specific to this file. 
-@@ -142,6 +170,7 @@ - #define DECIMAL (0x100) - #define UDECIMAL (0x200) - #define ASCII_ENDLINE (0x400) -+#define NO_ASCII (0x800) - - static ulong DISPLAY_DEFAULT; - -@@ -182,6 +211,13 @@ - MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap"); - MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); - MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss"); -+ if (!VALID_MEMBER(mm_struct_rss)) -+ MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "_rss"); -+ if (!VALID_MEMBER(mm_struct_rss)) -+ MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "_file_rss"); -+ MEMBER_OFFSET_INIT(mm_struct_anon_rss, "mm_struct", "anon_rss"); -+ if (!VALID_MEMBER(mm_struct_anon_rss)) -+ MEMBER_OFFSET_INIT(mm_struct_anon_rss, "mm_struct", "_anon_rss"); - MEMBER_OFFSET_INIT(mm_struct_total_vm, "mm_struct", "total_vm"); - MEMBER_OFFSET_INIT(mm_struct_start_code, "mm_struct", "start_code"); - MEMBER_OFFSET_INIT(vm_area_struct_vm_mm, "vm_area_struct", "vm_mm"); -@@ -270,6 +306,7 @@ - STRUCT_SIZE_INIT(kmem_slab_s, "kmem_slab_s"); - STRUCT_SIZE_INIT(slab_s, "slab_s"); - STRUCT_SIZE_INIT(slab, "slab"); -+ STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s"); - STRUCT_SIZE_INIT(pgd_t, "pgd_t"); - - if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) { -@@ -310,17 +347,49 @@ - !VALID_STRUCT(slab_s) && VALID_STRUCT(slab)) { - vt->flags |= PERCPU_KMALLOC_V2; - -- MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); -- MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); -- MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); -- MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", -- "colour_off"); -- MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", -- "objsize"); -- MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); -- MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, -- "kmem_cache_s", "gfporder"); -- -+ if (VALID_STRUCT(kmem_cache_s)) { -+ MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", -+ "colour_off"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", -+ "objsize"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, -+ "kmem_cache_s", "gfporder"); -+ -+ MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array"); -+ ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0); -+ } else { -+ STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache", "num"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "next"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", -+ "colour_off"); -+ if (MEMBER_EXISTS("kmem_cache", "objsize")) -+ MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", -+ "objsize"); -+ else if (MEMBER_EXISTS("kmem_cache", "buffer_size")) -+ MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", -+ "buffer_size"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags"); -+ MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, -+ "kmem_cache", "gfporder"); -+ -+ if (MEMBER_EXISTS("kmem_cache", "lists")) -+ MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists"); -+ else if (MEMBER_EXISTS("kmem_cache", "nodelists")) { -+ vt->flags |= PERCPU_KMALLOC_V2_NODES; -+ 
MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "nodelists"); -+ ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, -+ "kmem_cache.nodelists", NULL, 0); -+ } -+ MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array"); -+ ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0); -+ } - MEMBER_OFFSET_INIT(slab_list, "slab", "list"); - MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); - MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); -@@ -330,10 +399,6 @@ - MEMBER_OFFSET_INIT(array_cache_limit, "array_cache", "limit"); - STRUCT_SIZE_INIT(array_cache, "array_cache"); - -- MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists"); -- MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array"); -- ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0); -- - MEMBER_OFFSET_INIT(kmem_list3_slabs_partial, - "kmem_list3", "slabs_partial"); - MEMBER_OFFSET_INIT(kmem_list3_slabs_full, -@@ -343,6 +408,9 @@ - MEMBER_OFFSET_INIT(kmem_list3_free_objects, - "kmem_list3", "free_objects"); - MEMBER_OFFSET_INIT(kmem_list3_shared, "kmem_list3", "shared"); -+ } else if (MEMBER_EXISTS("kmem_cache", "cpu_slab") && -+ STRUCT_EXISTS("kmem_cache_node")) { -+ vt->flags |= KMALLOC_SLUB; - } else { - MEMBER_OFFSET_INIT(kmem_cache_s_c_nextp, - "kmem_cache_s", "c_nextp"); -@@ -381,6 +449,19 @@ - "kmem_slab_s", "s_magic"); - } - -+ if (!kt->kernel_NR_CPUS) { -+ kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_cpudata) ? -+ ARRAY_LENGTH(kmem_cache_s_cpudata) : -+ ARRAY_LENGTH(kmem_cache_s_array); -+ } -+ -+ if (kt->kernel_NR_CPUS > NR_CPUS) { -+ error(WARNING, -+ "kernel-configured NR_CPUS (%d) greater than compiled-in NR_CPUS (%d)\n", -+ kt->kernel_NR_CPUS, NR_CPUS); -+ error(FATAL, "recompile crash with larger NR_CPUS\n"); -+ } -+ - if (machdep->init_kernel_pgd) - machdep->init_kernel_pgd(); - else if (symbol_exists("swapper_pg_dir")) { -@@ -415,10 +496,17 @@ - error(FATAL, "no swapper_pg_dir or cpu_pgd symbols exist?\n"); - - get_symbol_data("high_memory", sizeof(ulong), &vt->high_memory); -- if (kernel_symbol_exists("mem_map")) -+ -+ if (kernel_symbol_exists("mem_map")) { - get_symbol_data("mem_map", sizeof(char *), &vt->mem_map); -+ vt->flags |= FLATMEM; -+ } else if (kernel_symbol_exists("mem_section")) -+ vt->flags |= SPARSEMEM; - else - vt->flags |= DISCONTIGMEM; -+ -+ sparse_mem_init(); -+ - vt->vmalloc_start = machdep->vmalloc_start(); - if (IS_VMALLOC_ADDR(vt->mem_map)) - vt->flags |= V_MEM_MAP; -@@ -478,7 +566,6 @@ - STRUCT_SIZE_INIT(free_area_struct, "free_area_struct"); - STRUCT_SIZE_INIT(zone, "zone"); - STRUCT_SIZE_INIT(zone_struct, "zone_struct"); -- STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s"); - STRUCT_SIZE_INIT(kmem_bufctl_t, "kmem_bufctl_t"); - STRUCT_SIZE_INIT(swap_info_struct, "swap_info_struct"); - STRUCT_SIZE_INIT(mm_struct, "mm_struct"); -@@ -488,13 +575,20 @@ - if (VALID_STRUCT(pglist_data)) { - vt->flags |= ZONES; - -- if (symbol_exists("pgdat_list")) -+ if (symbol_exists("pgdat_list") && !IS_SPARSEMEM()) - vt->flags |= NODES; - -+ /* -+ * Determine the number of nodes the best way possible, -+ * starting with a default of 1. 
-+ */ -+ vt->numnodes = 1; -+ - if (symbol_exists("numnodes")) - get_symbol_data("numnodes", sizeof(int), &vt->numnodes); -- else -- vt->numnodes = 1; -+ -+ if ((vt->numnodes = get_nodes_online())) -+ vt->flags |= NODES_ONLINE; - - MEMBER_OFFSET_INIT(pglist_data_node_zones, - "pglist_data", "node_zones"); -@@ -524,6 +618,7 @@ - ARRAY_LENGTH_INIT(vt->nr_zones, pglist_data_node_zones, - "pglist_data.node_zones", NULL, - SIZE_OPTION(zone_struct, zone)); -+ vt->ZONE_HIGHMEM = vt->nr_zones - 1; - - if (VALID_STRUCT(zone_struct)) { - MEMBER_OFFSET_INIT(zone_struct_free_pages, -@@ -539,6 +634,8 @@ - if (INVALID_MEMBER(zone_struct_size)) - MEMBER_OFFSET_INIT(zone_struct_memsize, - "zone_struct", "memsize"); -+ MEMBER_OFFSET_INIT(zone_struct_zone_start_pfn, -+ "zone_struct", "zone_start_pfn"); - MEMBER_OFFSET_INIT(zone_struct_zone_start_paddr, - "zone_struct", "zone_start_paddr"); - MEMBER_OFFSET_INIT(zone_struct_zone_start_mapnr, -@@ -565,8 +662,17 @@ - vt->dump_free_pages = dump_free_pages_zones_v1; - - } else if (VALID_STRUCT(zone)) { -- MEMBER_OFFSET_INIT(zone_free_pages, -- "zone", "free_pages"); -+ MEMBER_OFFSET_INIT(zone_vm_stat, "zone", "vm_stat"); -+ MEMBER_OFFSET_INIT(zone_free_pages, "zone", "free_pages"); -+ if (INVALID_MEMBER(zone_free_pages) && -+ VALID_MEMBER(zone_vm_stat)) { -+ long nr_free_pages = 0; -+ if (!enumerator_value("NR_FREE_PAGES", &nr_free_pages)) -+ error(WARNING, -+ "cannot determine NR_FREE_PAGES enumerator\n"); -+ ASSIGN_OFFSET(zone_free_pages) = OFFSET(zone_vm_stat) + -+ (nr_free_pages * sizeof(long)); -+ } - MEMBER_OFFSET_INIT(zone_free_area, - "zone", "free_area"); - MEMBER_OFFSET_INIT(zone_zone_pgdat, -@@ -603,6 +709,8 @@ - vt->dump_kmem_cache = dump_kmem_cache_percpu_v1; - else if (vt->flags & PERCPU_KMALLOC_V2) - vt->dump_kmem_cache = dump_kmem_cache_percpu_v2; -+ else if (vt->flags & KMALLOC_SLUB) -+ vt->flags |= KMEM_CACHE_UNAVAIL; /* TBD */ - else - vt->dump_kmem_cache = dump_kmem_cache; - -@@ -640,13 +748,7 @@ - kmem_cache_init(); - - PG_reserved_flag_init(); -- -- if (VALID_MEMBER(page_pte)) { -- if (THIS_KERNEL_VERSION < LINUX(2,6,0)) -- vt->PG_slab = 10; -- else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -- vt->PG_slab = 7; -- } -+ PG_slab_flag_init(); - } - - /* -@@ -685,7 +787,7 @@ - memtype = KVADDR; - count = -1; - -- while ((c = getopt(argcnt, args, "e:pudDuso:81:3:6:")) != EOF) { -+ while ((c = getopt(argcnt, args, "xme:pfudDuso:81:3:6:")) != EOF) { - switch(c) - { - case '8': -@@ -748,12 +850,12 @@ - break; - - case 'p': -- memtype &= ~(UVADDR|KVADDR); -+ memtype &= ~(UVADDR|KVADDR|XENMACHADDR|FILEADDR); - memtype = PHYSADDR; - break; - - case 'u': -- memtype &= ~(KVADDR|PHYSADDR); -+ memtype &= ~(KVADDR|PHYSADDR|XENMACHADDR|FILEADDR); - memtype = UVADDR; - break; - -@@ -767,6 +869,25 @@ - flag |= UDECIMAL; - break; - -+ case 'm': -+ if (!(kt->flags & ARCH_XEN)) -+ error(FATAL, "-m option only applies to xen architecture\n"); -+ memtype &= ~(UVADDR|KVADDR|FILEADDR); -+ memtype = XENMACHADDR; -+ break; -+ -+ case 'f': -+ if (!pc->dumpfile) -+ error(FATAL, -+ "-f option requires a dumpfile\n"); -+ memtype &= ~(KVADDR|UVADDR|PHYSADDR|XENMACHADDR); -+ memtype = FILEADDR; -+ break; -+ -+ case 'x': -+ flag |= NO_ASCII; -+ break; -+ - default: - argerrs++; - break; -@@ -830,7 +951,7 @@ - error(WARNING, - "ending address ignored when count is specified\n"); - -- if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC)) -+ if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC) && !(flag & NO_ASCII)) - flag |= ASCII_ENDLINE; - - if (memtype == KVADDR) { -@@ -839,7 +960,6 @@ - 
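When struct zone no longer has a free_pages member, the hunk above synthesizes its offset as OFFSET(zone_vm_stat) plus the NR_FREE_PAGES enumerator scaled by sizeof(long), i.e. it indexes the vm_stat[] counter array. A standalone miniature of that arithmetic, with a hypothetical layout and enumerator list:

    #include <stddef.h>
    #include <stdio.h>

    enum zone_stat_item { NR_FREE_PAGES, NR_OTHER_COUNTER, NR_VM_STAT_ITEMS };

    struct zone {                       /* stand-in, not the kernel's struct zone */
            long pad[4];
            long vm_stat[NR_VM_STAT_ITEMS];
    };

    int main(void)
    {
            size_t off = offsetof(struct zone, vm_stat) +
                         NR_FREE_PAGES * sizeof(long);
            printf("free page counter lives at offset %zu\n", off);
            return 0;
    }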
} - - display_memory(addr, count, flag, memtype); -- - } - - /* -@@ -903,6 +1023,12 @@ - case PHYSADDR: - addrtype = "PHYSADDR"; - break; -+ case XENMACHADDR: -+ addrtype = "XENMACHADDR"; -+ break; -+ case FILEADDR: -+ addrtype = "FILEADDR"; -+ break; - } - - if (CRASHDEBUG(4)) -@@ -970,7 +1096,8 @@ - case DISPLAY_64: - if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == - (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { -- if (in_ksymbol_range(mem.u64)) { -+ if (in_ksymbol_range(mem.u64) && -+ strlen(value_to_symstr(mem.u64, buf, 0))) { - fprintf(fp, "%-16s ", - value_to_symstr(mem.u64, buf, 0)); - linelen += strlen(buf)+1; -@@ -993,7 +1120,8 @@ - case DISPLAY_32: - if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == - (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { -- if (in_ksymbol_range(mem.u32)) { -+ if (in_ksymbol_range(mem.u32) && -+ strlen(value_to_symstr(mem.u32, buf, 0))) { - fprintf(fp, INT_PRLEN == 16 ? - "%-16s " : "%-8s ", - value_to_symstr(mem.u32, -@@ -1138,7 +1266,7 @@ - size = sizeof(void*); - addr_entered = value_entered = FALSE; - -- while ((c = getopt(argcnt, args, "ukp81:3:6:")) != EOF) { -+ while ((c = getopt(argcnt, args, "fukp81:3:6:")) != EOF) { - switch(c) - { - case '8': -@@ -1173,17 +1301,33 @@ - break; - - case 'p': -+ memtype &= ~(UVADDR|KVADDR|FILEADDR); - memtype = PHYSADDR; - break; - - case 'u': -+ memtype &= ~(PHYSADDR|KVADDR|FILEADDR); - memtype = UVADDR; - break; - - case 'k': -+ memtype &= ~(PHYSADDR|UVADDR|FILEADDR); - memtype = KVADDR; - break; - -+ case 'f': -+ /* -+ * Unsupported, but can be forcibly implemented -+ * by removing the DUMPFILE() check above and -+ * recompiling. -+ */ -+ if (!pc->dumpfile) -+ error(FATAL, -+ "-f option requires a dumpfile\n"); -+ memtype &= ~(PHYSADDR|UVADDR|KVADDR); -+ memtype = FILEADDR; -+ break; -+ - default: - argerrs++; - break; -@@ -1262,6 +1406,9 @@ - case PHYSADDR: - break; - -+ case FILEADDR: -+ break; -+ - case AMBIGUOUS: - error(INFO, - "ambiguous address: %llx (requires -p, -u or -k)\n", -@@ -1309,6 +1456,8 @@ - raw_data_dump(ulong addr, long count, int symbolic) - { - long wordcnt; -+ ulonglong address; -+ int memtype; - - switch (sizeof(long)) - { -@@ -1328,9 +1477,20 @@ - break; - } - -- display_memory(addr, wordcnt, -+ if (pc->curcmd_flags & MEMTYPE_FILEADDR) { -+ address = pc->curcmd_private; -+ memtype = FILEADDR; -+ } else if (pc->curcmd_flags & MEMTYPE_UVADDR) { -+ address = (ulonglong)addr; -+ memtype = UVADDR; -+ } else { -+ address = (ulonglong)addr; -+ memtype = KVADDR; -+ } -+ -+ display_memory(address, wordcnt, - HEXADECIMAL|DISPLAY_DEFAULT|(symbolic ? SYMBOLIC : ASCII_ENDLINE), -- KVADDR); -+ memtype); - } - - /* -@@ -1351,7 +1511,7 @@ - * is appropriate: - * - * addr a user, kernel or physical memory address. -- * memtype addr type: UVADDR, KVADDR or PHYSADDR. -+ * memtype addr type: UVADDR, KVADDR, PHYSADDR, XENMACHADDR or FILEADDR - * buffer supplied buffer to read the data into. - * size number of bytes to read. - * type string describing the request -- helpful when the read fails. 
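The new rd options handled in the hunks above can be illustrated briefly; hypothetical addresses, and output omitted since its width depends on the target:

    crash> rd -x c0100000 4      # -x (NO_ASCII) drops the trailing ASCII column
    crash> rd -f 0x2000 4        # -f (FILEADDR) reads raw dumpfile offsets; dumpfile sessions only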
-@@ -1368,6 +1528,7 @@ - #define SEEK_ERRMSG "seek error: %s address: %llx type: \"%s\"\n" - #define READ_ERRMSG "read error: %s address: %llx type: \"%s\"\n" - #define WRITE_ERRMSG "write error: %s address: %llx type: \"%s\"\n" -+#define PAGE_EXCLUDED_ERRMSG "page excluded: %s address: %llx type: \"%s\"\n" - - int - readmem(ulonglong addr, int memtype, void *buffer, long size, -@@ -1376,6 +1537,7 @@ - int fd; - long cnt; - physaddr_t paddr; -+ ulonglong pseudo; - char *bufptr; - - if (CRASHDEBUG(4)) -@@ -1424,7 +1586,11 @@ - break; - - case PHYSADDR: -+ case XENMACHADDR: - break; -+ -+ case FILEADDR: -+ return generic_read_dumpfile(addr, buffer, size, type, error_handle); - } - - while (size > 0) { -@@ -1449,6 +1615,17 @@ - case PHYSADDR: - paddr = addr; - break; -+ -+ case XENMACHADDR: -+ pseudo = xen_m2p(addr); -+ -+ if (pseudo == XEN_MACHADDR_NOT_FOUND) { -+ pc->curcmd_flags |= XEN_MACHINE_ADDR; -+ paddr = addr; -+ } else -+ paddr = pseudo | PAGEOFFSET(addr); -+ -+ break; - } - - /* -@@ -1460,7 +1637,7 @@ - cnt = size; - - switch (READMEM(fd, bufptr, cnt, -- memtype == PHYSADDR ? 0 : addr, paddr)) -+ (memtype == PHYSADDR) || (memtype == XENMACHADDR) ? 0 : addr, paddr)) - { - case SEEK_ERROR: - if (PRINT_ERROR_MESSAGE) -@@ -1472,6 +1649,11 @@ - error(INFO, READ_ERRMSG, memtype_string(memtype, 0), addr, type); - goto readmem_error; - -+ case PAGE_EXCLUDED: -+ if (PRINT_ERROR_MESSAGE) -+ error(INFO, PAGE_EXCLUDED_ERRMSG, memtype_string(memtype, 0), addr, type); -+ goto readmem_error; -+ - default: - break; - } -@@ -1610,6 +1792,9 @@ - int - read_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) - { -+ if (pc->curcmd_flags & XEN_MACHINE_ADDR) -+ return READ_ERROR; -+ - if (!machdep->verify_paddr(paddr)) { - if (CRASHDEBUG(1)) - error(INFO, "verify_paddr(%lx) failed\n", paddr); -@@ -1754,6 +1939,12 @@ - case PHYSADDR: - sprintf(membuf, debug ? "PHYSADDR" : "physical"); - break; -+ case XENMACHADDR: -+ sprintf(membuf, debug ? "XENMACHADDR" : "xen machine"); -+ break; -+ case FILEADDR: -+ sprintf(membuf, debug ? "FILEADDR" : "dumpfile"); -+ break; - default: - if (debug) - sprintf(membuf, "0x%x (?)", memtype); -@@ -1849,6 +2040,10 @@ - - case PHYSADDR: - break; -+ -+ -+ case FILEADDR: -+ return generic_write_dumpfile(addr, buffer, size, type, error_handle); - } - - while (size > 0) { -@@ -1946,6 +2141,77 @@ - } - - /* -+ * Generic dumpfile read/write functions to handle FILEADDR -+ * memtype arguments to readmem() and writemem(). These are -+ * not to be confused with pc->readmem/writemem plug-ins. 
-+ */ -+static int -+generic_read_dumpfile(ulonglong addr, void *buffer, long size, char *type, -+ ulong error_handle) -+{ -+ int fd; -+ int retval; -+ -+ retval = TRUE; -+ -+ if (!pc->dumpfile) -+ error(FATAL, "command requires a dumpfile\n"); -+ -+ if ((fd = open(pc->dumpfile, O_RDONLY)) < 0) -+ error(FATAL, "%s: %s\n", pc->dumpfile, -+ strerror(errno)); -+ -+ if (lseek(fd, addr, SEEK_SET) == -1) { -+ if (PRINT_ERROR_MESSAGE) -+ error(INFO, SEEK_ERRMSG, -+ memtype_string(FILEADDR, 0), addr, type); -+ retval = FALSE; -+ } else if (read(fd, buffer, size) != size) { -+ if (PRINT_ERROR_MESSAGE) -+ error(INFO, READ_ERRMSG, -+ memtype_string(FILEADDR, 0), addr, type); -+ retval = FALSE; -+ } -+ -+ close(fd); -+ -+ return retval; -+} -+ -+static int -+generic_write_dumpfile(ulonglong addr, void *buffer, long size, char *type, -+ ulong error_handle) -+{ -+ int fd; -+ int retval; -+ -+ retval = TRUE; -+ -+ if (!pc->dumpfile) -+ error(FATAL, "command requires a dumpfile\n"); -+ -+ if ((fd = open(pc->dumpfile, O_WRONLY)) < 0) -+ error(FATAL, "%s: %s\n", pc->dumpfile, -+ strerror(errno)); -+ -+ if (lseek(fd, addr, SEEK_SET) == -1) { -+ if (PRINT_ERROR_MESSAGE) -+ error(INFO, SEEK_ERRMSG, -+ memtype_string(FILEADDR, 0), addr, type); -+ retval = FALSE; -+ } else if (write(fd, buffer, size) != size) { -+ if (PRINT_ERROR_MESSAGE) -+ error(INFO, WRITE_ERRMSG, -+ memtype_string(FILEADDR, 0), addr, type); -+ retval = FALSE; -+ } -+ -+ close(fd); -+ -+ return retval; -+} -+ -+/* - * Translates a kernel virtual address to its physical address. cmd_vtop() - * sets the verbose flag so that the pte translation gets displayed; all - * other callers quietly accept the translation. -@@ -2113,6 +2379,8 @@ - break; - } - -+ paddr = 0; -+ - switch (memtype) { - case UVADDR: - fprintf(fp, "%s %s\n", -@@ -2126,9 +2394,12 @@ - return; - } - if (!uvtop(tc, vaddr, &paddr, 0)) { -- fprintf(fp, "%s (not mapped)\n\n", -+ fprintf(fp, "%s %s\n\n", - mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, -- MKSTR(vaddr))); -+ MKSTR(vaddr)), -+ (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ? -+ "(page not available)" : "(not mapped)"); -+ - page_exists = FALSE; - } else { - fprintf(fp, "%s %s\n\n", -@@ -2161,9 +2432,13 @@ - } - if (vtop_flags & USE_USER_PGD) { - if (!uvtop(tc, vaddr, &paddr, 0)) { -- fprintf(fp, "%s (not mapped)\n\n", -+ fprintf(fp, "%s %s\n\n", - mkstring(buf1, UVADDR_PRLEN, -- LJUST|LONG_HEX, MKSTR(vaddr))); -+ LJUST|LONG_HEX, MKSTR(vaddr)), -+ (XEN() && -+ (paddr == PADDR_NOT_AVAILABLE)) ? -+ "(page not available)" : -+ "(not mapped)"); - page_exists = FALSE; - } else { - fprintf(fp, "%s %s\n\n", -@@ -2176,9 +2451,13 @@ - uvtop(tc, vaddr, &paddr, VERBOSE); - } else { - if (!kvtop(tc, vaddr, &paddr, 0)) { -- fprintf(fp, "%s (not mapped)\n\n", -+ fprintf(fp, "%s %s\n\n", - mkstring(buf1, VADDR_PRLEN, -- LJUST|LONG_HEX, MKSTR(vaddr))); -+ LJUST|LONG_HEX, MKSTR(vaddr)), -+ (XEN() && -+ (paddr == PADDR_NOT_AVAILABLE)) ? -+ "(page not available)" : -+ "(not mapped)"); - page_exists = FALSE; - } else { - fprintf(fp, "%s %s\n\n", -@@ -2839,7 +3118,8 @@ - - if (DO_REF_SEARCH(ref)) { - if (VM_REF_CHECK_DECVAL(ref, -- SWP_OFFSET(paddr))) { -+ THIS_KERNEL_VERSION >= LINUX(2,6,0) ? 
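generic_read_dumpfile() and generic_write_dumpfile() above are plain open/lseek/read-or-write wrappers keyed off the FILEADDR memtype. The same access pattern in a self-contained helper (illustrative only, not crash code):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* read "len" bytes at byte offset "off" of an arbitrary file */
    static int read_file_at(const char *path, off_t off, void *buf, size_t len)
    {
            int fd, ok;

            if ((fd = open(path, O_RDONLY)) < 0)
                    return 0;
            ok = (lseek(fd, off, SEEK_SET) != -1) &&
                 (read(fd, buf, len) == (ssize_t)len);
            close(fd);
            return ok;
    }

    int main(void)
    {
            char buf[8];

            if (read_file_at("/etc/hostname", 0, buf, sizeof(buf)))
                    printf("%.8s\n", buf);
            return 0;
    }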
-+ __swp_offset(paddr) : SWP_OFFSET(paddr))) { - if (DO_REF_DISPLAY(ref)) - display = TRUE; - else { -@@ -2980,6 +3260,8 @@ - return; - - tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss)); -+ if (VALID_MEMBER(mm_struct_anon_rss)) -+ tm->rss += ULONG(tt->mm_struct + OFFSET(mm_struct_anon_rss)); - tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); - tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd)); - -@@ -3036,6 +3318,9 @@ - #define GET_INACTIVE_DIRTY (ADDRESS_SPECIFIED << 13) /* obsolete */ - #define SLAB_GET_COUNTS (ADDRESS_SPECIFIED << 14) - #define SLAB_WALKTHROUGH (ADDRESS_SPECIFIED << 15) -+#define GET_VMLIST_COUNT (ADDRESS_SPECIFIED << 16) -+#define GET_VMLIST (ADDRESS_SPECIFIED << 17) -+#define SLAB_DATA_NOSAVE (ADDRESS_SPECIFIED << 18) - - #define GET_ALL \ - (GET_SHARED_PAGES|GET_TOTALRAM_PAGES|GET_BUFFERS_PAGES|GET_SLAB_PAGES) -@@ -3046,7 +3331,7 @@ - int i; - int c; - int sflag, Sflag, pflag, fflag, Fflag, vflag; -- int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag; -+ int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag, Vflag; - struct meminfo meminfo; - ulonglong value[MAXARGS]; - char buf[BUFSIZE]; -@@ -3055,13 +3340,17 @@ - - spec_addr = 0; - sflag = Sflag = pflag = fflag = Fflag = Pflag = 0; -- vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = 0; -+ vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = Vflag = 0; - BZERO(&meminfo, sizeof(struct meminfo)); - BZERO(&value[0], sizeof(ulonglong)*MAXARGS); - -- while ((c = getopt(argcnt, args, "I:sSFfpvcCinl:L:P")) != EOF) { -+ while ((c = getopt(argcnt, args, "I:sSFfpvcCinl:L:PV")) != EOF) { - switch(c) - { -+ case 'V': -+ Vflag = 1; -+ break; -+ - case 'n': - nflag = 1; - break; -@@ -3153,13 +3442,13 @@ - if (argerrs) - cmd_usage(pc->curcmd, SYNOPSIS); - -- if ((sflag + Sflag + pflag + fflag + Fflag + -+ if ((sflag + Sflag + pflag + fflag + Fflag + Vflag + - vflag + Cflag + cflag + iflag + lflag + Lflag) > 1) { - error(INFO, "only one flag allowed!\n"); - cmd_usage(pc->curcmd, SYNOPSIS); - } - -- if (sflag || Sflag) -+ if (sflag || Sflag || !(vt->flags & KMEM_CACHE_INIT)) - kmem_cache_init(); - - while (args[optind]) { -@@ -3198,8 +3487,6 @@ - if (pflag) { - meminfo.spec_addr = value[i]; - meminfo.flags = ADDRESS_SPECIFIED; -- if (meminfo.calls++) -- fprintf(fp, "\n"); - dump_mem_map(&meminfo); - pflag++; - } -@@ -3248,8 +3535,6 @@ - if (vflag) { - meminfo.spec_addr = value[i]; - meminfo.flags = ADDRESS_SPECIFIED; -- if (meminfo.calls++) -- fprintf(fp, "\n"); - dump_vmlist(&meminfo); - vflag++; - } -@@ -3275,7 +3560,7 @@ - /* - * no value arguments allowed! 
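The -V flag wired up above routes to dump_vm_stat(NULL, NULL), which on kernels that carry the vm_stat[] counters presumably lists them all:

    crash> kmem -V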
- */ -- if (nflag || iflag || Fflag || Cflag || Lflag) { -+ if (nflag || iflag || Fflag || Cflag || Lflag || Vflag) { - error(INFO, - "no address arguments allowed with this option\n"); - cmd_usage(pc->curcmd, SYNOPSIS); -@@ -3352,7 +3637,10 @@ - dump_page_lists(&meminfo); - } - -- if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + -+ if (Vflag == 1) -+ dump_vm_stat(NULL, NULL); -+ -+ if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + Vflag + - cflag + Cflag + iflag + nflag + lflag + Lflag + meminfo.calls)) - cmd_usage(pc->curcmd, SYNOPSIS); - -@@ -3373,12 +3661,13 @@ - buf = (char *)GETBUF(SIZE(page)); - - if (!readmem(pageptr, KVADDR, buf, SIZE(page), -- "reserved page", RETURN_ON_ERROR|QUIET)) -+ "reserved page", RETURN_ON_ERROR|QUIET)) { -+ FREEBUF(buf); - return; -+ } - - flags = ULONG(buf + OFFSET(page_flags)); - -- - if (count_bits_long(flags) == 1) - vt->PG_reserved = flags; - else -@@ -3386,12 +3675,50 @@ - - if (CRASHDEBUG(2)) - fprintf(fp, -- "PG_reserved bit: vaddr: %lx page: %lx flags: %lx => %lx\n", -+ "PG_reserved: vaddr: %lx page: %lx flags: %lx => %lx\n", - vaddr, pageptr, flags, vt->PG_reserved); - - FREEBUF(buf); - } - -+static void -+PG_slab_flag_init(void) -+{ -+ int bit; -+ ulong pageptr; -+ ulong vaddr, flags; -+ char buf[BUFSIZE]; /* safe for a page struct */ -+ -+ /* -+ * Set the old defaults in case the search below fails. -+ */ -+ if (VALID_MEMBER(page_pte)) { -+ if (THIS_KERNEL_VERSION < LINUX(2,6,0)) -+ vt->PG_slab = 10; -+ else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -+ vt->PG_slab = 7; -+ } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -+ vt->PG_slab = 7; -+ -+ if (try_get_symbol_data("vm_area_cachep", sizeof(void *), &vaddr) && -+ phys_to_page((physaddr_t)VTOP(vaddr), &pageptr) && -+ readmem(pageptr, KVADDR, buf, SIZE(page), -+ "vm_area_cachep page", RETURN_ON_ERROR|QUIET)) { -+ -+ flags = ULONG(buf + OFFSET(page_flags)); -+ -+ if ((bit = ffsl(flags))) { -+ vt->PG_slab = bit - 1; -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, -+ "PG_slab bit: vaddr: %lx page: %lx flags: %lx => %ld\n", -+ vaddr, pageptr, flags, vt->PG_slab); -+ -+ } -+ } -+} -+ - /* - * dump_mem_map() displays basic data about each entry in the mem_map[] - * array, or if an address is specified, just the mem_map[] entry for that -@@ -3438,22 +3765,20 @@ - #define PGMM_CACHED (512) - - static void --dump_mem_map(struct meminfo *mi) -+dump_mem_map_SPARSEMEM(struct meminfo *mi) - { -- long i, n; -+ ulong i; - long total_pages; -- int others, page_not_mapped, phys_not_mapped; -+ int others, page_not_mapped, phys_not_mapped, page_mapping; - ulong pp, ppend; - physaddr_t phys, physend; - ulong tmp, reserved, shared, slabs; - ulong PG_reserved_flag; - long buffers; - ulong inode, offset, flags, mapping, index; -- ulong node_size; - uint count; - int print_hdr, pg_spec, phys_spec, done; - int v22; -- struct node_table *nt; - char hdr[BUFSIZE]; - char buf0[BUFSIZE]; - char buf1[BUFSIZE]; -@@ -3462,6 +3787,7 @@ - char buf4[BUFSIZE]; - char *page_cache; - char *pcache; -+ ulong section, section_nr, nr_mem_sections, section_size; - - v22 = VALID_MEMBER(page_inode); /* page.inode vs. 
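PG_slab_flag_init() above derives the PG_slab bit number at run time by reading the page struct behind vm_area_cachep and taking ffsl(flags) - 1. A standalone miniature of that bit arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned long flags = 0x80;            /* suppose only the slab bit is set */
            int bit = __builtin_ffsl(flags) - 1;   /* GCC builtin standing in for ffsl() */

            printf("PG_slab bit: %d\n", bit);      /* 7, matching the 2.6 default above */
            return 0;
    }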
page.mapping */ - -@@ -3549,22 +3875,62 @@ - done = FALSE; - total_pages = 0; - -- for (n = 0; n < vt->numnodes; n++) { -+ nr_mem_sections = NR_MEM_SECTIONS(); -+ -+ /* -+ * Iterate over all possible sections -+ */ -+ for (section_nr = 0; section_nr < nr_mem_sections ; section_nr++) { -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "section_nr = %ld\n", section_nr); -+ -+ /* -+ * If we are looking up a specific address, jump directly -+ * to the section with that page -+ */ -+ if (mi->flags & ADDRESS_SPECIFIED) { -+ ulong pfn; -+ physaddr_t tmp; -+ -+ if (pg_spec) { -+ if (!page_to_phys(mi->spec_addr, &tmp)) -+ return; -+ pfn = tmp >> PAGESHIFT(); -+ } else -+ pfn = mi->spec_addr >> PAGESHIFT(); -+ section_nr = pfn_to_section_nr(pfn); -+ } -+ -+ if (!(section = valid_section_nr(section_nr))) { -+#ifdef NOTDEF -+ break; /* On a real sparsemem system we need to check -+ * every section as gaps may exist. But this -+ * can be slow. If we know we don't have gaps -+ * just stop validating sections when we -+ * get to the end of the valid ones. -+ * In the future find a way to short circuit -+ * this loop. -+ */ -+#endif -+ if (mi->flags & ADDRESS_SPECIFIED) -+ break; -+ continue; -+ } -+ - if (print_hdr) { -- fprintf(fp, "%s%s", n ? "\n" : "", hdr); -+ if (!(pc->curcmd_flags & HEADER_PRINTED)) -+ fprintf(fp, "%s", hdr); - print_hdr = FALSE; -+ pc->curcmd_flags |= HEADER_PRINTED; - } - -- nt = &vt->node_table[n]; -- total_pages += nt->size; -- pp = nt->mem_map; -- phys = nt->start_paddr; -- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) -- node_size = vt->max_mapnr; -- else -- node_size = nt->size; -+ pp = section_mem_map_addr(section); -+ pp = sparse_decode_mem_map(pp, section_nr); -+ phys = section_nr * PAGES_PER_SECTION() * PAGESIZE(); -+ section_size = PAGES_PER_SECTION(); - -- for (i = 0; i < node_size; -+ for (i = 0; i < section_size; - i++, pp += SIZE(page), phys += PAGESIZE()) { - - if ((i % PGMM_CACHED) == 0) { -@@ -3581,7 +3947,7 @@ - continue; - } - -- fill_mem_map_cache(pp, page_cache); -+ fill_mem_map_cache(pp, ppend, page_cache); - } - - pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); -@@ -3653,11 +4019,12 @@ - } - continue; - } -+ page_mapping = VALID_MEMBER(page_mapping); - - if (v22) { - inode = ULONG(pcache + OFFSET(page_inode)); - offset = ULONG(pcache + OFFSET(page_offset)); -- } else { -+ } else if (page_mapping) { - mapping = ULONG(pcache + - OFFSET(page_mapping)); - index = ULONG(pcache + OFFSET(page_index)); -@@ -3700,6 +4067,20 @@ - space(MINSPACE), - mkstring(buf4, 8, CENTER|RJUST, " "), - " "); -+ else if (!page_mapping) -+ fprintf(fp, "%s%s%s%s%s%s%s %2d ", -+ mkstring(buf0, VADDR_PRLEN, -+ LJUST|LONG_HEX, MKSTR(pp)), -+ space(MINSPACE), -+ mkstring(buf1, MAX(PADDR_PRLEN, -+ strlen("PHYSICAL")), -+ RJUST|LONGLONG_HEX, MKSTR(&phys)), -+ space(MINSPACE), -+ mkstring(buf3, VADDR_PRLEN, -+ CENTER|RJUST, "-------"), -+ space(MINSPACE), -+ mkstring(buf4, 8, CENTER|RJUST, "-----"), -+ count); - else - fprintf(fp, "%s%s%s%s%s%s%8ld %2d ", - mkstring(buf0, VADDR_PRLEN, -@@ -3862,65 +4243,512 @@ - FREEBUF(page_cache); - } - --/* -- * Stash a chunk of PGMM_CACHED page structures, starting at addr, into the -- * passed-in buffer. The mem_map array is normally guaranteed to be -- * readable except in the case of virtual mem_map usage. When V_MEM_MAP -- * is in place, read all pages consumed by PGMM_CACHED page structures -- * that are currently mapped, leaving the unmapped ones just zeroed out. 
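The SPARSEMEM walk above converts between pfn, section number and section base address. A worked standalone example, assuming 4 KB pages and 32768 pages per section (both values vary with the architecture and kernel configuration):

    #include <stdio.h>

    #define PAGESHIFT          12UL      /* assumed 4 KB pages */
    #define PAGES_PER_SECTION  32768UL   /* assumed; configuration-dependent */

    int main(void)
    {
            unsigned long paddr = 0x40001000UL;
            unsigned long pfn = paddr >> PAGESHIFT;              /* 0x40001 */
            unsigned long section_nr = pfn / PAGES_PER_SECTION;  /* pfn_to_section_nr() -> 8 */
            unsigned long base = (section_nr * PAGES_PER_SECTION) << PAGESHIFT;

            printf("pfn %lx -> section %lu, base phys %lx\n", pfn, section_nr, base);
            return 0;
    }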
-- */ - static void --fill_mem_map_cache(ulong pp, char *page_cache) -+dump_mem_map(struct meminfo *mi) - { -- long size, cnt; -- ulong addr; -- char *bufptr; -+ long i, n; -+ long total_pages; -+ int others, page_not_mapped, phys_not_mapped, page_mapping; -+ ulong pp, ppend; -+ physaddr_t phys, physend; -+ ulong tmp, reserved, shared, slabs; -+ ulong PG_reserved_flag; -+ long buffers; -+ ulong inode, offset, flags, mapping, index; -+ ulong node_size; -+ uint count; -+ int print_hdr, pg_spec, phys_spec, done; -+ int v22; -+ struct node_table *nt; -+ char hdr[BUFSIZE]; -+ char buf0[BUFSIZE]; -+ char buf1[BUFSIZE]; -+ char buf2[BUFSIZE]; -+ char buf3[BUFSIZE]; -+ char buf4[BUFSIZE]; -+ char *page_cache; -+ char *pcache; - -- /* -- * Try to read it in one fell swoop. -- */ -- if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED, -- "page struct cache", RETURN_ON_ERROR|QUIET)) -+ if (IS_SPARSEMEM()) { -+ dump_mem_map_SPARSEMEM(mi); - return; -+ } - -- /* -- * Break it into page-size-or-less requests, warning if it's -- * not a virtual mem_map. -- */ -- size = SIZE(page) * PGMM_CACHED; -- addr = pp; -- bufptr = page_cache; -- -- while (size > 0) { -- /* -- * Compute bytes till end of page. -- */ -- cnt = PAGESIZE() - PAGEOFFSET(addr); -- -- if (cnt > size) -- cnt = size; -- -- if (!readmem(addr, KVADDR, bufptr, size, -- "virtual page struct cache", RETURN_ON_ERROR|QUIET)) { -- BZERO(bufptr, size); -- if (!(vt->flags & V_MEM_MAP)) -- error(WARNING, -- "mem_map[] from %lx to %lx not accessible\n", -- addr, addr+size); -- } -+ v22 = VALID_MEMBER(page_inode); /* page.inode vs. page.mapping */ - -- addr += cnt; -- bufptr += cnt; -- size -= cnt; -+ if (v22) { -+ sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", -+ mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), -+ space(MINSPACE), -+ mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), -+ RJUST, "PHYSICAL"), -+ space(MINSPACE), -+ mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), -+ space(MINSPACE), -+ mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), -+ space(MINSPACE-1)); -+ } else { -+ sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", -+ mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), -+ space(MINSPACE), -+ mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), -+ RJUST, "PHYSICAL"), -+ space(MINSPACE), -+ mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), -+ space(MINSPACE), -+ mkstring(buf4, 8, CENTER|RJUST, "INDEX")); - } --} -- -- --/* -- * dump_page_hash_table() displays the entries in each page_hash_table. 
-- */ -- --#define PGHASH_CACHED (1024) -+ -+ pg_spec = phys_spec = print_hdr = FALSE; -+ -+ switch (mi->flags) -+ { -+ case ADDRESS_SPECIFIED: -+ switch (mi->memtype) -+ { -+ case KVADDR: -+ if (is_page_ptr(mi->spec_addr, NULL)) -+ pg_spec = TRUE; -+ else { -+ if (kvtop(NULL, mi->spec_addr, &phys, 0)) { -+ mi->spec_addr = phys; -+ phys_spec = TRUE; -+ } -+ else -+ return; -+ } -+ break; -+ case PHYSADDR: -+ phys_spec = TRUE; -+ break; -+ default: -+ error(FATAL, "dump_mem_map: no memtype specified\n"); -+ break; -+ } -+ print_hdr = TRUE; -+ break; -+ -+ case GET_ALL: -+ shared = 0; -+ reserved = 0; -+ buffers = 0; -+ slabs = 0; -+ break; -+ -+ case GET_SHARED_PAGES: -+ shared = 0; -+ break; -+ -+ case GET_TOTALRAM_PAGES: -+ reserved = 0; -+ break; -+ -+ case GET_BUFFERS_PAGES: -+ buffers = 0; -+ break; -+ -+ case GET_SLAB_PAGES: -+ slabs = 0; -+ break; -+ -+ default: -+ print_hdr = TRUE; -+ break; -+ } -+ -+ page_cache = GETBUF(SIZE(page) * PGMM_CACHED); -+ done = FALSE; -+ total_pages = 0; -+ -+ for (n = 0; n < vt->numnodes; n++) { -+ if (print_hdr) { -+ if (!(pc->curcmd_flags & HEADER_PRINTED)) -+ fprintf(fp, "%s%s", n ? "\n" : "", hdr); -+ print_hdr = FALSE; -+ pc->curcmd_flags |= HEADER_PRINTED; -+ } -+ -+ nt = &vt->node_table[n]; -+ total_pages += nt->size; -+ pp = nt->mem_map; -+ phys = nt->start_paddr; -+ if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) -+ node_size = vt->max_mapnr; -+ else -+ node_size = nt->size; -+ -+ for (i = 0; i < node_size; -+ i++, pp += SIZE(page), phys += PAGESIZE()) { -+ -+ if ((i % PGMM_CACHED) == 0) { -+ ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); -+ physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); -+ -+ if ((pg_spec && (mi->spec_addr > ppend)) || -+ (phys_spec && -+ (PHYSPAGEBASE(mi->spec_addr) > physend))) { -+ i += (PGMM_CACHED-1); -+ pp = ppend; -+ phys = physend; -+ continue; -+ } -+ -+ fill_mem_map_cache(pp, ppend, page_cache); -+ } -+ -+ pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); -+ -+ if (received_SIGINT()) -+ restart(0); -+ -+ if ((pg_spec && (pp == mi->spec_addr)) || -+ (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) -+ done = TRUE; -+ -+ if (!done && (pg_spec || phys_spec)) -+ continue; -+ -+ flags = ULONG(pcache + OFFSET(page_flags)); -+ count = UINT(pcache + OFFSET(page_count)); -+ -+ switch (mi->flags) -+ { -+ case GET_ALL: -+ case GET_BUFFERS_PAGES: -+ if (VALID_MEMBER(page_buffers)) { -+ tmp = ULONG(pcache + -+ OFFSET(page_buffers)); -+ if (tmp) -+ buffers++; -+ } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { -+ if ((flags >> v26_PG_private) & 1) -+ buffers++; -+ } else -+ error(FATAL, -+ "cannot determine whether pages have buffers\n"); -+ -+ if (mi->flags != GET_ALL) -+ continue; -+ -+ /* FALLTHROUGH */ -+ -+ case GET_SLAB_PAGES: -+ if (v22) { -+ if ((flags >> v22_PG_Slab) & 1) -+ slabs++; -+ } else if (vt->PG_slab) { -+ if ((flags >> vt->PG_slab) & 1) -+ slabs++; -+ } else { -+ if ((flags >> v24_PG_slab) & 1) -+ slabs++; -+ } -+ if (mi->flags != GET_ALL) -+ continue; -+ -+ /* FALLTHROUGH */ -+ -+ case GET_SHARED_PAGES: -+ case GET_TOTALRAM_PAGES: -+ if (vt->PG_reserved) -+ PG_reserved_flag = vt->PG_reserved; -+ else -+ PG_reserved_flag = v22 ? 
-+ 1 << v22_PG_reserved : -+ 1 << v24_PG_reserved; -+ -+ if (flags & PG_reserved_flag) { -+ reserved++; -+ } else { -+ if (count > 1) -+ shared++; -+ } -+ continue; -+ } -+ -+ page_mapping = VALID_MEMBER(page_mapping); -+ -+ if (v22) { -+ inode = ULONG(pcache + OFFSET(page_inode)); -+ offset = ULONG(pcache + OFFSET(page_offset)); -+ } else if (page_mapping) { -+ mapping = ULONG(pcache + -+ OFFSET(page_mapping)); -+ index = ULONG(pcache + OFFSET(page_index)); -+ } -+ -+ page_not_mapped = phys_not_mapped = FALSE; -+ -+ if (v22) { -+ fprintf(fp, "%lx%s%s%s%s%s%8lx %2d%s", -+ pp, -+ space(MINSPACE), -+ mkstring(buf1, MAX(PADDR_PRLEN, -+ strlen("PHYSICAL")), -+ RJUST|LONGLONG_HEX, MKSTR(&phys)), -+ space(MINSPACE), -+ mkstring(buf2, VADDR_PRLEN, -+ RJUST|LONG_HEX, MKSTR(inode)), -+ space(MINSPACE), -+ offset, -+ count, -+ space(MINSPACE)); -+ } else { -+ if ((vt->flags & V_MEM_MAP)) { -+ if (!machdep->verify_paddr(phys)) -+ phys_not_mapped = TRUE; -+ if (!kvtop(NULL, pp, NULL, 0)) -+ page_not_mapped = TRUE; -+ } -+ if (page_not_mapped) -+ fprintf(fp, "%s%s%s%s%s%s%s %2s ", -+ mkstring(buf0, VADDR_PRLEN, -+ LJUST|LONG_HEX, MKSTR(pp)), -+ space(MINSPACE), -+ mkstring(buf1, MAX(PADDR_PRLEN, -+ strlen("PHYSICAL")), -+ RJUST|LONGLONG_HEX, MKSTR(&phys)), -+ space(MINSPACE), -+ mkstring(buf3, VADDR_PRLEN, -+ CENTER|RJUST, " "), -+ space(MINSPACE), -+ mkstring(buf4, 8, CENTER|RJUST, " "), -+ " "); -+ else if (!page_mapping) -+ fprintf(fp, "%s%s%s%s%s%s%s %2d ", -+ mkstring(buf0, VADDR_PRLEN, -+ LJUST|LONG_HEX, MKSTR(pp)), -+ space(MINSPACE), -+ mkstring(buf1, MAX(PADDR_PRLEN, -+ strlen("PHYSICAL")), -+ RJUST|LONGLONG_HEX, MKSTR(&phys)), -+ space(MINSPACE), -+ mkstring(buf3, VADDR_PRLEN, -+ CENTER|RJUST, "-------"), -+ space(MINSPACE), -+ mkstring(buf4, 8, CENTER|RJUST, "-----"), -+ count); -+ else -+ fprintf(fp, "%s%s%s%s%s%s%8ld %2d ", -+ mkstring(buf0, VADDR_PRLEN, -+ LJUST|LONG_HEX, MKSTR(pp)), -+ space(MINSPACE), -+ mkstring(buf1, MAX(PADDR_PRLEN, -+ strlen("PHYSICAL")), -+ RJUST|LONGLONG_HEX, MKSTR(&phys)), -+ space(MINSPACE), -+ mkstring(buf2, VADDR_PRLEN, -+ RJUST|LONG_HEX, MKSTR(mapping)), -+ space(MINSPACE), -+ index, -+ count); -+ } -+ -+ others = 0; -+ -+ if (v22) { -+ if ((flags >> v22_PG_DMA) & 1) -+ fprintf(fp, "%sDMA", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_locked) & 1) -+ fprintf(fp, "%slocked", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_error) & 1) -+ fprintf(fp, "%serror", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_referenced) & 1) -+ fprintf(fp, "%sreferenced", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_dirty) & 1) -+ fprintf(fp, "%sdirty", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_uptodate) & 1) -+ fprintf(fp, "%suptodate", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_free_after) & 1) -+ fprintf(fp, "%sfree_after", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_decr_after) & 1) -+ fprintf(fp, "%sdecr_after", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_swap_unlock_after) & 1) -+ fprintf(fp, "%sswap_unlock_after", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_Slab) & 1) -+ fprintf(fp, "%sslab", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_swap_cache) & 1) -+ fprintf(fp, "%sswap_cache", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_skip) & 1) -+ fprintf(fp, "%sskip", -+ others++ ? "," : ""); -+ if ((flags >> v22_PG_reserved) & 1) -+ fprintf(fp, "%sreserved", -+ others++ ? 
"," : ""); -+ fprintf(fp, "\n"); -+ } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { -+ fprintf(fp, "%lx\n", flags); -+ } else { -+ -+ if ((flags >> v24_PG_locked) & 1) -+ fprintf(fp, "%slocked", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_error) & 1) -+ fprintf(fp, "%serror", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_referenced) & 1) -+ fprintf(fp, "%sreferenced", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_uptodate) & 1) -+ fprintf(fp, "%suptodate", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_dirty) & 1) -+ fprintf(fp, "%sdirty", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_decr_after) & 1) -+ fprintf(fp, "%sdecr_after", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_active) & 1) -+ fprintf(fp, "%sactive", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_inactive_dirty) & 1) -+ fprintf(fp, "%sinactive_dirty", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_slab) & 1) -+ fprintf(fp, "%sslab", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_swap_cache) & 1) -+ fprintf(fp, "%sswap_cache", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_skip) & 1) -+ fprintf(fp, "%sskip", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_inactive_clean) & 1) -+ fprintf(fp, "%sinactive_clean", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_highmem) & 1) -+ fprintf(fp, "%shighmem", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_checked) & 1) -+ fprintf(fp, "%schecked", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_bigpage) & 1) -+ fprintf(fp, "%sbigpage", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_arch_1) & 1) -+ fprintf(fp, "%sarch_1", -+ others++ ? "," : ""); -+ if ((flags >> v24_PG_reserved) & 1) -+ fprintf(fp, "%sreserved", -+ others++ ? "," : ""); -+ if (phys_not_mapped) -+ fprintf(fp, "%s[NOT MAPPED]", -+ others++ ? " " : ""); -+ -+ fprintf(fp, "\n"); -+ } -+ -+ if (done) -+ break; -+ } -+ -+ if (done) -+ break; -+ } -+ -+ switch (mi->flags) -+ { -+ case GET_TOTALRAM_PAGES: -+ mi->retval = total_pages - reserved; -+ break; -+ -+ case GET_SHARED_PAGES: -+ mi->retval = shared; -+ break; -+ -+ case GET_BUFFERS_PAGES: -+ mi->retval = buffers; -+ break; -+ -+ case GET_SLAB_PAGES: -+ mi->retval = slabs; -+ break; -+ -+ case GET_ALL: -+ mi->get_totalram = total_pages - reserved; -+ mi->get_shared = shared; -+ mi->get_buffers = buffers; -+ mi->get_slabs = slabs; -+ break; -+ -+ case ADDRESS_SPECIFIED: -+ mi->retval = done; -+ break; -+ } -+ -+ FREEBUF(page_cache); -+} -+ -+/* -+ * Stash a chunk of PGMM_CACHED page structures, starting at addr, into the -+ * passed-in buffer. The mem_map array is normally guaranteed to be -+ * readable except in the case of virtual mem_map usage. When V_MEM_MAP -+ * is in place, read all pages consumed by PGMM_CACHED page structures -+ * that are currently mapped, leaving the unmapped ones just zeroed out. -+ */ -+static void -+fill_mem_map_cache(ulong pp, ulong ppend, char *page_cache) -+{ -+ long size, cnt; -+ ulong addr; -+ char *bufptr; -+ -+ /* -+ * Try to read it in one fell swoop. -+ */ -+ if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED, -+ "page struct cache", RETURN_ON_ERROR|QUIET)) -+ return; -+ -+ /* -+ * Break it into page-size-or-less requests, warning if it's -+ * not a virtual mem_map. -+ */ -+ size = SIZE(page) * PGMM_CACHED; -+ addr = pp; -+ bufptr = page_cache; -+ -+ while (size > 0) { -+ /* -+ * Compute bytes till end of page. 
-+ */ -+ cnt = PAGESIZE() - PAGEOFFSET(addr); -+ -+ if (cnt > size) -+ cnt = size; -+ -+ if (!readmem(addr, KVADDR, bufptr, size, -+ "virtual page struct cache", RETURN_ON_ERROR|QUIET)) { -+ BZERO(bufptr, size); -+ if (!(vt->flags & V_MEM_MAP) && ((addr+size) < ppend)) -+ error(WARNING, -+ "mem_map[] from %lx to %lx not accessible\n", -+ addr, addr+size); -+ } -+ -+ addr += cnt; -+ bufptr += cnt; -+ size -= cnt; -+ } -+} -+ -+ -+/* -+ * dump_page_hash_table() displays the entries in each page_hash_table. -+ */ -+ -+#define PGHASH_CACHED (1024) - - static void - dump_page_hash_table(struct meminfo *hi) -@@ -4520,13 +5348,6 @@ - */ - static char *zone_hdr = "ZONE NAME SIZE FREE"; - --/* -- * From linux/mmzone.h -- */ --#define ZONE_DMA 0 --#define ZONE_NORMAL 1 --#define ZONE_HIGHMEM 2 -- - static void - dump_free_pages_zones_v1(struct meminfo *fi) - { -@@ -4610,7 +5431,7 @@ - } - - if (fi->flags == GET_FREE_HIGHMEM_PAGES) { -- if (i == ZONE_HIGHMEM) { -+ if (i == vt->ZONE_HIGHMEM) { - readmem(node_zones+ - OFFSET(zone_struct_free_pages), - KVADDR, &value, sizeof(ulong), -@@ -4702,7 +5523,7 @@ - - hq_close(); - -- if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){ -+ if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { - fi->retval = sum; - return; - } -@@ -4828,7 +5649,8 @@ - int order, errflag, do_search; - ulong offset, verbose, value, sum, found; - ulong this_addr; -- physaddr_t this_phys, searchphys; -+ physaddr_t phys, this_phys, searchphys; -+ ulong pp; - ulong zone_mem_map; - ulong zone_start_paddr; - ulong zone_start_pfn; -@@ -4886,7 +5708,6 @@ - node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); - - for (i = 0; i < vt->nr_zones; i++) { -- - if (fi->flags == GET_FREE_PAGES) { - readmem(node_zones+ - OFFSET(zone_free_pages), -@@ -4899,7 +5720,7 @@ - } - - if (fi->flags == GET_FREE_HIGHMEM_PAGES) { -- if (i == ZONE_HIGHMEM) { -+ if (i == vt->ZONE_HIGHMEM) { - readmem(node_zones+ - OFFSET(zone_free_pages), - KVADDR, &value, sizeof(ulong), -@@ -4958,15 +5779,34 @@ - - fprintf(fp, "%6ld ", value); - -- readmem(node_zones+OFFSET(zone_zone_mem_map), -- KVADDR, &zone_mem_map, sizeof(ulong), -- "node_zones zone_mem_map", FAULT_ON_ERROR); -+ if (VALID_MEMBER(zone_zone_mem_map)) { -+ readmem(node_zones+OFFSET(zone_zone_mem_map), -+ KVADDR, &zone_mem_map, sizeof(ulong), -+ "node_zones zone_mem_map", FAULT_ON_ERROR); -+ } - - readmem(node_zones+ OFFSET(zone_zone_start_pfn), - KVADDR, &zone_start_pfn, sizeof(ulong), - "node_zones zone_start_pfn", FAULT_ON_ERROR); - zone_start_paddr = PTOB(zone_start_pfn); - -+ if (!VALID_MEMBER(zone_zone_mem_map)) { -+ if (IS_SPARSEMEM() || IS_DISCONTIGMEM()) { -+ zone_mem_map = 0; -+ if (size) { -+ phys = PTOB(zone_start_pfn); -+ if (phys_to_page(phys, &pp)) -+ zone_mem_map = pp; -+ } -+ } else if (vt->flags & FLATMEM) { -+ zone_mem_map = 0; -+ if (size) -+ zone_mem_map = nt->mem_map + -+ (zone_start_pfn * SIZE(page)); -+ } else -+ error(FATAL, "\ncannot determine zone mem_map: TBD\n"); -+ } -+ - if (zone_mem_map) - zone_start_mapnr = - (zone_mem_map - nt->mem_map) / -@@ -4997,7 +5837,7 @@ - - hq_close(); - -- if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){ -+ if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { - fi->retval = sum; - return; - } -@@ -5313,6 +6153,8 @@ - ulong freehighmem_pages; - ulong totallowmem_pages; - ulong freelowmem_pages; -+ long nr_file_pages, nr_slab; -+ ulong swapper_space_nrpages; - ulong pct; - ulong value1, value2; - uint tmp; -@@ -5334,6 
+6176,14 @@ - get_buffers = meminfo.get_buffers; - get_slabs = meminfo.get_slabs; - -+ /* -+ * If vm_stat array exists, override page search info. -+ */ -+ if (vm_stat_init()) { -+ if (dump_vm_stat("NR_SLAB", &nr_slab)) -+ get_slabs = nr_slab; -+ } -+ - fprintf(fp, kmeminfo_hdr); - /* - * Get total RAM based upon how the various versions of si_meminfo() -@@ -5409,12 +6259,26 @@ - } else - get_symbol_data("page_cache_size", sizeof(long), - &page_cache_size); -+ page_cache_size -= subtract_buffer_pages; - } else if (symbol_exists("nr_pagecache")) { - get_symbol_data("nr_pagecache", sizeof(int), &tmp); - page_cache_size = (long)tmp; -+ page_cache_size -= subtract_buffer_pages; -+ } else if (dump_vm_stat("NR_FILE_PAGES", &nr_file_pages)) { -+ char *swapper_space = GETBUF(SIZE(address_space)); -+ -+ if (!readmem(symbol_value("swapper_space"), KVADDR, swapper_space, -+ SIZE(address_space), "swapper_space", RETURN_ON_ERROR)) -+ swapper_space_nrpages = 0; -+ else -+ swapper_space_nrpages = ULONG(swapper_space + -+ OFFSET(address_space_nrpages)); -+ -+ page_cache_size = nr_file_pages - swapper_space_nrpages - -+ buffer_pages; -+ FREEBUF(swapper_space); - } - -- page_cache_size -= subtract_buffer_pages; - - pct = (page_cache_size * 100)/totalram_pages; - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", -@@ -5519,17 +6383,18 @@ - ulong nrpages; - char *block_device_buf, *inode_buf, *address_space_buf; - -- block_device_buf = GETBUF(SIZE(block_device)); -- inode_buf = GETBUF(SIZE(inode)); -- address_space_buf = GETBUF(SIZE(address_space)); -- - ld = &list_data; - BZERO(ld, sizeof(struct list_data)); -- - get_symbol_data("all_bdevs", sizeof(void *), &ld->start); -+ if (empty_list(ld->start)) -+ return 0; - ld->end = symbol_value("all_bdevs"); - ld->list_head_offset = OFFSET(block_device_bd_list); - -+ block_device_buf = GETBUF(SIZE(block_device)); -+ inode_buf = GETBUF(SIZE(inode)); -+ address_space_buf = GETBUF(SIZE(address_space)); -+ - hq_open(); - bdevcnt = do_list(ld); - bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong)); -@@ -5575,21 +6440,24 @@ - char buf1[BUFSIZE]; - char buf2[BUFSIZE]; - ulong vmlist; -- ulong addr, size, next, pcheck; -+ ulong addr, size, next, pcheck, count; - physaddr_t paddr; - - get_symbol_data("vmlist", sizeof(void *), &vmlist); - next = vmlist; -+ count = 0; - - while (next) { -- if ((next == vmlist) && -- !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC))) { -+ if (!(pc->curcmd_flags & HEADER_PRINTED) && (next == vmlist) && -+ !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC| -+ GET_VMLIST_COUNT|GET_VMLIST))) { - fprintf(fp, "%s ", - mkstring(buf, MAX(strlen("VM_STRUCT"), VADDR_PRLEN), - CENTER|LJUST, "VM_STRUCT")); - fprintf(fp, "%s SIZE\n", - mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "), - CENTER|LJUST, "ADDRESS RANGE")); -+ pc->curcmd_flags |= HEADER_PRINTED; - } - - readmem(next+OFFSET(vm_struct_addr), KVADDR, -@@ -5599,6 +6467,20 @@ - &size, sizeof(ulong), - "vmlist size", FAULT_ON_ERROR); - -+ if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) { -+ /* -+ * Preceding GET_VMLIST_COUNT set vi->retval. 
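The GET_VMLIST_COUNT and GET_VMLIST flags introduced here turn dump_vmlist() into a count-then-fill walker: a first pass only counts vm_struct entries and leaves the total in vi->retval, the caller sizes a buffer from that count, and a second pass records each entry's address and size. What follows is a minimal standalone sketch of that pattern, not crash source; struct vm_area, struct vm_entry and collect_vmlist() are illustrative names only.

/*
 * Standalone sketch (not crash source) of the count-then-fill pattern:
 * pass 1 counts list entries, the caller allocates, pass 2 records them.
 */
#include <stdio.h>
#include <stdlib.h>

struct vm_area { unsigned long addr, size; struct vm_area *next; };
struct vm_entry { unsigned long addr, size; };

static unsigned long count_vmlist(const struct vm_area *head)
{
	unsigned long n = 0;

	while (head) {
		n++;
		head = head->next;
	}
	return n;
}

static struct vm_entry *collect_vmlist(const struct vm_area *head,
				       unsigned long *countp)
{
	unsigned long i, n = count_vmlist(head);	/* pass 1: count */
	struct vm_entry *table;

	*countp = n;
	if (!n || !(table = calloc(n, sizeof(*table))))
		return NULL;

	for (i = 0; head && i < n; head = head->next, i++) {
		table[i].addr = head->addr;		/* pass 2: record */
		table[i].size = head->size;
	}
	return table;
}

int main(void)
{
	struct vm_area c = { 0xf000, 0x1000, NULL };
	struct vm_area b = { 0xd000, 0x2000, &c };
	struct vm_area a = { 0xa000, 0x3000, &b };
	unsigned long i, count;
	struct vm_entry *t = collect_vmlist(&a, &count);

	for (i = 0; t && i < count; i++)
		printf("%lx - %lx\n", t[i].addr, t[i].addr + t[i].size);
	free(t);
	return 0;
}

Later in this patch, next_vmlist_vaddr() drives the same sequence: GET_VMLIST_COUNT first, a GETBUF() of count entries, then GET_VMLIST.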
-+ */ -+ if (vi->flags & GET_VMLIST) { -+ if (count < vi->retval) { -+ vi->vmlist[count].addr = addr; -+ vi->vmlist[count].size = size; -+ } -+ } -+ count++; -+ goto next_entry; -+ } -+ - if (!(vi->flags & ADDRESS_SPECIFIED) || - ((vi->memtype == KVADDR) && - ((vi->spec_addr >= addr) && (vi->spec_addr < (addr+size))))) -@@ -5639,7 +6521,7 @@ - } - - } -- -+next_entry: - readmem(next+OFFSET(vm_struct_next), - KVADDR, &next, sizeof(void *), - "vmlist next", FAULT_ON_ERROR); -@@ -5647,6 +6529,9 @@ - - if (vi->flags & GET_HIGHEST) - vi->retval = addr+size; -+ -+ if (vi->flags & GET_VMLIST_COUNT) -+ vi->retval = count; - } - - /* -@@ -6136,9 +7021,14 @@ - if (vt->flags & KMEM_CACHE_UNAVAIL) - return; - -+ if ((vt->flags & KMEM_CACHE_DELAY) && !(pc->flags & RUNTIME)) -+ return; -+ - if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) - return; - -+ please_wait("gathering kmem slab cache data"); -+ - if (!strlen(slab_hdr)) - sprintf(slab_hdr, - "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", -@@ -6177,9 +7067,11 @@ - - if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), - "kmem_cache_s buffer", RETURN_ON_ERROR)) { -+ FREEBUF(cache_buf); - vt->flags |= KMEM_CACHE_UNAVAIL; - error(INFO, -- "unable to initialize kmem slab cache subsystem\n\n"); -+ "%sunable to initialize kmem slab cache subsystem\n\n", -+ DUMPFILE() ? "\n" : ""); - return; - } - -@@ -6190,6 +7082,13 @@ - - if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) - max_limit = tmp; -+ /* -+ * Recognize and bail out on any max_cpudata_limit() failures. -+ */ -+ if (vt->flags & KMEM_CACHE_UNAVAIL) { -+ FREEBUF(cache_buf); -+ return; -+ } - - if (tmp2 > max_cpus) - max_cpus = tmp2; -@@ -6237,6 +7136,8 @@ - NULL, 0); - } - -+ please_wait_done(); -+ - vt->flags |= KMEM_CACHE_INIT; - } - -@@ -6250,25 +7151,32 @@ - ulong cpudata[NR_CPUS]; - int limit; - ulong max_limit; -- -+ ulong shared; -+ ulong *start_address; -+ -+ if (vt->flags & PERCPU_KMALLOC_V2_NODES) -+ goto kmem_cache_s_array_nodes; -+ - if (vt->flags & PERCPU_KMALLOC_V2) - goto kmem_cache_s_array; -+ -+ if (INVALID_MEMBER(kmem_cache_s_cpudata)) { -+ *cpus = 0; -+ return 0; -+ } - -- if (INVALID_MEMBER(kmem_cache_s_cpudata)) { -- *cpus = 0; -- return 0; -- } -- -- readmem(cache+OFFSET(kmem_cache_s_cpudata), -- KVADDR, &cpudata[0], -- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), -- "cpudata array", FAULT_ON_ERROR); -+ if (!readmem(cache+OFFSET(kmem_cache_s_cpudata), -+ KVADDR, &cpudata[0], -+ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), -+ "cpudata array", RETURN_ON_ERROR)) -+ goto bail_out; - - for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && - cpudata[i]; i++) { -- readmem(cpudata[i]+OFFSET(cpucache_s_limit), -- KVADDR, &limit, sizeof(int), -- "cpucache limit", FAULT_ON_ERROR); -+ if (!readmem(cpudata[i]+OFFSET(cpucache_s_limit), -+ KVADDR, &limit, sizeof(int), -+ "cpucache limit", RETURN_ON_ERROR)) -+ goto bail_out; - if (limit > max_limit) - max_limit = limit; - } -@@ -6279,22 +7187,89 @@ - - kmem_cache_s_array: - -- readmem(cache+OFFSET(kmem_cache_s_array), -- KVADDR, &cpudata[0], -- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), -- "array cache array", FAULT_ON_ERROR); -+ if (!readmem(cache+OFFSET(kmem_cache_s_array), -+ KVADDR, &cpudata[0], -+ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), -+ "array cache array", RETURN_ON_ERROR)) -+ goto bail_out; -+ -+ for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && -+ cpudata[i]; i++) { -+ if (!readmem(cpudata[i]+OFFSET(array_cache_limit), -+ KVADDR, &limit, sizeof(int), -+ "array 
cache limit", RETURN_ON_ERROR)) -+ goto bail_out; -+ if (limit > max_limit) -+ max_limit = limit; -+ } -+ -+ /* -+ * If the shared list can be accessed, check its size as well. -+ */ -+ if (VALID_MEMBER(kmem_list3_shared) && -+ VALID_MEMBER(kmem_cache_s_lists) && -+ readmem(cache+OFFSET(kmem_cache_s_lists)+OFFSET(kmem_list3_shared), -+ KVADDR, &shared, sizeof(void *), "kmem_list3 shared", -+ RETURN_ON_ERROR|QUIET) && -+ readmem(shared+OFFSET(array_cache_limit), -+ KVADDR, &limit, sizeof(int), "shared array_cache limit", -+ RETURN_ON_ERROR|QUIET)) { -+ if (limit > max_limit) -+ max_limit = limit; -+ } -+ -+ *cpus = i; -+ return max_limit; -+ -+kmem_cache_s_array_nodes: -+ -+ if (!readmem(cache+OFFSET(kmem_cache_s_array), -+ KVADDR, &cpudata[0], -+ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), -+ "array cache array", RETURN_ON_ERROR)) -+ goto bail_out; - - for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && - cpudata[i]; i++) { -- readmem(cpudata[i]+OFFSET(array_cache_limit), -- KVADDR, &limit, sizeof(int), -- "array cache limit", FAULT_ON_ERROR); -+ if (!readmem(cpudata[i]+OFFSET(array_cache_limit), -+ KVADDR, &limit, sizeof(int), -+ "array cache limit", RETURN_ON_ERROR)) -+ goto bail_out; - if (limit > max_limit) - max_limit = limit; - } - - *cpus = i; -+ -+ /* -+ * Check the shared list of all the nodes. -+ */ -+ start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); -+ -+ if (VALID_MEMBER(kmem_list3_shared) && VALID_MEMBER(kmem_cache_s_lists) && -+ readmem(cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], -+ sizeof(ulong) * vt->kmem_cache_len_nodes, "array nodelist array", -+ RETURN_ON_ERROR)) { -+ for (i = 0; i < vt->kmem_cache_len_nodes && start_address[i]; i++) { -+ if (readmem(start_address[i] + OFFSET(kmem_list3_shared), -+ KVADDR, &shared, sizeof(void *), -+ "kmem_list3 shared", RETURN_ON_ERROR|QUIET) && -+ readmem(shared + OFFSET(array_cache_limit), -+ KVADDR, &limit, sizeof(int), "shared array_cache limit", -+ RETURN_ON_ERROR|QUIET)) { -+ if (limit > max_limit) -+ max_limit = limit; -+ } -+ } -+ } -+ FREEBUF(start_address); - return max_limit; -+ -+bail_out: -+ vt->flags |= KMEM_CACHE_UNAVAIL; -+ error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); -+ *cpus = 0; -+ return 0; - } - - /* -@@ -6353,6 +7328,7 @@ - #define KMEM_OBJECT_ADDR_INUSE (4) - #define KMEM_OBJECT_ADDR_CACHED (5) - #define KMEM_ON_SLAB (6) -+#define KMEM_OBJECT_ADDR_SHARED (7) - - #define DUMP_KMEM_CACHE_INFO_V1() \ - { \ -@@ -6408,7 +7384,7 @@ - { \ - char b1[BUFSIZE], b2[BUFSIZE]; \ - ulong allocated, freeobjs; \ -- if (vt->flags & PERCPU_KMALLOC_V1) { \ -+ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { \ - allocated = si->s_inuse - si->cpucached_slab; \ - freeobjs = si->c_num - allocated - si->cpucached_slab; \ - } else { \ -@@ -6419,8 +7395,8 @@ - mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ - mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \ - si->c_num, allocated, \ -- vt->flags & PERCPU_KMALLOC_V1 ? freeobjs + si->cpucached_slab :\ -- freeobjs); \ -+ vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
\ -+ freeobjs + si->cpucached_slab : freeobjs); \ - } - - static void -@@ -6857,6 +7833,13 @@ - for (i = 0; i < vt->kmem_max_cpus; i++) - si->cpudata[i] = (ulong *) - GETBUF(vt->kmem_max_limit * sizeof(ulong)); -+ if(vt->flags & PERCPU_KMALLOC_V2_NODES) -+ si->shared_array_cache = (ulong *) -+ GETBUF(vt->kmem_cache_len_nodes * -+ (vt->kmem_max_limit+1) * sizeof(ulong)); -+ else -+ si->shared_array_cache = (ulong *) -+ GETBUF((vt->kmem_max_limit+1) * sizeof(ulong)); - - cnt = 0; - -@@ -6939,7 +7922,10 @@ - "kmem_cache_s num", FAULT_ON_ERROR); - si->c_num = (ulong)tmp_val; - -- do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); -+ if( vt->flags & PERCPU_KMALLOC_V2_NODES ) -+ do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si); -+ else -+ do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); - - if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { - DUMP_KMEM_CACHE_INFO_V2(); -@@ -6953,12 +7939,16 @@ - - if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { - -- gather_cpudata_list_v2(si); -+ if (!(vt->flags & PERCPU_KMALLOC_V2_NODES)) -+ gather_cpudata_list_v2(si); - - si->slab = (si->flags & ADDRESS_SPECIFIED) ? - vaddr_to_slab(si->spec_addr) : 0; - -- do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); -+ if (vt->flags & PERCPU_KMALLOC_V2_NODES) -+ do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si); -+ else -+ do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); - - if (si->found) { - fprintf(fp, kmem_cache_hdr); -@@ -7005,7 +7995,14 @@ - " %lx (cpu %d cache)\n", - (ulong)si->spec_addr, si->cpu); - break; -- } -+ -+ case KMEM_OBJECT_ADDR_SHARED: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, -+ " %lx (shared cache)\n", -+ (ulong)si->spec_addr); -+ break; -+ } - - break; - } -@@ -7033,6 +8030,7 @@ - FREEBUF(si->kmem_bufctl); - for (i = 0; i < vt->kmem_max_cpus; i++) - FREEBUF(si->cpudata[i]); -+ FREEBUF(si->shared_array_cache); - - } - -@@ -7613,29 +8611,254 @@ - if (received_SIGINT()) - restart(0); - -- if (!verify_slab_v2(si, last, s)) { -- list_borked = 1; -- continue; -- } -- last = si->slab - OFFSET(slab_list); -+ if (!verify_slab_v2(si, last, s)) { -+ list_borked = 1; -+ continue; -+ } -+ last = si->slab - OFFSET(slab_list); -+ -+ dump_slab_percpu_v2(si); -+ -+ if (si->found) { -+ return; -+ } -+ -+ readmem(si->slab+OFFSET(slab_list), -+ KVADDR, &si->slab, sizeof(ulong), -+ "slab list", FAULT_ON_ERROR); -+ -+ si->slab -= OFFSET(slab_list); -+ -+ } while (si->slab != slab_chains[s] && !list_borked); -+ } -+ -+ break; -+ } -+} -+ -+ -+/* -+* Added To Traverse the Nodelists -+*/ -+ -+static void -+do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si) -+{ -+ int i, tmp, s; -+ int list_borked; -+ char *slab_buf; -+ ulong specified_slab; -+ ulong last; -+ ulong slab_chains[SLAB_CHAINS]; -+ ulong *start_address; -+ int index; -+ -+ list_borked = 0; -+ si->slabsize = (power(2, si->order) * PAGESIZE()); -+ si->cpucached_slab = 0; -+ start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); -+ -+ if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, -+ &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, -+ "array nodelist array", RETURN_ON_ERROR)) -+ error(INFO, "cannot read kmem_cache nodelists array"); -+ -+ switch (cmd) -+ { -+ case SLAB_GET_COUNTS: -+ si->flags |= SLAB_GET_COUNTS; -+ si->flags &= ~SLAB_WALKTHROUGH; -+ si->cpucached_cache = 0; -+ si->num_slabs = si->inuse = 0; -+ slab_buf = GETBUF(SIZE(slab)); -+ for (index=0; (index < vt->kmem_cache_len_nodes) && start_address[index]; index++) -+ { -+ slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); -+ 
slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); -+ slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); -+ -+ gather_cpudata_list_v2_nodes(si, index); -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "[ %s: %lx ", si->curname, si->cache); -+ fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", -+ slab_chains[0], slab_chains[1], slab_chains[2]); -+ } -+ -+ for (s = 0; s < SLAB_CHAINS; s++) { -+ if (!slab_chains[s]) -+ continue; -+ -+ if (!readmem(slab_chains[s], -+ KVADDR, &si->slab, sizeof(ulong), -+ "first slab", QUIET|RETURN_ON_ERROR)) { -+ error(INFO, -+ "%s: %s list: bad slab pointer: %lx\n", -+ si->curname, -+ slab_chain_name_v2[s], -+ slab_chains[s]); -+ list_borked = 1; -+ continue; -+ } -+ -+ if (slab_data_saved(si)) { -+ FREEBUF(slab_buf); -+ FREEBUF(start_address); -+ return; -+ } -+ -+ if (si->slab == slab_chains[s]) -+ continue; -+ -+ last = slab_chains[s]; -+ -+ do { -+ if (received_SIGINT()) { -+ FREEBUF(slab_buf); -+ FREEBUF(start_address); -+ restart(0); -+ } -+ -+ if (!verify_slab_v2(si, last, s)) { -+ list_borked = 1; -+ continue; -+ } -+ last = si->slab - OFFSET(slab_list); -+ -+ readmem(si->slab, KVADDR, slab_buf, -+ SIZE(slab), "slab buffer", -+ FAULT_ON_ERROR); -+ -+ tmp = INT(slab_buf + OFFSET(slab_inuse)); -+ si->inuse += tmp; -+ -+ if (ACTIVE()) -+ gather_cpudata_list_v2_nodes(si, index); -+ -+ si->s_mem = ULONG(slab_buf + -+ OFFSET(slab_s_mem)); -+ gather_slab_cached_count(si); -+ -+ si->num_slabs++; -+ -+ si->slab = ULONG(slab_buf + -+ OFFSET(slab_list)); -+ si->slab -= OFFSET(slab_list); -+ -+ /* -+ * Check for slab transition. (Tony Dziedzic) -+ */ -+ for (i = 0; i < SLAB_CHAINS; i++) { -+ if ((i != s) && -+ (si->slab == slab_chains[i])) { -+ error(NOTE, -+ "%s: slab chain inconsistency: %s list\n", -+ si->curname, -+ slab_chain_name_v2[s]); -+ list_borked = 1; -+ } -+ } -+ -+ } while (si->slab != slab_chains[s] && !list_borked); -+ } -+ } -+ -+ if (!list_borked) -+ save_slab_data(si); -+ break; -+ -+ case SLAB_WALKTHROUGH: -+ specified_slab = si->slab; -+ si->flags |= SLAB_WALKTHROUGH; -+ si->flags &= ~SLAB_GET_COUNTS; -+ slab_buf = GETBUF(SIZE(slab)); -+ for (index=0; (index < vt->kmem_cache_len_nodes) && start_address[index]; index++) -+ { -+ slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); -+ slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); -+ slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); -+ -+ gather_cpudata_list_v2_nodes(si, index); -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "[ %s: %lx ", si->curname, si->cache); -+ fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", -+ slab_chains[0], slab_chains[1], slab_chains[2]); -+ } - -- dump_slab_percpu_v2(si); -+ for (s = 0; s < SLAB_CHAINS; s++) { -+ if (!slab_chains[s]) -+ continue; -+ -+ if (!specified_slab) { -+ if (!readmem(slab_chains[s], -+ KVADDR, &si->slab, sizeof(ulong), -+ "slabs", QUIET|RETURN_ON_ERROR)) { -+ error(INFO, -+ "%s: %s list: bad slab pointer: %lx\n", -+ si->curname, -+ slab_chain_name_v2[s], -+ slab_chains[s]); -+ list_borked = 1; -+ continue; -+ } -+ last = slab_chains[s]; -+ } else -+ last = 0; -+ -+ if (si->slab == slab_chains[s]) -+ continue; -+ -+ readmem(si->slab, KVADDR, slab_buf, -+ SIZE(slab), "slab buffer", -+ FAULT_ON_ERROR); - -- if (si->found) { -- return; -+ si->s_mem = ULONG(slab_buf + -+ OFFSET(slab_s_mem)); -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "search cache: [%s] ", si->curname); -+ if (si->flags & ADDRESS_SPECIFIED) -+ fprintf(fp, "for %llx", si->spec_addr); -+ 
fprintf(fp, "\n"); - } -+ -+ do { -+ if (received_SIGINT()) -+ { -+ FREEBUF(start_address); -+ FREEBUF(slab_buf); -+ restart(0); -+ } -+ -+ if (!verify_slab_v2(si, last, s)) { -+ list_borked = 1; -+ continue; -+ } -+ last = si->slab - OFFSET(slab_list); -+ -+ dump_slab_percpu_v2(si); -+ -+ if (si->found) { -+ FREEBUF(start_address); -+ FREEBUF(slab_buf); -+ return; -+ } - -- readmem(si->slab+OFFSET(slab_list), -- KVADDR, &si->slab, sizeof(ulong), -- "slab list", FAULT_ON_ERROR); -- -- si->slab -= OFFSET(slab_list); -+ readmem(si->slab+OFFSET(slab_list), -+ KVADDR, &si->slab, sizeof(ulong), -+ "slab list", FAULT_ON_ERROR); -+ -+ si->slab -= OFFSET(slab_list); - -- } while (si->slab != slab_chains[s] && !list_borked); -+ } while (si->slab != slab_chains[s] && !list_borked); -+ } - } - - break; - } -+ FREEBUF(slab_buf); -+ FREEBUF(start_address); - } - - /* -@@ -7750,6 +8973,11 @@ - { - int i; - -+ if (si->flags & SLAB_DATA_NOSAVE) { -+ si->flags &= ~SLAB_DATA_NOSAVE; -+ return; -+ } -+ - if (ACTIVE()) - return; - -@@ -7840,7 +9068,7 @@ - - if (si->flags & ADDRESS_SPECIFIED) { - if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) && -- (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))){ -+ (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))) { - si->found = KMEM_SLAB_ADDR; - return; - } -@@ -8213,7 +9441,7 @@ - */ - - if (si->c_flags & SLAB_CFLGS_BUFCTL) { -- for (i = 0, next = si->s_index; i < si->c_num; i++, next++){ -+ for (i = 0, next = si->s_index; i < si->c_num; i++, next++) { - obj = si->s_mem + - ((next - si->s_index) * si->c_offset); - DUMP_SLAB_OBJECT(); -@@ -8263,7 +9491,7 @@ - dump_slab_objects_percpu(struct meminfo *si) - { - int i, j; -- int on_free_list, on_cpudata_list; -+ int on_free_list, on_cpudata_list, on_shared_list; - ulong cnt, expected; - ulong obj; - -@@ -8285,6 +9513,7 @@ - for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { - on_free_list = FALSE; - on_cpudata_list = FALSE; -+ on_shared_list = FALSE; - - for (j = 0; j < si->c_num; j++) { - if (obj == si->addrlist[j]) { -@@ -8294,13 +9523,26 @@ - } - - on_cpudata_list = check_cpudata_list(si, obj); -+ on_shared_list = check_shared_list(si, obj); - - if (on_free_list && on_cpudata_list) { - error(INFO, -- "\"%s\" cache: object %lx on both free and cpudata lists\n", -+ "\"%s\" cache: object %lx on both free and cpu %d lists\n", -+ si->curname, si->cpu, obj); -+ si->errors++; -+ } -+ if (on_free_list && on_shared_list) { -+ error(INFO, -+ "\"%s\" cache: object %lx on both free and shared lists\n", - si->curname, obj); - si->errors++; - } -+ if (on_cpudata_list && on_shared_list) { -+ error(INFO, -+ "\"%s\" cache: object %lx on both cpu %d and shared lists\n", -+ si->curname, obj, si->cpu); -+ si->errors++; -+ } - - if (on_free_list) { - if (!(si->flags & ADDRESS_SPECIFIED)) -@@ -8324,6 +9566,17 @@ - return; - } - } -+ } else if (on_shared_list) { -+ if (!(si->flags & ADDRESS_SPECIFIED)) -+ fprintf(fp, " %lx (shared cache)\n", obj); -+ cnt++; -+ if (si->flags & ADDRESS_SPECIFIED) { -+ if (INOBJECT(si->spec_addr, obj)) { -+ si->found = -+ KMEM_OBJECT_ADDR_SHARED; -+ return; -+ } -+ } - } else { - if (!(si->flags & ADDRESS_SPECIFIED)) - fprintf(fp, " [%lx]\n", obj); -@@ -8349,7 +9602,10 @@ - /* - * Determine how many of the "inuse" slab objects are actually cached - * in the kmem_cache_s header. Set the per-slab count and update the -- * cumulative per-cache count. -+ * cumulative per-cache count. 
With the addition of the shared list -+ * check, the terms "cpucached_cache" and "cpucached_slab" are somewhat -+ * misleading. But they both are types of objects that are cached -+ * in the kmem_cache_s header, just not necessarily per-cpu. - */ - - static void -@@ -8357,16 +9613,35 @@ - { - int i; - ulong obj; -+ int in_cpudata, in_shared; - - si->cpucached_slab = 0; - - for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { -+ in_cpudata = in_shared = 0; - if (check_cpudata_list(si, obj)) { -+ in_cpudata = TRUE; - si->cpucached_slab++; - if (si->flags & SLAB_GET_COUNTS) { - si->cpucached_cache++; - } - } -+ if (check_shared_list(si, obj)) { -+ in_shared = TRUE; -+ if (!in_cpudata) { -+ si->cpucached_slab++; -+ if (si->flags & SLAB_GET_COUNTS) { -+ si->cpucached_cache++; -+ } -+ } -+ } -+ if (in_cpudata && in_shared) { -+ si->flags |= SLAB_DATA_NOSAVE; -+ if (!(si->flags & VERBOSE)) -+ error(INFO, -+ "\"%s\" cache: object %lx on both cpu %d and shared lists\n", -+ si->curname, obj, si->cpu); -+ } - } - } - -@@ -8423,7 +9698,8 @@ - } - - /* -- * Updated for 2.6 slab percpu data structure. -+ * Updated for 2.6 slab percpu data structure, this also gathers -+ * the shared array_cache list as well. - */ - static void - gather_cpudata_list_v2(struct meminfo *si) -@@ -8431,6 +9707,7 @@ - int i, j; - int avail; - ulong cpudata[NR_CPUS]; -+ ulong shared; - - readmem(si->cache+OFFSET(kmem_cache_s_array), - KVADDR, &cpudata[0], -@@ -8466,8 +9743,152 @@ - - if (CRASHDEBUG(2)) - for (j = 0; j < avail; j++) -- fprintf(fp, " %lx\n", si->cpudata[i][j]); -+ fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); -+ } -+ -+ /* -+ * If the shared list contains anything, gather them as well. -+ */ -+ BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit); -+ -+ if (!VALID_MEMBER(kmem_list3_shared) || -+ !VALID_MEMBER(kmem_cache_s_lists) || -+ !readmem(si->cache+OFFSET(kmem_cache_s_lists)+ -+ OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *), -+ "kmem_list3 shared", RETURN_ON_ERROR|QUIET) || -+ !readmem(shared+OFFSET(array_cache_avail), -+ KVADDR, &avail, sizeof(int), "shared array_cache avail", -+ RETURN_ON_ERROR|QUIET) || !avail) -+ return; -+ -+ if (avail > vt->kmem_max_limit) { -+ error(INFO, -+ "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", -+ si->curname, avail, vt->kmem_max_limit); -+ si->errors++; -+ return; -+ } -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%s: shared avail: %d\n", -+ si->curname, avail); -+ -+ readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache, -+ sizeof(void *) * avail, "shared array_cache avail", -+ FAULT_ON_ERROR); -+ -+ if (CRASHDEBUG(2)) -+ for (j = 0; j < avail; j++) -+ fprintf(fp, " %lx (shared list)\n", si->shared_array_cache[j]); -+} -+ -+ -+ -+/* -+ * Updated gather_cpudata_list_v2 for per-node kmem_list3's in kmem_cache -+ */ -+static void -+gather_cpudata_list_v2_nodes(struct meminfo *si, int index) -+{ -+ int i, j; -+ int avail; -+ ulong cpudata[NR_CPUS]; -+ ulong shared; -+ ulong *start_address; -+ -+ start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); -+ readmem(si->cache+OFFSET(kmem_cache_s_array), -+ KVADDR, &cpudata[0], -+ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), -+ "array_cache array", FAULT_ON_ERROR); -+ -+ for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && -+ (cpudata[i]) && !(index); i++) { -+ BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); -+ -+ readmem(cpudata[i]+OFFSET(array_cache_avail), -+ KVADDR, &avail, sizeof(int), -+ "array cache avail", 
FAULT_ON_ERROR); -+ -+ if (!avail) -+ continue; -+ -+ if (avail > vt->kmem_max_limit) { -+ error(INFO, -+ "\"%s\" cache: array_cache.avail %d greater than limit %ld\n", -+ si->curname, avail, vt->kmem_max_limit); -+ si->errors++; -+ } -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%s: cpu[%d] avail: %d\n", -+ si->curname, i, avail); -+ -+ readmem(cpudata[i]+SIZE(array_cache), -+ KVADDR, si->cpudata[i], -+ sizeof(void *) * avail, -+ "array_cache avail", FAULT_ON_ERROR); -+ -+ if (CRASHDEBUG(2)) -+ for (j = 0; j < avail; j++) -+ fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); - } -+ -+ /* -+ * If the shared list contains anything, gather them as well. -+ */ -+ if (!index) { -+ BZERO(si->shared_array_cache, sizeof(ulong) * -+ vt->kmem_max_limit * vt->kmem_cache_len_nodes); -+ si->current_cache_index = 0; -+ } -+ -+ if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], -+ sizeof(ulong) * vt->kmem_cache_len_nodes , "array nodelist array", -+ RETURN_ON_ERROR) || -+ !readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR, &shared, -+ sizeof(void *), "kmem_list3 shared", RETURN_ON_ERROR|QUIET) || -+ !readmem(shared + OFFSET(array_cache_avail), KVADDR, &avail, sizeof(int), -+ "shared array_cache avail", RETURN_ON_ERROR|QUIET) || !avail) { -+ FREEBUF(start_address); -+ return; -+ } -+ -+ if (avail > vt->kmem_max_limit) { -+ error(INFO, -+ "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", -+ si->curname, avail, vt->kmem_max_limit); -+ si->errors++; -+ FREEBUF(start_address); -+ return; -+ } -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%s: shared avail: %d\n", -+ si->curname, avail); -+ -+ readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache + si->current_cache_index, -+ sizeof(void *) * avail, "shared array_cache avail", -+ FAULT_ON_ERROR); -+ -+ if ((si->current_cache_index + avail) > -+ (vt->kmem_max_limit * vt->kmem_cache_len_nodes)) { -+ error(INFO, -+ "\"%s\" cache: total shared array_cache.avail %d greater than total limit %ld\n", -+ si->curname, -+ si->current_cache_index + avail, -+ vt->kmem_max_limit * vt->kmem_cache_len_nodes); -+ si->errors++; -+ FREEBUF(start_address); -+ return; -+ } -+ -+ if (CRASHDEBUG(2)) -+ for (j = si->current_cache_index; j < (si->current_cache_index + avail); j++) -+ fprintf(fp, " %lx (shared list)\n", si->shared_array_cache[j]); -+ -+ si->current_cache_index += avail; -+ FREEBUF(start_address); - } - - /* -@@ -8491,6 +9912,27 @@ - return FALSE; - } - -+/* -+ * Check whether a given address is contained in the previously-gathered -+ * shared object cache. -+ */ -+ -+static int -+check_shared_list(struct meminfo *si, ulong obj) -+{ -+ int i; -+ -+ if (INVALID_MEMBER(kmem_list3_shared) || -+ !si->shared_array_cache) -+ return FALSE; -+ -+ for (i = 0; si->shared_array_cache[i]; i++) { -+ if (si->shared_array_cache[i] == obj) -+ return TRUE; -+ } -+ -+ return FALSE; -+} - - /* - * Search the various memory subsystems for instances of this address. 
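check_shared_list() added above parallels check_cpudata_list(): the gathered shared array_cache entries sit in a zero-terminated array of object addresses, membership is a linear scan, and objects that show up on more than one list are reported as errors. A self-contained sketch of that classification step follows; on_list() and classify_object() are illustrative stand-ins for the meminfo bookkeeping, not crash functions.

/*
 * Illustrative sketch only: classify a slab object against previously
 * gathered free, per-cpu and shared object lists, each stored as a
 * zero-terminated array of addresses.
 */
#include <stdio.h>

enum object_source { OBJ_IN_USE, OBJ_FREE, OBJ_CPU_CACHED, OBJ_SHARED_CACHED };

static int on_list(const unsigned long *list, unsigned long obj)
{
	int i;

	if (!list)
		return 0;
	for (i = 0; list[i]; i++)		/* zero-terminated scan */
		if (list[i] == obj)
			return 1;
	return 0;
}

static enum object_source
classify_object(unsigned long obj, const unsigned long *free_list,
		const unsigned long *cpu_list, const unsigned long *shared_list)
{
	int f = on_list(free_list, obj);
	int c = on_list(cpu_list, obj);
	int s = on_list(shared_list, obj);

	/* An object on more than one list indicates inconsistent slab
	 * data; the patch reports such overlaps as errors. */
	if (f + c + s > 1)
		fprintf(stderr, "object %lx on multiple lists\n", obj);

	if (f)
		return OBJ_FREE;
	if (c)
		return OBJ_CPU_CACHED;
	if (s)
		return OBJ_SHARED_CACHED;
	return OBJ_IN_USE;
}

int main(void)
{
	unsigned long shared[] = { 0xc1000, 0xc2000, 0 };

	printf("%d\n", classify_object(0xc2000, NULL, NULL, shared));
	return 0;
}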
-@@ -8624,6 +10066,33 @@ - ulong ppstart, ppend; - struct node_table *nt; - ulong pgnum, node_size; -+ ulong nr, sec_addr; -+ ulong nr_mem_sections; -+ ulong coded_mem_map, mem_map, end_mem_map; -+ physaddr_t section_paddr; -+ -+ if (IS_SPARSEMEM()) { -+ nr_mem_sections = NR_MEM_SECTIONS(); -+ for (nr = 0; nr <= nr_mem_sections ; nr++) { -+ if ((sec_addr = valid_section_nr(nr))) { -+ coded_mem_map = section_mem_map_addr(sec_addr); -+ mem_map = sparse_decode_mem_map(coded_mem_map, nr); -+ end_mem_map = mem_map + (PAGES_PER_SECTION() * SIZE(page)); -+ -+ if ((addr >= mem_map) && (addr < end_mem_map)) { -+ if ((addr - mem_map) % SIZE(page)) -+ return FALSE; -+ if (phys) { -+ section_paddr = PTOB(section_nr_to_pfn(nr)); -+ pgnum = (addr - mem_map) / SIZE(page); -+ *phys = section_paddr + (pgnum * PAGESIZE()); -+ } -+ return TRUE; -+ } -+ } -+ } -+ return FALSE; -+ } - - for (n = 0; n < vt->numnodes; n++) { - nt = &vt->node_table[n]; -@@ -8690,6 +10159,16 @@ - physaddr_t pstart, pend; - ulong node_size; - -+ if (IS_SPARSEMEM()) { -+ ulong map; -+ map = pfn_to_map(phys >> PAGESHIFT()); -+ if (map) { -+ *pp = map; -+ return TRUE; -+ } -+ return FALSE; -+ } -+ - for (n = 0; n < vt->numnodes; n++) { - nt = &vt->node_table[n]; - if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) -@@ -8775,12 +10254,15 @@ - int i; - struct node_table *nt; - int others; -+ ulong *up; - - others = 0; - fprintf(fp, " flags: %lx %s(", - vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : ""); - if (vt->flags & NODES) - fprintf(fp, "%sNODES", others++ ? "|" : ""); -+ if (vt->flags & NODES_ONLINE) -+ fprintf(fp, "%sNODES_ONLINE", others++ ? "|" : ""); - if (vt->flags & ZONES) - fprintf(fp, "%sZONES", others++ ? "|" : ""); - if (vt->flags & PERCPU_KMALLOC_V1) -@@ -8797,6 +10279,21 @@ - fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : ""); - if (vt->flags & DISCONTIGMEM) - fprintf(fp, "%sDISCONTIGMEM", others++ ? "|" : ""); -+ if (vt->flags & FLATMEM) -+ fprintf(fp, "%sFLATMEM", others++ ? "|" : ""); -+ if (vt->flags & SPARSEMEM) -+ fprintf(fp, "%sSPARSEMEM", others++ ? "|" : "");\ -+ if (vt->flags & SPARSEMEM_EX) -+ fprintf(fp, "%sSPARSEMEM_EX", others++ ? "|" : "");\ -+ if (vt->flags & KMEM_CACHE_DELAY) -+ fprintf(fp, "%sKMEM_CACHE_DELAY", others++ ? "|" : "");\ -+ if (vt->flags & PERCPU_KMALLOC_V2_NODES) -+ fprintf(fp, "%sPERCPU_KMALLOC_V2_NODES", others++ ? "|" : "");\ -+ if (vt->flags & VM_STAT) -+ fprintf(fp, "%sVM_STAT", others++ ? "|" : "");\ -+ if (vt->flags & KMALLOC_SLUB) -+ fprintf(fp, "%sKMALLOC_SLUB", others++ ? 
"|" : "");\ -+ - fprintf(fp, ")\n"); - if (vt->kernel_pgd[0] == vt->kernel_pgd[1]) - fprintf(fp, " kernel_pgd[NR_CPUS]: %lx ...\n", -@@ -8825,6 +10322,7 @@ - fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); - fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); - fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen); -+ fprintf(fp, "kmem_cache_nodelist_len: %ld\n", vt->kmem_cache_len_nodes); - fprintf(fp, " PG_reserved: %lx\n", vt->PG_reserved); - fprintf(fp, " PG_slab: %ld\n", vt->PG_slab); - fprintf(fp, " paddr_prlen: %d\n", vt->paddr_prlen); -@@ -8834,12 +10332,13 @@ - for (i = 0; i < vt->numnodes; i++) { - nt = &vt->node_table[i]; - fprintf(fp, " node_table[%d]: \n", i); -- fprintf(fp, " id: %d\n", nt->node_id); -- fprintf(fp, " pgdat: %lx\n", nt->pgdat); -- fprintf(fp, " size: %ld\n", nt->size); -- fprintf(fp, " mem_map: %lx\n", nt->mem_map); -- fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); -- fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); -+ fprintf(fp, " id: %d\n", nt->node_id); -+ fprintf(fp, " pgdat: %lx\n", nt->pgdat); -+ fprintf(fp, " size: %ld\n", nt->size); -+ fprintf(fp, " present: %ld\n", nt->present); -+ fprintf(fp, " mem_map: %lx\n", nt->mem_map); -+ fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); -+ fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); - } - - fprintf(fp, " dump_free_pages: "); -@@ -8869,6 +10368,25 @@ - fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles); - fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read); - fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct); -+ fprintf(fp, " mem_sec: %lx\n", (ulong)vt->mem_sec); -+ fprintf(fp, " ZONE_HIGHMEM: %d\n", vt->ZONE_HIGHMEM); -+ fprintf(fp, "node_online_map_len: %d\n", vt->node_online_map_len); -+ if (vt->node_online_map_len) { -+ fprintf(fp, " node_online_map: "); -+ up = (ulong *)vt->node_online_map; -+ for (i = 0; i < vt->node_online_map_len; i++) { -+ fprintf(fp, "%s%lx", i ? ", " : "[", *up); -+ up++; -+ } -+ fprintf(fp, "]\n"); -+ } else { -+ fprintf(fp, " node_online_map: (unused)\n"); -+ } -+ fprintf(fp, " nr_vm_stat_items: %d\n", vt->nr_vm_stat_items); -+ fprintf(fp, " vm_stat_items: %s", (vt->flags & VM_STAT) ? -+ "\n" : "(not used)\n"); -+ for (i = 0; i < vt->nr_vm_stat_items; i++) -+ fprintf(fp, " [%d] %s\n", i, vt->vm_stat_items[i]); - - dump_vma_cache(VERBOSE); - } -@@ -8891,12 +10409,16 @@ - console(" id: %d\n", nt->node_id); - console(" pgdat: %lx\n", nt->pgdat); - console(" size: %ld\n", nt->size); -+ console(" present: %ld\n", nt->present); - console(" mem_map: %lx\n", nt->mem_map); - console(" start_paddr: %lx\n", nt->start_paddr); - console(" start_mapnr: %ld\n", nt->start_mapnr); - } - -- total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); -+ if (nt->present) -+ total += (uint64_t)((uint64_t)nt->present * (uint64_t)PAGESIZE()); -+ else -+ total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); - } - - return total; -@@ -9321,6 +10843,43 @@ - } - - /* -+ * Return the next mapped kernel virtual address in the vmlist -+ * that is equal to or comes after the passed-in address. 
-+ */ -+static ulong -+next_vmlist_vaddr(struct meminfo *mi, ulong vaddr) -+{ -+ ulong i, count; -+ -+ BZERO(mi, sizeof(struct meminfo)); -+ -+ mi->flags = GET_VMLIST_COUNT; -+ dump_vmlist(mi); -+ count = mi->retval; -+ -+ if (!count) -+ return vaddr; -+ -+ mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*count); -+ mi->flags = GET_VMLIST; -+ dump_vmlist(mi); -+ -+ for (i = 0; i < count; i++) { -+ if (vaddr <= mi->vmlist[i].addr) { -+ vaddr = mi->vmlist[i].addr; -+ break; -+ } -+ if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size)) -+ break; -+ } -+ -+ FREEBUF(mi->vmlist); -+ -+ return vaddr; -+} -+ -+ -+/* - * Return the next kernel virtual address page that comes after - * the passed-in address. - */ -@@ -9348,6 +10907,8 @@ - - if (IS_VMALLOC_ADDR(vaddr_orig)) { - if (IS_VMALLOC_ADDR(vaddr) && (vaddr < vmalloc_limit)) { -+ if (machine_type("X86_64")) -+ vaddr = next_vmlist_vaddr(&meminfo, vaddr); - *nextvaddr = vaddr; - return TRUE; - } -@@ -9377,6 +10938,7 @@ - /* - * We're in the physical range. - */ -+ *nextvaddr = vaddr; - return TRUE; - } - -@@ -9446,7 +11008,7 @@ - totalswap = totalused = 0; - - for (i = 0; i < vt->nr_swapfiles; i++, -- swap_info += SIZE(swap_info_struct)){ -+ swap_info += SIZE(swap_info_struct)) { - fill_swap_info(swap_info); - - flags = INT(vt->swap_info_struct + -@@ -9471,8 +11033,12 @@ - prio = INT(vt->swap_info_struct + - OFFSET(swap_info_struct_prio)); - -- max = ULONG(vt->swap_info_struct + -- OFFSET(swap_info_struct_max)); -+ if (MEMBER_SIZE("swap_info_struct", "max") == sizeof(int)) -+ max = UINT(vt->swap_info_struct + -+ OFFSET(swap_info_struct_max)); -+ else -+ max = ULONG(vt->swap_info_struct + -+ OFFSET(swap_info_struct_max)); - - swap_map = ULONG(vt->swap_info_struct + - OFFSET(swap_info_struct_swap_map)); -@@ -9486,7 +11052,7 @@ - } else if (VALID_MEMBER - (swap_info_struct_old_block_size)) { - get_pathname(file_to_dentry(swap_file), -- buf, BUFSIZE, 1, 0); -+ buf, BUFSIZE, 1, file_to_vfsmnt(swap_file)); - } else { - get_pathname(swap_file, buf, BUFSIZE, 1, 0); - } -@@ -9551,8 +11117,12 @@ - if (!pte) - return NULL; - -- sprintf(buf, "%s OFFSET: %lld", -- get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte)); -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -+ sprintf(buf, "%s OFFSET: %lld", -+ get_swapdev(__swp_type(pte), swapdev), __swp_offset(pte)); -+ else -+ sprintf(buf, "%s OFFSET: %llx", -+ get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte)); - - return buf; - } -@@ -9741,14 +11311,16 @@ - dump_memory_nodes(int initialize) - { - int i, j; -- int n, id, flen, slen; -+ int n, id, node, flen, slen, badaddr; - ulong node_mem_map; - ulong node_start_paddr; - ulong node_start_pfn; - ulong node_start_mapnr; -- ulong node_spanned_pages; -- ulong free_pages, zone_size, node_size; -+ ulong node_spanned_pages, node_present_pages; -+ ulong free_pages, zone_size, node_size, cum_zone_size; - ulong zone_start_paddr, zone_start_mapnr, zone_mem_map; -+ physaddr_t phys; -+ ulong pp; - ulong zone_start_pfn; - ulong bdata; - ulong pgdat; -@@ -9761,31 +11333,55 @@ - char buf5[BUFSIZE]; - struct node_table *nt; - -- if (!(vt->flags & NODES)) { -- if (!initialize) -- error(FATAL, -- "memory nodes not supported by this kernel\n\n"); -- else { -- nt = &vt->node_table[0]; -- nt->node_id = 0; -- if (symbol_exists("contig_page_data")) -- nt->pgdat = symbol_value("contig_page_data"); -- else -- nt->pgdat = 0; -- nt->size = vt->total_pages; -- nt->mem_map = vt->mem_map; -- nt->start_paddr = 0; -- nt->start_mapnr = 0; -- return; -- } -+ if (!(vt->flags & 
(NODES|NODES_ONLINE)) && initialize) { -+ nt = &vt->node_table[0]; -+ nt->node_id = 0; -+ if (symbol_exists("contig_page_data")) -+ nt->pgdat = symbol_value("contig_page_data"); -+ else -+ nt->pgdat = 0; -+ nt->size = vt->total_pages; -+ nt->mem_map = vt->mem_map; -+ nt->start_paddr = 0; -+ nt->start_mapnr = 0; -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "node_table[%d]: \n", 0); -+ fprintf(fp, " id: %d\n", nt->node_id); -+ fprintf(fp, " pgdat: %lx\n", nt->pgdat); -+ fprintf(fp, " size: %ld\n", nt->size); -+ fprintf(fp, " present: %ld\n", nt->present); -+ fprintf(fp, " mem_map: %lx\n", nt->mem_map); -+ fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); -+ fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); -+ } -+ return; - } - -- if (initialize) -- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); -- else -+ if (initialize) { -+ /* -+ * This order may have to change based upon architecture... -+ */ -+ if (symbol_exists("pgdat_list") && -+ (VALID_MEMBER(pglist_data_node_next) || -+ VALID_MEMBER(pglist_data_pgdat_next))) { -+ get_symbol_data("pgdat_list", sizeof(void *), &pgdat); -+ vt->flags &= ~NODES_ONLINE; -+ } else if (vt->flags & NODES_ONLINE) { -+ if ((node = next_online_node(0)) < 0) { -+ error(WARNING, -+ "cannot determine first node from node_online_map\n\n"); -+ return; -+ } -+ if (!(pgdat = next_online_pgdat(node))) { -+ error(WARNING, -+ "cannot determine pgdat list for this kernel/architecture\n\n"); -+ return; -+ } -+ } -+ } else - pgdat = vt->node_table[0].pgdat; - -- for (n = 0; pgdat; n++) { -+ for (n = 0, badaddr = FALSE; pgdat; n++) { - if (n >= vt->numnodes) - error(FATAL, "numnodes out of sync with pgdat_list?\n"); - -@@ -9794,9 +11390,14 @@ - readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, - sizeof(int), "pglist node_id", FAULT_ON_ERROR); - -- readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, -- &node_mem_map, sizeof(ulong), -- "node_mem_map", FAULT_ON_ERROR); -+ if (VALID_MEMBER(pglist_data_node_mem_map)) { -+ readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, -+ &node_mem_map, sizeof(ulong), -+ "node_mem_map", FAULT_ON_ERROR); -+ } else { -+ node_mem_map = BADADDR; -+ badaddr = TRUE; -+ } - - if (VALID_MEMBER(pglist_data_node_start_paddr)) - readmem(pgdat+OFFSET(pglist_data_node_start_paddr), -@@ -9808,6 +11409,11 @@ - "pglist node_start_pfn", FAULT_ON_ERROR); - node_start_mapnr = node_start_pfn; - node_start_paddr = PTOB(node_start_pfn); -+ if (badaddr && IS_SPARSEMEM()) { -+ phys = PTOB(node_start_pfn); -+ if (phys_to_page(phys, &pp)) -+ node_mem_map = pp; -+ } - } else error(INFO, - "cannot determine zone starting physical address\n"); - -@@ -9827,6 +11433,13 @@ - node_size = node_spanned_pages; - } else error(INFO, "cannot determine zone size\n"); - -+ if (VALID_MEMBER(pglist_data_node_present_pages)) -+ readmem(pgdat+OFFSET(pglist_data_node_present_pages), -+ KVADDR, &node_present_pages, sizeof(ulong), -+ "pglist node_present_pages", FAULT_ON_ERROR); -+ else -+ node_present_pages = 0; -+ - readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata, - sizeof(ulong), "pglist bdata", FAULT_ON_ERROR); - -@@ -9837,9 +11450,21 @@ - nt->size = 0; /* initialize below */ - else - nt->size = node_size; -+ nt->present = node_present_pages; - nt->mem_map = node_mem_map; - nt->start_paddr = node_start_paddr; - nt->start_mapnr = node_start_mapnr; -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "node_table[%d]: \n", n); -+ fprintf(fp, " id: %d\n", nt->node_id); -+ fprintf(fp, " pgdat: %lx\n", nt->pgdat); -+ fprintf(fp, " size: %ld\n", nt->size); -+ fprintf(fp, " 
present: %ld\n", nt->present); -+ fprintf(fp, " mem_map: %lx\n", nt->mem_map); -+ fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); -+ fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); -+ } - } - - if (!initialize) { -@@ -9896,9 +11521,10 @@ - } - - node_zones = pgdat + OFFSET(pglist_data_node_zones); -+ cum_zone_size = 0; - for (i = 0; i < vt->nr_zones; i++) { - if (CRASHDEBUG(7)) -- fprintf(fp, "zone at %lx\n", node_zones); -+ fprintf(fp, "zone %d at %lx\n", i, node_zones); - - if (VALID_MEMBER(zone_struct_size)) - readmem(node_zones+OFFSET(zone_struct_size), -@@ -9915,6 +11541,7 @@ - "zone spanned_pages", FAULT_ON_ERROR); - } else error(FATAL, - "zone_struct has neither size nor memsize field\n"); -+ - readmem(node_zones+ - OFFSET_OPTION(zone_struct_free_pages, - zone_free_pages), KVADDR, &free_pages, -@@ -9926,12 +11553,24 @@ - if (!read_string(value, buf1, BUFSIZE-1)) - sprintf(buf1, "(unknown) "); - if (VALID_STRUCT(zone_struct)) { -- readmem(node_zones+ -- OFFSET(zone_struct_zone_start_paddr), -- KVADDR, &zone_start_paddr, -- sizeof(ulong), -- "node_zones zone_start_paddr", -- FAULT_ON_ERROR); -+ if (VALID_MEMBER(zone_struct_zone_start_paddr)) -+ { -+ readmem(node_zones+OFFSET -+ (zone_struct_zone_start_paddr), -+ KVADDR, &zone_start_paddr, -+ sizeof(ulong), -+ "node_zones zone_start_paddr", -+ FAULT_ON_ERROR); -+ } else { -+ readmem(node_zones+ -+ OFFSET(zone_struct_zone_start_pfn), -+ KVADDR, &zone_start_pfn, -+ sizeof(ulong), -+ "node_zones zone_start_pfn", -+ FAULT_ON_ERROR); -+ zone_start_paddr = -+ PTOB(zone_start_pfn); -+ } - readmem(node_zones+ - OFFSET(zone_struct_zone_start_mapnr), - KVADDR, &zone_start_mapnr, -@@ -9946,28 +11585,65 @@ - "node_zones zone_start_pfn", - FAULT_ON_ERROR); - zone_start_paddr = PTOB(zone_start_pfn); -- readmem(node_zones+ -+ -+ if (IS_SPARSEMEM()) { -+ zone_mem_map = 0; -+ zone_start_mapnr = 0; -+ if (zone_size) { -+ phys = PTOB(zone_start_pfn); -+ zone_start_mapnr = phys/PAGESIZE(); -+ } -+ -+ } else if (!(vt->flags & NODES) && -+ INVALID_MEMBER(zone_zone_mem_map)) { -+ readmem(pgdat+OFFSET(pglist_data_node_mem_map), -+ KVADDR, &zone_mem_map, sizeof(void *), -+ "contig_page_data mem_map", FAULT_ON_ERROR); -+ if (zone_size) -+ zone_mem_map += cum_zone_size * SIZE(page); -+ } else readmem(node_zones+ - OFFSET(zone_zone_mem_map), - KVADDR, &zone_mem_map, - sizeof(ulong), - "node_zones zone_mem_map", - FAULT_ON_ERROR); -+ - if (zone_mem_map) - zone_start_mapnr = - (zone_mem_map - node_mem_map) / - SIZE(page); -- else -+ else if (!IS_SPARSEMEM()) - zone_start_mapnr = 0; - } -- readmem(node_zones+ -- OFFSET_OPTION(zone_struct_zone_mem_map, -- zone_zone_mem_map), KVADDR, &zone_mem_map, -- sizeof(ulong), "node_zones zone_mem_map", -- FAULT_ON_ERROR); -+ -+ if (IS_SPARSEMEM()) { -+ zone_mem_map = 0; -+ if (zone_size) { -+ phys = PTOB(zone_start_pfn); -+ if (phys_to_page(phys, &pp)) -+ zone_mem_map = pp; -+ } -+ } else if (!(vt->flags & NODES) && -+ INVALID_MEMBER(zone_struct_zone_mem_map) && -+ INVALID_MEMBER(zone_zone_mem_map)) { -+ readmem(pgdat+OFFSET(pglist_data_node_mem_map), -+ KVADDR, &zone_mem_map, sizeof(void *), -+ "contig_page_data mem_map", FAULT_ON_ERROR); -+ if (zone_size) -+ zone_mem_map += cum_zone_size * SIZE(page); -+ else -+ zone_mem_map = 0; -+ } else -+ readmem(node_zones+ -+ OFFSET_OPTION(zone_struct_zone_mem_map, -+ zone_zone_mem_map), KVADDR, &zone_mem_map, -+ sizeof(ulong), "node_zones zone_mem_map", -+ FAULT_ON_ERROR); - - if (!initialize) { - fprintf(fp, " %2d %-9s %7ld ", - i, buf1, zone_size); -+ cum_zone_size += 
zone_size; - fprintf(fp, "%s %s %s\n", - mkstring(buf1, VADDR_PRLEN, - RJUST|LONG_HEX,MKSTR(zone_mem_map)), -@@ -9981,12 +11657,22 @@ - node_zones += SIZE_OPTION(zone_struct, zone); - } - -- if (initialize) -- readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, -- pglist_data_pgdat_next), KVADDR, -- &pgdat, sizeof(void *), "pglist_data node_next", -- FAULT_ON_ERROR); -- else { -+ if (initialize) { -+ if (vt->flags & NODES_ONLINE) { -+ if ((node = next_online_node(node+1)) < 0) -+ pgdat = 0; -+ else if (!(pgdat = next_online_pgdat(node))) { -+ error(WARNING, -+ "cannot determine pgdat list for this kernel/architecture (node %d)\n\n", -+ node); -+ pgdat = 0; -+ } -+ } else -+ readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, -+ pglist_data_pgdat_next), KVADDR, -+ &pgdat, sizeof(void *), "pglist_data node_next", -+ FAULT_ON_ERROR); -+ } else { - if ((n+1) < vt->numnodes) - pgdat = vt->node_table[n+1].pgdat; - else -@@ -9994,8 +11680,15 @@ - } - } - -- if (n != vt->numnodes) -- error(FATAL, "numnodes out of sync with pgdat_list?\n"); -+ if (n != vt->numnodes) { -+ if (CRASHDEBUG(2)) -+ error(NOTE, "changing numnodes from %d to %d\n", -+ vt->numnodes, n); -+ vt->numnodes = n; -+ } -+ -+ if (!initialize && IS_SPARSEMEM()) -+ dump_mem_sections(); - } - - /* -@@ -10011,20 +11704,25 @@ - * Override numnodes -- some kernels may leave it at 1 on a system - * with multiple memory nodes. - */ -- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); -+ if ((vt->flags & NODES) && (VALID_MEMBER(pglist_data_node_next) || -+ VALID_MEMBER(pglist_data_pgdat_next))) { - -- for (n = 0; pgdat; n++) { -- readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, -- pglist_data_pgdat_next), KVADDR, -- &pgdat, sizeof(void *), "pglist_data node_next", -- FAULT_ON_ERROR); -- } -- if (n != vt->numnodes) { -- if (CRASHDEBUG(2)) -- error(NOTE, "changing numnodes from %d to %d\n", -- vt->numnodes, n); -- vt->numnodes = n; -- } -+ get_symbol_data("pgdat_list", sizeof(void *), &pgdat); -+ -+ for (n = 0; pgdat; n++) { -+ readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, -+ pglist_data_pgdat_next), KVADDR, -+ &pgdat, sizeof(void *), "pglist_data node_next", -+ FAULT_ON_ERROR); -+ } -+ if (n != vt->numnodes) { -+ if (CRASHDEBUG(2)) -+ error(NOTE, "changing numnodes from %d to %d\n", -+ vt->numnodes, n); -+ vt->numnodes = n; -+ } -+ } else -+ vt->flags &= ~NODES; - - if (!(vt->node_table = (struct node_table *) - malloc(sizeof(struct node_table) * vt->numnodes))) -@@ -10072,6 +11770,9 @@ - { - uint psz; - -+ if (machdep->pagesize) -+ return machdep->pagesize; -+ - if (REMOTE_MEMSRC()) - return remote_page_size(); - -@@ -10081,6 +11782,14 @@ - psz = diskdump_page_size(); - break; - -+ case XENDUMP: -+ psz = xendump_page_size(); -+ break; -+ -+ case KDUMP: -+ psz = kdump_page_size(); -+ break; -+ - case NETDUMP: - psz = netdump_page_size(); - break; -@@ -10115,6 +11824,50 @@ - } - - /* -+ * If the page size cannot be determined by the dumpfile (like kdump), -+ * and the processor default cannot be used, allow the force-feeding -+ * of a crash command-line page size option. 
-+ */ -+void -+force_page_size(char *s) -+{ -+ int k, err; -+ ulong psize; -+ -+ k = 1; -+ err = FALSE; -+ -+ switch (LASTCHAR(s)) -+ { -+ case 'k': -+ case 'K': -+ LASTCHAR(s) = NULLCHAR; -+ if (!decimal(s, 0)) { -+ err = TRUE; -+ break; -+ } -+ k = 1024; -+ -+ /* FALLTHROUGH */ -+ -+ default: -+ if (decimal(s, 0)) -+ psize = dtol(s, QUIET|RETURN_ON_ERROR, &err); -+ else if (hexadecimal(s, 0)) -+ psize = htol(s, QUIET|RETURN_ON_ERROR, &err); -+ else -+ err = TRUE; -+ break; -+ } -+ -+ if (err) -+ error(INFO, "invalid page size: %s\n", s); -+ else -+ machdep->pagesize = psize * k; -+} -+ -+ -+/* - * Return the vmalloc address referenced by the first vm_struct - * on the vmlist. This can normally be used by the machine-specific - * xxx_vmalloc_start() routines. -@@ -10127,6 +11880,9 @@ - - get_symbol_data("vmlist", sizeof(void *), &vmlist); - -+ if (!vmlist) -+ return 0; -+ - if (!readmem(vmlist+OFFSET(vm_struct_addr), KVADDR, &addr, - sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) - non_matching_kernel(); -@@ -10186,6 +11942,10 @@ - retval = remote_memory_used(); - else if (pc->flags & NETDUMP) - retval = netdump_memory_used(); -+ else if (pc->flags & KDUMP) -+ retval = kdump_memory_used(); -+ else if (pc->flags & XENDUMP) -+ retval = xendump_memory_used(); - else if (pc->flags & DISKDUMP) - retval = diskdump_memory_used(); - else if (pc->flags & LKCD) -@@ -10201,6 +11961,10 @@ - retval = remote_free_memory(); - else if (pc->flags & NETDUMP) - retval = netdump_free_memory(); -+ else if (pc->flags & KDUMP) -+ retval = kdump_free_memory(); -+ else if (pc->flags & XENDUMP) -+ retval = xendump_free_memory(); - else if (pc->flags & DISKDUMP) - retval = diskdump_free_memory(); - else if (pc->flags & LKCD) -@@ -10216,6 +11980,10 @@ - retval = remote_memory_dump(0); - else if (pc->flags & NETDUMP) - retval = netdump_memory_dump(fp); -+ else if (pc->flags & KDUMP) -+ retval = kdump_memory_dump(fp); -+ else if (pc->flags & XENDUMP) -+ retval = xendump_memory_dump(fp); - else if (pc->flags & DISKDUMP) - retval = diskdump_memory_dump(fp); - else if (pc->flags & LKCD) -@@ -10238,3 +12006,563 @@ - return retval; - } - -+/* -+ * Functions for sparse mem support -+ */ -+ulong -+sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr) -+{ -+ return coded_mem_map + -+ (section_nr_to_pfn(section_nr) * SIZE(page)); -+} -+ -+void -+sparse_mem_init(void) -+{ -+ ulong addr; -+ ulong mem_section_size; -+ -+ if (!IS_SPARSEMEM()) -+ return; -+ -+ MEMBER_OFFSET_INIT(mem_section_section_mem_map, "mem_section", -+ "section_mem_map"); -+ STRUCT_SIZE_INIT(mem_section, "mem_section"); -+ -+ if (!MAX_PHYSMEM_BITS()) -+ error(FATAL, -+ "CONFIG_SPARSEMEM kernels not supported for this architecture\n"); -+ -+ if (get_array_length("mem_section", NULL, 0) == -+ (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) -+ vt->flags |= SPARSEMEM_EX; -+ -+ if (IS_SPARSEMEM_EX()) { -+ machdep->sections_per_root = _SECTIONS_PER_ROOT_EXTREME(); -+ mem_section_size = sizeof(void *) * NR_SECTION_ROOTS(); -+ } else { -+ machdep->sections_per_root = _SECTIONS_PER_ROOT(); -+ mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS(); -+ } -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "PAGESIZE=%d\n",PAGESIZE()); -+ fprintf(fp,"mem_section_size = %ld\n", mem_section_size); -+ fprintf(fp, "NR_SECTION_ROOTS = %ld\n", NR_SECTION_ROOTS()); -+ fprintf(fp, "NR_MEM_SECTIONS = %ld\n", NR_MEM_SECTIONS()); -+ fprintf(fp, "SECTIONS_PER_ROOT = %ld\n", SECTIONS_PER_ROOT() ); -+ fprintf(fp, "SECTION_ROOT_MASK = 0x%lx\n", SECTION_ROOT_MASK()); -+ 
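The sparse-memory helpers added in this area reduce to one piece of arithmetic: the encoded section_mem_map value has the section's starting pfn folded out, so adding section_nr_to_pfn(nr) scaled by the page structure size recovers the section's page array, and a pfn then indexes into it. A rough standalone rendering of that arithmetic follows; SECTION_SIZE_BITS, PAGE_SHIFT and SIZEOF_PAGE are placeholder values chosen for illustration, not values read from any particular kernel.

/*
 * Standalone sketch of the SPARSEMEM arithmetic behind
 * sparse_decode_mem_map() and pfn_to_map().
 */
#include <stdio.h>

#define SECTION_SIZE_BITS	27UL	/* assumed 128 MB section span */
#define PAGE_SHIFT		12UL	/* assumed 4 KB pages */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define SIZEOF_PAGE		56UL	/* placeholder sizeof(struct page) */

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

static unsigned long section_nr_to_pfn(unsigned long nr)
{
	return nr << PFN_SECTION_SHIFT;
}

/*
 * The coded mem_map has the section's starting pfn folded out; adding it
 * back, scaled by the page struct size, yields the virtual address of the
 * section's page array.
 */
static unsigned long decode_mem_map(unsigned long coded, unsigned long nr)
{
	return coded + section_nr_to_pfn(nr) * SIZEOF_PAGE;
}

/* Virtual address of the page struct for a pfn within its section. */
static unsigned long pfn_to_page_addr(unsigned long coded, unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	unsigned long mem_map = decode_mem_map(coded, nr);

	return mem_map + (pfn - section_nr_to_pfn(nr)) * SIZEOF_PAGE;
}

int main(void)
{
	unsigned long coded = 0xc1000000UL;	/* made-up example value */
	unsigned long pfn = (2UL << PFN_SECTION_SHIFT) + 5;

	printf("page struct at %lx\n", pfn_to_page_addr(coded, pfn));
	return 0;
}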
fprintf(fp, "PAGES_PER_SECTION = %ld\n", PAGES_PER_SECTION()); -+ } -+ -+ if (!(vt->mem_sec = malloc(mem_section_size))) -+ error(FATAL, "cannot malloc mem_sec cache\n"); -+ -+ addr = symbol_value("mem_section"); -+ readmem(addr, KVADDR,vt->mem_sec ,mem_section_size, -+ "memory section root table", FAULT_ON_ERROR); -+} -+ -+char -+*read_mem_section(ulong addr) -+{ -+ static char *mem_section; -+ -+ if (!mem_section) { -+ mem_section = GETBUF(SIZE(mem_section)); -+ } -+ -+ if (!IS_KVADDR(addr)) -+ return 0; -+ -+ readmem(addr, KVADDR, mem_section, SIZE(mem_section), -+ "memory section", FAULT_ON_ERROR); -+ -+ return mem_section; -+} -+ -+ulong -+nr_to_section(ulong nr) -+{ -+ ulong addr; -+ ulong *mem_sec = vt->mem_sec; -+ -+ if (!IS_KVADDR(mem_sec[SECTION_NR_TO_ROOT(nr)])) -+ return 0; -+ -+ if (IS_SPARSEMEM_EX()) -+ addr = mem_sec[SECTION_NR_TO_ROOT(nr)] + -+ (nr & SECTION_ROOT_MASK()) * SIZE(mem_section); -+ else -+ addr = mem_sec[0] + (nr & SECTION_ROOT_MASK()) * SIZE(mem_section); -+ -+ if (!IS_KVADDR(addr)) -+ return 0; -+ -+ return addr; -+} -+ -+/* -+ * We use the lower bits of the mem_map pointer to store -+ * a little bit of information. There should be at least -+ * 3 bits here due to 32-bit alignment. -+ */ -+#define SECTION_MARKED_PRESENT (1UL<<0) -+#define SECTION_HAS_MEM_MAP (1UL<<1) -+#define SECTION_MAP_LAST_BIT (1UL<<2) -+#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) -+ -+ -+int -+valid_section(ulong addr) -+{ -+ char *mem_section; -+ -+ if ((mem_section = read_mem_section(addr))) -+ return (ULONG(mem_section + -+ OFFSET(mem_section_section_mem_map)) && -+ SECTION_MARKED_PRESENT); -+ return 0; -+} -+ -+int -+section_has_mem_map(ulong addr) -+{ -+ char *mem_section; -+ -+ if ((mem_section = read_mem_section(addr))) -+ return (ULONG(mem_section + -+ OFFSET(mem_section_section_mem_map)) -+ && SECTION_HAS_MEM_MAP); -+ return 0; -+} -+ -+ulong -+section_mem_map_addr(ulong addr) -+{ -+ char *mem_section; -+ ulong map; -+ -+ if ((mem_section = read_mem_section(addr))) { -+ map = ULONG(mem_section + -+ OFFSET(mem_section_section_mem_map)); -+ map &= SECTION_MAP_MASK; -+ return map; -+ } -+ return 0; -+} -+ -+ -+ulong -+valid_section_nr(ulong nr) -+{ -+ ulong addr = nr_to_section(nr); -+ -+ if (valid_section(addr)) -+ return addr; -+ -+ return 0; -+} -+ -+ulong -+pfn_to_map(ulong pfn) -+{ -+ ulong section, page_offset; -+ ulong section_nr; -+ ulong coded_mem_map, mem_map; -+ -+ section_nr = pfn_to_section_nr(pfn); -+ if (!(section = valid_section_nr(section_nr))) -+ return 0; -+ -+ if (section_has_mem_map(section)) { -+ page_offset = pfn - section_nr_to_pfn(section_nr); -+ coded_mem_map = section_mem_map_addr(section); -+ mem_map = sparse_decode_mem_map(coded_mem_map, section_nr) + -+ (page_offset * SIZE(page)); -+ return mem_map; -+ } -+ -+ return 0; -+} -+ -+void -+dump_mem_sections(void) -+{ -+ ulong nr,addr; -+ ulong nr_mem_sections; -+ ulong coded_mem_map, mem_map, pfn; -+ char buf1[BUFSIZE]; -+ char buf2[BUFSIZE]; -+ char buf3[BUFSIZE]; -+ char buf4[BUFSIZE]; -+ -+ nr_mem_sections = NR_MEM_SECTIONS(); -+ -+ fprintf(fp, "\n"); -+ pad_line(fp, BITS32() ? 
59 : 67, '-'); -+ fprintf(fp, "\n\nNR %s %s %s PFN\n", -+ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SECTION"), -+ mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "CODED_MEM_MAP"), -+ mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); -+ -+ for (nr = 0; nr <= nr_mem_sections ; nr++) { -+ if ((addr = valid_section_nr(nr))) { -+ coded_mem_map = section_mem_map_addr(addr); -+ mem_map = sparse_decode_mem_map(coded_mem_map,nr); -+ pfn = section_nr_to_pfn(nr); -+ -+ fprintf(fp, "%2ld %s %s %s %s\n", -+ nr, -+ mkstring(buf1, VADDR_PRLEN, -+ CENTER|LONG_HEX, MKSTR(addr)), -+ mkstring(buf2, VADDR_PRLEN, -+ CENTER|LONG_HEX|RJUST, MKSTR(coded_mem_map)), -+ mkstring(buf3, VADDR_PRLEN, -+ CENTER|LONG_HEX|RJUST, MKSTR(mem_map)), -+ pc->output_radix == 10 ? -+ mkstring(buf4, VADDR_PRLEN, -+ LONG_DEC|LJUST, MKSTR(pfn)) : -+ mkstring(buf4, VADDR_PRLEN, -+ LONG_HEX|LJUST, MKSTR(pfn))); -+ } -+ } -+} -+ -+void -+list_mem_sections(void) -+{ -+ ulong nr,addr; -+ ulong nr_mem_sections = NR_MEM_SECTIONS(); -+ ulong coded_mem_map; -+ -+ for (nr = 0; nr <= nr_mem_sections ; nr++) { -+ if ((addr = valid_section_nr(nr))) { -+ coded_mem_map = section_mem_map_addr(addr); -+ fprintf(fp, -+ "nr=%ld section = %lx coded_mem_map=%lx pfn=%ld mem_map=%lx\n", -+ nr, -+ addr, -+ coded_mem_map, -+ section_nr_to_pfn(nr), -+ sparse_decode_mem_map(coded_mem_map,nr)); -+ } -+ } -+} -+ -+/* -+ * For kernels containing the node_online_map, return -+ * the number of node bits set. -+ */ -+static int -+get_nodes_online(void) -+{ -+ int i, len, online; -+ struct gnu_request req; -+ ulong *maskptr; -+ -+ if (!symbol_exists("node_online_map")) -+ return 0; -+ -+ if (LKCD_KERNTYPES()) { -+ if ((len = STRUCT_SIZE("nodemask_t")) < 0) -+ error(FATAL, "cannot determine type nodemask_t\n"); -+ } else -+ len = get_symbol_type("node_online_map", NULL, &req) -+ == TYPE_CODE_UNDEF ? sizeof(ulong) : req.length; -+ -+ if (!(vt->node_online_map = (ulong *)malloc(len))) -+ error(FATAL, "cannot malloc node_online_map\n"); -+ -+ if (!readmem(symbol_value("node_online_map"), KVADDR, -+ (void *)&vt->node_online_map[0], len, "node_online_map", -+ QUIET|RETURN_ON_ERROR)) -+ error(FATAL, "cannot read node_online_map\n"); -+ -+ vt->node_online_map_len = len/sizeof(ulong); -+ -+ online = 0; -+ -+ maskptr = (ulong *)vt->node_online_map; -+ for (i = 0; i < vt->node_online_map_len; i++, maskptr++) -+ online += count_bits_long(*maskptr); -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "node_online_map: ["); -+ for (i = 0; i < vt->node_online_map_len; i++) -+ fprintf(fp, "%s%lx", i ? ", " : "", vt->node_online_map[i]); -+ fprintf(fp, "] -> nodes online: %d\n", online); -+ } -+ -+ return online; -+} -+ -+/* -+ * Return the next node index, with "first" being the first acceptable node. -+ */ -+static int -+next_online_node(int first) -+{ -+ int i, j, node; -+ ulong mask, *maskptr; -+ -+ if ((first/BITS_PER_LONG) >= vt->node_online_map_len) { -+ error(INFO, "next_online_node: %d is too large!\n", first); -+ return -1; -+ } -+ -+ maskptr = (ulong *)vt->node_online_map; -+ for (i = node = 0; i < vt->node_online_map_len; i++, maskptr++) { -+ mask = *maskptr; -+ for (j = 0; j < BITS_PER_LONG; j++, node++) { -+ if (mask & 1) { -+ if (node >= first) -+ return node; -+ } -+ mask >>= 1; -+ } -+ } -+ -+ return -1; -+} -+ -+/* -+ * Modify appropriately for architecture/kernel nuances. 
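next_online_node() above is a plain bitmask scan: node_online_map is cached as an array of ulong words, and the next set bit at or after the requested node number names the next online node. A compact standalone version of the same scan follows; next_set_bit() is an illustrative name, not a crash or kernel symbol.

/*
 * Illustrative sketch of the node_online_map scan performed by
 * next_online_node(): walk an array of mask words and return the index
 * of the next set bit at or after "first", or -1 if none remains.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static int next_set_bit(const unsigned long *map, int words, int first)
{
	int i, j, bit = 0;

	if (first / (int)BITS_PER_LONG >= words)
		return -1;			/* beyond the map */

	for (i = 0; i < words; i++) {
		unsigned long mask = map[i];

		for (j = 0; j < (int)BITS_PER_LONG; j++, bit++) {
			if ((mask & 1) && bit >= first)
				return bit;
			mask >>= 1;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long online[1] = { 0x15 };	/* nodes 0, 2 and 4 online */
	int node;

	for (node = next_set_bit(online, 1, 0); node >= 0;
	     node = next_set_bit(online, 1, node + 1))
		printf("online node: %d\n", node);
	return 0;
}

dump_memory_nodes() pairs each node number returned this way with next_online_pgdat() to locate that node's pg_data_t.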
-+ */ -+static ulong -+next_online_pgdat(int node) -+{ -+ char buf[BUFSIZE]; -+ ulong pgdat; -+ -+ /* -+ * Default -- look for type: struct pglist_data node_data[] -+ */ -+ if (LKCD_KERNTYPES()) { -+ if (!kernel_symbol_exists("node_data")) -+ goto pgdat2; -+ /* -+ * Just index into node_data[] without checking that it is -+ * an array; kerntypes have no such symbol information. -+ */ -+ } else { -+ if (get_symbol_type("node_data", NULL, NULL) != TYPE_CODE_ARRAY) -+ goto pgdat2; -+ -+ open_tmpfile(); -+ sprintf(buf, "whatis node_data"); -+ if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { -+ close_tmpfile(); -+ goto pgdat2; -+ } -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (STRNEQ(buf, "type = ")) -+ break; -+ } -+ close_tmpfile(); -+ -+ if ((!strstr(buf, "struct pglist_data *") && -+ !strstr(buf, "pg_data_t *")) || -+ (count_chars(buf, '[') != 1) || -+ (count_chars(buf, ']') != 1)) -+ goto pgdat2; -+ } -+ -+ if (!readmem(symbol_value("node_data") + (node * sizeof(void *)), -+ KVADDR, &pgdat, sizeof(void *), "node_data", RETURN_ON_ERROR) || -+ !IS_KVADDR(pgdat)) -+ goto pgdat2; -+ -+ return pgdat; -+ -+pgdat2: -+ if (LKCD_KERNTYPES()) { -+ if (!kernel_symbol_exists("pgdat_list")) -+ goto pgdat3; -+ } else { -+ if (get_symbol_type("pgdat_list",NULL,NULL) != TYPE_CODE_ARRAY) -+ goto pgdat3; -+ -+ open_tmpfile(); -+ sprintf(buf, "whatis pgdat_list"); -+ if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { -+ close_tmpfile(); -+ goto pgdat3; -+ } -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (STRNEQ(buf, "type = ")) -+ break; -+ } -+ close_tmpfile(); -+ -+ if ((!strstr(buf, "struct pglist_data *") && -+ !strstr(buf, "pg_data_t *")) || -+ (count_chars(buf, '[') != 1) || -+ (count_chars(buf, ']') != 1)) -+ goto pgdat3; -+ } -+ -+ if (!readmem(symbol_value("pgdat_list") + (node * sizeof(void *)), -+ KVADDR, &pgdat, sizeof(void *), "pgdat_list", RETURN_ON_ERROR) || -+ !IS_KVADDR(pgdat)) -+ goto pgdat3; -+ -+ return pgdat; -+ -+pgdat3: -+ if (symbol_exists("contig_page_data") && (node == 0)) -+ return symbol_value("contig_page_data"); -+ -+ return 0; -+} -+ -+/* -+ * Make the vm_stat[] array contents easily accessible. -+ */ -+static int -+vm_stat_init(void) -+{ -+ char buf[BUFSIZE]; -+ char *arglist[MAXARGS]; -+ int i, c, stringlen, total; -+ struct gnu_request *req; -+ char *start; -+ -+ if (vt->flags & VM_STAT) -+ return TRUE; -+ -+ if ((vt->nr_vm_stat_items == -1) || !symbol_exists("vm_stat")) -+ goto bailout; -+ -+ /* -+ * look for type: type = atomic_long_t [] -+ */ -+ if (LKCD_KERNTYPES()) { -+ if (!symbol_exists("vm_stat")) -+ goto bailout; -+ /* -+ * Just assume that vm_stat is an array; there is -+ * no symbol info in a kerntypes file. 
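next_online_pgdat() falls back from node_data[] to pgdat_list[] to contig_page_data, and before indexing either array it sanity-checks the gdb "whatis" reply for an array of pglist_data pointers. A minimal standalone version of that string check follows; looks_like_pgdat_array() is an illustrative helper, and count_chars() here is a local reimplementation rather than crash's utility of the same name.

/*
 * Standalone sketch of the symbol-shape check made before indexing
 * node_data[] or pgdat_list[]: the "whatis" reply must describe an
 * array of pglist_data pointers.
 */
#include <stdio.h>
#include <string.h>

static int count_chars(const char *s, char c)
{
	int n = 0;

	while (*s)
		if (*s++ == c)
			n++;
	return n;
}

static int looks_like_pgdat_array(const char *whatis_reply)
{
	/* Expect something like: "type = struct pglist_data *[4]" */
	if (!strstr(whatis_reply, "struct pglist_data *") &&
	    !strstr(whatis_reply, "pg_data_t *"))
		return 0;
	return count_chars(whatis_reply, '[') == 1 &&
	       count_chars(whatis_reply, ']') == 1;
}

int main(void)
{
	printf("%d\n", looks_like_pgdat_array("type = struct pglist_data *[4]"));
	printf("%d\n", looks_like_pgdat_array("type = struct pglist_data"));
	return 0;
}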
-+ */ -+ } else { -+ if (!symbol_exists("vm_stat") || -+ get_symbol_type("vm_stat", NULL, NULL) != TYPE_CODE_ARRAY) -+ goto bailout; -+ -+ open_tmpfile(); -+ sprintf(buf, "whatis vm_stat"); -+ if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { -+ close_tmpfile(); -+ goto bailout; -+ } -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (STRNEQ(buf, "type = ")) -+ break; -+ } -+ close_tmpfile(); -+ -+ if (!strstr(buf, "atomic_long_t") || -+ (count_chars(buf, '[') != 1) || -+ (count_chars(buf, ']') != 1)) -+ goto bailout; -+ } -+ -+ open_tmpfile(); -+ req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); -+ req->command = GNU_GET_DATATYPE; -+ req->name = "zone_stat_item"; -+ req->flags = GNU_PRINT_ENUMERATORS; -+ gdb_interface(req); -+ FREEBUF(req); -+ -+ stringlen = 1; -+ -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "{") || strstr(buf, "}")) -+ continue; -+ clean_line(buf); -+ c = parse_line(buf, arglist); -+ if (STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) { -+ vt->nr_vm_stat_items = atoi(arglist[2]); -+ break; -+ } else -+ stringlen += strlen(arglist[0]); -+ } -+ -+ total = stringlen + vt->nr_vm_stat_items + -+ (sizeof(void *) * vt->nr_vm_stat_items); -+ if (!(vt->vm_stat_items = (char **)malloc(total))) { -+ close_tmpfile(); -+ error(FATAL, "cannot malloc vm_area_struct cache\n"); -+ } -+ -+ start = (char *)&vt->vm_stat_items[vt->nr_vm_stat_items]; -+ -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "{") || strstr(buf, "}")) -+ continue; -+ c = parse_line(buf, arglist); -+ i = atoi(arglist[2]); -+ if (i < vt->nr_vm_stat_items) { -+ vt->vm_stat_items[i] = start; -+ strcpy(start, arglist[0]); -+ start += strlen(arglist[0]) + 1; -+ } -+ } -+ close_tmpfile(); -+ -+ vt->flags |= VM_STAT; -+ return TRUE; -+ -+bailout: -+ vt->nr_vm_stat_items = -1; -+ return FALSE; -+} -+ -+/* -+ * Either dump all vm_stat entries, or return the value of -+ * the specified vm_stat item. -+ */ -+static int -+dump_vm_stat(char *item, long *retval) -+{ -+ char *buf; -+ ulong *vp; -+ int i; -+ -+ if (!vm_stat_init()) { -+ if (!item) -+ error(FATAL, -+ "vm_stat not available in this kernel\n"); -+ return FALSE; -+ } -+ -+ buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items); -+ -+ readmem(symbol_value("vm_stat"), KVADDR, buf, -+ sizeof(ulong) * vt->nr_vm_stat_items, -+ "vm_stat", FAULT_ON_ERROR); -+ -+ -+ if (!item) { -+ vp = (ulong *)buf; -+ for (i = 0; i < vt->nr_vm_stat_items; i++) -+ fprintf(fp, "%20s: %ld\n", vt->vm_stat_items[i], vp[i]); -+ return TRUE; -+ } -+ -+ vp = (ulong *)buf; -+ for (i = 0; i < vt->nr_vm_stat_items; i++) { -+ if (STREQ(vt->vm_stat_items[i], item)) { -+ *retval = vp[i]; -+ return TRUE; -+ } -+ } -+ -+ return FALSE; -+} ---- crash/filesys.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/filesys.c 2007-07-20 11:11:57.000000000 -0400 -@@ -1,8 +1,8 @@ - /* filesys.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -18,7 +18,7 @@ - #include "defs.h" - #include - --static void show_mounts(ulong, int); -+static void show_mounts(ulong, int, struct task_context *); - static int find_booted_kernel(void); - static int find_booted_system_map(void); - static int verify_utsname(char *); -@@ -33,7 +33,7 @@ - static int open_file_reference(struct reference *); - static void memory_source_init(void); - static int get_pathname_component(ulong, ulong, int, char *, char *); --static ulong *get_mount_list(int *); -+static ulong *get_mount_list(int *, struct task_context *); - char *inode_type(char *, char *); - static void match_proc_version(void); - static void get_live_memory_source(void); -@@ -43,6 +43,7 @@ - static int memory_driver_init(void); - static int create_memory_device(dev_t); - static void *radix_tree_lookup(ulong, ulong, int); -+static int match_file_string(char *, char *, char *); - - #define DENTRY_CACHE (20) - #define INODE_CACHE (20) -@@ -99,6 +100,10 @@ - } - - if (pc->namelist) { -+ if (XEN_HYPER_MODE() && !pc->dumpfile) -+ error(FATAL, -+ "Xen hypervisor mode requires a dumpfile\n"); -+ - if (!pc->dumpfile && !get_proc_version()) - error(INFO, "/proc/version: %s\n", - strerror(errno)); -@@ -190,7 +195,15 @@ - if (!netdump_init(pc->dumpfile, fp)) - error(FATAL, "%s: initialization failed\n", - pc->dumpfile); -- } else if (pc->flags & NETDUMP) { -+ } else if (pc->flags & KDUMP) { -+ if (!kdump_init(pc->dumpfile, fp)) -+ error(FATAL, "%s: initialization failed\n", -+ pc->dumpfile); -+ } else if (pc->flags & XENDUMP) { -+ if (!xendump_init(pc->dumpfile, fp)) -+ error(FATAL, "%s: initialization failed\n", -+ pc->dumpfile); -+ } else if (pc->flags & DISKDUMP) { - if (!diskdump_init(pc->dumpfile, fp)) - error(FATAL, "%s: initialization failed\n", - pc->dumpfile); -@@ -217,10 +230,7 @@ - static void - match_proc_version(void) - { -- char command[BUFSIZE]; -- char buffer[BUFSIZE]; -- FILE *pipe; -- int found; -+ char buffer[BUFSIZE], *p1, *p2; - - if (pc->flags & KERNEL_DEBUG_QUERY) - return; -@@ -228,24 +238,7 @@ - if (!strlen(kt->proc_version)) - return; - -- sprintf(command, "/usr/bin/strings %s", pc->namelist); -- if ((pipe = popen(command, "r")) == NULL) { -- error(INFO, "%s: %s\n", pc->namelist, strerror(errno)); -- return; -- } -- -- found = FALSE; -- while (fgets(buffer, BUFSIZE-1, pipe)) { -- if (!strstr(buffer, "Linux version 2.")) -- continue; -- -- if (STREQ(buffer, kt->proc_version)) -- found = TRUE; -- break; -- } -- pclose(pipe); -- -- if (found) { -+ if (match_file_string(pc->namelist, kt->proc_version, buffer)) { - if (CRASHDEBUG(1)) { - fprintf(fp, "/proc/version:\n%s", kt->proc_version); - fprintf(fp, "%s:\n%s", pc->namelist, buffer); -@@ -253,7 +246,29 @@ - return; - } - -- if (find_booted_system_map()) -+ error(WARNING, "%s%sand /proc/version do not match!\n\n", -+ pc->namelist, -+ strlen(pc->namelist) > 39 ? "\n " : " "); -+ -+ /* -+ * find_booted_system_map() requires VTOP(), which used to be a -+ * hardwired masking of the kernel address. But some architectures -+ * may not know what their physical base address is at this point, -+ * and others may have different machdep->kvbase values, so for all -+ * but the 0-based kernel virtual address architectures, bail out -+ * here with a relevant error message. 
-+ */ -+ if (!machine_type("S390") && !machine_type("S390X")) { -+ p1 = &kt->proc_version[strlen("Linux version ")]; -+ p2 = strstr(p1, " "); -+ *p2 = NULLCHAR; -+ error(WARNING, "/proc/version indicates kernel version: %s\n", p1); -+ error(FATAL, "please use the vmlinux file for that kernel version, or try using\n" -+ " the System.map for that kernel version as an additional argument.\n", p1); -+ clean_exit(1); -+ } -+ -+ if (find_booted_system_map()) - pc->flags |= SYSMAP; - } - -@@ -303,14 +318,12 @@ - for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) - cnt++; - -- if ((searchdirs = (char **)malloc(cnt * sizeof(char *))) -- == NULL) { -+ if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) { - error(INFO, "/usr/src/ directory list malloc: %s\n", - strerror(errno)); - closedir(dirp); - return default_searchdirs; - } -- BZERO(searchdirs, cnt * sizeof(char *)); - - for (i = 0; i < DEFAULT_SEARCHDIRS; i++) - searchdirs[i] = default_searchdirs[i]; -@@ -345,6 +358,16 @@ - closedir(dirp); - - searchdirs[cnt] = NULL; -+ } else { -+ if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) { -+ error(INFO, "search directory list malloc: %s\n", -+ strerror(errno)); -+ closedir(dirp); -+ return default_searchdirs; -+ } -+ for (i = 0; i < DEFAULT_SEARCHDIRS; i++) -+ searchdirs[i] = default_searchdirs[i]; -+ cnt = DEFAULT_SEARCHDIRS; - } - - if (redhat_kernel_directory_v1(dirbuf)) { -@@ -483,13 +506,11 @@ - find_booted_kernel(void) - { - char kernel[BUFSIZE]; -- char command[BUFSIZE]; - char buffer[BUFSIZE]; - char **searchdirs; - int i, preferred, wrapped; - DIR *dirp; - struct dirent *dp; -- FILE *pipe; - int found; - - pc->flags |= FINDKERNEL; -@@ -538,24 +559,11 @@ - !is_elf_file(kernel)) - continue; - -- sprintf(command, "/usr/bin/strings %s", kernel); -- if ((pipe = popen(command, "r")) == NULL) { -- error(INFO, "%s: %s\n", -- kernel, strerror(errno)); -- continue; -- } -- - if (CRASHDEBUG(1)) - fprintf(fp, "find_booted_kernel: check: %s\n", - kernel); - -- while (fgets(buffer, BUFSIZE-1, pipe)) { -- if (STREQ(buffer, kt->proc_version)) { -- found = TRUE; -- break; -- } -- } -- pclose(pipe); -+ found = match_file_string(kernel, kt->proc_version, buffer); - - if (found) - break; -@@ -797,30 +805,14 @@ - static int - verify_utsname(char *system_map) - { -- char command[BUFSIZE]; - char buffer[BUFSIZE]; -- FILE *pipe; -- int found; - ulong value; - struct new_utsname new_utsname; - -- sprintf(command, "/usr/bin/strings %s", system_map); -- if ((pipe = popen(command, "r")) == NULL) -- return FALSE; -- - if (CRASHDEBUG(1)) - fprintf(fp, "verify_utsname: check: %s\n", system_map); - -- found = FALSE; -- while (fgets(buffer, BUFSIZE-1, pipe)) { -- if (strstr(buffer, "D system_utsname")) { -- found = TRUE; -- break; -- } -- } -- pclose(pipe); -- -- if (!found) -+ if (!match_file_string(system_map, "D system_utsname", buffer)) - return FALSE; - - if (extract_hex(buffer, &value, NULLCHAR, TRUE) && -@@ -1125,6 +1117,8 @@ - { - int i; - int c, found; -+ struct task_context *tc, *namespace_context; -+ ulong value; - char *spec_string; - char buf1[BUFSIZE]; - char buf2[BUFSIZE]; -@@ -1133,7 +1127,9 @@ - int flags = 0; - int save_next; - -- while ((c = getopt(argcnt, args, "if")) != EOF) { -+ namespace_context = pid_to_context(1); -+ -+ while ((c = getopt(argcnt, args, "ifn:")) != EOF) { - switch(c) - { - case 'i': -@@ -1144,6 +1140,19 @@ - flags |= MOUNT_PRINT_FILES; - break; - -+ case 'n': -+ switch (str_to_context(optarg, &value, &tc)) { -+ case STR_PID: -+ case STR_TASK: -+ namespace_context = 
tc; -+ break; -+ case STR_INVALID: -+ error(FATAL, "invalid task or pid value: %s\n", -+ optarg); -+ break; -+ } -+ break; -+ - default: - argerrs++; - break; -@@ -1162,7 +1171,7 @@ - shift_string_left(spec_string, 2); - - open_tmpfile(); -- show_mounts(0, MOUNT_PRINT_ALL); -+ show_mounts(0, MOUNT_PRINT_ALL, namespace_context); - - found = FALSE; - rewind(pc->tmpfile); -@@ -1181,16 +1190,20 @@ - continue; - - for (i = 0; i < c; i++) { -- if (STREQ(arglist[i], spec_string)) -+ if (PATHEQ(arglist[i], spec_string)) - found = TRUE; - } - if (found) { - fp = pc->saved_fp; - if (flags) { - sscanf(buf2,"%lx",&vfsmount); -- show_mounts(vfsmount, flags); -+ show_mounts(vfsmount, flags, -+ namespace_context); - } else { -- fprintf(fp, mount_hdr); -+ if (!(pc->curcmd_flags & HEADER_PRINTED)) { -+ fprintf(fp, mount_hdr); -+ pc->curcmd_flags |= HEADER_PRINTED; -+ } - fprintf(fp, buf2); - } - found = FALSE; -@@ -1200,7 +1213,7 @@ - close_tmpfile(); - } while (args[++optind]); - } else -- show_mounts(0, flags); -+ show_mounts(0, flags, namespace_context); - } - - /* -@@ -1208,7 +1221,7 @@ - */ - - static void --show_mounts(ulong one_vfsmount, int flags) -+show_mounts(ulong one_vfsmount, int flags, struct task_context *namespace_context) - { - ulong one_vfsmount_list; - long sb_s_files; -@@ -1246,7 +1259,7 @@ - mount_cnt = 1; - mntlist = &one_vfsmount_list; - } else -- mntlist = get_mount_list(&mount_cnt); -+ mntlist = get_mount_list(&mount_cnt, namespace_context); - - if (!strlen(mount_hdr)) { - devlen = strlen("DEVNAME"); -@@ -1408,11 +1421,11 @@ - * Allocate and fill a list of the currently-mounted vfsmount pointers. - */ - static ulong * --get_mount_list(int *cntptr) -+get_mount_list(int *cntptr, struct task_context *namespace_context) - { - struct list_data list_data, *ld; - int mount_cnt; -- ulong *mntlist, namespace, root; -+ ulong *mntlist, namespace, root, nsproxy, mnt_ns; - struct task_context *tc; - - ld = &list_data; -@@ -1421,9 +1434,26 @@ - if (symbol_exists("vfsmntlist")) { - get_symbol_data("vfsmntlist", sizeof(void *), &ld->start); - ld->end = symbol_value("vfsmntlist"); -+ } else if (VALID_MEMBER(task_struct_nsproxy)) { -+ tc = namespace_context; -+ -+ readmem(tc->task + OFFSET(task_struct_nsproxy), KVADDR, -+ &nsproxy, sizeof(void *), "task nsproxy", -+ FAULT_ON_ERROR); -+ if (!readmem(nsproxy + OFFSET(nsproxy_mnt_ns), KVADDR, -+ &mnt_ns, sizeof(void *), "nsproxy mnt_ns", -+ RETURN_ON_ERROR|QUIET)) -+ error(FATAL, "cannot determine mount list location!\n"); -+ if (!readmem(mnt_ns + OFFSET(mnt_namespace_root), KVADDR, -+ &root, sizeof(void *), "mnt_namespace root", -+ RETURN_ON_ERROR|QUIET)) -+ error(FATAL, "cannot determine mount list location!\n"); -+ -+ ld->start = root + OFFSET(vfsmount_mnt_list); -+ ld->end = mnt_ns + OFFSET(mnt_namespace_list); -+ - } else if (VALID_MEMBER(namespace_root)) { -- if (!(tc = pid_to_context(1))) -- tc = CURRENT_CONTEXT(); -+ tc = namespace_context; - - readmem(tc->task + OFFSET(task_struct_namespace), KVADDR, - &namespace, sizeof(void *), "task namespace", -@@ -1497,7 +1527,7 @@ - goto nopath; - - if (VALID_MEMBER(file_f_vfsmnt)) { -- mntlist = get_mount_list(&mount_cnt); -+ mntlist = get_mount_list(&mount_cnt, pid_to_context(1)); - vfsmount_buf = GETBUF(SIZE(vfsmount)); - - for (m = found = 0, vfsmnt = mntlist; -@@ -1706,15 +1736,30 @@ - MEMBER_OFFSET_INIT(fs_struct_pwd, "fs_struct", "pwd"); - MEMBER_OFFSET_INIT(fs_struct_rootmnt, "fs_struct", "rootmnt"); - MEMBER_OFFSET_INIT(fs_struct_pwdmnt, "fs_struct", "pwdmnt"); -- 
MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds"); -- MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset"); -- MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds"); - MEMBER_OFFSET_INIT(files_struct_open_fds_init, - "files_struct", "open_fds_init"); -- MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd"); -+ MEMBER_OFFSET_INIT(files_struct_fdt, "files_struct", "fdt"); -+ if (VALID_MEMBER(files_struct_fdt)) { -+ MEMBER_OFFSET_INIT(fdtable_max_fds, "fdtable", "max_fds"); -+ MEMBER_OFFSET_INIT(fdtable_max_fdset, "fdtable", "max_fdset"); -+ MEMBER_OFFSET_INIT(fdtable_open_fds, "fdtable", "open_fds"); -+ MEMBER_OFFSET_INIT(fdtable_fd, "fdtable", "fd"); -+ } else { -+ MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds"); -+ MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset"); -+ MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds"); -+ MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd"); -+ } - MEMBER_OFFSET_INIT(file_f_dentry, "file", "f_dentry"); - MEMBER_OFFSET_INIT(file_f_vfsmnt, "file", "f_vfsmnt"); - MEMBER_OFFSET_INIT(file_f_count, "file", "f_count"); -+ if (INVALID_MEMBER(file_f_dentry)) { -+ MEMBER_OFFSET_INIT(file_f_path, "file", "f_path"); -+ MEMBER_OFFSET_INIT(path_mnt, "path", "mnt"); -+ MEMBER_OFFSET_INIT(path_dentry, "path", "dentry"); -+ ASSIGN_OFFSET(file_f_dentry) = OFFSET(file_f_path) + OFFSET(path_dentry); -+ ASSIGN_OFFSET(file_f_vfsmnt) = OFFSET(file_f_path) + OFFSET(path_mnt); -+ } - MEMBER_OFFSET_INIT(dentry_d_inode, "dentry", "d_inode"); - MEMBER_OFFSET_INIT(dentry_d_parent, "dentry", "d_parent"); - MEMBER_OFFSET_INIT(dentry_d_covers, "dentry", "d_covers"); -@@ -1736,10 +1781,15 @@ - MEMBER_OFFSET_INIT(vfsmount_mnt_mountpoint, - "vfsmount", "mnt_mountpoint"); - MEMBER_OFFSET_INIT(namespace_root, "namespace", "root"); -+ MEMBER_OFFSET_INIT(task_struct_nsproxy, "task_struct", "nsproxy"); - if (VALID_MEMBER(namespace_root)) { - MEMBER_OFFSET_INIT(namespace_list, "namespace", "list"); - MEMBER_OFFSET_INIT(task_struct_namespace, - "task_struct", "namespace"); -+ } else if (VALID_MEMBER(task_struct_nsproxy)) { -+ MEMBER_OFFSET_INIT(nsproxy_mnt_ns, "nsproxy", "mnt_ns"); -+ MEMBER_OFFSET_INIT(mnt_namespace_root, "mnt_namespace", "root"); -+ MEMBER_OFFSET_INIT(mnt_namespace_list, "mnt_namespace", "list"); - } else if (THIS_KERNEL_VERSION >= LINUX(2,4,20)) { - if (CRASHDEBUG(2)) - fprintf(fp, "hardwiring namespace stuff\n"); -@@ -1762,6 +1812,8 @@ - STRUCT_SIZE_INIT(umode_t, "umode_t"); - STRUCT_SIZE_INIT(dentry, "dentry"); - STRUCT_SIZE_INIT(files_struct, "files_struct"); -+ if (VALID_MEMBER(files_struct_fdt)) -+ STRUCT_SIZE_INIT(fdtable, "fdtable"); - STRUCT_SIZE_INIT(file, "file"); - STRUCT_SIZE_INIT(inode, "inode"); - STRUCT_SIZE_INIT(vfsmount, "vfsmount"); -@@ -1777,8 +1829,12 @@ - - if (symbol_exists("height_to_maxindex")) { - int tmp; -- ARRAY_LENGTH_INIT(tmp, height_to_maxindex, -- "height_to_maxindex", NULL, 0); -+ if (LKCD_KERNTYPES()) -+ ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxindex", -+ "radix_tree_preload.nodes", NULL, 0); -+ else -+ ARRAY_LENGTH_INIT(tmp, height_to_maxindex, -+ "height_to_maxindex", NULL, 0); - STRUCT_SIZE_INIT(radix_tree_root, "radix_tree_root"); - STRUCT_SIZE_INIT(radix_tree_node, "radix_tree_node"); - MEMBER_OFFSET_INIT(radix_tree_root_height, -@@ -1998,8 +2054,9 @@ - open_files_dump(ulong task, int flags, struct reference *ref) - { - struct task_context *tc; -- ulong files_struct_addr; -- char *files_struct_buf; -+ ulong 
files_struct_addr; -+ ulong fdtable_addr = 0; -+ char *files_struct_buf, *fdtable_buf = NULL; - ulong fs_struct_addr; - char *dentry_buf, *fs_struct_buf; - ulong root_dentry, pwd_dentry; -@@ -2027,6 +2084,8 @@ - BZERO(root_pathname, BUFSIZE); - BZERO(pwd_pathname, BUFSIZE); - files_struct_buf = GETBUF(SIZE(files_struct)); -+ if (VALID_STRUCT(fdtable)) -+ fdtable_buf = GETBUF(SIZE(fdtable)); - fill_task_struct(task); - - sprintf(files_header, " FD%s%s%s%s%s%s%sTYPE%sPATH\n", -@@ -2107,24 +2166,45 @@ - - files_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_files)); - -- if (files_struct_addr) { -- readmem(files_struct_addr, KVADDR, files_struct_buf, -- SIZE(files_struct), "files_struct buffer", -- FAULT_ON_ERROR); -- -- max_fdset = INT(files_struct_buf + -+ if (files_struct_addr) { -+ readmem(files_struct_addr, KVADDR, files_struct_buf, -+ SIZE(files_struct), "files_struct buffer", -+ FAULT_ON_ERROR); -+ -+ if (VALID_MEMBER(files_struct_max_fdset)) { -+ max_fdset = INT(files_struct_buf + - OFFSET(files_struct_max_fdset)); - -- max_fds = INT(files_struct_buf + -- OFFSET(files_struct_max_fds)); -- } -+ max_fds = INT(files_struct_buf + -+ OFFSET(files_struct_max_fds)); -+ } -+ } - -- if (!files_struct_addr || max_fdset == 0 || max_fds == 0) { -+ if (VALID_MEMBER(files_struct_fdt)) { -+ fdtable_addr = ULONG(files_struct_buf + OFFSET(files_struct_fdt)); -+ -+ if (fdtable_addr) { -+ readmem(fdtable_addr, KVADDR, fdtable_buf, -+ SIZE(fdtable), "fdtable buffer", FAULT_ON_ERROR); -+ if (VALID_MEMBER(fdtable_max_fdset)) -+ max_fdset = INT(fdtable_buf + -+ OFFSET(fdtable_max_fdset)); -+ else -+ max_fdset = -1; -+ max_fds = INT(fdtable_buf + -+ OFFSET(fdtable_max_fds)); -+ } -+ } -+ -+ if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) || -+ !files_struct_addr || max_fdset == 0 || max_fds == 0) { - if (ref) { - if (ref->cmdflags & FILES_REF_FOUND) - fprintf(fp, "\n"); - } else - fprintf(fp, "No open files\n"); -+ if (fdtable_buf) -+ FREEBUF(fdtable_buf); - FREEBUF(files_struct_buf); - return; - } -@@ -2146,8 +2226,12 @@ - } - } - -- open_fds_addr = ULONG(files_struct_buf + -- OFFSET(files_struct_open_fds)); -+ if (VALID_MEMBER(fdtable_open_fds)) -+ open_fds_addr = ULONG(fdtable_buf + -+ OFFSET(fdtable_open_fds)); -+ else -+ open_fds_addr = ULONG(files_struct_buf + -+ OFFSET(files_struct_open_fds)); - - if (open_fds_addr) { - if (VALID_MEMBER(files_struct_open_fds_init) && -@@ -2157,16 +2241,21 @@ - OFFSET(files_struct_open_fds_init), - &open_fds, sizeof(fd_set)); - else -- readmem(open_fds_addr, KVADDR, &open_fds, -- sizeof(fd_set), "files_struct open_fds", -+ readmem(open_fds_addr, KVADDR, &open_fds, -+ sizeof(fd_set), "fdtable open_fds", - FAULT_ON_ERROR); - } - -- fd = ULONG(files_struct_buf + OFFSET(files_struct_fd)); -+ if (VALID_MEMBER(fdtable_fd)) -+ fd = ULONG(fdtable_buf + OFFSET(fdtable_fd)); -+ else -+ fd = ULONG(files_struct_buf + OFFSET(files_struct_fd)); - - if (!open_fds_addr || !fd) { - if (ref && (ref->cmdflags & FILES_REF_FOUND)) - fprintf(fp, "\n"); -+ if (fdtable_buf) -+ FREEBUF(fdtable_buf); - FREEBUF(files_struct_buf); - return; - } -@@ -2175,7 +2264,8 @@ - for (;;) { - unsigned long set; - i = j * __NFDBITS; -- if (i >= max_fdset || i >= max_fds) -+ if (((max_fdset >= 0) && (i >= max_fdset)) || -+ (i >= max_fds)) - break; - set = open_fds.__fds_bits[j++]; - while (set) { -@@ -2220,6 +2310,8 @@ - if (ref && (ref->cmdflags & FILES_REF_FOUND)) - fprintf(fp, "\n"); - -+ if (fdtable_buf) -+ FREEBUF(fdtable_buf); - FREEBUF(files_struct_buf); - } - -@@ -2494,6 +2586,20 @@ - 
} - - /* -+ * Get the vfsmnt associated with a file. -+ */ -+ulong -+file_to_vfsmnt(ulong file) -+{ -+ char *file_buf; -+ ulong vfsmnt; -+ -+ file_buf = fill_file_cache(file); -+ vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); -+ return vfsmnt; -+} -+ -+/* - * get_pathname() fills in a pathname string for an ending dentry - * See __d_path() in the kernel for help fixing problems. - */ -@@ -3575,3 +3681,29 @@ - - return TRUE; - } -+ -+static int -+match_file_string(char *filename, char *string, char *buffer) -+{ -+ int found; -+ char command[BUFSIZE]; -+ FILE *pipe; -+ -+ -+ sprintf(command, "/usr/bin/strings %s", filename); -+ if ((pipe = popen(command, "r")) == NULL) { -+ error(INFO, "%s: %s\n", filename, strerror(errno)); -+ return FALSE; -+ } -+ -+ found = FALSE; -+ while (fgets(buffer, BUFSIZE-1, pipe)) { -+ if (strstr(buffer, string)) { -+ found = TRUE; -+ break; -+ } -+ } -+ pclose(pipe); -+ -+ return found; -+} ---- crash/help.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/help.c 2007-07-20 15:17:26.000000000 -0400 -@@ -1,8 +1,8 @@ - /* help.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -19,7 +19,6 @@ - - static void reshuffle_cmdlist(void); - static int sort_command_name(const void *, const void *); --static void display_help_screen(char *); - static void display_commands(void); - static void display_copying_info(void); - static void display_warranty_info(void); -@@ -106,34 +105,33 @@ - void - program_usage(int form) - { -- int i; -- char **p; -- FILE *less; -+ if (form == SHORT_FORM) { -+ fprintf(fp, program_usage_info[0], pc->program_name); -+ fprintf(fp, "\nEnter \"%s -h\" for details.\n", -+ pc->program_name); -+ clean_exit(1); -+ } else { -+ FILE *scroll; -+ char *scroll_command; -+ char **p; -+ -+ if ((scroll_command = setup_scroll_command()) && -+ (scroll = popen(scroll_command, "w"))) -+ fp = scroll; -+ else -+ scroll = NULL; - -- if (form == LONG_FORM) -- less = popen("/usr/bin/less", "w"); -- else -- less = NULL; -- -- p = program_usage_info; -- -- if (form == LONG_FORM) { -- if (less) -- fp = less; -- for (i = 0; program_usage_info[i]; i++, p++) { -- fprintf(fp, *p, pc->program_name); -+ for (p = program_usage_info; *p; p++) { -+ fprintf(fp, *p, pc->program_name); - fprintf(fp, "\n"); - } -- } else { -- fprintf(fp, *p, pc->program_name); -- fprintf(fp, "\nEnter \"%s -h\" for details.\n", -- pc->program_name); -- } -- fflush(fp); -- if (less) -- pclose(less); -+ fflush(fp); - -- clean_exit(1); -+ if (scroll) -+ pclose(scroll); -+ -+ clean_exit(0); -+ } - } - - -@@ -147,14 +145,16 @@ - struct command_table_entry *cp; - struct extension_table *ext; - -- for (pc->ncmds = 0, cp = &base_command_table[0]; cp->name; cp++) { -+ for (pc->ncmds = 0, cp = pc->cmd_table; cp->name; cp++) { - if (!(cp->flags & HIDDEN_COMMAND)) - pc->ncmds++; - } - - for (ext = extension_table; ext; ext = ext->next) { -- for (cp = ext->command_table; cp->name; cp++) -- pc->ncmds++; -+ for (cp = ext->command_table; cp->name; cp++) { -+ if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) -+ pc->ncmds++; -+ } - } - 
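
A rough standalone sketch of the open-fds bitmap walk that the open_files_dump() changes earlier in this patch perform, where a max_fdset of -1 (the fdtable case, once the max_fdset field disappeared) means only max_fds limits the scan; the word size, helper names, and sample bitmap below are illustrative, not the kernel's fd_set layout:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

/* Scan an open-fds style bitmap and report each set bit as an open fd. */
static void
walk_open_fds(unsigned long *bits, int max_fdset, int max_fds)
{
	int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * (int)BITS_PER_WORD;
		if (((max_fdset >= 0) && (i >= max_fdset)) || (i >= max_fds))
			break;
		set = bits[j++];
		while (set) {
			if (set & 1)
				printf("fd %d is open\n", i);
			i++;
			set >>= 1;
		}
	}
}

int
main(void)
{
	/* fds 0, 1 and 4 set in word 0; the lowest fd of word 1 set */
	unsigned long bits[2] = { 0x13, 0x1 };

	walk_open_fds(bits, -1, 128);	/* -1: no separate max_fdset limit */
	return 0;
}

Passing a non-negative max_fdset reinstates the older files_struct-style limit, matching the dual check added to the fd loop in the open_files_dump() hunk.
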
- if (!pc->cmdlist) { -@@ -188,14 +188,16 @@ - for (i = 0; i < pc->cmdlistsz; i++) - pc->cmdlist[i] = NULL; - -- for (cnt = 0, cp = &base_command_table[0]; cp->name; cp++) { -+ for (cnt = 0, cp = pc->cmd_table; cp->name; cp++) { - if (!(cp->flags & HIDDEN_COMMAND)) - pc->cmdlist[cnt++] = cp->name; - } - - for (ext = extension_table; ext; ext = ext->next) { -- for (cp = ext->command_table; cp->name; cp++) -- pc->cmdlist[cnt++] = cp->name; -+ for (cp = ext->command_table; cp->name; cp++) { -+ if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) -+ pc->cmdlist[cnt++] = cp->name; -+ } - } - - if (cnt > pc->cmdlistsz) -@@ -239,7 +241,7 @@ - oflag = 0; - - while ((c = getopt(argcnt, args, -- "efNDdmM:ngcaBbHhksvVoptTzLxO")) != EOF) { -+ "efNDdmM:ngcaBbHhkKsvVoptTzLxO")) != EOF) { - switch(c) - { - case 'e': -@@ -303,7 +305,11 @@ - return; - - case 'k': -- dump_kernel_table(); -+ dump_kernel_table(!VERBOSE); -+ return; -+ -+ case 'K': -+ dump_kernel_table(VERBOSE); - return; - - case 's': -@@ -349,6 +355,7 @@ - fprintf(fp, " -D - dumpfile memory usage\n"); - fprintf(fp, " -f - filesys table\n"); - fprintf(fp, " -k - kernel_table\n"); -+ fprintf(fp, " -K - kernel_table (verbose)\n"); - fprintf(fp, " -M machine specific\n"); - fprintf(fp, " -m - machdep_table\n"); - fprintf(fp, " -s - symbol table data\n"); -@@ -389,7 +396,7 @@ - if (oflag) - dump_offset_table(args[optind], FALSE); - else -- cmd_usage(args[optind], COMPLETE_HELP); -+ cmd_usage(args[optind], COMPLETE_HELP|MUST_HELP); - optind++; - } while (args[optind]); - } -@@ -398,7 +405,7 @@ - * Format and display the help menu. - */ - --static void -+void - display_help_screen(char *indent) - { - int i, j, rows; -@@ -508,16 +515,16 @@ - " active perform the command(s) on the active thread on each CPU.\n", - " If none of the task-identifying arguments above are entered, the command", - " will be performed on all tasks.\n", --" command select one or more of the following commands on the tasks", -+" command select one or more of the following commands to be run on the tasks", - " selected, or on all tasks:\n", --" bt same as the \"bt\" command (optional flags: -r -t -l -e -R -f)", --" vm same as the \"vm\" command (optional flags: -p -v -m -R)", --" task same as the \"task\" command (optional flag: -R)", --" files same as the \"files\" command (optional flag: -R)", --" net same as the \"net\" command (optional flags: -s -S -R)", --" set same as the \"set\" command", --" sig same as the \"sig\" command", --" vtop same as the \"vtop\" command (optional flags: -c -u -k)\n", -+" bt run the \"bt\" command (optional flags: -r -t -l -e -R -f -o)", -+" vm run the \"vm\" command (optional flags: -p -v -m -R)", -+" task run the \"task\" command (optional flag: -R)", -+" files run the \"files\" command (optional flag: -R)", -+" net run the \"net\" command (optional flags: -s -S -R)", -+" set run the \"set\" command", -+" sig run the \"sig\" command (optional flag: -g)", -+" vtop run the \"vtop\" command (optional flags: -c -u -k)\n", - " flag Pass this optional flag to the command selected.", - " argument Pass this argument to the command selected.", - " ", -@@ -651,6 +658,10 @@ - " argument is entered, the current value of the %s variable is shown. 
These", - " are the %s variables, acceptable arguments, and purpose:\n", - " scroll on | off controls output scrolling.", -+" scroll less /usr/bin/less as the output scrolling program.", -+" scroll more /bin/more as the output scrolling program.", -+" scroll CRASHPAGER use CRASHPAGER environment variable as the", -+" output scrolling program.", - " radix 10 | 16 sets output radix to 10 or 16.", - " refresh on | off controls internal task list refresh.", - " print_max number set maximum number of array elements to print.", -@@ -665,6 +676,8 @@ - " edit vi | emacs set line editing mode (from .%src file only).", - " namelist filename name of kernel (from .%src file only).", - " dumpfile filename name of core dumpfile (from .%src file only).", -+" zero_excluded on | off controls whether excluded pages from a dumpfile", -+" should return zero-filled memory.", - " ", - " Internal variables may be set in four manners:\n", - " 1. entering the set command in $HOME/.%src.", -@@ -694,11 +707,11 @@ - " STATE: TASK_RUNNING (PANIC)\n", - " Turn off output scrolling:\n", - " %s> set scroll off", --" scroll: off", -+" scroll: off (/usr/bin/less)", - " ", - " Show the current state of %s internal variables:\n", - " %s> set -v", --" scroll: on", -+" scroll: on (/usr/bin/less)", - " radix: 10 (decimal)", - " refresh: on", - " print_max: 256", -@@ -710,6 +723,7 @@ - " edit: vi", - " namelist: vmlinux", - " dumpfile: vmcore", -+" zero_excluded: off", - " ", - " Show the current context:\n", - " %s> set", -@@ -787,7 +801,7 @@ - char *help_ps[] = { - "ps", - "display process status information", --"[-k|-u][-s][-p|-c|-t|-l] [pid | taskp | command] ...", -+"[-k|-u][-s][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ...", - " This command displays process status for selected, or all, processes" , - " in the system. If no arguments are entered, the process data is", - " is displayed for all processes. 
Selected process identifiers can be", -@@ -822,8 +836,9 @@ - " On SMP machines, the active task on each CPU will be highlighted by an", - " angle bracket (\">\") preceding its information.", - " ", --" Alternatively, information regarding parent-child relationships, or", --" per-task time usage data may be displayed:", -+" Alternatively, information regarding parent-child relationships,", -+" per-task time usage data, argument/environment data, thread groups,", -+" or resource limits may be displayed:", - " ", - " -p display the parental hierarchy of selected, or all, tasks.", - " -c display the children of selected, or all, tasks.", -@@ -832,6 +847,10 @@ - " -l display the task last_run or timestamp value, whichever applies,", - " of selected, or all, tasks; the list is sorted with the most", - " recently-run task (largest last_run/timestamp) shown first.", -+" -a display the command line arguments and environment strings of", -+" selected, or all, user-mode tasks.", -+" -g display tasks by thread group, of selected, or all, tasks.", -+" -r display resource limits (rlimits) of selected, or all, tasks.", - "\nEXAMPLES", - " Show the process status of all current tasks:\n", - " %s> ps", -@@ -1031,13 +1050,73 @@ - " 381 1 0 c34ddf28 IN 0.2 1316 224 automount", - " 391 1 1 c2777f28 IN 0.2 1316 224 automount", - " ...", -+" ", -+" Display the argument and environment data for the automount task:\n", -+" %s> ps -a automount", -+" PID: 3948 TASK: f722ee30 CPU: 0 COMMAND: \"automount\"", -+" ARG: /usr/sbin/automount --timeout=60 /net program /etc/auto.net", -+" ENV: SELINUX_INIT=YES", -+" CONSOLE=/dev/console", -+" TERM=linux", -+" INIT_VERSION=sysvinit-2.85", -+" PATH=/sbin:/usr/sbin:/bin:/usr/bin", -+" LC_MESSAGES=en_US", -+" RUNLEVEL=3", -+" runlevel=3", -+" PWD=/", -+" LANG=ja_JP.UTF-8", -+" PREVLEVEL=N", -+" previous=N", -+" HOME=/", -+" SHLVL=2", -+" _=/usr/sbin/automount", -+" ", -+" Display the tasks in the thread group containing task c20ab0b0:\n", -+" %s> ps -g c20ab0b0", -+" PID: 6425 TASK: f72f50b0 CPU: 0 COMMAND: \"firefox-bin\"", -+" PID: 6516 TASK: f71bf1b0 CPU: 0 COMMAND: \"firefox-bin\"", -+" PID: 6518 TASK: d394b930 CPU: 0 COMMAND: \"firefox-bin\"", -+" PID: 6520 TASK: c20aa030 CPU: 0 COMMAND: \"firefox-bin\"", -+" PID: 6523 TASK: c20ab0b0 CPU: 0 COMMAND: \"firefox-bin\"", -+" PID: 6614 TASK: f1f181b0 CPU: 0 COMMAND: \"firefox-bin\"", -+" ", -+" Display the tasks in the thread group for each instance of the", -+" program named \"multi-thread\":\n", -+" %s> ps -g multi-thread", -+" PID: 2522 TASK: 1003f0dc7f0 CPU: 1 COMMAND: \"multi-thread\"", -+" PID: 2523 TASK: 10037b13030 CPU: 1 COMMAND: \"multi-thread\"", -+" PID: 2524 TASK: 1003e064030 CPU: 1 COMMAND: \"multi-thread\"", -+" PID: 2525 TASK: 1003e13a7f0 CPU: 1 COMMAND: \"multi-thread\"", -+" ", -+" PID: 2526 TASK: 1002f82b7f0 CPU: 1 COMMAND: \"multi-thread\"", -+" PID: 2527 TASK: 1003e1737f0 CPU: 1 COMMAND: \"multi-thread\"", -+" PID: 2528 TASK: 10035b4b7f0 CPU: 1 COMMAND: \"multi-thread\"", -+" PID: 2529 TASK: 1003f0c37f0 CPU: 1 COMMAND: \"multi-thread\"", -+" PID: 2530 TASK: 10035597030 CPU: 1 COMMAND: \"multi-thread\"", -+" PID: 2531 TASK: 100184be7f0 CPU: 1 COMMAND: \"multi-thread\"", -+" ", -+" Display the resource limits of \"bash\" task 13896:\n", -+" %s> ps -r 13896", -+" PID: 13896 TASK: cf402000 CPU: 0 COMMAND: \"bash\"", -+" RLIMIT CURRENT MAXIMUM", -+" CPU (unlimited) (unlimited)", -+" FSIZE (unlimited) (unlimited)", -+" DATA (unlimited) (unlimited)", -+" STACK 10485760 (unlimited)", -+" CORE (unlimited) 
(unlimited)", -+" RSS (unlimited) (unlimited)", -+" NPROC 4091 4091", -+" NOFILE 1024 1024", -+" MEMLOCK 4096 4096", -+" AS (unlimited) (unlimited)", -+" LOCKS (unlimited) (unlimited)", - NULL - }; - - char *help_rd[] = { - "rd", - "read memory", --"[-dDsup][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]", -+"[-dDsupxmf][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]", - " This command displays the contents of memory, with the output formatted", - " in several different manners. The starting address may be entered either", - " symbolically or by address. The default output size is the size of a long", -@@ -1046,9 +1125,12 @@ - " -p address argument is a physical address.", - " -u address argument is a user virtual address; only required on", - " processors with common user and kernel virtual address spaces.", -+" -m address argument is a xen host machine address.", -+" -f address argument is a dumpfile offset.", - " -d display output in signed decimal format (default is hexadecimal).", - " -D display output in unsigned decimal format (default is hexadecimal).", - " -s displays output symbolically when appropriate.", -+" -x do not display ASCII translation at end of each line.", - #ifdef NOTDEF - " -o Shows offset value from the starting address.", - #endif -@@ -1064,7 +1146,8 @@ - " 3. -u specifies a user virtual address, but is only necessary on", - " processors with common user and kernel virtual address spaces.", - " symbol symbol of starting address to read.", --" count number of memory locations to display (default is 1).", -+" count number of memory locations to display (default is 1); if entered,", -+" must be the last argument on the command line.", - "\nEXAMPLES", - " Display the kernel_version string:\n", - " %s> rd kernel_version 4 ", -@@ -1155,7 +1238,7 @@ - "bt", - "backtrace", - #if defined(GDB_6_0) || defined(GDB_6_1) --"[-a|-r|-t|-l|-e|-E|-f] [-R ref] [ -I ip ] [-S sp] [pid | taskp]", -+"[-a|-r|-t|-T|-l|-e|-E|-f|-o|-O] [-R ref] [ -I ip ] [-S sp] [pid | taskp]", - #else - "[-a|-r|-t|-l|-e|-f|-g] [-R ref] [ -I ip ] [-S sp] [pid | taskp]", - #endif -@@ -1167,14 +1250,26 @@ - " pages of memory containing the task_union structure.", - " -t display all text symbols found from the last known stack location", - " to the top of the stack. (helpful if the back trace fails)", -+" -T display all text symbols found from just above the task_struct or", -+" thread_info to the top of the stack. 
(helpful if the back trace", -+" fails or the -t option starts too high in the process stack).", - " -l show file and line number of each stack trace text location.", - " -e search the stack for possible kernel and user mode exception frames.", --" -E search the IRQ stacks (x86, x86_64 and PPC64), and the exception", -+" -E search the IRQ stacks (x86, x86_64 and ppc64), and the exception", - " stacks (x86_64) for possible exception frames; all other arguments", - " will be ignored since this is not a context-sensitive operation.", - " -f display all stack data contained in a frame; this option can be", --" used to determine the arguments passed to each function (x86 only);", --" on IA64, the argument register contents are dumped.", -+" used to determine the arguments passed to each function; on ia64,", -+" the argument register contents are dumped.", -+" -o x86: use old backtrace method, permissable only on kernels that were", -+" compiled without the -fomit-frame_pointer.", -+" x86_64: use old backtrace method, which dumps potentially stale", -+" kernel text return addresses found on the stack.", -+" -O x86: use old backtrace method by default, permissable only on kernels", -+" that were compiled without the -fomit-frame_pointer; subsequent usage", -+" of this option toggles the backtrace method.", -+" x86_64: use old backtrace method by default; subsequent usage of this", -+" option toggles the backtrace method.", - #if !defined(GDB_6_0) && !defined(GDB_6_1) - " -g use gdb stack trace code. (alpha only)", - #endif -@@ -1189,11 +1284,8 @@ - " Note that all examples below are for x86 only. The output format will differ", - " for other architectures. x86 backtraces from kernels that were compiled", - " with the --fomit-frame-pointer CFLAG occasionally will drop stack frames,", --" or display a stale frame reference. x86_64 backtraces are only slightly", --" more intelligent than those generated from kernel oops messages; text return", --" addresses shown in the back trace may include stale references. When in", --" doubt as to the accuracy of a backtrace, the -t option may help fill in", --" the blanks.\n", -+" or display a stale frame reference. When in doubt as to the accuracy of a", -+" backtrace, the -t or -T options may help fill in the blanks.\n", - "EXAMPLES", - " Display the stack trace of the active task(s) when the kernel panicked:\n", - " %s> bt -a", -@@ -1439,12 +1531,13 @@ - " called \"echo\", which simply echoes back all arguments passed to it.", - " Note the comments contained within it for further details. To build it,", - " cut and paste the following output into a file, and call it, for example,", --" \"extlib.c\". Then compile like so:", -+" \"echo.c\". 
Then compile like so:", - " ", --" gcc -nostartfiles -shared -rdynamic -o extlib.so extlib.c", -+" gcc -nostartfiles -shared -rdynamic -o echo.so echo.c -fPIC -D", - " ", --" The extlib.so file may be dynamically linked into %s during runtime, or", --" during initialization by putting \"extend extlib.so\" into a .%src file", -+" where must be one of the MACHINE_TYPE #define's in defs.h.", -+" The echo.so file may be dynamically linked into %s during runtime, or", -+" during initialization by putting \"extend echo.so\" into a .%src file", - " located in the current directory, or in the user's $HOME directory.", - " ", - "---------------------------------- cut here ----------------------------------", -@@ -1583,7 +1676,8 @@ - " This command displays the timer queue entries, both old- and new-style,", - " in chronological order. In the case of the old-style timers, the", - " timer_table array index is shown; in the case of the new-style timers, ", --" the timer_list address is shown.", -+" the timer_list address is shown. On later kernels, the timer data is", -+" per-cpu.", - "\nEXAMPLES", - " %s> timer", - " JIFFIES", -@@ -1610,6 +1704,37 @@ - " 372010 c2323f7c c0112d6c ", - " 372138 c2191f10 c0112d6c ", - " 8653052 c1f13f10 c0112d6c ", -+" ", -+" Display the timer queue on a 2-cpu system:\n", -+" %s> timer", -+" TVEC_BASES[0]: c1299be0", -+" JIFFIES", -+" 18256298", -+" EXPIRES TIMER_LIST FUNCTION", -+" 18256406 cd5ddec0 c01232bb ", -+" 18256677 ceea93e0 c011e3cc ", -+" 18256850 ceea7f64 c01232bb ", -+" 18258751 cd1d4f64 c01232bb ", -+" 18258792 cf5782f0 c011e3cc ", -+" 18261266 c03c9f80 c022fad5 ", -+" 18262196 c02dc2e0 c0233329 ", -+" 18270518 ceb8bf1c c01232bb ", -+" 18271327 c03c9120 c0222074 ", -+" 18271327 c03ca580 c0233ace ", -+" 18272532 c02d1e18 c0129946 ", -+" 18276518 c03c9fc0 c022fd40 ", -+" 18332334 ceea9970 c011e3cc ", -+" 18332334 cfb6a840 c011e3cc ", -+" 18665378 cec25ec0 c01232bb ", -+" TVEC_BASES[1]: c12a1be0", -+" JIFFIES", -+" 18256298", -+" EXPIRES TIMER_LIST FUNCTION", -+" 18256493 c02c7d00 c013dad5 ", -+" 18256499 c12a2db8 c0129946 ", -+" 18277900 ceebaec0 c01232bb ", -+" 18283769 cf739f64 c01232bb ", -+" 18331902 cee8af64 c01232bb ", - NULL - }; - -@@ -1905,7 +2030,7 @@ - char *help_irq[] = { - "irq", - "IRQ data", --"[-d | -b | [index ...]]", -+"[[[index ...] | -u] | -d | -b]", - " This command collaborates the data in an irq_desc_t, along with its", - " associated hw_interrupt_type and irqaction structure data, into a", - " consolidated per-IRQ display. Alternatively, the intel interrupt", -@@ -1913,6 +2038,7 @@ - " If no index value argument(s) nor any options are entered, the IRQ", - " data for all IRQs will be displayed.\n", - " index a valid IRQ index.", -+" -u dump data for in-use IRQs only.", - " -d dump the intel interrupt descriptor table.", - " -b dump bottom half data.", - "\nEXAMPLES", -@@ -2013,7 +2139,7 @@ - char *help_sys[] = { - "sys", - "system data", --"[-c [name|number]] ", -+"[-c [name|number]] config", - " This command displays system-specific data. If no arguments are entered,\n" - " the same system data shown during %s invocation is shown.\n", - " -c [name|number] If no name or number argument is entered, dump all", -@@ -2023,6 +2149,8 @@ - " that number is displayed. If the current output radix", - " has been set to 16, the system call numbers will be ", - " displayed in hexadecimal.", -+" config If the kernel was configured with CONFIG_IKCONFIG, then", -+" dump the in-kernel configuration data.", - " -panic Panic a live system. 
Requires write permission to", - " /dev/mem. Results in the %s context causing an", - " \"Attempted to kill the idle task!\" panic. (The dump", -@@ -2043,6 +2171,27 @@ - " VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999", - " MACHINE: i686 (500 MHz)", - " MEMORY: 1 GB", -+"\n Dump the system configuration data (if CONFIG_IKCONFIG):\n", -+" %s> sys config", -+" #", -+" # Automatically generated make config: don't edit", -+" # Linux kernel version: 2.6.16", -+" # Mon Apr 10 07:58:06 2006", -+" #", -+" CONFIG_X86_64=y", -+" CONFIG_64BIT=y", -+" CONFIG_X86=y", -+" CONFIG_SEMAPHORE_SLEEPERS=y", -+" CONFIG_MMU=y", -+" CONFIG_RWSEM_GENERIC_SPINLOCK=y", -+" CONFIG_GENERIC_CALIBRATE_DELAY=y", -+" CONFIG_X86_CMPXCHG=y", -+" CONFIG_EARLY_PRINTK=y", -+" CONFIG_GENERIC_ISA_DMA=y", -+" CONFIG_GENERIC_IOMAP=y", -+" CONFIG_ARCH_MAY_HAVE_PC_FDC=y", -+" CONFIG_DMI=y", -+" ...", - "\n Dump the system call table:\n", - " %s> sys -c", - " NUM SYSTEM CALL FILE AND LINE NUMBER", -@@ -2191,13 +2340,18 @@ - char *help_mount[] = { - "mount", - "mounted filesystem data", --"[-f] [-i] [vfsmount | superblock | devname | dirname | inode]", -+"[-f] [-i] [-n pid|task] [vfsmount|superblock|devname|dirname|inode]", - " This command displays basic information about the currently-mounted", - " filesystems. The per-filesystem dirty inode list or list of open", - " files for the filesystem may also be displayed.\n", - " -f dump dentries and inodes for open files in each filesystem.", - " -i dump all dirty inodes associated with each filesystem.\n", --" Filesystems may be selected in the following forms:\n", -+" For kernels supporting namespaces, the -n option may be used to", -+" display the mounted filesystems with respect to the namespace of a", -+" specified task:\n", -+" -n pid a process PID.", -+" -n task a hexadecimal task_struct pointer.\n", -+" Specific filesystems may be selected using the following forms:\n", - " vfsmount hexadecimal address of filesystem vfsmount structure.", - " superblock hexadecimal address of filesystem super_block structure.", - " devname device name of filesystem.", -@@ -2721,22 +2875,22 @@ - char *help_sig[] = { - "sig", - "task signal handling", --"[[-l] | [-s sigset]] | [pid | taskp] ...", -+"[[-l] | [-s sigset]] | [-g] [pid | taskp] ...", - " This command displays signal-handling data of one or more tasks. Multiple", - " task or PID numbers may be entered; if no arguments are entered, the signal", - " handling data of the current context will be displayed. The default display", - " shows:", - " ", --" 1. Whether the task has an unblocked signal pending.", --" 2. The contents of the \"signal\" and \"blocked\" sigset_t structures", --" from the task_struct, both of which are represented as a 64-bit ", --" hexadecimal value.", --" 3. A formatted dump of the \"sig\" signal_struct structure referenced by", -+" 1. A formatted dump of the \"sig\" signal_struct structure referenced by", - " the task_struct. For each defined signal, it shows the sigaction", - " structure address, the signal handler, the signal sigset_t mask ", - " (also expressed as a 64-bit hexadecimal value), and the flags.", --" 4. For each queued signal, if any, its signal number and associated", --" siginfo structure address.", -+" 2. Whether the task has an unblocked signal pending.", -+" 3. The contents of the \"blocked\" and \"signal\" sigset_t structures", -+" from the task_struct/signal_struct, both of which are represented ", -+" as a 64-bit hexadecimal value.", -+" 4. 
For each queued signal, private and/or shared, if any, its signal", -+" number and associated siginfo structure address.", - " ", - " The -l option lists the signal numbers and their name(s). The -s option", - " translates a 64-bit hexadecimal value representing the contents of a", -@@ -2744,56 +2898,105 @@ - " ", - " pid a process PID.", - " taskp a hexadecimal task_struct pointer.", -+" -g displays signal information for all threads in a task's ", -+" thread group.", - " -l displays the defined signal numbers and names.", - " -s sigset translates a 64-bit hexadecimal value representing a sigset_t", - " into a list of signal names associated with the bits set.", - "\nEXAMPLES", --" Dump the signal-handling data of PID 614:\n", --" %s> sig 614", --" PID: 614 TASK: c6f26000 CPU: 1 COMMAND: \"httpd\"", --" SIGPENDING: no", --" SIGNAL: 0000000000000000", --" BLOCKED: 0000000000000000", --" SIGNAL_STRUCT: c1913800 COUNT: 1", -+" Dump the signal-handling data of PID 8970:\n", -+" %s> sig 8970", -+" PID: 8970 TASK: f67d8560 CPU: 1 COMMAND: \"procsig\"", -+" SIGNAL_STRUCT: f6018680 COUNT: 1", - " SIG SIGACTION HANDLER MASK FLAGS ", --" [1] c1913804 8057c98 0000000000000201 0 ", --" [2] c1913818 8057c8c 0000000000000000 0 ", --" [3] c191382c SIG_DFL 0000000000000000 0 ", --" [4] c1913840 8057bd8 0000000000000000 80000000 (SA_RESETHAND)", --" [5] c1913854 SIG_DFL 0000000000000000 0 ", --" [6] c1913868 8057bd8 0000000000000000 80000000 (SA_RESETHAND)", --" [7] c191387c 8057bd8 0000000000000000 80000000 (SA_RESETHAND)", --" [8] c1913890 SIG_DFL 0000000000000000 0 ", --" [9] c19138a4 SIG_DFL 0000000000000000 0 ", --" [10] c19138b8 8057c98 0000000000000201 0 ", --" [11] c19138cc 8057bd8 0000000000000000 80000000 (SA_RESETHAND)", --" [12] c19138e0 SIG_DFL 0000000000000000 0 ", --" [13] c19138f4 SIG_IGN 0000000000000000 0 ", --" [14] c1913908 SIG_DFL 0000000000000000 0 ", --" [15] c191391c 8057c8c 0000000000000000 0 ", --" [16] c1913930 SIG_DFL 0000000000000000 0 ", --" [17] c1913944 SIG_DFL 0000000000000000 0 ", --" [18] c1913958 SIG_DFL 0000000000000000 0 ", --" [19] c191396c SIG_DFL 0000000000000000 0 ", --" [20] c1913980 SIG_DFL 0000000000000000 0 ", --" [21] c1913994 SIG_DFL 0000000000000000 0 ", --" [22] c19139a8 SIG_DFL 0000000000000000 0 ", --" [23] c19139bc SIG_DFL 0000000000000000 0 ", --" [24] c19139d0 SIG_DFL 0000000000000000 0 ", --" [25] c19139e4 SIG_DFL 0000000000000000 0 ", --" [26] c19139f8 SIG_DFL 0000000000000000 0 ", --" [27] c1913a0c SIG_DFL 0000000000000000 0 ", --" [28] c1913a20 SIG_DFL 0000000000000000 0 ", --" [29] c1913a34 SIG_DFL 0000000000000000 0 ", --" [30] c1913a48 SIG_DFL 0000000000000000 0 ", --" [31] c1913a5c SIG_DFL 0000000000000000 0 ", --" SIGQUEUE: (empty)", -+" [1] f7877684 SIG_DFL 0000000000000000 0 ", -+" [2] f7877698 SIG_DFL 0000000000000000 0 ", -+" ...", -+" [8] f7877710 SIG_DFL 0000000000000000 0 ", -+" [9] f7877724 SIG_DFL 0000000000000000 0 ", -+" [10] f7877738 804867a 0000000000000000 80000000 (SA_RESETHAND)", -+" [11] f787774c SIG_DFL 0000000000000000 0 ", -+" [12] f7877760 804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)", -+" [13] f7877774 SIG_DFL 0000000000000000 0 ", -+" ...", -+" [31] f78778dc SIG_DFL 0000000000000000 0 ", -+" [32] f78778f0 SIG_DFL 0000000000000000 0 ", -+" [33] f7877904 SIG_DFL 0000000000000000 0 ", -+" [34] f7877918 804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)", -+" [35] f787792c SIG_DFL 0000000000000000 0 ", -+" [36] f7877940 SIG_DFL 0000000000000000 0 ", -+" ...", -+" [58] f7877af8 SIG_DFL 0000000000000000 0 
", -+" [59] f7877b0c SIG_DFL 0000000000000000 0 ", -+" [60] f7877b20 SIG_DFL 0000000000000000 0 ", -+" [61] f7877b34 SIG_DFL 0000000000000000 0 ", -+" [62] f7877b48 SIG_DFL 0000000000000000 0 ", -+" [63] f7877b5c SIG_DFL 0000000000000000 0 ", -+" [64] f7877b70 804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)", -+" SIGPENDING: no", -+" BLOCKED: 8000000200000800", -+" PRIVATE_PENDING", -+" SIGNAL: 0000000200000800", -+" SIGQUEUE: SIG SIGINFO ", -+" 12 f51b9c84", -+" 34 f51b9594", -+" SHARED_PENDING", -+" SIGNAL: 8000000000000800", -+" SIGQUEUE: SIG SIGINFO ", -+" 12 f51b9188", -+" 64 f51b9d18", -+" 64 f51b9500", -+" ", -+" Dump the signal-handling data for all tasks in the thread group containing", -+" PID 2578:\n", -+" %s> sig -g 2578", -+" PID: 2387 TASK: f617d020 CPU: 0 COMMAND: \"slapd\"", -+" SIGNAL_STRUCT: f7dede00 COUNT: 6", -+" SIG SIGACTION HANDLER MASK FLAGS", -+" [1] c1f60c04 a258a7 0000000000000000 10000000 (SA_RESTART)", -+" [2] c1f60c18 a258a7 0000000000000000 10000000 (SA_RESTART)", -+" [3] c1f60c2c SIG_DFL 0000000000000000 0", -+" [4] c1f60c40 SIG_DFL 0000000000000000 0", -+" [5] c1f60c54 a258a7 0000000000000000 10000000 (SA_RESTART)", -+" [6] c1f60c68 SIG_DFL 0000000000000000 0", -+" [7] c1f60c7c SIG_DFL 0000000000000000 0", -+" [8] c1f60c90 SIG_DFL 0000000000000000 0", -+" [9] c1f60ca4 SIG_DFL 0000000000000000 0", -+" [10] c1f60cb8 a25911 0000000000000000 10000000 (SA_RESTART)", -+" ...", -+" [64] c1f610f0 SIG_DFL 0000000000000000 0", -+" SHARED_PENDING", -+" SIGNAL: 0000000000000000", -+" SIGQUEUE: (empty)", -+" ", -+" PID: 2387 TASK: f617d020 CPU: 0 COMMAND: \"slapd\"", -+" SIGPENDING: no", -+" BLOCKED: 0000000000000000", -+" PRIVATE_PENDING", -+" SIGNAL: 0000000000000000", -+" SIGQUEUE: (empty)", -+" ", -+" PID: 2392 TASK: f6175aa0 CPU: 0 COMMAND: \"slapd\"", -+" SIGPENDING: no", -+" BLOCKED: 0000000000000000", -+" PRIVATE_PENDING", -+" SIGNAL: 0000000000000000", -+" SIGQUEUE: (empty)", -+" ", -+" PID: 2523 TASK: f7cd4aa0 CPU: 1 COMMAND: \"slapd\"", -+" SIGPENDING: no", -+" BLOCKED: 0000000000000000", -+" PRIVATE_PENDING", -+" SIGNAL: 0000000000000000", -+" SIGQUEUE: (empty)", -+" ", -+" ...", - " ", - " Translate the sigset_t mask value, cut-and-pasted from the signal handling", - " data from signals 1 and 10 above:", - " ", --" %s> sig -s 0000000000000201", --" SIGHUP SIGUSR1", -+" %s> sig -s 800A000000000201", -+" SIGHUP SIGUSR1 SIGRTMAX-14 SIGRTMAX-12 SIGRTMAX", - " ", - " List the signal numbers and their names:", - " ", -@@ -2829,6 +3032,40 @@ - " [29] SIGIO/SIGPOLL", - " [30] SIGPWR", - " [31] SIGSYS", -+" [32] SIGRTMIN", -+" [33] SIGRTMIN+1", -+" [34] SIGRTMIN+2", -+" [35] SIGRTMIN+3", -+" [36] SIGRTMIN+4", -+" [37] SIGRTMIN+5", -+" [38] SIGRTMIN+6", -+" [39] SIGRTMIN+7", -+" [40] SIGRTMIN+8", -+" [41] SIGRTMIN+9", -+" [42] SIGRTMIN+10", -+" [43] SIGRTMIN+11", -+" [44] SIGRTMIN+12", -+" [45] SIGRTMIN+13", -+" [46] SIGRTMIN+14", -+" [47] SIGRTMIN+15", -+" [48] SIGRTMIN+16", -+" [49] SIGRTMAX-15", -+" [50] SIGRTMAX-14", -+" [51] SIGRTMAX-13", -+" [52] SIGRTMAX-12", -+" [53] SIGRTMAX-11", -+" [54] SIGRTMAX-10", -+" [55] SIGRTMAX-9", -+" [56] SIGRTMAX-8", -+" [57] SIGRTMAX-7", -+" [58] SIGRTMAX-6", -+" [59] SIGRTMAX-5", -+" [60] SIGRTMAX-4", -+" [61] SIGRTMAX-3", -+" [62] SIGRTMAX-2", -+" [63] SIGRTMAX-1", -+" [64] SIGRTMAX", -+ - - NULL - }; -@@ -2836,8 +3073,8 @@ - char *help_struct[] = { - "struct", - "structure contents", --"struct_name[.member] [[-o][-l offset][-r] [address | symbol] [count]]\n" --" [-c count]", -+"struct_name[.member[,member]][-o][-l 
offset][-rfu] [address | symbol]\n" -+" [count | -c count]", - " This command displays either a structure definition, or a formatted display", - " of the contents of a structure at a specified address. When no address is", - " specified, the structure definition is shown along with the structure size.", -@@ -2845,7 +3082,8 @@ - " the scope of the data displayed to that particular member; when no address", - " is specified, the member's offset and definition are shown.\n", - " struct_name name of a C-code structure used by the kernel.", --" .member name of a structure member.", -+" .member name of a structure member; to display multiple members of a", -+" structure, use a comma-separated list of members.", - " -o show member offsets when displaying structure definitions.", - " -l offset if the address argument is a pointer to a list_head structure", - " that is embedded in the target data structure, the offset", -@@ -2854,6 +3092,9 @@ - " 1. in \"structure.member\" format.", - " 2. a number of bytes. ", - " -r raw dump of structure data.", -+" -f address argument is a dumpfile offset.", -+" -u address argument is a user virtual address in the current", -+" context.", - " address hexadecimal address of a structure; if the address points", - " to an embedded list_head structure contained within the", - " target data structure, then the \"-l\" option must be used.", -@@ -2944,6 +3185,21 @@ - " struct mm_struct {", - " [12] pgd_t *pgd;", - " }\n", -+" Display the flags and virtual members of 4 contigous page structures", -+" in the mem_map page structure array:\n", -+" %s> page.flags,virtual c101196c 4", -+" flags = 0x8000,", -+" virtual = 0xc04b0000", -+" ", -+" flags = 0x8000,", -+" virtual = 0xc04b1000", -+" ", -+" flags = 0x8000,", -+" virtual = 0xc04b2000", -+" ", -+" flags = 0x8000,", -+" virtual = 0xc04b3000", -+" ", - " Display the array of tcp_sl_timer structures declared by tcp_slt_array[]:\n", - " %s> struct tcp_sl_timer tcp_slt_array 4", - " struct tcp_sl_timer {", -@@ -3052,8 +3308,8 @@ - char *help_union[] = { - "union", - "union contents", --"union_name[.member] [[-o][-l offset][-r] [address | symbol] [count]]\n" --" [-c count]", -+"union_name[.member[,member]] [-o][-l offset][-rfu] [address | symbol]\n" -+" [count | -c count]", - " This command displays either a union definition, or a formatted display", - " of the contents of a union at a specified address. When no address is", - " specified, the union definition is shown along with the union size.", -@@ -3061,7 +3317,8 @@ - " the scope of the data displayed to that particular member; when no address", - " is specified, the member's offset (always 0) and definition are shown.\n", - " union_name name of a C-code union used by the kernel.", --" .member name of a union member.", -+" .member name of a union member; to display multiple members of a", -+" union, use a comma-separated list of members.", - " -o show member offsets when displaying union definitions.", - " (always 0)", - " -l offset if the address argument is a pointer to a list_head structure", -@@ -3071,6 +3328,9 @@ - " 1. in \"structure.member\" format.", - " 2. a number of bytes. 
", - " -r raw dump of union data.", -+" -f address argument is a dumpfile offset.", -+" -u address argument is a user virtual address in the current", -+" context.", - " address hexadecimal address of a union; if the address points", - " to an embedded list_head structure contained within the", - " target union structure, then the \"-l\" option must be used.", -@@ -3152,7 +3412,7 @@ - char *help_mod[] = { - "mod", - "module information and loading of symbols and debugging data", --"[ -s module [objfile] | -d module | -S [directory] | -D | -r ] ", -+"[ -s module [objfile] | -d module | -S [directory] | -D | -r | -o ] ", - " With no arguments, this command displays basic information of the currently", - " installed modules, consisting of the module address, name, size, the", - " object file name (if known), and whether the module was compiled with", -@@ -3203,6 +3463,7 @@ - " -r Reinitialize module data. All currently-loaded symbolic", - " and debugging data will be deleted, and the installed", - " module list will be updated (live system only).", -+" -o Load module symbols with old mechanism.", - " ", - " After symbolic and debugging data have been loaded, backtraces and text", - " disassembly will be displayed appropriately. Depending upon the processor", -@@ -3322,9 +3583,10 @@ - char *help__list[] = { - "list", - "linked list", --"[[-o] offset] [-e end] [-s struct[.member]] [-H] start", -+"[[-o] offset] [-e end] [-s struct[.member[,member]]] [-H] start", - " This command dumps the contents of a linked list. The entries in a linked", --" are typically data structures that are tied together in one of two formats:", -+" list are typically data structures that are tied together in one of two", -+" formats:", - " ", - " 1. A starting address points to a data structure; that structure contains", - " a member that is a pointer to the next structure, and so on. The list", -@@ -3335,7 +3597,7 @@ - " c. a pointer to the first item pointed to by the start address.", - " d. a pointer to its containing structure.", - " ", --" 2. Many Linux lists are linked via embedded list_head structures contained ", -+" 2. Most Linux lists are linked via embedded list_head structures contained ", - " within the data structures in the list. The linked list is headed by an", - " external LIST_HEAD, which is simply a list_head structure initialized to", - " point to itself, signifying that the list is empty:", -@@ -3370,15 +3632,17 @@ - " entered.", - " -s struct For each address in list, format and print as this type of", - " structure; use the \"struct.member\" format in order to display", --" a particular member of the structure.", -+" a particular member of the structure. 
To display multiple", -+" members of a structure, use a comma-separated list of members.", - " ", - " The meaning of the \"start\" argument, which can be expressed either", - " symbolically or in hexadecimal format, depends upon whether the -H option", - " is pre-pended or not:", - " ", - " start The address of the first structure in the list.", --" -H start The address of the LIST_HEAD structure, typically expressed", --" symbolically.", -+" -H start The address of the list_head structure, typically expressed", -+" symbolically, but also can be an expression evaluating to the", -+" address of the starting list_head structure.", - "\nEXAMPLES", - " Note that each task_struct is linked to its parent's task_struct via the", - " p_pptr member:", -@@ -3416,31 +3680,66 @@ - " The list of currently-registered file system types are headed up by a", - " struct file_system_type pointer named \"file_systems\", and linked by", - " the \"next\" field in each file_system_type structure. The following", --" sequence displays the address and name of each registered file system type:", -+" sequence displays the structure address followed by the name and ", -+" fs_flags members of each registered file system type:", - " ", - " %s> p file_systems", --" file_systems = $2 = (struct file_system_type *) 0xc02ebea0", --" %s> list file_system_type.next -s file_system_type.name 0xc02ebea0", --" c02ebea0", --" name = 0xc0280372 \"proc\", ", --" c02fd4a0", --" name = 0xc02bf348 \"sockfs\", ", --" c02eb544", --" name = 0xc027c25a \"tmpfs\", ", --" c02eb52c", --" name = 0xc027c256 \"shm\", ", --" c02ebbe0", --" name = 0xc027e054 \"pipefs\", ", --" c02ec9c0", --" name = 0xc0283c13 \"ext2\", ", --" c02ecaa8", --" name = 0xc0284567 \"iso9660\", ", --" c02ecc08", --" name = 0xc0284cf5 \"nfs\", ", --" c02edc60", --" name = 0xc028d832 \"autofs\", ", --" c02edfa0", --" name = 0xc028e1e0 \"devpts\"", -+" file_systems = $1 = (struct file_system_type *) 0xc03adc90", -+" %s> list file_system_type.next -s file_system_type.name,fs_flags 0xc03adc90", -+" c03adc90", -+" name = 0xc02c05c8 \"rootfs\",", -+" fs_flags = 0x30,", -+" c03abf94", -+" name = 0xc02c0319 \"bdev\",", -+" fs_flags = 0x10,", -+" c03acb40", -+" name = 0xc02c07c4 \"proc\",", -+" fs_flags = 0x8,", -+" c03e9834", -+" name = 0xc02cfc83 \"sockfs\",", -+" fs_flags = 0x10,", -+" c03ab8e4", -+" name = 0xc02bf512 \"tmpfs\",", -+" fs_flags = 0x20,", -+" c03ab8c8", -+" name = 0xc02c3d6b \"shm\",", -+" fs_flags = 0x20,", -+" c03ac394", -+" name = 0xc02c03cf \"pipefs\",", -+" fs_flags = 0x10,", -+" c03ada74", -+" name = 0xc02c0e6b \"ext2\",", -+" fs_flags = 0x1,", -+" c03adc74", -+" name = 0xc02c0e70 \"ramfs\",", -+" fs_flags = 0x20,", -+" c03ade74", -+" name = 0xc02c0e76 \"hugetlbfs\",", -+" fs_flags = 0x20,", -+" c03adf8c", -+" name = 0xc02c0f84 \"iso9660\",", -+" fs_flags = 0x1,", -+" c03aec14", -+" name = 0xc02c0ffd \"devpts\",", -+" fs_flags = 0x8,", -+" c03e93f4", -+" name = 0xc02cf1b9 \"pcihpfs\",", -+" fs_flags = 0x28,", -+" e0831a14", -+" name = 0xe082f89f \"ext3\",", -+" fs_flags = 0x1,", -+" e0846af4", -+" name = 0xe0841ac6 \"usbdevfs\",", -+" fs_flags = 0x8,", -+" e0846b10", -+" name = 0xe0841acf \"usbfs\",", -+" fs_flags = 0x8,", -+" e0992370", -+" name = 0xe099176c \"autofs\",", -+" fs_flags = 0x0,", -+" e2dcc030", -+" name = 0xe2dc8849 \"nfs\",", -+" fs_flags = 0x48000,", - " ", - " In some kernels, the system run queue is a linked list headed up by the", - " \"runqueue_head\", which is defined like so:", -@@ -3555,7 +3854,7 @@ - char *help_kmem[] = { - "kmem", 
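As a rough illustration of the two linked-list layouts described in the "list" help text above, a minimal sketch follows; the structure names are hypothetical and only the shapes matter. For the first format the value given to "list -o" is the byte offset of the embedded pointer member, while for the second it is the offset of the embedded list_head, which is also what the "-l offset" of the struct/union commands refers to when an address points into the middle of a containing structure.

	/* Hypothetical structures, for illustration only. */

	/* Format 1: entries chained through an embedded "next" pointer;
	 * "list -o" takes the byte offset of that pointer. */
	struct item {
		long data;
		struct item *next;
	};

	/* Format 2: entries tied together by embedded list_head structures,
	 * headed by an external LIST_HEAD that initially points to itself.
	 * Here "list -o" (or "struct -l") takes the offset of "node", given
	 * either as "widget.node" or as its equivalent in bytes. */
	struct list_head {
		struct list_head *next, *prev;
	};

	struct widget {
		long data;
		struct list_head node;
	};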
- "kernel memory", --"[-f|-F|-p|-c|-C|-i|-s|-S|-v|-n] [-[l|L][a|i]] [slab-name] [[-P] address]", -+"[-f|-F|-p|-c|-C|-i|-s|-S|-v|-V|-n] [-[l|L][a|i]] [slab] [[-P] address]", - " This command displays information about the use of kernel memory.\n", - " -f displays the contents of the system free memory headers.", - " also verifies that the page count equals nr_free_pages.", -@@ -3569,13 +3868,14 @@ - " -S displays all kmalloc() slab data, including all slab objects,", - " and whether each object is in use or is free.", - " -v displays the vmlist entries.", -+" -V displays the kernel vm_stat table.", - " -n display memory node data (if supported).", - " -la walks through the active_list and verifies nr_active_pages.", - " -li walks through the inactive_list and verifies nr_inactive_pages.", - " -La same as -la, but also dumps each page in the active_list.", - " -Li same as -li, but also dumps each page in the inactive_list.", --" slab-name when used with -s or -S, limits the command to only the slab cache", --" of name \"slab-name\". If the slab-name argument is \"list\", then", -+" slab when used with -s or -S, limits the command to only the slab cache", -+" of name \"slab\". If the slab argument is \"list\", then", - " all slab cache names and addresses are listed.", - " -P declares that the following address argument is a physical address.", - " address when used without any flag, the address can be a kernel virtual,", -@@ -3781,6 +4081,24 @@ - " c2f8ab60 c8095000 - c8097000 8192", - " c2f519e0 c8097000 - c8099000 8192", - " ", -+" Dump the vm_table contents:\n", -+" %s> kmem -V", -+" NR_ANON_PAGES: 38989", -+" NR_FILE_MAPPED: 3106", -+" NR_FILE_PAGES: 169570", -+" NR_SLAB: 32439", -+" NR_PAGETABLE: 1181", -+" NR_FILE_DIRTY: 4633", -+" NR_WRITEBACK: 0", -+" NR_UNSTABLE_NFS: 0", -+" NR_BOUNCE: 0", -+" NUMA_HIT: 63545992", -+" NUMA_MISS: 0", -+" NUMA_FOREIGN: 0", -+" NUMA_INTERLEAVE_HIT: 24002", -+" NUMA_LOCAL: 63545992", -+" NUMA_OTHER: 0", -+" ", - " Determine (and verify) the page cache size:\n", - " %s> kmem -c", - " page_cache_size: 18431 (verified)", -@@ -3979,18 +4297,21 @@ - char *help_dis[] = { - "dis", - "disassemble", --"[-r][-l][-u] [address | symbol | (expression)] [count]", -+"[-r][-l][-u][-b [num]] [address | symbol | (expression)] [count]", - " This command disassembles source code instructions starting (or ending) at", - " a text address that may be expressed by value, symbol or expression:\n", - " -r (reverse) displays all instructions from the start of the ", - " routine up to and including the designated address.", - " -l displays source code line number data in addition to the ", - " disassembly output.", --" -u address is a user virtual address; otherwise the address is ", --" assumed to be a kernel virtual address. If this option is", --" used, then -r and -l are ignored.", -+" -u address is a user virtual address in the current context;", -+" otherwise the address is assumed to be a kernel virtual address.", -+" If this option is used, then -r and -l are ignored.", -+" -b [num] modify the pre-calculated number of encoded bytes to skip after", -+" a kernel BUG (\"ud2a\") instruction; with no argument, displays", -+" the current number of bytes being skipped. (x86 and x86_64 only)", - " address starting hexadecimal text address.", --" symbol symbol of starting text address. On PPC64, the symbol", -+" symbol symbol of starting text address. On ppc64, the symbol", - " preceded by '.' 
is used.", - " (expression) expression evaluating to a starting text address.", - " count the number of instructions to be disassembled (default is 1).", -@@ -4419,10 +4740,11 @@ - " Display various network related data:\n", - " -a display the ARP cache.", - " -s display open network socket/sock addresses, their family and type,", --" and their source and destination addresses and ports.", -+" and for INET and INET6 families, their source and destination", -+" addresses and ports.", - " -S displays open network socket/sock addresses followed by a dump", - " of both structures.", --" -n addr translates an IP address expressed as a decimal or hexadecimal ", -+" -n addr translates an IPv4 address expressed as a decimal or hexadecimal", - " value into a standard numbers-and-dots notation.", - " -R ref socket or sock address, or file descriptor.", - " pid a process PID.", -@@ -4450,8 +4772,8 @@ - " Display the sockets for PID 2517, using both -s and -S output formats:\n", - " %s> net -s 2517", - " PID: 2517 TASK: c1598000 CPU: 1 COMMAND: \"rlogin\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 3 c57375dc c1ff1850 INET:STREAM 10.1.8.20:1023 10.1.16.62:513", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 3 c57375dc c1ff1850 INET:STREAM 10.1.8.20-1023 10.1.16.62-513", - " ", - " %s> net -S 2517", - " PID: 2517 TASK: c1598000 CPU: 1 COMMAND: \"rlogin\"", -@@ -4497,52 +4819,52 @@ - " From \"foreach\", find all tasks with references to socket c08ea3cc:\n", - " %s> foreach net -s -R c08ea3cc", - " PID: 2184 TASK: c7026000 CPU: 1 COMMAND: \"klines.kss\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 2200 TASK: c670a000 CPU: 1 COMMAND: \"kpanel\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 2201 TASK: c648a000 CPU: 1 COMMAND: \"kbgndwm\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 19294 TASK: c250a000 CPU: 0 COMMAND: \"prefdm\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 2194 TASK: c62dc000 CPU: 1 COMMAND: \"kaudioserver\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 2195 TASK: c6684000 CPU: 1 COMMAND: \"maudio\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 2196 TASK: c6b58000 CPU: 1 COMMAND: \"kwmsound\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 
c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 2197 TASK: c6696000 CPU: 0 COMMAND: \"kfm\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 2199 TASK: c65ec000 CPU: 0 COMMAND: \"krootwm\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 694 TASK: c1942000 CPU: 0 COMMAND: \"prefdm\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 698 TASK: c6a2c000 CPU: 1 COMMAND: \"X\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - " PID: 2159 TASK: c4a5a000 CPU: 1 COMMAND: \"kwm\"", --" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", --" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", -+" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", -+" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", - " ", - NULL - }; -@@ -4584,21 +4906,22 @@ - void - cmd_usage(char *cmd, int helpflag) - { -- int i; -- int found; -- char **p; -+ char **p, *scroll_command; - struct command_table_entry *cp; - char buf[BUFSIZE]; -- struct alias_data *ad; -- FILE *less; -+ FILE *scroll; -+ int i; - -- if (helpflag & PIPE_TO_LESS) { -- if ((less = popen("/usr/bin/less", "w")) != NULL) -- fp = less; -- helpflag &= ~PIPE_TO_LESS; -- } else -- less = NULL; -- -+ if (helpflag & PIPE_TO_SCROLL) { -+ if ((scroll_command = setup_scroll_command()) && -+ (scroll = popen(scroll_command, "w"))) -+ fp = scroll; -+ else -+ scroll = NULL; -+ } else { -+ scroll_command = NULL; -+ scroll = NULL; -+ } - - if (STREQ(cmd, "copying")) { - display_copying_info(); -@@ -4641,46 +4964,50 @@ - goto done_usage; - } - -- found = FALSE; --retry: -- if ((cp = get_command_table_entry(cmd))) { -- if ((p = cp->help_data)) -- found = TRUE; -- } -+ /* look up command, possibly through an alias */ -+ for (;;) { -+ struct alias_data *ad; -+ -+ cp = get_command_table_entry(cmd); -+ if (cp != NULL) -+ break; /* found command */ -+ -+ /* try for an alias */ -+ ad = is_alias(cmd); -+ if (ad == NULL) -+ break; /* neither command nor alias */ - -- /* -- * Check for alias names or gdb commands. 
-- */ -- if (!found) { -- if ((ad = is_alias(cmd))) { -- cmd = ad->args[0]; -- goto retry; -- } -+ cmd = ad->args[0]; -+ cp = get_command_table_entry(cmd); -+ } - -- if (helpflag == SYNOPSIS) { -- fprintf(fp, -- "No usage data for the \"%s\" command is available.\n", -+ if (cp == NULL || (p = cp->help_data) == NULL) { -+ if (helpflag & SYNOPSIS) { -+ fprintf(fp, -+ "No usage data for the \"%s\" command" -+ " is available.\n", - cmd); - RESTART(); - } - -- if (STREQ(pc->curcmd, "help")) { -- if (cp) -- fprintf(fp, -- "No help data for the \"%s\" command is available.\n", -+ if (helpflag & MUST_HELP) { -+ if (cp || !(pc->flags & GDB_INIT)) -+ fprintf(fp, -+ "No help data for the \"%s\" command" -+ " is available.\n", - cmd); - else if (!gdb_pass_through(concat_args(buf, 0, FALSE), - NULL, GNU_RETURN_ON_ERROR)) - fprintf(fp, -- "No help data for \"%s\" is available.\n", -- cmd); -+ "No help data for \"%s\" is available.\n", -+ cmd); - } - goto done_usage; - } - - p++; - -- if (helpflag == SYNOPSIS) { -+ if (helpflag & SYNOPSIS) { - p++; - fprintf(fp, "Usage: %s ", cmd); - fprintf(fp, *p, pc->program_name, pc->program_name); -@@ -4711,10 +5038,12 @@ - - done_usage: - -- if (less) { -- fflush(less); -- pclose(less); -+ if (scroll) { -+ fflush(scroll); -+ pclose(scroll); - } -+ if (scroll_command) -+ FREEBUF(scroll_command); - } - - -@@ -4812,7 +5141,9 @@ - "The default output radix for gdb output and certain %s commands is", - "hexadecimal. This can be changed to decimal by entering \"set radix 10\"", - "or the alias \"dec\". It can be reverted back to hexadecimal by entering", --"\"set radix 16\" or the alias \"hex\".", -+"\"set radix 16\" or the alias \"hex\".\n", -+"To execute an external shell command, precede the command with an \"!\".", -+"To escape to a shell, enter \"!\" alone.", - " ", - NULL - }; -@@ -4854,10 +5185,13 @@ - static - char *version_info[] = { - --"Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc.", --"Copyright (C) 2004, 2005 IBM Corporation", --"Copyright (C) 1999-2005 Hewlett-Packard Co", --"Copyright (C) 1999, 2002 Silicon Graphics, Inc.", -+"Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc.", -+"Copyright (C) 2004, 2005, 2006 IBM Corporation", -+"Copyright (C) 1999-2006 Hewlett-Packard Co", -+"Copyright (C) 2005, 2006 Fujitsu Limited", -+"Copyright (C) 2006, 2007 VA Linux Systems Japan K.K.", -+"Copyright (C) 2005 NEC Corporation", -+"Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc.", - "Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.", - "This program is free software, covered by the GNU General Public License,", - "and you are welcome to change it and/or distribute copies of it under", ---- crash/task.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/task.c 2007-07-31 16:09:39.000000000 -0400 -@@ -1,8 +1,8 @@ - /* task.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -27,11 +27,15 @@ - static void refresh_pidhash_task_table(void); - static void refresh_pid_hash_task_table(void); - static void refresh_hlist_task_table(void); -+static void refresh_hlist_task_table_v2(void); - static struct task_context *store_context(struct task_context *, ulong, char *); - static void refresh_context(ulong, ulong); - static void parent_list(ulong); - static void child_list(ulong); - static void show_task_times(struct task_context *, ulong); -+static void show_task_args(struct task_context *); -+static void show_task_rlimit(struct task_context *); -+static void show_tgid_list(ulong); - static int compare_start_time(const void *, const void *); - static int start_time_timespec(void); - static ulonglong convert_start_time(ulonglong, ulonglong); -@@ -46,11 +50,26 @@ - static void dump_runq(void); - static void dump_runqueues(void); - static void dump_prio_array(int, ulong, char *); -+struct rb_root; -+static struct rb_node *rb_first(struct rb_root *); -+struct rb_node; -+static struct rb_node *rb_next(struct rb_node *); -+static struct rb_node *rb_parent(struct rb_node *, struct rb_node *); -+static struct rb_node *rb_right(struct rb_node *, struct rb_node *); -+static struct rb_node *rb_left(struct rb_node *, struct rb_node *); -+static void dump_CFS_runqueues(void); -+static void dump_RT_prio_array(int, ulong, char *); - static void task_struct_member(struct task_context *,ulong,struct reference *); - static void signal_reference(struct task_context *, ulong, struct reference *); --static void dump_signal_data(struct task_context *); -+static void do_sig_thread_group(ulong); -+static void dump_signal_data(struct task_context *, ulong); -+#define TASK_LEVEL (0x1) -+#define THREAD_GROUP_LEVEL (0x2) -+#define TASK_INDENT (0x4) -+static int sigrt_minmax(int *, int *); - static void signame_list(void); --static ulonglong task_signal(ulong); -+static void sigqueue_list(ulong); -+static ulonglong task_signal(ulong, ulong*); - static ulonglong task_blocked(ulong); - static void translate_sigset(ulonglong); - static ulonglong sigaction_mask(ulong); -@@ -151,8 +170,15 @@ - get_idle_threads(&tt->idle_threads[0], kt->cpus); - } - -- MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", -- "thread_info"); -+ if (MEMBER_EXISTS("task_struct", "thread_info")) -+ MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", -+ "thread_info"); -+ else if (MEMBER_EXISTS("task_struct", "stack")) -+ MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", -+ "stack"); -+ else -+ ASSIGN_OFFSET(task_struct_thread_info) = INVALID_OFFSET; -+ - if (VALID_MEMBER(task_struct_thread_info)) { - MEMBER_OFFSET_INIT(thread_info_task, "thread_info", "task"); - MEMBER_OFFSET_INIT(thread_info_cpu, "thread_info", "cpu"); -@@ -193,6 +219,8 @@ - MEMBER_OFFSET_INIT(pid_link_pid, "pid_link", "pid"); - MEMBER_OFFSET_INIT(pid_hash_chain, "pid", "hash_chain"); - -+ STRUCT_SIZE_INIT(pid_link, "pid_link"); -+ - MEMBER_OFFSET_INIT(pid_pid_chain, "pid", "pid_chain"); - - STRUCT_SIZE_INIT(task_struct, "task_struct"); -@@ -207,6 +235,8 @@ - - MEMBER_OFFSET_INIT(signal_struct_count, "signal_struct", "count"); - MEMBER_OFFSET_INIT(signal_struct_action, "signal_struct", "action"); -+ MEMBER_OFFSET_INIT(signal_struct_shared_pending, "signal_struct", -+ "shared_pending"); - - MEMBER_OFFSET_INIT(k_sigaction_sa, "k_sigaction", "sa"); - -@@ -217,17 +247,10 @@ - if 
(INVALID_MEMBER(sigpending_head)) - MEMBER_OFFSET_INIT(sigpending_list, "sigpending", "list"); - MEMBER_OFFSET_INIT(sigpending_signal, "sigpending", "signal"); -+ MEMBER_SIZE_INIT(sigpending_signal, "sigpending", "signal"); - - STRUCT_SIZE_INIT(sigqueue, "sigqueue"); -- if (VALID_STRUCT(sigqueue)) { -- MEMBER_OFFSET_INIT(sigqueue_next, "sigqueue", "next"); -- MEMBER_OFFSET_INIT(sigqueue_list, "sigqueue", "list"); -- MEMBER_OFFSET_INIT(sigqueue_info, "sigqueue", "info"); -- } else { -- STRUCT_SIZE_INIT(signal_queue, "signal_queue"); -- MEMBER_OFFSET_INIT(signal_queue_next, "signal_queue", "next"); -- MEMBER_OFFSET_INIT(signal_queue_info, "signal_queue", "info"); -- } -+ STRUCT_SIZE_INIT(signal_queue, "signal_queue"); - - STRUCT_SIZE_INIT(sighand_struct, "sighand_struct"); - if (VALID_STRUCT(sighand_struct)) -@@ -249,6 +272,19 @@ - - STRUCT_SIZE_INIT(cputime_t, "cputime_t"); - -+ if (symbol_exists("cfq_slice_async")) { -+ uint cfq_slice_async; -+ -+ get_symbol_data("cfq_slice_async", sizeof(int), -+ &cfq_slice_async); -+ machdep->hz = cfq_slice_async * 25; -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, -+ "cfq_slice_async exitsts: setting hz to %d\n", -+ machdep->hz); -+ } -+ - if (VALID_MEMBER(runqueue_arrays)) - MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct", - "run_list"); -@@ -279,12 +315,6 @@ - error(FATAL, - "pidhash and pid_hash both exist -- cannot distinquish between them\n"); - -- /* -- * NOTE: We rely on PIDTYPE_PID staying at enum value of 0, because -- * evan at the lowest level in gdb, I can't seem to find where -- * the actual value is stored via the struct type. (?) -- * Should be safe, though... -- */ - if (symbol_exists("pid_hash") && symbol_exists("pidhash_shift")) { - int pidhash_shift; - -@@ -302,7 +332,24 @@ - tt->refresh_task_table = refresh_pid_hash_task_table; - } else { - tt->pidhash_addr = symbol_value("pid_hash"); -- tt->refresh_task_table = refresh_hlist_task_table; -+ if (LKCD_KERNTYPES()) { -+ if (VALID_STRUCT(pid_link)) -+ tt->refresh_task_table = -+ refresh_hlist_task_table_v2; -+ else -+ tt->refresh_task_table = -+ refresh_hlist_task_table; -+ builtin_array_length("pid_hash", -+ tt->pidhash_len, NULL); -+ } else { -+ if (!get_array_length("pid_hash", NULL, -+ sizeof(void *)) && VALID_STRUCT(pid_link)) -+ tt->refresh_task_table = -+ refresh_hlist_task_table_v2; -+ else -+ tt->refresh_task_table = -+ refresh_hlist_task_table; -+ } - } - - tt->flags |= PID_HASH; -@@ -353,8 +400,11 @@ - set_context(NO_TASK, active_pid); - tt->this_task = pid_to_task(active_pid); - } -- else -+ else { -+ please_wait("determining panic task"); - set_context(get_panic_context(), NO_PID); -+ please_wait_done(); -+ } - - sort_context_array(); - -@@ -987,9 +1037,7 @@ - return; - - if (DUMPFILE()) { /* impossible */ -- fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? -- "" : "\rplease wait... (gathering task table data)"); -- fflush(fp); -+ please_wait("gathering task table data"); - if (!symbol_exists("panic_threads")) - tt->flags |= POPULATE_PANIC; - } -@@ -1152,11 +1200,7 @@ - - FREEBUF(pid_hash); - -- if (DUMPFILE()) { -- fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? 
"" : -- "\r \r"); -- fflush(fp); -- } -+ please_wait_done(); - - if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) - refresh_context(curtask, curpid); -@@ -1176,12 +1220,14 @@ - { - int i; - ulong *pid_hash; -+ struct syment *sp; - ulong pidhash_array; - ulong kpp; - char *tp; - ulong next, pnext, pprev; - char *nodebuf; - int plen, len, cnt; -+ long value; - struct task_context *tc; - ulong curtask; - ulong curpid; -@@ -1192,9 +1238,7 @@ - return; - - if (DUMPFILE()) { /* impossible */ -- fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? -- "" : "\rplease wait... (gathering task table data)"); -- fflush(fp); -+ please_wait("gathering task table data"); - if (!symbol_exists("panic_threads")) - tt->flags |= POPULATE_PANIC; - } -@@ -1211,8 +1255,21 @@ - curpid = CURRENT_PID(); - } - -- if (!(plen = get_array_length("pid_hash", NULL, sizeof(void *)))) -- error(FATAL, "cannot determine pid_hash array dimensions\n"); -+ if (!(plen = get_array_length("pid_hash", NULL, sizeof(void *)))) { -+ /* -+ * Workaround for gcc omitting debuginfo data for pid_hash. -+ */ -+ if (enumerator_value("PIDTYPE_MAX", &value)) { -+ if ((sp = next_symbol("pid_hash", NULL)) && -+ (((sp->value - tt->pidhash_addr) / sizeof(void *)) < value)) -+ error(WARNING, "possible pid_hash array mis-handling\n"); -+ plen = (int)value; -+ } else { -+ error(WARNING, -+ "cannot determine pid_hash array dimensions\n"); -+ plen = 1; -+ } -+ } - - pid_hash = (ulong *)GETBUF(plen * sizeof(void *)); - -@@ -1228,6 +1285,16 @@ - * The zero'th (PIDTYPE_PID) entry is the hlist_head array - * that we want. - */ -+ if (CRASHDEBUG(1)) { -+ if (!enumerator_value("PIDTYPE_PID", &value)) -+ error(WARNING, -+ "possible pid_hash array mis-handling: PIDTYPE_PID: (unknown)\n"); -+ else if (value != 0) -+ error(WARNING, -+ "possible pid_hash array mis-handling: PIDTYPE_PID: %d \n", -+ value); -+ } -+ - pidhash_array = pid_hash[0]; - FREEBUF(pid_hash); - -@@ -1345,6 +1412,15 @@ - } - } - -+ if (cnt > tt->max_tasks) { -+ tt->max_tasks = cnt + TASK_SLUSH; -+ allocate_task_space(tt->max_tasks); -+ hq_close(); -+ if (!DUMPFILE()) -+ retries++; -+ goto retry_pid_hash; -+ } -+ - BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); - cnt = retrieve_list((ulong *)tt->task_local, cnt); - -@@ -1394,12 +1470,238 @@ - FREEBUF(pid_hash); - FREEBUF(nodebuf); - -- if (DUMPFILE()) { -- fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : -- "\r \r"); -- fflush(fp); -+ please_wait_done(); -+ -+ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) -+ refresh_context(curtask, curpid); -+ -+ tt->retries = MAX(tt->retries, retries); -+} -+ -+/* -+ * 2.6.17 replaced: -+ * static struct hlist_head *pid_hash[PIDTYPE_MAX]; -+ * with -+ * static struct hlist_head *pid_hash; -+ */ -+static void -+refresh_hlist_task_table_v2(void) -+{ -+ int i; -+ ulong *pid_hash; -+ ulong pidhash_array; -+ ulong kpp; -+ char *tp; -+ ulong next, pnext, pprev; -+ char *nodebuf; -+ int len, cnt; -+ struct task_context *tc; -+ ulong curtask; -+ ulong curpid; -+ ulong retries; -+ ulong *tlp; -+ -+ if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ -+ return; -+ -+ if (DUMPFILE()) { /* impossible */ -+ please_wait("gathering task table data"); -+ if (!symbol_exists("panic_threads")) -+ tt->flags |= POPULATE_PANIC; -+ } -+ -+ if (ACTIVE() && !(tt->flags & TASK_REFRESH)) -+ return; -+ -+ /* -+ * The current task's task_context entry may change, -+ * or the task may not even exist anymore. 
-+ */ -+ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { -+ curtask = CURRENT_TASK(); -+ curpid = CURRENT_PID(); -+ } -+ -+ get_symbol_data("pid_hash", sizeof(void *), &pidhash_array); -+ -+ len = tt->pidhash_len; -+ pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); -+ nodebuf = GETBUF(SIZE(pid_link)); -+ retries = 0; -+ -+retry_pid_hash: -+ if (retries && DUMPFILE()) -+ error(FATAL, -+ "\ncannot gather a stable task list via pid_hash\n"); -+ -+ if ((retries == MAX_UNLIMITED_TASK_RETRIES) && -+ !(tt->flags & TASK_INIT_DONE)) -+ error(FATAL, -+ "\ncannot gather a stable task list via pid_hash (%d retries)\n", -+ retries); -+ -+ if (!readmem(pidhash_array, KVADDR, pid_hash, -+ len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) -+ error(FATAL, "\ncannot read pid_hash array\n"); -+ -+ if (!hq_open()) { -+ error(INFO, "cannot hash task_struct entries\n"); -+ if (!(tt->flags & TASK_INIT_DONE)) -+ clean_exit(1); -+ error(INFO, "using stale task_structs\n"); -+ FREEBUF(pid_hash); -+ return; -+ } -+ -+ /* -+ * Get the idle threads first. -+ */ -+ cnt = 0; -+ for (i = 0; i < kt->cpus; i++) { -+ if (hq_enter(tt->idle_threads[i])) -+ cnt++; -+ else -+ error(WARNING, "%sduplicate idle tasks?\n", -+ DUMPFILE() ? "\n" : ""); -+ } -+ -+ for (i = 0; i < len; i++) { -+ if (!pid_hash[i]) -+ continue; -+ -+ if (!readmem(pid_hash[i], KVADDR, nodebuf, -+ SIZE(pid_link), "pid_hash node pid_link", RETURN_ON_ERROR|QUIET)) { -+ error(INFO, "\ncannot read pid_hash node pid_link\n"); -+ if (DUMPFILE()) -+ continue; -+ hq_close(); -+ retries++; -+ goto retry_pid_hash; -+ } -+ -+ kpp = pid_hash[i]; -+ next = ULONG(nodebuf + OFFSET(pid_link_pid)); -+ if (next) -+ next -= OFFSET(task_struct_pids); -+ pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); -+ pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); -+ -+ if (CRASHDEBUG(1)) -+ console("pid_hash[%d]: %lx task: %lx (node: %lx) next: %lx pprev: %lx\n", -+ i, pid_hash[i], next, kpp, pnext, pprev); -+ -+ while (next) { -+ if (!IS_TASK_ADDR(next)) { -+ error(INFO, -+ "%sinvalid task address in pid_hash: %lx\n", -+ DUMPFILE() ? "\n" : "", next); -+ if (DUMPFILE()) -+ break; -+ hq_close(); -+ retries++; -+ goto retry_pid_hash; -+ -+ } -+ -+ if (!is_idle_thread(next) && !hq_enter(next)) { -+ error(INFO, -+ "%sduplicate task in pid_hash: %lx\n", -+ DUMPFILE() ? 
"\n" : "", next); -+ if (DUMPFILE()) -+ break; -+ hq_close(); -+ retries++; -+ goto retry_pid_hash; -+ } -+ -+ cnt++; -+ -+ if (!pnext) -+ break; -+ -+ if (!readmem((ulonglong)pnext, KVADDR, nodebuf, -+ SIZE(pid_link), "task hlist_node pid_link", RETURN_ON_ERROR|QUIET)) { -+ error(INFO, "\ncannot read hlist_node pid_link from node next\n"); -+ if (DUMPFILE()) -+ break; -+ hq_close(); -+ retries++; -+ goto retry_pid_hash; -+ } -+ -+ kpp = (ulong)pnext; -+ next = ULONG(nodebuf + OFFSET(pid_link_pid)); -+ if (next) -+ next -= OFFSET(task_struct_pids); -+ pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); -+ pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); -+ -+ if (CRASHDEBUG(1)) -+ console(" chained task: %lx (node: %lx) next: %lx pprev: %lx\n", -+ next, kpp, pnext, pprev); -+ } -+ } -+ -+ if (cnt > tt->max_tasks) { -+ tt->max_tasks = cnt + TASK_SLUSH; -+ allocate_task_space(tt->max_tasks); -+ hq_close(); -+ if (!DUMPFILE()) -+ retries++; -+ goto retry_pid_hash; -+ } -+ -+ BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); -+ cnt = retrieve_list((ulong *)tt->task_local, cnt); -+ -+ hq_close(); -+ -+ clear_task_cache(); -+ -+ for (i = 0, tlp = (ulong *)tt->task_local, -+ tt->running_tasks = 0, tc = tt->context_array; -+ i < tt->max_tasks; i++, tlp++) { -+ if (!(*tlp)) -+ continue; -+ -+ if (!IS_TASK_ADDR(*tlp)) { -+ error(WARNING, -+ "%sinvalid task address found in task list: %lx\n", -+ DUMPFILE() ? "\n" : "", *tlp); -+ if (DUMPFILE()) -+ continue; -+ retries++; -+ goto retry_pid_hash; -+ } -+ -+ if (task_exists(*tlp)) { -+ error(WARNING, -+ "%sduplicate task address found in task list: %lx\n", -+ DUMPFILE() ? "\n" : "", *tlp); -+ if (DUMPFILE()) -+ continue; -+ retries++; -+ goto retry_pid_hash; -+ } -+ -+ if (!(tp = fill_task_struct(*tlp))) { -+ if (DUMPFILE()) -+ continue; -+ retries++; -+ goto retry_pid_hash; -+ } -+ -+ if (store_context(tc, *tlp, tp)) { -+ tc++; -+ tt->running_tasks++; -+ } - } - -+ FREEBUF(pid_hash); -+ FREEBUF(nodebuf); -+ -+ please_wait_done(); -+ - if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) - refresh_context(curtask, curpid); - -@@ -1426,6 +1728,8 @@ - do_verify = 1; - else if (tt->refresh_task_table == refresh_pid_hash_task_table) - do_verify = 2; -+ else if (tt->refresh_task_table == refresh_hlist_task_table_v2) -+ do_verify = 2; - else - do_verify = 0; - -@@ -1581,6 +1885,9 @@ - char * - fill_task_struct(ulong task) - { -+ if (XEN_HYPER_MODE()) -+ return NULL; -+ - if (!IS_LAST_TASK_READ(task)) { - if (!readmem(task, KVADDR, tt->task_struct, - SIZE(task_struct), "fill_task_struct", -@@ -1632,6 +1939,9 @@ - bt->stackbase); - } - -+ if (XEN_HYPER_MODE()) -+ return; -+ - if (!IS_LAST_TASK_READ(bt->task)) { - if (bt->stackbase == bt->task) { - BCOPY(bt->stackbuf, tt->task_struct, SIZE(task_struct)); -@@ -1893,7 +2203,7 @@ - BZERO(&psinfo, sizeof(struct psinfo)); - flag = 0; - -- while ((c = getopt(argcnt, args, "stcpkul")) != EOF) { -+ while ((c = getopt(argcnt, args, "gstcpkular")) != EOF) { - switch(c) - { - case 'k': -@@ -1907,21 +2217,31 @@ - break; - - /* -- * The remaining flags are all mutually-exclusive. -+ * The a, t, c, p, g and l flags are all mutually-exclusive. 
- */ -+ case 'g': -+ flag &= ~(PS_EXCLUSIVE); -+ flag |= PS_TGID_LIST; -+ break; -+ -+ case 'a': -+ flag &= ~(PS_EXCLUSIVE); -+ flag |= PS_ARGV_ENVP; -+ break; -+ - case 't': -+ flag &= ~(PS_EXCLUSIVE); - flag |= PS_TIMES; -- flag &= ~(PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN); - break; - - case 'c': -+ flag &= ~(PS_EXCLUSIVE); - flag |= PS_CHILD_LIST; -- flag &= ~(PS_PPID_LIST|PS_TIMES|PS_LAST_RUN); - break; - - case 'p': -+ flag &= ~(PS_EXCLUSIVE); - flag |= PS_PPID_LIST; -- flag &= ~(PS_CHILD_LIST|PS_TIMES|PS_LAST_RUN); - break; - - case 'l': -@@ -1932,14 +2252,19 @@ - argerrs++; - break; - } -+ flag &= ~(PS_EXCLUSIVE); - flag |= PS_LAST_RUN; -- flag &= ~(PS_CHILD_LIST|PS_TIMES|PS_PPID_LIST); - break; - - case 's': - flag |= PS_KSTACKP; - break; - -+ case 'r': -+ flag &= ~(PS_EXCLUSIVE); -+ flag |= PS_RLIMIT; -+ break; -+ - default: - argerrs++; - break; -@@ -2020,6 +2345,18 @@ - show_last_run(tc); \ - continue; \ - } \ -+ if (flag & PS_ARGV_ENVP) { \ -+ show_task_args(tc); \ -+ continue; \ -+ } \ -+ if (flag & PS_RLIMIT) { \ -+ show_task_rlimit(tc); \ -+ continue; \ -+ } \ -+ if (flag & PS_TGID_LIST) { \ -+ show_tgid_list(tc->task); \ -+ continue; \ -+ } \ - get_task_mem_usage(tc->task, tm); \ - fprintf(fp, "%s", is_task_active(tc->task) ? "> " : " "); \ - fprintf(fp, "%5ld %5ld %2s %s %3s", \ -@@ -2050,7 +2387,7 @@ - char buf2[BUFSIZE]; - char buf3[BUFSIZE]; - -- if (!(flag & (PS_PPID_LIST|PS_CHILD_LIST|PS_TIMES|PS_LAST_RUN))) -+ if (!(flag & PS_EXCLUSIVE)) - fprintf(fp, - " PID PPID CPU %s ST %%MEM VSZ RSS COMM\n", - flag & PS_KSTACKP ? -@@ -2076,6 +2413,8 @@ - return; - } - -+ pc->curcmd_flags |= TASK_SPECIFIED; -+ - for (ac = 0; ac < psi->argc; ac++) { - tm = &task_mem_usage; - tc = FIRST_CONTEXT(); -@@ -2096,8 +2435,15 @@ - break; - - case PS_BY_CMD: -- if (STREQ(tc->comm, psi->comm[ac])) -- print = TRUE; -+ if (STREQ(tc->comm, psi->comm[ac])) { -+ if (flag & PS_TGID_LIST) { -+ if (tc->pid == task_tgid(tc->task)) -+ print = TRUE; -+ else -+ print = FALSE; -+ } else -+ print = TRUE; -+ } - break; - } - -@@ -2145,6 +2491,229 @@ - } - - /* -+ * Show the argv and envp strings pointed to by mm_struct->arg_start -+ * and mm_struct->env_start. The user addresses need to broken up -+ * into physical on a page-per-page basis because we typically are -+ * not going to be working in the context of the target task. 
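The comment above states the general pattern that show_task_args() applies below: a user virtual range cannot be read in one pass from outside the task's own context, so each page is translated with uvtop() and then read by physical address. A condensed sketch of that loop, with the error paths trimmed and the variable names as used below:

	/* Condensed from show_task_args() below: read [uvaddr, uvaddr+size)
	 * one page at a time, translating each page with uvtop(). */
	while (size > 0) {
		if (!uvtop(tc, uvaddr, &paddr, 0))
			break;				/* page not mapped */

		cnt = PAGESIZE() - PAGEOFFSET(uvaddr);	/* stay within this page */
		if (cnt > size)
			cnt = size;

		if (!readmem(paddr, PHYSADDR, bufptr, cnt,
		    "user stack contents", RETURN_ON_ERROR|QUIET))
			break;

		uvaddr += cnt;
		bufptr += cnt;
		size -= cnt;
	}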
-+ */ -+static void -+show_task_args(struct task_context *tc) -+{ -+ ulong arg_start, arg_end, env_start, env_end; -+ char *buf, *bufptr, *p1; -+ char *as, *ae, *es, *ee; -+ physaddr_t paddr; -+ ulong uvaddr, size, cnt; -+ int c, d; -+ -+ print_task_header(fp, tc, 0); -+ -+ if (!tc || !tc->mm_struct) { /* probably a kernel thread */ -+ error(INFO, "no user stack\n\n"); -+ return; -+ } -+ -+ if (!task_mm(tc->task, TRUE)) -+ return; -+ -+ if (INVALID_MEMBER(mm_struct_arg_start)) { -+ MEMBER_OFFSET_INIT(mm_struct_arg_start, "mm_struct", "arg_start"); -+ MEMBER_OFFSET_INIT(mm_struct_arg_end, "mm_struct", "arg_end"); -+ MEMBER_OFFSET_INIT(mm_struct_env_start, "mm_struct", "env_start"); -+ MEMBER_OFFSET_INIT(mm_struct_env_end, "mm_struct", "env_end"); -+ } -+ -+ arg_start = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_start)); -+ arg_end = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_end)); -+ env_start = ULONG(tt->mm_struct + OFFSET(mm_struct_env_start)); -+ env_end = ULONG(tt->mm_struct + OFFSET(mm_struct_env_end)); -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "arg_start: %lx arg_end: %lx (%ld)\n", -+ arg_start, arg_end, arg_end - arg_start); -+ fprintf(fp, "env_start: %lx env_end: %lx (%ld)\n", -+ env_start, env_end, env_end - env_start); -+ } -+ -+ buf = GETBUF(env_end - arg_start + 1); -+ -+ uvaddr = arg_start; -+ size = env_end - arg_start; -+ bufptr = buf; -+ -+ while (size > 0) { -+ if (!uvtop(tc, uvaddr, &paddr, 0)) { -+ error(INFO, "cannot access user stack address: %lx\n\n", -+ uvaddr); -+ goto bailout; -+ } -+ -+ cnt = PAGESIZE() - PAGEOFFSET(uvaddr); -+ -+ if (cnt > size) -+ cnt = size; -+ -+ if (!readmem(paddr, PHYSADDR, bufptr, cnt, -+ "user stack contents", RETURN_ON_ERROR|QUIET)) { -+ error(INFO, "cannot access user stack address: %lx\n\n", -+ uvaddr); -+ goto bailout; -+ } -+ -+ uvaddr += cnt; -+ bufptr += cnt; -+ size -= cnt; -+ } -+ -+ as = buf; -+ ae = &buf[arg_end - arg_start]; -+ es = &buf[env_start - arg_start]; -+ ee = &buf[env_end - arg_start]; -+ -+ fprintf(fp, "ARG: "); -+ for (p1 = as, c = 0; p1 < ae; p1++) { -+ if (*p1 == NULLCHAR) { -+ if (c) -+ fprintf(fp, " "); -+ c = 0; -+ } else { -+ fprintf(fp, "%c", *p1); -+ c++; -+ } -+ } -+ -+ fprintf(fp, "\nENV: "); -+ for (p1 = es, c = d = 0; p1 < ee; p1++) { -+ if (*p1 == NULLCHAR) { -+ if (c) -+ fprintf(fp, "\n"); -+ c = 0; -+ } else { -+ fprintf(fp, "%s%c", !c && (p1 != es) ? " " : "", *p1); -+ c++, d++; -+ } -+ } -+ fprintf(fp, "\n%s", d ? "" : "\n"); -+ -+bailout: -+ FREEBUF(buf); -+} -+ -+char *rlim_names[] = { -+ /* 0 */ "CPU", -+ /* 1 */ "FSIZE", -+ /* 2 */ "DATA", -+ /* 3 */ "STACK", -+ /* 4 */ "CORE", -+ /* 5 */ "RSS", -+ /* 6 */ "NPROC", -+ /* 7 */ "NOFILE", -+ /* 8 */ "MEMLOCK", -+ /* 9 */ "AS", -+ /* 10 */ "LOCKS", -+ /* 11 */ "SIGPENDING", -+ /* 12 */ "MSGQUEUE", -+ /* 13 */ "NICE", -+ /* 14 */ "RTPRIO", -+ NULL, -+}; -+ -+#ifndef RLIM_INFINITY -+#define RLIM_INFINITY (~0UL) -+#endif -+ -+/* -+ * Show the current and maximum rlimit values. 
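The index comments in rlim_names[] above follow the kernel's generic RLIMIT_* numbering (the ordering used by x86 and most other architectures; a few, such as sparc, historically swap NPROC and NOFILE). Listed here only as an illustrative reminder of that correspondence:

	/* Illustrative: generic Linux RLIMIT_* numbering assumed by
	 * rlim_names[] above, so rlim_names[RLIMIT_NOFILE] is "NOFILE". */
	enum {
		RLIMIT_CPU,		/*  0 */
		RLIMIT_FSIZE,		/*  1 */
		RLIMIT_DATA,		/*  2 */
		RLIMIT_STACK,		/*  3 */
		RLIMIT_CORE,		/*  4 */
		RLIMIT_RSS,		/*  5 */
		RLIMIT_NPROC,		/*  6 */
		RLIMIT_NOFILE,		/*  7 */
		RLIMIT_MEMLOCK,		/*  8 */
		RLIMIT_AS,		/*  9 */
		RLIMIT_LOCKS,		/* 10 */
		RLIMIT_SIGPENDING,	/* 11 */
		RLIMIT_MSGQUEUE,	/* 12 */
		RLIMIT_NICE,		/* 13 */
		RLIMIT_RTPRIO,		/* 14 */
	};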
-+ */ -+static void -+show_task_rlimit(struct task_context *tc) -+{ -+ int i, j, len1, len2, rlimit_index; -+ int in_task_struct, in_signal_struct; -+ char *rlimit_buffer; -+ ulong *p1, rlim_addr; -+ char buf1[BUFSIZE]; -+ char buf2[BUFSIZE]; -+ char buf3[BUFSIZE]; -+ -+ if (!VALID_MEMBER(task_struct_rlim) && !VALID_MEMBER(signal_struct_rlim)) { -+ MEMBER_OFFSET_INIT(task_struct_rlim, "task_struct", "rlim"); -+ MEMBER_OFFSET_INIT(signal_struct_rlim, "signal_struct", "rlim"); -+ STRUCT_SIZE_INIT(rlimit, "rlimit"); -+ if (!VALID_MEMBER(task_struct_rlim) && -+ !VALID_MEMBER(signal_struct_rlim)) -+ error(FATAL, "cannot determine rlimit array location\n"); -+ } else if (!VALID_STRUCT(rlimit)) -+ error(FATAL, "cannot determine rlimit structure definition\n"); -+ -+ in_task_struct = in_signal_struct = FALSE; -+ -+ if (VALID_MEMBER(task_struct_rlim)) { -+ rlimit_index = get_array_length("task_struct.rlim", NULL, 0); -+ in_task_struct = TRUE; -+ } else if (VALID_MEMBER(signal_struct_rlim)) { -+ if (!VALID_MEMBER(task_struct_signal)) -+ error(FATAL, "cannot determine rlimit array location\n"); -+ rlimit_index = get_array_length("signal_struct.rlim", NULL, 0); -+ in_signal_struct = TRUE; -+ } -+ -+ if (!rlimit_index) -+ error(FATAL, "cannot determine rlimit array size\n"); -+ -+ for (i = len1 = 0; i < rlimit_index; i++) { -+ if ((j = strlen(rlim_names[i])) > len1) -+ len1 = j; -+ } -+ len2 = strlen("(unlimited)"); -+ -+ rlimit_buffer = GETBUF(rlimit_index * SIZE(rlimit)); -+ -+ print_task_header(fp, tc, 0); -+ -+ fill_task_struct(tc->task); -+ -+ if (in_task_struct) { -+ BCOPY(tt->task_struct + OFFSET(task_struct_rlim), -+ rlimit_buffer, rlimit_index * SIZE(rlimit)); -+ } else if (in_signal_struct) { -+ rlim_addr = ULONG(tt->task_struct + OFFSET(task_struct_signal)); -+ if (!readmem(rlim_addr + OFFSET(signal_struct_rlim), -+ KVADDR, rlimit_buffer, rlimit_index * SIZE(rlimit), -+ "signal_struct rlimit array", RETURN_ON_ERROR)) { -+ FREEBUF(rlimit_buffer); -+ return; -+ } -+ } -+ -+ fprintf(fp, " %s %s %s\n", -+ mkstring(buf1, len1, RJUST, "RLIMIT"), -+ mkstring(buf2, len2, CENTER|RJUST, "CURRENT"), -+ mkstring(buf3, len2, CENTER|RJUST, "MAXIMUM")); -+ -+ for (p1 = (ulong *)rlimit_buffer, i = 0; i < rlimit_index; i++) { -+ fprintf(fp, " %s ", mkstring(buf1, len1, RJUST, -+ rlim_names[i] ? rlim_names[i] : "(unknown)")); -+ if (*p1 == (ulong)RLIM_INFINITY) -+ fprintf(fp, "(unlimited) "); -+ else -+ fprintf(fp, "%s ", mkstring(buf1, len2, -+ CENTER|LJUST|LONG_DEC, MKSTR(*p1))); -+ p1++; -+ if (*p1 == (ulong)RLIM_INFINITY) -+ fprintf(fp, "(unlimited)\n"); -+ else -+ fprintf(fp, "%s\n", mkstring(buf1, len2, -+ CENTER|LJUST|LONG_DEC, MKSTR(*p1))); -+ p1++; -+ } -+ -+ fprintf(fp, "\n"); -+ -+ FREEBUF(rlimit_buffer); -+} -+ -+/* - * Put either the task_struct address or kernel stack pointer into a string. - * If the kernel stack pointer is requested, piggy-back on top of the - * back trace code to avoid having to deal with machine dependencies, -@@ -2229,11 +2798,8 @@ - - use_kernel_timeval = STRUCT_EXISTS("kernel_timeval"); - get_symbol_data("jiffies", sizeof(long), &jiffies); -- if (symbol_exists("jiffies_64")) { -- get_symbol_data("jiffies_64", sizeof(long long), &jiffies_64); -- if ((jiffies_64 & 0xffffffff00000000ULL) == 0x100000000ULL) -- jiffies_64 &= 0xffffffffULL; -- } -+ if (symbol_exists("jiffies_64")) -+ get_uptime(NULL, &jiffies_64); - tsp = task_start_times; - tc = tcp ? 
tcp : FIRST_CONTEXT(); - -@@ -2330,8 +2896,7 @@ - for (i = 0, tsp = task_start_times; i < tasks; i++, tsp++) { - print_task_header(fp, tsp->tc, 0); - fprintf(fp, " RUN TIME: %s\n", symbol_exists("jiffies_64") ? -- convert_time(jiffies_64 - -- convert_start_time(tsp->start_time, jiffies_64), buf1) : -+ convert_time(convert_start_time(tsp->start_time, jiffies_64), buf1) : - convert_time(jiffies - tsp->start_time, buf1)); - fprintf(fp, " START TIME: %llu\n", tsp->start_time); - if (VALID_MEMBER(task_struct_times)) { -@@ -2397,15 +2962,33 @@ - static ulonglong - convert_start_time(ulonglong start_time, ulonglong current) - { -+ ulong tmp1, tmp2; -+ ulonglong wrapped; -+ - switch(tt->flags & (TIMESPEC | NO_TIMESPEC)) - { - case TIMESPEC: -- if ((start_time * (ulonglong)machdep->hz) > current) -- return current; -+ if ((start_time * (ulonglong)machdep->hz) > current) -+ return 0; - else -- return start_time * (ulonglong)machdep->hz; -+ return current - (start_time * (ulonglong)machdep->hz); - - case NO_TIMESPEC: -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { -+ wrapped = (start_time & 0xffffffff00000000ULL); -+ if (wrapped) { -+ wrapped -= 0x100000000ULL; -+ start_time &= 0x00000000ffffffffULL; -+ start_time |= wrapped; -+ start_time += (ulonglong)(300*machdep->hz); -+ } else { -+ tmp1 = (ulong)(uint)(-300*machdep->hz); -+ tmp2 = (ulong)start_time; -+ start_time = (ulonglong)(tmp2 - tmp1); -+ } -+ } -+ break; -+ - default: - break; - } -@@ -2511,6 +3094,54 @@ - } - - /* -+ * Dump the children of a task. -+ */ -+static void -+show_tgid_list(ulong task) -+{ -+ int i; -+ int cnt; -+ struct task_context *tc; -+ ulong tgid; -+ -+ tc = task_to_context(task); -+ tgid = task_tgid(task); -+ -+ if (tc->pid != tgid) { -+ if (pc->curcmd_flags & TASK_SPECIFIED) { -+ if (!(tc = tgid_to_context(tgid))) -+ return; -+ task = tc->task; -+ } else -+ return; -+ } -+ -+ if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN)) -+ return; -+ -+ print_task_header(fp, tc, 0); -+ -+ tc = FIRST_CONTEXT(); -+ for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) { -+ if (tc->task == task) -+ continue; -+ -+ if (task_tgid(tc->task) == tgid) { -+ INDENT(2); -+ print_task_header(fp, tc, 0); -+ cnt++; -+ if (tc->pid == 0) -+ pc->curcmd_flags |= IDLE_TASK_SHOWN; -+ } -+ } -+ -+ if (!cnt) -+ fprintf(fp, " (no threads)\n"); -+ -+ fprintf(fp, "\n"); -+} -+ -+/* - * Return the first task found that belongs to a pid. - */ - ulong -@@ -2580,6 +3211,26 @@ - return NULL; - } - -+/* -+ * Return a tgid's parent task_context structure. 
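The NO_TIMESPEC branch of convert_start_time() above corrects for the 2.6 kernel's jiffies bias: jiffies starts at INITIAL_JIFFIES, which is (unsigned long)(unsigned int)(-300*HZ), so that wraparound bugs surface shortly after boot. A minimal sketch of the unwrapped case, assuming that definition:

	/* Illustrative only: normalize a 32-bit 2.6 start_time (in jiffies)
	 * to "jiffies since boot", assuming INITIAL_JIFFIES == -300*HZ. */
	#define INITIAL_JIFFIES_32(hz)	((ulong)(uint)(-300 * (hz)))

	static ulong
	jiffies_since_boot(ulong start_time, int hz)
	{
		return start_time - INITIAL_JIFFIES_32(hz); /* i.e. tmp2 - tmp1 above */
	}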
-+ */ -+struct task_context * -+tgid_to_context(ulong parent_tgid) -+{ -+ int i; -+ struct task_context *tc; -+ ulong tgid; -+ -+ tc = FIRST_CONTEXT(); -+ for (i = 0; i < RUNNING_TASKS(); i++, tc++) { -+ tgid = task_tgid(tc->task); -+ if ((tgid == parent_tgid) && (tgid == tc->pid)) -+ return tc; -+ } -+ -+ return NULL; -+} -+ - - /* - * Return the task_context structure of the first task found with a pid, -@@ -2821,15 +3472,15 @@ - int - comm_exists(char *s) - { -- int i; -+ int i, cnt; - struct task_context *tc; - - tc = FIRST_CONTEXT(); -- for (i = 0; i < RUNNING_TASKS(); i++, tc++) -+ for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) - if (STREQ(tc->comm, s)) -- return TRUE; -+ cnt++; - -- return FALSE; -+ return cnt; - } - - /* -@@ -2925,7 +3576,11 @@ - fprintf(fp, "COMMAND: \"%s\"\n", tc->comm); - INDENT(indent); - fprintf(fp, " TASK: %lx ", tc->task); -- if ((cnt = TASKS_PER_PID(tc->pid)) > 1) -+ if ((machdep->flags & (INIT|MCA)) && (tc->pid == 0)) -+ cnt = comm_exists(tc->comm); -+ else -+ cnt = TASKS_PER_PID(tc->pid); -+ if (cnt > 1) - fprintf(fp, "(1 of %d) ", cnt); - if (tt->flags & THREAD_INFO) - fprintf(fp, "[THREAD_INFO: %lx]", tc->thread_info); -@@ -2938,19 +3593,27 @@ - if (is_task_active(tc->task)) { - if (machdep->flags & HWRESET) - fprintf(fp, "(HARDWARE RESET)"); -- else if (machdep->flags & SYSRQ) -+ else if ((pc->flags & SYSRQ) && (tc->task == tt->panic_task)) - fprintf(fp, "(SYSRQ)"); - else if (machdep->flags & INIT) - fprintf(fp, "(INIT)"); -- else if (kt->cpu_flags[tc->processor] & NMI) -+ else if ((machdep->flags & MCA) && (tc->task == tt->panic_task)) -+ fprintf(fp, "(MCA)"); -+ else if ((tc->processor >= 0) && -+ (tc->processor < NR_CPUS) && -+ (kt->cpu_flags[tc->processor] & NMI)) - fprintf(fp, "(NMI)"); -+ else if ((tc->task == tt->panic_task) && -+ XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND)) -+ fprintf(fp, "(SUSPEND)"); - else if (tc->task == tt->panic_task) - fprintf(fp, "(PANIC)"); - else - fprintf(fp, "(ACTIVE)"); - } - -- if (!(pc->flags & RUNTIME) && (tt->flags & PANIC_TASK_NOT_FOUND) && -+ if (!(pc->flags & RUNTIME) && !ACTIVE() && -+ (tt->flags & PANIC_TASK_NOT_FOUND) && - !SYSRQ_TASK(tc->task)) { - fprintf(fp, "\n"); INDENT(indent); - if (machine_type("S390") || machine_type("S390X")) -@@ -3182,6 +3845,22 @@ - return flags; - } - -+/* -+ * Return a task's tgid. -+ */ -+ulong -+task_tgid(ulong task) -+{ -+ uint tgid; -+ -+ fill_task_struct(task); -+ -+ tgid = tt->last_task_read ? 
-+ UINT(tt->task_struct + OFFSET(task_struct_tgid)) : 0; -+ -+ return (ulong)tgid; -+} -+ - ulonglong - task_last_run(ulong task) - { -@@ -3368,6 +4047,12 @@ - task = NO_TASK; - tc = FIRST_CONTEXT(); - -+ /* -+ * --no_panic command line option -+ */ -+ if (tt->flags & PANIC_TASK_NOT_FOUND) -+ goto use_task_0; -+ - if (symbol_exists("panic_threads") && - symbol_exists("panicmsg") && - symbol_exists("panic_processor")) { -@@ -3411,6 +4096,9 @@ - - use_task_0: - -+ if (CRASHDEBUG(1)) -+ error(INFO, "get_panic_context: panic task not found\n"); -+ - tt->flags |= PANIC_TASK_NOT_FOUND; - tc = FIRST_CONTEXT(); - return(tc->task); -@@ -3448,49 +4136,73 @@ - int msg_found; - - BZERO(buf, BUFSIZE); -+ msg_found = FALSE; - -- if (tt->panicmsg) -+ if (tt->panicmsg) { - read_string(tt->panicmsg, buf, BUFSIZE-1); -- else if (LKCD_DUMPFILE()) -+ msg_found = TRUE; -+ } else if (LKCD_DUMPFILE()) { - get_lkcd_panicmsg(buf); -- else { -- msg_found = FALSE; -+ msg_found = TRUE; -+ } - -- open_tmpfile(); -- dump_log(FALSE); -+ if (msg_found == TRUE) -+ return(buf); - -- rewind(pc->tmpfile); -- while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -- if (strstr(buf, "Kernel panic: ")) -- msg_found = TRUE; -- } -- rewind(pc->tmpfile); -- while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -- if (strstr(buf, "Oops: ") || -- strstr(buf, "kernel BUG at")) -- msg_found = TRUE; -- } -- rewind(pc->tmpfile); -- while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -- if (strstr(buf, "SysRq : Netdump") || -- strstr(buf, "SysRq : Crash")) { -- machdep->flags |= SYSRQ; -- msg_found = TRUE; -- } -- } -- rewind(pc->tmpfile); -- while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -- if (strstr(buf, "sysrq") && -- symbol_exists("sysrq_pressed")) -- get_symbol_data("sysrq_pressed", sizeof(int), -- &msg_found); -- } -+ open_tmpfile(); -+ dump_log(FALSE); - -- close_tmpfile(); -+ /* -+ * First check for a SYSRQ-generated crash, and set the -+ * active-task flag appropriately. The message may or -+ * may not be used as the panic message. 
-+ */ -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "SysRq : Crash") || -+ strstr(buf, "SysRq : Trigger a crashdump")) { -+ pc->flags |= SYSRQ; -+ break; -+ } -+ } - -- if (!msg_found) -- BZERO(buf, BUFSIZE); -+ rewind(pc->tmpfile); -+ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "Kernel panic: ")) -+ msg_found = TRUE; - } -+ rewind(pc->tmpfile); -+ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "Oops: ") || -+ strstr(buf, "kernel BUG at")) -+ msg_found = TRUE; -+ } -+ rewind(pc->tmpfile); -+ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "SysRq : Netdump") || -+ strstr(buf, "SysRq : Trigger a crashdump") || -+ strstr(buf, "SysRq : Crash")) { -+ pc->flags |= SYSRQ; -+ msg_found = TRUE; -+ } -+ } -+ rewind(pc->tmpfile); -+ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "sysrq") && -+ symbol_exists("sysrq_pressed")) -+ get_symbol_data("sysrq_pressed", sizeof(int), -+ &msg_found); -+ } -+ rewind(pc->tmpfile); -+ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "Kernel panic - ")) -+ msg_found = TRUE; -+ } -+ -+ close_tmpfile(); -+ -+ if (!msg_found) -+ BZERO(buf, BUFSIZE); - - return(buf); - } -@@ -3517,7 +4229,7 @@ - BZERO(&foreach_data, sizeof(struct foreach_data)); - fd = &foreach_data; - -- while ((c = getopt(argcnt, args, "R:vomlgersStpukcf")) != EOF) { -+ while ((c = getopt(argcnt, args, "R:vomlgersStTpukcf")) != EOF) { - switch(c) - { - case 'R': -@@ -3560,6 +4272,10 @@ - fd->flags |= FOREACH_r_FLAG; - break; - -+ case 'T': -+ fd->flags |= FOREACH_T_FLAG; -+ break; -+ - case 't': - fd->flags |= FOREACH_t_FLAG; - break; -@@ -3754,12 +4470,14 @@ - foreach(struct foreach_data *fd) - { - int i, j, k, a; -- struct task_context *tc; -+ struct task_context *tc, *tgc; - int specified; - int doit; - int subsequent; - ulong cmdflags; -+ ulong tgid; - struct reference reference, *ref; -+ int print_header; - struct bt_info bt_info, *bt; - - /* -@@ -3797,6 +4515,8 @@ - fd->reference ? fd->reference : ""); - } - -+ print_header = TRUE; -+ - for (k = 0; k < fd->keys; k++) { - switch(fd->keyword_array[k]) - { -@@ -3881,6 +4601,14 @@ - error(FATAL, - "sig: -l and -s options are not applicable\n"); - } -+ if (fd->flags & FOREACH_g_FLAG) { -+ if (!hq_open()) { -+ error(INFO, -+ "cannot hash thread group tasks\n"); -+ fd->flags &= ~FOREACH_g_FLAG; -+ } else -+ print_header = FALSE; -+ } - break; - - case FOREACH_TEST: -@@ -3941,7 +4669,7 @@ - if (fd->reference) { - BZERO(ref, sizeof(struct reference)); - ref->str = fd->reference; -- } else -+ } else if (print_header) - print_task_header(fp, tc, subsequent++); - - for (k = 0; k < fd->keys; k++) { -@@ -3962,7 +4690,12 @@ - bt->flags |= BT_SYMBOLIC_ARGS; - if (fd->flags & FOREACH_t_FLAG) - bt->flags |= BT_TEXT_SYMBOLS; -- if (fd->flags & FOREACH_o_FLAG) -+ if (fd->flags & FOREACH_T_FLAG) { -+ bt->flags |= BT_TEXT_SYMBOLS; -+ bt->flags |= BT_TEXT_SYMBOLS_ALL; -+ } -+ if ((fd->flags & FOREACH_o_FLAG) || -+ (kt->flags & USE_OLD_BT)) - bt->flags |= BT_OLD_BACK_TRACE; - if (fd->flags & FOREACH_e_FLAG) - bt->flags |= BT_EFRAME_SEARCH; -@@ -4010,8 +4743,14 @@ - - case FOREACH_SIG: - pc->curcmd = "sig"; -- do_sig(tc->task, FOREACH_SIG, -- fd->reference ? 
ref : NULL); -+ if (fd->flags & FOREACH_g_FLAG) { -+ tgid = task_tgid(tc->task); -+ tgc = tgid_to_context(tgid); -+ if (hq_enter(tgc->task)) -+ do_sig_thread_group(tgc->task); -+ } else -+ do_sig(tc->task, FOREACH_SIG, -+ fd->reference ? ref : NULL); - break; - - case FOREACH_SET: -@@ -4075,6 +4814,11 @@ - nlm_files_dump(); - } - break; -+ -+ case FOREACH_SIG: -+ if (fd->flags & FOREACH_g_FLAG) -+ hq_close(); -+ break; - } - } - -@@ -4161,7 +4905,7 @@ - fd = &foreach_data; - fd->keys = 1; - fd->keyword_array[0] = FOREACH_BT; -- fd->flags |= FOREACH_t_FLAG; -+ fd->flags |= (FOREACH_t_FLAG|FOREACH_o_FLAG); - - dietask = lasttask = NO_TASK; - -@@ -4188,6 +4932,12 @@ - break; - } - -+ if (strstr(buf, " crash_kexec at ") || -+ strstr(buf, " .crash_kexec at ")) { -+ found = TRUE; -+ break; -+ } -+ - if (strstr(buf, " die at ")) { - switch (dietask) - { -@@ -4211,6 +4961,10 @@ - if (dietask == (NO_TASK+1)) - error(WARNING, "multiple active tasks have called die\n\n"); - -+ if (CRASHDEBUG(1) && found) -+ error(INFO, "panic_search: %lx (via foreach bt)\n", -+ lasttask); -+ - found_panic_task: - populate_panic_threads(); - -@@ -4229,6 +4983,9 @@ - } - } - -+ if (CRASHDEBUG(1)) -+ error(INFO, "panic_search: failed (via foreach bt)\n"); -+ - return NULL; - } - -@@ -4240,25 +4997,28 @@ - { - ulong task; - -- if (LKCD_DUMPFILE()) -- return(get_lkcd_panic_task()); -- - if (NETDUMP_DUMPFILE()) { - task = pc->flags & REM_NETDUMP ? - tt->panic_task : get_netdump_panic_task(); - if (task) - return task; -- if (get_active_set()) -- return(get_active_set_panic_task()); -- } -- -- if (DISKDUMP_DUMPFILE()) { -+ } else if (KDUMP_DUMPFILE()) { -+ task = get_kdump_panic_task(); -+ if (task) -+ return task; -+ } else if (DISKDUMP_DUMPFILE()) { - task = get_diskdump_panic_task(); - if (task) - return task; -- if (get_active_set()) -- return(get_active_set_panic_task()); -- } -+ } else if (XENDUMP_DUMPFILE()) { -+ task = get_xendump_panic_task(); -+ if (task) -+ return task; -+ } else if (LKCD_DUMPFILE()) -+ return(get_lkcd_panic_task()); -+ -+ if (get_active_set()) -+ return(get_active_set_panic_task()); - - return NO_TASK; - } -@@ -4298,14 +5058,17 @@ - - tc = FIRST_CONTEXT(); - for (i = 0; i < RUNNING_TASKS(); i++, tc++) { -- if (task_has_cpu(tc->task, NULL)) { -+ if (task_has_cpu(tc->task, NULL) && -+ (tc->processor >= 0) && -+ (tc->processor < NR_CPUS)) { - tt->panic_threads[tc->processor] = tc->task; - found++; - } - } - - if (!found && !(kt->flags & SMP) && -- (LKCD_DUMPFILE() || NETDUMP_DUMPFILE() || DISKDUMP_DUMPFILE())) -+ (LKCD_DUMPFILE() || NETDUMP_DUMPFILE() || -+ KDUMP_DUMPFILE() || DISKDUMP_DUMPFILE())) - tt->panic_threads[0] = get_dumpfile_panic_task(); - } - -@@ -4331,7 +5094,7 @@ - void - dump_task_table(int verbose) - { -- int i; -+ int i, nr_cpus; - struct task_context *tc; - char buf[BUFSIZE]; - int others, wrap, flen; -@@ -4363,6 +5126,8 @@ - fprintf(fp, "refresh_pid_hash_task_table()\n"); - else if (tt->refresh_task_table == refresh_hlist_task_table) - fprintf(fp, "refresh_hlist_task_table()\n"); -+ else if (tt->refresh_task_table == refresh_hlist_task_table_v2) -+ fprintf(fp, "refresh_hlist_task_table_v2()\n"); - else - fprintf(fp, "%lx\n", (ulong)tt->refresh_task_table); - -@@ -4443,7 +5208,9 @@ - wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; - flen = sizeof(void *) == SIZEOF_32BIT ? 8 : 16; - -- for (i = 0; i < NR_CPUS; i++) { -+ nr_cpus = kt->kernel_NR_CPUS ? 
kt->kernel_NR_CPUS : NR_CPUS; -+ -+ for (i = 0; i < nr_cpus; i++) { - if ((i % wrap) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%.*lx ", flen, tt->panic_threads[i]); -@@ -4451,7 +5218,7 @@ - fprintf(fp, "\n"); - - fprintf(fp, " panic_ksp:"); -- for (i = 0; i < NR_CPUS; i++) { -+ for (i = 0; i < nr_cpus; i++) { - if ((i % wrap) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%.*lx ", flen, tt->panic_ksp[i]); -@@ -4459,7 +5226,7 @@ - fprintf(fp, "\n"); - - fprintf(fp, " hardirq_ctx:"); -- for (i = 0; i < NR_CPUS; i++) { -+ for (i = 0; i < nr_cpus; i++) { - if ((i % wrap) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%.*lx ", flen, tt->hardirq_ctx[i]); -@@ -4467,7 +5234,7 @@ - fprintf(fp, "\n"); - - fprintf(fp, " hardirq_tasks:"); -- for (i = 0; i < NR_CPUS; i++) { -+ for (i = 0; i < nr_cpus; i++) { - if ((i % wrap) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%.*lx ", flen, tt->hardirq_tasks[i]); -@@ -4475,7 +5242,7 @@ - fprintf(fp, "\n"); - - fprintf(fp, " softirq_ctx:"); -- for (i = 0; i < NR_CPUS; i++) { -+ for (i = 0; i < nr_cpus; i++) { - if ((i % wrap) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%.*lx ", flen, tt->softirq_ctx[i]); -@@ -4483,7 +5250,7 @@ - fprintf(fp, "\n"); - - fprintf(fp, " softirq_tasks:"); -- for (i = 0; i < NR_CPUS; i++) { -+ for (i = 0; i < nr_cpus; i++) { - if ((i % wrap) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%.*lx ", flen, tt->softirq_tasks[i]); -@@ -4491,7 +5258,7 @@ - fprintf(fp, "\n"); - - fprintf(fp, " idle_threads:"); -- for (i = 0; i < NR_CPUS; i++) { -+ for (i = 0; i < nr_cpus; i++) { - if ((i % wrap) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%.*lx ", flen, tt->idle_threads[i]); -@@ -4499,7 +5266,7 @@ - fprintf(fp, "\n"); - - fprintf(fp, " active_set:"); -- for (i = 0; i < NR_CPUS; i++) { -+ for (i = 0; i < nr_cpus; i++) { - if ((i % wrap) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%.*lx ", flen, tt->active_set[i]); -@@ -4546,6 +5313,9 @@ - if ((tc->pid == 0) && !STREQ(tc->comm, pc->program_name)) - return TRUE; - -+ if (_ZOMBIE_ == TASK_STATE_UNINITIALIZED) -+ initialize_task_state(); -+ - if (IS_ZOMBIE(task) || IS_EXITING(task)) - return FALSE; - -@@ -4799,23 +5569,55 @@ - tt->flags &= ~ACTIVE_SET; - } - --#define RESOLVE_PANIC_AND_DIE_CALLERS() \ -- if ((panic_task > (NO_TASK+1)) && !die_task) \ -- return panic_task; \ -- \ -- if (panic_task && die_task) { \ -- error(WARNING, \ -- "multiple active tasks have called die and/or panic\n\n"); \ -- return NO_TASK; \ -- } \ -- \ -- if (die_task > (NO_TASK+1)) \ -- return die_task; \ -- else if (die_task == (NO_TASK+1)) \ -- error(WARNING, \ -+#define RESOLVE_PANIC_AND_DIE_CALLERS() \ -+ if (xen_panic_task) { \ -+ if (CRASHDEBUG(1)) \ -+ error(INFO, \ -+ "get_active_set_panic_task: %lx (xen_panic_event)\n", \ -+ xen_panic_task); \ -+ return xen_panic_task; \ -+ } \ -+ if (crash_kexec_task) { \ -+ if (CRASHDEBUG(1)) \ -+ error(INFO, \ -+ "get_active_set_panic_task: %lx (crash_kexec)\n", \ -+ crash_kexec_task); \ -+ return crash_kexec_task; \ -+ } \ -+ if ((panic_task > (NO_TASK+1)) && !die_task) { \ -+ if (CRASHDEBUG(1)) \ -+ fprintf(fp, \ -+ "get_active_set_panic_task: %lx (panic)\n", \ -+ panic_task); \ -+ return panic_task; \ -+ } \ -+ \ -+ if (panic_task && die_task) { \ -+ if ((panic_task > (NO_TASK+1)) && \ -+ (panic_task == die_task)) { \ -+ if (CRASHDEBUG(1)) \ -+ fprintf(fp, \ -+ "get_active_set_panic_task: %lx (panic)\n", \ -+ panic_task); \ -+ return panic_task; \ -+ } \ -+ error(WARNING, \ -+ "multiple active tasks have called die and/or panic\n\n"); \ -+ goto no_panic_task_found; \ -+ } \ -+ \ -+ if 
(die_task > (NO_TASK+1)) { \ -+ if (CRASHDEBUG(1)) \ -+ fprintf(fp, \ -+ "get_active_set_panic_task: %lx (die)\n", \ -+ die_task); \ -+ return die_task; \ -+ } \ -+ else if (die_task == (NO_TASK+1)) \ -+ error(WARNING, \ - "multiple active tasks have called die\n\n"); - --#define SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS() \ -+#define SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS() \ - while (fgets(buf, BUFSIZE, pc->tmpfile)) { \ - if (strstr(buf, " die+")) { \ - switch (die_task) \ -@@ -4833,12 +5635,30 @@ - { \ - case NO_TASK: \ - panic_task = task; \ -+ if (XENDUMP_DUMPFILE()) \ -+ xendump_panic_hook(buf); \ - break; \ - default: \ - panic_task = NO_TASK+1; \ - break; \ - } \ - } \ -+ if (strstr(buf, " crash_kexec+") || \ -+ strstr(buf, " .crash_kexec+")) { \ -+ crash_kexec_task = task; \ -+ } \ -+ if (strstr(buf, " machine_kexec+") || \ -+ strstr(buf, " .machine_kexec+")) { \ -+ crash_kexec_task = task; \ -+ } \ -+ if (strstr(buf, " xen_panic_event+") || \ -+ strstr(buf, " .xen_panic_event+")){ \ -+ xen_panic_task = task; \ -+ xendump_panic_hook(buf); \ -+ } \ -+ if (machine_type("IA64") && XENDUMP_DUMPFILE() && !xen_panic_task && \ -+ strstr(buf, " sysrq_handle_crashdump+")) \ -+ xen_sysrq_task = task; \ - } - - /* -@@ -4850,11 +5670,14 @@ - int i, j, found; - ulong task; - char buf[BUFSIZE]; -- ulong panic_task, die_task; -+ ulong panic_task, die_task, crash_kexec_task; -+ ulong xen_panic_task; -+ ulong xen_sysrq_task; - char *tp; - struct task_context *tc; - -- panic_task = die_task = NO_TASK; -+ panic_task = die_task = crash_kexec_task = xen_panic_task = NO_TASK; -+ xen_sysrq_task = NO_TASK; - - for (i = 0; i < NR_CPUS; i++) { - if (!(task = tt->active_set[i])) -@@ -4867,15 +5690,16 @@ - if ((tp = fill_task_struct(task))) { - if ((tc = store_context(NULL, task, tp))) - tt->running_tasks++; -+ else -+ continue; - } -- continue; - } - - open_tmpfile(); - raw_stack_dump(GET_STACKBASE(task), STACKSIZE()); - rewind(pc->tmpfile); - -- SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS(); -+ SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); - - close_tmpfile(); - } -@@ -4903,7 +5727,7 @@ - raw_stack_dump(tt->hardirq_ctx[i], SIZE(thread_union)); - rewind(pc->tmpfile); - -- SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS(); -+ SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); - - close_tmpfile(); - } -@@ -4930,7 +5754,7 @@ - raw_stack_dump(tt->softirq_ctx[i], SIZE(thread_union)); - rewind(pc->tmpfile); - -- SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS(); -+ SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); - - close_tmpfile(); - } -@@ -4938,6 +5762,28 @@ - RESOLVE_PANIC_AND_DIE_CALLERS(); - } - -+ if (crash_kexec_task) { -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "get_active_set_panic_task: %lx (crash_kexec)\n", -+ crash_kexec_task); -+ return crash_kexec_task; -+ } -+ -+ if (xen_sysrq_task) { -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "get_active_set_panic_task: %lx (sysrq_handle_crashdump)\n", -+ xen_sysrq_task); -+ return xen_sysrq_task; -+ } -+ -+no_panic_task_found: -+ -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "get_active_set_panic_task: failed\n"); -+ - return NO_TASK; - } - -@@ -4997,6 +5843,11 @@ - ulong *tlist; - struct task_context *tc; - -+ if (VALID_MEMBER(rq_cfs)) { -+ dump_CFS_runqueues(); -+ return; -+ } -+ - if (VALID_MEMBER(runqueue_arrays)) { - dump_runqueues(); - return; -@@ -5169,11 +6020,230 @@ - } - } - -+/* -+ * CFS scheduler uses Red-Black trees to maintain run queue. 
-+ */ -+struct rb_node -+{ -+ unsigned long rb_parent_color; -+#define RB_RED 0 -+#define RB_BLACK 1 -+ struct rb_node *rb_right; -+ struct rb_node *rb_left; -+}; -+ -+struct rb_root -+{ -+ struct rb_node *rb_node; -+}; -+ -+static struct rb_node * -+rb_first(struct rb_root *root) -+{ -+ struct rb_root rloc; -+ struct rb_node *n; -+ struct rb_node nloc; -+ -+ readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), -+ "rb_root", FAULT_ON_ERROR); -+ -+ n = rloc.rb_node; -+ if (!n) -+ return NULL; -+ while (rb_left(n, &nloc)) -+ n = nloc.rb_left; -+ -+ return n; -+} -+ -+static struct rb_node * -+rb_parent(struct rb_node *node, struct rb_node *nloc) -+{ -+ readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), -+ "rb_node", FAULT_ON_ERROR); -+ -+ return (struct rb_node *)(nloc->rb_parent_color & ~3); -+} -+ -+static struct rb_node * -+rb_right(struct rb_node *node, struct rb_node *nloc) -+{ -+ readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), -+ "rb_node", FAULT_ON_ERROR); -+ -+ return nloc->rb_right; -+} -+ -+static struct rb_node * -+rb_left(struct rb_node *node, struct rb_node *nloc) -+{ -+ readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), -+ "rb_node", FAULT_ON_ERROR); -+ -+ return nloc->rb_left; -+} -+ -+static struct rb_node * -+rb_next(struct rb_node *node) -+{ -+ struct rb_node nloc; -+ struct rb_node *parent; -+ -+ parent = rb_parent(node, &nloc); -+ -+ if (parent == node) -+ return NULL; -+ -+ if (nloc.rb_right) { -+ node = nloc.rb_right; -+ while (rb_left(node, &nloc)) -+ node = nloc.rb_left; -+ return node; -+ } -+ -+ while ((parent = rb_parent(node, &nloc)) && (node == rb_right(parent, &nloc))) -+ node = parent; -+ -+ return parent; -+} -+ -+static void -+dump_CFS_runqueues(void) -+{ -+ int cpu; -+ ulong runq; -+ char *runqbuf; -+ ulong leftmost, tasks_timeline; -+ struct task_context *tc; -+ long nr_running, cfs_rq_nr_running; -+ struct rb_root *root; -+ struct rb_node *node; -+ -+ if (INVALID_MEMBER(rq_rt)) { -+ MEMBER_OFFSET_INIT(rq_rt, "rq", "rt"); -+ MEMBER_OFFSET_INIT(rq_nr_running, "rq", "nr_running"); -+ MEMBER_OFFSET_INIT(task_struct_se, "task_struct", "se"); -+ MEMBER_OFFSET_INIT(sched_entity_run_node, "sched_entity", -+ "run_node"); -+ MEMBER_OFFSET_INIT(cfs_rq_rb_leftmost, "cfs_rq", "rb_leftmost"); -+ MEMBER_OFFSET_INIT(cfs_rq_nr_running, "cfs_rq", "nr_running"); -+ MEMBER_OFFSET_INIT(cfs_rq_tasks_timeline, "cfs_rq", -+ "tasks_timeline"); -+ MEMBER_OFFSET_INIT(rt_rq_active, "rt_rq", "active"); -+ MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct", -+ "run_list"); -+ } -+ -+ if (!symbol_exists("per_cpu__runqueues")) -+ error(FATAL, "per_cpu__runqueues does not exist\n"); -+ -+ runq = symbol_value("per_cpu__runqueues"); -+ -+ runqbuf = GETBUF(SIZE(runqueue)); -+ -+ for (cpu = 0; cpu < kt->cpus; cpu++) { -+ if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { -+ runq = symbol_value("per_cpu__runqueues") + -+ kt->__per_cpu_offset[cpu]; -+ } else -+ runq = symbol_value("per_cpu__runqueues"); -+ -+ fprintf(fp, "RUNQUEUES[%d]: %lx\n", cpu, runq); -+ -+ readmem(runq, KVADDR, runqbuf, SIZE(runqueue), -+ "per-cpu rq", FAULT_ON_ERROR); -+ leftmost = ULONG(runqbuf + OFFSET(rq_cfs) + -+ OFFSET(cfs_rq_rb_leftmost)); -+ tasks_timeline = ULONG(runqbuf + OFFSET(rq_cfs) + -+ OFFSET(cfs_rq_tasks_timeline)); -+ nr_running = LONG(runqbuf + OFFSET(rq_nr_running)); -+ cfs_rq_nr_running = ULONG(runqbuf + OFFSET(rq_cfs) + -+ OFFSET(cfs_rq_nr_running)); -+ -+ dump_RT_prio_array(nr_running != cfs_rq_nr_running, -+ runq + OFFSET(rq_rt) + OFFSET(rt_rq_active), -+ 
&runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)]); -+ -+ root = (struct rb_root *)(runq + OFFSET(rq_cfs) + OFFSET(cfs_rq_tasks_timeline)); -+ fprintf(fp, " CFS RB_ROOT: %lx\n", (ulong)root); -+ -+ if (!leftmost) -+ continue; -+ -+ for (node = rb_first(root); node; node = rb_next(node)) { -+ tc = task_to_context((ulong)node - OFFSET(task_struct_se) - -+ OFFSET(sched_entity_run_node)); -+ if (!tc) -+ continue; -+ INDENT(2); -+ print_task_header(fp, tc, FALSE); -+ } -+ } -+} -+ -+static void -+dump_RT_prio_array(int active, ulong k_prio_array, char *u_prio_array) -+{ -+ int i, c, cnt, qheads; -+ ulong offset, kvaddr, uvaddr; -+ ulong list_head[2]; -+ struct list_data list_data, *ld; -+ struct task_context *tc; -+ ulong *tlist; -+ -+ fprintf(fp, " RT PRIO_ARRAY: %lx\n", k_prio_array); -+ -+ if (!active) -+ return; -+ -+ qheads = (i = ARRAY_LENGTH(prio_array_queue)) ? -+ i : get_array_length("prio_array.queue", NULL, SIZE(list_head)); -+ -+ ld = &list_data; -+ -+ for (i = 0; i < qheads; i++) { -+ offset = OFFSET(prio_array_queue) + (i * SIZE(list_head)); -+ kvaddr = k_prio_array + offset; -+ uvaddr = (ulong)u_prio_array + offset; -+ BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2); -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "prio_array[%d] @ %lx => %lx/%lx\n", -+ i, kvaddr, list_head[0], list_head[1]); -+ -+ if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr)) -+ continue; -+ -+ fprintf(fp, " [%3d] ", i); -+ -+ BZERO(ld, sizeof(struct list_data)); -+ ld->start = list_head[0]; -+ ld->list_head_offset = OFFSET(task_struct_run_list); -+ ld->end = kvaddr; -+ hq_open(); -+ cnt = do_list(ld); -+ hq_close(); -+ tlist = (ulong *)GETBUF((cnt) * sizeof(ulong)); -+ cnt = retrieve_list(tlist, cnt); -+ for (c = 0; c < cnt; c++) { -+ if (!(tc = task_to_context(tlist[c]))) -+ continue; -+ if (c) -+ INDENT(8); -+ print_task_header(fp, tc, FALSE); -+ } -+ FREEBUF(tlist); -+ } -+} -+ - #undef _NSIG - #define _NSIG 64 - #define _NSIG_BPW machdep->bits - #define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -+#undef SIGRTMIN -+#define SIGRTMIN 32 -+ - static struct signame { - char *name; - char *altname; -@@ -5209,23 +6279,56 @@ - /* 28 */ {"SIGWINCH", NULL}, - /* 29 */ {"SIGIO", "SIGPOLL"}, - /* 30 */ {"SIGPWR", NULL}, -- /* 31 */ {"SIGSYS", NULL}, -+ /* 31 */ {"SIGSYS", "SIGUNUSED"}, - {NULL, NULL}, /* Real time signals start here. */ - }; - -+static int -+sigrt_minmax(int *min, int *max) -+{ -+ int sigrtmax, j; -+ -+ sigrtmax = THIS_KERNEL_VERSION < LINUX(2,5,0) ? -+ _NSIG - 1 : _NSIG; -+ -+ if (min && max) { -+ j = sigrtmax-SIGRTMIN-1; -+ *max = j / 2; -+ *min = j - *max; -+ } -+ -+ return sigrtmax; -+} -+ - static void - signame_list(void) - { -- int i; -+ int i, sigrtmax, j, min, max; - -- for (i = 0; i < _NSIG; i++) { -- if (!signame[i].name) -- continue; -+ sigrtmax = sigrt_minmax(&min, &max); -+ j = 1; - -- fprintf(fp, "%s[%d] %s", i < 10 ? " " : "", -- i, signame[i].name); -- if (signame[i].altname) -- fprintf(fp, "/%s", signame[i].altname); -+ for (i = 1; i <= sigrtmax; i++) { -+ if ((i == SIGRTMIN) || (i == sigrtmax)) { -+ fprintf(fp, "[%d] %s", i, -+ (i== SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX"); -+ } else if (i > SIGRTMIN) { -+ if (j <= min){ -+ fprintf(fp, "[%d] %s%d", i , "SIGRTMIN+", j); -+ j++; -+ } else if (max >= 1) { -+ fprintf(fp, "[%d] %s%d", i , "SIGRTMAX-",max); -+ max--; -+ } -+ } else { -+ if (!signame[i].name) -+ continue; -+ -+ fprintf(fp, "%s[%d] %s", i < 10 ? 
" " : "", -+ i, signame[i].name); -+ if (signame[i].altname) -+ fprintf(fp, "/%s", signame[i].altname); -+ } - fprintf(fp, "\n"); - } - } -@@ -5236,8 +6339,7 @@ - static void - translate_sigset(ulonglong sigset) - { -- int i, c, bit, len; -- ulonglong mask, sig; -+ int sigrtmax, min, max, i, j, c, len; - char buf[BUFSIZE]; - - if (!sigset) { -@@ -5246,21 +6348,42 @@ - } - - len = 0; -+ sigrtmax= sigrt_minmax(&min, &max); -+ j = 1; -+ -+ for (i = 1, c = 0; i <= sigrtmax; i++) { -+ if (sigset & (ulonglong)1) { -+ if (i == SIGRTMIN || i == sigrtmax) -+ sprintf(buf, "%s%s", c++ ? " " : "", -+ (i==SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX"); -+ else if (i > SIGRTMIN) { -+ if (j <= min) -+ sprintf(buf, "%s%s%d", -+ c++ ? " " : "", "SIGRTMIN+", j); -+ else if (max >= 1) -+ sprintf(buf, "%s%s%d", -+ c++ ? " " : "", "SIGRTMAX-", max); -+ } else -+ sprintf(buf, "%s%s", c++ ? " " : "", -+ signame[i].name); - -- for (i = c = 0; i < (_NSIG/2); i++) { -- mask = (ulong)(1) << i; -- if ((sig = (sigset & mask))) { -- bit = ffs((int)sig); -- sprintf(buf, "%s%s", c++ ? " " : "", -- signame[bit].name); - if ((len + strlen(buf)) > 80) { - shift_string_left(buf, 1); - fprintf(fp, "\n"); - len = 0; - } -+ - len += strlen(buf); - fprintf(fp, buf); - } -+ -+ sigset >>= 1; -+ if (i > SIGRTMIN) { -+ if (j <= min) -+ j++; -+ else if (max >= 1) -+ max--; -+ } - } - fprintf(fp, "\n"); - } -@@ -5290,13 +6413,14 @@ - struct task_context *tc; - ulong *tasklist; - char *siglist; -+ int thread_group = FALSE; - - tasklist = (ulong *)GETBUF((MAXARGS+NR_CPUS)*sizeof(ulong)); - ref = (struct reference *)GETBUF(sizeof(struct reference)); - siglist = GETBUF(BUFSIZE); - ref->str = siglist; - -- while ((c = getopt(argcnt, args, "lR:s:")) != EOF) { -+ while ((c = getopt(argcnt, args, "lR:s:g")) != EOF) { - switch(c) - { - case 's': -@@ -5314,6 +6438,10 @@ - signame_list(); - return; - -+ case 'g': -+ pc->curcmd_flags |= TASK_SPECIFIED; -+ thread_group = TRUE; -+ break; - default: - argerrs++; - break; -@@ -5360,12 +6488,67 @@ - tasklist[tcnt++] = CURRENT_TASK(); - - for (c = 0; c < tcnt; c++) { -- do_sig(tasklist[c], 0, strlen(ref->str) ? ref : NULL); -- fprintf(fp, "\n"); -+ if (thread_group) -+ do_sig_thread_group(tasklist[c]); -+ else { -+ do_sig(tasklist[c], 0, strlen(ref->str) ? ref : NULL); -+ fprintf(fp, "\n"); -+ } - } - - } - -+ -+/* -+ * Do the work for the "sig -g" command option, coming from sig or foreach. -+ */ -+static void -+do_sig_thread_group(ulong task) -+{ -+ int i; -+ int cnt; -+ struct task_context *tc; -+ ulong tgid; -+ -+ tc = task_to_context(task); -+ tgid = task_tgid(task); -+ -+ if (tc->pid != tgid) { -+ if (pc->curcmd_flags & TASK_SPECIFIED) { -+ if (!(tc = tgid_to_context(tgid))) -+ return; -+ task = tc->task; -+ } else -+ return; -+ } -+ -+ if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN)) -+ return; -+ -+ print_task_header(fp, tc, 0); -+ dump_signal_data(tc, THREAD_GROUP_LEVEL); -+ fprintf(fp, "\n "); -+ print_task_header(fp, tc, 0); -+ dump_signal_data(tc, TASK_LEVEL|TASK_INDENT); -+ -+ tc = FIRST_CONTEXT(); -+ for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) { -+ if (tc->task == task) -+ continue; -+ -+ if (task_tgid(tc->task) == tgid) { -+ fprintf(fp, "\n "); -+ print_task_header(fp, tc, 0); -+ dump_signal_data(tc, TASK_LEVEL|TASK_INDENT); -+ cnt++; -+ if (tc->pid == 0) -+ pc->curcmd_flags |= IDLE_TASK_SHOWN; -+ } -+ } -+ -+ fprintf(fp, "\n"); -+} -+ - /* - * Do the work for the sig command, coming from sig or foreach. 
- */ -@@ -5381,7 +6564,7 @@ - else { - if (!(flags & FOREACH_TASK)) - print_task_header(fp, tc, 0); -- dump_signal_data(tc); -+ dump_signal_data(tc, TASK_LEVEL|THREAD_GROUP_LEVEL); - } - } - -@@ -5401,40 +6584,34 @@ - * Dump all signal-handling data for a task. - */ - static void --dump_signal_data(struct task_context *tc) -+dump_signal_data(struct task_context *tc, ulong flags) - { -- int i, others, use_sighand; -- int translate, sig, sigpending; -+ int i, sigrtmax, others, use_sighand; -+ int translate, sigpending; - uint ti_flags; - ulonglong sigset, blocked, mask; -- ulong signal_struct, kaddr, handler, flags, sigqueue, next; -+ ulong signal_struct, kaddr, handler, sa_flags, sigqueue; - ulong sighand_struct; - long size; - char *signal_buf, *uaddr; -+ ulong shared_pending, signal; - char buf1[BUFSIZE]; - char buf2[BUFSIZE]; - char buf3[BUFSIZE]; - char buf4[BUFSIZE]; - -- sigset = task_signal(tc->task); -+ if (VALID_STRUCT(sigqueue) && !VALID_MEMBER(sigqueue_next)) { -+ MEMBER_OFFSET_INIT(sigqueue_next, "sigqueue", "next"); -+ MEMBER_OFFSET_INIT(sigqueue_list, "sigqueue", "list"); -+ MEMBER_OFFSET_INIT(sigqueue_info, "sigqueue", "info"); -+ } else if (!VALID_MEMBER(signal_queue_next)) { -+ MEMBER_OFFSET_INIT(signal_queue_next, "signal_queue", "next"); -+ MEMBER_OFFSET_INIT(signal_queue_info, "signal_queue", "info"); -+ } -+ -+ sigset = task_signal(tc->task, 0); - if (!tt->last_task_read) - return; -- blocked = task_blocked(tc->task); -- -- if (VALID_MEMBER(task_struct_sigpending)) -- sigpending = INT(tt->task_struct + -- OFFSET(task_struct_sigpending)); -- else if (VALID_MEMBER(thread_info_flags)) { -- fill_thread_info(tc->thread_info); -- ti_flags = UINT(tt->thread_info + OFFSET(thread_info_flags)); -- sigpending = ti_flags & (1<task_struct + -@@ -5443,143 +6620,259 @@ - signal_struct = ULONG(tt->task_struct + - OFFSET(task_struct_signal)); - -- fprintf(fp, "SIGNAL_STRUCT: %lx ", signal_struct); -- - size = MAX(SIZE(signal_struct), VALID_SIZE(signal_queue) ? - SIZE(signal_queue) : SIZE(sigqueue)); - if (VALID_SIZE(sighand_struct)) - size = MAX(size, SIZE(sighand_struct)); - signal_buf = GETBUF(size); - -- readmem(signal_struct, KVADDR, signal_buf, -- SIZE(signal_struct), "signal_struct buffer", -- FAULT_ON_ERROR); -- fprintf(fp, "COUNT: %d\n", -- INT(signal_buf + OFFSET(signal_struct_count))); -- -- fprintf(fp, " SIG %s %s %s %s\n", -- mkstring(buf1, VADDR_PRLEN == 8 ? 9 : VADDR_PRLEN, -- CENTER, "SIGACTION"), -+ if (signal_struct) -+ readmem(signal_struct, KVADDR, signal_buf, -+ SIZE(signal_struct), "signal_struct buffer", -+ FAULT_ON_ERROR); -+ -+ /* -+ * Signal dispositions (thread group level). -+ */ -+ if (flags & THREAD_GROUP_LEVEL) { -+ if (flags & TASK_INDENT) -+ INDENT(2); -+ fprintf(fp, "SIGNAL_STRUCT: %lx ", signal_struct); -+ if (!signal_struct) { -+ fprintf(fp, "\n"); -+ return; -+ } -+ fprintf(fp, "COUNT: %d\n", -+ INT(signal_buf + OFFSET(signal_struct_count))); -+ -+ if (flags & TASK_INDENT) -+ INDENT(2); -+ fprintf(fp, " SIG %s %s %s %s\n", -+ mkstring(buf1, VADDR_PRLEN == 8 ? 
9 : VADDR_PRLEN, -+ CENTER, "SIGACTION"), - mkstring(buf2, UVADDR_PRLEN, RJUST, "HANDLER"), - mkstring(buf3, 16, CENTER, "MASK"), - mkstring(buf4, VADDR_PRLEN, LJUST, "FLAGS")); - -- if (VALID_MEMBER(task_struct_sighand)) { -- sighand_struct = ULONG(tt->task_struct + -- OFFSET(task_struct_sighand)); -- readmem(sighand_struct, KVADDR, signal_buf, -- SIZE(sighand_struct), "sighand_struct buffer", -- FAULT_ON_ERROR); -- use_sighand = TRUE; -- } else -- use_sighand = FALSE; -- -- for (i = 1; i < _NSIG; i++) { -- fprintf(fp, "%s[%d] ", i < 10 ? " " : "", i); -- -- if (use_sighand) { -- kaddr = sighand_struct + OFFSET(sighand_struct_action) + -- ((i-1) * SIZE(k_sigaction)); -- uaddr = signal_buf + OFFSET(sighand_struct_action) + -- ((i-1) * SIZE(k_sigaction)); -- } else { -- kaddr = signal_struct + OFFSET(signal_struct_action) + -- ((i-1) * SIZE(k_sigaction)); -- uaddr = signal_buf + OFFSET(signal_struct_action) + -- ((i-1) * SIZE(k_sigaction)); -- } -+ if (VALID_MEMBER(task_struct_sighand)) { -+ sighand_struct = ULONG(tt->task_struct + -+ OFFSET(task_struct_sighand)); -+ readmem(sighand_struct, KVADDR, signal_buf, -+ SIZE(sighand_struct), "sighand_struct buffer", -+ FAULT_ON_ERROR); -+ use_sighand = TRUE; -+ } else -+ use_sighand = FALSE; - -- handler = ULONG(uaddr + OFFSET(sigaction_sa_handler)); -- switch ((long)handler) -- { -- case -1: -- mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_ERR"); -- break; -- case 0: -- mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_DFL"); -- break; -- case 1: -- mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_IGN"); -- break; -- default: -- mkstring(buf1, UVADDR_PRLEN, RJUST|LONG_HEX, -- MKSTR(handler)); -- break; -- } -+ sigrtmax = sigrt_minmax(NULL, NULL); - -- mask = sigaction_mask((ulong)uaddr); -- flags = ULONG(uaddr + OFFSET(sigaction_sa_flags)); -+ for (i = 1; i <= sigrtmax; i++) { -+ if (flags & TASK_INDENT) -+ INDENT(2); - -- fprintf(fp, "%s%s %s %016llx %lx ", -- space(MINSPACE-1), -- mkstring(buf2,UVADDR_PRLEN,LJUST|LONG_HEX,MKSTR(kaddr)), -- buf1, -- mask, -- flags); -- -- if (flags) { -- others = 0; translate = 1; -- if (flags & SA_NOCLDSTOP) -- fprintf(fp, "%s%sSA_NOCLDSTOP", -- translate-- > 0 ? "(" : "", -- others++ ? "|" : ""); -+ fprintf(fp, "%s[%d] ", i < 10 ? " " : "", i); -+ -+ if (use_sighand) { -+ kaddr = sighand_struct + -+ OFFSET(sighand_struct_action) + -+ ((i-1) * SIZE(k_sigaction)); -+ uaddr = signal_buf + -+ OFFSET(sighand_struct_action) + -+ ((i-1) * SIZE(k_sigaction)); -+ } else { -+ kaddr = signal_struct + -+ OFFSET(signal_struct_action) + -+ ((i-1) * SIZE(k_sigaction)); -+ uaddr = signal_buf + -+ OFFSET(signal_struct_action) + -+ ((i-1) * SIZE(k_sigaction)); -+ } -+ -+ handler = ULONG(uaddr + OFFSET(sigaction_sa_handler)); -+ switch ((long)handler) -+ { -+ case -1: -+ mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_ERR"); -+ break; -+ case 0: -+ mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_DFL"); -+ break; -+ case 1: -+ mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_IGN"); -+ break; -+ default: -+ mkstring(buf1, UVADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(handler)); -+ break; -+ } -+ -+ mask = sigaction_mask((ulong)uaddr); -+ sa_flags = ULONG(uaddr + OFFSET(sigaction_sa_flags)); -+ -+ fprintf(fp, "%s%s %s %016llx %lx ", -+ space(MINSPACE-1), -+ mkstring(buf2, -+ UVADDR_PRLEN,LJUST|LONG_HEX,MKSTR(kaddr)), -+ buf1, -+ mask, -+ sa_flags); -+ -+ if (sa_flags) { -+ others = 0; translate = 1; -+ if (sa_flags & SA_NOCLDSTOP) -+ fprintf(fp, "%s%sSA_NOCLDSTOP", -+ translate-- > 0 ? "(" : "", -+ others++ ? 
"|" : ""); - #ifdef SA_RESTORER -- if (flags & SA_RESTORER) -- fprintf(fp, "%s%sSA_RESTORER", -- translate-- > 0 ? "(" : "", -- others++ ? "|" : ""); -+ if (sa_flags & SA_RESTORER) -+ fprintf(fp, "%s%sSA_RESTORER", -+ translate-- > 0 ? "(" : "", -+ others++ ? "|" : ""); - #endif - #ifdef SA_NOCLDWAIT -- if (flags & SA_NOCLDWAIT) -- fprintf(fp, "%s%sSA_NOCLDWAIT", -- translate-- > 0 ? "(" : "", -- others++ ? "|" : ""); -+ if (sa_flags & SA_NOCLDWAIT) -+ fprintf(fp, "%s%sSA_NOCLDWAIT", -+ translate-- > 0 ? "(" : "", -+ others++ ? "|" : ""); - #endif -- if (flags & SA_SIGINFO) -- fprintf(fp, "%s%sSA_SIGINFO", -- translate-- > 0 ? "(" : "", -- others++ ? "|" : ""); -- if (flags & SA_ONSTACK) -- fprintf(fp, "%s%sSA_ONSTACK", -- translate-- > 0 ? "(" : "", -- others++ ? "|" : ""); -- if (flags & SA_RESTART) -- fprintf(fp, "%s%sSA_RESTART", -- translate-- > 0 ? "(" : "", -- others++ ? "|" : ""); -- if (flags & SA_NODEFER) -- fprintf(fp, "%s%sSA_NODEFER", -- translate-- > 0 ? "(" : "", -- others++ ? "|" : ""); -- if (flags & SA_RESETHAND) -- fprintf(fp, "%s%sSA_RESETHAND", -- translate-- > 0 ? "(" : "", -- others++ ? "|" : ""); -- if (translate < 1) -- fprintf(fp, ")"); -- } -- -- fprintf(fp, "\n"); -- } -- -- if (VALID_MEMBER(task_struct_sigqueue)) -- sigqueue = ULONG(tt->task_struct + -- OFFSET(task_struct_sigqueue)); -- -- else if (VALID_MEMBER(task_struct_pending)) -- sigqueue = ULONG(tt->task_struct + -- OFFSET(task_struct_pending) + -- OFFSET_OPTION(sigpending_head, sigpending_list)); -- -- if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue)) -- sigqueue = 0; -- -- if (sigqueue) -- fprintf(fp, "SIGQUEUE: SIG %s\n", -- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO")); -- else -- fprintf(fp, "SIGQUEUE: (empty)\n"); -+ if (sa_flags & SA_SIGINFO) -+ fprintf(fp, "%s%sSA_SIGINFO", -+ translate-- > 0 ? "(" : "", -+ others++ ? "|" : ""); -+ if (sa_flags & SA_ONSTACK) -+ fprintf(fp, "%s%sSA_ONSTACK", -+ translate-- > 0 ? "(" : "", -+ others++ ? "|" : ""); -+ if (sa_flags & SA_RESTART) -+ fprintf(fp, "%s%sSA_RESTART", -+ translate-- > 0 ? "(" : "", -+ others++ ? "|" : ""); -+ if (sa_flags & SA_NODEFER) -+ fprintf(fp, "%s%sSA_NODEFER", -+ translate-- > 0 ? "(" : "", -+ others++ ? "|" : ""); -+ if (sa_flags & SA_RESETHAND) -+ fprintf(fp, "%s%sSA_RESETHAND", -+ translate-- > 0 ? "(" : "", -+ others++ ? "|" : ""); -+ if (translate < 1) -+ fprintf(fp, ")"); -+ } -+ -+ fprintf(fp, "\n"); -+ } -+ } -+ -+ if (flags & TASK_LEVEL) { -+ /* -+ * Pending signals (task level). -+ */ -+ if (VALID_MEMBER(task_struct_sigpending)) -+ sigpending = INT(tt->task_struct + -+ OFFSET(task_struct_sigpending)); -+ else if (VALID_MEMBER(thread_info_flags)) { -+ fill_thread_info(tc->thread_info); -+ ti_flags = UINT(tt->thread_info + OFFSET(thread_info_flags)); -+ sigpending = ti_flags & (1<task); -+ if (flags & TASK_INDENT) -+ INDENT(2); -+ fprintf(fp, " BLOCKED: %016llx\n", blocked); -+ -+ /* -+ * Pending queue (task level). 
-+ */ -+ -+ if (flags & TASK_INDENT) -+ INDENT(2); -+ if (VALID_MEMBER(signal_struct_shared_pending)) { -+ fprintf(fp, "PRIVATE_PENDING\n"); -+ if (flags & TASK_INDENT) -+ INDENT(2); -+ } -+ fprintf(fp, " SIGNAL: %016llx\n", sigset); -+ -+ if (VALID_MEMBER(task_struct_sigqueue)) -+ sigqueue = ULONG(tt->task_struct + -+ OFFSET(task_struct_sigqueue)); -+ -+ else if (VALID_MEMBER(task_struct_pending)) -+ sigqueue = ULONG(tt->task_struct + -+ OFFSET(task_struct_pending) + -+ OFFSET_OPTION(sigpending_head, -+ sigpending_list)); -+ -+ if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue)) -+ sigqueue = 0; -+ -+ if (flags & TASK_INDENT) -+ INDENT(2); -+ if (sigqueue) { -+ fprintf(fp, " SIGQUEUE: SIG %s\n", -+ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO")); -+ sigqueue_list(sigqueue); -+ } else -+ fprintf(fp, " SIGQUEUE: (empty)\n"); -+ } -+ -+ /* -+ * Pending queue (thread group level). -+ */ -+ if ((flags & THREAD_GROUP_LEVEL) && -+ VALID_MEMBER(signal_struct_shared_pending)) { - -+ fprintf(fp, "SHARED_PENDING\n"); -+ shared_pending = signal_struct + OFFSET(signal_struct_shared_pending); -+ signal = shared_pending + OFFSET(sigpending_signal); -+ readmem(signal, KVADDR, signal_buf,SIZE(sigpending_signal), -+ "signal", FAULT_ON_ERROR); -+ sigset = task_signal(0, (ulong*)signal_buf); -+ if (flags & TASK_INDENT) -+ INDENT(2); -+ fprintf(fp, " SIGNAL: %016llx\n", sigset); -+ sigqueue = (shared_pending + -+ OFFSET_OPTION(sigpending_head, sigpending_list) + -+ OFFSET(list_head_next)); -+ readmem(sigqueue,KVADDR, signal_buf, -+ SIZE(sigqueue), "sigqueue", FAULT_ON_ERROR); -+ sigqueue = ULONG(signal_buf); -+ -+ if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue)) -+ sigqueue = 0; -+ if (flags & TASK_INDENT) -+ INDENT(2); -+ if (sigqueue) { -+ fprintf(fp, " SIGQUEUE: SIG %s\n", -+ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO")); -+ sigqueue_list(sigqueue); -+ } else -+ fprintf(fp, " SIGQUEUE: (empty)\n"); -+ } -+ FREEBUF(signal_buf); -+} -+ -+/* -+ * Dump a pending signal queue (private/shared). -+ */ -+ -+static void sigqueue_list(ulong sigqueue) { -+ ulong sigqueue_save, next; -+ int sig; -+ char *signal_buf; -+ long size; -+ size = VALID_SIZE(signal_queue) ? SIZE(signal_queue) : SIZE(sigqueue); -+ signal_buf = GETBUF(size); -+ -+ sigqueue_save = sigqueue; - while (sigqueue) { - readmem(sigqueue, KVADDR, signal_buf, - SIZE_OPTION(signal_queue, sigqueue), -@@ -5597,14 +6890,17 @@ - OFFSET(siginfo_si_signo)); - } - -- fprintf(fp, " %3d %lx\n", -+ if (sigqueue_save == next) -+ break; -+ -+ fprintf(fp, " %3d %lx\n", - sig, sigqueue + - OFFSET_OPTION(signal_queue_info, sigqueue_info)); - - sigqueue = next; - } -- - FREEBUF(signal_buf); -+ - } - - /* -@@ -5614,12 +6910,13 @@ - */ - - static ulonglong --task_signal(ulong task) -+task_signal(ulong task, ulong *signal) - { - ulonglong sigset; - ulong *sigset_ptr; - -- fill_task_struct(task); -+ if (task) { -+ fill_task_struct(task); - - if (!tt->last_task_read) - return 0; -@@ -5633,6 +6930,10 @@ - OFFSET(task_struct_signal)); - } else - return 0; -+ } else if (signal) { -+ sigset_ptr = signal; -+ } else -+ return 0; - - switch (_NSIG_WORDS) - { ---- crash/kernel.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/kernel.c 2007-07-31 16:05:45.000000000 -0400 -@@ -1,8 +1,8 @@ - /* kernel.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. 
-+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -16,11 +16,12 @@ - */ - - #include "defs.h" -+#include "xen_hyper_defs.h" - #include - - static void do_module_cmd(ulong, char *, ulong, char *, char *); - static char *find_module_objfile(char *, char *, char *); --static char *get_uptime(char *); -+static char *module_objfile_search(char *, char *, char *); - static char *get_loadavg(char *); - static void get_lkcd_regs(struct bt_info *, ulong *, ulong *); - static void dump_sys_call_table(char *, int); -@@ -42,328 +43,435 @@ - static void verify_namelist(void); - static char *debug_kernel_version(char *); - static int restore_stack(struct bt_info *); -+static ulong __xen_m2p(ulonglong, ulong); -+static int search_mapping_page(ulong, ulong *, ulong *, ulong *); -+static void read_in_kernel_config_err(int, char *); -+static void BUG_bytes_init(void); -+static int BUG_x86(void); -+static int BUG_x86_64(void); - - - /* - * Gather a few kernel basics. - */ - void --kernel_init(int when) -+kernel_init() - { -- int i; -- char *p1, *p2, buf[BUFSIZE];; -+ int i, c; -+ char *p1, *p2, buf[BUFSIZE]; - struct syment *sp1, *sp2; -+ char *rqstruct; -+ char *irq_desc_type_name; - - if (pc->flags & KERNEL_DEBUG_QUERY) - return; - -- switch (when) -- { -- case PRE_GDB: -- kt->stext = symbol_value("_stext"); -- kt->etext = symbol_value("_etext"); -- get_text_init_space(); -- if (symbol_exists("__init_begin")) { -- kt->init_begin = symbol_value("__init_begin"); -- kt->init_end = symbol_value("__init_end"); -- } -- kt->end = symbol_value("_end"); -+ kt->stext = symbol_value("_stext"); -+ kt->etext = symbol_value("_etext"); -+ get_text_init_space(); -+ if (symbol_exists("__init_begin")) { -+ kt->init_begin = symbol_value("__init_begin"); -+ kt->init_end = symbol_value("__init_end"); -+ } -+ kt->end = symbol_value("_end"); - -- if (symbol_exists("smp_num_cpus")) { -- kt->flags |= SMP; -- get_symbol_data("smp_num_cpus", sizeof(int), &kt->cpus); -- if (kt->cpus < 1 || kt->cpus > NR_CPUS) -- error(WARNING, -- "invalid value: smp_num_cpus: %d\n", -- kt->cpus); -- } else if (symbol_exists("__per_cpu_offset")) { -- kt->flags |= SMP; -- kt->cpus = 1; -- } else -- kt->cpus = 1; -- -- if ((sp1 = symbol_search("__per_cpu_start")) && -- (sp2 = symbol_search("__per_cpu_end")) && -- (sp1->type == 'A') && (sp2->type == 'A') && -- (sp2->value > sp1->value)) -- kt->flags |= SMP|PER_CPU_OFF; -+ /* -+ * For the Xen architecture, default to writable page tables unless: -+ * -+ * (1) it's an "xm save" CANONICAL_PAGE_TABLES dumpfile, or -+ * (2) the --shadow_page_tables option was explicitly entered. -+ * -+ * But if the "phys_to_maching_mapping" array does not exist, and -+ * it's not an "xm save" canonical dumpfile, then we have no choice -+ * but to presume shadow page tables. 
-+ */ -+ if (symbol_exists("xen_start_info")) { -+ kt->flags |= ARCH_XEN; -+ if (!(kt->xen_flags & (SHADOW_PAGE_TABLES|CANONICAL_PAGE_TABLES))) -+ kt->xen_flags |= WRITABLE_PAGE_TABLES; -+ if (symbol_exists("phys_to_machine_mapping")) -+ get_symbol_data("phys_to_machine_mapping", sizeof(ulong), -+ &kt->phys_to_machine_mapping); -+ else if (!(kt->xen_flags & CANONICAL_PAGE_TABLES)) { -+ kt->xen_flags &= ~WRITABLE_PAGE_TABLES; -+ kt->xen_flags |= SHADOW_PAGE_TABLES; -+ } -+ if (machine_type("X86")) -+ get_symbol_data("max_pfn", sizeof(ulong), &kt->p2m_table_size); -+ if (machine_type("X86_64")) -+ get_symbol_data("end_pfn", sizeof(ulong), &kt->p2m_table_size); -+ if ((kt->m2p_page = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc m2p page."); -+ } -+ -+ if (symbol_exists("smp_num_cpus")) { -+ kt->flags |= SMP; -+ get_symbol_data("smp_num_cpus", sizeof(int), &kt->cpus); -+ if (kt->cpus < 1 || kt->cpus > NR_CPUS) -+ error(WARNING, -+ "invalid value: smp_num_cpus: %d\n", -+ kt->cpus); -+ } else if (symbol_exists("__per_cpu_offset")) { -+ kt->flags |= SMP; -+ kt->cpus = 1; -+ } else -+ kt->cpus = 1; -+ -+ if ((sp1 = symbol_search("__per_cpu_start")) && -+ (sp2 = symbol_search("__per_cpu_end")) && -+ (sp1->type == 'A' || sp1->type == 'D') && -+ (sp2->type == 'A' || sp2->type == 'D') && -+ (sp2->value > sp1->value)) -+ kt->flags |= SMP|PER_CPU_OFF; - -- get_symbol_data("xtime", sizeof(struct timespec), &kt->date); -+ get_symbol_data("xtime", sizeof(struct timespec), &kt->date); - -- if (pc->flags & GET_TIMESTAMP) { -- fprintf(fp, "%s\n\n", -- strip_linefeeds(ctime(&kt->date.tv_sec))); -- clean_exit(0); -- } -+ if (pc->flags & GET_TIMESTAMP) { -+ fprintf(fp, "%s\n\n", -+ strip_linefeeds(ctime(&kt->date.tv_sec))); -+ clean_exit(0); -+ } - -- readmem(symbol_value("system_utsname"), KVADDR, &kt->utsname, -- sizeof(struct new_utsname), "system_utsname", -- FAULT_ON_ERROR); -- strncpy(buf, kt->utsname.release, MIN(strlen(kt->utsname.release), 65)); -- if (ascii_string(kt->utsname.release)) { -- p1 = p2 = buf; -- while (*p2 != '.') -- p2++; -- *p2 = NULLCHAR; -- kt->kernel_version[0] = atoi(p1); -- p1 = ++p2; -- while (*p2 != '.') -- p2++; -- *p2 = NULLCHAR; -- kt->kernel_version[1] = atoi(p1); -- p1 = ++p2; -- while ((*p2 >= '0') && (*p2 <= '9')) -- p2++; -- *p2 = NULLCHAR; -- kt->kernel_version[2] = atoi(p1); -- } -- break; -+ if (symbol_exists("system_utsname")) -+ readmem(symbol_value("system_utsname"), KVADDR, &kt->utsname, -+ sizeof(struct new_utsname), "system_utsname", -+ RETURN_ON_ERROR); -+ else if (symbol_exists("init_uts_ns")) -+ readmem(symbol_value("init_uts_ns") + sizeof(int), -+ KVADDR, &kt->utsname, sizeof(struct new_utsname), -+ "init_uts_ns", RETURN_ON_ERROR); -+ else -+ error(INFO, "cannot access utsname information\n\n"); - -- case POST_GDB: -- if (symbol_exists("__per_cpu_offset")) { -+ strncpy(buf, kt->utsname.release, MIN(strlen(kt->utsname.release), 65)); -+ if (ascii_string(kt->utsname.release)) { -+ p1 = p2 = buf; -+ while (*p2 != '.') -+ p2++; -+ *p2 = NULLCHAR; -+ kt->kernel_version[0] = atoi(p1); -+ p1 = ++p2; -+ while (*p2 != '.') -+ p2++; -+ *p2 = NULLCHAR; -+ kt->kernel_version[1] = atoi(p1); -+ p1 = ++p2; -+ while ((*p2 >= '0') && (*p2 <= '9')) -+ p2++; -+ *p2 = NULLCHAR; -+ kt->kernel_version[2] = atoi(p1); -+ } -+ -+ verify_version(); -+ -+ if (symbol_exists("__per_cpu_offset")) { -+ if (LKCD_KERNTYPES()) -+ i = get_cpus_possible(); -+ else - i = get_array_length("__per_cpu_offset", NULL, 0); -- get_symbol_data("__per_cpu_offset", -- sizeof(long)*(i 
<= NR_CPUS ? i : NR_CPUS), -- &kt->__per_cpu_offset[0]); -- kt->flags |= PER_CPU_OFF; -- } -- MEMBER_OFFSET_INIT(runqueue_cpu, "runqueue", "cpu"); -- if (VALID_MEMBER(runqueue_cpu)) { -- MEMBER_OFFSET_INIT(cpu_s_curr, "cpu_s", "curr"); -- MEMBER_OFFSET_INIT(cpu_s_idle, "cpu_s", "idle"); -- STRUCT_SIZE_INIT(cpu_s, "cpu_s"); -- kt->runq_siblings = get_array_length("runqueue.cpu", -- NULL, 0); -- if (symbol_exists("__cpu_idx") && -- symbol_exists("__rq_idx")) { -- if (!readmem(symbol_value("__cpu_idx"), KVADDR, -- &kt->__cpu_idx[0], sizeof(long) * NR_CPUS, -- "__cpu_idx[NR_CPUS]", RETURN_ON_ERROR)) -- error(INFO, -- "cannot read __cpu_idx[NR_CPUS] array\n"); -- if (!readmem(symbol_value("__rq_idx"), KVADDR, -- &kt->__rq_idx[0], sizeof(long) * NR_CPUS, -- "__rq_idx[NR_CPUS]", RETURN_ON_ERROR)) -- error(INFO, -- "cannot read __rq_idx[NR_CPUS] array\n"); -- } else if (kt->runq_siblings > 1) -- error(INFO, -- "runq_siblings: %d: __cpu_idx and __rq_idx arrays don't exist?\n", -- kt->runq_siblings); -- } else { -- MEMBER_OFFSET_INIT(runqueue_idle, "runqueue", "idle"); -- MEMBER_OFFSET_INIT(runqueue_curr, "runqueue", "curr"); -- } -- MEMBER_OFFSET_INIT(runqueue_active, "runqueue", "active"); -- MEMBER_OFFSET_INIT(runqueue_expired, "runqueue", "expired"); -- MEMBER_OFFSET_INIT(runqueue_arrays, "runqueue", "arrays"); -- MEMBER_OFFSET_INIT(prio_array_queue, "prio_array", "queue"); -- MEMBER_OFFSET_INIT(prio_array_nr_active, "prio_array", -- "nr_active"); -- STRUCT_SIZE_INIT(runqueue, "runqueue"); -- STRUCT_SIZE_INIT(prio_array, "prio_array"); -- -- /* -- * In 2.4, smp_send_stop() sets smp_num_cpus back to 1 -- * in some, but not all, architectures. So if a count -- * of 1 is found, be suspicious, and check the -- * init_tasks[NR_CPUS] array (also intro'd in 2.4), -- * for idle thread addresses. For 2.2, prepare for the -- * eventuality by verifying the cpu count with the machine -- * dependent count. -- */ -- if ((kt->flags & SMP) && DUMPFILE() && (kt->cpus == 1)) { -- if (symbol_exists("init_tasks")) { -- ulong init_tasks[NR_CPUS]; -- int nr_cpus; -- -- BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS); -- -- nr_cpus = get_array_length("init_tasks", -- NULL, 0); -- if ((nr_cpus < 1) || (nr_cpus > NR_CPUS)) -- nr_cpus = NR_CPUS; -- -- get_idle_threads(&init_tasks[0], nr_cpus); -- -- for (i = kt->cpus = 0; i < nr_cpus; i++) -- if (init_tasks[i]) -- kt->cpus++; -- } else -- kt->cpus = machdep->get_smp_cpus(); -- } -+ get_symbol_data("__per_cpu_offset", -+ sizeof(long)*((i && (i <= NR_CPUS)) ? i : NR_CPUS), -+ &kt->__per_cpu_offset[0]); -+ kt->flags |= PER_CPU_OFF; -+ } -+ if (STRUCT_EXISTS("runqueue")) -+ rqstruct = "runqueue"; -+ else if (STRUCT_EXISTS("rq")) -+ rqstruct = "rq"; - -- if ((kt->flags & SMP) && ACTIVE() && (kt->cpus == 1) && -- (kt->flags & PER_CPU_OFF)) -+ MEMBER_OFFSET_INIT(runqueue_cpu, rqstruct, "cpu"); -+ /* -+ * 'cpu' does not exist in 'struct rq'. 
-+ */ -+ if (VALID_MEMBER(runqueue_cpu) && -+ (get_array_length("runqueue.cpu", NULL, 0) > 0)) { -+ MEMBER_OFFSET_INIT(cpu_s_curr, "cpu_s", "curr"); -+ MEMBER_OFFSET_INIT(cpu_s_idle, "cpu_s", "idle"); -+ STRUCT_SIZE_INIT(cpu_s, "cpu_s"); -+ kt->runq_siblings = get_array_length("runqueue.cpu", -+ NULL, 0); -+ if (symbol_exists("__cpu_idx") && -+ symbol_exists("__rq_idx")) { -+ if (!readmem(symbol_value("__cpu_idx"), KVADDR, -+ &kt->__cpu_idx[0], sizeof(long) * NR_CPUS, -+ "__cpu_idx[NR_CPUS]", RETURN_ON_ERROR)) -+ error(INFO, -+ "cannot read __cpu_idx[NR_CPUS] array\n"); -+ if (!readmem(symbol_value("__rq_idx"), KVADDR, -+ &kt->__rq_idx[0], sizeof(long) * NR_CPUS, -+ "__rq_idx[NR_CPUS]", RETURN_ON_ERROR)) -+ error(INFO, -+ "cannot read __rq_idx[NR_CPUS] array\n"); -+ } else if (kt->runq_siblings > 1) -+ error(INFO, -+ "runq_siblings: %d: __cpu_idx and __rq_idx arrays don't exist?\n", -+ kt->runq_siblings); -+ } else { -+ MEMBER_OFFSET_INIT(runqueue_idle, rqstruct, "idle"); -+ MEMBER_OFFSET_INIT(runqueue_curr, rqstruct, "curr"); -+ ASSIGN_OFFSET(runqueue_cpu) = INVALID_OFFSET; -+ } -+ MEMBER_OFFSET_INIT(runqueue_active, rqstruct, "active"); -+ MEMBER_OFFSET_INIT(runqueue_expired, rqstruct, "expired"); -+ MEMBER_OFFSET_INIT(runqueue_arrays, rqstruct, "arrays"); -+ MEMBER_OFFSET_INIT(prio_array_queue, "prio_array", "queue"); -+ MEMBER_OFFSET_INIT(prio_array_nr_active, "prio_array", "nr_active"); -+ STRUCT_SIZE_INIT(runqueue, rqstruct); -+ STRUCT_SIZE_INIT(prio_array, "prio_array"); -+ -+ MEMBER_OFFSET_INIT(rq_cfs, "rq", "cfs"); -+ -+ /* -+ * In 2.4, smp_send_stop() sets smp_num_cpus back to 1 -+ * in some, but not all, architectures. So if a count -+ * of 1 is found, be suspicious, and check the -+ * init_tasks[NR_CPUS] array (also intro'd in 2.4), -+ * for idle thread addresses. For 2.2, prepare for the -+ * eventuality by verifying the cpu count with the machine -+ * dependent count. 
-+ */ -+ if ((kt->flags & SMP) && DUMPFILE() && (kt->cpus == 1)) { -+ if (symbol_exists("init_tasks")) { -+ ulong init_tasks[NR_CPUS]; -+ int nr_cpus; -+ -+ BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS); -+ -+ nr_cpus = get_array_length("init_tasks", NULL, 0); -+ if ((nr_cpus < 1) || (nr_cpus > NR_CPUS)) -+ nr_cpus = NR_CPUS; -+ -+ get_idle_threads(&init_tasks[0], nr_cpus); -+ -+ for (i = kt->cpus = 0; i < nr_cpus; i++) -+ if (init_tasks[i]) -+ kt->cpus++; -+ } else - kt->cpus = machdep->get_smp_cpus(); -+ } - -- if (kt->cpus > NR_CPUS) { -- error(WARNING, -- "calculated number of cpus (%d) greater than compiled-in NR_CPUS (%d)\n", -- kt->cpus, NR_CPUS); -- error(FATAL, "recompile crash with larger NR_CPUS\n"); -- } -- -- STRUCT_SIZE_INIT(spinlock_t, "spinlock_t"); -- verify_spinlock(); -- -- STRUCT_SIZE_INIT(list_head, "list_head"); -- MEMBER_OFFSET_INIT(list_head_next, "list_head", "next"); -- MEMBER_OFFSET_INIT(list_head_prev, "list_head", "prev"); -- if (OFFSET(list_head_next) != 0) -- error(WARNING, -- "list_head.next offset: %ld: list command may fail\n", -- OFFSET(list_head_next)); -- -- MEMBER_OFFSET_INIT(hlist_node_next, "hlist_node", "next"); -- MEMBER_OFFSET_INIT(hlist_node_pprev, "hlist_node", "pprev"); -- STRUCT_SIZE_INIT(hlist_head, "hlist_head"); -- STRUCT_SIZE_INIT(hlist_node, "hlist_node"); -- -- MEMBER_OFFSET_INIT(irq_desc_t_status, "irq_desc_t", "status"); -- MEMBER_OFFSET_INIT(irq_desc_t_handler, "irq_desc_t", "handler"); -- MEMBER_OFFSET_INIT(irq_desc_t_action, "irq_desc_t", "action"); -- MEMBER_OFFSET_INIT(irq_desc_t_depth, "irq_desc_t", "depth"); -- MEMBER_OFFSET_INIT(hw_interrupt_type_typename, -+ if ((kt->flags & SMP) && ACTIVE() && (kt->cpus == 1) && -+ (kt->flags & PER_CPU_OFF)) -+ kt->cpus = machdep->get_smp_cpus(); -+ -+ if (kt->cpus_override && (c = atoi(kt->cpus_override))) { -+ error(WARNING, "forcing cpu count to: %d\n\n", c); -+ kt->cpus = c; -+ } -+ -+ if (kt->cpus > NR_CPUS) { -+ error(WARNING, -+ "%s number of cpus (%d) greater than compiled-in NR_CPUS (%d)\n", -+ kt->cpus_override && atoi(kt->cpus_override) ? 
-+ "configured" : "calculated", kt->cpus, NR_CPUS); -+ error(FATAL, "recompile crash with larger NR_CPUS\n"); -+ } -+ -+ STRUCT_SIZE_INIT(spinlock_t, "spinlock_t"); -+ verify_spinlock(); -+ -+ STRUCT_SIZE_INIT(list_head, "list_head"); -+ MEMBER_OFFSET_INIT(list_head_next, "list_head", "next"); -+ MEMBER_OFFSET_INIT(list_head_prev, "list_head", "prev"); -+ if (OFFSET(list_head_next) != 0) -+ error(WARNING, -+ "list_head.next offset: %ld: list command may fail\n", -+ OFFSET(list_head_next)); -+ -+ MEMBER_OFFSET_INIT(hlist_node_next, "hlist_node", "next"); -+ MEMBER_OFFSET_INIT(hlist_node_pprev, "hlist_node", "pprev"); -+ STRUCT_SIZE_INIT(hlist_head, "hlist_head"); -+ STRUCT_SIZE_INIT(hlist_node, "hlist_node"); -+ -+ if (STRUCT_EXISTS("irq_desc_t")) -+ irq_desc_type_name = "irq_desc_t"; -+ else -+ irq_desc_type_name = "irq_desc"; -+ -+ STRUCT_SIZE_INIT(irq_desc_t, irq_desc_type_name); -+ MEMBER_OFFSET_INIT(irq_desc_t_status, irq_desc_type_name, "status"); -+ if (MEMBER_EXISTS(irq_desc_type_name, "handler")) -+ MEMBER_OFFSET_INIT(irq_desc_t_handler, irq_desc_type_name, "handler"); -+ else -+ MEMBER_OFFSET_INIT(irq_desc_t_chip, irq_desc_type_name, "chip"); -+ MEMBER_OFFSET_INIT(irq_desc_t_action, irq_desc_type_name, "action"); -+ MEMBER_OFFSET_INIT(irq_desc_t_depth, irq_desc_type_name, "depth"); -+ if (STRUCT_EXISTS("hw_interrupt_type")) { -+ MEMBER_OFFSET_INIT(hw_interrupt_type_typename, - "hw_interrupt_type", "typename"); - MEMBER_OFFSET_INIT(hw_interrupt_type_startup, - "hw_interrupt_type", "startup"); - MEMBER_OFFSET_INIT(hw_interrupt_type_shutdown, - "hw_interrupt_type", "shutdown"); -- MEMBER_OFFSET_INIT(hw_interrupt_type_handle, -- "hw_interrupt_type", "handle"); -+ MEMBER_OFFSET_INIT(hw_interrupt_type_handle, -+ "hw_interrupt_type", "handle"); - MEMBER_OFFSET_INIT(hw_interrupt_type_enable, - "hw_interrupt_type", "enable"); - MEMBER_OFFSET_INIT(hw_interrupt_type_disable, - "hw_interrupt_type", "disable"); -- MEMBER_OFFSET_INIT(hw_interrupt_type_ack, -+ MEMBER_OFFSET_INIT(hw_interrupt_type_ack, - "hw_interrupt_type", "ack"); -- MEMBER_OFFSET_INIT(hw_interrupt_type_end, -+ MEMBER_OFFSET_INIT(hw_interrupt_type_end, - "hw_interrupt_type", "end"); - MEMBER_OFFSET_INIT(hw_interrupt_type_set_affinity, - "hw_interrupt_type", "set_affinity"); -- MEMBER_OFFSET_INIT(irqaction_handler, "irqaction", "handler"); -- MEMBER_OFFSET_INIT(irqaction_flags, "irqaction", "flags"); -- MEMBER_OFFSET_INIT(irqaction_mask, "irqaction", "mask"); -- MEMBER_OFFSET_INIT(irqaction_name, "irqaction", "name"); -- MEMBER_OFFSET_INIT(irqaction_dev_id, "irqaction", "dev_id"); -- MEMBER_OFFSET_INIT(irqaction_next, "irqaction", "next"); -- -- STRUCT_SIZE_INIT(irq_desc_t, "irq_desc_t"); -- -- STRUCT_SIZE_INIT(irq_cpustat_t, "irq_cpustat_t"); -- MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_active, -- "irq_cpustat_t", "__softirq_active"); -- MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_mask, -- "irq_cpustat_t", "__softirq_mask"); -- -- STRUCT_SIZE_INIT(timer_list, "timer_list"); -- MEMBER_OFFSET_INIT(timer_list_list, "timer_list", "list"); -- MEMBER_OFFSET_INIT(timer_list_next, "timer_list", "next"); -- MEMBER_OFFSET_INIT(timer_list_entry, "timer_list", "entry"); -- MEMBER_OFFSET_INIT(timer_list_expires, "timer_list", "expires"); -- MEMBER_OFFSET_INIT(timer_list_function, -- "timer_list", "function"); -- STRUCT_SIZE_INIT(timer_vec_root, "timer_vec_root"); -- if (VALID_STRUCT(timer_vec_root)) -- MEMBER_OFFSET_INIT(timer_vec_root_vec, -- "timer_vec_root", "vec"); -- STRUCT_SIZE_INIT(timer_vec, "timer_vec"); -- if 
(VALID_STRUCT(timer_vec)) -- MEMBER_OFFSET_INIT(timer_vec_vec, "timer_vec", "vec"); -- -- STRUCT_SIZE_INIT(tvec_root_s, "tvec_root_s"); -- if (VALID_STRUCT(tvec_root_s)) { -- STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_t_base_s"); -- MEMBER_OFFSET_INIT(tvec_t_base_s_tv1, -- "tvec_t_base_s", "tv1"); -- MEMBER_OFFSET_INIT(tvec_root_s_vec, -- "tvec_root_s", "vec"); -- STRUCT_SIZE_INIT(tvec_s, "tvec_s"); -- MEMBER_OFFSET_INIT(tvec_s_vec, "tvec_s", "vec"); -- } -- -- STRUCT_SIZE_INIT(__wait_queue, "__wait_queue"); -- if (VALID_STRUCT(__wait_queue)) { -- MEMBER_OFFSET_INIT(__wait_queue_task, -- "__wait_queue", "task"); -- MEMBER_OFFSET_INIT(__wait_queue_head_task_list, -- "__wait_queue_head", "task_list"); -- MEMBER_OFFSET_INIT(__wait_queue_task_list, -- "__wait_queue", "task_list"); -- } else { -- STRUCT_SIZE_INIT(wait_queue, "wait_queue"); -- if (VALID_STRUCT(wait_queue)) { -- MEMBER_OFFSET_INIT(wait_queue_task, -- "wait_queue", "task"); -- MEMBER_OFFSET_INIT(wait_queue_next, -- "wait_queue", "next"); -- } -+ } else { /* -+ * On later kernels where hw_interrupt_type was replaced -+ * by irq_chip -+ */ -+ MEMBER_OFFSET_INIT(irq_chip_typename, -+ "irq_chip", "name"); -+ MEMBER_OFFSET_INIT(irq_chip_startup, -+ "irq_chip", "startup"); -+ MEMBER_OFFSET_INIT(irq_chip_shutdown, -+ "irq_chip", "shutdown"); -+ MEMBER_OFFSET_INIT(irq_chip_enable, -+ "irq_chip", "enable"); -+ MEMBER_OFFSET_INIT(irq_chip_disable, -+ "irq_chip", "disable"); -+ MEMBER_OFFSET_INIT(irq_chip_ack, -+ "irq_chip", "ack"); -+ MEMBER_OFFSET_INIT(irq_chip_mask, -+ "irq_chip", "mask"); -+ MEMBER_OFFSET_INIT(irq_chip_mask_ack, -+ "irq_chip", "mask_ack"); -+ MEMBER_OFFSET_INIT(irq_chip_unmask, -+ "irq_chip", "unmask"); -+ MEMBER_OFFSET_INIT(irq_chip_eoi, -+ "irq_chip", "eoi"); -+ MEMBER_OFFSET_INIT(irq_chip_end, -+ "irq_chip", "end"); -+ MEMBER_OFFSET_INIT(irq_chip_set_affinity, -+ "irq_chip", "set_affinity"); -+ MEMBER_OFFSET_INIT(irq_chip_retrigger, -+ "irq_chip", "retrigger"); -+ MEMBER_OFFSET_INIT(irq_chip_set_type, -+ "irq_chip", "set_type"); -+ MEMBER_OFFSET_INIT(irq_chip_set_wake, -+ "irq_chip", "set_wake"); -+ } -+ MEMBER_OFFSET_INIT(irqaction_handler, "irqaction", "handler"); -+ MEMBER_OFFSET_INIT(irqaction_flags, "irqaction", "flags"); -+ MEMBER_OFFSET_INIT(irqaction_mask, "irqaction", "mask"); -+ MEMBER_OFFSET_INIT(irqaction_name, "irqaction", "name"); -+ MEMBER_OFFSET_INIT(irqaction_dev_id, "irqaction", "dev_id"); -+ MEMBER_OFFSET_INIT(irqaction_next, "irqaction", "next"); -+ -+ STRUCT_SIZE_INIT(irq_cpustat_t, "irq_cpustat_t"); -+ MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_active, -+ "irq_cpustat_t", "__softirq_active"); -+ MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_mask, -+ "irq_cpustat_t", "__softirq_mask"); -+ -+ STRUCT_SIZE_INIT(timer_list, "timer_list"); -+ MEMBER_OFFSET_INIT(timer_list_list, "timer_list", "list"); -+ MEMBER_OFFSET_INIT(timer_list_next, "timer_list", "next"); -+ MEMBER_OFFSET_INIT(timer_list_entry, "timer_list", "entry"); -+ MEMBER_OFFSET_INIT(timer_list_expires, "timer_list", "expires"); -+ MEMBER_OFFSET_INIT(timer_list_function, "timer_list", "function"); -+ STRUCT_SIZE_INIT(timer_vec_root, "timer_vec_root"); -+ if (VALID_STRUCT(timer_vec_root)) -+ MEMBER_OFFSET_INIT(timer_vec_root_vec, -+ "timer_vec_root", "vec"); -+ STRUCT_SIZE_INIT(timer_vec, "timer_vec"); -+ if (VALID_STRUCT(timer_vec)) -+ MEMBER_OFFSET_INIT(timer_vec_vec, "timer_vec", "vec"); -+ -+ STRUCT_SIZE_INIT(tvec_root_s, "tvec_root_s"); -+ if (VALID_STRUCT(tvec_root_s)) { -+ STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_t_base_s"); -+ 
MEMBER_OFFSET_INIT(tvec_t_base_s_tv1, -+ "tvec_t_base_s", "tv1"); -+ MEMBER_OFFSET_INIT(tvec_root_s_vec, -+ "tvec_root_s", "vec"); -+ STRUCT_SIZE_INIT(tvec_s, "tvec_s"); -+ MEMBER_OFFSET_INIT(tvec_s_vec, "tvec_s", "vec"); -+ } -+ -+ STRUCT_SIZE_INIT(__wait_queue, "__wait_queue"); -+ if (VALID_STRUCT(__wait_queue)) { -+ MEMBER_OFFSET_INIT(__wait_queue_task, -+ "__wait_queue", "task"); -+ MEMBER_OFFSET_INIT(__wait_queue_head_task_list, -+ "__wait_queue_head", "task_list"); -+ MEMBER_OFFSET_INIT(__wait_queue_task_list, -+ "__wait_queue", "task_list"); -+ } else { -+ STRUCT_SIZE_INIT(wait_queue, "wait_queue"); -+ if (VALID_STRUCT(wait_queue)) { -+ MEMBER_OFFSET_INIT(wait_queue_task, -+ "wait_queue", "task"); -+ MEMBER_OFFSET_INIT(wait_queue_next, -+ "wait_queue", "next"); - } -+ } - -- STRUCT_SIZE_INIT(pt_regs, "pt_regs"); -- STRUCT_SIZE_INIT(softirq_state, "softirq_state"); -- STRUCT_SIZE_INIT(desc_struct, "desc_struct"); -- -- STRUCT_SIZE_INIT(char_device_struct, "char_device_struct"); -- if (VALID_STRUCT(char_device_struct)) { -- MEMBER_OFFSET_INIT(char_device_struct_next, -- "char_device_struct", "next"); -- MEMBER_OFFSET_INIT(char_device_struct_name, -- "char_device_struct", "name"); -- MEMBER_OFFSET_INIT(char_device_struct_fops, -- "char_device_struct", "fops"); -- MEMBER_OFFSET_INIT(char_device_struct_major, -- "char_device_struct", "major"); -- } -- -- MEMBER_OFFSET_INIT(module_kallsyms_start, "module", -- "kallsyms_start"); -- -- STRUCT_SIZE_INIT(kallsyms_header, "kallsyms_header"); -- -- if (VALID_MEMBER(module_kallsyms_start) && -- VALID_SIZE(kallsyms_header)) { -- MEMBER_OFFSET_INIT(kallsyms_header_sections, -- "kallsyms_header", "sections"); -- MEMBER_OFFSET_INIT(kallsyms_header_section_off, -- "kallsyms_header", "section_off"); -- MEMBER_OFFSET_INIT(kallsyms_header_symbols, -- "kallsyms_header", "symbols"); -- MEMBER_OFFSET_INIT(kallsyms_header_symbol_off, -- "kallsyms_header", "symbol_off"); -- MEMBER_OFFSET_INIT(kallsyms_header_string_off, -- "kallsyms_header", "string_off"); -- MEMBER_OFFSET_INIT(kallsyms_symbol_section_off, -- "kallsyms_symbol", "section_off"); -- MEMBER_OFFSET_INIT(kallsyms_symbol_symbol_addr, -- "kallsyms_symbol", "symbol_addr"); -- MEMBER_OFFSET_INIT(kallsyms_symbol_name_off, -- "kallsyms_symbol", "name_off"); -- MEMBER_OFFSET_INIT(kallsyms_section_start, -- "kallsyms_section", "start"); -- MEMBER_OFFSET_INIT(kallsyms_section_size, -- "kallsyms_section", "size"); -- MEMBER_OFFSET_INIT(kallsyms_section_name_off, -- "kallsyms_section", "name_off"); -- STRUCT_SIZE_INIT(kallsyms_symbol, "kallsyms_symbol"); -- STRUCT_SIZE_INIT(kallsyms_section, "kallsyms_section"); -+ STRUCT_SIZE_INIT(pt_regs, "pt_regs"); -+ STRUCT_SIZE_INIT(softirq_state, "softirq_state"); -+ STRUCT_SIZE_INIT(desc_struct, "desc_struct"); -+ -+ STRUCT_SIZE_INIT(char_device_struct, "char_device_struct"); -+ if (VALID_STRUCT(char_device_struct)) { -+ MEMBER_OFFSET_INIT(char_device_struct_next, -+ "char_device_struct", "next"); -+ MEMBER_OFFSET_INIT(char_device_struct_name, -+ "char_device_struct", "name"); -+ MEMBER_OFFSET_INIT(char_device_struct_fops, -+ "char_device_struct", "fops"); -+ MEMBER_OFFSET_INIT(char_device_struct_major, -+ "char_device_struct", "major"); -+ } -+ -+ MEMBER_OFFSET_INIT(module_kallsyms_start, "module", -+ "kallsyms_start"); -+ -+ STRUCT_SIZE_INIT(kallsyms_header, "kallsyms_header"); -+ -+ if (VALID_MEMBER(module_kallsyms_start) && -+ VALID_SIZE(kallsyms_header)) { -+ MEMBER_OFFSET_INIT(kallsyms_header_sections, -+ "kallsyms_header", "sections"); -+ 
MEMBER_OFFSET_INIT(kallsyms_header_section_off, -+ "kallsyms_header", "section_off"); -+ MEMBER_OFFSET_INIT(kallsyms_header_symbols, -+ "kallsyms_header", "symbols"); -+ MEMBER_OFFSET_INIT(kallsyms_header_symbol_off, -+ "kallsyms_header", "symbol_off"); -+ MEMBER_OFFSET_INIT(kallsyms_header_string_off, -+ "kallsyms_header", "string_off"); -+ MEMBER_OFFSET_INIT(kallsyms_symbol_section_off, -+ "kallsyms_symbol", "section_off"); -+ MEMBER_OFFSET_INIT(kallsyms_symbol_symbol_addr, -+ "kallsyms_symbol", "symbol_addr"); -+ MEMBER_OFFSET_INIT(kallsyms_symbol_name_off, -+ "kallsyms_symbol", "name_off"); -+ MEMBER_OFFSET_INIT(kallsyms_section_start, -+ "kallsyms_section", "start"); -+ MEMBER_OFFSET_INIT(kallsyms_section_size, -+ "kallsyms_section", "size"); -+ MEMBER_OFFSET_INIT(kallsyms_section_name_off, -+ "kallsyms_section", "name_off"); -+ STRUCT_SIZE_INIT(kallsyms_symbol, "kallsyms_symbol"); -+ STRUCT_SIZE_INIT(kallsyms_section, "kallsyms_section"); - -- if (!(kt->flags & NO_KALLSYMS)) -- kt->flags |= KALLSYMS_V1; -- } -+ if (!(kt->flags & NO_KALLSYMS)) -+ kt->flags |= KALLSYMS_V1; -+ } - -- MEMBER_OFFSET_INIT(module_num_symtab, "module", "num_symtab"); -+ MEMBER_OFFSET_INIT(module_num_symtab, "module", "num_symtab"); - -- if (VALID_MEMBER(module_num_symtab)) { -- MEMBER_OFFSET_INIT(module_symtab, "module", "symtab"); -- MEMBER_OFFSET_INIT(module_strtab, "module", "strtab"); -+ if (VALID_MEMBER(module_num_symtab)) { -+ MEMBER_OFFSET_INIT(module_symtab, "module", "symtab"); -+ MEMBER_OFFSET_INIT(module_strtab, "module", "strtab"); - -- if (!(kt->flags & NO_KALLSYMS)) -- kt->flags |= KALLSYMS_V2; -- } -- break; -+ if (!(kt->flags & NO_KALLSYMS)) -+ kt->flags |= KALLSYMS_V2; - } -+ -+ if (!(kt->flags & DWARF_UNWIND)) -+ kt->flags |= NO_DWARF_UNWIND; -+ -+ BUG_bytes_init(); - } - - /* -@@ -377,7 +485,7 @@ - { - char buf[BUFSIZE]; - ulong linux_banner; -- int argc; -+ int argc, len; - char *arglist[MAXARGS]; - char *p1, *p2; - struct syment *sp; -@@ -389,7 +497,7 @@ - - if (!(sp = symbol_search("linux_banner"))) - error(FATAL, "linux_banner symbol does not exist?\n"); -- else if (sp->type == 'R') -+ else if ((sp->type == 'R') || (sp->type == 'r')) - linux_banner = symbol_value("linux_banner"); - else - get_symbol_data("linux_banner", sizeof(ulong), &linux_banner); -@@ -405,7 +513,8 @@ - error(WARNING, "cannot read linux_banner string\n"); - - if (ACTIVE()) { -- if (strlen(kt->proc_version) && !STREQ(buf, kt->proc_version)) { -+ len = strlen(kt->proc_version) - 1; -+ if ((len > 0) && (strncmp(buf, kt->proc_version, len) != 0)) { - if (CRASHDEBUG(1)) { - fprintf(fp, "/proc/version:\n%s", - kt->proc_version); -@@ -471,6 +580,9 @@ - } - } - -+ if (CRASHDEBUG(1)) -+ gdb_readnow_warning(); -+ - return; - - bad_match: -@@ -614,6 +726,10 @@ - if (pc->flags & KERNEL_DEBUG_QUERY) - return; - -+ /* the kerntypes may not match in terms of gcc version or SMP */ -+ if (LKCD_KERNTYPES()) -+ return; -+ - if (!strlen(kt->utsname.version)) - return; - -@@ -740,7 +856,7 @@ - { - int c; - int do_load_module_filter, do_machdep_filter, reverse; -- int unfiltered, user_mode, count_entered; -+ int unfiltered, user_mode, count_entered, bug_bytes_entered; - ulong curaddr; - ulong revtarget; - ulong count; -@@ -754,7 +870,16 @@ - char buf4[BUFSIZE]; - char buf5[BUFSIZE]; - -- reverse = count_entered = FALSE; -+ if ((argcnt == 2) && STREQ(args[1], "-b")) { -+ fprintf(fp, "encoded bytes being skipped after ud2a: "); -+ if (kt->BUG_bytes < 0) -+ fprintf(fp, "undetermined\n"); -+ else -+ fprintf(fp, "%d\n", kt->BUG_bytes); -+ 
return; -+ } -+ -+ reverse = count_entered = bug_bytes_entered = FALSE; - sp = NULL; - unfiltered = user_mode = do_machdep_filter = do_load_module_filter = 0; - -@@ -763,7 +888,7 @@ - req->flags |= GNU_FROM_TTY_OFF|GNU_RETURN_ON_ERROR; - req->count = 1; - -- while ((c = getopt(argcnt, args, "ulrx")) != EOF) { -+ while ((c = getopt(argcnt, args, "ulrxb:B:")) != EOF) { - switch(c) - { - case 'x': -@@ -786,6 +911,12 @@ - BZERO(buf4, BUFSIZE); - break; - -+ case 'B': -+ case 'b': -+ kt->BUG_bytes = atoi(optarg); -+ bug_bytes_entered = TRUE; -+ break; -+ - default: - argerrs++; - break; -@@ -846,7 +977,7 @@ - if (user_mode) { - sprintf(buf1, "x/%ldi 0x%lx", - req->count ? req->count : 1, req->addr); -- pc->cmdgenspec = pc->cmdgencur; -+ pc->curcmd_flags |= MEMTYPE_UVADDR; - gdb_pass_through(buf1, NULL, 0); - return; - } -@@ -962,7 +1093,9 @@ - close_tmpfile(); - } - } -- else cmd_usage(pc->curcmd, SYNOPSIS); -+ else if (bug_bytes_entered) -+ return; -+ else cmd_usage(pc->curcmd, SYNOPSIS); - - if (!reverse) { - FREEBUF(req->buf); -@@ -1053,6 +1186,185 @@ - FREEBUF(req); - } - -+/* -+ * x86 and x86_64 kernels may have file/line-number encoding -+ * asm()'d in just after the "ud2a" instruction, which confuses -+ * the disassembler and the x86 backtracer. Determine the -+ * number of bytes to skip. -+ */ -+static void -+BUG_bytes_init(void) -+{ -+ if (machine_type("X86")) -+ kt->BUG_bytes = BUG_x86(); -+ else if (machine_type("X86_64")) -+ kt->BUG_bytes = BUG_x86_64(); -+} -+ -+static int -+BUG_x86(void) -+{ -+ struct syment *sp, *spn; -+ char buf1[BUFSIZE]; -+ char buf2[BUFSIZE]; -+ char *arglist[MAXARGS]; -+ ulong vaddr, fileptr; -+ int found; -+ -+ /* -+ * Prior to 2.4.19, a call to do_BUG() preceded -+ * the standalone ud2a instruction. -+ */ -+ if (THIS_KERNEL_VERSION < LINUX(2,4,19)) -+ return 0; -+ -+ /* -+ * 2.6.20 introduced __bug_table support for i386, -+ * but even if CONFIG_DEBUG_BUGVERBOSE is not configured, -+ * the ud2a stands alone. -+ */ -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,20)) -+ return 0; -+ -+ /* -+ * For previous kernel versions, it may depend upon -+ * whether CONFIG_DEBUG_BUGVERBOSE was configured: -+ * -+ * #ifdef CONFIG_DEBUG_BUGVERBOSE -+ * #define BUG() \ -+ * __asm__ __volatile__( "ud2\n" \ -+ * "\t.word %c0\n" \ -+ * "\t.long %c1\n" \ -+ * : : "i" (__LINE__), "i" (__FILE__)) -+ * #else -+ * #define BUG() __asm__ __volatile__("ud2\n") -+ * #endif -+ * -+ * But that's not necessarily true, since there are -+ * pre-2.6.11 versions that force it like so: -+ * -+ * #if 1 /- Set to zero for a slightly smaller kernel -/ -+ * #define BUG() \ -+ * __asm__ __volatile__( "ud2\n" \ -+ * "\t.word %c0\n" \ -+ * "\t.long %c1\n" \ -+ * : : "i" (__LINE__), "i" (__FILE__)) -+ * #else -+ * #define BUG() __asm__ __volatile__("ud2\n") -+ * #endif -+ */ -+ -+ /* -+ * This works if in-kernel config data is available. -+ */ -+ if ((THIS_KERNEL_VERSION >= LINUX(2,6,11)) && -+ (kt->flags & BUGVERBOSE_OFF)) -+ return 0; -+ -+ /* -+ * At this point, it's a pretty safe bet that it's configured, -+ * but to be sure, disassemble a known BUG() caller and -+ * verify that the encoding is there. 
-+ */ -+ -+#define X86_BUG_BYTES (6) /* sizeof(short) + sizeof(pointer) */ -+ -+ if (!(sp = symbol_search("do_exit")) || -+ !(spn = next_symbol(NULL, sp))) -+ return X86_BUG_BYTES; -+ -+ sprintf(buf1, "x/%ldi 0x%lx", spn->value - sp->value, sp->value); -+ -+ found = FALSE; -+ open_tmpfile(); -+ gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR); -+ rewind(pc->tmpfile); -+ while (fgets(buf2, BUFSIZE, pc->tmpfile)) { -+ if (parse_line(buf2, arglist) < 3) -+ continue; -+ -+ if ((vaddr = htol(arglist[0], RETURN_ON_ERROR, NULL)) >= spn->value) -+ continue; -+ -+ if (STREQ(arglist[2], "ud2a")) { -+ found = TRUE; -+ break; -+ } -+ } -+ close_tmpfile(); -+ -+ if (!found || !readmem(vaddr+4, KVADDR, &fileptr, sizeof(ulong), -+ "BUG filename pointer", RETURN_ON_ERROR|QUIET)) -+ return X86_BUG_BYTES; -+ -+ if (!IS_KVADDR(fileptr)) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "no filename pointer: kt->BUG_bytes: 0\n"); -+ return 0; -+ } -+ -+ if (!read_string(fileptr, buf1, BUFSIZE-1)) -+ error(WARNING, -+ "cannot read BUG (ud2a) encoded filename address: %lx\n", -+ fileptr); -+ else if (CRASHDEBUG(1)) -+ fprintf(fp, "BUG bytes filename encoding: [%s]\n", buf1); -+ -+ return X86_BUG_BYTES; -+} -+ -+static int -+BUG_x86_64(void) -+{ -+ /* -+ * 2.6.20 introduced __bug_table support for x86_64, -+ * but even if CONFIG_DEBUG_BUGVERBOSE is not configured, -+ * the ud2a stands alone. -+ */ -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,20)) -+ return 0; -+ -+ /* -+ * The original bug_frame structure looks like this, which -+ * causes the disassembler to go off into the weeds: -+ * -+ * struct bug_frame { -+ * unsigned char ud2[2]; -+ * char *filename; -+ * unsigned short line; -+ * } -+ * -+ * In 2.6.13, fake push and ret instructions were encoded -+ * into the frame so that the disassembly would at least -+ * "work", although the two fake instructions show nonsensical -+ * arguments: -+ * -+ * struct bug_frame { -+ * unsigned char ud2[2]; -+ * unsigned char push; -+ * signed int filename; -+ * unsigned char ret; -+ * unsigned short line; -+ * } -+ */ -+ -+ if (STRUCT_EXISTS("bug_frame")) -+ return (int)(STRUCT_SIZE("bug_frame") - 2); -+ -+ return 0; -+} -+ -+ -+/* -+ * Callback from gdb disassembly code. 
-+ */ -+int -+kernel_BUG_encoding_bytes(void) -+{ -+ return kt->BUG_bytes; -+} -+ - #ifdef NOT_USED - /* - * To avoid premature stoppage/extension of a dis that includes -@@ -1094,7 +1406,8 @@ - } - - #define FRAMESIZE_DEBUG_MESSAGE \ --"usage: bt -F [size|clear|dump|seek|noseek|validate|novalidate] [-I eip]\n If eip: set its associated framesize to size.\n \"validate/novalidate\" will turn on/off V bit for this eip entry.\n If !eip: \"clear\" will clear the framesize cache and RA seek/noseek flags.\n \"dump\" will dump the current framesize cache entries.\n \"seek/noseek\" turns on/off RA seeking.\n \"validate/novalidate\" turns on/off V bit for all current entries.\n" -+"\nx86 usage: bt -F [size|clear|dump|seek|noseek|validate|novalidate] [-I eip]\n If eip: set its associated framesize to size.\n \"validate/novalidate\" will turn on/off V bit for this eip entry.\n If !eip: \"clear\" will clear the framesize cache and RA seek/noseek flags.\n \"dump\" will dump the current framesize cache entries.\n \"seek/noseek\" turns on/off RA seeking.\n \"validate/novalidate\" turns on/off V bit for all current entries.\n\nx86_64 usage: bt -F [clear|dump|validate] [-I rip]\n If rip: \"validate\" will verbosely recalculate the framesize.\n If !rip: \"clear\" will clear the framesize cache.\n \"dump\" will dump the current framesize cache entries.\n" -+ - - /* - * Display a kernel stack backtrace. Arguments may be any number pid or task -@@ -1108,18 +1421,25 @@ - * -s displays arguments symbolically. - */ - -+void -+clone_bt_info(struct bt_info *orig, struct bt_info *new, -+ struct task_context *tc) -+{ -+ BCOPY(orig, new, sizeof(*new)); -+ new->stackbuf = NULL; -+ new->tc = tc; -+ new->task = tc->task; -+ new->stackbase = GET_STACKBASE(tc->task); -+ new->stacktop = GET_STACKTOP(tc->task); -+} -+ - #define BT_SETUP(TC) \ -- BCOPY(&bt_setup, bt, sizeof(struct bt_info)); \ -+ clone_bt_info(&bt_setup, bt, (TC)); \ - if (refptr) { \ - BZERO(&reference, sizeof(struct reference)); \ - bt->ref = &reference; \ - bt->ref->str = refptr; \ -- } \ -- bt->tc = (TC); \ -- bt->task = ((TC)->task); \ -- bt->stackbase = GET_STACKBASE((TC)->task); \ -- bt->stacktop = GET_STACKTOP((TC)->task); \ -- bt->stackbuf = NULL; -+ } - - void - cmd_bt(void) -@@ -1140,8 +1460,11 @@ - bt = &bt_info; - BZERO(bt, sizeof(struct bt_info)); - -- while ((c = getopt(argcnt, args, "fF:I:S:aloreEgstd:R:")) != EOF) { -- switch(c) -+ if (kt->flags & USE_OLD_BT) -+ bt->flags |= BT_OLD_BACK_TRACE; -+ -+ while ((c = getopt(argcnt, args, "fF:I:S:aloreEgstTd:R:O")) != EOF) { -+ switch (c) - { - case 'f': - bt->flags |= BT_FULL; -@@ -1151,6 +1474,28 @@ - bt->flags |= BT_OLD_BACK_TRACE; - break; - -+ case 'O': -+ if (!(machine_type("X86") || machine_type("X86_64"))) -+ option_not_supported(c); -+ else if (kt->flags & USE_OLD_BT) { -+ /* -+ * Make this setting idempotent across the use of -+ * $HOME/.crashrc, ./.crashrc, and "-i input" files. -+ * If we've been here before during initialization, -+ * leave it alone. 
-+ */ -+ if (pc->flags & INIT_IFILE) { -+ error(INFO, "use old bt method by default (already set)\n"); -+ return; -+ } -+ kt->flags &= ~USE_OLD_BT; -+ error(INFO, "use new bt method by default\n"); -+ } else { -+ kt->flags |= USE_OLD_BT; -+ error(INFO, "use old bt method by default\n"); -+ } -+ return; -+ - case 'R': - if (refptr) - error(INFO, "only one -R option allowed\n"); -@@ -1217,6 +1562,9 @@ - } else if (*optarg == '-') { - hook.esp = dtol(optarg+1, FAULT_ON_ERROR, NULL); - hook.esp = (ulong)(0 - (long)hook.esp); -+ } else if (STREQ(optarg, "dwarf") || STREQ(optarg, "cfi")) { -+ if (!(kt->flags & DWARF_UNWIND_CAPABLE)) -+ return; - } else - hook.esp = dtol(optarg, FAULT_ON_ERROR, NULL); - break; -@@ -1241,6 +1589,8 @@ - bt->flags |= BT_SYMBOLIC_ARGS; - break; - -+ case 'T': -+ bt->flags |= BT_TEXT_SYMBOLS_ALL; - case 't': - bt->flags |= BT_TEXT_SYMBOLS; - break; -@@ -1255,6 +1605,11 @@ - } - } - -+ if (XEN_HYPER_MODE()) { -+ if (bt->flags & BT_EFRAME_SEARCH) -+ argerrs++; -+ } -+ - if (argerrs) - cmd_usage(pc->curcmd, SYNOPSIS); - -@@ -1286,6 +1641,35 @@ - return; - } - -+ if (XEN_HYPER_MODE()) { -+#ifdef XEN_HYPERVISOR_ARCH -+ /* "task" means vcpu for xen hypervisor */ -+ if (active) { -+ for (c = 0; c < XEN_HYPER_MAX_CPUS(); c++) { -+ if (!xen_hyper_test_pcpu_id(c)) -+ continue; -+ fake_tc.task = xen_hyper_pcpu_to_active_vcpu(c); -+ BT_SETUP(&fake_tc); -+ xen_hyper_print_bt_header(fp, fake_tc.task, subsequent++); -+ back_trace(bt); -+ } -+ } else { -+ if (args[optind]) { -+ fake_tc.task = xen_hyper_pcpu_to_active_vcpu( -+ convert(args[optind], 0, NULL, NUM_DEC | NUM_HEX)); -+ } else { -+ fake_tc.task = XEN_HYPER_VCPU_LAST_CONTEXT()->vcpu; -+ } -+ BT_SETUP(&fake_tc); -+ xen_hyper_print_bt_header(fp, fake_tc.task, 0); -+ back_trace(bt); -+ } -+ return; -+#else -+ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); -+#endif -+ } -+ - if (active) { - if (ACTIVE()) - error(FATAL, -@@ -1350,9 +1734,10 @@ - char buf[BUFSIZE]; - - if (bt->flags & BT_TEXT_SYMBOLS) { -- fprintf(fp, "%sSTART: %s at %lx\n", -- space(VADDR_PRLEN > 8 ? 14 : 6), -- closest_symbol(eip), eip); -+ if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) -+ fprintf(fp, "%sSTART: %s at %lx\n", -+ space(VADDR_PRLEN > 8 ? 14 : 6), -+ closest_symbol(eip), eip); - } - - if (bt->hp) -@@ -1435,6 +1820,9 @@ - i < LONGS_PER_STACK; i++, up++) { - if (is_kernel_text(*up)) - fprintf(fp, "%lx: %s\n", -+ tt->flags & THREAD_INFO ? -+ bt->tc->thread_info + -+ (i * sizeof(long)) : - bt->task + (i * sizeof(long)), - value_to_symstr(*up, buf, 0)); - } -@@ -1461,20 +1849,26 @@ - if (bt->hp) { - if (bt->hp->esp && !INSTACK(bt->hp->esp, bt)) - error(INFO, -- "invalid stack address for this task: %lx\n", -- bt->hp->esp); -+ "invalid stack address for this task: %lx\n (valid range: %lx - %lx)\n", -+ bt->hp->esp, bt->stackbase, bt->stacktop); - eip = bt->hp->eip; - esp = bt->hp->esp; - - machdep->get_stack_frame(bt, eip ? NULL : &eip, - esp ? 
NULL : &esp); - -- } else if (NETDUMP_DUMPFILE()) -+ } else if (XEN_HYPER_MODE()) -+ machdep->get_stack_frame(bt, &eip, &esp); -+ else if (NETDUMP_DUMPFILE()) - get_netdump_regs(bt, &eip, &esp); -+ else if (KDUMP_DUMPFILE()) -+ get_kdump_regs(bt, &eip, &esp); - else if (DISKDUMP_DUMPFILE()) - get_diskdump_regs(bt, &eip, &esp); - else if (LKCD_DUMPFILE()) - get_lkcd_regs(bt, &eip, &esp); -+ else if (XENDUMP_DUMPFILE()) -+ get_xendump_regs(bt, &eip, &esp); - else - machdep->get_stack_frame(bt, &eip, &esp); - -@@ -1486,6 +1880,13 @@ - if (bt->flags & - (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) { - -+ if (bt->flags & BT_TEXT_SYMBOLS_ALL) { -+ esp = bt->stackbase + -+ ((tt->flags & THREAD_INFO) ? -+ SIZE(thread_info) : SIZE(task_struct)); -+ eip = 0; -+ } -+ - if (machdep->flags & MACHDEP_BT_TEXT) { - bt->instptr = eip; - bt->stkptr = esp; -@@ -1666,6 +2067,7 @@ - fprintf(fp, " flags: %llx\n", bt->flags); - fprintf(fp, " instptr: %lx\n", bt->instptr); - fprintf(fp, " stkptr: %lx\n", bt->stkptr); -+ fprintf(fp, " bptr: %lx\n", bt->bptr); - fprintf(fp, " stackbase: %lx\n", bt->stackbase); - fprintf(fp, " stacktop: %lx\n", bt->stacktop); - fprintf(fp, " tc: %lx ", (ulong)bt->tc); -@@ -1721,6 +2123,13 @@ - *esp = *(up-1); - return; - } -+ /* Egenera */ -+ if (STREQ(sym, "netdump_ipi")) { -+ *eip = *up; -+ *esp = bt->task + -+ ((char *)(up-1) - bt->stackbuf); -+ return; -+ } - if (STREQ(sym, "smp_stop_cpu_interrupt")) { - *eip = *up; - *esp = bt->task + -@@ -1837,8 +2246,8 @@ - return; - } - -- if (IS_VMALLOC_ADDR(list.next) && -- IS_VMALLOC_ADDR(list.prev)) { -+ if (IS_VMALLOC_ADDR((ulong)list.next) && -+ IS_VMALLOC_ADDR((ulong)list.prev)) { - kt->kernel_module = sp->value; - kt->module_list = (ulong)list.next; - modules_found = TRUE; -@@ -1873,14 +2282,17 @@ - kallsymsbuf = kt->flags & KALLSYMS_V1 ? - GETBUF(SIZE(kallsyms_header)) : NULL; - -+ please_wait("gathering module symbol data"); -+ - for (mod = kt->module_list; mod != kt->kernel_module; mod = mod_next) { -- if (CRASHDEBUG(7)) -+ if (CRASHDEBUG(3)) - fprintf(fp, "module: %lx\n", mod); - - if (!readmem(mod, KVADDR, modbuf, SIZE(module), - "module struct", RETURN_ON_ERROR|QUIET)) { - error(WARNING, -- "cannot access vmalloc'd module memory\n\n"); -+ "%scannot access vmalloc'd module memory\n\n", -+ DUMPFILE() ? "\n" : ""); - kt->mods_installed = 0; - kt->flags |= NO_MODULE_ACCESS; - FREEBUF(modbuf); -@@ -1914,7 +2326,8 @@ - kallsymsbuf, SIZE(kallsyms_header), - "kallsyms_header", RETURN_ON_ERROR|QUIET)) { - error(WARNING, -- "cannot access module kallsyms_header\n"); -+ "%scannot access module kallsyms_header\n", -+ DUMPFILE() ? "\n" : ""); - } else { - nsyms = UINT(kallsymsbuf + - OFFSET(kallsyms_header_symbols)); -@@ -1947,6 +2360,8 @@ - store_module_symbols_v2(total, kt->mods_installed); - break; - } -+ -+ please_wait_done(); - } - - -@@ -2112,7 +2527,7 @@ - address = 0; - flag = LIST_MODULE_HDR; - -- while ((c = getopt(argcnt, args, "rd:Ds:St:")) != EOF) { -+ while ((c = getopt(argcnt, args, "rd:Ds:St:o")) != EOF) { - switch(c) - { - case 'r': -@@ -2145,6 +2560,19 @@ - cmd_usage(pc->curcmd, SYNOPSIS); - break; - -+ /* -+ * Revert to using old-style add-symbol-file command -+ * for KMOD_V2 kernels. 
-+ */ -+ case 'o': -+ if (flag) -+ cmd_usage(pc->curcmd, SYNOPSIS); -+ if (kt->flags & KMOD_V1) -+ error(INFO, -+ "-o option is not applicable to this kernel version\n"); -+ st->flags |= USE_OLD_ADD_SYM; -+ return; -+ - case 't': - if (is_directory(optarg)) - tree = optarg; -@@ -2459,7 +2887,7 @@ - - - static char * --find_module_objfile(char *modref, char *filename, char *tree) -+module_objfile_search(char *modref, char *filename, char *tree) - { - char buf[BUFSIZE]; - char file[BUFSIZE]; -@@ -2477,16 +2905,20 @@ - strcpy(file, filename); - #ifdef MODULES_IN_CWD - else { -- sprintf(file, "%s.o", modref); -- if (access(file, R_OK) == 0) { -- retbuf = GETBUF(strlen(file)+1); -- strcpy(retbuf, file); -- if (CRASHDEBUG(1)) -- fprintf(fp, -- "find_module_objfile: [%s] file in cwd\n", -- retbuf); -- return retbuf; -- } -+ char *fileext[] = { "ko", "o"}; -+ int i; -+ for (i = 0; i < 2; i++) { -+ sprintf(file, "%s.%s", modref, fileext[i]); -+ if (access(file, R_OK) == 0) { -+ retbuf = GETBUF(strlen(file)+1); -+ strcpy(retbuf, file); -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "find_module_objfile: [%s] file in cwd\n", -+ retbuf); -+ return retbuf; -+ } -+ } - } - #else - else -@@ -2505,6 +2937,8 @@ - if ((st->flags & INSMOD_BUILTIN) && !filename) { - sprintf(buf, "__insmod_%s_O/", modref); - if (symbol_query(buf, NULL, &sp) == 1) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "search: INSMOD_BUILTIN %s\n", sp->name); - BZERO(buf, BUFSIZE); - p1 = strstr(sp->name, "/"); - if ((p2 = strstr(sp->name, file))) -@@ -2592,6 +3026,32 @@ - return retbuf; - } - -+/* -+ * First look for a module based upon its reference name. -+ * If that fails, try replacing any underscores in the -+ * reference name with a dash. -+ * -+ * Example: module name "dm_mod" comes from "dm-mod.ko" objfile -+ */ -+static char * -+find_module_objfile(char *modref, char *filename, char *tree) -+{ -+ char * retbuf; -+ char tmpref[BUFSIZE]; -+ int c; -+ -+ retbuf = module_objfile_search(modref, filename, tree); -+ -+ if (!retbuf) { -+ strncpy(tmpref, modref, BUFSIZE); -+ for (c = 0; c < BUFSIZE && tmpref[c]; c++) -+ if (tmpref[c] == '_') -+ tmpref[c] = '-'; -+ retbuf = module_objfile_search(tmpref, filename, tree); -+ } -+ -+ return retbuf; -+} - - /* - * Unlink any temporary remote module object files. 
-@@ -2787,6 +3247,8 @@ - do { - if (sflag) - dump_sys_call_table(args[optind], cnt++); -+ else if (STREQ(args[optind], "config")) -+ read_in_kernel_config(IKCFG_READ); - else - cmd_usage(args[optind], COMPLETE_HELP); - optind++; -@@ -2867,6 +3329,9 @@ - if (NETDUMP_DUMPFILE() && is_partial_netdump()) - fprintf(fp, " [PARTIAL DUMP]"); - -+ if (DISKDUMP_DUMPFILE() && is_partial_diskdump()) -+ fprintf(fp, " [PARTIAL DUMP]"); -+ - fprintf(fp, "\n"); - } - -@@ -2876,7 +3341,7 @@ - get_symbol_data("xtime", sizeof(struct timespec), &kt->date); - fprintf(fp, " DATE: %s\n", - strip_linefeeds(ctime(&kt->date.tv_sec))); -- fprintf(fp, " UPTIME: %s\n", get_uptime(buf)); -+ fprintf(fp, " UPTIME: %s\n", get_uptime(buf, NULL)); - fprintf(fp, "LOAD AVERAGE: %s\n", get_loadavg(buf)); - fprintf(fp, " TASKS: %ld\n", RUNNING_TASKS()); - fprintf(fp, " NODENAME: %s\n", uts->nodename); -@@ -2891,10 +3356,17 @@ - #ifdef WHO_CARES - fprintf(fp, " DOMAINNAME: %s\n", uts->domainname); - #endif -+ if (XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND)) -+ return; -+ - if (DUMPFILE()) { - fprintf(fp, " PANIC: "); - if (machdep->flags & HWRESET) -- fprintf(fp, "HARDWARE RESET\n"); -+ fprintf(fp, "(HARDWARE RESET)\n"); -+ else if (machdep->flags & INIT) -+ fprintf(fp, "(INIT)\n"); -+ else if (machdep->flags & MCA) -+ fprintf(fp, "(MCA)\n"); - else { - strip_linefeeds(get_panicmsg(buf)); - fprintf(fp, "\"%s\"%s\n", buf, -@@ -2952,28 +3424,42 @@ - /* - * Calculate and return the uptime. - */ -- --static char * --get_uptime(char *buf) -+char * -+get_uptime(char *buf, ulonglong *j64p) - { -- ulong jiffies; -+ ulong jiffies, tmp1, tmp2; -+ ulonglong jiffies_64, wrapped; - -- get_symbol_data("jiffies", sizeof(long), &jiffies); -- -- if ((machine_type("S390") || machine_type("S390X")) && -- (THIS_KERNEL_VERSION >= LINUX(2,6,0))) -- jiffies -= ((unsigned long)(unsigned int)(-300*machdep->hz)); -- else if (symbol_exists("jiffies_64") && BITS64() && -- (((ulonglong)jiffies & 0xffffffff00000000ULL) == -- 0x100000000ULL)) -- jiffies &= 0xffffffff; -- -- convert_time((ulonglong)jiffies, buf); -+ if (symbol_exists("jiffies_64")) { -+ get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64); -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { -+ wrapped = (jiffies_64 & 0xffffffff00000000ULL); -+ if (wrapped) { -+ wrapped -= 0x100000000ULL; -+ jiffies_64 &= 0x00000000ffffffffULL; -+ jiffies_64 |= wrapped; -+ jiffies_64 += (ulonglong)(300*machdep->hz); -+ } else { -+ tmp1 = (ulong)(uint)(-300*machdep->hz); -+ tmp2 = (ulong)jiffies_64; -+ jiffies_64 = (ulonglong)(tmp2 - tmp1); -+ } -+ } -+ if (buf) -+ convert_time(jiffies_64, buf); -+ if (j64p) -+ *j64p = jiffies_64; -+ } else { -+ get_symbol_data("jiffies", sizeof(long), &jiffies); -+ if (buf) -+ convert_time((ulonglong)jiffies, buf); -+ if (j64p) -+ *j64p = (ulonglong)jiffies; -+ } - - return buf; - } - -- - #define FSHIFT 11 /* nr of bits of precision */ - #define FIXED_1 (1<> FSHIFT) -@@ -3048,9 +3534,9 @@ - struct syment *sp, *spn; - long size; - #ifdef S390X -- unsigned int *sct, *sys_call_table, addr; -+ unsigned int *sct, *sys_call_table, sys_ni_syscall, addr; - #else -- ulong *sys_call_table, *sct, addr; -+ ulong *sys_call_table, *sct, sys_ni_syscall, addr; - #endif - if (GDB_PATCHED()) - error(INFO, "line numbers are not available\n"); -@@ -3068,6 +3554,8 @@ - readmem(symbol_value("sys_call_table"), KVADDR, sys_call_table, - size, "sys_call_table", FAULT_ON_ERROR); - -+ sys_ni_syscall = symbol_value("sys_ni_syscall"); -+ - if (spec) - open_tmpfile(); - -@@ -3080,13 +3568,17 @@ 
- "%3x " : "%3d ", i); - fprintf(fp, - "invalid sys_call_table entry: %lx (%s)\n", -- *sct, value_to_symstr(*sct, buf1, 0)); -+ (unsigned long)*sct, -+ value_to_symstr(*sct, buf1, 0)); - } - continue; - } - - fprintf(fp, (output_radix == 16) ? "%3x " : "%3d ", i); -- fprintf(fp, "%-26s ", scp); -+ if (sys_ni_syscall && *sct == sys_ni_syscall) -+ fprintf(fp, "%-26s ", "sys_ni_syscall"); -+ else -+ fprintf(fp, "%-26s ", scp); - - /* - * For system call symbols whose first instruction is -@@ -3181,16 +3673,16 @@ - * "help -k" output - */ - void --dump_kernel_table(void) -+dump_kernel_table(int verbose) - { -- int i; -+ int i, nr_cpus; - struct new_utsname *uts; - int others; - - others = 0; - uts = &kt->utsname; - -- fprintf(fp, " flags: %lx (", kt->flags); -+ fprintf(fp, " flags: %lx\n (", kt->flags); - if (kt->flags & NO_MODULE_ACCESS) - fprintf(fp, "%sNO_MODULE_ACCESS", others++ ? "|" : ""); - if (kt->flags & TVEC_BASES_V1) -@@ -3225,6 +3717,28 @@ - fprintf(fp, "%sKMOD_V2", others++ ? "|" : ""); - if (kt->flags & KALLSYMS_V2) - fprintf(fp, "%sKALLSYMS_V2", others++ ? "|" : ""); -+ if (kt->flags & USE_OLD_BT) -+ fprintf(fp, "%sUSE_OLD_BT", others++ ? "|" : ""); -+ if (kt->flags & ARCH_XEN) -+ fprintf(fp, "%sARCH_XEN", others++ ? "|" : ""); -+ if (kt->flags & NO_IKCONFIG) -+ fprintf(fp, "%sNO_IKCONFIG", others++ ? "|" : ""); -+ if (kt->flags & DWARF_UNWIND) -+ fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : ""); -+ if (kt->flags & NO_DWARF_UNWIND) -+ fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : ""); -+ if (kt->flags & DWARF_UNWIND_MEMORY) -+ fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : ""); -+ if (kt->flags & DWARF_UNWIND_EH_FRAME) -+ fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : ""); -+ if (kt->flags & DWARF_UNWIND_MODULES) -+ fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? "|" : ""); -+ if (kt->flags & BUGVERBOSE_OFF) -+ fprintf(fp, "%sBUGVERBOSE_OFF", others++ ? "|" : ""); -+ if (kt->flags & RELOC_SET) -+ fprintf(fp, "%sRELOC_SET", others++ ? "|" : ""); -+ if (kt->flags & RELOC_FORCE) -+ fprintf(fp, "%sRELOC_FORCE", others++ ? "|" : ""); - fprintf(fp, ")\n"); - fprintf(fp, " stext: %lx\n", kt->stext); - fprintf(fp, " etext: %lx\n", kt->etext); -@@ -3234,8 +3748,10 @@ - fprintf(fp, " init_end: %lx\n", kt->init_end); - fprintf(fp, " end: %lx\n", kt->end); - fprintf(fp, " cpus: %d\n", kt->cpus); -+ fprintf(fp, " cpus_override: %s\n", kt->cpus_override); - fprintf(fp, " NR_CPUS: %d (compiled-in to this version of %s)\n", - NR_CPUS, pc->program_name); -+ fprintf(fp, "kernel_NR_CPUS: %d\n", kt->kernel_NR_CPUS); - if (kt->display_bh == display_bh_1) - fprintf(fp, " display_bh: display_bh_1()\n"); - else if (kt->display_bh == display_bh_2) -@@ -3263,21 +3779,61 @@ - kt->kernel_version[1], kt->kernel_version[2]); - fprintf(fp, " gcc_version: %d.%d.%d\n", kt->gcc_version[0], - kt->gcc_version[1], kt->gcc_version[2]); -+ fprintf(fp, " BUG_bytes: %d\n", kt->BUG_bytes); -+ fprintf(fp, " relocate: %lx\n", kt->relocate); - fprintf(fp, " runq_siblings: %d\n", kt->runq_siblings); - fprintf(fp, " __rq_idx[NR_CPUS]: "); -- for (i = 0; i < NR_CPUS; i++) -+ nr_cpus = kt->kernel_NR_CPUS ? 
kt->kernel_NR_CPUS : NR_CPUS; -+ for (i = 0; i < nr_cpus; i++) - fprintf(fp, "%ld ", kt->__rq_idx[i]); - fprintf(fp, "\n __cpu_idx[NR_CPUS]: "); -- for (i = 0; i < NR_CPUS; i++) -+ for (i = 0; i < nr_cpus; i++) - fprintf(fp, "%ld ", kt->__cpu_idx[i]); - fprintf(fp, "\n __per_cpu_offset[NR_CPUS]:"); -- for (i = 0; i < NR_CPUS; i++) -+ for (i = 0; i < nr_cpus; i++) - fprintf(fp, "%s%.*lx ", (i % 4) == 0 ? "\n " : "", - LONG_PRLEN, kt->__per_cpu_offset[i]); - fprintf(fp, "\n cpu_flags[NR_CPUS]:"); -- for (i = 0; i < NR_CPUS; i++) -+ for (i = 0; i < nr_cpus; i++) - fprintf(fp, "%lx ", kt->cpu_flags[i]); -- fprintf(fp, "\n"); -+ others = 0; -+ fprintf(fp, "\n xen_flags: %lx (", kt->xen_flags); -+ if (kt->xen_flags & WRITABLE_PAGE_TABLES) -+ fprintf(fp, "%sWRITABLE_PAGE_TABLES", others++ ? "|" : ""); -+ if (kt->xen_flags & SHADOW_PAGE_TABLES) -+ fprintf(fp, "%sSHADOW_PAGE_TABLES", others++ ? "|" : ""); -+ if (kt->xen_flags & CANONICAL_PAGE_TABLES) -+ fprintf(fp, "%sCANONICAL_PAGE_TABLES", others++ ? "|" : ""); -+ if (kt->xen_flags & XEN_SUSPEND) -+ fprintf(fp, "%sXEN_SUSPEND", others++ ? "|" : ""); -+ fprintf(fp, ")\n"); -+ fprintf(fp, " m2p_page: %lx\n", (ulong)kt->m2p_page); -+ fprintf(fp, "phys_to_machine_mapping: %lx\n", kt->phys_to_machine_mapping); -+ fprintf(fp, " p2m_table_size: %ld\n", kt->p2m_table_size); -+ fprintf(fp, " p2m_mapping_cache[%d]: %s\n", P2M_MAPPING_CACHE, -+ verbose ? "" : "(use \"help -K\" to view cache contents)"); -+ for (i = 0; verbose && (i < P2M_MAPPING_CACHE); i++) { -+ if (!kt->p2m_mapping_cache[i].mapping) -+ continue; -+ fprintf(fp, " [%d] mapping: %lx start: %lx end: %lx (%ld mfns)\n", -+ i, kt->p2m_mapping_cache[i].mapping, -+ kt->p2m_mapping_cache[i].start, -+ kt->p2m_mapping_cache[i].end, -+ kt->p2m_mapping_cache[i].end - kt->p2m_mapping_cache[i].start + 1); -+ } -+ fprintf(fp, " last_mapping_read: %lx\n", kt->last_mapping_read); -+ fprintf(fp, " p2m_cache_index: %ld\n", kt->p2m_cache_index); -+ fprintf(fp, " p2m_pages_searched: %ld\n", kt->p2m_pages_searched); -+ fprintf(fp, " p2m_mfn_cache_hits: %ld ", kt->p2m_mfn_cache_hits); -+ if (kt->p2m_pages_searched) -+ fprintf(fp, "(%ld%%)\n", kt->p2m_mfn_cache_hits * 100 / kt->p2m_pages_searched); -+ else -+ fprintf(fp, "\n"); -+ fprintf(fp, " p2m_page_cache_hits: %ld ", kt->p2m_page_cache_hits); -+ if (kt->p2m_pages_searched) -+ fprintf(fp, "(%ld%%)\n", kt->p2m_page_cache_hits * 100 / kt->p2m_pages_searched); -+ else -+ fprintf(fp, "\n"); - } - - /* -@@ -3314,7 +3870,7 @@ - if (machine_type("S390") || machine_type("S390X")) - command_not_supported(); - -- while ((c = getopt(argcnt, args, "db")) != EOF) { -+ while ((c = getopt(argcnt, args, "dbu")) != EOF) { - switch(c) - { - case 'd': -@@ -3344,6 +3900,17 @@ - kt->display_bh(); - return; - -+ case 'u': -+ pc->curcmd_flags |= IRQ_IN_USE; -+ if (kernel_symbol_exists("no_irq_chip")) -+ pc->curcmd_private = (ulonglong)symbol_value("no_irq_chip"); -+ else if (kernel_symbol_exists("no_irq_type")) -+ pc->curcmd_private = (ulonglong)symbol_value("no_irq_type"); -+ else -+ error(WARNING, -+ "irq: -u option ignored: \"no_irq_chip\" or \"no_irq_type\" symbols do not exist\n"); -+ break; -+ - default: - argerrs++; - break; -@@ -3362,6 +3929,8 @@ - return; - } - -+ pc->curcmd_flags &= ~IRQ_IN_USE; -+ - while (args[optind]) { - i = dtoi(args[optind], FAULT_ON_ERROR, NULL); - if (i >= nr_irqs) -@@ -3402,13 +3971,22 @@ - - readmem(irq_desc_addr + OFFSET(irq_desc_t_status), KVADDR, &status, - sizeof(int), "irq_desc entry", FAULT_ON_ERROR); -- readmem(irq_desc_addr + 
OFFSET(irq_desc_t_handler), KVADDR, &handler, -- sizeof(long), "irq_desc entry", FAULT_ON_ERROR); -+ if (VALID_MEMBER(irq_desc_t_handler)) -+ readmem(irq_desc_addr + OFFSET(irq_desc_t_handler), KVADDR, -+ &handler, sizeof(long), "irq_desc entry", -+ FAULT_ON_ERROR); -+ else if (VALID_MEMBER(irq_desc_t_chip)) -+ readmem(irq_desc_addr + OFFSET(irq_desc_t_chip), KVADDR, -+ &handler, sizeof(long), "irq_desc entry", -+ FAULT_ON_ERROR); - readmem(irq_desc_addr + OFFSET(irq_desc_t_action), KVADDR, &action, - sizeof(long), "irq_desc entry", FAULT_ON_ERROR); - readmem(irq_desc_addr + OFFSET(irq_desc_t_depth), KVADDR, &depth, - sizeof(int), "irq_desc entry", FAULT_ON_ERROR); - -+ if (!action && (handler == (ulong)pc->curcmd_private)) -+ return; -+ - fprintf(fp, " IRQ: %d\n", irq); - fprintf(fp, " STATUS: %x %s", status, status ? "(" : ""); - others = 0; -@@ -3441,19 +4019,30 @@ - } else - fprintf(fp, "%lx\n", handler); - -- if (handler) { -- readmem(handler+OFFSET(hw_interrupt_type_typename), KVADDR, -- &tmp1, sizeof(void *), -- "hw_interrupt_type typename", FAULT_ON_ERROR); -+ if (handler) { -+ if (VALID_MEMBER(hw_interrupt_type_typename)) -+ readmem(handler+OFFSET(hw_interrupt_type_typename), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type typename", FAULT_ON_ERROR); -+ else if (VALID_MEMBER(irq_chip_typename)) -+ readmem(handler+OFFSET(irq_chip_typename), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type typename", FAULT_ON_ERROR); -+ - fprintf(fp, " typename: %lx ", tmp1); - BZERO(buf, BUFSIZE); - if (read_string(tmp1, buf, BUFSIZE-1)) - fprintf(fp, "\"%s\"", buf); - fprintf(fp, "\n"); - -- readmem(handler+OFFSET(hw_interrupt_type_startup), KVADDR, -- &tmp1, sizeof(void *), -- "hw_interrupt_type startup", FAULT_ON_ERROR); -+ if (VALID_MEMBER(hw_interrupt_type_startup)) -+ readmem(handler+OFFSET(hw_interrupt_type_startup), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type startup", FAULT_ON_ERROR); -+ else if (VALID_MEMBER(irq_chip_startup)) -+ readmem(handler+OFFSET(irq_chip_startup), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type startup", FAULT_ON_ERROR); - fprintf(fp, " startup: %lx ", tmp1); - if (is_kernel_text(tmp1)) - fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); -@@ -3464,9 +4053,15 @@ - value_to_symstr(tmp2, buf, 0)); - fprintf(fp, "\n"); - -- readmem(handler+OFFSET(hw_interrupt_type_shutdown), KVADDR, -- &tmp1, sizeof(void *), -- "hw_interrupt_type shutdown", FAULT_ON_ERROR); -+ if (VALID_MEMBER(hw_interrupt_type_shutdown)) -+ readmem(handler+OFFSET(hw_interrupt_type_shutdown), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type shutdown", FAULT_ON_ERROR); -+ else if (VALID_MEMBER(irq_chip_shutdown)) -+ readmem(handler+OFFSET(irq_chip_shutdown), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type shutdown", FAULT_ON_ERROR); -+ - fprintf(fp, " shutdown: %lx ", tmp1); - if (is_kernel_text(tmp1)) - fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); -@@ -3494,9 +4089,14 @@ - fprintf(fp, "\n"); - } - -- readmem(handler+OFFSET(hw_interrupt_type_enable), KVADDR, -- &tmp1, sizeof(void *), -- "hw_interrupt_type enable", FAULT_ON_ERROR); -+ if (VALID_MEMBER(hw_interrupt_type_enable)) -+ readmem(handler+OFFSET(hw_interrupt_type_enable), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type enable", FAULT_ON_ERROR); -+ else if (VALID_MEMBER(irq_chip_enable)) -+ readmem(handler+OFFSET(irq_chip_enable), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type enable", FAULT_ON_ERROR); - fprintf(fp, " enable: %lx ", tmp1); - if (is_kernel_text(tmp1)) - 
fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); -@@ -3507,9 +4107,14 @@ - value_to_symstr(tmp2, buf, 0)); - fprintf(fp, "\n"); - -- readmem(handler+OFFSET(hw_interrupt_type_disable), KVADDR, -- &tmp1, sizeof(void *), -- "hw_interrupt_type disable", FAULT_ON_ERROR); -+ if (VALID_MEMBER(hw_interrupt_type_disable)) -+ readmem(handler+OFFSET(hw_interrupt_type_disable), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type disable", FAULT_ON_ERROR); -+ else if (VALID_MEMBER(irq_chip_disable)) -+ readmem(handler+OFFSET(irq_chip_disable), -+ KVADDR, &tmp1, sizeof(void *), -+ "hw_interrupt_type disable", FAULT_ON_ERROR); - fprintf(fp, " disable: %lx ", tmp1); - if (is_kernel_text(tmp1)) - fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); -@@ -3534,27 +4139,119 @@ - fprintf(fp, "<%s>", - value_to_symstr(tmp2, buf, 0)); - fprintf(fp, "\n"); -+ } else if (VALID_MEMBER(irq_chip_ack)) { -+ readmem(handler+OFFSET(irq_chip_ack), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip ack", FAULT_ON_ERROR); -+ fprintf(fp, " ack: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "ack indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); - } - -- if (VALID_MEMBER(hw_interrupt_type_end)) { -- readmem(handler+OFFSET(hw_interrupt_type_end), KVADDR, -- &tmp1, sizeof(void *), -- "hw_interrupt_type end", FAULT_ON_ERROR); -- fprintf(fp, " end: %lx ", tmp1); -+ if (VALID_MEMBER(irq_chip_mask)) { -+ readmem(handler+OFFSET(irq_chip_mask), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip mask", FAULT_ON_ERROR); -+ fprintf(fp, " mask: %lx ", tmp1); - if (is_kernel_text(tmp1)) -- fprintf(fp, "<%s>", -+ fprintf(fp, "<%s>", - value_to_symstr(tmp1, buf, 0)); - else if (readmem(tmp1, KVADDR, &tmp2, -- sizeof(ulong), "end indirection", -+ sizeof(ulong), "mask indirection", - RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) - fprintf(fp, "<%s>", - value_to_symstr(tmp2, buf, 0)); - fprintf(fp, "\n"); - } -- -- if (VALID_MEMBER(hw_interrupt_type_set_affinity)) { -- readmem(handler+OFFSET(hw_interrupt_type_set_affinity), -- KVADDR, &tmp1, sizeof(void *), -+ -+ if (VALID_MEMBER(irq_chip_mask_ack)) { -+ readmem(handler+OFFSET(irq_chip_mask_ack), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip mask_ack", FAULT_ON_ERROR); -+ fprintf(fp, " mask_ack: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "mask_ack indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); -+ } -+ -+ if (VALID_MEMBER(irq_chip_unmask)) { -+ readmem(handler+OFFSET(irq_chip_unmask), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip unmask", FAULT_ON_ERROR); -+ fprintf(fp, " unmask: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "unmask indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); -+ } -+ -+ if (VALID_MEMBER(irq_chip_eoi)) { -+ readmem(handler+OFFSET(irq_chip_eoi), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip eoi", FAULT_ON_ERROR); -+ fprintf(fp, " eoi: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if 
(readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "eoi indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); -+ } -+ -+ if (VALID_MEMBER(hw_interrupt_type_end)) { -+ readmem(handler+OFFSET(hw_interrupt_type_end), KVADDR, -+ &tmp1, sizeof(void *), -+ "hw_interrupt_type end", FAULT_ON_ERROR); -+ fprintf(fp, " end: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "end indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); -+ } else if (VALID_MEMBER(irq_chip_end)) { -+ readmem(handler+OFFSET(irq_chip_end), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip end", FAULT_ON_ERROR); -+ fprintf(fp, " end: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "end indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); -+ } -+ -+ if (VALID_MEMBER(hw_interrupt_type_set_affinity)) { -+ readmem(handler+OFFSET(hw_interrupt_type_set_affinity), -+ KVADDR, &tmp1, sizeof(void *), - "hw_interrupt_type set_affinity", - FAULT_ON_ERROR); - fprintf(fp, " set_affinity: %lx ", tmp1); -@@ -3567,6 +4264,66 @@ - fprintf(fp, "<%s>", - value_to_symstr(tmp2, buf, 0)); - fprintf(fp, "\n"); -+ } else if (VALID_MEMBER(irq_chip_set_affinity)) { -+ readmem(handler+OFFSET(irq_chip_set_affinity), -+ KVADDR, &tmp1, sizeof(void *), -+ "irq_chip set_affinity", -+ FAULT_ON_ERROR); -+ fprintf(fp, " set_affinity: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "set_affinity indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); -+ } -+ if (VALID_MEMBER(irq_chip_retrigger)) { -+ readmem(handler+OFFSET(irq_chip_retrigger), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip retrigger", FAULT_ON_ERROR); -+ fprintf(fp, " retrigger: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "retrigger indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); -+ } -+ if (VALID_MEMBER(irq_chip_set_type)) { -+ readmem(handler+OFFSET(irq_chip_set_type), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip set_type", FAULT_ON_ERROR); -+ fprintf(fp, " set_type: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "set_type indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); -+ } -+ if (VALID_MEMBER(irq_chip_set_wake)) { -+ readmem(handler+OFFSET(irq_chip_set_wake), KVADDR, -+ &tmp1, sizeof(void *), -+ "irq_chip set wake", FAULT_ON_ERROR); -+ fprintf(fp, " set_wake: %lx ", tmp1); -+ if (is_kernel_text(tmp1)) -+ fprintf(fp, "<%s>", -+ value_to_symstr(tmp1, buf, 0)); -+ else if (readmem(tmp1, KVADDR, &tmp2, -+ sizeof(ulong), "set_wake indirection", -+ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) -+ 
fprintf(fp, "<%s>", -+ value_to_symstr(tmp2, buf, 0)); -+ fprintf(fp, "\n"); - } - } - -@@ -4146,7 +4903,7 @@ - } - - /* -- * 2.6 per-cpu timers, using "per_cpu__tvec_bases". XXX -+ * 2.6 per-cpu timers, using "per_cpu__tvec_bases". - */ - - static void -@@ -4220,8 +4977,12 @@ - else - tvec_bases = symbol_value("per_cpu__tvec_bases"); - -- fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu, -- tvec_bases + SIZE(tvec_t_base_s)); -+ if (symbol_exists("boot_tvec_bases")) { -+ readmem(tvec_bases, KVADDR, &tvec_bases, sizeof(void *), -+ "per-cpu tvec_bases", FAULT_ON_ERROR); -+ } -+ -+ fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu, tvec_bases); - - sprintf(buf1, "%ld", highest); - flen = MAX(strlen(buf1), strlen("JIFFIES")); -@@ -4320,6 +5081,11 @@ - else - tvec_bases = symbol_value("per_cpu__tvec_bases"); - -+ if (symbol_exists("boot_tvec_bases")) { -+ readmem(tvec_bases, KVADDR, &tvec_bases, sizeof(void *), -+ "per-cpu tvec_bases", FAULT_ON_ERROR); -+ } -+ - tv[1].base = tvec_bases + - OFFSET(tvec_t_base_s_tv1); - tv[1].end = tv[1].base + SIZE(tvec_root_s); -@@ -4475,9 +5241,16 @@ - ld->start = vec[i]; - ld->list_head_offset = offset; - ld->end = vec_kvaddr; -+ ld->flags = RETURN_ON_LIST_ERROR; - - hq_open(); -- timer_cnt = do_list(ld); -+ if ((timer_cnt = do_list(ld)) == -1) { -+ /* Ignore chains with errors */ -+ error(INFO, -+ "ignoring faulty timer list at index %d of timer array\n", -+ i/2); -+ continue; -+ } - if (!timer_cnt) - continue; - timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong)); -@@ -4708,21 +5481,569 @@ - machdep->last_pgd_read = 0; - machdep->last_pmd_read = 0; - machdep->last_ptbl_read = 0; -+ if (machdep->clear_machdep_cache) -+ machdep->clear_machdep_cache(); - } - } - - /* -- * For kernels containing cpu_online_map, count the bits. -+ * For kernels containing at least the cpu_online_map, use it -+ * to determine the cpu count. - */ - int - get_cpus_online() - { -- ulong cpu_online_map; -+ int i, len, online; -+ struct gnu_request req; -+ char *buf; -+ ulong *maskptr; - - if (!symbol_exists("cpu_online_map")) - return 0; - -- get_symbol_data("cpu_online_map", sizeof(ulong), &cpu_online_map); -+ if (LKCD_KERNTYPES()) { -+ if ((len = STRUCT_SIZE("cpumask_t")) < 0) -+ error(FATAL, "cannot determine type cpumask_t\n"); -+ } else -+ len = get_symbol_type("cpu_online_map", NULL, &req) == -+ TYPE_CODE_UNDEF ? sizeof(ulong) : req.length; -+ buf = GETBUF(len); -+ -+ online = 0; -+ -+ if (readmem(symbol_value("cpu_online_map"), KVADDR, buf, len, -+ "cpu_online_map", RETURN_ON_ERROR)) { - -- return count_bits_long(cpu_online_map); -+ maskptr = (ulong *)buf; -+ for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) -+ online += count_bits_long(*maskptr); -+ -+ FREEBUF(buf); -+ if (CRASHDEBUG(1)) -+ error(INFO, "get_cpus_online: online: %d\n", online); -+ } -+ -+ return online; -+} -+ -+/* -+ * For kernels containing at least the cpu_possible_map, used -+ * to determine the cpu count (of online and offline cpus). -+ */ -+int -+get_cpus_possible() -+{ -+ int i, len, possible; -+ struct gnu_request req; -+ char *buf; -+ ulong *maskptr; -+ -+ if (!symbol_exists("cpu_possible_map")) -+ return 0; -+ -+ if (LKCD_KERNTYPES()) { -+ if ((len = STRUCT_SIZE("cpumask_t")) < 0) -+ error(FATAL, "cannot determine type cpumask_t\n"); -+ } else -+ len = get_symbol_type("cpu_possible_map", NULL, &req) == -+ TYPE_CODE_UNDEF ? 
sizeof(ulong) : req.length; -+ buf = GETBUF(len); -+ -+ possible = 0; -+ -+ if (readmem(symbol_value("cpu_possible_map"), KVADDR, buf, len, -+ "cpu_possible_map", RETURN_ON_ERROR)) { -+ -+ maskptr = (ulong *)buf; -+ for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) -+ possible += count_bits_long(*maskptr); -+ -+ FREEBUF(buf); -+ if (CRASHDEBUG(1)) -+ error(INFO, "get_cpus_possible: possible: %d\n", -+ possible); -+ } -+ -+ return possible; -+} -+ -+/* -+ * Xen machine-address to pseudo-physical-page translator. -+ */ -+ulonglong -+xen_m2p(ulonglong machine) -+{ -+ ulong mfn, pfn; -+ -+ mfn = XEN_MACHINE_TO_MFN(machine); -+ pfn = __xen_m2p(machine, mfn); -+ -+ if (pfn == XEN_MFN_NOT_FOUND) { -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "xen_machine_to_pseudo_PAE: machine address %lx not found\n", -+ machine); -+ return XEN_MACHADDR_NOT_FOUND; -+ } -+ -+ return XEN_PFN_TO_PSEUDO(pfn); -+} -+ -+static ulong -+__xen_m2p(ulonglong machine, ulong mfn) -+{ -+ ulong mapping, kmfn, pfn, p, i, c; -+ ulong start, end; -+ ulong *mp; -+ -+ mp = (ulong *)kt->m2p_page; -+ mapping = kt->phys_to_machine_mapping; -+ -+ /* -+ * Check the FIFO cache first. -+ */ -+ for (c = 0; c < P2M_MAPPING_CACHE; c++) { -+ if (kt->p2m_mapping_cache[c].mapping && -+ ((mfn >= kt->p2m_mapping_cache[c].start) && -+ (mfn <= kt->p2m_mapping_cache[c].end))) { -+ -+ if (kt->p2m_mapping_cache[c].mapping != kt->last_mapping_read) { -+ if (!readmem(kt->p2m_mapping_cache[c].mapping, KVADDR, -+ mp, PAGESIZE(), "phys_to_machine_mapping page (cached)", -+ RETURN_ON_ERROR)) -+ error(FATAL, "cannot access " -+ "phys_to_machine_mapping page\n"); -+ else -+ kt->last_mapping_read = kt->p2m_mapping_cache[c].mapping; -+ } else -+ kt->p2m_page_cache_hits++; -+ -+ for (i = 0; i < XEN_PFNS_PER_PAGE; i++) { -+ kmfn = (*(mp+i)) & ~XEN_FOREIGN_FRAME; -+ if (kmfn == mfn) { -+ p = P2M_MAPPING_TO_PAGE_INDEX(c); -+ pfn = p + i; -+ -+ if (CRASHDEBUG(1)) -+ console("(cached) mfn: %lx (%llx) p: %ld" -+ " i: %ld pfn: %lx (%llx)\n", -+ mfn, machine, p, -+ i, pfn, XEN_PFN_TO_PSEUDO(pfn)); -+ kt->p2m_mfn_cache_hits++; -+ -+ return pfn; -+ } -+ } -+ /* -+ * Stale entry -- clear it out. -+ */ -+ kt->p2m_mapping_cache[c].mapping = 0; -+ } -+ } -+ -+ /* -+ * The machine address was not cached, so search from the -+ * beginning of the phys_to_machine_mapping array, caching -+ * only the found machine address. -+ */ -+ for (p = 0; p < kt->p2m_table_size; p += XEN_PFNS_PER_PAGE) -+ { -+ if (mapping != kt->last_mapping_read) { -+ if (!readmem(mapping, KVADDR, mp, PAGESIZE(), -+ "phys_to_machine_mapping page", RETURN_ON_ERROR)) -+ error(FATAL, -+ "cannot access phys_to_machine_mapping page\n"); -+ else -+ kt->last_mapping_read = mapping; -+ } -+ -+ kt->p2m_pages_searched++; -+ -+ if (search_mapping_page(mfn, &i, &start, &end)) { -+ pfn = p + i; -+ if (CRASHDEBUG(1)) -+ console("pages: %d mfn: %lx (%llx) p: %ld" -+ " i: %ld pfn: %lx (%llx)\n", -+ (p/XEN_PFNS_PER_PAGE)+1, mfn, machine, -+ p, i, pfn, XEN_PFN_TO_PSEUDO(pfn)); -+ -+ c = kt->p2m_cache_index; -+ kt->p2m_mapping_cache[c].start = start; -+ kt->p2m_mapping_cache[c].end = end; -+ kt->p2m_mapping_cache[c].mapping = mapping; -+ kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE; -+ -+ return pfn; -+ } -+ -+ mapping += PAGESIZE(); -+ } -+ -+ if (CRASHDEBUG(1)) -+ console("machine address %llx not found\n", machine); -+ -+ return (XEN_MFN_NOT_FOUND); -+} -+ -+/* -+ * Search for an mfn in the current mapping page, and if found, -+ * determine the range of contiguous mfns that it's contained -+ * within (if any). 
-+ */ -+#define PREV_UP 0x1 -+#define NEXT_UP 0x2 -+#define PREV_DOWN 0x4 -+#define NEXT_DOWN 0x8 -+ -+static int -+search_mapping_page(ulong mfn, ulong *index, ulong *startptr, ulong *endptr) -+{ -+ int n, found; -+ ulong i, kmfn; -+ ulong flags, start, end, next, prev, curr; -+ ulong *mp; -+ -+ mp = (ulong *)kt->m2p_page; -+ -+ for (i = 0, found = FALSE; i < XEN_PFNS_PER_PAGE; i++) { -+ kmfn = (*(mp+i)) & ~XEN_FOREIGN_FRAME; -+ -+ if (kmfn == mfn) { -+ found = TRUE; -+ *index = i; -+ break; -+ } -+ } -+ -+ if (found) { -+ flags = 0; -+ next = prev = XEN_MFN_NOT_FOUND; -+ start = end = kmfn; -+ -+ if (i) -+ prev = (*(mp+(i-1))) & ~XEN_FOREIGN_FRAME; -+ if ((i+1) != XEN_PFNS_PER_PAGE) -+ next = (*(mp+(i+1))) & ~XEN_FOREIGN_FRAME; -+ -+ if (prev == (kmfn-1)) -+ flags |= PREV_UP; -+ else if (prev == (kmfn+1)) -+ flags |= PREV_DOWN; -+ -+ if (next == (kmfn+1)) -+ flags |= NEXT_UP; -+ else if (next == (kmfn-1)) -+ flags |= NEXT_DOWN; -+ -+ /* Should be impossible, but just in case... */ -+ if ((flags & PREV_UP) && (flags & NEXT_DOWN)) -+ flags &= ~NEXT_DOWN; -+ else if ((flags & PREV_DOWN) && (flags & NEXT_UP)) -+ flags &= ~NEXT_UP; -+ -+ if (flags & (PREV_UP|PREV_DOWN)) { -+ start = prev; -+ -+ for (n = (i-2); n >= 0; n--) { -+ curr = (*(mp+n)) & ~XEN_FOREIGN_FRAME; -+ if (flags & PREV_UP) { -+ if (curr == (start-1)) -+ start = curr; -+ } else { -+ if (curr == (start+1)) -+ start = curr; -+ } -+ } -+ -+ } -+ -+ if (flags & (NEXT_UP|NEXT_DOWN)) { -+ end = next; -+ -+ for (n = (i+2); n < XEN_PFNS_PER_PAGE; n++) { -+ curr = (*(mp+n)) & ~XEN_FOREIGN_FRAME; -+ if (flags & NEXT_UP) { -+ if (curr == (end+1)) -+ end = curr; -+ } else { -+ if (curr == (end-1)) -+ end = curr; -+ } -+ } -+ -+ -+ } -+ -+ if (start > end) { -+ curr = start; -+ start = end; -+ end = curr; -+ } -+ -+ *startptr = start; -+ *endptr = end; -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "mfn: %lx -> start: %lx end: %lx (%ld mfns)\n", -+ mfn, start, end, end - start); -+ } -+ -+ return found; -+} -+ -+ -+ -+/* -+ * Read the relevant IKCONFIG (In Kernel Config) data if available. -+ */ -+ -+static char *ikconfig[] = { -+ "CONFIG_NR_CPUS", -+ "CONFIG_PGTABLE_4", -+ "CONFIG_HZ", -+ "CONFIG_DEBUG_BUGVERBOSE", -+ NULL, -+}; -+ -+void -+read_in_kernel_config(int command) -+{ -+ struct syment *sp; -+ int ii, jj, ret, end, found=0; -+ unsigned long size, bufsz; -+ char *pos, *ln, *buf, *head, *tail, *val, *uncomp; -+ char line[512]; -+ z_stream stream; -+ -+ if ((kt->flags & NO_IKCONFIG) && !(pc->flags & RUNTIME)) -+ return; -+ -+ if ((sp = symbol_search("kernel_config_data")) == NULL) { -+ if (command == IKCFG_READ) -+ error(FATAL, -+ "kernel_config_data does not exist in this kernel\n"); -+ return; -+ } -+ -+ /* We don't know how large IKCONFIG is, so we start with -+ * 32k, if we can't find MAGIC_END assume we didn't read -+ * enough, double it and try again. 
-+ */ -+ ii = 32; -+ -+again: -+ size = ii * 1024; -+ -+ if ((buf = (char *)malloc(size)) == NULL) { -+ error(WARNING, "cannot malloc IKCONFIG input buffer\n"); -+ return; -+ } -+ -+ if (!readmem(sp->value, KVADDR, buf, size, -+ "kernel_config_data", RETURN_ON_ERROR)) { -+ error(WARNING, "cannot read kernel_config_data\n"); -+ goto out2; -+ } -+ -+ /* Find the start */ -+ if (strstr(buf, MAGIC_START)) -+ head = buf + MAGIC_SIZE + 10; /* skip past MAGIC_START and gzip header */ -+ else { -+ error(WARNING, "could not find MAGIC_START!\n"); -+ goto out2; -+ } -+ -+ tail = head; -+ -+ end = strlen(MAGIC_END); -+ -+ /* Find the end*/ -+ while (tail < (buf + (size - 1))) { -+ -+ if (strncmp(tail, MAGIC_END, end)==0) { -+ found = 1; -+ break; -+ } -+ tail++; -+ } -+ -+ if (found) { -+ bufsz = tail - head; -+ size = 10 * bufsz; -+ if ((uncomp = (char *)malloc(size)) == NULL) { -+ error(WARNING, "cannot malloc IKCONFIG output buffer\n"); -+ goto out2; -+ } -+ } else { -+ if (ii > 512) { -+ error(WARNING, "could not find MAGIC_END!\n"); -+ goto out2; -+ } else { -+ free(buf); -+ ii *= 2; -+ goto again; -+ } -+ } -+ -+ -+ /* initialize zlib */ -+ stream.next_in = (Bytef *)head; -+ stream.avail_in = (uInt)bufsz; -+ -+ stream.next_out = (Bytef *)uncomp; -+ stream.avail_out = (uInt)size; -+ -+ stream.zalloc = NULL; -+ stream.zfree = NULL; -+ stream.opaque = NULL; -+ -+ ret = inflateInit2(&stream, -MAX_WBITS); -+ if (ret != Z_OK) { -+ read_in_kernel_config_err(ret, "initialize"); -+ goto out1; -+ } -+ -+ ret = inflate(&stream, Z_FINISH); -+ -+ if (ret != Z_STREAM_END) { -+ inflateEnd(&stream); -+ if (ret == Z_NEED_DICT || -+ (ret == Z_BUF_ERROR && stream.avail_in == 0)) { -+ read_in_kernel_config_err(Z_DATA_ERROR, "uncompress"); -+ goto out1; -+ } -+ read_in_kernel_config_err(ret, "uncompress"); -+ goto out1; -+ } -+ size = stream.total_out; -+ -+ ret = inflateEnd(&stream); -+ -+ pos = uncomp; -+ -+ do { -+ ret = sscanf(pos, "%511[^\n]\n%n", line, &ii); -+ if (ret > 0) { -+ if ((command == IKCFG_READ) || CRASHDEBUG(8)) -+ fprintf(fp, "%s\n", line); -+ -+ pos += ii; -+ -+ ln = line; -+ -+ /* skip leading whitespace */ -+ while (whitespace(*ln)) -+ ln++; -+ -+ /* skip comments -- except when looking for "not set" */ -+ if (*ln == '#') { -+ if (strstr(ln, "CONFIG_DEBUG_BUGVERBOSE") && -+ strstr(ln, "not set")) -+ kt->flags |= BUGVERBOSE_OFF; -+ continue; -+ } -+ -+ /* Find '=' */ -+ if ((head = strchr(ln, '=')) != NULL) { -+ *head = '\0'; -+ val = head + 1; -+ -+ head--; -+ -+ /* skip trailing whitespace */ -+ while (whitespace(*head)) { -+ *head = '\0'; -+ head--; -+ } -+ -+ /* skip whitespace */ -+ while (whitespace(*val)) -+ val++; -+ -+ } else /* Bad line, skip it */ -+ continue; -+ -+ if (command != IKCFG_INIT) -+ continue; -+ -+ for (jj = 0; ikconfig[jj]; jj++) { -+ if (STREQ(ln, ikconfig[jj])) { -+ -+ if (STREQ(ln, "CONFIG_NR_CPUS")) { -+ kt->kernel_NR_CPUS = atoi(val); -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "CONFIG_NR_CPUS: %d\n", -+ kt->kernel_NR_CPUS); -+ -+ } else if (STREQ(ln, "CONFIG_PGTABLE_4")) { -+ machdep->flags |= VM_4_LEVEL; -+ if (CRASHDEBUG(1)) -+ error(INFO, "CONFIG_PGTABLE_4\n"); -+ -+ } else if (STREQ(ln, "CONFIG_HZ")) { -+ machdep->hz = atoi(val); -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "CONFIG_HZ: %d\n", -+ machdep->hz); -+ } -+ } -+ } -+ } -+ } while (ret > 0); -+ -+out1: -+ free(uncomp); -+out2: -+ free(buf); -+ -+ return; -+} -+ -+static void -+read_in_kernel_config_err(int e, char *msg) -+{ -+ error(WARNING, "zlib could not %s\n", msg); -+ switch (e) { -+ case Z_OK: -+ 
fprintf(fp, "Z_OK\n"); -+ break; -+ -+ case Z_STREAM_END: -+ fprintf(fp, "Z_STREAM_END\n"); -+ break; -+ -+ case Z_NEED_DICT: -+ fprintf(fp, "Z_NEED_DICT\n"); -+ break; -+ -+ case Z_ERRNO: -+ fprintf(fp, "Z_ERNO\n"); -+ break; -+ -+ case Z_STREAM_ERROR: -+ fprintf(fp, "Z_STREAM\n"); -+ break; -+ -+ case Z_DATA_ERROR: -+ fprintf(fp, "Z_DATA_ERROR\n"); -+ break; -+ -+ case Z_MEM_ERROR: /* out of memory */ -+ fprintf(fp, "Z_MEM_ERROR\n"); -+ break; -+ -+ case Z_BUF_ERROR: /* not enough room in output buf */ -+ fprintf(fp, "Z_BUF_ERROR\n"); -+ break; -+ -+ case Z_VERSION_ERROR: -+ fprintf(fp, "Z_VERSION_ERROR\n"); -+ break; -+ -+ default: -+ fprintf(fp, "UNKNOWN ERROR: %d\n", e); -+ break; -+ } - } ---- crash/gdb_interface.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/gdb_interface.c 2007-07-31 16:05:22.000000000 -0400 -@@ -1,8 +1,8 @@ - /* gdb_interface.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -31,9 +31,6 @@ - { - argc = 1; - -- if (CRASHDEBUG(1)) -- gdb_readnow_warning(); -- - if (pc->flags & SILENT) { - if (pc->flags & READNOW) - argv[argc++] = "--readnow"; -@@ -198,20 +195,28 @@ - retry: - BZERO(req->buf, BUFSIZE); - req->command = GNU_GET_DATATYPE; -- req->name = "task_struct"; -+ req->name = XEN_HYPER_MODE() ? "page_info" : "task_struct"; - req->flags = GNU_RETURN_ON_ERROR; - gdb_interface(req); - - if (req->flags & GNU_COMMAND_FAILED) { -+ if (XEN_HYPER_MODE()) -+ no_debugging_data(WARNING); /* just bail out */ -+ - if (!debug_data_pulled_in) { - if (CRASHDEBUG(1)) - error(INFO, -- "gdb_session_init: pulling in debug data by accessing init_mm.mmap\n"); -+ "gdb_session_init: pulling in debug data by accessing init_mm.mmap %s\n", -+ symbol_exists("sysfs_mount") ? -+ "and syfs_mount" : ""); - debug_data_pulled_in = TRUE; - req->command = GNU_PASS_THROUGH; - req->flags = GNU_RETURN_ON_ERROR|GNU_NO_READMEM; - req->name = NULL; -- sprintf(req->buf, "print init_mm.mmap"); -+ if (symbol_exists("sysfs_mount")) -+ sprintf(req->buf, "print sysfs_mount, init_mm.mmap"); -+ else -+ sprintf(req->buf, "print init_mm.mmap"); - gdb_interface(req); - if (!(req->flags & GNU_COMMAND_FAILED)) - goto retry; -@@ -237,11 +242,16 @@ - sprintf(req->buf, "set height 0"); - gdb_interface(req); - -+ req->command = GNU_PASS_THROUGH; -+ req->name = NULL, req->flags = 0; -+ sprintf(req->buf, "set width 0"); -+ gdb_interface(req); -+ - /* - * Patch gdb's symbol values with the correct values from either - * the System.map or non-debug vmlinux, whichever is in effect. 
- */ -- if ((pc->flags & SYSMAP) || -+ if ((pc->flags & SYSMAP) || (kt->flags & (RELOC_SET|RELOC_FORCE)) || - (pc->namelist_debug && !pc->debuginfo_file)) { - req->command = GNU_PATCH_SYMBOL_VALUES; - req->flags = GNU_RETURN_ON_ERROR; -@@ -556,6 +566,14 @@ - - error_hook = NULL; - -+ if (st->flags & ADD_SYMBOL_FILE) { -+ error(INFO, -+ "%s\n gdb add-symbol-file command failed\n", -+ st->current->mod_namelist); -+ delete_load_module(st->current->mod_base); -+ st->flags &= ~ADD_SYMBOL_FILE; -+ } -+ - if (pc->cur_gdb_cmd) { - pc->last_gdb_cmd = pc->cur_gdb_cmd; - pc->cur_gdb_cmd = 0; -@@ -619,6 +637,7 @@ - "clear", "disable", "enable", "condition", "ignore", "frame", - "select-frame", "f", "up", "down", "catch", "tcatch", "return", - "file", "exec-file", "core-file", "symbol-file", "load", "si", "ni", -+ "shell", - NULL /* must be last */ - }; - -@@ -628,7 +647,7 @@ - }; - - #define RESTRICTED_GDB_COMMAND \ -- "restricted gdb command: %s\n%s\"%s\" may only be used in a .gdbinit file or in a command file.\n%sThe .gdbinit file is read automatically during %s initialization.\n%sOther user-defined command files may be read interactively during\n%s%s runtime by using the gdb \"source\" command." -+ "restricted gdb command: %s\n%s\"%s\" may only be used in a .gdbinit file or in a command file.\n%sThe .gdbinit file is read automatically during %s initialization.\n%sOther user-defined command files may be read interactively during\n%s%s runtime by using the gdb \"source\" command.\n" - - static int - is_restricted_command(char *cmd, ulong flags) -@@ -722,8 +741,10 @@ - if (pc->cur_req->flags & GNU_NO_READMEM) - return TRUE; - -- if (UNIQUE_COMMAND("dis")) -+ if (pc->curcmd_flags & MEMTYPE_UVADDR) - memtype = UVADDR; -+ else if (pc->curcmd_flags & MEMTYPE_FILEADDR) -+ memtype = FILEADDR; - else if (!IS_KVADDR(addr)) { - if (STREQ(pc->curcmd, "gdb") && - STRNEQ(pc->cur_req->buf, "x/")) { -@@ -740,12 +761,11 @@ - if (CRASHDEBUG(1)) - console("gdb_readmem_callback[%d]: %lx %d\n", - memtype, addr, len); -- --#ifdef OLDWAY -- return(readmem(addr, KVADDR, buf, len, -- "gdb_readmem_callback", RETURN_ON_ERROR)); --#endif - -+ if (memtype == FILEADDR) -+ return(readmem(pc->curcmd_private, memtype, buf, len, -+ "gdb_readmem_callback", RETURN_ON_ERROR)); -+ - switch (len) - { - case SIZEOF_8BIT: ---- crash/configure.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/configure.c 2007-06-04 11:58:33.000000000 -0400 +--- crash/configure.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/configure.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* configure.c - core analysis suite * @@ -16065,16148 +146,361 @@ } /* ---- crash/net.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/net.c 2007-02-21 12:09:35.000000000 -0500 +--- crash/Makefile.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/Makefile 2008-01-17 15:17:20.000000000 -0500 +@@ -3,8 +3,8 @@ + # Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. + # www.missioncriticallinux.com, info@missioncriticallinux.com + # +-# Copyright (C) 2002, 2003, 2004, 2005 David Anderson +-# Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson ++# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. 
+ # + # This program is free software; you can redistribute it and/or modify + # it under the terms of the GNU General Public License as published by +@@ -35,10 +35,12 @@ + # + # GDB, GDB_FILES and GDB_OFILES will be configured automatically by configure + # +-GDB=gdb-6.1 +-GDB_FILES=${GDB_6.1_FILES} ++GDB= ++GDB_FILES= + GDB_OFILES= + ++GDB_PATCH_FILES=gdb-6.1.patch ++ + # + # Default installation directory + # +@@ -60,34 +62,81 @@ + # (2) Or invoke make like so: + # make LDFLAGS=-static NAT_CLIBS="-lc -lresolv" GDBSERVER_LIBS="-lc -lresolv" + +-GENERIC_HFILES=defs.h ++GENERIC_HFILES=defs.h xen_hyper_defs.h + MCORE_HFILES=va_server.h vas_crash.h +-REDHAT_HFILES=netdump.h diskdump.h ++REDHAT_HFILES=netdump.h diskdump.h xendump.h + LKCD_DUMP_HFILES=lkcd_vmdump_v1.h lkcd_vmdump_v2_v3.h lkcd_dump_v5.h \ +- lkcd_dump_v7.h lkcd_dump_v8.h lkcd_fix_mem.h ++ lkcd_dump_v7.h lkcd_dump_v8.h ++LKCD_OBSOLETE_HFILES=lkcd_fix_mem.h + LKCD_TRACE_HFILES=lkcd_x86_trace.h + IBM_HFILES=ibm_common.h +-UNWIND_HFILES=unwind.h unwind_i.h rse.h ++UNWIND_HFILES=unwind.h unwind_i.h rse.h unwind_x86.h unwind_x86_64.h + + CFILES=main.c tools.c global_data.c memory.c filesys.c help.c task.c \ + kernel.c test.c gdb_interface.c configure.c net.c dev.c \ +- alpha.c x86.c ppc.c ia64.c s390.c s390x.c ppc64.c x86_64.c \ ++ alpha.c x86.c ppc.c ia64.c s390.c s390x.c s390dbf.c ppc64.c x86_64.c \ + extensions.c remote.c va_server.c va_server_v1.c symbols.c cmdline.c \ + lkcd_common.c lkcd_v1.c lkcd_v2_v3.c lkcd_v5.c lkcd_v7.c lkcd_v8.c\ + lkcd_fix_mem.c s390_dump.c lkcd_x86_trace.c \ +- netdump.c diskdump.c unwind.c unwind_decoder.c ++ netdump.c diskdump.c xendump.c unwind.c unwind_decoder.c \ ++ unwind_x86_32_64.c \ ++ xen_hyper.c xen_hyper_command.c xen_hyper_global_data.c \ ++ xen_hyper_dump_tables.c + + SOURCE_FILES=${CFILES} ${GENERIC_HFILES} ${MCORE_HFILES} \ + ${REDHAT_CFILES} ${REDHAT_HFILES} ${UNWIND_HFILES} \ +- ${LKCD_DUMP_HFILES} ${LKCD_TRACE_HFILES} ${IBM_HFILES} ++ ${LKCD_DUMP_HFILES} ${LKCD_TRACE_HFILES} ${LKCD_OBSOLETE_HFILES}\ ++ ${IBM_HFILES} + + OBJECT_FILES=main.o tools.o global_data.o memory.o filesys.o help.o task.o \ + build_data.o kernel.o test.o gdb_interface.o net.o dev.o \ +- alpha.o x86.o ppc.o ia64.o s390.o s390x.o ppc64.o x86_64.o \ ++ alpha.o x86.o ppc.o ia64.o s390.o s390x.o s390dbf.o ppc64.o x86_64.o \ + extensions.o remote.o va_server.o va_server_v1.o symbols.o cmdline.o \ + lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \ +- lkcd_fix_mem.o s390_dump.o netdump.o diskdump.o \ +- lkcd_x86_trace.o unwind_v1.o unwind_v2.o unwind_v3.o ++ lkcd_fix_mem.o s390_dump.o netdump.o diskdump.o xendump.o \ ++ lkcd_x86_trace.o unwind_v1.o unwind_v2.o unwind_v3.o \ ++ unwind_x86_32_64.o \ ++ xen_hyper.o xen_hyper_command.o xen_hyper_global_data.o \ ++ xen_hyper_dump_tables.o ++ ++# These are the current set of crash extensions sources. They are not built ++# by default unless the third command line of the "all:" stanza is uncommented. ++# Alternatively, they can be built by entering "make extensions" from this ++# directory. 
++ ++EXTENSIONS=extensions ++EXTENSION_SOURCE_FILES=${EXTENSIONS}/Makefile ${EXTENSIONS}/echo.c ${EXTENSIONS}/dminfo.c \ ++ ${EXTENSIONS}/libsial/Makefile \ ++ ${EXTENSIONS}/libsial/mkbaseop.c \ ++ ${EXTENSIONS}/libsial/README \ ++ ${EXTENSIONS}/libsial/README.sial \ ++ ${EXTENSIONS}/libsial/sial_alloc.c \ ++ ${EXTENSIONS}/libsial/sial_api.c \ ++ ${EXTENSIONS}/libsial/sial_api.h \ ++ ${EXTENSIONS}/libsial/sial_builtin.c \ ++ ${EXTENSIONS}/libsial/sial_case.c \ ++ ${EXTENSIONS}/libsial/sial_define.c \ ++ ${EXTENSIONS}/libsial/sial_func.c \ ++ ${EXTENSIONS}/libsial/sial.h \ ++ ${EXTENSIONS}/libsial/sial_input.c \ ++ ${EXTENSIONS}/libsial/sial.l \ ++ ${EXTENSIONS}/libsial/sial-lsed \ ++ ${EXTENSIONS}/libsial/sial_member.c \ ++ ${EXTENSIONS}/libsial/sial_node.c \ ++ ${EXTENSIONS}/libsial/sial_num.c \ ++ ${EXTENSIONS}/libsial/sial_op.c \ ++ ${EXTENSIONS}/libsial/sialpp.l \ ++ ${EXTENSIONS}/libsial/sialpp-lsed \ ++ ${EXTENSIONS}/libsial/sialpp.y \ ++ ${EXTENSIONS}/libsial/sial_print.c \ ++ ${EXTENSIONS}/libsial/sial_stat.c \ ++ ${EXTENSIONS}/libsial/sial_str.c \ ++ ${EXTENSIONS}/libsial/sial_type.c \ ++ ${EXTENSIONS}/libsial/sial_util.c \ ++ ${EXTENSIONS}/libsial/sial_var.c \ ++ ${EXTENSIONS}/libsial/sial.y \ ++ ${EXTENSIONS}/sial.c \ ++ ${EXTENSIONS}/sial.mk + + DAEMON_OBJECT_FILES=remote_daemon.o va_server.o va_server_v1.o \ + lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \ +@@ -150,10 +199,11 @@ + ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ + ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ + ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/dwarf2read.c \ +- ${GDB}/include/obstack.h ++ ${GDB}/include/obstack.h ${GDB}/gdb/ppc-linux-tdep.c + GDB_6.1_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \ + ${GDB}/gdb/target.o ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ +- ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o ++ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o \ ++ ${GDB}/gdb/ppc-linux-tdep.o + + # + # GDB_FLAGS is passed up from the gdb Makefile. +@@ -175,7 +225,8 @@ + + CFLAGS=-g -D${TARGET} ${TARGET_CFLAGS} + +-TAR_FILES=${SOURCE_FILES} Makefile COPYING README .rh_rpm_package crash.8 ++TAR_FILES=${SOURCE_FILES} Makefile COPYING README .rh_rpm_package crash.8 \ ++ ${EXTENSION_SOURCE_FILES} + CSCOPE_FILES=${SOURCE_FILES} + + READLINE_DIRECTORY=./${GDB}/readline +@@ -184,9 +235,13 @@ + + REDHATFLAGS=-DREDHAT + ++# To build the extensions library by default, uncomment the third command ++# line below. Otherwise they can be built by entering "make extensions". ++ + all: make_configure + @./configure -p "RPMPKG=${RPMPKG}" -b + @make --no-print-directory gdb_merge ++# @make --no-print-directory extensions + + gdb_merge: force + @if [ ! 
-f ${GDB}/README ]; then \ +@@ -206,6 +261,11 @@ + @for FILE in ${GDB_FILES}; do\ + echo $$FILE >> gdb.files; done + @tar --exclude-from gdb.files -xvzmf ${GDB}.tar.gz ++ @make --no-print-directory gdb_patch ++ ++gdb_patch: ++ if [ -f ${GDB}.patch ] && [ -s ${GDB}.patch ]; then \ ++ patch -p0 < ${GDB}.patch; fi + + library: make_build_data ${OBJECT_FILES} + ar -rs ${PROGRAM}lib.a ${OBJECT_FILES} +@@ -223,6 +283,7 @@ + + clean: + rm -f ${OBJECT_FILES} ${DAEMON_OBJECT_FILES} ${PROGRAM} ${PROGRAM}lib.a ${GDB_OFILES} ++ @(cd extensions; make --no-print-directory -i clean) + + make_build_data: force + cc -c ${CFLAGS} build_data.c ${WARNING_OPTIONS} ${WARNING_ERROR} +@@ -318,7 +379,7 @@ + remote_daemon.o: ${GENERIC_HFILES} remote.c + cc -c ${CFLAGS} -DDAEMON remote.c -o remote_daemon.o ${WARNING_OPTIONS} ${WARNING_ERROR} + +-x86.o: ${GENERIC_HFILES} x86.c ++x86.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86.c + cc -c ${CFLAGS} -DMCLX x86.c ${WARNING_OPTIONS} ${WARNING_ERROR} + + alpha.o: ${GENERIC_HFILES} alpha.c +@@ -327,13 +388,13 @@ + ppc.o: ${GENERIC_HFILES} ppc.c + cc -c ${CFLAGS} ppc.c ${WARNING_OPTIONS} ${WARNING_ERROR} + +-ia64.o: ${GENERIC_HFILES} ia64.c ++ia64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ia64.c + cc -c ${CFLAGS} ia64.c ${WARNING_OPTIONS} ${WARNING_ERROR} + + ppc64.o: ${GENERIC_HFILES} ppc64.c + cc -c ${CFLAGS} ppc64.c ${WARNING_OPTIONS} ${WARNING_ERROR} + +-x86_64.o: ${GENERIC_HFILES} x86_64.c ++x86_64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86_64.c + cc -c ${CFLAGS} x86_64.c ${WARNING_OPTIONS} ${WARNING_ERROR} + + s390.o: ${GENERIC_HFILES} ${IBM_HFILES} s390.c +@@ -342,6 +403,9 @@ + s390x.o: ${GENERIC_HFILES} ${IBM_HFILES} s390x.c + cc -c ${CFLAGS} s390x.c ${WARNING_OPTIONS} ${WARNING_ERROR} + ++s390dbf.o: ${GENERIC_HFILES} ${IBM_HFILES} s390dbf.c ++ cc -c ${CFLAGS} s390dbf.c ${WARNING_OPTIONS} ${WARNING_ERROR} ++ + s390_dump.o: ${GENERIC_HFILES} ${IBM_HFILES} s390_dump.c + cc -c ${CFLAGS} s390_dump.c ${WARNING_OPTIONS} ${WARNING_ERROR} + +@@ -353,12 +417,18 @@ + diskdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} diskdump.c + cc -c ${CFLAGS} diskdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} + ++xendump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} xendump.c ++ cc -c ${CFLAGS} xendump.c ${WARNING_OPTIONS} ${WARNING_ERROR} ++ + extensions.o: ${GENERIC_HFILES} extensions.c + cc -c ${CFLAGS} extensions.c ${WARNING_OPTIONS} ${WARNING_ERROR} + + lkcd_x86_trace.o: ${GENERIC_HFILES} ${LKCD_TRACE_HFILES} lkcd_x86_trace.c + cc -c ${CFLAGS} -DREDHAT lkcd_x86_trace.c ${WARNING_OPTIONS} ${WARNING_ERROR} + ++unwind_x86_32_64.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind_x86_32_64.c ++ cc -c ${CFLAGS} unwind_x86_32_64.c -o unwind_x86_32_64.o ${WARNING_OPTIONS} ${WARNING_ERROR} ++ + unwind_v1.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c + cc -c ${CFLAGS} -DREDHAT -DUNWIND_V1 unwind.c -o unwind_v1.o ${WARNING_OPTIONS} ${WARNING_ERROR} + +@@ -369,7 +439,19 @@ + cc -c ${CFLAGS} -DREDHAT -DUNWIND_V3 unwind.c -o unwind_v3.o ${WARNING_OPTIONS} ${WARNING_ERROR} + + lkcd_fix_mem.o: ${GENERIC_HFILES} ${LKCD_HFILES} lkcd_fix_mem.c +- cc -c ${CFLAGS} lkcd_fix_mem.c ${WARNING_OPTIONS} ${WARNING_ERROR} ++ cc -c ${CFLAGS} -DMCLX lkcd_fix_mem.c ${WARNING_OPTIONS} ${WARNING_ERROR} ++ ++xen_hyper.o: ${GENERIC_HFILES} xen_hyper.c ++ cc -c ${CFLAGS} xen_hyper.c ${WARNING_OPTIONS} ${WARNING_ERROR} ++ ++xen_hyper_command.o: ${GENERIC_HFILES} xen_hyper_command.c ++ cc -c ${CFLAGS} xen_hyper_command.c ${WARNING_OPTIONS} ${WARNING_ERROR} ++ ++xen_hyper_global_data.o: ${GENERIC_HFILES} xen_hyper_global_data.c ++ cc -c 
${CFLAGS} xen_hyper_global_data.c ${WARNING_OPTIONS} ${WARNING_ERROR} ++ ++xen_hyper_dump_tables.o: ${GENERIC_HFILES} xen_hyper_dump_tables.c ++ cc -c ${CFLAGS} xen_hyper_dump_tables.c ${WARNING_OPTIONS} ${WARNING_ERROR} + + ${PROGRAM}: force + @make --no-print-directory all +@@ -393,13 +475,13 @@ + + gdb_files: make_configure + @./configure -q -b +- @echo ${GDB_FILES} ++ @echo ${GDB_FILES} ${GDB_PATCH_FILES} + + show_files: + @if [ -f ${PROGRAM} ]; then \ +- ./${PROGRAM} --no_crashrc -h README > README; fi +- @echo ${SOURCE_FILES} Makefile ${GDB_FILES} COPYING README \ +- .rh_rpm_package crash.8 ++ ./${PROGRAM} --no_scroll --no_crashrc -h README > README; echo $?; fi ++ @echo ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} COPYING README \ ++ .rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} + + ctags: + ctags ${SOURCE_FILES} +@@ -410,8 +492,8 @@ + + do_tar: + @if [ -f ${PROGRAM} ]; then \ +- ./${PROGRAM} --no_crashrc -h README > README; fi +- tar cvzf ${PROGRAM}.tar.gz ${TAR_FILES} ${GDB_FILES} ++ ./${PROGRAM} --no_scroll --no_crashrc -h README > README; fi ++ tar cvzf ${PROGRAM}.tar.gz ${TAR_FILES} ${GDB_FILES} ${GDB_PATCH_FILES} + @echo; ls -l ${PROGRAM}.tar.gz + + # To create a base tar file for Red Hat RPM packaging, pass the base RPM +@@ -421,12 +503,12 @@ + # spec file will have its own release number, which will in turn get passed + # to the "all" target upon the initial build. + +-RELEASE=4.0 ++RELEASE= + + release: make_configure + @if [ "`id --user`" != "0" ]; then \ + echo "make release: must be super-user"; exit 1; fi +- @./configure -p "RPMPKG=${RPMPKG}" -u -g ++ @./configure -P "RPMPKG=${RPMPKG}" -u -g + @make --no-print-directory release_configure + @echo + @echo "cvs tag this release if necessary" +@@ -446,10 +528,10 @@ + @rm -f ${PROGRAM}-${RELEASE}.tar.gz + @rm -f ${PROGRAM}-${RELEASE}.src.rpm + @chown root ./RELDIR/${PROGRAM}-${RELEASE} +- @tar cf - ${SOURCE_FILES} Makefile ${GDB_FILES} COPYING \ +- .rh_rpm_package crash.8 | (cd ./RELDIR/${PROGRAM}-${RELEASE}; tar xf -) ++ @tar cf - ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} COPYING \ ++ .rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} | (cd ./RELDIR/${PROGRAM}-${RELEASE}; tar xf -) + @cp ${GDB}.tar.gz ./RELDIR/${PROGRAM}-${RELEASE} +- @./${PROGRAM} --no_crashrc -h README > ./RELDIR/${PROGRAM}-${RELEASE}/README ++ @./${PROGRAM} --no_scroll --no_crashrc -h README > ./RELDIR/${PROGRAM}-${RELEASE}/README + @(cd ./RELDIR; find . -exec chown root {} ";") + @(cd ./RELDIR; find . -exec chgrp root {} ";") + @(cd ./RELDIR; find . -exec touch {} ";") +@@ -464,7 +546,7 @@ + cp ${PROGRAM}-${RELEASE}.tar.gz /usr/src/redhat/SOURCES; \ + /usr/bin/rpmbuild -bs ${PROGRAM}.spec > /dev/null; \ + rm -f /usr/src/redhat/SOURCES/${PROGRAM}-${RELEASE}.tar.gz; \ +- cp /usr/src/redhat/SRPMS/${PROGRAM}-${RELEASE}.src.rpm . ; \ ++ mv /usr/src/redhat/SRPMS/${PROGRAM}-${RELEASE}.src.rpm . ; \ + ls -l ${PROGRAM}-${RELEASE}.src.rpm; \ + exit 0; fi + +@@ -488,3 +570,10 @@ + + dis: + objdump --disassemble --line-numbers ${PROGRAM} > ${PROGRAM}.dis ++ ++extensions: make_configure ++ @./configure -q -b ++ @make --no-print-directory do_extensions ++ ++do_extensions: ++ @(cd extensions; make -i TARGET=$(TARGET) TARGET_CFLAGS=$(TARGET_CFLAGS)) +--- crash/lkcd_dump_v7.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_dump_v7.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ - /* net.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. 
-- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -50,6 +50,7 @@ - #define STRUCT_NET_DEVICE (0x4) - #define SOCK_V1 (0x8) - #define SOCK_V2 (0x10) -+#define NO_INET_SOCK (0x20) - - #define DEV_NAME_MAX 100 - struct devinfo { -@@ -75,6 +76,7 @@ - static void dump_sockets(ulong, struct reference *); - static int sym_socket_dump(ulong, int, int, ulong, struct reference *); - static void dump_hw_addr(unsigned char *, int); -+static char *dump_in6_addr_port(uint16_t *, uint16_t, char *, int *); - - - #define MK_TYPE_T(f,s,m) \ -@@ -158,13 +160,6 @@ - "in_ifaddr", "ifa_address"); - - STRUCT_SIZE_INIT(sock, "sock"); -- MEMBER_OFFSET_INIT(sock_daddr, "sock", "daddr"); -- MEMBER_OFFSET_INIT(sock_rcv_saddr, "sock", "rcv_saddr"); -- MEMBER_OFFSET_INIT(sock_dport, "sock", "dport"); -- MEMBER_OFFSET_INIT(sock_sport, "sock", "sport"); -- MEMBER_OFFSET_INIT(sock_num, "sock", "num"); -- MEMBER_OFFSET_INIT(sock_family, "sock", "family"); -- MEMBER_OFFSET_INIT(sock_type, "sock", "type"); - - MEMBER_OFFSET_INIT(sock_family, "sock", "family"); - if (VALID_MEMBER(sock_family)) { -@@ -195,7 +190,23 @@ - */ - STRUCT_SIZE_INIT(inet_sock, "inet_sock"); - STRUCT_SIZE_INIT(socket, "socket"); -- MEMBER_OFFSET_INIT(inet_sock_inet, "inet_sock", "inet"); -+ -+ if (STRUCT_EXISTS("inet_opt")) { -+ MEMBER_OFFSET_INIT(inet_sock_inet, "inet_sock", "inet"); -+ MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_opt", "daddr"); -+ MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_opt", "rcv_saddr"); -+ MEMBER_OFFSET_INIT(inet_opt_dport, "inet_opt", "dport"); -+ MEMBER_OFFSET_INIT(inet_opt_sport, "inet_opt", "sport"); -+ MEMBER_OFFSET_INIT(inet_opt_num, "inet_opt", "num"); -+ } else { /* inet_opt moved to inet_sock */ -+ ASSIGN_OFFSET(inet_sock_inet) = 0; -+ MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_sock", "daddr"); -+ MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_sock", "rcv_saddr"); -+ MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "dport"); -+ MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "sport"); -+ MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "num"); -+ } -+ - if (VALID_STRUCT(inet_sock) && - INVALID_MEMBER(inet_sock_inet)) { - /* -@@ -210,15 +221,36 @@ - * to subtract the size of the inet_opt struct - * from the size of the containing inet_sock. - */ -+ net->flags |= NO_INET_SOCK; - ASSIGN_OFFSET(inet_sock_inet) = - SIZE(inet_sock) - STRUCT_SIZE("inet_opt"); - } -- MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_opt", "daddr"); -- MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_opt", -- "rcv_saddr"); -- MEMBER_OFFSET_INIT(inet_opt_dport, "inet_opt", "dport"); -- MEMBER_OFFSET_INIT(inet_opt_sport, "inet_opt", "sport"); -- MEMBER_OFFSET_INIT(inet_opt_num, "inet_opt", "num"); -+ -+ /* -+ * If necessary, set inet_sock size and inet_sock_inet offset, -+ * accounting for the configuration-dependent, intervening, -+ * struct ipv6_pinfo pointer located in between the sock and -+ * inet_opt members of the inet_sock. 
-+ */ -+ if (!VALID_STRUCT(inet_sock)) -+ { -+ if (symbol_exists("tcpv6_protocol") && -+ symbol_exists("udpv6_protocol")) { -+ ASSIGN_SIZE(inet_sock) = SIZE(sock) + -+ sizeof(void *) + STRUCT_SIZE("inet_opt"); -+ ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock) + -+ sizeof(void *); -+ } else { -+ ASSIGN_SIZE(inet_sock) = SIZE(sock) + -+ STRUCT_SIZE("inet_opt"); -+ ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock); -+ } -+ } -+ -+ MEMBER_OFFSET_INIT(ipv6_pinfo_rcv_saddr, "ipv6_pinfo", "rcv_saddr"); -+ MEMBER_OFFSET_INIT(ipv6_pinfo_daddr, "ipv6_pinfo", "daddr"); -+ STRUCT_SIZE_INIT(in6_addr, "in6_addr"); -+ - net->flags |= SOCK_V2; - } - } -@@ -378,6 +410,24 @@ - nhash_buckets = (i = ARRAY_LENGTH(neigh_table_hash_buckets)) ? - i : get_array_length("neigh_table.hash_buckets", - NULL, sizeof(void *)); -+ -+ /* -+ * NOTE: 2.6.8 -> 2.6.9 neigh_table struct changed from: -+ * -+ * struct neighbour *hash_buckets[32]; -+ * to -+ * struct neighbour **hash_buckets; -+ * -+ * Even after hardwiring and testing with the correct -+ * array size, other changes cause this command to break -+ * down, so it needs to be looked at by someone who cares... -+ */ -+ -+ if (nhash_buckets == 0) { -+ option_not_supported('a'); -+ return; -+ } -+ - hash_bytes = nhash_buckets * sizeof(*hash_buckets); - - hash_buckets = (ulong *)GETBUF(hash_bytes); -@@ -609,8 +659,14 @@ - uint16_t dport, sport; - ushort num, family, type; - char *sockbuf, *inet_sockbuf; -+ ulong ipv6_pinfo, ipv6_rcv_saddr, ipv6_daddr; -+ uint16_t u6_addr16_src[8]; -+ uint16_t u6_addr16_dest[8]; -+ char buf2[BUFSIZE]; -+ int len; - - BZERO(buf, BUFSIZE); -+ BZERO(buf2, BUFSIZE); - sockbuf = inet_sockbuf = NULL; - - switch (net->flags & (SOCK_V1|SOCK_V2)) -@@ -646,6 +702,7 @@ - OFFSET(inet_opt_num)); - family = USHORT(inet_sockbuf + OFFSET(sock_common_skc_family)); - type = USHORT(inet_sockbuf + OFFSET(sock_sk_type)); -+ ipv6_pinfo = ULONG(inet_sockbuf + SIZE(sock)); - break; - } - -@@ -723,27 +780,28 @@ - } - - /* make sure we have room at the end... 
*/ -- sprintf(&buf[strlen(buf)], "%s", space(MINSPACE-1)); -+// sprintf(&buf[strlen(buf)], "%s", space(MINSPACE-1)); -+ sprintf(&buf[strlen(buf)], " "); - - if (family == AF_INET) { - if (BITS32()) { -- sprintf(&buf[strlen(buf)], "%*s:%-*d%s", -+ sprintf(&buf[strlen(buf)], "%*s-%-*d%s", - BYTES_IP_ADDR, - inet_ntoa(*((struct in_addr *)&rcv_saddr)), - BYTES_PORT_NUM, - ntohs(sport), - space(1)); -- sprintf(&buf[strlen(buf)], "%*s:%-*d%s", -+ sprintf(&buf[strlen(buf)], "%*s-%-*d%s", - BYTES_IP_ADDR, - inet_ntoa(*((struct in_addr *)&daddr)), - BYTES_PORT_NUM, - ntohs(dport), - space(1)); - } else { -- sprintf(&buf[strlen(buf)], " %s:%d ", -+ sprintf(&buf[strlen(buf)], " %s-%d ", - inet_ntoa(*((struct in_addr *)&rcv_saddr)), - ntohs(sport)); -- sprintf(&buf[strlen(buf)], "%s:%d", -+ sprintf(&buf[strlen(buf)], "%s-%d", - inet_ntoa(*((struct in_addr *)&daddr)), - ntohs(dport)); - } -@@ -753,6 +811,60 @@ - FREEBUF(sockbuf); - if (inet_sockbuf) - FREEBUF(inet_sockbuf); -+ -+ if (family != AF_INET6) -+ return; -+ -+ switch (net->flags & (SOCK_V1|SOCK_V2)) -+ { -+ case SOCK_V1: -+ break; -+ -+ case SOCK_V2: -+ if (INVALID_MEMBER(ipv6_pinfo_rcv_saddr) || -+ INVALID_MEMBER(ipv6_pinfo_daddr)) -+ break; -+ -+ ipv6_rcv_saddr = ipv6_pinfo + OFFSET(ipv6_pinfo_rcv_saddr); -+ ipv6_daddr = ipv6_pinfo + OFFSET(ipv6_pinfo_daddr); -+ -+ if (!readmem(ipv6_rcv_saddr, KVADDR, u6_addr16_src, SIZE(in6_addr), -+ "ipv6_rcv_saddr buffer", QUIET|RETURN_ON_ERROR)) -+ break; -+ if (!readmem(ipv6_daddr, KVADDR, u6_addr16_dest, SIZE(in6_addr), -+ "ipv6_daddr buffer", QUIET|RETURN_ON_ERROR)) -+ break; -+ -+ sprintf(&buf[strlen(buf)], "%*s ", BITS32() ? 22 : 12, -+ dump_in6_addr_port(u6_addr16_src, sport, buf2, &len)); -+ if (BITS32() && (len > 22)) -+ len = 1; -+ mkstring(dump_in6_addr_port(u6_addr16_dest, dport, buf2, NULL), -+ len, CENTER, NULL); -+ sprintf(&buf[strlen(buf)], "%s", buf2); -+ -+ break; -+ } -+} -+ -+static char * -+dump_in6_addr_port(uint16_t *addr, uint16_t port, char *buf, int *len) -+{ -+ sprintf(buf, "%x:%x:%x:%x:%x:%x:%x:%x-%d", -+ ntohs(addr[0]), -+ ntohs(addr[1]), -+ ntohs(addr[2]), -+ ntohs(addr[3]), -+ ntohs(addr[4]), -+ ntohs(addr[5]), -+ ntohs(addr[6]), -+ ntohs(addr[7]), -+ ntohs(port)); -+ -+ if (len) -+ *len = strlen(buf); -+ -+ return buf; - } - - -@@ -899,6 +1011,8 @@ - fprintf(fp, "%sSTRUCT_DEVICE", others++ ? "|" : ""); - if (net->flags & STRUCT_NET_DEVICE) - fprintf(fp, "%sSTRUCT_NET_DEVICE", others++ ? "|" : ""); -+ if (net->flags & NO_INET_SOCK) -+ fprintf(fp, "%sNO_INET_SOCK", others++ ? "|" : ""); - if (net->flags & SOCK_V1) - fprintf(fp, "%sSOCK_V1", others++ ? 
"|" : ""); - if (net->flags & SOCK_V2) -@@ -972,7 +1086,7 @@ - void - dump_sockets_workhorse(ulong task, ulong flag, struct reference *ref) - { -- ulong files_struct_addr = 0; -+ ulong files_struct_addr = 0, fdtable_addr = 0; - int max_fdset = 0; - int max_fds = 0; - ulong open_fds_addr = 0; -@@ -1004,32 +1118,54 @@ - sizeof(void *), "task files contents", FAULT_ON_ERROR); - - if (files_struct_addr) { -- readmem(files_struct_addr + OFFSET(files_struct_max_fdset), -- KVADDR, &max_fdset, sizeof(int), -- "files_struct max_fdset", FAULT_ON_ERROR); -- -- readmem(files_struct_addr + OFFSET(files_struct_max_fds), -- KVADDR, &max_fds, sizeof(int), "files_struct max_fds", -- FAULT_ON_ERROR); -- } -+ if (VALID_MEMBER(files_struct_max_fdset)) { -+ readmem(files_struct_addr + OFFSET(files_struct_max_fdset), -+ KVADDR, &max_fdset, sizeof(int), -+ "files_struct max_fdset", FAULT_ON_ERROR); -+ readmem(files_struct_addr + OFFSET(files_struct_max_fds), -+ KVADDR, &max_fds, sizeof(int), "files_struct max_fds", -+ FAULT_ON_ERROR); -+ } -+ else if (VALID_MEMBER(files_struct_fdt)) { -+ readmem(files_struct_addr + OFFSET(files_struct_fdt), KVADDR, -+ &fdtable_addr, sizeof(void *), "fdtable buffer", -+ FAULT_ON_ERROR); -+ if (VALID_MEMBER(fdtable_max_fdset)) -+ readmem(fdtable_addr + OFFSET(fdtable_max_fdset), -+ KVADDR, &max_fdset, sizeof(int), -+ "fdtable_struct max_fdset", FAULT_ON_ERROR); -+ else -+ max_fdset = -1; -+ readmem(fdtable_addr + OFFSET(fdtable_max_fds), -+ KVADDR, &max_fds, sizeof(int), "fdtable_struct max_fds", -+ FAULT_ON_ERROR); -+ } -+ } - -- if (!files_struct_addr || (max_fdset == 0) || (max_fds == 0)) { -+ if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) || -+ !files_struct_addr || (max_fdset == 0) || (max_fds == 0)) { - if (!NET_REFERENCE_CHECK(ref)) - fprintf(fp, "No open sockets.\n"); - return; - } - -- readmem(files_struct_addr + OFFSET(files_struct_open_fds), KVADDR, -- &open_fds_addr, sizeof(void *), "files_struct open_fds addr", -- FAULT_ON_ERROR); -+ if (VALID_MEMBER(fdtable_open_fds)){ -+ readmem(fdtable_addr + OFFSET(fdtable_open_fds), KVADDR, -+ &open_fds_addr, sizeof(void *), "files_struct open_fds addr", -+ FAULT_ON_ERROR); -+ readmem(fdtable_addr + OFFSET(fdtable_fd), KVADDR, &fd, -+ sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR); -+ } else { -+ readmem(files_struct_addr + OFFSET(files_struct_open_fds), KVADDR, -+ &open_fds_addr, sizeof(void *), "files_struct open_fds addr", -+ FAULT_ON_ERROR); -+ readmem(files_struct_addr + OFFSET(files_struct_fd), KVADDR, &fd, -+ sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR); -+ } - - if (open_fds_addr) -- readmem(open_fds_addr, KVADDR, &open_fds, sizeof(fd_set), -- "files_struct open_fds", FAULT_ON_ERROR); -- -- readmem(files_struct_addr + OFFSET(files_struct_fd), KVADDR, &fd, -- sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR); -- -+ readmem(open_fds_addr, KVADDR, &open_fds, sizeof(fd_set), -+ "files_struct open_fds", FAULT_ON_ERROR); - if (!open_fds_addr || !fd) { - if (!NET_REFERENCE_CHECK(ref)) - fprintf(fp, "No open sockets.\n"); -@@ -1061,7 +1197,7 @@ - for (;;) { - unsigned long set; - i = j * __NFDBITS; -- if ((i >= max_fdset) || (i >= max_fds)) -+ if (((max_fdset >= 0) && (i >= max_fdset)) || (i >= max_fds)) - break; - set = open_fds.__fds_bits[j++]; - while (set) { -@@ -1096,9 +1232,9 @@ - */ - - static char *socket_hdr_32 = --"FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT"; -+"FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT"; - static char *socket_hdr_64 = --"FD SOCKET SOCK 
FAMILY:TYPE SOURCE:PORT DESTINATION:PORT"; -+"FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT"; - - static int - sym_socket_dump(ulong file, -@@ -1223,7 +1359,12 @@ - dump_struct("sock", sock, 0); - break; - case SOCK_V2: -- dump_struct("inet_sock", sock, 0); -+ if (STRUCT_EXISTS("inet_sock") && !(net->flags & NO_INET_SOCK)) -+ dump_struct("inet_sock", sock, 0); -+ else if (STRUCT_EXISTS("sock")) -+ dump_struct("sock", sock, 0); -+ else -+ fprintf(fp, "\nunable to display inet_sock structure\n"); - break; - } - break; ---- crash/dev.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/dev.c 2006-12-22 14:19:41.000000000 -0500 -@@ -91,13 +91,13 @@ - switch(c) - { - case 'i': -- if (machine_type("X86") || machine_type("S390X")) -+ if (machine_type("S390X")) - option_not_supported(c); - do_io(); - return; - - case 'p': -- if (machine_type("X86") || machine_type("S390X")) -+ if (machine_type("S390X")) - option_not_supported(c); - do_pci(); - return; -@@ -1957,29 +1955,44 @@ - unsigned int class; - unsigned short device, vendor; - unsigned char busno; -- ulong *devlist, bus, devfn, tmp; -+ ulong *devlist, bus, devfn, prev, next; - char buf1[BUFSIZE]; - char buf2[BUFSIZE]; - char buf3[BUFSIZE]; - -- fprintf(fp, "%s BU:SL.FN CLASS: VENDOR-DEVICE\n", -- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "PCI_DEV")); -+ if (!symbol_exists("pci_devices")) -+ error(FATAL, "no PCI devices found on this system.\n"); - - BZERO(&pcilist_data, sizeof(struct list_data)); - - if (VALID_MEMBER(pci_dev_global_list)) { -- get_symbol_data("pci_devices", sizeof(void *), &tmp); -- readmem(tmp + OFFSET(list_head_next), KVADDR, -- &pcilist_data.start, sizeof(void *), "pci devices", -- FAULT_ON_ERROR); -+ get_symbol_data("pci_devices", sizeof(void *), &pcilist_data.start); - pcilist_data.end = symbol_value("pci_devices"); - pcilist_data.list_head_offset = OFFSET(pci_dev_global_list); -+ readmem(symbol_value("pci_devices") + OFFSET(list_head_prev), -+ KVADDR, &prev, sizeof(void *), "list head prev", -+ FAULT_ON_ERROR); -+ /* -+ * Check if this system does not have any PCI devices. -+ */ -+ if ((pcilist_data.start == pcilist_data.end) && -+ (prev == pcilist_data.end)) -+ error(FATAL, "no PCI devices found on this system.\n"); - -- } else { -+ } else if (VALID_MEMBER(pci_dev_next)) { - get_symbol_data("pci_devices", sizeof(void *), - &pcilist_data.start); - pcilist_data.member_offset = OFFSET(pci_dev_next); -- } -+ /* -+ * Check if this system does not have any PCI devices. -+ */ -+ readmem(pcilist_data.start + pcilist_data.member_offset, -+ KVADDR, &next, sizeof(void *), "pci dev next", -+ FAULT_ON_ERROR); -+ if (!next) -+ error(FATAL, "no PCI devices found on this system.\n"); -+ } else -+ option_not_supported('p'); - - hq_open(); - devcnt = do_list(&pcilist_data); -@@ -1987,6 +2000,9 @@ - devcnt = retrieve_list(devlist, devcnt); - hq_close(); - -+ fprintf(fp, "%s BU:SL.FN CLASS: VENDOR-DEVICE\n", -+ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "PCI_DEV")); -+ - for (i = 0; i < devcnt; i++) { - - /* ---- crash/alpha.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/alpha.c 2006-10-11 09:14:35.000000000 -0400 -@@ -1,8 +1,8 @@ - /* alpha.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -186,7 +186,8 @@ - "irq_desc", NULL, 0); - else - machdep->nr_irqs = 0; -- machdep->hz = HZ; -+ if (!machdep->hz) -+ machdep->hz = HZ; - break; - - case POST_INIT: -@@ -1858,8 +1859,6 @@ - fprintf(fp, " flags: %lx (", machdep->flags); - if (machdep->flags & HWRESET) - fprintf(fp, "%sHWRESET", others++ ? "|" : ""); -- if (machdep->flags & SYSRQ) -- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); - fprintf(fp, ")\n"); - fprintf(fp, " kvbase: %lx\n", machdep->kvbase); - fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); ---- crash/x86.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/x86.c 2007-04-04 14:29:35.000000000 -0400 -@@ -1,8 +1,8 @@ - /* x86.c - core analysis suite - * - * Portions Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -51,6 +51,7 @@ - * rights to redistribute these changes. - */ - #include "defs.h" -+#include "xen_hyper_defs.h" - - #ifndef MCLX - -@@ -176,6 +177,7 @@ - static void db_symbol_values(db_sym_t, char **, db_expr_t *); - static int db_sym_numargs(db_sym_t, int *, char **); - static void x86_dump_line_number(ulong); -+static void x86_clear_machdep_cache(void); - - static ulong mach_debug = 0; - -@@ -215,7 +217,7 @@ - - argp = (int *)db_get_value((int)&fp->f_retaddr, 4, FALSE, bt); - /* -- * XXX etext is wrong for LKMs. We should attempt to interpret -+ * etext is wrong for LKMs. We should attempt to interpret - * the instruction at the return address in all cases. This - * may require better fault handling. 
- */ -@@ -685,6 +687,7 @@ - bt->debug || - (bt->flags & BT_FRAMESIZE_DEBUG) || - !(bt->flags & BT_OLD_BACK_TRACE)) { -+ bt->flags &= ~BT_OLD_BACK_TRACE; - lkcd_x86_back_trace(bt, 0, fp); - return; - } -@@ -962,8 +965,12 @@ - */ - static int x86_uvtop(struct task_context *, ulong, physaddr_t *, int); - static int x86_kvtop(struct task_context *, ulong, physaddr_t *, int); --static int x86_uvtop_pae(struct task_context *, ulong, physaddr_t *, int); --static int x86_kvtop_pae(struct task_context *, ulong, physaddr_t *, int); -+static int x86_uvtop_PAE(struct task_context *, ulong, physaddr_t *, int); -+static int x86_kvtop_PAE(struct task_context *, ulong, physaddr_t *, int); -+static int x86_uvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); -+static int x86_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); -+static int x86_uvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int); -+static int x86_kvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int); - static ulong x86_get_task_pgd(ulong); - static ulong x86_processor_speed(void); - static ulong x86_get_pc(struct bt_info *); -@@ -973,6 +980,7 @@ - static uint64_t x86_memory_size(void); - static ulong x86_vmalloc_start(void); - static ulong *read_idt_table(int); -+static void eframe_init(void); - #define READ_IDT_INIT 1 - #define READ_IDT_RUNTIME 2 - static char *extract_idt_function(ulong *, char *, ulong *); -@@ -983,26 +991,42 @@ - static int x86_dis_filter(ulong, char *); - static struct line_number_hook x86_line_number_hooks[]; - static int x86_is_uvaddr(ulong, struct task_context *); -+static void x86_init_kernel_pgd(void); -+static ulong xen_m2p_nonPAE(ulong); -+static int x86_xendump_p2m_create(struct xendump_data *); -+static int x86_xen_kdump_p2m_create(struct xen_kdump_data *); -+static char *x86_xen_kdump_load_page(ulong, char *); -+static char *x86_xen_kdump_load_page_PAE(ulong, char *); -+static ulong x86_xen_kdump_page_mfn(ulong); -+static ulong x86_xen_kdump_page_mfn_PAE(ulong); -+static ulong x86_xendump_panic_task(struct xendump_data *); -+static void x86_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); -+static char *x86_xendump_load_page(ulong, char *); -+static char *x86_xendump_load_page_PAE(ulong, char *); -+static int x86_xendump_page_index(ulong); -+static int x86_xendump_page_index_PAE(ulong); -+static void x86_init_hyper(int); -+static ulong x86_get_stackbase_hyper(ulong); -+static ulong x86_get_stacktop_hyper(ulong); -+ -+static int INT_EFRAME_SS = 14; -+static int INT_EFRAME_ESP = 13; -+static int INT_EFRAME_EFLAGS = 12; /* CS lcall7 */ -+static int INT_EFRAME_CS = 11; /* EIP lcall7 */ -+static int INT_EFRAME_EIP = 10; /* EFLAGS lcall7 */ -+static int INT_EFRAME_ERR = 9; -+static int INT_EFRAME_ES = 8; -+static int INT_EFRAME_DS = 7; -+static int INT_EFRAME_EAX = 6; -+static int INT_EFRAME_EBP = 5; -+static int INT_EFRAME_EDI = 4; -+static int INT_EFRAME_ESI = 3; -+static int INT_EFRAME_EDX = 2; -+static int INT_EFRAME_ECX = 1; -+static int INT_EFRAME_EBX = 0; -+static int INT_EFRAME_GS = -1; - -- --#define INT_EFRAME_SS (14) --#define INT_EFRAME_ESP (13) --#define INT_EFRAME_EFLAGS (12) /* CS lcall7 */ --#define INT_EFRAME_CS (11) /* EIP lcall7 */ --#define INT_EFRAME_EIP (10) /* EFLAGS lcall7 */ --#define INT_EFRAME_ERR (9) -- --#define INT_EFRAME_ES (8) --#define INT_EFRAME_DS (7) --#define INT_EFRAME_EAX (6) --#define INT_EFRAME_EBP (5) --#define INT_EFRAME_EDI (4) --#define INT_EFRAME_ESI (3) --#define INT_EFRAME_EDX (2) --#define 
INT_EFRAME_ECX (1) --#define INT_EFRAME_EBX (0) -- --#define USER_EFRAME_SIZE (INT_EFRAME_SS+1) -+#define MAX_USER_EFRAME_SIZE (16) - #define KERNEL_EFRAME_SIZE (INT_EFRAME_EFLAGS+1) - - #define EFRAME_USER (1) -@@ -1015,7 +1039,7 @@ - { - int i; - char buf[BUFSIZE], *sp; -- ulong int_eframe[USER_EFRAME_SIZE]; -+ ulong int_eframe[MAX_USER_EFRAME_SIZE]; - int eframe_type, args; - ulong value, *argp; - -@@ -1025,11 +1049,11 @@ - return(frame_number); - - GET_STACK_DATA(ep->eframe_addr, (char *)int_eframe, -- USER_EFRAME_SIZE * sizeof(ulong)); -+ SIZE(pt_regs)); - - if (int_eframe[INT_EFRAME_CS] & DPL_BITS) { - if (!INSTACK(ep->eframe_addr + -- (USER_EFRAME_SIZE*sizeof(ulong)) - 1, bt)) -+ SIZE(pt_regs) - 1, bt)) - return(frame_number); - /* error(FATAL, "read of exception frame would go beyond stack\n"); */ - eframe_type = EFRAME_USER; -@@ -1158,17 +1182,24 @@ - int_eframe[INT_EFRAME_EDX]); - - fprintf(fp, -- " DS: %04x ESI: %08lx ES: %04x EDI: %08lx \n", -+ " DS: %04x ESI: %08lx ES: %04x EDI: %08lx", - (short)int_eframe[INT_EFRAME_DS], - int_eframe[INT_EFRAME_ESI], - (short)int_eframe[INT_EFRAME_ES], - int_eframe[INT_EFRAME_EDI]); -+ if (kernel && (INT_EFRAME_GS != -1)) -+ fprintf(fp, " GS: %04x", (short)int_eframe[INT_EFRAME_GS]); -+ fprintf(fp, "\n"); - -- if (!kernel) -- fprintf(fp, " SS: %04x ESP: %08lx EBP: %08lx \n", -+ if (!kernel) { -+ fprintf(fp, " SS: %04x ESP: %08lx EBP: %08lx", - (short)int_eframe[INT_EFRAME_SS], - int_eframe[INT_EFRAME_ESP], - int_eframe[INT_EFRAME_EBP]); -+ if (INT_EFRAME_GS != -1) -+ fprintf(fp, " GS: %04x", (short)int_eframe[INT_EFRAME_GS]); -+ fprintf(fp, "\n"); -+ } - - fprintf(fp, - " CS: %04x EIP: %08lx ERR: %08lx EFLAGS: %08lx \n", -@@ -1355,7 +1386,7 @@ - */ - - struct x86_pt_regs { -- ulong reg_value[USER_EFRAME_SIZE]; -+ ulong reg_value[MAX_USER_EFRAME_SIZE]; - }; - - /* -@@ -1420,6 +1451,17 @@ - break; - } - -+ if (XEN() && ((short)pt->reg_value[INT_EFRAME_CS] == 0x61) && -+ ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) && -+ ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) && -+ IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) { -+ if (!(machdep->flags & OMIT_FRAME_PTR) && -+ !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt)) -+ continue; -+ rv = bt->stackbase + sizeof(ulong) * (first - stack); -+ break; -+ } -+ - /* check for user exception frame */ - - if (((short)pt->reg_value[INT_EFRAME_CS] == 0x23) && -@@ -1441,6 +1483,20 @@ - rv = bt->stackbase + sizeof(ulong) * (first - stack); - break; - } -+ -+ /* -+ * 2.6 kernels using sysenter_entry instead of system_call -+ * have a funky trampoline EIP address. 
-+ */ -+ if (((short)pt->reg_value[INT_EFRAME_CS] == 0x73) && -+ ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) && -+ ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) && -+ ((short)pt->reg_value[INT_EFRAME_SS] == 0x7b) && -+ (pt->reg_value[INT_EFRAME_EFLAGS] == 0x246) && -+ IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) { -+ rv = bt->stackbase + sizeof(ulong) * (first - stack); -+ break; -+ } - } - return(rv); - } -@@ -1536,6 +1592,8 @@ - mode = "USER-MODE"; - } else if ((cs == 0x10) || (cs == 0x60)) { - mode = "KERNEL-MODE"; -+ } else if (XEN() && (cs == 0x61)) { -+ mode = "KERNEL-MODE"; - } else { - mode = "UNKNOWN-MODE"; - } -@@ -1626,6 +1684,11 @@ - { - struct syment *sp, *spn; - -+ if (XEN_HYPER_MODE()) { -+ x86_init_hyper(when); -+ return; -+ } -+ - switch (when) - { - case PRE_SYMTAB: -@@ -1639,7 +1702,7 @@ - machdep->stacksize = machdep->pagesize * 2; - if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc pgd space."); -- if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) -+ if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc pmd space."); - if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc ptbl space."); -@@ -1659,8 +1722,8 @@ - PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL; - PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL; - PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL; -- machdep->uvtop = x86_uvtop_pae; -- machdep->kvtop = x86_kvtop_pae; -+ machdep->uvtop = x86_uvtop_PAE; -+ machdep->kvtop = x86_kvtop_PAE; - } else { - PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL; - PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL; -@@ -1696,14 +1759,19 @@ - machdep->cmd_mach = x86_cmd_mach; - machdep->get_smp_cpus = x86_get_smp_cpus; - machdep->line_number_hooks = x86_line_number_hooks; -- if (x86_omit_frame_pointer()) -- machdep->flags |= OMIT_FRAME_PTR; - machdep->flags |= FRAMESIZE_DEBUG; - machdep->value_to_symbol = generic_machdep_value_to_symbol; -- machdep->init_kernel_pgd = NULL; -+ machdep->init_kernel_pgd = x86_init_kernel_pgd; -+ machdep->xendump_p2m_create = x86_xendump_p2m_create; -+ machdep->xen_kdump_p2m_create = x86_xen_kdump_p2m_create; -+ machdep->xendump_panic_task = x86_xendump_panic_task; -+ machdep->get_xendump_regs = x86_get_xendump_regs; -+ machdep->clear_machdep_cache = x86_clear_machdep_cache; - break; - - case POST_GDB: -+ if (x86_omit_frame_pointer()) -+ machdep->flags |= OMIT_FRAME_PTR; - STRUCT_SIZE_INIT(user_regs_struct, "user_regs_struct"); - MEMBER_OFFSET_INIT(user_regs_struct_ebp, - "user_regs_struct", "ebp"); -@@ -1723,9 +1791,37 @@ - "irq_desc", NULL, 0); - else - machdep->nr_irqs = 224; /* NR_IRQS */ -- machdep->hz = HZ; -- if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -- machdep->hz = 1000; -+ if (!machdep->hz) { -+ machdep->hz = HZ; -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -+ machdep->hz = 1000; -+ } -+ -+ if (machdep->flags & PAE) { -+ machdep->section_size_bits = _SECTION_SIZE_BITS_PAE; -+ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_PAE; -+ } else { -+ machdep->section_size_bits = _SECTION_SIZE_BITS; -+ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; -+ } -+ -+ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { -+ if (machdep->flags & PAE) -+ machdep->uvtop = x86_uvtop_xen_wpt_PAE; -+ else -+ machdep->uvtop = x86_uvtop_xen_wpt; -+ } -+ -+ if (XEN()) { -+ MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs, -+ "vcpu_guest_context", "user_regs"); -+ MEMBER_OFFSET_INIT(cpu_user_regs_esp, -+ "cpu_user_regs", "esp"); -+ MEMBER_OFFSET_INIT(cpu_user_regs_eip, -+ "cpu_user_regs", "eip"); -+ } -+ -+ eframe_init(); 
- break; - - case POST_INIT: -@@ -1735,6 +1831,47 @@ - } - - /* -+ * Account for addition of pt_regs.xgs field in 2.6.20+ kernels. -+ */ -+static void -+eframe_init(void) -+{ -+ if (INVALID_SIZE(pt_regs)) { -+ if (THIS_KERNEL_VERSION < LINUX(2,6,20)) -+ ASSIGN_SIZE(pt_regs) = (MAX_USER_EFRAME_SIZE-1)*sizeof(ulong); -+ else { -+ ASSIGN_SIZE(pt_regs) = MAX_USER_EFRAME_SIZE*sizeof(ulong); -+ INT_EFRAME_SS = 15; -+ INT_EFRAME_ESP = 14; -+ INT_EFRAME_EFLAGS = 13; -+ INT_EFRAME_CS = 12; -+ INT_EFRAME_EIP = 11; -+ INT_EFRAME_ERR = 10; -+ INT_EFRAME_GS = 9; -+ } -+ return; -+ } -+ -+ INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "xss") / 4; -+ INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "esp") / 4; -+ INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "eflags") / 4; -+ INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "xcs") / 4; -+ INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "eip") / 4; -+ INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_eax") / 4; -+ if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "xgs")) != -1) -+ INT_EFRAME_GS /= 4; -+ INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "xes") / 4; -+ INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "xds") / 4; -+ INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "eax") / 4; -+ INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "ebp") / 4; -+ INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "edi") / 4; -+ INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "esi") / 4; -+ INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "edx") / 4; -+ INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "ecx") / 4; -+ INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "ebx") / 4; -+} -+ -+/* - * Needs to be done this way because of potential 4G/4G split. - */ - static int -@@ -1825,7 +1962,7 @@ - fprintf(fp, " PAGE: %s (4MB)\n\n", - mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, - MKSTR(NONPAE_PAGEBASE(pgd_pte)))); -- x86_translate_pte(0, 0, pgd_pte); -+ x86_translate_pte(pgd_pte, 0, 0); - } - - *paddr = NONPAE_PAGEBASE(pgd_pte) + (vaddr & ~_4MB_PAGE_MASK); -@@ -1892,7 +2029,170 @@ - } - - static int --x86_uvtop_pae(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) -+x86_uvtop_xen_wpt(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) -+{ -+ ulong mm, active_mm; -+ ulong *pgd; -+ ulong *page_dir; -+ ulong *page_middle; -+ ulong *machine_page_table, *pseudo_page_table; -+ ulong pgd_pte, pseudo_pgd_pte; -+ ulong pmd_pte; -+ ulong machine_pte, pseudo_pte; -+ char buf[BUFSIZE]; -+ -+ if (!tc) -+ error(FATAL, "current context invalid\n"); -+ -+ *paddr = 0; -+ -+ if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { -+ if (VALID_MEMBER(thread_struct_cr3)) -+ pgd = (ulong *)machdep->get_task_pgd(tc->task); -+ else { -+ if (INVALID_MEMBER(task_struct_active_mm)) -+ error(FATAL, "no cr3 or active_mm?\n"); -+ -+ readmem(tc->task + OFFSET(task_struct_active_mm), -+ KVADDR, &active_mm, sizeof(void *), -+ "task active_mm contents", FAULT_ON_ERROR); -+ -+ if (!active_mm) -+ error(FATAL, -+ "no active_mm for this kernel thread\n"); -+ -+ readmem(active_mm + OFFSET(mm_struct_pgd), -+ KVADDR, &pgd, sizeof(long), -+ "mm_struct pgd", FAULT_ON_ERROR); -+ } -+ } else { -+ if ((mm = task_mm(tc->task, TRUE))) -+ pgd = ULONG_PTR(tt->mm_struct + -+ OFFSET(mm_struct_pgd)); -+ else -+ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), -+ KVADDR, &pgd, sizeof(long), "mm_struct pgd", -+ FAULT_ON_ERROR); -+ } -+ -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -+ -+ page_dir = pgd + (vaddr >> PGDIR_SHIFT); -+ -+ FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -+ -+ if (verbose) -+ 
fprintf(fp, " PGD: %s => %lx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)page_dir)), -+ pgd_pte); -+ -+ if (!pgd_pte) -+ goto no_upage; -+ -+ if (pgd_pte & _PAGE_4M) { -+ if (verbose) -+ fprintf(fp, " PAGE: %s (4MB) [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(NONPAE_PAGEBASE(pgd_pte)))); -+ -+ pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte)); -+ -+ if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) { -+ if (verbose) -+ fprintf(fp, " PAGE: page not available\n"); -+ *paddr = PADDR_NOT_AVAILABLE; -+ return FALSE; -+ } -+ -+ pseudo_pgd_pte |= PAGEOFFSET(pgd_pte); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %s (4MB)\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte)))); -+ -+ x86_translate_pte(pseudo_pgd_pte, 0, 0); -+ } -+ -+ *paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) + -+ (vaddr & ~_4MB_PAGE_MASK); -+ -+ return TRUE; -+ } -+ -+ page_middle = page_dir; -+ -+ FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE()); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); -+ -+ if (verbose) -+ fprintf(fp, " PMD: %s => %lx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)page_middle)), -+ pmd_pte); -+ -+ if (!pmd_pte) -+ goto no_upage; -+ -+ machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) + -+ ((vaddr>>10) & ((PTRS_PER_PTE-1)<<2))); -+ -+ pseudo_page_table = (ulong *) -+ xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table)); -+ -+ FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE()); -+ machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table)); -+ -+ if (verbose) { -+ fprintf(fp, " PTE: %s [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)machine_page_table))); -+ -+ fprintf(fp, " PTE: %s => %lx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)pseudo_page_table + -+ PAGEOFFSET(machine_page_table))), machine_pte); -+ } -+ -+ if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { -+ *paddr = machine_pte; -+ -+ if (machine_pte && verbose) { -+ fprintf(fp, "\n"); -+ x86_translate_pte(machine_pte, 0, 0); -+ } -+ -+ goto no_upage; -+ } -+ -+ pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte)); -+ pseudo_pte |= PAGEOFFSET(machine_pte); -+ -+ *paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(vaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %s [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(NONPAE_PAGEBASE(machine_pte)))); -+ -+ fprintf(fp, " PAGE: %s\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(NONPAE_PAGEBASE(pseudo_pte)))); -+ -+ x86_translate_pte(pseudo_pte, 0, 0); -+ } -+ -+ return TRUE; -+ -+no_upage: -+ return FALSE; -+} -+ -+static int -+x86_uvtop_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) - { - ulong mm, active_mm; - ulonglong *pgd; -@@ -1962,7 +2262,7 @@ - - page_middle = PAE_PAGEBASE(page_dir_entry); - -- FILL_PMD(page_middle, PHYSADDR, PAGESIZE()); -+ FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE()); - - offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); - -@@ -1998,7 +2298,7 @@ - - page_table = PAE_PAGEBASE(page_middle_entry); - -- FILL_PTBL(page_table, PHYSADDR, PAGESIZE()); -+ FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE()); - - offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * - sizeof(ulonglong); -@@ -2028,9 +2328,10 @@ - *paddr = physpage; - - if (verbose) { -- fprintf(fp, " PAGE: %s\n\n", -- mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -- MKSTR(&physpage))); -+ ull = PAE_PAGEBASE(page_table_entry); 
-+ fprintf(fp, " PAGE: %s\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull))); - x86_translate_pte(0, 0, page_table_entry); - } - -@@ -2040,62 +2341,259 @@ - return FALSE; - } - --/* -- * Translates a kernel virtual address to its physical address. cmd_vtop() -- * sets the verbose flag so that the pte translation gets displayed; all -- * other callers quietly accept the translation. -- */ -- - static int --x86_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+x86_uvtop_xen_wpt_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) - { -- ulong *pgd; -- ulong *page_dir; -- ulong *page_middle; -- ulong *page_table; -- ulong pgd_pte; -- ulong pmd_pte; -- ulong pte; -+ ulong mm, active_mm; -+ ulonglong *pgd; -+ ulonglong page_dir_entry; -+ ulonglong page_middle, pseudo_page_middle; -+ ulonglong page_middle_entry; -+ ulonglong page_table, pseudo_page_table; -+ ulonglong page_table_entry; -+ ulonglong physpage, pseudo_physpage; -+ ulonglong ull; -+ ulong offset; - char buf[BUFSIZE]; - -- if (!IS_KVADDR(kvaddr)) -- return FALSE; -+ if (!tc) -+ error(FATAL, "current context invalid\n"); - -- if (!vt->vmalloc_start) { -- *paddr = VTOP(kvaddr); -- return TRUE; -- } -+ *paddr = 0; - -- if (!IS_VMALLOC_ADDR(kvaddr)) { -- *paddr = VTOP(kvaddr); -- if (!verbose) -- return TRUE; -- } -+ if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { -+ if (VALID_MEMBER(thread_struct_cr3)) -+ pgd = (ulonglong *)machdep->get_task_pgd(tc->task); -+ else { -+ if (INVALID_MEMBER(task_struct_active_mm)) -+ error(FATAL, "no cr3 or active_mm?\n"); - -- pgd = (ulong *)vt->kernel_pgd[0]; -+ readmem(tc->task + OFFSET(task_struct_active_mm), -+ KVADDR, &active_mm, sizeof(void *), -+ "task active_mm contents", FAULT_ON_ERROR); -+ -+ if (!active_mm) -+ error(FATAL, -+ "no active_mm for this kernel thread\n"); -+ -+ readmem(active_mm + OFFSET(mm_struct_pgd), -+ KVADDR, &pgd, sizeof(long), -+ "mm_struct pgd", FAULT_ON_ERROR); -+ } -+ } else { -+ if ((mm = task_mm(tc->task, TRUE))) -+ pgd = (ulonglong *)(ULONG_PTR(tt->mm_struct + -+ OFFSET(mm_struct_pgd))); -+ else -+ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), -+ KVADDR, &pgd, sizeof(long), "mm_struct pgd", -+ FAULT_ON_ERROR); -+ } - - if (verbose) - fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); - -- page_dir = pgd + (kvaddr >> PGDIR_SHIFT); -+ FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); - -- FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); -- pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -+ offset = ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * -+ sizeof(ulonglong); -+ -+ page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); - - if (verbose) -- fprintf(fp, " PGD: %s => %lx\n", -+ fprintf(fp, " PGD: %s => %llx [machine]\n", - mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -- MKSTR((ulong)page_dir)), pgd_pte); -- -- if (!pgd_pte) -- goto no_kpage; -+ MKSTR((ulong)pgd + offset)), -+ page_dir_entry); - -- if (pgd_pte & _PAGE_4M) { -+ if (!(page_dir_entry & _PAGE_PRESENT)) { -+ goto no_upage; -+ } -+ -+ page_middle = PAE_PAGEBASE(page_dir_entry); -+ pseudo_page_middle = xen_m2p(page_middle); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %s => %llx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)pgd + offset)), -+ pseudo_page_middle | PAGEOFFSET(page_dir_entry) | -+ (page_dir_entry & _PAGE_NX)); -+ -+ FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE()); -+ -+ offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); -+ -+ page_middle_entry = *((ulonglong 
*)&machdep->pmd[offset]); -+ -+ if (verbose) { -+ ull = page_middle + offset; -+ fprintf(fp, " PMD: %s => %llx [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull)), -+ page_middle_entry); -+ } -+ -+ if (!(page_middle_entry & _PAGE_PRESENT)) { -+ goto no_upage; -+ } -+ -+ if (page_middle_entry & _PAGE_PSE) { -+ error(FATAL, "_PAGE_PSE in an mfn not supported\n"); /* XXX */ -+ if (verbose) { -+ ull = PAE_PAGEBASE(page_middle_entry); -+ fprintf(fp, " PAGE: %s (2MB)\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull))); -+ x86_translate_pte(0, 0, page_middle_entry); -+ } -+ -+ physpage = PAE_PAGEBASE(page_middle_entry) + -+ (vaddr & ~_2MB_PAGE_MASK); -+ *paddr = physpage; -+ -+ return TRUE; -+ } -+ -+ page_table = PAE_PAGEBASE(page_middle_entry); -+ pseudo_page_table = xen_m2p(page_table); -+ -+ if (verbose) { -+ ull = page_middle + offset; -+ fprintf(fp, " PMD: %s => %llx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull)), -+ pseudo_page_table | PAGEOFFSET(page_middle_entry) | -+ (page_middle_entry & _PAGE_NX)); -+ } -+ -+ FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE()); -+ -+ offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * -+ sizeof(ulonglong); -+ -+ page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); -+ -+ if (verbose) { -+ ull = page_table + offset; -+ fprintf(fp, " PTE: %s => %llx [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull)), page_table_entry); -+ } -+ -+ if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { -+ *paddr = page_table_entry; -+ -+ if (page_table_entry && verbose) { -+ fprintf(fp, "\n"); -+ x86_translate_pte(0, 0, page_table_entry); -+ } -+ -+ goto no_upage; -+ } -+ -+ physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(vaddr); -+ pseudo_physpage = xen_m2p(physpage); -+ -+ if (verbose) { -+ ull = page_table + offset; -+ fprintf(fp, " PTE: %s => %llx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull)), -+ pseudo_physpage | PAGEOFFSET(page_table_entry) | -+ (page_table_entry & _PAGE_NX)); -+ } -+ -+ *paddr = pseudo_physpage + PAGEOFFSET(vaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %s [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&physpage))); -+ -+ pseudo_physpage += (PAGEOFFSET(vaddr) | -+ (page_table_entry & (_PAGE_NX|machdep->pageoffset))); -+ -+ fprintf(fp, " PAGE: %s\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&pseudo_physpage))); -+ -+ x86_translate_pte(0, 0, pseudo_physpage); -+ } -+ -+ return TRUE; -+ -+no_upage: -+ return FALSE; -+} -+ -+/* -+ * Translates a kernel virtual address to its physical address. cmd_vtop() -+ * sets the verbose flag so that the pte translation gets displayed; all -+ * other callers quietly accept the translation. 
-+ */ -+ -+static int -+x86_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+{ -+ ulong *pgd; -+ ulong *page_dir; -+ ulong *page_middle; -+ ulong *page_table; -+ ulong pgd_pte; -+ ulong pmd_pte; -+ ulong pte; -+ char buf[BUFSIZE]; -+ -+ if (!IS_KVADDR(kvaddr)) -+ return FALSE; -+ -+ if (XEN_HYPER_MODE()) { -+ if (DIRECTMAP_VIRT_ADDR(kvaddr)) { -+ *paddr = kvaddr - DIRECTMAP_VIRT_START; -+ return TRUE; -+ } -+ pgd = (ulong *)symbol_value("idle_pg_table_l2"); -+ } else { -+ if (!vt->vmalloc_start) { -+ *paddr = VTOP(kvaddr); -+ return TRUE; -+ } -+ -+ if (!IS_VMALLOC_ADDR(kvaddr)) { -+ *paddr = VTOP(kvaddr); -+ if (!verbose) -+ return TRUE; -+ } -+ -+ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) -+ return (x86_kvtop_xen_wpt(tc, kvaddr, paddr, verbose)); -+ -+ pgd = (ulong *)vt->kernel_pgd[0]; -+ } -+ -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -+ -+ page_dir = pgd + (kvaddr >> PGDIR_SHIFT); -+ -+ FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %s => %lx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)page_dir)), pgd_pte); -+ -+ if (!pgd_pte) -+ goto no_kpage; -+ -+ if (pgd_pte & _PAGE_4M) { - if (verbose) { - fprintf(fp, " PAGE: %s (4MB)\n\n", - mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, - MKSTR(NONPAE_PAGEBASE(pgd_pte)))); -- x86_translate_pte(0, 0, pgd_pte); -+ x86_translate_pte(pgd_pte, 0, 0); - } - - *paddr = NONPAE_PAGEBASE(pgd_pte) + (kvaddr & ~_4MB_PAGE_MASK); -@@ -2158,9 +2656,134 @@ - return FALSE; - } - -+static int -+x86_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+{ -+ ulong *pgd; -+ ulong *page_dir; -+ ulong *page_middle; -+ ulong *machine_page_table, *pseudo_page_table; -+ ulong pgd_pte, pseudo_pgd_pte; -+ ulong pmd_pte; -+ ulong machine_pte, pseudo_pte; -+ char buf[BUFSIZE]; -+ -+ pgd = (ulong *)vt->kernel_pgd[0]; -+ -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -+ -+ page_dir = pgd + (kvaddr >> PGDIR_SHIFT); -+ -+ FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %s => %lx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)page_dir)), pgd_pte); -+ -+ if (!pgd_pte) -+ goto no_kpage; -+ -+ if (pgd_pte & _PAGE_4M) { -+ if (verbose) -+ fprintf(fp, " PAGE: %s (4MB) [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(NONPAE_PAGEBASE(pgd_pte)))); -+ -+ pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte)); -+ -+ if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) { -+ if (verbose) -+ fprintf(fp, " PAGE: page not available\n"); -+ *paddr = PADDR_NOT_AVAILABLE; -+ return FALSE; -+ } -+ -+ pseudo_pgd_pte |= PAGEOFFSET(pgd_pte); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %s (4MB)\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte)))); -+ -+ x86_translate_pte(pseudo_pgd_pte, 0, 0); -+ } -+ -+ *paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) + -+ (kvaddr & ~_4MB_PAGE_MASK); -+ -+ return TRUE; -+ } -+ -+ page_middle = page_dir; -+ -+ FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE()); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); -+ -+ if (verbose) -+ fprintf(fp, " PMD: %s => %lx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)page_middle)), pmd_pte); -+ -+ if (!pmd_pte) -+ goto no_kpage; -+ -+ machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) 
+ -+ ((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2))); -+ -+ pseudo_page_table = (ulong *) -+ xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table)); -+ -+ FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE()); -+ machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table)); -+ -+ if (verbose) { -+ fprintf(fp, " PTE: %s [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)machine_page_table))); -+ -+ fprintf(fp, " PTE: %s => %lx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)pseudo_page_table + -+ PAGEOFFSET(machine_page_table))), machine_pte); -+ } -+ -+ if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { -+ if (machine_pte && verbose) { -+ fprintf(fp, "\n"); -+ x86_translate_pte(machine_pte, 0, 0); -+ } -+ goto no_kpage; -+ } -+ -+ pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte)); -+ pseudo_pte |= PAGEOFFSET(machine_pte); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %s [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(NONPAE_PAGEBASE(machine_pte)))); -+ -+ fprintf(fp, " PAGE: %s\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(NONPAE_PAGEBASE(pseudo_pte)))); -+ -+ x86_translate_pte(pseudo_pte, 0, 0); -+ } -+ -+ *paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(kvaddr); -+ -+ return TRUE; -+ -+no_kpage: -+ return FALSE; -+} -+ - - static int --x86_kvtop_pae(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+x86_kvtop_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) - { - ulonglong *pgd; - ulonglong page_dir_entry; -@@ -2177,18 +2800,29 @@ - if (!IS_KVADDR(kvaddr)) - return FALSE; - -- if (!vt->vmalloc_start) { -- *paddr = VTOP(kvaddr); -- return TRUE; -- } -- -- if (!IS_VMALLOC_ADDR(kvaddr)) { -- *paddr = VTOP(kvaddr); -- if (!verbose) -+ if (XEN_HYPER_MODE()) { -+ if (DIRECTMAP_VIRT_ADDR(kvaddr)) { -+ *paddr = kvaddr - DIRECTMAP_VIRT_START; - return TRUE; -- } -+ } -+ pgd = (ulonglong *)symbol_value("idle_pg_table_l3"); -+ } else { -+ if (!vt->vmalloc_start) { -+ *paddr = VTOP(kvaddr); -+ return TRUE; -+ } -+ -+ if (!IS_VMALLOC_ADDR(kvaddr)) { -+ *paddr = VTOP(kvaddr); -+ if (!verbose) -+ return TRUE; -+ } - -- pgd = (ulonglong *)vt->kernel_pgd[0]; -+ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) -+ return (x86_kvtop_xen_wpt_PAE(tc, kvaddr, paddr, verbose)); -+ -+ pgd = (ulonglong *)vt->kernel_pgd[0]; -+ } - - if (verbose) - fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -@@ -2212,7 +2846,7 @@ - - page_middle = PAE_PAGEBASE(page_dir_entry); - -- FILL_PMD(page_middle, PHYSADDR, PAGESIZE()); -+ FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE()); - - offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); - -@@ -2249,7 +2883,7 @@ - - page_table = PAE_PAGEBASE(page_middle_entry); - -- FILL_PTBL(page_table, PHYSADDR, PAGESIZE()); -+ FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE()); - - offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * - sizeof(ulonglong); -@@ -2277,9 +2911,10 @@ - *paddr = physpage; - - if (verbose) { -+ ull = PAE_PAGEBASE(page_table_entry); - fprintf(fp, " PAGE: %s\n\n", - mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -- MKSTR(&physpage))); -+ MKSTR(&ull))); - x86_translate_pte(0, 0, page_table_entry); - } - -@@ -2289,11 +2924,170 @@ - return FALSE; - } - --/* -- * Get the relevant page directory pointer from a task structure. 
-- */ --static ulong --x86_get_task_pgd(ulong task) -+static int -+x86_kvtop_xen_wpt_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+{ -+ ulonglong *pgd; -+ ulonglong page_dir_entry; -+ ulonglong page_middle, pseudo_page_middle; -+ ulonglong page_middle_entry; -+ ulonglong page_table, pseudo_page_table; -+ ulonglong page_table_entry; -+ ulonglong physpage, pseudo_physpage; -+ ulonglong ull; -+ ulong offset; -+ char buf[BUFSIZE]; -+ -+ pgd = (ulonglong *)vt->kernel_pgd[0]; -+ -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -+ -+ FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); -+ -+ offset = ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * -+ sizeof(ulonglong); -+ -+ page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %s => %llx [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)pgd + offset)), -+ page_dir_entry); -+ -+ if (!(page_dir_entry & _PAGE_PRESENT)) { -+ goto no_kpage; -+ } -+ -+ page_middle = PAE_PAGEBASE(page_dir_entry); -+ pseudo_page_middle = xen_m2p(page_middle); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %s => %llx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR((ulong)pgd + offset)), -+ pseudo_page_middle | PAGEOFFSET(page_dir_entry) | -+ (page_dir_entry & _PAGE_NX)); -+ -+ FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE()); -+ -+ offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); -+ -+ page_middle_entry = *((ulonglong *)&machdep->pmd[offset]); -+ -+ if (verbose) { -+ ull = page_middle + offset; -+ fprintf(fp, " PMD: %s => %llx [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull)), -+ page_middle_entry); -+ } -+ -+ if (!(page_middle_entry & _PAGE_PRESENT)) { -+ goto no_kpage; -+ } -+ -+ if (page_middle_entry & _PAGE_PSE) { -+ error(FATAL, "_PAGE_PSE in an mfn not supported\n"); /* XXX */ -+ if (verbose) { -+ ull = PAE_PAGEBASE(page_middle_entry); -+ fprintf(fp, " PAGE: %s (2MB)\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull))); -+ x86_translate_pte(0, 0, page_middle_entry); -+ } -+ -+ physpage = PAE_PAGEBASE(page_middle_entry) + -+ (kvaddr & ~_2MB_PAGE_MASK); -+ *paddr = physpage; -+ -+ -+ return TRUE; -+ } -+ -+ page_table = PAE_PAGEBASE(page_middle_entry); -+ pseudo_page_table = xen_m2p(page_table); -+ -+ if (verbose) { -+ ull = page_middle + offset; -+ fprintf(fp, " PMD: %s => %llx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull)), -+ pseudo_page_table | PAGEOFFSET(page_middle_entry) | -+ (page_middle_entry & _PAGE_NX)); -+ } -+ -+ FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE()); -+ -+ offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * -+ sizeof(ulonglong); -+ -+ page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); -+ -+ if (verbose) { -+ ull = page_table + offset; -+ fprintf(fp, " PTE: %s => %llx [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull)), page_table_entry); -+ } -+ -+ if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { -+ if (page_table_entry && verbose) { -+ fprintf(fp, "\n"); -+ x86_translate_pte(0, 0, page_table_entry); -+ } -+ -+ goto no_kpage; -+ } -+ -+ physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(kvaddr); -+ pseudo_physpage = xen_m2p(physpage); -+ -+ if (verbose) { -+ ull = page_table + offset; -+ fprintf(fp, " PTE: %s => %llx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&ull)), -+ pseudo_physpage | PAGEOFFSET(page_table_entry) | -+ 
(page_table_entry & _PAGE_NX)); -+ } -+ -+ *paddr = pseudo_physpage + PAGEOFFSET(kvaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %s [machine]\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&physpage))); -+ -+ pseudo_physpage += (PAGEOFFSET(kvaddr) | -+ (page_table_entry & _PAGE_NX)); -+ -+ fprintf(fp, " PAGE: %s\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, -+ MKSTR(&pseudo_physpage))); -+ -+ x86_translate_pte(0, 0, pseudo_physpage); -+ } -+ -+ return TRUE; -+ -+no_kpage: -+ return FALSE; -+} -+ -+void -+x86_clear_machdep_cache(void) -+{ -+ machdep->machspec->last_pmd_read_PAE = 0; -+ machdep->machspec->last_ptbl_read_PAE = 0; -+} -+ -+/* -+ * Get the relevant page directory pointer from a task structure. -+ */ -+static ulong -+x86_get_task_pgd(ulong task) - { - long offset; - ulong cr3; -@@ -2341,6 +3135,7 @@ - x86_dump_machdep_table(ulong arg) - { - int others; -+ ulong xen_wpt; - - switch (arg) { - default: -@@ -2355,8 +3150,6 @@ - fprintf(fp, "%sPAE", others++ ? "|" : ""); - if (machdep->flags & OMIT_FRAME_PTR) - fprintf(fp, "%sOMIT_FRAME_PTR", others++ ? "|" : ""); -- if (machdep->flags & SYSRQ) -- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); - if (machdep->flags & FRAMESIZE_DEBUG) - fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : ""); - fprintf(fp, ")\n"); -@@ -2376,12 +3169,17 @@ - fprintf(fp, " eframe_search: x86_eframe_search()\n"); - fprintf(fp, " back_trace: x86_back_trace_cmd()\n"); - fprintf(fp, "get_processor_speed: x86_processor_speed()\n"); -+ xen_wpt = XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES); - if (machdep->flags & PAE) { -- fprintf(fp, " uvtop: x86_uvtop_pae()\n"); -- fprintf(fp, " kvtop: x86_uvtop_pae()\n"); -+ fprintf(fp, " uvtop: %s()\n", -+ xen_wpt ? "x86_uvtop_xen_wpt_PAE" : "x86_uvtop_PAE"); -+ fprintf(fp, " kvtop: x86_kvtop_PAE()%s\n", -+ xen_wpt ? " -> x86_kvtop_xen_wpt_PAE()" : ""); - } else { -- fprintf(fp, " uvtop: x86_uvtop()\n"); -- fprintf(fp, " kvtop: x86_uvtop()\n"); -+ fprintf(fp, " uvtop: %s()\n", -+ xen_wpt ? "x86_uvtop_xen_wpt" : "x86_uvtop"); -+ fprintf(fp, " kvtop: x86_kvtop()%s\n", -+ xen_wpt ? " -> x86_kvtop_xen_wpt()" : ""); - } - fprintf(fp, " get_task_pgd: x86_get_task_pgd()\n"); - fprintf(fp, " dump_irq: generic_dump_irq()\n"); -@@ -2399,7 +3197,7 @@ - fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); - fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); - fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); -- fprintf(fp, " init_kernel_pgd: NULL\n"); -+ fprintf(fp, " init_kernel_pgd: x86_init_kernel_pgd()\n"); - fprintf(fp, " value_to_symbol: %s\n", - machdep->value_to_symbol == generic_machdep_value_to_symbol ? 
- "generic_machdep_value_to_symbol()" : -@@ -2412,6 +3210,14 @@ - fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); - fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); - fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); -+ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); -+ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); -+ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); -+ fprintf(fp, " xendump_p2m_create: x86_xendump_p2m_create()\n"); -+ fprintf(fp, " xendump_panic_task: x86_xendump_panic_task()\n"); -+ fprintf(fp, " get_xendump_regs: x86_get_xendump_regs()\n"); -+ fprintf(fp, "xen_kdump_p2m_create: x86_xen_kdump_p2m_create()\n"); -+ fprintf(fp, "clear_machdep_cache: x86_clear_machdep_cache()\n"); - fprintf(fp, " machspec: x86_machine_specific\n"); - fprintf(fp, " idt_table: %lx\n", - (ulong)machdep->machspec->idt_table); -@@ -2421,6 +3227,11 @@ - machdep->machspec->entry_tramp_end); - fprintf(fp, " entry_tramp_start_phys: %llx\n", - machdep->machspec->entry_tramp_start_phys); -+ fprintf(fp, " last_pmd_read_PAE: %llx\n", -+ machdep->machspec->last_pmd_read_PAE); -+ fprintf(fp, " last_ptbl_read_PAE: %llx\n", -+ machdep->machspec->last_ptbl_read_PAE); -+ - } - - /* -@@ -2732,6 +3543,9 @@ - switch (flag) - { - case READ_IDT_INIT: -+ if (!symbol_exists("idt_table")) -+ return NULL; -+ - if (!(idt = (ulong *)malloc(desc_struct_size))) { - error(WARNING, "cannot malloc idt_table\n\n"); - return NULL; -@@ -2779,6 +3593,10 @@ - break; - - case READ_IDT_RUNTIME: -+ if (!symbol_exists("idt_table")) -+ error(FATAL, -+ "idt_table does not exist on this architecture\n"); -+ - idt = (ulong *)GETBUF(desc_struct_size); - readmem(symbol_value("idt_table"), KVADDR, idt, - desc_struct_size, "idt_table", FAULT_ON_ERROR); -@@ -2942,7 +3760,11 @@ - !strstr(buf2, "+")) - sprintf(p1, buf1); - } -- } -+ } -+ else if (STREQ(argv[2], "ud2a")) -+ pc->curcmd_flags |= UD2A_INSTRUCTION; -+ else if (STREQ(argv[2], "(bad)")) -+ pc->curcmd_flags |= BAD_INSTRUCTION; - - if (CRASHDEBUG(1)) - console(" %s", inbuf); -@@ -2969,6 +3791,16 @@ - } - } - -+ if (XEN() && (count == 1) && symbol_exists("cpu_present_map")) { -+ ulong cpu_present_map; -+ -+ get_symbol_data("cpu_present_map", sizeof(ulong), -+ &cpu_present_map); -+ -+ cpucount = count_bits_long(cpu_present_map); -+ count = MAX(cpucount, kt->cpus); -+ } -+ - return count; - } - -@@ -3092,31 +3924,31 @@ - * with the -fomit-frame-pointer flag. 
- */ - #define PUSH_BP_MOV_ESP_BP 0xe58955 -+#define PUSH_BP_CLR_EAX_MOV_ESP_BP 0xe589c03155ULL - - static int - x86_omit_frame_pointer(void) - { -- ulong push_bp_mov_esp_bp[3]; -+ ulonglong push_bp_mov_esp_bp; -+ int i; -+ char *checkfuncs[] = {"sys_open", "sys_fork", "sys_read"}; - - if (pc->flags & KERNEL_DEBUG_QUERY) - return FALSE; - -- if (!readmem(symbol_value("sys_open"), KVADDR, &push_bp_mov_esp_bp[0], -- sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR)) -- return TRUE; -- if (!readmem(symbol_value("sys_fork"), KVADDR, &push_bp_mov_esp_bp[1], -- sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR)) -- return TRUE; -- if (!readmem(symbol_value("sys_read"), KVADDR, &push_bp_mov_esp_bp[2], -- sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR)) -- return TRUE; -- -- if (((push_bp_mov_esp_bp[0] & 0xffffff) == PUSH_BP_MOV_ESP_BP) && -- ((push_bp_mov_esp_bp[1] & 0xffffff) == PUSH_BP_MOV_ESP_BP) && -- ((push_bp_mov_esp_bp[2] & 0xffffff) == PUSH_BP_MOV_ESP_BP)) -- return FALSE; -+ for (i = 0; i < 2; i++) { -+ if (!readmem(symbol_value(checkfuncs[i]), KVADDR, -+ &push_bp_mov_esp_bp, sizeof(ulonglong), -+ "x86_omit_frame_pointer", RETURN_ON_ERROR)) -+ return TRUE; -+ if (!(((push_bp_mov_esp_bp & 0x0000ffffffULL) == -+ PUSH_BP_MOV_ESP_BP) || -+ ((push_bp_mov_esp_bp & 0xffffffffffULL) == -+ PUSH_BP_CLR_EAX_MOV_ESP_BP))) -+ return TRUE; -+ } - -- return TRUE; -+ return FALSE; - } - - /* -@@ -3207,4 +4039,922 @@ - - return ((sp = value_search(value, offset))); - } -+ -+static void -+x86_init_kernel_pgd(void) -+{ -+ int i; -+ ulong value; -+ -+ value = symbol_value("swapper_pg_dir"); -+ -+ if (XEN()) -+ get_symbol_data("swapper_pg_dir", sizeof(ulong), &value); -+ else -+ value = symbol_value("swapper_pg_dir"); -+ -+ for (i = 0; i < NR_CPUS; i++) -+ vt->kernel_pgd[i] = value; -+ -+} -+ -+static ulong -+xen_m2p_nonPAE(ulong machine) -+{ -+ ulonglong pseudo; -+ -+ pseudo = xen_m2p((ulonglong)machine); -+ -+ if (pseudo == XEN_MACHADDR_NOT_FOUND) -+ return XEN_MFN_NOT_FOUND; -+ -+ return ((ulong)pseudo); -+} -+ -+#include "netdump.h" -+ -+/* -+ * From the xen vmcore, create an index of mfns for each page that makes -+ * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. -+ */ -+ -+#define MAX_X86_FRAMES (16) -+#define MFNS_PER_FRAME (PAGESIZE()/sizeof(ulong)) -+ -+static int -+x86_xen_kdump_p2m_create(struct xen_kdump_data *xkd) -+{ -+ int i, j; -+ ulong kvaddr; -+ ulong *up; -+ ulonglong *ulp; -+ ulong frames; -+ ulong frame_mfn[MAX_X86_FRAMES] = { 0 }; -+ int mfns[MAX_X86_FRAMES] = { 0 }; -+ -+ /* -+ * Temporarily read physical (machine) addresses from vmcore by -+ * going directly to read_netdump() instead of via read_kdump(). 
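
The read-routine swap mentioned above and performed just below is a plain function-pointer exchange: install the raw netdump reader while the p2m index is being built, then put the kdump reader back. A minimal standalone sketch of the pattern; the struct and the two stub readers are stand-ins, not crash's program_context or its real read_netdump()/read_kdump().

/* readmem_swap.c - sketch of temporarily overriding the read hook. */
#include <stdio.h>
#include <stddef.h>

typedef int (*readmem_fn)(unsigned long addr, void *buf, size_t len);

static int read_netdump(unsigned long addr, void *buf, size_t len)
{
        printf("raw machine read at %#lx (%zu bytes)\n", addr, len);
        return 1;
}

static int read_kdump(unsigned long addr, void *buf, size_t len)
{
        printf("p2m-translated read at %#lx (%zu bytes)\n", addr, len);
        return 1;
}

static struct { readmem_fn readmem; } pc_stub = { read_kdump };

int main(void)
{
        char page[16];

        pc_stub.readmem = read_netdump;         /* bypass p2m translation */
        pc_stub.readmem(0x3a000UL, page, sizeof(page));
        /* ... build the p2m index with raw machine reads ... */
        pc_stub.readmem = read_kdump;           /* restore normal reads */
        pc_stub.readmem(0x1000UL, page, sizeof(page));
        return 0;
}
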
-+ */ -+ pc->readmem = read_netdump; -+ -+ if (xkd->flags & KDUMP_CR3) -+ goto use_cr3; -+ -+ xkd->p2m_frames = 0; -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "x86_xen_kdump_p2m_create: p2m_mfn: %lx\n", -+ xkd->p2m_mfn); -+ -+ if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(), -+ "xen kdump p2m mfn page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump p2m mfn page\n"); -+ -+ if (CRASHDEBUG(1)) { -+ up = (ulong *)xkd->page; -+ for (i = 0; i < 4; i++) { -+ fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", -+ (ulong)((i * 4) * sizeof(ulong)), -+ *up, *(up+1), *(up+2), *(up+3)); -+ up += 4; -+ } -+ fprintf(fp, "\n"); -+ } -+ -+ for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_FRAMES; i++, up++) -+ frame_mfn[i] = *up; -+ -+ for (i = 0; i < MAX_X86_FRAMES; i++) { -+ if (!frame_mfn[i]) -+ break; -+ -+ if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page, -+ PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump p2m mfn list page\n"); -+ -+ for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++) -+ if (*up) -+ mfns[i]++; -+ -+ xkd->p2m_frames += mfns[i]; -+ -+ if (CRASHDEBUG(7)) { -+ up = (ulong *)xkd->page; -+ for (i = 0; i < 256; i++) { -+ fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", -+ (ulong)((i * 4) * sizeof(ulong)), -+ *up, *(up+1), *(up+2), *(up+3)); -+ up += 4; -+ } -+ } -+ } -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames); -+ -+ if ((xkd->p2m_mfn_frame_list = (ulong *) -+ malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) -+ error(FATAL, "cannot malloc p2m_frame_index_list"); -+ -+ for (i = 0, frames = xkd->p2m_frames; frames; i++) { -+ if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, -+ &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], -+ mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", -+ RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump p2m mfn list page\n"); -+ -+ frames -= mfns[i]; -+ } -+ -+ if (CRASHDEBUG(2)) { -+ for (i = 0; i < xkd->p2m_frames; i++) -+ fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); -+ fprintf(fp, "\n"); -+ } -+ -+ pc->readmem = read_kdump; -+ return TRUE; -+ -+use_cr3: -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "x86_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3); -+ -+ if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->pgd, PAGESIZE(), -+ "xen kdump cr3 page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump cr3 page\n"); -+ -+ if (CRASHDEBUG(7)) { -+ fprintf(fp, "contents of page directory page:\n"); -+ -+ if (machdep->flags & PAE) { -+ ulp = (ulonglong *)machdep->pgd; -+ fprintf(fp, -+ "%016llx %016llx %016llx %016llx\n", -+ *ulp, *(ulp+1), *(ulp+2), *(ulp+3)); -+ } else { -+ up = (ulong *)machdep->pgd; -+ for (i = 0; i < 256; i++) { -+ fprintf(fp, -+ "%08lx: %08lx %08lx %08lx %08lx\n", -+ (ulong)((i * 4) * sizeof(ulong)), -+ *up, *(up+1), *(up+2), *(up+3)); -+ up += 4; -+ } -+ } -+ } -+ -+ kvaddr = symbol_value("max_pfn"); -+ if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) -+ return FALSE; -+ up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); -+ -+ xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + -+ ((*up%(PAGESIZE()/sizeof(ulong))) ? 
1 : 0); -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "max_pfn at %lx: %lx (%ld) -> %d p2m_frames\n", -+ kvaddr, *up, *up, xkd->p2m_frames); -+ -+ if ((xkd->p2m_mfn_frame_list = (ulong *) -+ malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) -+ error(FATAL, "cannot malloc p2m_frame_index_list"); -+ -+ kvaddr = symbol_value("phys_to_machine_mapping"); -+ if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) -+ return FALSE; -+ up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); -+ kvaddr = *up; -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr); -+ -+ if (CRASHDEBUG(7)) { -+ fprintf(fp, "contents of first phys_to_machine_mapping page:\n"); -+ if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) -+ error(INFO, -+ "cannot read first phys_to_machine_mapping page\n"); -+ -+ up = (ulong *)xkd->page; -+ for (i = 0; i < 256; i++) { -+ fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", -+ (ulong)((i * 4) * sizeof(ulong)), -+ *up, *(up+1), *(up+2), *(up+3)); -+ up += 4; -+ } -+ } -+ -+ machdep->last_ptbl_read = BADADDR; -+ machdep->last_pmd_read = BADADDR; -+ -+ for (i = 0; i < xkd->p2m_frames; i++) { -+ xkd->p2m_mfn_frame_list[i] = x86_xen_kdump_page_mfn(kvaddr); -+ kvaddr += PAGESIZE(); -+ } -+ -+ if (CRASHDEBUG(1)) { -+ for (i = 0; i < xkd->p2m_frames; i++) -+ fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); -+ fprintf(fp, "\n"); -+ } -+ -+ machdep->last_ptbl_read = 0; -+ machdep->last_pmd_read = 0; -+ pc->readmem = read_kdump; -+ -+ return TRUE; -+} -+ -+/* -+ * Find the page associate with the kvaddr, and read its contents -+ * into the passed-in buffer. -+ */ -+static char * -+x86_xen_kdump_load_page(ulong kvaddr, char *pgbuf) -+{ -+ ulong *entry; -+ ulong *up; -+ ulong mfn; -+ -+ if (machdep->flags & PAE) -+ return x86_xen_kdump_load_page_PAE(kvaddr, pgbuf); -+ -+ up = (ulong *)machdep->pgd; -+ entry = up + (kvaddr >> PGDIR_SHIFT); -+ mfn = (*entry) >> PAGESHIFT(); -+ -+ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), -+ "xen kdump pgd entry", RETURN_ON_ERROR)) { -+ error(INFO, "cannot read/find pgd entry from cr3 page\n"); -+ return NULL; -+ } -+ -+ up = (ulong *)pgbuf; -+ entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1)); -+ mfn = (*entry) >> PAGESHIFT(); -+ -+ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), -+ "xen page table page", RETURN_ON_ERROR)) { -+ error(INFO, "cannot read/find page table page\n"); -+ return NULL; -+ } -+ -+ return pgbuf; -+} -+ -+static char * -+x86_xen_kdump_load_page_PAE(ulong kvaddr, char *pgbuf) -+{ -+ ulonglong *entry; -+ ulonglong *up; -+ ulong mfn; -+ -+ up = (ulonglong *)machdep->pgd; -+ entry = up + (kvaddr >> PGDIR_SHIFT); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), -+ "xen kdump pgd entry", RETURN_ON_ERROR)) { -+ error(INFO, "cannot read/find pgd entry from cr3 page\n"); -+ return NULL; -+ } -+ -+ up = (ulonglong *)pgbuf; -+ entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), -+ "xen kdump pmd entry", RETURN_ON_ERROR)) { -+ error(INFO, "cannot read/find pmd entry from pgd\n"); -+ return NULL; -+ } -+ -+ up = (ulonglong *)pgbuf; -+ entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), -+ "xen kdump page table page", RETURN_ON_ERROR)) { -+ error(INFO, "cannot read/find page table page from pmd\n"); -+ return NULL; -+ } -+ -+ return pgbuf; -+} -+ -+/* -+ * Return the mfn value 
associated with a virtual address. -+ */ -+static ulong -+x86_xen_kdump_page_mfn(ulong kvaddr) -+{ -+ ulong *entry; -+ ulong *up; -+ ulong mfn; -+ -+ if (machdep->flags & PAE) -+ return x86_xen_kdump_page_mfn_PAE(kvaddr); -+ -+ up = (ulong *)machdep->pgd; -+ entry = up + (kvaddr >> PGDIR_SHIFT); -+ mfn = (*entry) >> PAGESHIFT(); -+ -+ if ((mfn != machdep->last_ptbl_read) && -+ !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), -+ "xen kdump pgd entry", RETURN_ON_ERROR)) -+ error(FATAL, -+ "cannot read/find pgd entry from cr3 page (mfn: %lx)\n", -+ mfn); -+ machdep->last_ptbl_read = mfn; -+ -+ up = (ulong *)machdep->ptbl; -+ entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1)); -+ mfn = (*entry) >> PAGESHIFT(); -+ -+ return mfn; -+} -+ -+static ulong -+x86_xen_kdump_page_mfn_PAE(ulong kvaddr) -+{ -+ ulonglong *entry; -+ ulonglong *up; -+ ulong mfn; -+ -+ up = (ulonglong *)machdep->pgd; -+ entry = up + (kvaddr >> PGDIR_SHIFT); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ if ((mfn != machdep->last_pmd_read) && -+ !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), -+ "xen kdump pgd entry", RETURN_ON_ERROR)) -+ error(FATAL, -+ "cannot read/find pgd entry from cr3 page (mfn: %lx)\n", -+ mfn); -+ machdep->last_pmd_read = mfn; -+ -+ up = (ulonglong *)machdep->pmd; -+ entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ if ((mfn != machdep->last_ptbl_read) && -+ !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), -+ "xen kdump pmd entry", RETURN_ON_ERROR)) -+ error(FATAL, -+ "cannot read/find pmd entry from pgd (mfn: %lx)\n", -+ mfn); -+ machdep->last_ptbl_read = mfn; -+ -+ up = (ulonglong *)machdep->ptbl; -+ entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ return mfn; -+} -+ -+#include "xendump.h" -+ -+/* -+ * Create an index of mfns for each page that makes up the -+ * kernel's complete phys_to_machine_mapping[max_pfn] array. 
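
The size of that index is a ceiling division: max_pfn entries packed into page-sized frames, plus one extra frame for any remainder. A standalone worked example of the arithmetic used by the p2m creation code below, assuming the usual 32-bit x86 values of a 4KB page and a 4-byte ulong (so 1024 mfns per frame).

/* p2m_frames.c - worked example of the p2m frame-count arithmetic. */
#include <stdio.h>

#define PAGESIZE        4096UL
#define MFNS_PER_FRAME  (PAGESIZE / 4)          /* 1024 entries per frame */

int main(void)
{
        unsigned long max_pfn = 262145UL;       /* 1GB worth of pages plus one */

        /* full frames, plus one more if there is a remainder */
        unsigned long p2m_frames = max_pfn / MFNS_PER_FRAME +
                                   (max_pfn % MFNS_PER_FRAME ? 1 : 0);

        printf("max_pfn %lu -> %lu p2m frames\n", max_pfn, p2m_frames); /* 257 */
        return 0;
}
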
-+ */ -+static int -+x86_xendump_p2m_create(struct xendump_data *xd) -+{ -+ int i, idx; -+ ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset; -+ ulong *up; -+ ulonglong *ulp; -+ off_t offset; -+ -+ if (!symbol_exists("phys_to_machine_mapping")) { -+ xd->flags |= XC_CORE_NO_P2M; -+ return TRUE; -+ } -+ -+ if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) == -+ INVALID_OFFSET) -+ error(FATAL, -+ "cannot determine vcpu_guest_context.ctrlreg offset\n"); -+ else if (CRASHDEBUG(1)) -+ fprintf(xd->ofp, -+ "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n", -+ ctrlreg_offset); -+ -+ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + -+ (off_t)ctrlreg_offset; -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ error(FATAL, "cannot lseek to xch_ctxt_offset\n"); -+ -+ if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != -+ sizeof(ctrlreg)) -+ error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n"); -+ -+ mfn = (ctrlreg[3] >> PAGESHIFT()) | (ctrlreg[3] << (BITS()-PAGESHIFT())); -+ -+ for (i = 0; CRASHDEBUG(1) && (i < 8); i++) { -+ fprintf(xd->ofp, "ctrlreg[%d]: %lx", i, ctrlreg[i]); -+ if (i == 3) -+ fprintf(xd->ofp, " -> mfn: %lx", mfn); -+ fprintf(xd->ofp, "\n"); -+ } -+ -+ if (!xc_core_mfn_to_page(mfn, machdep->pgd)) -+ error(FATAL, "cannot read/find cr3 page\n"); -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(xd->ofp, "contents of page directory page:\n"); -+ -+ if (machdep->flags & PAE) { -+ ulp = (ulonglong *)machdep->pgd; -+ fprintf(xd->ofp, -+ "%016llx %016llx %016llx %016llx\n", -+ *ulp, *(ulp+1), *(ulp+2), *(ulp+3)); -+ } else { -+ up = (ulong *)machdep->pgd; -+ for (i = 0; i < 256; i++) { -+ fprintf(xd->ofp, -+ "%08lx: %08lx %08lx %08lx %08lx\n", -+ (ulong)((i * 4) * sizeof(ulong)), -+ *up, *(up+1), *(up+2), *(up+3)); -+ up += 4; -+ } -+ } -+ } -+ -+ kvaddr = symbol_value("max_pfn"); -+ if (!x86_xendump_load_page(kvaddr, xd->page)) -+ return FALSE; -+ up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); -+ if (CRASHDEBUG(1)) -+ fprintf(xd->ofp, "max_pfn: %lx\n", *up); -+ -+ xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + -+ ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0); -+ -+ if ((xd->xc_core.p2m_frame_index_list = (ulong *) -+ malloc(xd->xc_core.p2m_frames * sizeof(int))) == NULL) -+ error(FATAL, "cannot malloc p2m_frame_index_list"); -+ -+ kvaddr = symbol_value("phys_to_machine_mapping"); -+ if (!x86_xendump_load_page(kvaddr, xd->page)) -+ return FALSE; -+ up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "phys_to_machine_mapping: %lx\n", *up); -+ -+ kvaddr = *up; -+ machdep->last_ptbl_read = BADADDR; -+ machdep->last_pmd_read = BADADDR; -+ -+ for (i = 0; i < xd->xc_core.p2m_frames; i++) { -+ if ((idx = x86_xendump_page_index(kvaddr)) == MFN_NOT_FOUND) -+ return FALSE; -+ xd->xc_core.p2m_frame_index_list[i] = idx; -+ kvaddr += PAGESIZE(); -+ } -+ -+ machdep->last_ptbl_read = 0; -+ machdep->last_pmd_read = 0; -+ -+ return TRUE; -+} -+ -+/* -+ * Find the page associate with the kvaddr, and read its contents -+ * into the passed-in buffer. 
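
The page loaders that follow are driven by the same index arithmetic throughout: the top virtual-address bits select a pgd slot, the middle bits a page-table slot, and the low bits the byte offset within the page. A standalone sketch of that arithmetic; the shift and table-size constants are the usual non-PAE x86 values, assumed here rather than taken from the patch.

/* kvaddr_indexes.c - index math behind the non-PAE two-level loaders. */
#include <stdio.h>

#define PGDIR_SHIFT     22
#define PAGESHIFT       12
#define PTRS_PER_PTE    1024

int main(void)
{
        unsigned long kvaddr = 0xc0123456UL;

        unsigned long pgd_index = kvaddr >> PGDIR_SHIFT;                     /* 0x300 */
        unsigned long pte_index = (kvaddr >> PAGESHIFT) & (PTRS_PER_PTE-1);  /* 0x123 */
        unsigned long offset    = kvaddr & ((1UL << PAGESHIFT) - 1);         /* 0x456 */

        printf("pgd[%#lx], pte[%#lx], page offset %#lx\n",
               pgd_index, pte_index, offset);
        return 0;
}
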
-+ */ -+static char * -+x86_xendump_load_page(ulong kvaddr, char *pgbuf) -+{ -+ ulong *entry; -+ ulong *up; -+ ulong mfn; -+ -+ if (machdep->flags & PAE) -+ return x86_xendump_load_page_PAE(kvaddr, pgbuf); -+ -+ up = (ulong *)machdep->pgd; -+ entry = up + (kvaddr >> PGDIR_SHIFT); -+ mfn = (*entry) >> PAGESHIFT(); -+ -+ if (!xc_core_mfn_to_page(mfn, pgbuf)) { -+ error(INFO, "cannot read/find pgd entry from cr3 page\n"); -+ return NULL; -+ } -+ -+ up = (ulong *)pgbuf; -+ entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1)); -+ mfn = (*entry) >> PAGESHIFT(); -+ -+ if (!xc_core_mfn_to_page(mfn, pgbuf)) { -+ error(INFO, "cannot read/find page table page\n"); -+ return NULL; -+ } -+ -+ return pgbuf; -+} -+ -+static char * -+x86_xendump_load_page_PAE(ulong kvaddr, char *pgbuf) -+{ -+ ulonglong *entry; -+ ulonglong *up; -+ ulong mfn; -+ -+ up = (ulonglong *)machdep->pgd; -+ entry = up + (kvaddr >> PGDIR_SHIFT); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ if (!xc_core_mfn_to_page(mfn, pgbuf)) { -+ error(INFO, "cannot read/find pgd entry from cr3 page\n"); -+ return NULL; -+ } -+ -+ up = (ulonglong *)pgbuf; -+ entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ if (!xc_core_mfn_to_page(mfn, pgbuf)) { -+ error(INFO, "cannot read/find pmd entry from pgd\n"); -+ return NULL; -+ } -+ -+ up = (ulonglong *)pgbuf; -+ entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ -+ if (!xc_core_mfn_to_page(mfn, pgbuf)) { -+ error(INFO, "cannot read/find page table page from pmd\n"); -+ return NULL; -+ } -+ -+ return pgbuf; -+} -+ -+/* -+ * Find the dumpfile page index associated with the kvaddr. -+ */ -+static int -+x86_xendump_page_index(ulong kvaddr) -+{ -+ int idx; -+ ulong *entry; -+ ulong *up; -+ ulong mfn; -+ -+ if (machdep->flags & PAE) -+ return x86_xendump_page_index_PAE(kvaddr); -+ -+ up = (ulong *)machdep->pgd; -+ entry = up + (kvaddr >> PGDIR_SHIFT); -+ mfn = (*entry) >> PAGESHIFT(); -+ if ((mfn != machdep->last_ptbl_read) && -+ !xc_core_mfn_to_page(mfn, machdep->ptbl)) { -+ error(INFO, "cannot read/find pgd entry from cr3 page\n"); -+ return MFN_NOT_FOUND; -+ } -+ machdep->last_ptbl_read = mfn; -+ -+ up = (ulong *)machdep->ptbl; -+ entry = up + ((kvaddr>>12) & (PTRS_PER_PTE-1)); -+ mfn = (*entry) >> PAGESHIFT(); -+ if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) -+ error(INFO, "cannot determine page index for %lx\n", -+ kvaddr); -+ -+ return idx; -+} -+ -+static int -+x86_xendump_page_index_PAE(ulong kvaddr) -+{ -+ int idx; -+ ulonglong *entry; -+ ulonglong *up; -+ ulong mfn; -+ -+ up = (ulonglong *)machdep->pgd; -+ entry = up + (kvaddr >> PGDIR_SHIFT); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ if ((mfn != machdep->last_pmd_read) && -+ !xc_core_mfn_to_page(mfn, machdep->pmd)) { -+ error(INFO, "cannot read/find pgd entry from cr3 page\n"); -+ return MFN_NOT_FOUND; -+ } -+ machdep->last_pmd_read = mfn; -+ -+ up = (ulonglong *)machdep->pmd; -+ entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ if ((mfn != machdep->last_ptbl_read) && -+ !xc_core_mfn_to_page(mfn, machdep->ptbl)) { -+ error(INFO, "cannot read/find pmd entry from pgd\n"); -+ return MFN_NOT_FOUND; -+ } -+ machdep->last_ptbl_read = mfn; -+ -+ up = (ulonglong *)machdep->ptbl; -+ entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); -+ mfn = (ulong)((*entry) >> PAGESHIFT()); -+ if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) -+ error(INFO, "cannot determine 
page index for %lx\n", -+ kvaddr); -+ -+ return idx; -+} -+ -+/* -+ * Pull the esp from the cpu_user_regs struct in the header -+ * turn it into a task, and match it with the active_set. -+ * Unfortunately, the registers in the vcpu_guest_context -+ * are not necessarily those of the panic task, so for now -+ * let get_active_set_panic_task() get the right task. -+ */ -+static ulong -+x86_xendump_panic_task(struct xendump_data *xd) -+{ -+ return NO_TASK; -+ -+#ifdef TO_BE_REVISITED -+ int i; -+ ulong esp; -+ off_t offset; -+ ulong task; -+ -+ -+ if (INVALID_MEMBER(vcpu_guest_context_user_regs) || -+ INVALID_MEMBER(cpu_user_regs_esp)) -+ return NO_TASK; -+ -+ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + -+ (off_t)OFFSET(vcpu_guest_context_user_regs) + -+ (off_t)OFFSET(cpu_user_regs_esp); -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ return NO_TASK; -+ -+ if (read(xd->xfd, &esp, sizeof(ulong)) != sizeof(ulong)) -+ return NO_TASK; -+ -+ if (IS_KVADDR(esp) && (task = stkptr_to_task(esp))) { -+ -+ for (i = 0; i < NR_CPUS; i++) { -+ if (task == tt->active_set[i]) { -+ if (CRASHDEBUG(0)) -+ error(INFO, -+ "x86_xendump_panic_task: esp: %lx -> task: %lx\n", -+ esp, task); -+ return task; -+ } -+ } -+ -+ error(WARNING, -+ "x86_xendump_panic_task: esp: %lx -> task: %lx (not active)\n", -+ esp); -+ } -+ -+ return NO_TASK; -+#endif -+} -+ -+/* -+ * Because of an off-by-one vcpu bug in early xc_domain_dumpcore() -+ * instantiations, the registers in the vcpu_guest_context are not -+ * necessarily those of the panic task. If not, the eip/esp will be -+ * in stop_this_cpu, as a result of the IP interrupt in panic(), -+ * but the trace is strange because it comes out of the hypervisor -+ * at least if the vcpu had been idle. -+ */ -+static void -+x86_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *eip, ulong *esp) -+{ -+ ulong task, xeip, xesp; -+ off_t offset; -+ -+ if (INVALID_MEMBER(vcpu_guest_context_user_regs) || -+ INVALID_MEMBER(cpu_user_regs_eip) || -+ INVALID_MEMBER(cpu_user_regs_esp)) -+ goto generic; -+ -+ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + -+ (off_t)OFFSET(vcpu_guest_context_user_regs) + -+ (off_t)OFFSET(cpu_user_regs_esp); -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ goto generic; -+ if (read(xd->xfd, &xesp, sizeof(ulong)) != sizeof(ulong)) -+ goto generic; -+ -+ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + -+ (off_t)OFFSET(vcpu_guest_context_user_regs) + -+ (off_t)OFFSET(cpu_user_regs_eip); -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ goto generic; -+ if (read(xd->xfd, &xeip, sizeof(ulong)) != sizeof(ulong)) -+ goto generic; -+ -+ if (IS_KVADDR(xesp) && (task = stkptr_to_task(xesp)) && -+ (task == bt->task)) { -+ if (CRASHDEBUG(1)) -+ fprintf(xd->ofp, -+ "hooks from vcpu_guest_context: eip: %lx esp: %lx\n", xeip, xesp); -+ *eip = xeip; -+ *esp = xesp; -+ return; -+ } -+ -+generic: -+ return machdep->get_stack_frame(bt, eip, esp); -+} -+ -+/* for Xen Hypervisor analysis */ -+ -+static int -+x86_xenhyper_is_kvaddr(ulong addr) -+{ -+ if (machdep->flags & PAE) { -+ return (addr >= HYPERVISOR_VIRT_START_PAE); -+ } -+ return (addr >= HYPERVISOR_VIRT_START); -+} -+ -+static ulong -+x86_get_stackbase_hyper(ulong task) -+{ -+ struct xen_hyper_vcpu_context *vcc; -+ int pcpu; -+ ulong init_tss; -+ ulong esp, base; -+ char *buf; -+ -+ /* task means vcpu here */ -+ vcc = xen_hyper_vcpu_to_vcpu_context(task); -+ if (!vcc) -+ error(FATAL, "invalid vcpu\n"); -+ -+ pcpu = vcc->processor; -+ if (!xen_hyper_test_pcpu_id(pcpu)) { -+ 
error(FATAL, "invalid pcpu number\n"); -+ } -+ init_tss = symbol_value("init_tss"); -+ buf = GETBUF(XEN_HYPER_SIZE(tss_struct)); -+ init_tss += XEN_HYPER_SIZE(tss_struct) * pcpu; -+ if (!readmem(init_tss, KVADDR, buf, -+ XEN_HYPER_SIZE(tss_struct), "init_tss", RETURN_ON_ERROR)) { -+ error(FATAL, "cannot read init_tss.\n"); -+ } -+ esp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_esp0)); -+ FREEBUF(buf); -+ base = esp & (~(STACKSIZE() - 1)); -+ -+ return base; -+} -+ -+static ulong -+x86_get_stacktop_hyper(ulong task) -+{ -+ return x86_get_stackbase_hyper(task) + STACKSIZE(); -+} -+ -+static void -+x86_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp) -+{ -+ struct xen_hyper_vcpu_context *vcc; -+ int pcpu; -+ ulong *regs; -+ ulong esp, eip; -+ -+ /* task means vcpu here */ -+ vcc = xen_hyper_vcpu_to_vcpu_context(bt->task); -+ if (!vcc) -+ error(FATAL, "invalid vcpu\n"); -+ -+ pcpu = vcc->processor; -+ if (!xen_hyper_test_pcpu_id(pcpu)) { -+ error(FATAL, "invalid pcpu number\n"); -+ } -+ -+ if (bt->flags & BT_TEXT_SYMBOLS_ALL) { -+ if (spp) -+ *spp = x86_get_stackbase_hyper(bt->task); -+ if (pcp) -+ *pcp = 0; -+ bt->flags &= ~BT_TEXT_SYMBOLS_ALL; -+ return; -+ } -+ -+ regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr; -+ esp = XEN_HYPER_X86_NOTE_ESP(regs); -+ eip = XEN_HYPER_X86_NOTE_EIP(regs); -+ -+ if (spp) { -+ if (esp < x86_get_stackbase_hyper(bt->task) || -+ esp >= x86_get_stacktop_hyper(bt->task)) -+ *spp = x86_get_stackbase_hyper(bt->task); -+ else -+ *spp = esp; -+ } -+ if (pcp) { -+ if (is_kernel_text(eip)) -+ *pcp = eip; -+ else -+ *pcp = 0; -+ } -+} -+ -+static void -+x86_init_hyper(int when) -+{ -+ switch (when) -+ { -+ case PRE_SYMTAB: -+ machdep->verify_symbol = x86_verify_symbol; -+ if (pc->flags & KERNEL_DEBUG_QUERY) -+ return; -+ machdep->pagesize = memory_page_size(); -+ machdep->pageshift = ffs(machdep->pagesize) - 1; -+ machdep->pageoffset = machdep->pagesize - 1; -+ machdep->pagemask = ~((ulonglong)machdep->pageoffset); -+ machdep->stacksize = machdep->pagesize * 4; /* ODA: magic num */ -+ if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc pgd space."); -+ if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc pmd space."); -+ if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc ptbl space."); -+ machdep->last_pgd_read = 0; -+ machdep->last_pmd_read = 0; -+ machdep->last_ptbl_read = 0; -+ machdep->machspec = &x86_machine_specific; /* some members used */ -+ break; -+ -+ case PRE_GDB: -+ if (symbol_exists("idle_pg_table_l3")) { -+ machdep->flags |= PAE; -+ PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL; -+ PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL; -+ PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL; -+ machdep->kvtop = x86_kvtop_PAE; -+ machdep->kvbase = HYPERVISOR_VIRT_START_PAE; -+ } else { -+ PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL; -+ PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL; -+ PTRS_PER_PGD = PTRS_PER_PGD_2LEVEL; -+ machdep->kvtop = x86_kvtop; -+ free(machdep->pmd); -+ machdep->pmd = machdep->pgd; -+ machdep->kvbase = HYPERVISOR_VIRT_START; -+ } -+ machdep->ptrs_per_pgd = PTRS_PER_PGD; -+ machdep->identity_map_base = DIRECTMAP_VIRT_START; -+ machdep->is_kvaddr = x86_xenhyper_is_kvaddr; -+ machdep->eframe_search = x86_eframe_search; -+ machdep->back_trace = x86_back_trace_cmd; -+ machdep->processor_speed = x86_processor_speed; /* ODA: check */ -+ machdep->dump_irq = generic_dump_irq; /* ODA: check */ -+ machdep->get_stack_frame = x86_get_stack_frame_hyper; -+ 
machdep->get_stackbase = x86_get_stackbase_hyper; -+ machdep->get_stacktop = x86_get_stacktop_hyper; -+ machdep->translate_pte = x86_translate_pte; -+ machdep->memory_size = xen_hyper_x86_memory_size; -+ machdep->dis_filter = x86_dis_filter; -+// machdep->cmd_mach = x86_cmd_mach; /* ODA: check */ -+ machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus; -+// machdep->line_number_hooks = x86_line_number_hooks; /* ODA: check */ -+ machdep->flags |= FRAMESIZE_DEBUG; /* ODA: check */ -+ machdep->value_to_symbol = generic_machdep_value_to_symbol; -+ machdep->clear_machdep_cache = x86_clear_machdep_cache; -+ -+ /* machdep table for Xen Hypervisor */ -+ xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init; -+ break; -+ -+ case POST_GDB: -+#if 0 /* ODA: need this ? */ -+ if (x86_omit_frame_pointer()) { -+ machdep->flags |= OMIT_FRAME_PTR; -+#endif -+ XEN_HYPER_STRUCT_SIZE_INIT(cpu_time, "cpu_time"); -+ XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); -+ XEN_HYPER_STRUCT_SIZE_INIT(tss_struct, "tss_struct"); -+ XEN_HYPER_MEMBER_OFFSET_INIT(tss_struct_esp0, "tss_struct", "esp0"); -+ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_local_tsc_stamp, "cpu_time", "local_tsc_stamp"); -+ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_local_stamp, "cpu_time", "stime_local_stamp"); -+ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_master_stamp, "cpu_time", "stime_master_stamp"); -+ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_tsc_scale, "cpu_time", "tsc_scale"); -+ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_calibration_timer, "cpu_time", "calibration_timer"); -+ if (symbol_exists("cpu_data")) { -+ xht->cpu_data_address = symbol_value("cpu_data"); -+ } -+/* KAK Can this be calculated? */ -+ if (!machdep->hz) { -+ machdep->hz = XEN_HYPER_HZ; -+ } -+ break; -+ -+ case POST_INIT: -+ break; -+ } -+} -+ - #endif /* X86 */ ---- crash/ppc.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/ppc.c 2007-03-19 12:55:31.000000000 -0500 -@@ -51,6 +51,9 @@ - void - ppc_init(int when) - { -+ uint cpu_features; -+ ulong cur_cpu_spec; -+ - switch (when) - { - case PRE_SYMTAB: -@@ -135,9 +138,23 @@ - "irq_desc", NULL, 0); - else - machdep->nr_irqs = 0; -- machdep->hz = HZ; -- if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -- machdep->hz = 1000; -+ if (!machdep->hz) { -+ machdep->hz = HZ; -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -+ machdep->hz = 1000; -+ } -+ if (symbol_exists("cur_cpu_spec")) { -+ get_symbol_data("cur_cpu_spec", sizeof(void *), &cur_cpu_spec); -+ readmem(cur_cpu_spec + MEMBER_OFFSET("cpu_spec", "cpu_user_features"), -+ KVADDR, &cpu_features, sizeof(uint), "cpu user features", -+ FAULT_ON_ERROR); -+ if (cpu_features & CPU_BOOKE) -+ machdep->flags |= CPU_BOOKE; -+ } -+ else -+ machdep->flags |= CPU_BOOKE; -+ machdep->section_size_bits = _SECTION_SIZE_BITS; -+ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; - break; - - case POST_INIT: -@@ -154,8 +171,6 @@ - fprintf(fp, " flags: %lx (", machdep->flags); - if (machdep->flags & KSYMS_START) - fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); -- if (machdep->flags & SYSRQ) -- fprintf(fp, "%sSYSRQ", others++ ? 
"|" : ""); - fprintf(fp, ")\n"); - - fprintf(fp, " kvbase: %lx\n", machdep->kvbase); -@@ -205,6 +220,9 @@ - fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); - fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); - fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); -+ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); -+ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); -+ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); - fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); - } - -@@ -280,7 +298,11 @@ - - page_middle = (ulong *)pgd_pte; - -- page_table = page_middle + (BTOP(vaddr) & (PTRS_PER_PTE - 1)); -+ if (machdep->flags & CPU_BOOKE) -+ page_table = page_middle + (BTOP(vaddr) & (PTRS_PER_PTE - 1)); -+ else -+ page_table = (ulong *)(((pgd_pte & (ulong)machdep->pagemask) + machdep->kvbase) + -+ ((ulong)BTOP(vaddr) & (PTRS_PER_PTE-1))); - - if (verbose) - fprintf(fp, " PMD: %lx => %lx\n",(ulong)page_middle, -@@ -364,7 +386,11 @@ - - page_middle = (ulong *)pgd_pte; - -- page_table = page_middle + (BTOP(kvaddr) & (PTRS_PER_PTE-1)); -+ if (machdep->flags & CPU_BOOKE) -+ page_table = page_middle + (BTOP(kvaddr) & (PTRS_PER_PTE - 1)); -+ else -+ page_table = (ulong *)(((pgd_pte & (ulong)machdep->pagemask) + machdep->kvbase) + -+ ((ulong)BTOP(kvaddr) & (PTRS_PER_PTE-1))); - - if (verbose) - fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, ---- crash/ia64.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/ia64.c 2007-08-23 17:02:53.000000000 -0400 -@@ -1,8 +1,8 @@ - /* ia64.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -16,6 +16,8 @@ - */ - #ifdef IA64 - #include "defs.h" -+#include "xen_hyper_defs.h" -+#include - - static int ia64_verify_symbol(const char *, ulong, char); - static int ia64_eframe_search(struct bt_info *); -@@ -25,6 +27,8 @@ - static void try_old_unwind(struct bt_info *); - static void ia64_dump_irq(int); - static ulong ia64_processor_speed(void); -+static int ia64_vtop_4l(ulong, physaddr_t *paddr, ulong *pgd, int, int); -+static int ia64_vtop(ulong, physaddr_t *paddr, ulong *pgd, int, int); - static int ia64_uvtop(struct task_context *, ulong, physaddr_t *, int); - static int ia64_kvtop(struct task_context *, ulong, physaddr_t *, int); - static ulong ia64_get_task_pgd(ulong); -@@ -47,10 +51,12 @@ - static int ia64_verify_paddr(uint64_t); - static int ia64_available_memory(struct efi_memory_desc_t *); - static void ia64_post_init(void); -+static ulong ia64_in_per_cpu_mca_stack(void); - static struct line_number_hook ia64_line_number_hooks[]; - static ulong ia64_get_stackbase(ulong); - static ulong ia64_get_stacktop(ulong); - static void parse_cmdline_arg(void); -+static void ia64_calc_phys_start(void); - - struct unw_frame_info; - static void dump_unw_frame_info(struct unw_frame_info *); -@@ -62,6 +68,17 @@ - static ulong rse_read_reg(struct unw_frame_info *, int, int *); - static void rse_function_params(struct unw_frame_info *, char *); - -+static int ia64_vtop_4l_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int); -+static int ia64_vtop_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int); -+static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *); -+static int ia64_xendump_p2m_create(struct xendump_data *); -+static void ia64_debug_dump_page(FILE *, char *, char *); -+static char *ia64_xendump_load_page(ulong, struct xendump_data *); -+static int ia64_xendump_page_index(ulong, struct xendump_data *); -+static ulong ia64_xendump_panic_task(struct xendump_data *); -+static void ia64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); -+ -+static void ia64_init_hyper(int); - - struct machine_specific ia64_machine_specific = { 0 }; - -@@ -70,8 +87,22 @@ - { - struct syment *sp, *spn; - -+ if (XEN_HYPER_MODE()) { -+ ia64_init_hyper(when); -+ return; -+ } -+ - switch (when) - { -+ case SETUP_ENV: -+#if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) -+ prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); -+#endif -+#if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) -+ prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); -+#endif -+ break; -+ - case PRE_SYMTAB: - machdep->verify_symbol = ia64_verify_symbol; - machdep->machspec = &ia64_machine_specific; -@@ -92,17 +123,23 @@ - case 16384: - machdep->stacksize = (power(2, 1) * PAGESIZE()); - break; -+ case 65536: -+ machdep->stacksize = (power(2, 0) * PAGESIZE()); -+ break; - default: - machdep->stacksize = 32*1024; - break; - } - if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc pgd space."); -+ if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc pud space."); - if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc pmd space."); - if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc ptbl space."); - machdep->last_pgd_read = 0; -+ machdep->last_pud_read = 0; - machdep->last_pmd_read = 0; - 
machdep->last_ptbl_read = 0; - machdep->verify_paddr = ia64_verify_paddr; -@@ -115,14 +152,17 @@ - break; - - case PRE_GDB: -+ - if (pc->flags & KERNEL_DEBUG_QUERY) - return; -+ - /* - * Until the kernel core dump and va_server library code - * do the right thing with respect to the configured page size, - * try to recognize a fatal inequity between the compiled-in - * page size and the page size used by the kernel. - */ -+ - - if ((sp = symbol_search("empty_zero_page")) && - (spn = next_symbol(NULL, sp)) && -@@ -169,10 +209,14 @@ - machdep->machspec->kernel_start + - GIGABYTES((ulong)(4)); - if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) -- machdep->machspec->phys_start = -- DEFAULT_PHYS_START; -+ ia64_calc_phys_start(); - } else - machdep->machspec->vmalloc_start = KERNEL_VMALLOC_BASE; -+ -+ machdep->xen_kdump_p2m_create = ia64_xen_kdump_p2m_create; -+ machdep->xendump_p2m_create = ia64_xendump_p2m_create; -+ machdep->xendump_panic_task = ia64_xendump_panic_task; -+ machdep->get_xendump_regs = ia64_get_xendump_regs; - break; - - case POST_GDB: -@@ -202,7 +246,10 @@ - else if (symbol_exists("_irq_desc")) - ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, - "_irq_desc", NULL, 0); -- machdep->hz = 1024; -+ if (!machdep->hz) -+ machdep->hz = 1024; -+ machdep->section_size_bits = _SECTION_SIZE_BITS; -+ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; - ia64_create_memmap(); - break; - -@@ -228,8 +275,10 @@ - char *arglist[MAXARGS]; - ulong value; - struct machine_specific *ms; -+ int vm_flag; - - ms = &ia64_machine_specific; -+ vm_flag = 0; - - if (!strstr(machdep->cmdline_arg, "=")) { - errflag = 0; -@@ -284,11 +333,37 @@ - continue; - } - } -+ } else if (STRNEQ(arglist[i], "vm=")) { -+ vm_flag++; -+ p = arglist[i] + strlen("vm="); -+ if (strlen(p)) { -+ if (STREQ(p, "4l")) { -+ machdep->flags |= VM_4_LEVEL; -+ continue; -+ } -+ } - } - - error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); - } - -+ if (vm_flag) { -+ switch (machdep->flags & (VM_4_LEVEL)) -+ { -+ case VM_4_LEVEL: -+ error(NOTE, "using 4-level pagetable\n"); -+ c++; -+ break; -+ -+ default: -+ error(WARNING, "invalid vm= option\n"); -+ c++; -+ machdep->flags &= ~(VM_4_LEVEL); -+ break; -+ } -+ } -+ -+ - if (c) - fprintf(fp, "\n"); - } -@@ -314,6 +389,58 @@ - return TRUE; - } - -+ -+static ulong -+ia64_in_per_cpu_mca_stack(void) -+{ -+ int plen, i; -+ ulong flag; -+ ulong vaddr, paddr, stackbase, stacktop; -+ ulong *__per_cpu_mca; -+ struct task_context *tc; -+ -+ tc = CURRENT_CONTEXT(); -+ -+ if (STRNEQ(CURRENT_COMM(), "INIT")) -+ flag = INIT; -+ else if (STRNEQ(CURRENT_COMM(), "MCA")) -+ flag = MCA; -+ else -+ return 0; -+ -+ if (!symbol_exists("__per_cpu_mca") || -+ !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || -+ (plen < kt->cpus)) -+ return 0; -+ -+ vaddr = SWITCH_STACK_ADDR(CURRENT_TASK()); -+ if (VADDR_REGION(vaddr) != KERNEL_CACHED_REGION) -+ return 0; -+ paddr = ia64_VTOP(vaddr); -+ -+ __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * kt->cpus); -+ -+ if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, -+ sizeof(ulong) * kt->cpus, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) -+ return 0; -+ -+ if (CRASHDEBUG(1)) { -+ for (i = 0; i < kt->cpus; i++) { -+ fprintf(fp, "__per_cpu_mca[%d]: %lx\n", -+ i, __per_cpu_mca[i]); -+ } -+ } -+ -+ stackbase = __per_cpu_mca[tc->processor]; -+ stacktop = stackbase + (STACKSIZE() * 2); -+ FREEBUF(__per_cpu_mca); -+ -+ if ((paddr >= stackbase) && (paddr < stacktop)) -+ return flag; -+ else -+ return 0; -+} -+ - void - ia64_dump_machdep_table(ulong arg) - 
{ -@@ -401,12 +528,14 @@ - fprintf(fp, "%sUNW_R0", others++ ? "|" : ""); - if (machdep->flags & MEM_LIMIT) - fprintf(fp, "%sMEM_LIMIT", others++ ? "|" : ""); -- if (machdep->flags & SYSRQ) -- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); - if (machdep->flags & DEVMEMRD) - fprintf(fp, "%sDEVMEMRD", others++ ? "|" : ""); - if (machdep->flags & INIT) - fprintf(fp, "%sINIT", others++ ? "|" : ""); -+ if (machdep->flags & MCA) -+ fprintf(fp, "%sMCA", others++ ? "|" : ""); -+ if (machdep->flags & VM_4_LEVEL) -+ fprintf(fp, "%sVM_4_LEVEL", others++ ? "|" : ""); - fprintf(fp, ")\n"); - fprintf(fp, " kvbase: %lx\n", machdep->kvbase); - fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); -@@ -445,16 +574,25 @@ - (machdep->verify_paddr == ia64_verify_paddr) ? - "ia64_verify_paddr" : "generic_verify_paddr"); - fprintf(fp, " init_kernel_pgd: NULL\n"); -+ fprintf(fp, "xen_kdump_p2m_create: ia64_xen_kdump_p2m_create()\n"); -+ fprintf(fp, " xendump_p2m_create: ia64_xendump_p2m_create()\n"); -+ fprintf(fp, " xendump_panic_task: ia64_xendump_panic_task()\n"); -+ fprintf(fp, " get_xendump_regs: ia64_get_xendump_regs()\n"); - fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); - fprintf(fp, " line_number_hooks: ia64_line_number_hooks\n"); - fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); -+ fprintf(fp, " last_pud_read: %lx\n", machdep->last_pud_read); - fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); - fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); - fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); -+ fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); - fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); - fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); - fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); - fprintf(fp, " cmdline_arg: %s\n", machdep->cmdline_arg); -+ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); -+ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); -+ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); - fprintf(fp, " machspec: ia64_machine_specific\n"); - fprintf(fp, " cpu_data_address: %lx\n", - machdep->machspec->cpu_data_address); -@@ -565,9 +703,9 @@ - if (CRASHDEBUG(8)) - fprintf(fp, "%016lx %s\n", value, name); - -- if (STREQ(name, "phys_start") && type == 'A') -- if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) -- machdep->machspec->phys_start = value; -+// if (STREQ(name, "phys_start") && type == 'A') -+// if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) -+// machdep->machspec->phys_start = value; - - region = VADDR_REGION(value); - -@@ -665,74 +803,148 @@ - return (machdep->mhz = mhz); - } - -- --/* -- * Translates a user virtual address to its physical address. cmd_vtop() -- * sets the verbose flag so that the pte translation gets displayed; all -- * other callers quietly accept the translation. -- * -- * This routine can also take mapped kernel virtual addresses if the -u flag -- * was passed to cmd_vtop(). If so, it makes the translation using the -- * swapper_pg_dir, making it irrelevant in this processor's case. -+/* Generic abstraction to translate user or kernel virtual -+ * addresses to physical using a 4 level page table. 
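
The control flow of ia64_vtop_4l() is the classic one-table-per-level descent: index the current table, stop as soon as an entry is empty, and at the leaf combine the frame base with the page offset. The toy program below mirrors only that shape; its 4-entry tables, 2-bit indexes and PRESENT bit are made up for illustration and bear no relation to the real ia64 layout.

/* four_level_walk.c - toy 4-level walk with the same shape as ia64_vtop_4l(). */
#include <stdio.h>

#define PAGE_BITS       12
#define PAGE_MASK       ((1UL << PAGE_BITS) - 1)
#define IDX_BITS        2                       /* 4 entries per toy level */
#define IDX_MASK        ((1UL << IDX_BITS) - 1)
#define PRESENT         1UL

static int toy_vtop(unsigned long *pgd, unsigned long vaddr, unsigned long *paddr)
{
        unsigned long *table = pgd;
        int level;

        for (level = 3; level >= 0; level--) {
                unsigned long idx = (vaddr >> (PAGE_BITS + level * IDX_BITS)) & IDX_MASK;
                unsigned long entry = table[idx];

                if (!(entry & PRESENT))
                        return 0;               /* empty entry: translation fails */

                if (level == 0) {               /* leaf: frame base + page offset */
                        *paddr = (entry & ~PAGE_MASK) | (vaddr & PAGE_MASK);
                        return 1;
                }
                table = (unsigned long *)(entry & ~PRESENT);
        }
        return 0;                               /* not reached */
}

int main(void)
{
        static unsigned long pte[4], pmd[4], pud[4], pgd[4];
        unsigned long vaddr = 0x55678UL, paddr;

        pte[1] = 0x7000UL | PRESENT;            /* leaf frame 0x7000 */
        pmd[1] = (unsigned long)pte | PRESENT;
        pud[1] = (unsigned long)pmd | PRESENT;
        pgd[1] = (unsigned long)pud | PRESENT;

        if (toy_vtop(pgd, vaddr, &paddr))
                printf("vaddr %#lx -> paddr %#lx\n", vaddr, paddr);  /* 0x7678 */
        return 0;
}
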
- */ - static int --ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) -+ia64_vtop_4l(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) - { -- ulong mm; -- ulong *pgd; - ulong *page_dir; -+ ulong *page_upper; - ulong *page_middle; - ulong *page_table; - ulong pgd_pte; -+ ulong pud_pte; - ulong pmd_pte; - ulong pte; - ulong region, offset; - -- if (!tc) -- error(FATAL, "current context invalid\n"); -- -- *paddr = 0; -- region = VADDR_REGION(uvaddr); -+ if (usr) { -+ region = VADDR_REGION(vaddr); -+ offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); -+ offset |= (region << (PAGESHIFT() - 6)); -+ page_dir = pgd + offset; -+ } else { -+ if (!(pgd = (ulong *)vt->kernel_pgd[0])) -+ error(FATAL, "cannot determine kernel pgd pointer\n"); -+ page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); -+ } - -- if (IS_KVADDR(uvaddr)) -- return ia64_kvtop(tc, uvaddr, paddr, verbose); -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); - -- if ((mm = task_mm(tc->task, TRUE))) -- pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); -- else -- readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, -- sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); -+ FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); - -+ if (!(pgd_pte)) -+ return FALSE; -+ -+ offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1); -+ page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; -+ -+ FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE()); -+ pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper)); -+ - if (verbose) -- fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -+ fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte); -+ -+ if (!(pud_pte)) -+ return FALSE; -+ -+ offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); -+ page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; -+ -+ FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); -+ -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); -+ -+ if (!(pmd_pte)) -+ return FALSE; -+ -+ offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); -+ page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; -+ -+ FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); -+ pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); - -- offset = (uvaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); -- offset |= (region << (PAGESHIFT() - 6)); -- page_dir = pgd + offset; -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); -+ -+ if (!(pte & (_PAGE_P))) { -+ if (usr) -+ *paddr = pte; -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ ia64_translate_pte(pte, 0, 0); -+ } -+ return FALSE; -+ } -+ -+ *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); -+ ia64_translate_pte(pte, 0, 0); -+ } -+ -+ return TRUE; -+} -+ -+/* Generic abstraction to translate user or kernel virtual -+ * addresses to physical using a 3 level page table. 
-+ */ -+static int -+ia64_vtop(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) -+{ -+ ulong *page_dir; -+ ulong *page_middle; -+ ulong *page_table; -+ ulong pgd_pte; -+ ulong pmd_pte; -+ ulong pte; -+ ulong region, offset; -+ -+ if (usr) { -+ region = VADDR_REGION(vaddr); -+ offset = (vaddr >> PGDIR_SHIFT_3L) & ((PTRS_PER_PGD >> 3) - 1); -+ offset |= (region << (PAGESHIFT() - 6)); -+ page_dir = pgd + offset; -+ } else { -+ if (!(pgd = (ulong *)vt->kernel_pgd[0])) -+ error(FATAL, "cannot determine kernel pgd pointer\n"); -+ page_dir = pgd + ((vaddr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)); -+ } - -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -+ - FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); - pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); - -- if (verbose) { -+ if (verbose) - fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); -- } - - if (!(pgd_pte)) -- goto no_upage; -+ return FALSE; - -- offset = (uvaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); -+ offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); - page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; - - FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); - pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); - - if (verbose) -- fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle,pmd_pte); -+ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); - - if (!(pmd_pte)) -- goto no_upage; -+ return FALSE; - -- offset = (uvaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); -+ offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); - page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; - - FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); -@@ -742,15 +954,16 @@ - fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); - - if (!(pte & (_PAGE_P))) { -- *paddr = pte; -+ if (usr) -+ *paddr = pte; - if (pte && verbose) { - fprintf(fp, "\n"); - ia64_translate_pte(pte, 0, 0); - } -- goto no_upage; -+ return FALSE; - } - -- *paddr = (pte & _PFN_MASK) + PAGEOFFSET(uvaddr); -+ *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); - - if (verbose) { - fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); -@@ -758,10 +971,50 @@ - } - - return TRUE; -+} - --no_upage: - -- return FALSE; -+/* -+ * Translates a user virtual address to its physical address. cmd_vtop() -+ * sets the verbose flag so that the pte translation gets displayed; all -+ * other callers quietly accept the translation. -+ * -+ * This routine can also take mapped kernel virtual addresses if the -u flag -+ * was passed to cmd_vtop(). If so, it makes the translation using the -+ * swapper_pg_dir, making it irrelevant in this processor's case. 
-+ */ -+static int -+ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) -+{ -+ ulong mm; -+ ulong *pgd; -+ -+ if (!tc) -+ error(FATAL, "current context invalid\n"); -+ -+ *paddr = 0; -+ -+ if (IS_KVADDR(uvaddr)) -+ return ia64_kvtop(tc, uvaddr, paddr, verbose); -+ -+ if ((mm = task_mm(tc->task, TRUE))) -+ pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); -+ else -+ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, -+ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); -+ -+ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { -+ if (machdep->flags & VM_4_LEVEL) -+ return ia64_vtop_4l_xen_wpt(uvaddr, paddr, pgd, verbose, 1); -+ else -+ return ia64_vtop_xen_wpt(uvaddr, paddr, pgd, verbose, 1); -+ } else { -+ if (machdep->flags & VM_4_LEVEL) -+ return ia64_vtop_4l(uvaddr, paddr, pgd, verbose, 1); -+ else -+ return ia64_vtop(uvaddr, paddr, pgd, verbose, 1); -+ } -+ - } - - -@@ -774,13 +1027,6 @@ - ia64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) - { - ulong *pgd; -- ulong *page_dir; -- ulong *page_middle; -- ulong *page_table; -- ulong pgd_pte; -- ulong pmd_pte; -- ulong pte; -- ulong offset; - - if (!IS_KVADDR(kvaddr)) - return FALSE; -@@ -813,66 +1059,21 @@ - return TRUE; - } - -- pgd = (ulong *)vt->kernel_pgd[0]; -- -- if (verbose) { -- fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -- } -- -- page_dir = pgd + ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); -- -- FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); -- pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -- -- if (verbose) { -- fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); -- } -- -- if (!(pgd_pte)) -- goto no_kpage; -- -- offset = (kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); -- page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; -- -- FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); -- pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); -- -- if (verbose) -- fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, -- pmd_pte); -- -- if (!(pmd_pte)) -- goto no_kpage; -- -- offset = (kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); -- page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; -- -- FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); -- pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); -- -- if (verbose) -- fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); -- -- if (!(pte & (_PAGE_P))) { -- if (pte && verbose) { -- fprintf(fp, "\n"); -- ia64_translate_pte(pte, 0, 0); -- } -- goto no_kpage; -- } -- -- *paddr = (pte & _PFN_MASK) + PAGEOFFSET(kvaddr); -+ if (!(pgd = (ulong *)vt->kernel_pgd[0])) -+ error(FATAL, "cannot determine kernel pgd pointer\n"); - -- if (verbose) { -- fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); -- ia64_translate_pte(pte, 0, 0); -+ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { -+ if (machdep->flags & VM_4_LEVEL) -+ return ia64_vtop_4l_xen_wpt(kvaddr, paddr, pgd, verbose, 0); -+ else -+ return ia64_vtop_xen_wpt(kvaddr, paddr, pgd, verbose, 0); -+ } else { -+ if (machdep->flags & VM_4_LEVEL) -+ return ia64_vtop_4l(kvaddr, paddr, pgd, verbose, 0); -+ else -+ return ia64_vtop(kvaddr, paddr, pgd, verbose, 0); - } - -- return TRUE; -- --no_kpage: -- -- return FALSE; - } - - /* -@@ -958,9 +1159,15 @@ - { - ulong ksp; - -- readmem(task + OFFSET(task_struct_thread_ksp), KVADDR, -- &ksp, sizeof(void *), -- "thread_struct ksp", FAULT_ON_ERROR); -+ if (XEN_HYPER_MODE()) { -+ readmem(task + XEN_HYPER_OFFSET(vcpu_thread_ksp), KVADDR, -+ &ksp, sizeof(void *), -+ "vcpu thread ksp", 
FAULT_ON_ERROR); -+ } else { -+ readmem(task + OFFSET(task_struct_thread_ksp), KVADDR, -+ &ksp, sizeof(void *), -+ "thread_struct ksp", FAULT_ON_ERROR); -+ } - - return ksp; - } -@@ -1315,7 +1522,10 @@ - BZERO(&eframe, sizeof(ulong) * NUM_PT_REGS); - - open_tmpfile(); -- dump_struct("pt_regs", addr, RADIX(16)); -+ if (XEN_HYPER_MODE()) -+ dump_struct("cpu_user_regs", addr, RADIX(16)); -+ else -+ dump_struct("pt_regs", addr, RADIX(16)); - rewind(pc->tmpfile); - - fval = 0; -@@ -1571,6 +1781,12 @@ - - fprintf(fp, " EFRAME: %lx\n", addr); - -+ if (bt->flags & BT_INCOMPLETE_USER_EFRAME) { -+ fprintf(fp, -+ " [exception frame incomplete -- check salinfo for complete context]\n"); -+ bt->flags &= ~BT_INCOMPLETE_USER_EFRAME; -+ } -+ - fprintf(fp, " B0: %016lx CR_IIP: %016lx\n", - eframe[P_b0], eframe[P_cr_iip]); - /** -@@ -2371,9 +2587,10 @@ - !readmem(ia64_boot_param+ - MEMBER_OFFSET("ia64_boot_param", "efi_memmap"), - KVADDR, &efi_memmap, sizeof(uint64_t), "efi_memmap", -- RETURN_ON_ERROR)) { -- error(WARNING, "cannot read ia64_boot_param: " -- "memory verification will not be performed\n\n"); -+ QUIET|RETURN_ON_ERROR)) { -+ if (!XEN() || CRASHDEBUG(1)) -+ error(WARNING, "cannot read ia64_boot_param: " -+ "memory verification will not be performed\n\n"); - return; - } - -@@ -2391,9 +2608,11 @@ - - if ((ms->mem_limit && (efi_memmap >= ms->mem_limit)) || - !readmem(PTOV(efi_memmap), KVADDR, memmap, -- ms->efi_memmap_size, "efi_mmap contents", RETURN_ON_ERROR)) { -- error(WARNING, "cannot read efi_mmap: " -- "memory verification will not be performed\n"); -+ ms->efi_memmap_size, "efi_mmap contents", -+ QUIET|RETURN_ON_ERROR)) { -+ if (!XEN() || (XEN() && CRASHDEBUG(1))) -+ error(WARNING, "cannot read efi_mmap: " -+ "EFI memory verification will not be performed\n\n"); - free(memmap); - return; - } -@@ -2605,6 +2824,8 @@ - ia64_post_init(void) - { - struct machine_specific *ms; -+ struct gnu_request req; -+ ulong flag; - - ms = &ia64_machine_specific; - -@@ -2677,12 +2898,16 @@ - } - } - -- if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) -- ms->ia64_init_stack_size = get_array_length("ia64_init_stack", -- NULL, 0); -+ if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) { -+ get_symbol_type("ia64_init_stack", NULL, &req); -+ ms->ia64_init_stack_size = req.length; -+ } - - if (DUMPFILE() && ia64_in_init_stack(SWITCH_STACK_ADDR(CURRENT_TASK()))) - machdep->flags |= INIT; -+ -+ if (DUMPFILE() && (flag = ia64_in_per_cpu_mca_stack())) -+ machdep->flags |= flag; - } - - /* -@@ -3326,4 +3551,766 @@ - (vaddr < (ulong)KERNEL_UNCACHED_BASE)); - } - -+/* Generic abstraction to translate user or kernel virtual -+ * addresses to physical using a 4 level page table. 
-+ */ -+static int -+ia64_vtop_4l_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) -+{ -+ error(FATAL, "ia64_vtop_4l_xen_wpt: TBD\n"); -+ return FALSE; -+#ifdef TBD -+ ulong *page_dir; -+ ulong *page_upper; -+ ulong *page_middle; -+ ulong *page_table; -+ ulong pgd_pte; -+ ulong pud_pte; -+ ulong pmd_pte; -+ ulong pte; -+ ulong region, offset; -+ -+ -+ if (usr) { -+ region = VADDR_REGION(vaddr); -+ offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); -+ offset |= (region << (PAGESHIFT() - 6)); -+ page_dir = pgd + offset; -+ } else { -+ if (!(pgd = (ulong *)vt->kernel_pgd[0])) -+ error(FATAL, "cannot determine kernel pgd pointer\n"); -+ page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); -+ } -+ -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -+ -+ FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); -+ -+ if (!(pgd_pte)) -+ return FALSE; -+ -+ offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1); -+ page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; -+ -+ FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE()); -+ pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper)); -+ -+ if (verbose) -+ fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte); -+ -+ if (!(pud_pte)) -+ return FALSE; -+ -+ offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); -+ page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; -+ -+ FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); -+ -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); -+ -+ if (!(pmd_pte)) -+ return FALSE; -+ -+ offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); -+ page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; -+ -+ FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); -+ pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); -+ -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); -+ -+ if (!(pte & (_PAGE_P))) { -+ if (usr) -+ *paddr = pte; -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ ia64_translate_pte(pte, 0, 0); -+ } -+ return FALSE; -+ } -+ -+ *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); -+ ia64_translate_pte(pte, 0, 0); -+ } -+ -+ return TRUE; -+#endif -+} -+ -+/* Generic abstraction to translate user or kernel virtual -+ * addresses to physical using a 3 level page table. 
-+ */ -+static int -+ia64_vtop_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) -+{ -+ error(FATAL, "ia64_vtop_xen_wpt: TBD\n"); -+ return FALSE; -+#ifdef TBD -+ ulong *page_dir; -+ ulong *page_middle; -+ ulong *page_table; -+ ulong pgd_pte; -+ ulong pmd_pte; -+ ulong pte; -+ ulong region, offset; -+ -+ -+ if (usr) { -+ region = VADDR_REGION(vaddr); -+ offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); -+ offset |= (region << (PAGESHIFT() - 6)); -+ page_dir = pgd + offset; -+ } else { -+ if (!(pgd = (ulong *)vt->kernel_pgd[0])) -+ error(FATAL, "cannot determine kernel pgd pointer\n"); -+ page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); -+ } -+ -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -+ -+ FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); -+ -+ if (!(pgd_pte)) -+ return FALSE; -+ -+ offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); -+ page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; -+ -+ FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); -+ -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); -+ -+ if (!(pmd_pte)) -+ return FALSE; -+ -+ offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); -+ page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; -+ -+ FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); -+ pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); -+ -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); -+ -+ if (!(pte & (_PAGE_P))) { -+ if (usr) -+ *paddr = pte; -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ ia64_translate_pte(pte, 0, 0); -+ } -+ return FALSE; -+ } -+ -+ *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); -+ ia64_translate_pte(pte, 0, 0); -+ } -+ -+ return TRUE; -+#endif -+} -+ -+#include "netdump.h" -+ -+/* -+ * Determine the relocatable physical address base. -+ */ -+static void -+ia64_calc_phys_start(void) -+{ -+ FILE *iomem; -+ int i, found, errflag; -+ char buf[BUFSIZE]; -+ char *p1; -+ ulong kernel_code_start; -+ struct vmcore_data *vd; -+ Elf64_Phdr *phdr; -+ ulong phys_start, text_start; -+ -+ /* -+ * Default to 64MB. -+ */ -+ machdep->machspec->phys_start = DEFAULT_PHYS_START; -+ -+ text_start = symbol_exists("_text") ? symbol_value("_text") : BADADDR; -+ -+ if (ACTIVE()) { -+ if ((iomem = fopen("/proc/iomem", "r")) == NULL) -+ return; -+ -+ errflag = 1; -+ while (fgets(buf, BUFSIZE, iomem)) { -+ if (strstr(buf, ": Kernel code")) { -+ clean_line(buf); -+ errflag = 0; -+ break; -+ } -+ } -+ fclose(iomem); -+ -+ if (errflag) -+ return; -+ -+ if (!(p1 = strstr(buf, "-"))) -+ return; -+ else -+ *p1 = NULLCHAR; -+ -+ errflag = 0; -+ kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag); -+ if (errflag) -+ return; -+ -+ machdep->machspec->phys_start = kernel_code_start; -+ -+ if (CRASHDEBUG(1)) { -+ if (text_start == BADADDR) -+ fprintf(fp, "_text: (unknown) "); -+ else -+ fprintf(fp, "_text: %lx ", text_start); -+ fprintf(fp, "Kernel code: %lx -> ", kernel_code_start); -+ fprintf(fp, "phys_start: %lx\n\n", -+ machdep->machspec->phys_start); -+ } -+ -+ return; -+ } -+ -+ /* -+ * Get relocation value from whatever dumpfile format is being used. 
-+ */ -+ -+ if (DISKDUMP_DUMPFILE()) { -+ if (diskdump_phys_base(&phys_start)) { -+ machdep->machspec->phys_start = phys_start; -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "compressed kdump: phys_start: %lx\n", -+ phys_start); -+ } -+ return; -+ } -+ -+ if ((vd = get_kdump_vmcore_data())) { -+ /* -+ * There should be at most one region 5 region, and it -+ * should be equal to "_text". If not, take whatever -+ * region 5 address comes first and hope for the best. -+ */ -+ for (i = found = 0; i < vd->num_pt_load_segments; i++) { -+ phdr = vd->load64 + i; -+ if (phdr->p_vaddr == text_start) { -+ machdep->machspec->phys_start = phdr->p_paddr; -+ found++; -+ break; -+ } -+ } -+ -+ for (i = 0; !found && (i < vd->num_pt_load_segments); i++) { -+ phdr = vd->load64 + i; -+ if (VADDR_REGION(phdr->p_vaddr) == KERNEL_VMALLOC_REGION) { -+ machdep->machspec->phys_start = phdr->p_paddr; -+ found++; -+ break; -+ } -+ } -+ -+ if (found && CRASHDEBUG(1)) { -+ if (text_start == BADADDR) -+ fprintf(fp, "_text: (unknown) "); -+ else -+ fprintf(fp, "_text: %lx ", text_start); -+ fprintf(fp, "p_vaddr: %lx p_paddr: %lx\n", -+ phdr->p_vaddr, phdr->p_paddr); -+ } -+ -+ return; -+ } -+} -+ -+/* -+ * From the xen vmcore, create an index of mfns for each page that makes -+ * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. -+ */ -+static int -+ia64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) -+{ -+ /* -+ * Temporarily read physical (machine) addresses from vmcore by -+ * going directly to read_netdump() instead of via read_kdump(). -+ */ -+ pc->readmem = read_netdump; -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "ia64_xen_kdump_p2m_create: p2m_mfn: %lx\n", xkd->p2m_mfn); -+ -+ if ((xkd->p2m_mfn_frame_list = (ulong *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc p2m_frame_list"); -+ -+ if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->p2m_mfn_frame_list, PAGESIZE(), -+ "xen kdump p2m mfn page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump p2m mfn page\n"); -+ -+ xkd->p2m_frames = PAGESIZE()/sizeof(ulong); -+ -+ pc->readmem = read_kdump; -+ -+ return TRUE; -+} -+ -+physaddr_t -+ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo) -+{ -+ ulong pgd_idx, pte_idx; -+ ulong pmd, pte; -+ physaddr_t paddr; -+ -+ /* -+ * Temporarily read physical (machine) addresses from vmcore by -+ * going directly to read_netdump() instead of via read_kdump(). 
-+ */ -+ pc->readmem = read_netdump; -+ -+ xkd->accesses += 2; -+ -+ pgd_idx = (pseudo >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1); -+ pmd = xkd->p2m_mfn_frame_list[pgd_idx] & _PFN_MASK; -+ if (!pmd) { -+ paddr = P2M_FAILURE; -+ goto out; -+ } -+ -+ pmd += ((pseudo >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); -+ if (pmd != xkd->last_pmd_read) { -+ if (!readmem(pmd, PHYSADDR, &pte, sizeof(ulong), -+ "ia64_xen_kdump_p2m pmd", RETURN_ON_ERROR)) { -+ xkd->last_pmd_read = BADADDR; -+ xkd->last_mfn_read = BADADDR; -+ paddr = P2M_FAILURE; -+ goto out; -+ } -+ xkd->last_pmd_read = pmd; -+ } else { -+ pte = xkd->last_mfn_read; -+ xkd->cache_hits++; -+ } -+ pte = pte & _PFN_MASK; -+ if (!pte) { -+ paddr = P2M_FAILURE; -+ goto out; -+ } -+ -+ if (pte != xkd->last_mfn_read) { -+ if (!readmem(pte, PHYSADDR, xkd->page, PAGESIZE(), -+ "ia64_xen_kdump_p2m pte page", RETURN_ON_ERROR)) { -+ xkd->last_pmd_read = BADADDR; -+ xkd->last_mfn_read = BADADDR; -+ paddr = P2M_FAILURE; -+ goto out; -+ } -+ xkd->last_mfn_read = pte; -+ } else -+ xkd->cache_hits++; -+ -+ pte_idx = (pseudo >> PAGESHIFT()) & (PTRS_PER_PTE - 1); -+ paddr = *(((ulong *)xkd->page) + pte_idx); -+ if (!(paddr & _PAGE_P)) { -+ paddr = P2M_FAILURE; -+ goto out; -+ } -+ paddr = (paddr & _PFN_MASK) | PAGEOFFSET(pseudo); -+ -+out: -+ pc->readmem = read_kdump; -+ return paddr; -+} -+ -+#include "xendump.h" -+ -+/* -+ * Create an index of mfns for each page that makes up the -+ * kernel's complete phys_to_machine_mapping[max_pfn] array. -+ */ -+static int -+ia64_xendump_p2m_create(struct xendump_data *xd) -+{ -+ if (!symbol_exists("phys_to_machine_mapping")) { -+ xd->flags |= XC_CORE_NO_P2M; -+ return TRUE; -+ } -+ -+ error(FATAL, "ia64_xendump_p2m_create: TBD\n"); -+ -+ /* dummy calls for clean "make [wW]arn" */ -+ ia64_debug_dump_page(NULL, NULL, NULL); -+ ia64_xendump_load_page(0, xd); -+ ia64_xendump_page_index(0, xd); -+ ia64_xendump_panic_task(xd); /* externally called */ -+ ia64_get_xendump_regs(xd, NULL, NULL, NULL); /* externally called */ -+ -+ return FALSE; -+} -+ -+static void -+ia64_debug_dump_page(FILE *ofp, char *page, char *name) -+{ -+ int i; -+ ulong *up; -+ -+ fprintf(ofp, "%s\n", name); -+ -+ up = (ulong *)page; -+ for (i = 0; i < 1024; i++) { -+ fprintf(ofp, "%016lx: %016lx %016lx\n", -+ (ulong)((i * 2) * sizeof(ulong)), -+ *up, *(up+1)); -+ up += 2; -+ } -+} -+ -+/* -+ * Find the page associate with the kvaddr, and read its contents -+ * into the passed-in buffer. -+ */ -+static char * -+ia64_xendump_load_page(ulong kvaddr, struct xendump_data *xd) -+{ -+ error(FATAL, "ia64_xendump_load_page: TBD\n"); -+ -+ return NULL; -+} -+ -+/* -+ * Find the dumpfile page index associated with the kvaddr. 
-+ */ -+static int -+ia64_xendump_page_index(ulong kvaddr, struct xendump_data *xd) -+{ -+ error(FATAL, "ia64_xendump_page_index: TBD\n"); -+ -+ return 0; -+} -+ -+static ulong -+ia64_xendump_panic_task(struct xendump_data *xd) -+{ -+ if (CRASHDEBUG(1)) -+ error(INFO, "ia64_xendump_panic_task: TBD\n"); -+ -+ return NO_TASK; -+} -+ -+static void -+ia64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp) -+{ -+ machdep->get_stack_frame(bt, rip, rsp); -+ -+ if (is_task_active(bt->task) && -+ !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS)) && -+ STREQ(closest_symbol(*rip), "schedule")) -+ error(INFO, -+ "xendump: switch_stack possibly not saved -- try \"bt -t\"\n"); -+} -+ -+/* for XEN Hypervisor analysis */ -+ -+static int -+ia64_is_kvaddr_hyper(ulong addr) -+{ -+ return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); -+} -+ -+static int -+ia64_kvtop_hyper(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+{ -+ ulong virt_percpu_start, phys_percpu_start; -+ ulong addr, dirp, entry; -+ -+ if (!IS_KVADDR(kvaddr)) -+ return FALSE; -+ -+ if (PERCPU_VIRT_ADDR(kvaddr)) { -+ virt_percpu_start = symbol_value("__phys_per_cpu_start"); -+ phys_percpu_start = virt_percpu_start - DIRECTMAP_VIRT_START; -+ *paddr = kvaddr - PERCPU_ADDR + phys_percpu_start; -+ return TRUE; -+ } else if (DIRECTMAP_VIRT_ADDR(kvaddr)) { -+ *paddr = kvaddr - DIRECTMAP_VIRT_START; -+ return TRUE; -+ } else if (!FRAME_TABLE_VIRT_ADDR(kvaddr)) { -+ return FALSE; -+ } -+ -+ /* frametable virtual address */ -+ addr = kvaddr - xhmachdep->frame_table; -+ -+ dirp = symbol_value("frametable_pg_dir"); -+ dirp += ((addr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)) * sizeof(ulong); -+ readmem(dirp, KVADDR, &entry, sizeof(ulong), -+ "frametable_pg_dir", FAULT_ON_ERROR); -+ -+ dirp = entry & _PFN_MASK; -+ if (!dirp) -+ return FALSE; -+ dirp += ((addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); -+ readmem(dirp, PHYSADDR, &entry, sizeof(ulong), -+ "frametable pmd", FAULT_ON_ERROR); -+ -+ dirp = entry & _PFN_MASK; -+ if (!dirp) -+ return FALSE; -+ dirp += ((addr >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) * sizeof(ulong); -+ readmem(dirp, PHYSADDR, &entry, sizeof(ulong), -+ "frametable pte", FAULT_ON_ERROR); -+ -+ if (!(entry & _PAGE_P)) -+ return FALSE; -+ -+ *paddr = (entry & _PFN_MASK) + (kvaddr & (PAGESIZE() - 1)); -+ return TRUE; -+} -+ -+static void -+ia64_post_init_hyper(void) -+{ -+ struct machine_specific *ms; -+ ulong frame_table; -+ -+ ms = &ia64_machine_specific; -+ -+ if (symbol_exists("unw_init_frame_info")) { -+ machdep->flags |= NEW_UNWIND; -+ if (MEMBER_EXISTS("unw_frame_info", "pt")) { -+ if (MEMBER_EXISTS("cpu_user_regs", "ar_csd")) { -+ machdep->flags |= NEW_UNW_V3; -+ ms->unwind_init = unwind_init_v3; -+ ms->unwind = unwind_v3; -+ ms->unwind_debug = unwind_debug_v3; -+ ms->dump_unwind_stats = dump_unwind_stats_v3; -+ } else { -+ machdep->flags |= NEW_UNW_V2; -+ ms->unwind_init = unwind_init_v2; -+ ms->unwind = unwind_v2; -+ ms->unwind_debug = unwind_debug_v2; -+ ms->dump_unwind_stats = dump_unwind_stats_v2; -+ } -+ } else { -+ machdep->flags |= NEW_UNW_V1; -+ ms->unwind_init = unwind_init_v1; -+ ms->unwind = unwind_v1; -+ ms->unwind_debug = unwind_debug_v1; -+ ms->dump_unwind_stats = dump_unwind_stats_v1; -+ } -+ } else { -+ machdep->flags |= OLD_UNWIND; -+ ms->unwind_init = ia64_old_unwind_init; -+ ms->unwind = ia64_old_unwind; -+ } -+ ms->unwind_init(); -+ -+ if (symbol_exists("frame_table")) { -+ frame_table = symbol_value("frame_table"); -+ 
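/*
 * The frametable lookup in ia64_kvtop_hyper() above is a plain three-level
 * walk: each level pulls an index out of the virtual offset and masks the
 * fetched entry with _PFN_MASK to reach the next table.  A minimal sketch
 * of just that index arithmetic, assuming 64-bit longs, 16KB pages
 * (PAGESHIFT() == 14) and 8-byte table entries -- the macro values below
 * are illustrative stand-ins, not the crash definitions.
 */
#include <stdio.h>

#define PAGESHIFT       14
#define PTRS_PER_TABLE  (1UL << (PAGESHIFT - 3))        /* entries per page */
#define PMD_SHIFT       (PAGESHIFT + (PAGESHIFT - 3))
#define PGDIR_SHIFT_3L  (PMD_SHIFT + (PAGESHIFT - 3))

int main(void)
{
        unsigned long addr = 0x123456789aUL;            /* hypothetical frametable offset */

        printf("pgd index: %lu\n", (addr >> PGDIR_SHIFT_3L) & (PTRS_PER_TABLE - 1));
        printf("pmd index: %lu\n", (addr >> PMD_SHIFT) & (PTRS_PER_TABLE - 1));
        printf("pte index: %lu\n", (addr >> PAGESHIFT) & (PTRS_PER_TABLE - 1));
        return 0;
}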
readmem(frame_table, KVADDR, &xhmachdep->frame_table, sizeof(ulong), -+ "frame_table virtual address", FAULT_ON_ERROR); -+ } else { -+ error(FATAL, "cannot find frame_table virtual address."); -+ } -+} -+ -+int -+ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt) -+{ -+ int plen, i; -+ ulong paddr, stackbase, stacktop; -+ ulong *__per_cpu_mca; -+ struct xen_hyper_vcpu_context *vcc; -+ -+ vcc = xen_hyper_vcpu_to_vcpu_context(bt->task); -+ if (!vcc) -+ return 0; -+ -+ if (!symbol_exists("__per_cpu_mca") || -+ !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || -+ (plen < xht->pcpus)) -+ return 0; -+ -+ if (!machdep->kvtop(NULL, addr, &paddr, 0)) -+ return 0; -+ -+ __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * xht->pcpus); -+ -+ if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, -+ sizeof(ulong) * xht->pcpus, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) -+ return 0; -+ -+ if (CRASHDEBUG(1)) { -+ for (i = 0; i < xht->pcpus; i++) { -+ fprintf(fp, "__per_cpu_mca[%d]: %lx\n", -+ i, __per_cpu_mca[i]); -+ } -+ } -+ -+ stackbase = __per_cpu_mca[vcc->processor]; -+ stacktop = stackbase + (STACKSIZE() * 2); -+ FREEBUF(__per_cpu_mca); -+ -+ if ((paddr >= stackbase) && (paddr < stacktop)) -+ return 1; -+ else -+ return 0; -+} -+ -+static void -+ia64_init_hyper(int when) -+{ -+ struct syment *sp; -+ -+ switch (when) -+ { -+ case SETUP_ENV: -+#if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) -+ prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); -+#endif -+#if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) -+ prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); -+#endif -+ break; -+ -+ case PRE_SYMTAB: -+ machdep->verify_symbol = ia64_verify_symbol; -+ machdep->machspec = &ia64_machine_specific; -+ if (pc->flags & KERNEL_DEBUG_QUERY) -+ return; -+ machdep->pagesize = memory_page_size(); -+ machdep->pageshift = ffs(machdep->pagesize) - 1; -+ machdep->pageoffset = machdep->pagesize - 1; -+ machdep->pagemask = ~(machdep->pageoffset); -+ switch (machdep->pagesize) -+ { -+ case 4096: -+ machdep->stacksize = (power(2, 3) * PAGESIZE()); -+ break; -+ case 8192: -+ machdep->stacksize = (power(2, 2) * PAGESIZE()); -+ break; -+ case 16384: -+ machdep->stacksize = (power(2, 1) * PAGESIZE()); -+ break; -+ case 65536: -+ machdep->stacksize = (power(2, 0) * PAGESIZE()); -+ break; -+ default: -+ machdep->stacksize = 32*1024; -+ break; -+ } -+ if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc pgd space."); -+ if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc pud space."); -+ if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc pmd space."); -+ if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc ptbl space."); -+ machdep->last_pgd_read = 0; -+ machdep->last_pud_read = 0; -+ machdep->last_pmd_read = 0; -+ machdep->last_ptbl_read = 0; -+ machdep->verify_paddr = ia64_verify_paddr; -+ machdep->ptrs_per_pgd = PTRS_PER_PGD; -+ machdep->machspec->phys_start = UNKNOWN_PHYS_START; -+ /* ODA: if need make hyper version -+ if (machdep->cmdline_arg) -+ parse_cmdline_arg(); */ -+ break; -+ -+ case PRE_GDB: -+ -+ if (pc->flags & KERNEL_DEBUG_QUERY) -+ return; -+ -+ machdep->kvbase = HYPERVISOR_VIRT_START; -+ machdep->identity_map_base = HYPERVISOR_VIRT_START; -+ machdep->is_kvaddr = ia64_is_kvaddr_hyper; -+ machdep->is_uvaddr = generic_is_uvaddr; -+ machdep->eframe_search = ia64_eframe_search; -+ machdep->back_trace = ia64_back_trace_cmd; -+ 
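/*
 * Worked check of the stacksize switch in the PRE_SYMTAB case above:
 * "power(2, n) * PAGESIZE()" resolves to a 32KB kernel stack for the
 * 4KB/8KB/16KB page sizes and a 64KB stack for 64KB pages.  power()
 * here is a local stand-in for crash's helper of the same name.
 */
#include <stdio.h>

static unsigned long power(unsigned long base, int exp)
{
        unsigned long result = 1;

        while (exp-- > 0)
                result *= base;
        return result;
}

int main(void)
{
        const struct { unsigned long pagesize; int exp; } table[] = {
                { 4096, 3 }, { 8192, 2 }, { 16384, 1 }, { 65536, 0 },
        };
        int i;

        for (i = 0; i < 4; i++)
                printf("pagesize %5lu -> stacksize %lu\n",
                        table[i].pagesize,
                        power(2, table[i].exp) * table[i].pagesize);
        return 0;
}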
machdep->processor_speed = xen_hyper_ia64_processor_speed; -+ machdep->uvtop = ia64_uvtop; -+ machdep->kvtop = ia64_kvtop_hyper; -+ machdep->get_stack_frame = ia64_get_stack_frame; -+ machdep->get_stackbase = ia64_get_stackbase; -+ machdep->get_stacktop = ia64_get_stacktop; -+ machdep->translate_pte = ia64_translate_pte; -+ machdep->memory_size = xen_hyper_ia64_memory_size; -+ machdep->dis_filter = ia64_dis_filter; -+ machdep->cmd_mach = ia64_cmd_mach; -+ machdep->get_smp_cpus = xen_hyper_ia64_get_smp_cpus; -+ machdep->line_number_hooks = ia64_line_number_hooks; -+ machdep->value_to_symbol = generic_machdep_value_to_symbol; -+ machdep->init_kernel_pgd = NULL; -+ -+ if ((sp = symbol_search("_stext"))) { -+ machdep->machspec->kernel_region = -+ VADDR_REGION(sp->value); -+ machdep->machspec->kernel_start = sp->value; -+ } else { -+// machdep->machspec->kernel_region = KERNEL_CACHED_REGION; -+// machdep->machspec->kernel_start = KERNEL_CACHED_BASE; -+ } -+ -+ /* machdep table for Xen Hypervisor */ -+ xhmachdep->pcpu_init = xen_hyper_ia64_pcpu_init; -+ break; -+ -+ case POST_GDB: -+ STRUCT_SIZE_INIT(switch_stack, "switch_stack"); -+ MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph"); -+ MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0"); -+ MEMBER_OFFSET_INIT(switch_stack_ar_bspstore, -+ "switch_stack", "ar_bspstore"); -+ MEMBER_OFFSET_INIT(switch_stack_ar_pfs, -+ "switch_stack", "ar_pfs"); -+ MEMBER_OFFSET_INIT(switch_stack_ar_rnat, -+ "switch_stack", "ar_rnat"); -+ MEMBER_OFFSET_INIT(switch_stack_pr, -+ "switch_stack", "pr"); -+ -+ XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64"); -+ XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq, "cpuinfo_ia64", "proc_freq"); -+ XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_vendor, "cpuinfo_ia64", "vendor"); -+ if (symbol_exists("per_cpu__cpu_info")) { -+ xht->cpu_data_address = symbol_value("per_cpu__cpu_info"); -+ } -+ /* kakuma Can this be calculated? */ -+ if (!machdep->hz) { -+ machdep->hz = XEN_HYPER_HZ; -+ } -+ break; -+ -+ case POST_INIT: -+ ia64_post_init_hyper(); -+ break; -+ } -+} - #endif ---- crash/s390.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/s390.c 2007-04-25 08:58:52.000000000 -0400 -@@ -1,9 +1,9 @@ - /* s390.c - core analysis suite + /* lkcd_dump_v5.h - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. - * Copyright (C) 2002, 2003, 2004, 2005 David Anderson - * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -- * Copyright (C) 2005 Michael Holzheu, IBM Corporation -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. 
-+ * Copyright (C) 2005, 2006 Michael Holzheu, IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -86,7 +86,6 @@ - static ulong s390_processor_speed(void); - static int s390_eframe_search(struct bt_info *); - static void s390_back_trace_cmd(struct bt_info *); --static void s390_back_trace(struct gnu_request *, struct bt_info *); - static void s390_dump_irq(int); - static void s390_get_stack_frame(struct bt_info *, ulong *, ulong *); - static int s390_dis_filter(ulong, char *); -@@ -158,7 +157,8 @@ - machdep->nr_irqs = 0; /* TBD */ - machdep->vmalloc_start = s390_vmalloc_start; - machdep->dump_irq = s390_dump_irq; -- machdep->hz = HZ; -+ if (!machdep->hz) -+ machdep->hz = HZ; - break; - - case POST_INIT: -@@ -178,8 +178,6 @@ - fprintf(fp, " flags: %lx (", machdep->flags); - if (machdep->flags & KSYMS_START) - fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); -- if (machdep->flags & SYSRQ) -- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); - fprintf(fp, ")\n"); - - fprintf(fp, " kvbase: %lx\n", machdep->kvbase); -@@ -230,19 +228,6 @@ - } - - /* -- * Check if address is in the vmalloc area -- */ --int --s390_IS_VMALLOC_ADDR(ulong addr) --{ -- static unsigned long high_memory = 0; -- if(!high_memory){ -- high_memory = s390_vmalloc_start(); -- } -- return (addr > high_memory); --} -- --/* - * Check if address is in context's address space - */ - static int -@@ -293,7 +278,7 @@ - /* - * Check if page is mapped - */ --inline int -+static inline int - s390_pte_present(unsigned long x) - { - if(THIS_KERNEL_VERSION >= LINUX(2,6,0)) { -@@ -483,7 +468,7 @@ - return FALSE; - } - fprintf(fp,"PTE PHYSICAL FLAGS\n"); -- fprintf(fp,"%08x %08x",pte, pte & S390_PAGE_BASE_MASK); -+ fprintf(fp,"%08lx %08lx",pte, pte & S390_PAGE_BASE_MASK); - fprintf(fp," ("); - if(pte & S390_PAGE_INVALID) - fprintf(fp,"INVALID "); -@@ -510,7 +495,7 @@ - /* - * returns cpu number of task - */ --int -+static int - s390_cpu_of_task(unsigned long task) - { - int cpu; -@@ -551,12 +536,13 @@ - return FALSE; - } else { - /* Linux 2.6 */ -- unsigned long runqueue_addr, runqueue_offset, per_cpu_offset; -+ unsigned long runqueue_addr, runqueue_offset; - unsigned long cpu_offset, per_cpu_offset_addr, running_task; -- char runqueue[4096]; -+ char *runqueue; - int cpu; - - cpu = s390_cpu_of_task(task); -+ runqueue = GETBUF(SIZE(runqueue)); - - runqueue_offset=symbol_value("per_cpu__runqueues"); - per_cpu_offset_addr=symbol_value("__per_cpu_offset"); -@@ -564,10 +550,10 @@ - &cpu_offset, sizeof(long),"per_cpu_offset", - FAULT_ON_ERROR); - runqueue_addr=runqueue_offset + cpu_offset; -- readmem(runqueue_addr,KVADDR,&runqueue,sizeof(runqueue), -+ readmem(runqueue_addr,KVADDR,runqueue,SIZE(runqueue), - "runqueue", FAULT_ON_ERROR); -- running_task = *((unsigned long*)&runqueue[MEMBER_OFFSET( -- "runqueue", "curr")]); -+ running_task = ULONG(runqueue + OFFSET(runqueue_curr)); -+ FREEBUF(runqueue); - if(running_task == task) - return TRUE; - else -@@ -700,7 +686,7 @@ - } else if(skip_first_frame){ - skip_first_frame=0; - } else { -- fprintf(fp," #%i [%08x] ",i,backchain); -+ fprintf(fp," #%i [%08lx] ",i,backchain); - fprintf(fp,"%s at %x\n", closest_symbol(r14), r14); - if (bt->flags & BT_LINE_NUMBERS) - s390_dump_line_number(r14); -@@ -716,13 +702,15 @@ - frame_size = stack_base - old_backchain - + KERNEL_STACK_SIZE; - } else { -- frame_size = backchain - old_backchain; -+ frame_size = MIN((backchain - old_backchain), -+ 
(stack_base - old_backchain + -+ KERNEL_STACK_SIZE)); - } - for(j=0; j< frame_size; j+=4){ - if(j % 16 == 0){ -- fprintf(fp,"\n%08x: ",old_backchain+j); -+ fprintf(fp,"\n%08lx: ",old_backchain+j); - } -- fprintf(fp," %08x",ULONG(&stack[old_backchain - -+ fprintf(fp," %08lx",ULONG(&stack[old_backchain - - stack_base + j])); - } - fprintf(fp,"\n\n"); -@@ -771,10 +759,10 @@ - return; - } - fprintf(fp," LOWCORE INFO:\n"); -- fprintf(fp," -psw : %#010x %#010x\n", tmp[0], -+ fprintf(fp," -psw : %#010lx %#010lx\n", tmp[0], - tmp[1]); - if(show_symbols){ -- fprintf(fp," -function : %s at %x\n", -+ fprintf(fp," -function : %s at %lx\n", - closest_symbol(tmp[1] & S390_ADDR_MASK), - tmp[1] & S390_ADDR_MASK); - if (bt->flags & BT_LINE_NUMBERS) -@@ -783,12 +771,12 @@ - ptr = lc + MEMBER_OFFSET("_lowcore","cpu_timer_save_area"); - tmp[0]=UINT(ptr); - tmp[1]=UINT(ptr + S390_WORD_SIZE); -- fprintf(fp," -cpu timer: %#010x %#010x\n", tmp[0],tmp[1]); -+ fprintf(fp," -cpu timer: %#010lx %#010lx\n", tmp[0],tmp[1]); - - ptr = lc + MEMBER_OFFSET("_lowcore","clock_comp_save_area"); - tmp[0]=UINT(ptr); - tmp[1]=UINT(ptr + S390_WORD_SIZE); -- fprintf(fp," -clock cmp: %#010x %#010x\n", tmp[0], tmp[1]); -+ fprintf(fp," -clock cmp: %#010lx %#010lx\n", tmp[0], tmp[1]); - - fprintf(fp," -general registers:\n"); - ptr = lc + MEMBER_OFFSET("_lowcore","gpregs_save_area"); -@@ -796,25 +784,25 @@ - tmp[1]=ULONG(ptr + S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0],tmp[1],tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); - tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0],tmp[1],tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); - tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0],tmp[1],tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE); - tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - - fprintf(fp," -access registers:\n"); -@@ -823,25 +811,25 @@ - tmp[1]=ULONG(ptr + S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); - tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); - tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE); - tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE); - 
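/*
 * Sketch of the frame-size clamp added to the s390 backtrace code above:
 * if a corrupt backchain pointer jumps far beyond the stack, the full-frame
 * dump is now capped at the bytes remaining between the current frame and
 * the top of the kernel stack.  The addresses and the 8KB KERNEL_STACK_SIZE
 * are illustrative values, not taken from a real dump.
 */
#include <stdio.h>

#define KERNEL_STACK_SIZE 8192UL
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

int main(void)
{
        unsigned long stack_base = 0x1000000UL;
        unsigned long old_backchain = stack_base + 0x100;       /* current frame */
        unsigned long backchain = stack_base + 0x7fff0000UL;    /* corrupt link */

        unsigned long frame_size = MIN((backchain - old_backchain),
                                       (stack_base - old_backchain + KERNEL_STACK_SIZE));

        printf("clamped frame size: %lu bytes\n", frame_size);  /* 7936, not ~2GB */
        return 0;
}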
tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - - fprintf(fp," -control registers:\n"); -@@ -850,26 +838,26 @@ - tmp[1]=ULONG(ptr + S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); - tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - - tmp[0]=ULONG(ptr); - tmp[1]=ULONG(ptr + S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); - tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - - ptr = lc + MEMBER_OFFSET("_lowcore","floating_pt_save_area"); -@@ -878,8 +866,8 @@ - tmp[1]=ULONG(ptr + 2 * S390_WORD_SIZE); - tmp[2]=ULONG(ptr + 4 * S390_WORD_SIZE); - tmp[3]=ULONG(ptr + 6 * S390_WORD_SIZE); -- fprintf(fp," %#018llx %#018llx\n", tmp[0], tmp[1]); -- fprintf(fp," %#018llx %#018llx\n", tmp[2], tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0], tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2], tmp[3]); - } - - /* ---- crash/s390x.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/s390x.c 2007-04-25 08:58:52.000000000 -0400 -@@ -1,9 +1,9 @@ - /* s390.c - core analysis suite - * - * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -- * Copyright (C) 2005 Michael Holzheu, IBM Corporation -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2005, 2006 Michael Holzheu, IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -173,7 +173,8 @@ - machdep->nr_irqs = 0; /* TBD */ - machdep->vmalloc_start = s390x_vmalloc_start; - machdep->dump_irq = s390x_dump_irq; -- machdep->hz = HZ; -+ if (!machdep->hz) -+ machdep->hz = HZ; - break; - - case POST_INIT: -@@ -193,8 +194,6 @@ - fprintf(fp, " flags: %lx (", machdep->flags); - if (machdep->flags & KSYMS_START) - fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); -- if (machdep->flags & SYSRQ) -- fprintf(fp, "%sSYSRQ", others++ ? 
"|" : ""); - fprintf(fp, ")\n"); - - fprintf(fp, " kvbase: %lx\n", machdep->kvbase); -@@ -207,7 +206,8 @@ - fprintf(fp, " hz: %d\n", machdep->hz); - fprintf(fp, " mhz: %ld\n", machdep->mhz); - fprintf(fp, " memsize: %lld (0x%llx)\n", -- machdep->memsize, machdep->memsize); -+ (unsigned long long)machdep->memsize, -+ (unsigned long long)machdep->memsize); - fprintf(fp, " bits: %d\n", machdep->bits); - fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); - fprintf(fp, " eframe_search: s390x_eframe_search()\n"); -@@ -245,19 +245,6 @@ - } - - /* -- * Check if address is in the vmalloc area -- */ --int --s390x_IS_VMALLOC_ADDR(ulong addr) --{ -- static unsigned long high_memory = 0; -- if(!high_memory){ -- high_memory = s390x_vmalloc_start(); -- } -- return (addr > high_memory); --} -- --/* - * Check if address is in context's address space - */ - static int -@@ -308,7 +295,7 @@ - /* - * Check if page is mapped - */ --inline int s390x_pte_present(unsigned long x){ -+static inline int s390x_pte_present(unsigned long x){ - if(THIS_KERNEL_VERSION >= LINUX(2,6,0)){ - return !((x) & S390X_PAGE_INVALID) || - ((x) & S390X_PAGE_INVALID_MASK) == S390X_PAGE_INVALID_NONE; -@@ -514,7 +501,7 @@ - return FALSE; - } - fprintf(fp,"PTE PHYSICAL FLAGS\n"); -- fprintf(fp,"%08x %08x",pte, pte & S390X_PAGE_BASE_MASK); -+ fprintf(fp,"%08lx %08llx",pte, pte & S390X_PAGE_BASE_MASK); - fprintf(fp," ("); - if(pte & S390X_PAGE_INVALID) - fprintf(fp,"INVALID "); -@@ -541,7 +528,7 @@ - /* - * returns cpu number of task - */ --int -+static int - s390x_cpu_of_task(unsigned long task) - { - unsigned int cpu; -@@ -583,12 +570,13 @@ - return FALSE; - } else { - /* Linux 2.6 */ -- unsigned long runqueue_addr, runqueue_offset, per_cpu_offset; -+ unsigned long runqueue_addr, runqueue_offset; - unsigned long cpu_offset, per_cpu_offset_addr, running_task; -- char runqueue[4096]; -+ char *runqueue; - int cpu; - - cpu = s390x_cpu_of_task(task); -+ runqueue = GETBUF(SIZE(runqueue)); - - runqueue_offset=symbol_value("per_cpu__runqueues"); - per_cpu_offset_addr=symbol_value("__per_cpu_offset"); -@@ -596,10 +584,10 @@ - &cpu_offset, sizeof(long),"per_cpu_offset", - FAULT_ON_ERROR); - runqueue_addr=runqueue_offset + cpu_offset; -- readmem(runqueue_addr,KVADDR,&runqueue,sizeof(runqueue), -+ readmem(runqueue_addr,KVADDR,runqueue,SIZE(runqueue), - "runqueue", FAULT_ON_ERROR); -- running_task = *((unsigned long*)&runqueue[MEMBER_OFFSET( -- "runqueue", "curr")]); -+ running_task = ULONG(runqueue + OFFSET(runqueue_curr)); -+ FREEBUF(runqueue); - if(running_task == task) - return TRUE; - else -@@ -733,7 +721,7 @@ - } else if(skip_first_frame){ - skip_first_frame=0; - } else { -- fprintf(fp," #%i [%08x] ",i,backchain); -+ fprintf(fp," #%i [%08lx] ",i,backchain); - fprintf(fp,"%s at %x\n", closest_symbol(r14), r14); - if (bt->flags & BT_LINE_NUMBERS) - s390x_dump_line_number(r14); -@@ -743,22 +731,25 @@ - backchain = ULONG(&stack[backchain - stack_base + bc_offset]); - - /* print stack content if -f is specified */ -- if((bt->flags & BT_FULL) && !BT_REFERENCE_CHECK(bt)){ -+ if ((bt->flags & BT_FULL) && !BT_REFERENCE_CHECK(bt)) { - int frame_size; -- if(backchain == 0){ -+ if (backchain == 0) { - frame_size = stack_base - old_backchain - + KERNEL_STACK_SIZE; - } else { -- frame_size = backchain - old_backchain; -+ frame_size = MIN((backchain - old_backchain), -+ (stack_base - old_backchain + -+ KERNEL_STACK_SIZE)); - } -- for(j=0; j< frame_size; j+=4){ -+ for (j = 0; j < frame_size; j += 8) { - if(j % 16 == 0){ -- fprintf(fp,"\n%08x: ",old_backchain+j); 
-+ fprintf(fp, "%s %016lx: ", -+ j ? "\n" : "", old_backchain + j); - } -- fprintf(fp," %08x",ULONG(&stack[old_backchain - -- stack_base + j])); -+ fprintf(fp," %016lx", -+ ULONG(&stack[old_backchain - stack_base + j])); - } -- fprintf(fp,"\n\n"); -+ fprintf(fp, "\n"); - } - - /* Check for interrupt stackframe */ -@@ -804,26 +795,26 @@ - return; - } - fprintf(fp," LOWCORE INFO:\n"); -- fprintf(fp," -psw : %#018x %#018x\n", tmp[0], tmp[1]); -+ fprintf(fp," -psw : %#018lx %#018lx\n", tmp[0], tmp[1]); - if(show_symbols){ -- fprintf(fp," -function : %s at %x\n", -+ fprintf(fp," -function : %s at %lx\n", - closest_symbol(tmp[1]), tmp[1]); - if (bt->flags & BT_LINE_NUMBERS) - s390x_dump_line_number(tmp[1]); - } - ptr = lc + MEMBER_OFFSET("_lowcore","prefixreg_save_area"); - tmp[0] = UINT(ptr); -- fprintf(fp," -prefix : %#010x\n", tmp[0]); -+ fprintf(fp," -prefix : %#010lx\n", tmp[0]); - - ptr = lc + MEMBER_OFFSET("_lowcore","cpu_timer_save_area"); - tmp[0]=UINT(ptr); - tmp[1]=UINT(ptr + S390X_WORD_SIZE); -- fprintf(fp," -cpu timer: %#010x %#010x\n", tmp[0],tmp[1]); -+ fprintf(fp," -cpu timer: %#010lx %#010lx\n", tmp[0],tmp[1]); - - ptr = lc + MEMBER_OFFSET("_lowcore","clock_comp_save_area"); - tmp[0]=UINT(ptr); - tmp[1]=UINT(ptr + S390X_WORD_SIZE); -- fprintf(fp," -clock cmp: %#010x %#010x\n", tmp[0], tmp[1]); -+ fprintf(fp," -clock cmp: %#010lx %#010lx\n", tmp[0], tmp[1]); - - fprintf(fp," -general registers:\n"); - ptr = lc + MEMBER_OFFSET("_lowcore","gpregs_save_area"); -@@ -831,26 +822,26 @@ - tmp[1]=ULONG(ptr + S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); - tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 8 * S390X_WORD_SIZE); - tmp[1]=ULONG(ptr + 9 * S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 10* S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 11* S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 12* S390X_WORD_SIZE); - tmp[1]=ULONG(ptr + 13* S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 14* S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 15* S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - - fprintf(fp," -access registers:\n"); - ptr = lc + MEMBER_OFFSET("_lowcore","access_regs_save_area"); -@@ -858,25 +849,25 @@ - tmp[1]=ULONG(ptr + 4); - tmp[2]=ULONG(ptr + 2 * 4); - tmp[3]=ULONG(ptr + 3 * 4); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - tmp[0]=ULONG(ptr + 4 * 4); - tmp[1]=ULONG(ptr + 5 * 4); - tmp[2]=ULONG(ptr + 6 * 4); - tmp[3]=ULONG(ptr + 7 * 4); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], 
tmp[2], tmp[3]); - tmp[0]=ULONG(ptr + 8 * 4); - tmp[1]=ULONG(ptr + 9 * 4); - tmp[2]=ULONG(ptr + 10* 4); - tmp[3]=ULONG(ptr + 11* 4); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - tmp[0]=ULONG(ptr + 12* 4); - tmp[1]=ULONG(ptr + 13* 4); - tmp[2]=ULONG(ptr + 14* 4); - tmp[3]=ULONG(ptr + 15* 4); -- fprintf(fp," %#010x %#010x %#010x %#010x\n", -+ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", - tmp[0], tmp[1], tmp[2], tmp[3]); - - fprintf(fp," -control registers:\n"); -@@ -885,26 +876,26 @@ - tmp[1]=ULONG(ptr + S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); - tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr); - tmp[1]=ULONG(ptr + S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); - tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - - ptr = lc + MEMBER_OFFSET("_lowcore","floating_pt_save_area"); - fprintf(fp," -floating point registers 0,2,4,6:\n"); -@@ -912,26 +903,26 @@ - tmp[1]=ULONG(ptr + S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); - tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 6 * S390X_WORD_SIZE); - tmp[1]=ULONG(ptr + 7 * S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 8 * S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 9 * S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - tmp[0]=ULONG(ptr + 10* S390X_WORD_SIZE); - tmp[1]=ULONG(ptr + 11* S390X_WORD_SIZE); - tmp[2]=ULONG(ptr + 12* S390X_WORD_SIZE); - tmp[3]=ULONG(ptr + 13* S390X_WORD_SIZE); -- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); -- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); -+ fprintf(fp," %#018lx %#018lx\n", 
tmp[0],tmp[1]); -+ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); - } - - /* ---- crash/s390dbf.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/s390dbf.c 2006-08-14 13:58:56.000000000 -0400 -@@ -0,0 +1,1340 @@ -+/* -+ * s390 debug feature command for crash -+ * -+ * Copyright (C) IBM Corp. 2006 -+ * Author(s): Michael Holzheu -+ */ -+ -+#if defined(S390) || defined(S390X) -+ -+#include "defs.h" -+#include -+#include -+ -+/* -+ * Compat layer to integrate lcrash commands into crash -+ * Maps lcrash API to crash functions -+ */ -+ -+#define KL_NBPW sizeof(long) -+#define KL_ERRORFP stderr -+#define MAX_ARGS 128 -+#define MAX_CMDLINE 256 -+ -+#define C_FALSE 0x00000001 /* Command takes no arguments */ -+#define C_TRUE 0x00000002 /* Command requires arguments */ -+#define C_ALL 0x00000004 /* All elements */ -+#define C_PERM 0x00000008 /* Allocate perminant blocks */ -+#define C_TEMP 0x00000000 /* For completeness */ -+#define C_FULL 0x00000010 /* Full output */ -+#define C_LIST 0x00000020 /* List items */ -+#define C_NEXT 0x00000040 /* Follow links */ -+#define C_WRITE 0x00000080 /* Write output to file */ -+#define C_NO_OPCHECK 0x00000100 /* Don't reject bad cmd line options */ -+#define C_ITER 0x00000200 /* set iteration threshold */ -+ -+#define C_LFLG_SHFT 12 -+ -+#define KL_ARCH_S390 0 -+#define KL_ARCH_S390X 1 -+#ifdef __s390x__ -+#define KL_ARCH KL_ARCH_S390X -+#define FMTPTR "l" -+#define KL_PTRSZ 8 -+#else -+#define KL_ARCH KL_ARCH_S390 -+#define FMTPTR "ll" -+#define KL_PTRSZ 4 -+#endif -+ -+typedef unsigned long uaddr_t; -+typedef unsigned long kaddr_t; -+ -+typedef struct _syment { -+ char *s_name; -+ kaddr_t s_addr; -+} syment_t; -+ -+typedef struct option_s { -+ struct option_s *op_next; -+ char op_char; -+ char *op_arg; -+} option_t; -+ -+typedef struct command_s { -+ int flags; -+ char cmdstr[MAX_CMDLINE]; -+ char *command; -+ char *cmdline; -+ option_t *options; -+ int nargs; -+ char *args[MAX_ARGS]; -+ char *pipe_cmd; -+ FILE *ofp; -+ FILE *efp; -+} command_t; -+ -+static inline syment_t* kl_lkup_symaddr(kaddr_t addr) -+{ -+ static syment_t sym; -+ struct syment *crash_sym; -+ -+ crash_sym = value_search(addr, &sym.s_addr); -+ if (!crash_sym) -+ return NULL; -+ sym.s_name = crash_sym->name; -+ return &sym; -+} -+ -+static inline syment_t* kl_lkup_symname(char* name) -+{ -+ static syment_t sym; -+ sym.s_addr = symbol_value(name); -+ sym.s_name = NULL; -+ if(!sym.s_addr) -+ return NULL; -+ else -+ return &sym; -+} -+ -+static inline void GET_BLOCK(kaddr_t addr, int size, void* ptr) -+{ -+ readmem(addr, KVADDR,ptr,size,"GET_BLOCK",FAULT_ON_ERROR); -+} -+ -+static inline kaddr_t KL_VREAD_PTR(kaddr_t addr) -+{ -+ unsigned long ptr; -+ readmem(addr, KVADDR,&ptr,sizeof(ptr),"GET_BLOCK",FAULT_ON_ERROR); -+ return (kaddr_t)ptr; -+} -+ -+static inline uint32_t KL_GET_UINT32(void* ptr) -+{ -+ return *((uint32_t*)ptr); -+} -+ -+static inline uint64_t KL_GET_UINT64(void* ptr) -+{ -+ return *((uint64_t*)ptr); -+} -+ -+static inline kaddr_t KL_GET_PTR(void* ptr) -+{ -+ return *((kaddr_t*)ptr); -+} -+ -+static inline void* K_PTR(void* addr, char* struct_name, char* member_name) -+{ -+ return addr+MEMBER_OFFSET(struct_name,member_name); -+} -+ -+static inline uint32_t KL_UINT(void* ptr, char* struct_name, char* member_name) -+{ -+ return (uint32_t) ULONG(ptr+MEMBER_OFFSET(struct_name,member_name)); -+} -+ -+static inline uint32_t KL_VREAD_UINT32(kaddr_t addr) -+{ -+ uint32_t rc; -+ readmem(addr, KVADDR,&rc,sizeof(rc),"KL_VREAD_UINT32",FAULT_ON_ERROR); -+ return rc; -+} -+ -+static 
inline uint32_t KL_INT(void* ptr, char* struct_name, char* member_name) -+{ -+ return UINT(ptr+MEMBER_OFFSET(struct_name,member_name)); -+} -+ -+static inline int set_cmd_flags(command_t *cmd, int flags, char *extraops) -+{ -+ return 0; -+} -+ -+static inline void kl_s390tod_to_timeval(uint64_t todval, struct timeval *xtime) -+{ -+ todval -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); -+ -+ todval >>= 12; -+ xtime->tv_sec = todval / 1000000; -+ xtime->tv_usec = todval % 1000000; -+} -+ -+static inline int kl_struct_len(char* struct_name) -+{ -+ return STRUCT_SIZE(struct_name); -+} -+ -+static inline kaddr_t kl_funcaddr(kaddr_t addr) -+{ -+ struct syment *crash_sym; -+ -+ crash_sym = value_search(addr, &addr); -+ if (!crash_sym) -+ return -1; -+ else -+ return crash_sym->value; -+} -+ -+#define CMD_USAGE(cmd, s) \ -+ fprintf(cmd->ofp, "Usage: %s %s\n", cmd->command, s); \ -+ fprintf(cmd->ofp, "Enter \"help %s\" for details.\n",cmd->command); -+ -+/* -+ * s390 debug feature implementation -+ */ -+ -+#ifdef DBF_DYNAMIC_VIEWS /* views defined in shared libs */ -+#include -+#endif -+ -+/* Local flags -+ */ -+ -+#define LOAD_FLAG (1 << C_LFLG_SHFT) -+#define VIEWS_FLAG (2 << C_LFLG_SHFT) -+ -+#ifndef MIN -+#define MIN(a,b) (((a)<(b))?(a):(b)) -+#endif -+ -+/* Stuff which has to match with include/asm-s390/debug.h */ -+ -+#define DBF_VERSION_V1 1 -+#define DBF_VERSION_V2 2 -+#define PAGE_SIZE 4096 -+#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ -+#define DEBUG_MAX_PROCF_LEN 16 /* max length for a proc file name */ -+#define DEBUG_SPRINTF_MAX_ARGS 10 -+ -+/* define debug-structures for lcrash */ -+#define DEBUG_DATA(entry) (char*)(entry + 1) -+ -+typedef struct debug_view_s debug_view_t; -+ -+/* struct to hold contents of struct __debug_entry from dump -+ */ -+typedef struct debug_entry_s{ -+ union { -+ struct { -+ unsigned long long clock:52; -+ unsigned long long exception:1; -+ unsigned long long level:3; -+ unsigned long long cpuid:8; -+ } fields; -+ -+ unsigned long long stck; -+ } id; -+ kaddr_t caller; /* changed from void* to kaddr_t */ -+} __attribute__((packed)) debug_entry_t; -+/* typedef struct __debug_entry debug_entry_t; */ -+ -+ -+static unsigned int dbf_version; -+ -+/* struct is used to manage contents of structs debug_info from dump -+ * in lcrash -+ */ -+typedef struct debug_info_s { -+ struct debug_info_s *next; -+ struct debug_info_s *prev; -+ kaddr_t next_dbi; /* store next ptr of struct in dump */ -+ kaddr_t prev_dbi; /* store prev ptr of struct in dump */ -+ int level; -+ int nr_areas; -+ int page_order; -+ int buf_size; -+ int entry_size; -+ void **areas; /* contents of debug areas from dump */ -+ int active_area; -+ int *active_entry; /* change to uint32_t ? 
*/ -+ debug_view_t *views[DEBUG_MAX_VIEWS]; -+ char name[DEBUG_MAX_PROCF_LEN]; -+ kaddr_t addr; -+ int pages_per_area_v2; -+ void ***areas_v2; -+} debug_info_t; -+ -+ -+/* functions to generate dbf output -+ */ -+typedef int (debug_header_proc_t) (debug_info_t* id, debug_view_t* view, -+ int area, debug_entry_t* entry, -+ char* out_buf); -+typedef int (debug_format_proc_t) (debug_info_t* id, debug_view_t* view, -+ char* out_buf, const char* in_buf); -+typedef int (debug_prolog_proc_t) (debug_info_t* id, debug_view_t* view, -+ char* out_buf); -+ -+struct debug_view_s { -+ char name[DEBUG_MAX_PROCF_LEN]; -+ debug_prolog_proc_t* prolog_proc; -+ debug_header_proc_t* header_proc; -+ debug_format_proc_t* format_proc; -+ void* private_data; -+}; -+ -+#define LCRASH_DB_VIEWS 1000 -+ -+static debug_info_t *debug_area_first = NULL; -+static debug_info_t *debug_area_last = NULL; -+static debug_view_t *debug_views[LCRASH_DB_VIEWS]; -+static int initialized = 0; -+static iconv_t ebcdic_ascii_conv = 0; -+ -+void s390dbf_usage(command_t * cmd); -+static int add_lcrash_debug_view(debug_view_t *); -+static int dbe_size = 0; -+ -+static void -+EBCASC(char *inout, size_t len) -+{ -+ iconv(ebcdic_ascii_conv, &inout, &len, &inout, &len); -+} -+ -+/* -+ * prints header for debug entry -+ */ -+static int -+dflt_header_fn(debug_info_t * id, debug_view_t *view, -+ int area, debug_entry_t * entry, char *out_buf) -+{ -+ struct timeval time_val; -+ unsigned long long time; -+ char *except_str; -+ kaddr_t caller; -+ int rc = 0; -+ char *caller_name; -+ int offset; -+ char caller_buf[30]; -+ unsigned int level; -+ syment_t *caller_sym; -+ debug_entry_t lentry; /* store byte swapped values of entry */ -+ -+ lentry.id.stck = KL_GET_UINT64(&entry->id); -+ lentry.caller = KL_GET_PTR(&entry->caller); -+ level = lentry.id.fields.level; -+ time = lentry.id.stck; -+ -+ kl_s390tod_to_timeval(time, &time_val); -+ -+ if (lentry.id.fields.exception) -+ except_str = "*"; -+ else -+ except_str = "-"; -+ caller = lentry.caller; -+ if(KL_ARCH == KL_ARCH_S390){ -+ caller &= 0x7fffffff; -+ } -+ caller_sym = kl_lkup_symaddr(caller); -+ if(caller_sym){ -+ caller_name = caller_sym->s_name; -+ offset = caller - kl_funcaddr(caller); -+ } -+ else { -+ sprintf(caller_buf, "%llx", (unsigned long long)caller); -+ caller_name = caller_buf; -+ offset = 0; -+ } -+ -+ if(KL_ARCH == KL_ARCH_S390X){ -+ rc += sprintf(out_buf, -+ "%02i %011lu:%06lu %1u %1s %02i <%20s+%04i> ", -+ area, time_val.tv_sec, time_val.tv_usec, level, -+ except_str, entry->id.fields.cpuid, caller_name, -+ offset); -+ } else { -+ rc += sprintf(out_buf, -+ "%02i %011lu:%06lu %1u %1s %02i <%-20s+%04i> ", -+ area, time_val.tv_sec, time_val.tv_usec, level, -+ except_str, lentry.id.fields.cpuid, caller_name, -+ offset); -+ } -+ return rc; -+} -+ -+/* -+ * prints debug header in raw format -+ */ -+static int -+raw_header_fn(debug_info_t * id, debug_view_t *view, -+ int area, debug_entry_t * entry, char *out_buf) -+{ -+ int rc; -+ -+ rc = sizeof(debug_entry_t); -+ if (out_buf == NULL) -+ goto out; -+ memcpy(out_buf,entry,sizeof(debug_entry_t)); -+ out: -+ return rc; -+} -+ -+/* -+ * prints debug data in raw format -+ */ -+static int -+raw_format_fn(debug_info_t * id, debug_view_t *view, -+ char *out_buf, const char *in_buf) -+{ -+ int rc; -+ -+ rc = id->buf_size; -+ if (out_buf == NULL || in_buf == NULL) -+ goto out; -+ memcpy(out_buf, in_buf, id->buf_size); -+ out: -+ return rc; -+} -+ -+/* -+ * prints debug data in hex/ascii format -+ */ -+static int -+hex_ascii_format_fn(debug_info_t 
* id, debug_view_t *view, -+ char *out_buf, const char *in_buf) -+{ -+ int i, rc = 0; -+ -+ if (out_buf == NULL || in_buf == NULL) { -+ rc = id->buf_size * 4 + 3; -+ goto out; -+ } -+ for (i = 0; i < id->buf_size; i++) { -+ rc += sprintf(out_buf + rc, "%02x ", -+ ((unsigned char *) in_buf)[i]); -+ } -+ rc += sprintf(out_buf + rc, "| "); -+ for (i = 0; i < id->buf_size; i++) { -+ unsigned char c = in_buf[i]; -+ if (!isprint(c)) -+ rc += sprintf(out_buf + rc, "."); -+ else -+ rc += sprintf(out_buf + rc, "%c", c); -+ } -+ rc += sprintf(out_buf + rc, "\n"); -+ out: -+ return rc; -+} -+ -+/* -+ * prints debug data in sprintf format -+ */ -+static int -+sprintf_format_fn(debug_info_t * id, debug_view_t *view, -+ char *out_buf, const char *in_buf) -+{ -+#define _BUFSIZE 1024 -+ char buf[_BUFSIZE]; -+ int i, k, rc = 0, num_longs = 0, num_used_args = 0, num_strings = 0; -+ /* use kaddr_t to store long values of 32bit and 64bit archs here */ -+ kaddr_t inbuf_cpy[DEBUG_SPRINTF_MAX_ARGS]; -+ /* store ptrs to strings to be deallocated at end of this function */ -+ uaddr_t to_dealloc[DEBUG_SPRINTF_MAX_ARGS]; -+ kaddr_t addr; -+ -+ memset(buf, 0, sizeof(buf)); -+ memset(inbuf_cpy, 0, sizeof(inbuf_cpy)); -+ memset(to_dealloc, 0, sizeof(to_dealloc)); -+ -+ if (out_buf == NULL || in_buf == NULL) { -+ rc = id->buf_size * 4 + 3; -+ goto out; -+ } -+ -+ /* get the format string into buf */ -+ addr = KL_GET_PTR((void*)in_buf); -+ GET_BLOCK(addr, _BUFSIZE, buf); -+ -+ k = 0; -+ for (i = 0; buf[i] && (buf[i] != '\n'); i++) { -+ if (buf[i] != '%') -+ continue; -+ if (k == DEBUG_SPRINTF_MAX_ARGS) { -+ fprintf(KL_ERRORFP, -+ "\nToo much parameters in sprinf view (%i)\n" -+ ,k + 1); -+ fprintf(KL_ERRORFP, "Format String: %s)\n", buf); -+ break; -+ } -+ /* for sprintf we have only unsigned long values ... */ -+ if (buf[i+1] != 's'){ -+ /* we use KL_GET_PTR here to read ulong value */ -+ addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW)); -+ inbuf_cpy[k] = addr; -+ } else { /* ... 
or ptrs to strings in debug areas */ -+ inbuf_cpy[k] = (uaddr_t) malloc(_BUFSIZE); -+ to_dealloc[num_strings++] = inbuf_cpy[k]; -+ addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW)); -+ GET_BLOCK(addr, _BUFSIZE, -+ (void*)(uaddr_t)(inbuf_cpy[k])); -+ } -+ k++; -+ } -+ -+ /* count of longs fit into one entry */ -+ num_longs = id->buf_size / KL_NBPW; /* sizeof(long); */ -+ if(num_longs < 1) /* bufsize of entry too small */ -+ goto out; -+ if(num_longs == 1) { /* no args, just print the format string */ -+ rc = sprintf(out_buf + rc, "%s", buf); -+ goto out; -+ } -+ -+ /* number of arguments used for sprintf (without the format string) */ -+ num_used_args = MIN(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1)); -+ -+ rc = sprintf(out_buf + rc, buf, (uaddr_t)(inbuf_cpy[0]), -+ (uaddr_t)(inbuf_cpy[1]), (uaddr_t)(inbuf_cpy[2]), -+ (uaddr_t)(inbuf_cpy[3]), (uaddr_t)(inbuf_cpy[4]), -+ (uaddr_t)(inbuf_cpy[5]), (uaddr_t)(inbuf_cpy[6]), -+ (uaddr_t)(inbuf_cpy[7]), (uaddr_t)(inbuf_cpy[8]), -+ (uaddr_t)(inbuf_cpy[9])); -+ out: -+ while (num_strings--){ -+ free((char*)(to_dealloc[num_strings])); -+ } -+ return rc; -+} -+ -+ -+/*********************************** -+ * functions for debug-views -+ ***********************************/ -+ -+/* -+ * prints out actual debug level -+ */ -+static int -+prolog_level_fn(debug_info_t * id, -+ debug_view_t *view, char *out_buf) -+{ -+ int rc = 0; -+ -+ if (out_buf == NULL) { -+ rc = 2; -+ goto out; -+ } -+ rc = sprintf(out_buf, "%i\n", id->level); -+ out: -+ return rc; -+} -+ -+/* -+ * prints out actual pages_per_area -+ */ -+static int -+prolog_pages_fn(debug_info_t * id, -+ debug_view_t *view, char *out_buf) -+{ -+ int rc = 0; -+ -+ if (out_buf == NULL) { -+ rc = 2; -+ goto out; -+ } -+ rc = sprintf(out_buf, "%i\n", id->pages_per_area_v2); -+ out: -+ return rc; -+} -+ -+/* -+ * prints out prolog -+ */ -+static int -+prolog_fn(debug_info_t * id, -+ debug_view_t *view, char *out_buf) -+{ -+ int rc = 0; -+ -+ rc = sprintf(out_buf, "AREA TIME LEVEL EXCEPTION CP CALLING FUNCTION" -+ " + OFFSET DATA\n===================================" -+ "=======================================\n"); -+ return rc; -+} -+ -+/* -+ * prints debug data in hex format -+ */ -+static int -+hex_format_fn(debug_info_t * id, debug_view_t *view, -+ char *out_buf, const char *in_buf) -+{ -+ int i, rc = 0; -+ -+ for (i = 0; i < id->buf_size; i++) { -+ rc += sprintf(out_buf + rc, "%02x ", -+ ((unsigned char *) in_buf)[i]); -+ } -+ rc += sprintf(out_buf + rc, "\n"); -+ return rc; -+} -+ -+/* -+ * prints debug data in ascii format -+ */ -+static int -+ascii_format_fn(debug_info_t * id, debug_view_t *view, -+ char *out_buf, const char *in_buf) -+{ -+ int i, rc = 0; -+ -+ if (out_buf == NULL || in_buf == NULL) { -+ rc = id->buf_size + 1; -+ goto out; -+ } -+ for (i = 0; i < id->buf_size; i++) { -+ unsigned char c = in_buf[i]; -+ if (!isprint(c)) -+ rc += sprintf(out_buf + rc, "."); -+ else -+ rc += sprintf(out_buf + rc, "%c", c); -+ } -+ rc += sprintf(out_buf + rc, "\n"); -+ out: -+ return rc; -+} -+ -+/* -+ * prints debug data in ebcdic format -+ */ -+static int -+ebcdic_format_fn(debug_info_t * id, debug_view_t *view, -+ char *out_buf, const char *in_buf) -+{ -+ int i, rc = 0; -+ -+ if (out_buf == NULL || in_buf == NULL) { -+ rc = id->buf_size + 1; -+ goto out; -+ } -+ for (i = 0; i < id->buf_size; i++) { -+ char c = in_buf[i]; -+ EBCASC(&c, 1); -+ if (!isprint(c)) -+ rc += sprintf(out_buf + rc, "."); -+ else -+ rc += sprintf(out_buf + rc, "%c", c); -+ } -+ rc += sprintf(out_buf + rc, "\n"); -+ out: -+ 
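/*
 * The TOD-clock conversion used by the s390dbf header formatter above,
 * pulled out as a standalone sketch: bit 51 of the s390 TOD clock ticks
 * once per microsecond, so after subtracting the epoch adjustment the
 * value shifted right by 12 is microseconds, which the helper splits
 * into seconds and microseconds.  The sample TOD value is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

static void s390tod_to_timeval(uint64_t todval, struct timeval *tv)
{
        todval -= 0x8126d60e46000000ULL - (0x3c26700ULL * 1000000 * 4096);
        todval >>= 12;
        tv->tv_sec = todval / 1000000;
        tv->tv_usec = todval % 1000000;
}

int main(void)
{
        struct timeval tv;
        uint64_t sample_tod = 0xbf00000000000000ULL;    /* made-up clock value */

        s390tod_to_timeval(sample_tod, &tv);
        printf("tod %#llx -> %ld.%06ld\n",
                (unsigned long long)sample_tod, (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
}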
return rc; -+} -+ -+debug_view_t ascii_view = { -+ "ascii", -+ &prolog_fn, -+ &dflt_header_fn, -+ &ascii_format_fn, -+}; -+ -+debug_view_t ebcdic_view = { -+ "ebcdic", -+ &prolog_fn, -+ &dflt_header_fn, -+ &ebcdic_format_fn, -+}; -+ -+debug_view_t hex_view = { -+ "hex", -+ &prolog_fn, -+ &dflt_header_fn, -+ &hex_format_fn, -+}; -+ -+debug_view_t level_view = { -+ "level", -+ &prolog_level_fn, -+ NULL, -+ NULL, -+}; -+ -+debug_view_t pages_view = { -+ "pages", -+ &prolog_pages_fn, -+ NULL, -+ NULL, -+}; -+ -+debug_view_t raw_view = { -+ "raw", -+ NULL, -+ &raw_header_fn, -+ &raw_format_fn, -+}; -+ -+debug_view_t hex_ascii_view = { -+ "hex_ascii", -+ &prolog_fn, -+ &dflt_header_fn, -+ &hex_ascii_format_fn, -+}; -+ -+debug_view_t sprintf_view = { -+ "sprintf", -+ &prolog_fn, -+ &dflt_header_fn, -+ &sprintf_format_fn, -+}; -+ -+ -+static debug_entry_t * -+debug_find_oldest_entry(debug_entry_t *entries, int num, int entry_size) -+{ -+ debug_entry_t *result, *current; -+ int i; -+ uint64_t clock1, clock2; -+ -+ result = entries; -+ current = entries; -+ for (i=0; i < num; i++) { -+ if (current->id.stck == 0) -+ break; -+ clock1 = current->id.fields.clock; -+ clock2 = result->id.fields.clock; -+ clock1 = KL_GET_UINT64(&clock1); -+ clock2 = KL_GET_UINT64(&clock2); -+ if (clock1 < clock2) -+ result = current; -+ current = (debug_entry_t *) ((char *) current + entry_size); -+ } -+ return result; -+} -+ -+ -+/* -+ * debug_format_output: -+ * - calls prolog, header and format functions of view to format output -+ */ -+static int -+debug_format_output_v1(debug_info_t * debug_area, debug_view_t *view, -+ FILE * ofp) -+{ -+ int i, j, len; -+ int nr_of_entries; -+ debug_entry_t *act_entry, *last_entry; -+ char *act_entry_data; -+ char buf[2048]; -+ -+ /* print prolog */ -+ if (view->prolog_proc) { -+ len = view->prolog_proc(debug_area, view, buf); -+ fwrite(buf,len, 1, ofp); -+ memset(buf, 0, 2048); -+ } -+ /* print debug records */ -+ if (!(view->format_proc) && !(view->header_proc)) -+ goto out; -+ if(debug_area->entry_size <= 0){ -+ fprintf(ofp, "Invalid entry_size: %i\n",debug_area->entry_size); -+ goto out; -+ } -+ nr_of_entries = (PAGE_SIZE << debug_area->page_order) / debug_area->entry_size; -+ for (i = 0; i < debug_area->nr_areas; i++) { -+ act_entry = debug_find_oldest_entry(debug_area->areas[i], -+ nr_of_entries, -+ debug_area->entry_size); -+ last_entry = (debug_entry_t *) ((char *) debug_area->areas[i] + -+ (PAGE_SIZE << debug_area->page_order) - -+ debug_area->entry_size); -+ for (j = 0; j < nr_of_entries; j++) { -+ act_entry_data = (char*)act_entry + dbe_size; -+ if (act_entry->id.stck == 0) -+ break; /* empty entry */ -+ if (view->header_proc) { -+ len = view->header_proc(debug_area, view, i, -+ act_entry, buf); -+ fwrite(buf,len, 1, ofp); -+ memset(buf, 0, 2048); -+ } -+ if (view->format_proc) { -+ len = view->format_proc(debug_area, view, -+ buf, act_entry_data); -+ fwrite(buf,len, 1, ofp); -+ memset(buf, 0, 2048); -+ } -+ act_entry = -+ (debug_entry_t *) (((char *) act_entry) + -+ debug_area->entry_size); -+ if (act_entry > last_entry) -+ act_entry = debug_area->areas[i]; -+ } -+ } -+ out: -+ return 1; -+} -+ -+/* -+ * debug_format_output_v2: -+ * - calls prolog, header and format functions of view to format output -+ */ -+static int -+debug_format_output_v2(debug_info_t * debug_area, -+ debug_view_t *view, FILE * ofp) -+{ -+ int i, j, k, len; -+ debug_entry_t *act_entry; -+ char *act_entry_data; -+ char buf[2048]; -+ -+ /* print prolog */ -+ if (view->prolog_proc) { -+ len = 
view->prolog_proc(debug_area, view, buf); -+ fwrite(buf,len, 1, ofp); -+ memset(buf, 0, 2048); -+ } -+ /* print debug records */ -+ if (!(view->format_proc) && !(view->header_proc)) -+ goto out; -+ if(debug_area->entry_size <= 0){ -+ fprintf(ofp, "Invalid entry_size: %i\n",debug_area->entry_size); -+ goto out; -+ } -+ for (i = 0; i < debug_area->nr_areas; i++) { -+ int nr_entries_per_page = PAGE_SIZE/debug_area->entry_size; -+ for (j = 0; j < debug_area->pages_per_area_v2; j++) { -+ act_entry = debug_area->areas_v2[i][j]; -+ for (k = 0; k < nr_entries_per_page; k++) { -+ act_entry_data = (char*)act_entry + dbe_size; -+ if (act_entry->id.stck == 0) -+ break; /* empty entry */ -+ if (view->header_proc) { -+ len = view->header_proc(debug_area, -+ view, i, act_entry, buf); -+ fwrite(buf,len, 1, ofp); -+ memset(buf, 0, 2048); -+ } -+ if (view->format_proc) { -+ len = view->format_proc(debug_area, -+ view, buf, act_entry_data); -+ fwrite(buf,len, 1, ofp); -+ memset(buf, 0, 2048); -+ } -+ act_entry = (debug_entry_t *) (((char *) -+ act_entry) + debug_area->entry_size); -+ } -+ } -+ } -+out: -+ return 1; -+} -+ -+static debug_info_t * -+find_debug_area(const char *area_name) -+{ -+ debug_info_t* act_debug_info = debug_area_first; -+ while(act_debug_info != NULL){ -+ if (strcmp(act_debug_info->name, area_name) == 0) -+ return act_debug_info; -+ act_debug_info = act_debug_info->next; -+ } -+ return NULL; -+} -+ -+static void -+dbf_init(void) -+{ -+ if (!initialized) { -+ if(dbf_version >= DBF_VERSION_V2) -+ add_lcrash_debug_view(&pages_view); -+ add_lcrash_debug_view(&ascii_view); -+ add_lcrash_debug_view(&level_view); -+ add_lcrash_debug_view(&ebcdic_view); -+ add_lcrash_debug_view(&hex_view); -+ add_lcrash_debug_view(&hex_ascii_view); -+ add_lcrash_debug_view(&sprintf_view); -+ add_lcrash_debug_view(&raw_view); -+ ebcdic_ascii_conv = iconv_open("ISO-8859-1", "EBCDIC-US"); -+ initialized = 1; -+ } -+} -+ -+static debug_view_t* -+get_debug_view(kaddr_t addr) -+{ -+ void* k_debug_view; -+ int k_debug_view_size; -+ debug_view_t* rc; -+ -+ rc = (debug_view_t*)malloc(sizeof(debug_view_t)); -+ memset(rc, 0, sizeof(debug_view_t)); -+ -+ k_debug_view_size = kl_struct_len("debug_view"); -+ k_debug_view = malloc(k_debug_view_size); -+ GET_BLOCK(addr, k_debug_view_size, k_debug_view); -+ strncpy(rc->name,K_PTR(k_debug_view,"debug_view","name"), -+ DEBUG_MAX_PROCF_LEN); -+ -+ free(k_debug_view); -+ return rc; -+} -+ -+static void -+free_debug_view(debug_view_t* view) -+{ -+ if(view) -+ free(view); -+} -+ -+static void -+debug_get_areas_v1(debug_info_t* db_info, void* k_dbi) -+{ -+ kaddr_t mem_pos; -+ kaddr_t dbe_addr; -+ int area_size, i; -+ -+ /* get areas */ -+ /* place to hold ptrs to debug areas in lcrash */ -+ area_size = PAGE_SIZE << db_info->page_order; -+ db_info->areas = (void**)malloc(db_info->nr_areas * sizeof(void *)); -+ memset(db_info->areas, 0, db_info->nr_areas * sizeof(void *)); -+ mem_pos = (kaddr_t) KL_UINT(k_dbi,"debug_info","areas"); -+ for (i = 0; i < db_info->nr_areas; i++) { -+ dbe_addr = KL_VREAD_PTR(mem_pos); -+ db_info->areas[i] = (debug_entry_t *) malloc(area_size); -+ /* read raw data for debug area */ -+ GET_BLOCK(dbe_addr, area_size, db_info->areas[i]); -+ mem_pos += KL_NBPW; -+ } -+} -+ -+static void -+debug_get_areas_v2(debug_info_t* db_info, void* k_dbi) -+{ -+ kaddr_t area_ptr; -+ kaddr_t page_array_ptr; -+ kaddr_t page_ptr; -+ int i,j; -+ db_info->areas_v2=(void***)malloc(db_info->nr_areas * sizeof(void **)); -+ area_ptr = (kaddr_t) KL_UINT(k_dbi,"debug_info","areas"); -+ 
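	/*
	 * Layout assumed by the copy loops below (s390 debug feature v2):
	 * debug_info.areas points to an array of nr_areas pointers, each of
	 * which points to an array of pages_per_area page pointers; every
	 * page holds raw debug entries.  The loops mirror that structure
	 * into db_info->areas_v2[area][page], reading one PAGE_SIZE block
	 * per page from the dump.
	 */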
for (i = 0; i < db_info->nr_areas; i++) { -+ db_info->areas_v2[i] = (void**)malloc(db_info->pages_per_area_v2 -+ * sizeof(void*)); -+ page_array_ptr = KL_VREAD_PTR(area_ptr); -+ for(j=0; j < db_info->pages_per_area_v2; j++) { -+ page_ptr = KL_VREAD_PTR(page_array_ptr); -+ db_info->areas_v2[i][j] = (void*)malloc(PAGE_SIZE); -+ /* read raw data for debug area */ -+ GET_BLOCK(page_ptr, PAGE_SIZE, db_info->areas_v2[i][j]); -+ page_array_ptr += KL_NBPW; -+ } -+ area_ptr += KL_NBPW; -+ } -+} -+ -+static debug_info_t* -+get_debug_info(kaddr_t addr,int get_areas) -+{ -+ void *k_dbi; -+ kaddr_t mem_pos; -+ kaddr_t view_addr; -+ debug_info_t* db_info; -+ int i; -+ int dbi_size; -+ -+ /* get sizes of kernel structures */ -+ if(!(dbi_size = kl_struct_len("debug_info"))){ -+ fprintf (KL_ERRORFP, -+ "Could not determine sizeof(struct debug_info)\n"); -+ return(NULL); -+ } -+ if(!(dbe_size = kl_struct_len("__debug_entry"))){ -+ fprintf(KL_ERRORFP, -+ "Could not determine sizeof(struct __debug_entry)\n"); -+ return(NULL); -+ } -+ -+ /* get kernel debug_info structure */ -+ k_dbi = malloc(dbi_size); -+ GET_BLOCK(addr, dbi_size, k_dbi); -+ -+ db_info = (debug_info_t*)malloc(sizeof(debug_info_t)); -+ memset(db_info, 0, sizeof(debug_info_t)); -+ -+ /* copy members */ -+ db_info->level = KL_INT(k_dbi,"debug_info","level"); -+ db_info->nr_areas = KL_INT(k_dbi,"debug_info","nr_areas"); -+ db_info->pages_per_area_v2= KL_INT(k_dbi,"debug_info","pages_per_area"); -+ db_info->page_order = KL_INT(k_dbi,"debug_info","page_order"); -+ db_info->buf_size = KL_INT(k_dbi,"debug_info","buf_size"); -+ db_info->entry_size = KL_INT(k_dbi,"debug_info","entry_size"); -+ db_info->next_dbi = KL_UINT(k_dbi,"debug_info","next"); -+ db_info->prev_dbi = KL_UINT(k_dbi,"debug_info","prev"); -+ db_info->addr = addr; -+ strncpy(db_info->name,K_PTR(k_dbi,"debug_info","name"), -+ DEBUG_MAX_PROCF_LEN); -+ -+ -+ if(get_areas){ -+ if(dbf_version == DBF_VERSION_V1) -+ debug_get_areas_v1(db_info,k_dbi); -+ else -+ debug_get_areas_v2(db_info,k_dbi); -+ } else { -+ db_info->areas = NULL; -+ } -+ -+ /* get views */ -+ mem_pos = (uaddr_t) K_PTR(k_dbi,"debug_info","views"); -+ memset(&db_info->views, 0, DEBUG_MAX_VIEWS * sizeof(void*)); -+ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { -+ view_addr = KL_GET_PTR((void*)(uaddr_t)mem_pos); -+ if(view_addr == 0){ -+ break; -+ } else { -+ db_info->views[i] = get_debug_view(view_addr); -+ } -+ mem_pos += KL_NBPW; -+ } -+ free(k_dbi); -+ return db_info; -+} -+ -+static void -+free_debug_info_v1(debug_info_t * db_info) -+{ -+ int i; -+ if(db_info->areas){ -+ for (i = 0; i < db_info->nr_areas; i++) { -+ free(db_info->areas[i]); -+ } -+ } -+ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { -+ free_debug_view(db_info->views[i]); -+ } -+ free(db_info->areas); -+ free(db_info); -+} -+ -+static void -+free_debug_info_v2(debug_info_t * db_info) -+{ -+ int i,j; -+ if(db_info->areas) { -+ for (i = 0; i < db_info->nr_areas; i++) { -+ for(j = 0; j < db_info->pages_per_area_v2; j++) { -+ free(db_info->areas_v2[i][j]); -+ } -+ free(db_info->areas[i]); -+ } -+ free(db_info->areas); -+ db_info->areas = NULL; -+ } -+ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { -+ free_debug_view(db_info->views[i]); -+ } -+ free(db_info); -+} -+ -+static int -+get_debug_areas(void) -+{ -+ kaddr_t act_debug_area; -+ syment_t *debug_sym; -+ debug_info_t *act_debug_area_cpy; -+ -+ if(!(debug_sym = kl_lkup_symname("debug_area_first"))){ -+ printf("Did not find debug_areas"); -+ return -1; -+ } -+ act_debug_area = KL_VREAD_PTR(debug_sym->s_addr); -+ 
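	/*
	 * The kernel keeps its debug_info descriptors on a list headed by
	 * the debug_area_first symbol.  The loop below follows the next_dbi
	 * links, copying each descriptor (header only, no data areas yet)
	 * into a local list anchored at debug_area_first/debug_area_last.
	 */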
while(act_debug_area != 0){ -+ act_debug_area_cpy = get_debug_info(act_debug_area,0); -+ act_debug_area = act_debug_area_cpy->next_dbi; -+ if(debug_area_first == NULL){ -+ debug_area_first = act_debug_area_cpy; -+ } else { -+ debug_area_last->next = act_debug_area_cpy; -+ } -+ debug_area_last = act_debug_area_cpy; -+ } -+ return 0; -+} -+ -+static void -+free_debug_areas(void) -+{ -+ debug_info_t* next; -+ debug_info_t* act_debug_info = debug_area_first; -+ -+ while(act_debug_info != NULL){ -+ next = act_debug_info->next; -+ if(dbf_version == DBF_VERSION_V1) -+ free_debug_info_v1(act_debug_info); -+ else -+ free_debug_info_v2(act_debug_info); -+ act_debug_info = next; -+ } -+ -+ debug_area_first = NULL; -+ debug_area_last = NULL; -+} -+ -+static debug_view_t * -+find_lcrash_debug_view(const char *name) -+{ -+ int i; -+ for (i = 0; (i < LCRASH_DB_VIEWS) && (debug_views[i] != NULL); i++) { -+ if (strcmp(debug_views[i]->name, name) == 0) -+ return debug_views[i]; -+ } -+ return NULL; -+} -+ -+static void -+print_lcrash_debug_views(FILE * ofp) -+{ -+ int i; -+ fprintf(ofp, "REGISTERED VIEWS\n"); -+ fprintf(ofp, "=====================\n"); -+ for (i = 0; i < LCRASH_DB_VIEWS; i++) { -+ if (debug_views[i] == NULL) { -+ return; -+ } -+ fprintf(ofp, " - %s\n", debug_views[i]->name); -+ } -+} -+ -+static int -+add_lcrash_debug_view(debug_view_t *view) -+{ -+ int i; -+ for (i = 0; i < LCRASH_DB_VIEWS; i++) { -+ if (debug_views[i] == NULL) { -+ debug_views[i] = view; -+ return 0; -+ } -+ if (strcmp(debug_views[i]->name, view->name) == 0) -+ return -1; -+ } -+ return -1; -+} -+ -+static int -+list_one_view(char *area_name, char *view_name, command_t * cmd) -+{ -+ debug_info_t *db_info; -+ debug_view_t *db_view; -+ -+ if ((db_info = find_debug_area(area_name)) == NULL) { -+ fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name); -+ return -1; -+ } -+ -+ db_info = get_debug_info(db_info->addr,1); -+ -+ if ((db_view = find_lcrash_debug_view(view_name)) == NULL) { -+ fprintf(cmd->efp, "View '%s' not registered!\n", view_name); -+ return -1; -+ } -+ if(dbf_version == DBF_VERSION_V1){ -+ debug_format_output_v1(db_info, db_view, cmd->ofp); -+ free_debug_info_v1(db_info); -+ } else { -+ debug_format_output_v2(db_info, db_view, cmd->ofp); -+ free_debug_info_v2(db_info); -+ } -+ return 0; -+} -+ -+static int -+list_areas(FILE * ofp) -+{ -+ debug_info_t* act_debug_info = debug_area_first; -+ fprintf(ofp, "Debug Logs:\n"); -+ fprintf(ofp, "==================\n"); -+ while(act_debug_info != NULL){ -+ fprintf(ofp, " - %s\n", act_debug_info->name); -+ act_debug_info = act_debug_info->next; -+ } -+ return 0; -+} -+ -+static int -+list_one_area(const char *area_name, command_t * cmd) -+{ -+ debug_info_t *db_info; -+ int i; -+ if ((db_info = find_debug_area(area_name)) == NULL) { -+ fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name); -+ return -1; -+ } -+ fprintf(cmd->ofp, "INSTALLED VIEWS FOR '%s':\n", area_name); -+ fprintf(cmd->ofp, "================================================" -+ "==============================\n"); -+ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { -+ if (db_info->views[i] != NULL) { -+ fprintf(cmd->ofp, " - %s ", db_info->views[i]->name); -+ if (find_lcrash_debug_view(db_info->views[i]->name)) -+ fprintf(cmd->ofp, "(available)\n"); -+ else -+ fprintf(cmd->ofp, "(not available)\n"); -+ } -+ } -+ fprintf(cmd->ofp, "=================================================" -+ "=============================\n"); -+ return 0; -+} -+ -+#ifdef DBF_DYNAMIC_VIEWS -+static int -+load_debug_view(const 
char *path, command_t * cmd) -+{ -+ void *library; -+ const char *error; -+ debug_view_t *(*view_init_func) (void); -+ -+ library = dlopen(path, RTLD_LAZY); -+ if (library == NULL) { -+ fprintf(cmd->efp, "Could not open %s: %s\n", path, dlerror()); -+ return (1); -+ } -+ -+ dlerror(); -+ -+ view_init_func = dlsym(library, "debug_view_init"); -+ error = dlerror(); -+ -+ if (error) { -+ fprintf(stderr, "could not find debug_view_init(): %s\n", -+ error); -+ exit(1); -+ } -+ -+ add_lcrash_debug_view((*view_init_func) ()); -+ -+ fprintf(cmd->ofp, "view %s loaded\n", path); -+ fflush(stdout); -+ return 0; -+} -+#endif -+ -+/* -+ * s390dbf_cmd() -- Run the 's390dbf' command. -+ */ -+static int -+s390dbf_cmd(command_t * cmd) -+{ -+ syment_t *dbf_version_sym; -+ int rc = 0; -+ -+ /* check version */ -+ -+ if(!(dbf_version_sym = kl_lkup_symname("debug_feature_version"))){ -+ fprintf(KL_ERRORFP, -+ "Could not determine debug_feature_version\n"); -+ return -1; -+ } -+ -+ dbf_version = KL_VREAD_UINT32(dbf_version_sym->s_addr); -+ -+ if ((dbf_version != DBF_VERSION_V1) && (dbf_version != DBF_VERSION_V2)){ -+ fprintf(cmd->efp,"lcrash does not support the" -+ " debug feature version of the dump kernel:\n"); -+ fprintf(cmd->efp,"DUMP: %i SUPPORTED: %i and %i\n", -+ dbf_version, DBF_VERSION_V1, DBF_VERSION_V2); -+ return -1; -+ } -+ -+ dbf_init(); -+ -+ if (cmd->flags & C_ALL) { -+ return (0); -+ } -+#ifdef DBF_DYNAMIC_VIEWS -+ if (cmd->flags & LOAD_FLAG) { -+ printf("loading: %s\n", cmd->args[0]); -+ return (load_debug_view(cmd->args[0], cmd)); -+ } -+#endif -+ if (cmd->flags & VIEWS_FLAG) { -+ print_lcrash_debug_views(cmd->ofp); -+ return (0); -+ } -+ if (cmd->nargs > 2) { -+ s390dbf_usage(cmd); -+ return (1); -+ } -+ -+ if(get_debug_areas() == -1) -+ return -1; -+ -+ switch (cmd->nargs) { -+ case 0: -+ rc = list_areas(cmd->ofp); -+ break; -+ case 1: -+ rc = list_one_area(cmd->args[0], cmd); -+ break; -+ case 2: -+ rc = list_one_view(cmd->args[0], cmd->args[1], cmd); -+ break; -+ } -+ -+ free_debug_areas(); -+ -+ return rc; -+} -+ -+#define _S390DBF_USAGE " [-v] [debug log] [debug view]" -+ -+/* -+ * s390dbf_usage() -- Print the usage string for the 's390dbf' command. -+ */ -+void -+s390dbf_usage(command_t * cmd) -+{ -+ CMD_USAGE(cmd, _S390DBF_USAGE); -+} -+ -+/* -+ * s390 debug feature command for crash -+ */ -+ -+char *help_s390dbf[] = { -+ "s390dbf", -+ "s390dbf prints out debug feature logs", -+ "[-v] [debug_log] [debug_log view]", -+ "", -+ "Display Debug logs:", -+ " + If called without parameters, all active debug logs are listed.", -+ " + If called with '-v', all debug views which are available to", -+ " 'crash' are listed", -+ " + If called with the name of a debug log, all debug-views for which", -+ " the debug-log has registered are listed. 
It is possible thatsome", -+ " of the debug views are not available to 'crash'.", -+ " + If called with the name of a debug-log and an available viewname,", -+ " the specified view is printed.", -+ NULL -+}; -+ -+void cmd_s390dbf() -+{ -+ int i,c; -+ -+ command_t cmd = { -+ .ofp = stdout, -+ .efp = stderr, -+ .cmdstr = "s390dbf", -+ .command = "s390dbf", -+ }; -+ -+ cmd.nargs=argcnt - 1; -+ for (i=1; i < argcnt; i++) -+ cmd.args[i-1] = args[i]; -+ -+ while ((c = getopt(argcnt, args, "v")) != EOF) { -+ switch(c) { -+ case 'v': -+ cmd.flags |= VIEWS_FLAG; -+ break; -+ default: -+ s390dbf_usage(&cmd); -+ return; -+ } -+ } -+ s390dbf_cmd(&cmd); -+} -+ -+#endif -+ ---- crash/ppc64.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/ppc64.c 2006-10-11 09:14:35.000000000 -0400 -@@ -1,8 +1,8 @@ - /* ppc64.c -- core analysis suite - * -- * Copyright (C) 2004, 2005 David Anderson -- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. -- * Copyright (C) 2004 Haren Myneni, IBM Corporation -+ * Copyright (C) 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2004, 2006 Haren Myneni, IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -47,6 +47,9 @@ - static char * ppc64_check_eframe(struct ppc64_pt_regs *); - static void ppc64_print_eframe(char *, struct ppc64_pt_regs *, - struct bt_info *); -+static void parse_cmdline_arg(void); -+static void ppc64_paca_init(void); -+static void ppc64_clear_machdep_cache(void); - - struct machine_specific ppc64_machine_specific = { { 0 }, 0, 0 }; - -@@ -64,26 +67,53 @@ - machdep->verify_symbol = ppc64_verify_symbol; - if (pc->flags & KERNEL_DEBUG_QUERY) - return; -- machdep->pagesize = memory_page_size(); -+ machdep->stacksize = PPC64_STACK_SIZE; -+ machdep->last_pgd_read = 0; -+ machdep->last_pmd_read = 0; -+ machdep->last_ptbl_read = 0; -+ machdep->machspec->last_level4_read = 0; -+ machdep->verify_paddr = generic_verify_paddr; -+ machdep->ptrs_per_pgd = PTRS_PER_PGD; -+ machdep->flags |= MACHDEP_BT_TEXT; -+ if (machdep->cmdline_arg) -+ parse_cmdline_arg(); -+ machdep->clear_machdep_cache = ppc64_clear_machdep_cache; -+ break; -+ -+ case PRE_GDB: -+ /* -+ * Recently there were changes made to kexec tools -+ * to support 64K page size. With those changes -+ * vmcore file obtained from a kernel which supports -+ * 64K page size cannot be analyzed using crash on a -+ * machine running with kernel supporting 4K page size -+ * -+ * The following modifications are required in crash -+ * tool to be in sync with kexec tools. -+ * -+ * Look if the following symbol exists. If yes then -+ * the dump was taken with a kernel supporting 64k -+ * page size. So change the page size accordingly. -+ * -+ * Also moved the following code block from -+ * PRE_SYMTAB case here. 
-+ */ -+ if (symbol_exists("__hash_page_64K")) -+ machdep->pagesize = PPC64_64K_PAGE_SIZE; -+ else -+ machdep->pagesize = memory_page_size(); - machdep->pageshift = ffs(machdep->pagesize) - 1; - machdep->pageoffset = machdep->pagesize - 1; - machdep->pagemask = ~((ulonglong)machdep->pageoffset); -- machdep->stacksize = 4 * machdep->pagesize; - if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc pgd space."); - if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc pmd space."); - if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc ptbl space."); -- machdep->last_pgd_read = 0; -- machdep->last_pmd_read = 0; -- machdep->last_ptbl_read = 0; -- machdep->verify_paddr = generic_verify_paddr; -- machdep->ptrs_per_pgd = PTRS_PER_PGD; -- machdep->flags |= MACHDEP_BT_TEXT; -- break; -+ if ((machdep->machspec->level4 = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc level4 space."); - -- case PRE_GDB: - machdep->kvbase = symbol_value("_stext"); - machdep->identity_map_base = machdep->kvbase; - machdep->is_kvaddr = generic_is_kvaddr; -@@ -109,6 +139,56 @@ - break; - - case POST_GDB: -+ if (!(machdep->flags & (VM_ORIG|VM_4_LEVEL))) { -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,14)) { -+ machdep->flags |= VM_4_LEVEL; -+ } else { -+ machdep->flags |= VM_ORIG; -+ } -+ } -+ if (machdep->flags & VM_ORIG) { -+ /* pre-2.6.14 layout */ -+ free(machdep->machspec->level4); -+ machdep->machspec->level4 = NULL; -+ machdep->ptrs_per_pgd = PTRS_PER_PGD; -+ } else { -+ /* 2.6.14 layout */ -+ struct machine_specific *m = machdep->machspec; -+ if (machdep->pagesize == 65536) { -+ /* 64K pagesize */ -+ m->l1_index_size = PTE_INDEX_SIZE_L4_64K; -+ m->l2_index_size = PMD_INDEX_SIZE_L4_64K; -+ m->l3_index_size = PUD_INDEX_SIZE_L4_64K; -+ m->l4_index_size = PGD_INDEX_SIZE_L4_64K; -+ m->pte_shift = PTE_SHIFT_L4_64K; -+ m->l2_masked_bits = PMD_MASKED_BITS_64K; -+ } else { -+ /* 4K pagesize */ -+ m->l1_index_size = PTE_INDEX_SIZE_L4_4K; -+ m->l2_index_size = PMD_INDEX_SIZE_L4_4K; -+ m->l3_index_size = PUD_INDEX_SIZE_L4_4K; -+ m->l4_index_size = PGD_INDEX_SIZE_L4_4K; -+ m->pte_shift = PTE_SHIFT_L4_4K; -+ m->l2_masked_bits = PMD_MASKED_BITS_4K; -+ } -+ -+ /* Compute ptrs per each level */ -+ m->l1_shift = machdep->pageshift; -+ m->ptrs_per_l1 = (1 << m->l1_index_size); -+ m->ptrs_per_l2 = (1 << m->l2_index_size); -+ m->ptrs_per_l3 = (1 << m->l3_index_size); -+ -+ machdep->ptrs_per_pgd = m->ptrs_per_l3; -+ -+ /* Compute shifts */ -+ m->l2_shift = m->l1_shift + m->l1_index_size; -+ m->l3_shift = m->l2_shift + m->l2_index_size; -+ m->l4_shift = m->l3_shift + m->l3_index_size; -+ } -+ -+ machdep->section_size_bits = _SECTION_SIZE_BITS; -+ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; -+ ppc64_paca_init(); - machdep->vmalloc_start = ppc64_vmalloc_start; - MEMBER_OFFSET_INIT(thread_struct_pg_tables, - "thread_struct", "pg_tables"); -@@ -178,9 +258,11 @@ - */ - BZERO(&machdep->machspec->hwintrstack, - NR_CPUS*sizeof(ulong)); -- machdep->hz = HZ; -- if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -- machdep->hz = 1000; -+ if (!machdep->hz) { -+ machdep->hz = HZ; -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -+ machdep->hz = 1000; -+ } - /* - * IRQ stacks are introduced in 2.6 and also configurable. - */ -@@ -229,10 +311,12 @@ - fprintf(fp, " flags: %lx (", machdep->flags); - if (machdep->flags & KSYMS_START) - fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); -- if (machdep->flags & SYSRQ) -- fprintf(fp, "%sSYSRQ", others++ ? 
"|" : ""); - if (machdep->flags & MACHDEP_BT_TEXT) - fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); -+ if (machdep->flags & VM_ORIG) -+ fprintf(fp, "%sVM_ORIG", others++ ? "|" : ""); -+ if (machdep->flags & VM_4_LEVEL) -+ fprintf(fp, "%sVM_4_LEVEL", others++ ? "|" : ""); - fprintf(fp, ")\n"); - - fprintf(fp, " kvbase: %lx\n", machdep->kvbase); -@@ -269,15 +353,25 @@ - fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); - fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); - fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); -+ fprintf(fp, " xendump_p2m_create: NULL\n"); -+ fprintf(fp, "xen_kdump_p2m_create: NULL\n"); - fprintf(fp, " line_number_hooks: ppc64_line_number_hooks\n"); - fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); - fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); - fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); -+ fprintf(fp, "clear_machdep_cache: ppc64_clear_machdep_cache()\n"); - fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); - fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); - fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); - fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); -+ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); -+ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); -+ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); - fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); -+ fprintf(fp, " pgd_index_size: %d\n", machdep->machspec->l4_index_size); -+ fprintf(fp, " pud_index_size: %d\n", machdep->machspec->l3_index_size); -+ fprintf(fp, " pmd_index_size: %d\n", machdep->machspec->l2_index_size); -+ fprintf(fp, " pte_index_size: %d\n", machdep->machspec->l1_index_size); - } - - /* -@@ -342,7 +436,7 @@ - if (!(pte & _PAGE_PRESENT)) { - if (pte && verbose) { - fprintf(fp, "\n"); -- ppc64_translate_pte(pte, 0, 0); -+ ppc64_translate_pte(pte, 0, PTE_SHIFT); - } - return FALSE; - } -@@ -354,7 +448,90 @@ - - if (verbose) { - fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); -- ppc64_translate_pte(pte, 0, 0); -+ ppc64_translate_pte(pte, 0, PTE_SHIFT); -+ } -+ -+ return TRUE; -+} -+ -+/* -+ * Virtual to physical memory translation. This function will be called -+ * by both ppc64_kvtop and ppc64_uvtop. 
-+ */ -+static int -+ppc64_vtop_level4(ulong vaddr, ulong *level4, physaddr_t *paddr, int verbose) -+{ -+ ulong *level4_dir; -+ ulong *page_dir; -+ ulong *page_middle; -+ ulong *page_table; -+ ulong level4_pte, pgd_pte, pmd_pte; -+ ulong pte; -+ -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)level4); -+ -+ level4_dir = (ulong *)((ulong *)level4 + L4_OFFSET(vaddr)); -+ FILL_L4(PAGEBASE(level4), KVADDR, PAGESIZE()); -+ level4_pte = ULONG(machdep->machspec->level4 + PAGEOFFSET(level4_dir)); -+ if (verbose) -+ fprintf(fp, " L4: %lx => %lx\n", (ulong)level4_dir, level4_pte); -+ if (!level4_pte) -+ return FALSE; -+ -+ /* Sometimes we don't have level3 pagetable entries */ -+ if (machdep->machspec->l3_index_size != 0) { -+ page_dir = (ulong *)((ulong *)level4_pte + PGD_OFFSET_L4(vaddr)); -+ FILL_PGD(PAGEBASE(level4_pte), KVADDR, PAGESIZE()); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); -+ -+ if (verbose) -+ fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); -+ if (!pgd_pte) -+ return FALSE; -+ } else { -+ pgd_pte = level4_pte; -+ } -+ -+ page_middle = (ulong *)((ulong *)pgd_pte + PMD_OFFSET_L4(vaddr)); -+ FILL_PMD(PAGEBASE(pgd_pte), KVADDR, PAGESIZE()); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); -+ -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); -+ -+ if (!(pmd_pte)) -+ return FALSE; -+ -+ page_table = (ulong *)(pmd_pte & ~(machdep->machspec->l2_masked_bits)) -+ + (BTOP(vaddr) & (machdep->machspec->ptrs_per_l1 - 1)); -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx\n",(ulong)page_middle, -+ (ulong)page_table); -+ -+ FILL_PTBL(PAGEBASE(pmd_pte), KVADDR, PAGESIZE()); -+ pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); -+ -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); -+ -+ if (!(pte & _PAGE_PRESENT)) { -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ ppc64_translate_pte(pte, 0, machdep->machspec->pte_shift); -+ } -+ return FALSE; -+ } -+ -+ if (!pte) -+ return FALSE; -+ -+ *paddr = PAGEBASE(PTOB(pte >> machdep->machspec->pte_shift)) -+ + PAGEOFFSET(vaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); -+ ppc64_translate_pte(pte, 0, machdep->machspec->pte_shift); - } - - return TRUE; -@@ -411,7 +588,10 @@ - FAULT_ON_ERROR); - } - -- return ppc64_vtop(vaddr, pgd, paddr, verbose); -+ if (machdep->flags & VM_4_LEVEL) -+ return ppc64_vtop_level4(vaddr, pgd, paddr, verbose); -+ else -+ return ppc64_vtop(vaddr, pgd, paddr, verbose); - } - - /* -@@ -436,7 +616,10 @@ - return TRUE; - } - -- return ppc64_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); -+ if (machdep->flags & VM_4_LEVEL) -+ return ppc64_vtop_level4(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); -+ else -+ return ppc64_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); - } - - /* -@@ -657,7 +840,7 @@ - * If a physaddr pointer is passed in, don't print anything. 
- */ - static int --ppc64_translate_pte(ulong pte, void *physaddr, ulonglong unused) -+ppc64_translate_pte(ulong pte, void *physaddr, ulonglong pte_shift) - { - int c, len1, len2, len3, others, page_present; - char buf[BUFSIZE]; -@@ -668,7 +851,7 @@ - char *arglist[MAXARGS]; - ulong paddr; - -- paddr = PTOB(pte >> PTE_SHIFT); -+ paddr = PTOB(pte >> pte_shift); - page_present = (pte & _PAGE_PRESENT); - - if (physaddr) { -@@ -1034,8 +1217,12 @@ - ms->hwstacksize + STACK_FRAME_OVERHEAD; - bt->stackbuf = ms->hwstackbuf; - alter_stackbuf(bt); -- } else -- error(FATAL, "cannot find the stack info"); -+ } else { -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "cannot find the stack info.\n"); -+ } -+ return; -+ } - } - - -@@ -1270,20 +1457,11 @@ - return NULL; - } - --/* -- * Print exception frame information for ppc64 -- */ - static void --ppc64_print_eframe(char *efrm_str, struct ppc64_pt_regs *regs, -- struct bt_info *bt) -+ppc64_print_regs(struct ppc64_pt_regs *regs) - { - int i; - -- if (BT_REFERENCE_CHECK(bt)) -- return; -- -- fprintf(fp, " %s [%lx] exception frame:", efrm_str, regs->trap); -- - /* print out the gprs... */ - for(i=0; i<32; i++) { - if(!(i % 3)) -@@ -1315,9 +1493,78 @@ - fprintf(fp, "DAR: %016lx\n", regs->dar); - fprintf(fp, " DSISR: %016lx ", regs->dsisr); - fprintf(fp, " Syscall Result: %016lx\n", regs->result); -+} -+ -+/* -+ * Print the exception frame information -+ */ -+static void -+ppc64_print_eframe(char *efrm_str, struct ppc64_pt_regs *regs, -+ struct bt_info *bt) -+{ -+ if (BT_REFERENCE_CHECK(bt)) -+ return; -+ -+ fprintf(fp, " %s [%lx] exception frame:", efrm_str, regs->trap); -+ ppc64_print_regs(regs); - fprintf(fp, "\n"); - } - -+/* -+ * get SP and IP from the saved ptregs. -+ */ -+static int -+ppc64_kdump_stack_frame(struct bt_info *bt_in, ulong *nip, ulong *ksp) -+{ -+ struct ppc64_pt_regs *pt_regs; -+ unsigned long unip; -+ -+ pt_regs = (struct ppc64_pt_regs *)bt_in->machdep; -+ if (!pt_regs->gpr[1]) { -+ /* -+ * Not collected regs. May be the corresponding CPU not -+ * responded to an IPI. -+ */ -+ fprintf(fp, "%0lx: GPR1 register value (SP) was not saved\n", -+ bt_in->task); -+ return FALSE; -+ } -+ *ksp = pt_regs->gpr[1]; -+ if (IS_KVADDR(*ksp)) { -+ readmem(*ksp+16, KVADDR, &unip, sizeof(ulong), "Regs NIP value", -+ FAULT_ON_ERROR); -+ *nip = unip; -+ } else { -+ if (IN_TASK_VMA(bt_in->task, *ksp)) -+ fprintf(fp, "%0lx: Task is running in user space\n", -+ bt_in->task); -+ else -+ fprintf(fp, "%0lx: Invalid Stack Pointer %0lx\n", -+ bt_in->task, *ksp); -+ *nip = pt_regs->nip; -+ } -+ -+ if (bt_in->flags && -+ ((BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT))) -+ return TRUE; -+ -+ /* -+ * Print the collected regs for the active task -+ */ -+ ppc64_print_regs(pt_regs); -+ if (!IS_KVADDR(*ksp)) -+ return FALSE; -+ -+ fprintf(fp, " NIP [%016lx] %s\n", pt_regs->nip, -+ closest_symbol(pt_regs->nip)); -+ if (unip != pt_regs->link) -+ fprintf(fp, " LR [%016lx] %s\n", pt_regs->link, -+ closest_symbol(pt_regs->link)); -+ -+ fprintf(fp, "\n"); -+ -+ return TRUE; -+} - - /* - * Get the starting point for the active cpus in a diskdump/netdump. -@@ -1335,12 +1582,18 @@ - ulong ur_ksp = 0; - int check_hardirq, check_softirq; - int check_intrstack = TRUE; -+ struct ppc64_pt_regs *pt_regs; -+ -+ /* -+ * For the kdump vmcore, Use SP and IP values that are saved in ptregs. 
-+ */ -+ if (pc->flags & KDUMP) -+ return ppc64_kdump_stack_frame(bt_in, nip, ksp); - - bt = &bt_local; - BCOPY(bt_in, bt, sizeof(struct bt_info)); - ms = machdep->machspec; - ur_nip = ur_ksp = 0; -- struct ppc64_pt_regs *pt_regs; - - panic_task = tt->panic_task == bt->task ? TRUE : FALSE; - -@@ -1424,6 +1677,7 @@ - if (STREQ(sym, ".netconsole_netdump") || - STREQ(sym, ".netpoll_start_netdump") || - STREQ(sym, ".start_disk_dump") || -+ STREQ(sym, ".crash_kexec") || - STREQ(sym, ".disk_dump")) { - *nip = *up; - *ksp = bt->stackbase + -@@ -2000,4 +2254,145 @@ - ppc64_dump_line_number(0); - } - -+/* -+ * Force the VM address-range selection via: -+ * -+ * --machdep vm=orig -+ * --machdep vm=2.6.14 -+ */ -+ -+void -+parse_cmdline_arg(void) -+{ -+ int i, c, errflag; -+ char *p; -+ char buf[BUFSIZE]; -+ char *arglist[MAXARGS]; -+ int lines = 0; -+ -+ if (!strstr(machdep->cmdline_arg, "=")) { -+ error(WARNING, "ignoring --machdep option: %s\n\n", -+ machdep->cmdline_arg); -+ return; -+ } -+ -+ strcpy(buf, machdep->cmdline_arg); -+ -+ for (p = buf; *p; p++) { -+ if (*p == ',') -+ *p = ' '; -+ } -+ -+ c = parse_line(buf, arglist); -+ -+ for (i = 0; i < c; i++) { -+ errflag = 0; -+ -+ if (STRNEQ(arglist[i], "vm=")) { -+ p = arglist[i] + strlen("vm="); -+ if (strlen(p)) { -+ if (STREQ(p, "orig")) { -+ machdep->flags |= VM_ORIG; -+ continue; -+ } else if (STREQ(p, "2.6.14")) { -+ machdep->flags |= VM_4_LEVEL; -+ continue; -+ } -+ } -+ } -+ -+ error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); -+ lines++; -+ } -+ -+ switch (machdep->flags & (VM_ORIG|VM_4_LEVEL)) -+ { -+ case VM_ORIG: -+ error(NOTE, "using original PPC64 VM address ranges\n"); -+ lines++; -+ break; -+ -+ case VM_4_LEVEL: -+ error(NOTE, "using 4-level pagetable PPC64 VM address ranges\n"); -+ lines++; -+ break; -+ -+ case (VM_ORIG|VM_4_LEVEL): -+ error(WARNING, "cannot set both vm=orig and vm=2.6.14\n"); -+ lines++; -+ machdep->flags &= ~(VM_ORIG|VM_4_LEVEL); -+ break; -+ } -+ -+ if (lines) -+ fprintf(fp, "\n"); -+} -+ -+/* -+ * Updating any smp-related items that were possibly bypassed -+ * or improperly initialized in kernel_init(). -+ */ -+static void -+ppc64_paca_init(void) -+{ -+#define BITS_FOR_LONG sizeof(ulong)*8 -+ int i, cpus, nr_paca; -+ char *cpu_paca_buf; -+ ulong data_offset; -+ ulong cpu_online_map[NR_CPUS/BITS_FOR_LONG]; -+ -+ if (!symbol_exists("paca")) -+ error(FATAL, "PPC64: Could not find 'paca' symbol\n"); -+ -+ if (!symbol_exists("cpu_online_map")) -+ error(FATAL, "PPC64: Could not find 'cpu_online_map' symbol\n"); -+ -+ if (!MEMBER_EXISTS("paca_struct", "data_offset")) -+ return; -+ -+ STRUCT_SIZE_INIT(ppc64_paca, "paca_struct"); -+ data_offset = MEMBER_OFFSET("paca_struct", "data_offset"); -+ -+ cpu_paca_buf = GETBUF(SIZE(ppc64_paca)); -+ -+ if (!(nr_paca = get_array_length("paca", NULL, 0))) -+ nr_paca = NR_CPUS; -+ -+ if (nr_paca > NR_CPUS) { -+ error(WARNING, -+ "PPC64: Number of paca entries (%d) greater than NR_CPUS (%d)\n", -+ nr_paca, NR_CPUS); -+ error(FATAL, "Recompile crash with larger NR_CPUS\n"); -+ } -+ -+ readmem(symbol_value("cpu_online_map"), KVADDR, &cpu_online_map[0], -+ nr_paca/8, "cpu_online_map", FAULT_ON_ERROR); -+ -+ for (i = cpus = 0; i < nr_paca; i++) { -+ div_t val = div(i, BITS_FOR_LONG); -+ /* -+ * CPU online? 
-+ */ -+ if (!(cpu_online_map[val.quot] & (0x1UL << val.rem))) -+ continue; -+ -+ readmem(symbol_value("paca") + (i * SIZE(ppc64_paca)), -+ KVADDR, cpu_paca_buf, SIZE(ppc64_paca), -+ "paca entry", FAULT_ON_ERROR); -+ -+ kt->__per_cpu_offset[i] = ULONG(cpu_paca_buf + data_offset); -+ kt->flags |= PER_CPU_OFF; -+ cpus++; -+ } -+ kt->cpus = cpus; -+ if (kt->cpus > 1) -+ kt->flags |= SMP; -+} -+ -+void -+ppc64_clear_machdep_cache(void) -+{ -+ if (machdep->machspec->last_level4_read != vt->kernel_pgd[0]) -+ machdep->machspec->last_level4_read = 0; -+} - #endif /* PPC64 */ ---- crash/x86_64.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/x86_64.c 2007-08-23 17:02:54.000000000 -0400 -@@ -1,7 +1,7 @@ - /* x86_64.c -- core analysis suite - * -- * Copyright (C) 2004, 2005 David Anderson -- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -14,11 +14,16 @@ - * GNU General Public License for more details. - */ - #include "defs.h" -+#include "xen_hyper_defs.h" - - #ifdef X86_64 - - static int x86_64_kvtop(struct task_context *, ulong, physaddr_t *, int); -+static int x86_64_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); - static int x86_64_uvtop(struct task_context *, ulong, physaddr_t *, int); -+static int x86_64_uvtop_level4(struct task_context *, ulong, physaddr_t *, int); -+static int x86_64_uvtop_level4_xen_wpt(struct task_context *, ulong, physaddr_t *, int); -+static int x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *, ulong, physaddr_t *, int); - static ulong x86_64_vmalloc_start(void); - static int x86_64_is_task_addr(ulong); - static int x86_64_verify_symbol(const char *, ulong, char); -@@ -32,14 +37,17 @@ - #define EFRAME_VERIFY (0x2) - #define EFRAME_CS (0x4) - #define EFRAME_SEARCH (0x8) -+static int x86_64_print_eframe_location(ulong, int, FILE *); - static void x86_64_back_trace_cmd(struct bt_info *); - static ulong x86_64_in_exception_stack(struct bt_info *); - static ulong x86_64_in_irqstack(struct bt_info *); - static void x86_64_low_budget_back_trace_cmd(struct bt_info *); -+static void x86_64_dwarf_back_trace_cmd(struct bt_info *); - static void x86_64_get_dumpfile_stack_frame(struct bt_info *, ulong *, ulong *); - static struct syment *x86_64_function_called_by(ulong); - static int is_direct_call_target(struct bt_info *); - static void get_x86_64_frame(struct bt_info *, ulong *, ulong *); -+static ulong text_lock_function(char *, struct bt_info *, ulong); - static int x86_64_print_stack_entry(struct bt_info *, FILE *, int, int, ulong); - static void x86_64_display_full_frame(struct bt_info *, ulong, FILE *); - static void x86_64_do_bt_reference_check(struct bt_info *, ulong,char *); -@@ -56,6 +64,8 @@ - static void x86_64_display_memmap(void); - static void x86_64_dump_line_number(ulong); - static struct line_number_hook x86_64_line_number_hooks[]; -+static void x86_64_calc_phys_base(void); -+static int x86_64_is_module_addr(ulong); - static int x86_64_is_kvaddr(ulong); - static int x86_64_is_uvaddr(ulong, struct task_context *); - void x86_64_compiler_warning_stub(void); -@@ -63,7 +73,25 @@ - static void x86_64_cpu_pda_init(void); - static void x86_64_ist_init(void); - static void x86_64_post_init(void); -- -+static void parse_cmdline_arg(void); -+static 
void x86_64_clear_machdep_cache(void); -+static void x86_64_irq_eframe_link_init(void); -+static int x86_64_xendump_p2m_create(struct xendump_data *); -+static char *x86_64_xendump_load_page(ulong, struct xendump_data *); -+static int x86_64_xendump_page_index(ulong, struct xendump_data *); -+static int x86_64_xen_kdump_p2m_create(struct xen_kdump_data *); -+static char *x86_64_xen_kdump_load_page(ulong, char *); -+static ulong x86_64_xen_kdump_page_mfn(ulong); -+static void x86_64_debug_dump_page(FILE *, char *, char *); -+static void x86_64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); -+static ulong x86_64_xendump_panic_task(struct xendump_data *); -+static void x86_64_init_hyper(int); -+static ulong x86_64_get_stackbase_hyper(ulong); -+static ulong x86_64_get_stacktop_hyper(ulong); -+static int x86_64_framesize_cache_resize(void); -+static int x86_64_framesize_cache_func(int, ulong, int *); -+static int x86_64_get_framesize(struct bt_info *, ulong); -+static void x86_64_framesize_debug(struct bt_info *); - - struct machine_specific x86_64_machine_specific = { 0 }; - -@@ -74,6 +102,11 @@ - void - x86_64_init(int when) - { -+ if (XEN_HYPER_MODE()) { -+ x86_64_init_hyper(when); -+ return; -+ } -+ - switch (when) - { - case PRE_SYMTAB: -@@ -86,6 +119,8 @@ - machdep->pageoffset = machdep->pagesize - 1; - machdep->pagemask = ~((ulonglong)machdep->pageoffset); - machdep->stacksize = machdep->pagesize * 2; -+ if ((machdep->machspec->upml = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc upml space."); - if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc pgd space."); - if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) -@@ -93,17 +128,85 @@ - if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) - error(FATAL, "cannot malloc ptbl space."); - if ((machdep->machspec->pml4 = -- (char *)malloc(PAGESIZE())) == NULL) -+ (char *)malloc(PAGESIZE()*2)) == NULL) - error(FATAL, "cannot malloc pml4 space."); -+ machdep->machspec->last_upml_read = 0; -+ machdep->machspec->last_pml4_read = 0; - machdep->last_pgd_read = 0; - machdep->last_pmd_read = 0; - machdep->last_ptbl_read = 0; - machdep->verify_paddr = generic_verify_paddr; - machdep->ptrs_per_pgd = PTRS_PER_PGD; - machdep->flags |= MACHDEP_BT_TEXT; -+ machdep->flags |= FRAMESIZE_DEBUG; -+ machdep->machspec->irq_eframe_link = UNINITIALIZED; -+ if (machdep->cmdline_arg) -+ parse_cmdline_arg(); - break; - - case PRE_GDB: -+ if (!(machdep->flags & VM_FLAGS)) { -+ if (symbol_exists("xen_start_info")) { -+ if (symbol_exists("low_pml4") && -+ symbol_exists("swap_low_mappings")) -+ machdep->flags |= VM_XEN_RHEL4; -+ else -+ machdep->flags |= VM_XEN; -+ } else if (symbol_exists("boot_vmalloc_pgt")) -+ machdep->flags |= VM_ORIG; -+ else -+ machdep->flags |= VM_2_6_11; -+ } -+ -+ switch (machdep->flags & VM_FLAGS) -+ { -+ case VM_ORIG: -+ /* pre-2.6.11 layout */ -+ machdep->machspec->userspace_top = USERSPACE_TOP_ORIG; -+ machdep->machspec->page_offset = PAGE_OFFSET_ORIG; -+ machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_ORIG; -+ machdep->machspec->vmalloc_end = VMALLOC_END_ORIG; -+ machdep->machspec->modules_vaddr = MODULES_VADDR_ORIG; -+ machdep->machspec->modules_end = MODULES_END_ORIG; -+ -+ free(machdep->machspec->upml); -+ machdep->machspec->upml = NULL; -+ -+ machdep->uvtop = x86_64_uvtop; -+ break; -+ -+ case VM_2_6_11: -+ /* 2.6.11 layout */ -+ machdep->machspec->userspace_top = USERSPACE_TOP_2_6_11; -+ machdep->machspec->page_offset = 
PAGE_OFFSET_2_6_11; -+ machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_2_6_11; -+ machdep->machspec->vmalloc_end = VMALLOC_END_2_6_11; -+ machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_11; -+ machdep->machspec->modules_end = MODULES_END_2_6_11; -+ -+ machdep->uvtop = x86_64_uvtop_level4; -+ break; -+ -+ case VM_XEN: -+ /* Xen layout */ -+ machdep->machspec->userspace_top = USERSPACE_TOP_XEN; -+ machdep->machspec->page_offset = PAGE_OFFSET_XEN; -+ machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN; -+ machdep->machspec->vmalloc_end = VMALLOC_END_XEN; -+ machdep->machspec->modules_vaddr = MODULES_VADDR_XEN; -+ machdep->machspec->modules_end = MODULES_END_XEN; -+ break; -+ -+ case VM_XEN_RHEL4: -+ /* RHEL4 Xen layout */ -+ machdep->machspec->userspace_top = USERSPACE_TOP_XEN_RHEL4; -+ machdep->machspec->page_offset = PAGE_OFFSET_XEN_RHEL4; -+ machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN_RHEL4; -+ machdep->machspec->vmalloc_end = VMALLOC_END_XEN_RHEL4; -+ machdep->machspec->modules_vaddr = MODULES_VADDR_XEN_RHEL4; -+ machdep->machspec->modules_end = MODULES_END_XEN_RHEL4; -+ break; -+ } - machdep->kvbase = (ulong)PAGE_OFFSET; - machdep->identity_map_base = (ulong)PAGE_OFFSET; - machdep->is_kvaddr = x86_64_is_kvaddr; -@@ -111,7 +214,6 @@ - machdep->eframe_search = x86_64_eframe_search; - machdep->back_trace = x86_64_low_budget_back_trace_cmd; - machdep->processor_speed = x86_64_processor_speed; -- machdep->uvtop = x86_64_uvtop; - machdep->kvtop = x86_64_kvtop; - machdep->get_task_pgd = x86_64_get_task_pgd; - machdep->get_stack_frame = x86_64_get_stack_frame; -@@ -126,6 +228,12 @@ - machdep->line_number_hooks = x86_64_line_number_hooks; - machdep->value_to_symbol = generic_machdep_value_to_symbol; - machdep->init_kernel_pgd = x86_64_init_kernel_pgd; -+ machdep->clear_machdep_cache = x86_64_clear_machdep_cache; -+ machdep->xendump_p2m_create = x86_64_xendump_p2m_create; -+ machdep->get_xendump_regs = x86_64_get_xendump_regs; -+ machdep->xen_kdump_p2m_create = x86_64_xen_kdump_p2m_create; -+ machdep->xendump_panic_task = x86_64_xendump_panic_task; -+ x86_64_calc_phys_base(); - break; - - case POST_GDB: -@@ -158,16 +266,49 @@ - if ((machdep->machspec->irqstack = (char *) - malloc(machdep->machspec->stkinfo.isize)) == NULL) - error(FATAL, "cannot malloc irqstack space."); -- if (symbol_exists("irq_desc")) -- ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, -- "irq_desc", NULL, 0); -- else -- machdep->nr_irqs = 224; /* NR_IRQS (at least) */ -+ if (symbol_exists("irq_desc")) { -+ if (LKCD_KERNTYPES()) -+ ARRAY_LENGTH_INIT_ALT(machdep->nr_irqs, -+ "irq_desc", "kernel_stat.irqs", NULL, 0); -+ else -+ ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, -+ "irq_desc", NULL, 0); -+ } else -+ machdep->nr_irqs = 224; /* NR_IRQS (at least) */ - machdep->vmalloc_start = x86_64_vmalloc_start; - machdep->dump_irq = x86_64_dump_irq; -- machdep->hz = HZ; -- if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -- machdep->hz = 1000; -+ if (!machdep->hz) { -+ machdep->hz = HZ; -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -+ machdep->hz = 1000; -+ } -+ machdep->section_size_bits = _SECTION_SIZE_BITS; -+ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; -+ if (XEN()) { -+ if (kt->xen_flags & WRITABLE_PAGE_TABLES) { -+ switch (machdep->flags & VM_FLAGS) -+ { -+ case VM_XEN: -+ machdep->uvtop = x86_64_uvtop_level4_xen_wpt; -+ break; -+ case VM_XEN_RHEL4: -+ machdep->uvtop = x86_64_uvtop_level4_rhel4_xen_wpt; -+ break; -+ } -+ } else -+ machdep->uvtop = x86_64_uvtop_level4; -+ 
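			/*
			 * With writable page tables the guest's page tables
			 * hold machine addresses, so the *_xen_wpt uvtop
			 * variants fold an xen_m2p() machine-to-physical
			 * lookup into each level of the walk; without that
			 * flag the plain 4-level walker is sufficient.
			 */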
MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs, -+ "vcpu_guest_context", "user_regs"); -+ ASSIGN_OFFSET(cpu_user_regs_rsp) = -+ MEMBER_OFFSET("cpu_user_regs", "ss") - sizeof(ulong); -+ ASSIGN_OFFSET(cpu_user_regs_rip) = -+ MEMBER_OFFSET("cpu_user_regs", "cs") - sizeof(ulong); -+ } -+ x86_64_irq_eframe_link_init(); -+ break; -+ -+ case POST_VM: -+ init_unwind_table(); - break; - - case POST_INIT: -@@ -191,10 +332,24 @@ - fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); - if (machdep->flags & PT_REGS_INIT) - fprintf(fp, "%sPT_REGS_INIT", others++ ? "|" : ""); -- if (machdep->flags & SYSRQ) -- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); - if (machdep->flags & MACHDEP_BT_TEXT) - fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); -+ if (machdep->flags & VM_ORIG) -+ fprintf(fp, "%sVM_ORIG", others++ ? "|" : ""); -+ if (machdep->flags & VM_2_6_11) -+ fprintf(fp, "%sVM_2_6_11", others++ ? "|" : ""); -+ if (machdep->flags & VM_XEN) -+ fprintf(fp, "%sVM_XEN", others++ ? "|" : ""); -+ if (machdep->flags & VM_XEN_RHEL4) -+ fprintf(fp, "%sVM_XEN_RHEL4", others++ ? "|" : ""); -+ if (machdep->flags & NO_TSS) -+ fprintf(fp, "%sNO_TSS", others++ ? "|" : ""); -+ if (machdep->flags & SCHED_TEXT) -+ fprintf(fp, "%sSCHED_TEXT", others++ ? "|" : ""); -+ if (machdep->flags & PHYS_BASE) -+ fprintf(fp, "%sPHYS_BASE", others++ ? "|" : ""); -+ if (machdep->flags & FRAMESIZE_DEBUG) -+ fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : ""); - fprintf(fp, ")\n"); - - fprintf(fp, " kvbase: %lx\n", machdep->kvbase); -@@ -215,13 +370,32 @@ - fprintf(fp, " back_trace: x86_64_back_trace_cmd()\n"); - else if (machdep->back_trace == x86_64_low_budget_back_trace_cmd) - fprintf(fp, -- " back_trace: x86_64_low_budget_back_trace_cmd()\n"); -+ " back_trace: x86_64_low_budget_back_trace_cmd() %s\n", -+ kt->flags & DWARF_UNWIND ? -+ "-> x86_64_dwarf_back_trace_cmd()" : ""); -+ else if (machdep->back_trace == x86_64_dwarf_back_trace_cmd) -+ fprintf(fp, -+ " back_trace: x86_64_dwarf_back_trace_cmd() %s\n", -+ kt->flags & DWARF_UNWIND ? 
-+ "" : "->x86_64_low_budget_back_trace_cmd()"); - else - fprintf(fp, " back_trace: %lx\n", - (ulong)machdep->back_trace); - fprintf(fp, " processor_speed: x86_64_processor_speed()\n"); -- fprintf(fp, " uvtop: x86_64_uvtop()\n"); -- fprintf(fp, " kvtop: x86_64_kvtop()\n"); -+ if (machdep->uvtop == x86_64_uvtop) -+ fprintf(fp, " uvtop: x86_64_uvtop()\n"); -+ else if (machdep->uvtop == x86_64_uvtop_level4) -+ fprintf(fp, " uvtop: x86_64_uvtop_level4()\n"); -+ else if (machdep->uvtop == x86_64_uvtop_level4_xen_wpt) -+ fprintf(fp, " uvtop: x86_64_uvtop_level4_xen_wpt()\n"); -+ else if (machdep->uvtop == x86_64_uvtop_level4_rhel4_xen_wpt) -+ fprintf(fp, " uvtop: x86_64_uvtop_level4_rhel4_xen_wpt()\n"); -+ else -+ fprintf(fp, " uvtop: %lx\n", (ulong)machdep->uvtop); -+ fprintf(fp, " kvtop: x86_64_kvtop()"); -+ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) -+ fprintf(fp, " -> x86_64_kvtop_xen_wpt()"); -+ fprintf(fp, "\n"); - fprintf(fp, " get_task_pgd: x86_64_get_task_pgd()\n"); - fprintf(fp, " dump_irq: x86_64_dump_irq()\n"); - fprintf(fp, " get_stack_frame: x86_64_get_stack_frame()\n"); -@@ -239,6 +413,11 @@ - fprintf(fp, " is_uvaddr: x86_64_is_uvaddr()\n"); - fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); - fprintf(fp, " init_kernel_pgd: x86_64_init_kernel_pgd()\n"); -+ fprintf(fp, "clear_machdep_cache: x86_64_clear_machdep_cache()\n"); -+ fprintf(fp, " xendump_p2m_create: x86_64_xendump_p2m_create()\n"); -+ fprintf(fp, " get_xendump_regs: x86_64_get_xendump_regs()\n"); -+ fprintf(fp, " xendump_panic_task: x86_64_xendump_panic_task()\n"); -+ fprintf(fp, "xen_kdump_p2m_create: x86_64_xen_kdump_p2m_create()\n"); - fprintf(fp, " line_number_hooks: x86_64_line_number_hooks\n"); - fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); - fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); -@@ -248,9 +427,29 @@ - fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); - fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); - fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); -- fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); -+ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); -+ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); -+ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); -+ -+ fprintf(fp, " machspec: %016lx\n", (ulong)machdep->machspec); -+ fprintf(fp, " userspace_top: %016lx\n", (ulong)ms->userspace_top); -+ fprintf(fp, " page_offset: %016lx\n", (ulong)ms->page_offset); -+ fprintf(fp, " vmalloc_start_addr: %016lx\n", (ulong)ms->vmalloc_start_addr); -+ fprintf(fp, " vmalloc_end: %016lx\n", (ulong)ms->vmalloc_end); -+ fprintf(fp, " modules_vaddr: %016lx\n", (ulong)ms->modules_vaddr); -+ fprintf(fp, " modules_end: %016lx\n", (ulong)ms->modules_end); -+ fprintf(fp, " phys_base: %lx\n", (ulong)ms->phys_base); - fprintf(fp, " pml4: %lx\n", (ulong)ms->pml4); -+ fprintf(fp, " last_pml4_read: %lx\n", (ulong)ms->last_pml4_read); -+ if (ms->upml) { -+ fprintf(fp, " upml: %lx\n", (ulong)ms->upml); -+ fprintf(fp, " last_upml_read: %lx\n", (ulong)ms->last_upml_read); -+ } else { -+ fprintf(fp, " upml: (unused)\n"); -+ fprintf(fp, " last_upml_read: (unused)\n"); -+ } - fprintf(fp, " irqstack: %lx\n", (ulong)ms->irqstack); -+ fprintf(fp, " irq_eframe_link: %ld\n", ms->irq_eframe_link); - fprintf(fp, " pto: %s", - machdep->flags & PT_REGS_INIT ? 
"\n" : "(uninitialized)\n"); - if (machdep->flags & PT_REGS_INIT) { -@@ -276,8 +475,10 @@ - fprintf(fp, " rsp: %ld\n", ms->pto.rsp); - fprintf(fp, " ss: %ld\n", ms->pto.ss); - } -- fprintf(fp, " stkinfo: esize: %d isize: %d\n", -- ms->stkinfo.esize, ms->stkinfo.isize); -+ fprintf(fp, " stkinfo: esize: %d%sisize: %d\n", -+ ms->stkinfo.esize, -+ machdep->flags & NO_TSS ? " (NO TSS) " : " ", -+ ms->stkinfo.isize); - fprintf(fp, " ebase[%s][7]:", - arg ? "NR_CPUS" : "cpus"); - cpus = arg ? NR_CPUS : kt->cpus; -@@ -306,9 +507,9 @@ - static void - x86_64_cpu_pda_init(void) - { -- int i, cpus, nr_pda, cpunumber; -+ int i, cpus, nr_pda, cpunumber, _cpu_pda; - char *cpu_pda_buf; -- ulong level4_pgt, data_offset; -+ ulong level4_pgt, data_offset, cpu_pda_addr; - struct syment *sp, *nsp; - ulong offset, istacksize; - -@@ -320,18 +521,44 @@ - MEMBER_OFFSET_INIT(x8664_pda_irqstackptr, "x8664_pda", "irqstackptr"); - MEMBER_OFFSET_INIT(x8664_pda_level4_pgt, "x8664_pda", "level4_pgt"); - MEMBER_OFFSET_INIT(x8664_pda_cpunumber, "x8664_pda", "cpunumber"); -+ MEMBER_OFFSET_INIT(x8664_pda_me, "x8664_pda", "me"); - - cpu_pda_buf = GETBUF(SIZE(x8664_pda)); - -- if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) -- nr_pda = NR_CPUS; -+ if (LKCD_KERNTYPES()) { -+ if (symbol_exists("_cpu_pda")) -+ _cpu_pda = TRUE; -+ else -+ _cpu_pda = FALSE; -+ nr_pda = get_cpus_possible(); -+ } else { -+ if (symbol_exists("_cpu_pda")) { -+ if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0))) -+ nr_pda = NR_CPUS; -+ _cpu_pda = TRUE; -+ } else { -+ if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) -+ nr_pda = NR_CPUS; -+ _cpu_pda = FALSE; -+ } -+ } - - for (i = cpus = 0; i < nr_pda; i++) { -- if (!CPU_PDA_READ(i, cpu_pda_buf)) -- break; -- level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); -+ if (_cpu_pda) { -+ if (!_CPU_PDA_READ(i, cpu_pda_buf)) -+ break; -+ } else { -+ if (!CPU_PDA_READ(i, cpu_pda_buf)) -+ break; -+ } -+ -+ if (VALID_MEMBER(x8664_pda_level4_pgt)) { -+ level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); -+ if (!VALID_LEVEL4_PGT_ADDR(level4_pgt)) -+ break; -+ } - cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber)); -- if (!VALID_LEVEL4_PGT_ADDR(level4_pgt) || (cpunumber != cpus)) -+ if (cpunumber != cpus) - break; - cpus++; - -@@ -351,8 +578,8 @@ - i, level4_pgt, data_offset); - } - -- -- if ((i = get_array_length("boot_cpu_stack", NULL, 0))) { -+ if (!LKCD_KERNTYPES() && -+ (i = get_array_length("boot_cpu_stack", NULL, 0))) { - istacksize = i; - } else if ((sp = symbol_search("boot_cpu_stack")) && - (nsp = next_symbol(NULL, sp))) { -@@ -381,8 +608,9 @@ - * the address of &boot_cpu_stack[0]. 
- */ - sp = value_search(machdep->machspec->stkinfo.ibase[0], &offset); -- if (!sp || offset || !STREQ(sp->name, "boot_cpu_stack")) { -- if (symbol_value("boot_cpu_stack")) { -+ nsp = symbol_search("boot_cpu_stack"); -+ if (!sp || offset || !nsp || (sp->value != nsp->value)) { -+ if (symbol_exists("boot_cpu_stack")) { - error(WARNING, - "cpu 0 IRQ stack: %lx\n boot_cpu_stack: %lx\n\n", - machdep->machspec->stkinfo.ibase[0], -@@ -448,6 +676,13 @@ - if (ms->stkinfo.ebase[c][0] == 0) - break; - } -+ } else if (!symbol_exists("boot_exception_stacks")) { -+ machdep->flags |= NO_TSS; -+ -+ if (CRASHDEBUG(1)) -+ error(NOTE, "CONFIG_X86_NO_TSS\n"); -+ -+ return; - } - - if (ms->stkinfo.ebase[0][0] && ms->stkinfo.ebase[0][1]) -@@ -535,6 +770,10 @@ - if (clues >= 2) - kt->cpu_flags[c] |= NMI; - } -+ -+ if (symbol_exists("__sched_text_start") && -+ (symbol_value("__sched_text_start") == symbol_value("schedule"))) -+ machdep->flags |= SCHED_TEXT; - } - - /* -@@ -576,7 +815,7 @@ - ulong x86_64_VTOP(ulong vaddr) - { - if (vaddr >= __START_KERNEL_map) -- return ((vaddr) - (ulong)__START_KERNEL_map); -+ return ((vaddr) - (ulong)__START_KERNEL_map + machdep->machspec->phys_base); - else - return ((vaddr) - PAGE_OFFSET); - } -@@ -584,12 +823,19 @@ - /* - * Include both vmalloc'd and module address space as VMALLOC space. - */ --int x86_64_IS_VMALLOC_ADDR(ulong vaddr) -+int -+x86_64_IS_VMALLOC_ADDR(ulong vaddr) - { - return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END) || - (vaddr >= MODULES_VADDR && vaddr <= MODULES_END)); - } - -+static int -+x86_64_is_module_addr(ulong vaddr) -+{ -+ return (vaddr >= MODULES_VADDR && vaddr <= MODULES_END); -+} -+ - /* - * Refining this may cause more problems than just doing it this way. - */ -@@ -616,43 +862,52 @@ - */ - - static int --x86_64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) -+x86_64_uvtop_level4(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) - { -- ulong mm; -- ulong *pgd; -+ ulong mm; -+ ulong *pml; -+ ulong pml_paddr; -+ ulong pml_pte; -+ ulong *pgd; - ulong pgd_paddr; - ulong pgd_pte; - ulong *pmd; - ulong pmd_paddr; - ulong pmd_pte; -- ulong *ptep; -- ulong pte_paddr; -- ulong pte; -- physaddr_t physpage; -+ ulong *ptep; -+ ulong pte_paddr; -+ ulong pte; -+ physaddr_t physpage; - -- if (!tc) -- error(FATAL, "current context invalid\n"); -+ if (!tc) -+ error(FATAL, "current context invalid\n"); - -- *paddr = 0; -+ *paddr = 0; - -- if (IS_KVADDR(uvaddr)) -- return x86_64_kvtop(tc, uvaddr, paddr, verbose); -+ if (IS_KVADDR(uvaddr)) -+ return x86_64_kvtop(tc, uvaddr, paddr, verbose); - -- /* -- * pgd = pgd_offset(mm, address); -- */ -- if ((mm = task_mm(tc->task, TRUE))) -- pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); -- else -- readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, -- sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); -+ if ((mm = task_mm(tc->task, TRUE))) -+ pml = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); -+ else -+ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pml, -+ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); - -- pgd_paddr = x86_64_VTOP((ulong)pgd); -- FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); -+ pml_paddr = x86_64_VTOP((ulong)pml); -+ FILL_UPML(pml_paddr, PHYSADDR, PAGESIZE()); -+ pml = ((ulong *)pml_paddr) + pml4_index(uvaddr); -+ pml_pte = ULONG(machdep->machspec->upml + PAGEOFFSET(pml)); -+ if (verbose) -+ fprintf(fp, " PML: %lx => %lx\n", (ulong)pml, pml_pte); -+ if (!(pml_pte & _PAGE_PRESENT)) -+ goto no_upage; -+ -+ pgd_paddr = 
pml_pte & PHYSICAL_PAGE_MASK; -+ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); - pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); - pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); -- if (verbose) -- fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd, pgd_pte); -+ if (verbose) -+ fprintf(fp, " PUD: %lx => %lx\n", (ulong)pgd, pgd_pte); - if (!(pgd_pte & _PAGE_PRESENT)) - goto no_upage; - -@@ -682,29 +937,31 @@ - - /* - * ptep = pte_offset_map(pmd, address); -- * pte = *ptep; -+ * pte = *ptep; - */ -- pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; -- FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); -- ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); -- pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); -- if (verbose) -- fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); -- if (!(pte & (_PAGE_PRESENT))) { -- if (pte && verbose) { -- fprintf(fp, "\n"); -- x86_64_translate_pte(pte, 0, 0); -- } -- goto no_upage; -- } -+ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; -+ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); -+ ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); -+ pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); -+ if (!(pte & (_PAGE_PRESENT))) { -+ *paddr = pte; -+ -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ x86_64_translate_pte(pte, 0, 0); -+ } -+ goto no_upage; -+ } - -- *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); -+ *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); - -- if (verbose) { -- fprintf(fp, " PAGE: %lx\n\n", -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx\n\n", - PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); -- x86_64_translate_pte(pte, 0, 0); -- } -+ x86_64_translate_pte(pte, 0, 0); -+ } - - return TRUE; - -@@ -713,1982 +970,4769 @@ - return FALSE; - } - -- --/* -- * Translates a kernel virtual address to its physical address. cmd_vtop() -- * sets the verbose flag so that the pte translation gets displayed; all -- * other callers quietly accept the translation. 
-- */ - static int --x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+x86_64_uvtop_level4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) - { -- ulong *pml4; -- ulong *pgd; -+ ulong mm; -+ ulong *pml; -+ ulong pml_paddr; -+ ulong pml_pte; -+ ulong *pgd; - ulong pgd_paddr; - ulong pgd_pte; - ulong *pmd; - ulong pmd_paddr; - ulong pmd_pte; -+ ulong pseudo_pmd_pte; - ulong *ptep; - ulong pte_paddr; - ulong pte; -+ ulong pseudo_pte; - physaddr_t physpage; -+ char buf[BUFSIZE]; - -- if (!IS_KVADDR(kvaddr)) -- return FALSE; -+ if (!tc) -+ error(FATAL, "current context invalid\n"); - -- if (!vt->vmalloc_start) { -- *paddr = x86_64_VTOP(kvaddr); -- return TRUE; -- } -+ *paddr = 0; - -- if (!IS_VMALLOC_ADDR(kvaddr)) { -- *paddr = x86_64_VTOP(kvaddr); -- if (!verbose) -- return TRUE; -- } -- -- /* -- * pgd = pgd_offset_k(addr); -- */ -- FILL_PML4(); -- pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); -- if (verbose) { -- fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); -- fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4); -- } -- if (!(*pml4) & _PAGE_PRESENT) -- goto no_kpage; -- pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; -+ if (IS_KVADDR(uvaddr)) -+ return x86_64_kvtop(tc, uvaddr, paddr, verbose); -+ -+ if ((mm = task_mm(tc->task, TRUE))) -+ pml = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); -+ else -+ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pml, -+ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); -+ -+ pml_paddr = x86_64_VTOP((ulong)pml); -+ FILL_UPML(pml_paddr, PHYSADDR, PAGESIZE()); -+ pml = ((ulong *)pml_paddr) + pml4_index(uvaddr); -+ pml_pte = ULONG(machdep->machspec->upml + PAGEOFFSET(pml)); -+ if (verbose) -+ fprintf(fp, " PML: %lx => %lx [machine]\n", (ulong)pml, pml_pte); -+ if (!(pml_pte & _PAGE_PRESENT)) -+ goto no_upage; -+ -+ pgd_paddr = pml_pte & PHYSICAL_PAGE_MASK; -+ pgd_paddr = xen_m2p(pgd_paddr); -+ if (verbose) -+ fprintf(fp, " PML: %lx\n", pgd_paddr); - FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); -- pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); -+ pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); - pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); -- if (verbose) -- fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd, pgd_pte); -+ if (verbose) -+ fprintf(fp, " PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); - if (!(pgd_pte & _PAGE_PRESENT)) -- goto no_kpage; -+ goto no_upage; - - /* -- * pmd = pmd_offset(pgd, addr); -+ * pmd = pmd_offset(pgd, address); - */ - pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; -+ pmd_paddr = xen_m2p(pmd_paddr); -+ if (verbose) -+ fprintf(fp, " PUD: %lx\n", pmd_paddr); - FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); -- pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr); -+ pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); - pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); - if (verbose) -- fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); -+ fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); - if (!(pmd_pte & _PAGE_PRESENT)) -- goto no_kpage; -- if (pmd_pte & _PAGE_PSE) { -- if (verbose) { -- fprintf(fp, " PAGE: %lx (2MB)\n\n", -+ goto no_upage; -+ if (pmd_pte & _PAGE_PSE) { -+ if (verbose) -+ fprintf(fp, " PAGE: %lx (2MB) [machine]\n", - PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); -- x86_64_translate_pte(pmd_pte, 0, 0); -+ -+ pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); -+ -+ if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { -+ if (verbose) -+ fprintf(fp, " PAGE: page not available\n"); -+ *paddr = PADDR_NOT_AVAILABLE; -+ return FALSE; - } - -- 
physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + -- (kvaddr & ~_2MB_PAGE_MASK); -+ pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %s (2MB)\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(PAGEBASE(pseudo_pmd_pte) & -+ PHYSICAL_PAGE_MASK))); -+ -+ x86_64_translate_pte(pseudo_pmd_pte, 0, 0); -+ } -+ -+ physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + -+ (uvaddr & ~_2MB_PAGE_MASK); -+ - *paddr = physpage; - return TRUE; -- } -+ } - -- /* -- * ptep = pte_offset_map(pmd, addr); -+ /* -+ * ptep = pte_offset_map(pmd, address); - * pte = *ptep; - */ - pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; -+ pte_paddr = xen_m2p(pte_paddr); -+ if (verbose) -+ fprintf(fp, " PMD: %lx\n", pte_paddr); - FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); -- ptep = ((ulong *)pte_paddr) + pte_index(kvaddr); -+ ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); - pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); -- if (verbose) -- fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); -- if (!(pte & (_PAGE_PRESENT))) { -- if (pte && verbose) { -- fprintf(fp, "\n"); -- x86_64_translate_pte(pte, 0, 0); -- } -- goto no_kpage; -- } -- -- *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); -- -- if (verbose) { -- fprintf(fp, " PAGE: %lx\n\n", -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); -+ if (!(pte & (_PAGE_PRESENT))) { -+ *paddr = pte; -+ -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ x86_64_translate_pte(pte, 0, 0); -+ } -+ goto no_upage; -+ } -+ -+ pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); -+ if (verbose) -+ fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); -+ -+ *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx [machine]\n", -+ PAGEBASE(pte) & PHYSICAL_PAGE_MASK); -+ fprintf(fp, " PAGE: %lx\n\n", - PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); -- x86_64_translate_pte(pte, 0, 0); -- } -+ x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); -+ } - -- return TRUE; -+ return TRUE; - --no_kpage: -- return FALSE; --} -+no_upage: - --/* -- * Determine where vmalloc'd memory starts. -- */ --static ulong --x86_64_vmalloc_start(void) --{ -- return ((ulong)VMALLOC_START); -+ return FALSE; - } - --/* -- * thread_info implementation makes for less accurate results here. -- */ - static int --x86_64_is_task_addr(ulong task) -+x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) - { -- if (tt->flags & THREAD_INFO) -- return IS_KVADDR(task); -- else -- return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); --} -- -+ ulong mm; -+ ulong *pgd; -+ ulong pgd_paddr; -+ ulong pgd_pte; -+ ulong *pmd; -+ ulong pmd_paddr; -+ ulong pmd_pte; -+ ulong pseudo_pmd_pte; -+ ulong *ptep; -+ ulong pte_paddr; -+ ulong pte; -+ ulong pseudo_pte; -+ physaddr_t physpage; -+ char buf[BUFSIZE]; - --/* -- * easy enough... 
-- */ --static ulong --x86_64_processor_speed(void) --{ -- unsigned long cpu_khz; -+ if (!tc) -+ error(FATAL, "current context invalid\n"); - -- if (machdep->mhz) -- return (machdep->mhz); -+ *paddr = 0; - -- if (symbol_exists("cpu_khz")) { -- get_symbol_data("cpu_khz", sizeof(long), &cpu_khz); -- if (cpu_khz) -- return(machdep->mhz = cpu_khz/1000); -- } -+ if (IS_KVADDR(uvaddr)) -+ return x86_64_kvtop(tc, uvaddr, paddr, verbose); - -- return 0; --} -+ if ((mm = task_mm(tc->task, TRUE))) -+ pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); -+ else -+ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, -+ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); - -+ pgd_paddr = x86_64_VTOP((ulong)pgd); -+ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); -+ pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); -+ if (verbose) -+ fprintf(fp, " PGD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); -+ if (!(pgd_pte & _PAGE_PRESENT)) -+ goto no_upage; - --/* -- * Accept or reject a symbol from the kernel namelist. -- */ --static int --x86_64_verify_symbol(const char *name, ulong value, char type) --{ -- if (STREQ(name, "_text") || STREQ(name, "_stext")) -- machdep->flags |= KSYMS_START; -+ /* -+ * pmd = pmd_offset(pgd, address); -+ */ -+ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; -+ pmd_paddr = xen_m2p(pmd_paddr); -+ if (verbose) -+ fprintf(fp, " PGD: %lx\n", pmd_paddr); -+ FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); -+ pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); -+ if (!(pmd_pte & _PAGE_PRESENT)) -+ goto no_upage; -+ if (pmd_pte & _PAGE_PSE) { -+ if (verbose) -+ fprintf(fp, " PAGE: %lx (2MB) [machine]\n", -+ PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); - -- if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) -- return FALSE; -+ pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); -+ -+ if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { -+ if (verbose) -+ fprintf(fp, " PAGE: page not available\n"); -+ *paddr = PADDR_NOT_AVAILABLE; -+ return FALSE; -+ } -+ -+ pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %s (2MB)\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(PAGEBASE(pseudo_pmd_pte) & -+ PHYSICAL_PAGE_MASK))); -+ -+ x86_64_translate_pte(pseudo_pmd_pte, 0, 0); -+ } -+ -+ physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + -+ (uvaddr & ~_2MB_PAGE_MASK); -+ -+ *paddr = physpage; -+ return TRUE; -+ } -+ -+ /* -+ * ptep = pte_offset_map(pmd, address); -+ * pte = *ptep; -+ */ -+ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; -+ pte_paddr = xen_m2p(pte_paddr); -+ if (verbose) -+ fprintf(fp, " PMD: %lx\n", pte_paddr); -+ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); -+ ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); -+ pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); -+ if (!(pte & (_PAGE_PRESENT))) { -+ *paddr = pte; -+ -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ x86_64_translate_pte(pte, 0, 0); -+ } -+ goto no_upage; -+ } -+ -+ pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); -+ if (verbose) -+ fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); -+ -+ *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx [machine]\n", -+ PAGEBASE(pte) & PHYSICAL_PAGE_MASK); -+ fprintf(fp, " PAGE: %lx\n\n", -+ PAGEBASE(*paddr) & 
PHYSICAL_PAGE_MASK); -+ x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); -+ } - - return TRUE; --} - -+no_upage: - --/* -- * Get the relevant page directory pointer from a task structure. -- */ --static ulong --x86_64_get_task_pgd(ulong task) --{ -- return (error(FATAL, "x86_64_get_task_pgd: N/A\n")); -+ return FALSE; - } - -- --/* -- * Translate a PTE, returning TRUE if the page is present. -- * If a physaddr pointer is passed in, don't print anything. -- */ - static int --x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused) -+x86_64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) - { -- int c, others, len1, len2, len3; -- ulong paddr; -- char buf[BUFSIZE]; -- char buf2[BUFSIZE]; -- char buf3[BUFSIZE]; -- char ptebuf[BUFSIZE]; -- char physbuf[BUFSIZE]; -- char *arglist[MAXARGS]; -- int page_present; -+ ulong mm; -+ ulong *pgd; -+ ulong pgd_paddr; -+ ulong pgd_pte; -+ ulong *pmd; -+ ulong pmd_paddr; -+ ulong pmd_pte; -+ ulong *ptep; -+ ulong pte_paddr; -+ ulong pte; -+ physaddr_t physpage; - -- paddr = pte & PHYSICAL_PAGE_MASK; -- page_present = pte & _PAGE_PRESENT; -+ if (!tc) -+ error(FATAL, "current context invalid\n"); - -- if (physaddr) { -- *((ulong *)physaddr) = paddr; -- return page_present; -- } -- -- sprintf(ptebuf, "%lx", pte); -- len1 = MAX(strlen(ptebuf), strlen("PTE")); -- fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); -+ *paddr = 0; - -- if (!page_present && pte) { -- swap_location(pte, buf); -- if ((c = parse_line(buf, arglist)) != 3) -- error(FATAL, "cannot determine swap location\n"); -+ if (IS_KVADDR(uvaddr)) -+ return x86_64_kvtop(tc, uvaddr, paddr, verbose); - -- len2 = MAX(strlen(arglist[0]), strlen("SWAP")); -- len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); -+ /* -+ * pgd = pgd_offset(mm, address); -+ */ -+ if ((mm = task_mm(tc->task, TRUE))) -+ pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); -+ else -+ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, -+ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); - -- fprintf(fp, "%s %s\n", -- mkstring(buf2, len2, CENTER|LJUST, "SWAP"), -- mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); -+ pgd_paddr = x86_64_VTOP((ulong)pgd); -+ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); -+ pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); -+ if (verbose) -+ fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd, pgd_pte); -+ if (!(pgd_pte & _PAGE_PRESENT)) -+ goto no_upage; - -- strcpy(buf2, arglist[0]); -- strcpy(buf3, arglist[2]); -- fprintf(fp, "%s %s %s\n", -- mkstring(ptebuf, len1, CENTER|RJUST, NULL), -- mkstring(buf2, len2, CENTER|RJUST, NULL), -- mkstring(buf3, len3, CENTER|RJUST, NULL)); -+ /* -+ * pmd = pmd_offset(pgd, address); -+ */ -+ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; -+ FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); -+ pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); -+ if (!(pmd_pte & _PAGE_PRESENT)) -+ goto no_upage; -+ if (pmd_pte & _PAGE_PSE) { -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx (2MB)\n\n", -+ PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); -+ x86_64_translate_pte(pmd_pte, 0, 0); -+ } - -- return page_present; -+ physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + -+ (uvaddr & ~_2MB_PAGE_MASK); -+ *paddr = physpage; -+ return TRUE; - } - -- sprintf(physbuf, "%lx", paddr); -- len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); -- fprintf(fp, "%s ", mkstring(buf, len2, 
CENTER|LJUST, "PHYSICAL")); -+ /* -+ * ptep = pte_offset_map(pmd, address); -+ * pte = *ptep; -+ */ -+ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; -+ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); -+ ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); -+ pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); -+ if (!(pte & (_PAGE_PRESENT))) { -+ *paddr = pte; - -- fprintf(fp, "FLAGS\n"); -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ x86_64_translate_pte(pte, 0, 0); -+ } -+ goto no_upage; -+ } - -- fprintf(fp, "%s %s ", -- mkstring(ptebuf, len1, CENTER|RJUST, NULL), -- mkstring(physbuf, len2, CENTER|RJUST, NULL)); -- fprintf(fp, "("); -- others = 0; -+ *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); - -- if (pte) { -- if (pte & _PAGE_PRESENT) -- fprintf(fp, "%sPRESENT", others++ ? "|" : ""); -- if (pte & _PAGE_RW) -- fprintf(fp, "%sRW", others++ ? "|" : ""); -- if (pte & _PAGE_USER) -- fprintf(fp, "%sUSER", others++ ? "|" : ""); -- if (pte & _PAGE_PWT) -- fprintf(fp, "%sPWT", others++ ? "|" : ""); -- if (pte & _PAGE_PCD) -- fprintf(fp, "%sPCD", others++ ? "|" : ""); -- if (pte & _PAGE_ACCESSED) -- fprintf(fp, "%sACCESSED", others++ ? "|" : ""); -- if (pte & _PAGE_DIRTY) -- fprintf(fp, "%sDIRTY", others++ ? "|" : ""); -- if ((pte & _PAGE_PSE) && (pte & _PAGE_PRESENT)) -- fprintf(fp, "%sPSE", others++ ? "|" : ""); -- if ((pte & _PAGE_PROTNONE) && !(pte & _PAGE_PRESENT)) -- fprintf(fp, "%sPROTNONE", others++ ? "|" : ""); -- if (pte & _PAGE_GLOBAL) -- fprintf(fp, "%sGLOBAL", others++ ? "|" : ""); -- if (pte & _PAGE_NX) -- fprintf(fp, "%sNX", others++ ? "|" : ""); -- } else { -- fprintf(fp, "no mapping"); -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx\n\n", -+ PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); -+ x86_64_translate_pte(pte, 0, 0); - } - -- fprintf(fp, ")\n"); -+ return TRUE; - -- return (page_present); -+no_upage: -+ -+ return FALSE; - } - --static char * --x86_64_exception_stacks[7] = { -- "STACKFAULT", -- "DOUBLEFAULT", -- "NMI", -- "DEBUG", -- "MCE", -- "(unknown)", -- "(unknown)" --}; - - /* -- * Look for likely exception frames in a stack. -+ * Translates a kernel virtual address to its physical address. cmd_vtop() -+ * sets the verbose flag so that the pte translation gets displayed; all -+ * other callers quietly accept the translation. 
- */ --static int --x86_64_eframe_search(struct bt_info *bt) -+static int -+x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) - { -- int i, c, cnt; -- ulong estack, irqstack, stacksize; -- ulong *up; -- struct machine_specific *ms; -- struct bt_info bt_local; -+ ulong *pml4; -+ ulong *pgd; -+ ulong pgd_paddr; -+ ulong pgd_pte; -+ ulong *pmd; -+ ulong pmd_paddr; -+ ulong pmd_pte; -+ ulong *ptep; -+ ulong pte_paddr; -+ ulong pte; -+ physaddr_t physpage; - -- if (bt->flags & BT_EFRAME_SEARCH2) { -- BCOPY(bt, &bt_local, sizeof(struct bt_info)); -- bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; -+ if (!IS_KVADDR(kvaddr)) -+ return FALSE; - -- ms = machdep->machspec; -+ if (XEN_HYPER_MODE()) { -+ if (DIRECTMAP_VIRT_ADDR(kvaddr)) { -+ *paddr = kvaddr - DIRECTMAP_VIRT_START; -+ return TRUE; -+ } -+ FILL_PML4_HYPER(); -+ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); -+ if (verbose) { -+ fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4); -+ } -+ } else { -+ if (!vt->vmalloc_start) { -+ *paddr = x86_64_VTOP(kvaddr); -+ return TRUE; -+ } - -- for (c = 0; c < kt->cpus; c++) { -- if (ms->stkinfo.ibase[c] == 0) -- break; -- bt->hp->esp = ms->stkinfo.ibase[c]; -- fprintf(fp, "CPU %d IRQ STACK:\n", c); -- if ((cnt = x86_64_eframe_search(bt))) -- fprintf(fp, "\n"); -- else -- fprintf(fp, "(none found)\n\n"); -+ if (!IS_VMALLOC_ADDR(kvaddr)) { -+ *paddr = x86_64_VTOP(kvaddr); -+ if (!verbose) -+ return TRUE; -+ } -+ -+ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) -+ return (x86_64_kvtop_xen_wpt(tc, kvaddr, paddr, verbose)); -+ -+ /* -+ * pgd = pgd_offset_k(addr); -+ */ -+ FILL_PML4(); -+ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); -+ if (verbose) { -+ fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4); -+ } -+ } -+ if (!(*pml4) & _PAGE_PRESENT) -+ goto no_kpage; -+ pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; -+ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); -+ pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); -+ if (verbose) -+ fprintf(fp, " PUD: %lx => %lx\n", (ulong)pgd, pgd_pte); -+ if (!(pgd_pte & _PAGE_PRESENT)) -+ goto no_kpage; -+ -+ /* -+ * pmd = pmd_offset(pgd, addr); -+ */ -+ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; -+ FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); -+ pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); -+ if (!(pmd_pte & _PAGE_PRESENT)) -+ goto no_kpage; -+ if (pmd_pte & _PAGE_PSE) { -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx (2MB)\n\n", -+ PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); -+ x86_64_translate_pte(pmd_pte, 0, 0); - } - -- for (c = 0; c < kt->cpus; c++) { -- for (i = 0; i < 7; i++) { -- if (ms->stkinfo.ebase[c][i] == 0) -- break; -- bt->hp->esp = ms->stkinfo.ebase[c][i]; -- fprintf(fp, "CPU %d %s EXCEPTION STACK:\n", -- c, x86_64_exception_stacks[i]); -- if ((cnt = x86_64_eframe_search(bt))) -- fprintf(fp, "\n"); -- else -- fprintf(fp, "(none found)\n\n"); -- } -- } -+ physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + -+ (kvaddr & ~_2MB_PAGE_MASK); -+ *paddr = physpage; -+ return TRUE; -+ } - -- return 0; -+ /* -+ * ptep = pte_offset_map(pmd, addr); -+ * pte = *ptep; -+ */ -+ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; -+ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); -+ ptep = ((ulong *)pte_paddr) + pte_index(kvaddr); -+ pte = 
ULONG(machdep->ptbl + PAGEOFFSET(ptep)); -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); -+ if (!(pte & (_PAGE_PRESENT))) { -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ x86_64_translate_pte(pte, 0, 0); -+ } -+ goto no_kpage; - } - -- if (bt->hp && bt->hp->esp) { -- ms = machdep->machspec; -- bt->stkptr = bt->hp->esp; -- if ((estack = x86_64_in_exception_stack(bt))) { -- stacksize = ms->stkinfo.esize; -- bt->stackbase = estack; -- bt->stacktop = estack + ms->stkinfo.esize; -- bt->stackbuf = ms->irqstack; -- alter_stackbuf(bt); -- } else if ((irqstack = x86_64_in_irqstack(bt))) { -- stacksize = ms->stkinfo.isize; -- bt->stackbase = irqstack; -- bt->stacktop = irqstack + ms->stkinfo.isize; -- bt->stackbuf = ms->irqstack; -- alter_stackbuf(bt); -- } else if (!INSTACK(bt->stkptr, bt)) -- error(FATAL, -- "unrecognized stack address for this task: %lx\n", -- bt->hp->esp); -- } -+ *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); - -- stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx\n\n", -+ PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); -+ x86_64_translate_pte(pte, 0, 0); -+ } - -- if (bt->stkptr) -- i = (bt->stkptr - bt->stackbase)/sizeof(ulong); -- else -- i = 0; -+ return TRUE; - -- for (cnt = 0; i <= stacksize/sizeof(ulong); i++) { -- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); -+no_kpage: -+ return FALSE; -+} - -- if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT| -- EFRAME_VERIFY, 0, (char *)up, bt, fp)) -- cnt++; -+ -+static int -+x86_64_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+{ -+ ulong *pml4; -+ ulong *pgd; -+ ulong pgd_paddr; -+ ulong pgd_pte; -+ ulong *pmd; -+ ulong pmd_paddr; -+ ulong pmd_pte; -+ ulong pseudo_pmd_pte; -+ ulong *ptep; -+ ulong pte_paddr; -+ ulong pte; -+ ulong pseudo_pte; -+ physaddr_t physpage; -+ char buf[BUFSIZE]; -+ -+ /* -+ * pgd = pgd_offset_k(addr); -+ */ -+ FILL_PML4(); -+ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); -+ if (verbose) { -+ fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); -+ fprintf(fp, "PAGE DIRECTORY: %lx [machine]\n", *pml4); -+ } -+ if (!(*pml4) & _PAGE_PRESENT) -+ goto no_kpage; -+ pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; -+ pgd_paddr = xen_m2p(pgd_paddr); -+ if (verbose) -+ fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd_paddr); -+ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); -+ pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); -+ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); -+ if (verbose) -+ fprintf(fp, " PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); -+ if (!(pgd_pte & _PAGE_PRESENT)) -+ goto no_kpage; -+ -+ /* -+ * pmd = pmd_offset(pgd, addr); -+ */ -+ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; -+ pmd_paddr = xen_m2p(pmd_paddr); -+ if (verbose) -+ fprintf(fp, " PUD: %lx\n", pmd_paddr); -+ FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); -+ pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr); -+ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); -+ if (verbose) -+ fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); -+ if (!(pmd_pte & _PAGE_PRESENT)) -+ goto no_kpage; -+ if (pmd_pte & _PAGE_PSE) { -+ if (verbose) -+ fprintf(fp, " PAGE: %lx (2MB) [machine]\n", -+ PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); -+ -+ pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); -+ -+ if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { -+ if (verbose) -+ fprintf(fp, " PAGE: page not available\n"); -+ *paddr = PADDR_NOT_AVAILABLE; -+ return FALSE; -+ } -+ -+ pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); -+ 
-+ if (verbose) { -+ fprintf(fp, " PAGE: %s (2MB)\n\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, -+ MKSTR(PAGEBASE(pseudo_pmd_pte) & -+ PHYSICAL_PAGE_MASK))); -+ -+ x86_64_translate_pte(pseudo_pmd_pte, 0, 0); -+ } -+ -+ physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + -+ (kvaddr & ~_2MB_PAGE_MASK); -+ -+ *paddr = physpage; -+ return TRUE; - } - -- return cnt; -+ /* -+ * ptep = pte_offset_map(pmd, addr); -+ * pte = *ptep; -+ */ -+ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; -+ pte_paddr = xen_m2p(pte_paddr); -+ if (verbose) -+ fprintf(fp, " PMD: %lx\n", pte_paddr); -+ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); -+ ptep = ((ulong *)pte_paddr) + pte_index(kvaddr); -+ pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); -+ if (verbose) -+ fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); -+ if (!(pte & (_PAGE_PRESENT))) { -+ if (pte && verbose) { -+ fprintf(fp, "\n"); -+ x86_64_translate_pte(pte, 0, 0); -+ } -+ goto no_kpage; -+ } -+ -+ pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); -+ if (verbose) -+ fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); -+ -+ *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); -+ -+ if (verbose) { -+ fprintf(fp, " PAGE: %lx [machine]\n", -+ PAGEBASE(pte) & PHYSICAL_PAGE_MASK); -+ fprintf(fp, " PAGE: %lx\n\n", -+ PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); -+ x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); -+ } -+ -+ return TRUE; -+ -+no_kpage: -+ return FALSE; -+} -+ -+ -+/* -+ * Determine where vmalloc'd memory starts. -+ */ -+static ulong -+x86_64_vmalloc_start(void) -+{ -+ return ((ulong)VMALLOC_START); -+} -+ -+/* -+ * thread_info implementation makes for less accurate results here. -+ */ -+static int -+x86_64_is_task_addr(ulong task) -+{ -+ if (tt->flags & THREAD_INFO) -+ return IS_KVADDR(task); -+ else -+ return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); -+} -+ -+ -+/* -+ * easy enough... -+ */ -+static ulong -+x86_64_processor_speed(void) -+{ -+ unsigned long cpu_khz = 0; -+ -+ if (machdep->mhz) -+ return (machdep->mhz); -+ -+ if (symbol_exists("cpu_khz")) { -+ get_symbol_data("cpu_khz", sizeof(int), &cpu_khz); -+ if (cpu_khz) -+ return(machdep->mhz = cpu_khz/1000); -+ } -+ -+ return 0; -+} -+ -+ -+/* -+ * Accept or reject a symbol from the kernel namelist. -+ */ -+static int -+x86_64_verify_symbol(const char *name, ulong value, char type) -+{ -+ if (STREQ(name, "_text") || STREQ(name, "_stext")) -+ machdep->flags |= KSYMS_START; -+ -+ if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) -+ return FALSE; -+ return TRUE; -+} -+ -+ -+/* -+ * Get the relevant page directory pointer from a task structure. -+ */ -+static ulong -+x86_64_get_task_pgd(ulong task) -+{ -+ return (error(FATAL, "x86_64_get_task_pgd: N/A\n")); -+} -+ -+ -+/* -+ * Translate a PTE, returning TRUE if the page is present. -+ * If a physaddr pointer is passed in, don't print anything. 
-+ */ -+static int -+x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused) -+{ -+ int c, others, len1, len2, len3; -+ ulong paddr; -+ char buf[BUFSIZE]; -+ char buf2[BUFSIZE]; -+ char buf3[BUFSIZE]; -+ char ptebuf[BUFSIZE]; -+ char physbuf[BUFSIZE]; -+ char *arglist[MAXARGS]; -+ int page_present; -+ -+ paddr = pte & PHYSICAL_PAGE_MASK; -+ page_present = pte & _PAGE_PRESENT; -+ -+ if (physaddr) { -+ *((ulong *)physaddr) = paddr; -+ return page_present; -+ } -+ -+ sprintf(ptebuf, "%lx", pte); -+ len1 = MAX(strlen(ptebuf), strlen("PTE")); -+ fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); -+ -+ if (!page_present && pte) { -+ swap_location(pte, buf); -+ if ((c = parse_line(buf, arglist)) != 3) -+ error(FATAL, "cannot determine swap location\n"); -+ -+ len2 = MAX(strlen(arglist[0]), strlen("SWAP")); -+ len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); -+ -+ fprintf(fp, "%s %s\n", -+ mkstring(buf2, len2, CENTER|LJUST, "SWAP"), -+ mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); -+ -+ strcpy(buf2, arglist[0]); -+ strcpy(buf3, arglist[2]); -+ fprintf(fp, "%s %s %s\n", -+ mkstring(ptebuf, len1, CENTER|RJUST, NULL), -+ mkstring(buf2, len2, CENTER|RJUST, NULL), -+ mkstring(buf3, len3, CENTER|RJUST, NULL)); -+ -+ return page_present; -+ } -+ -+ sprintf(physbuf, "%lx", paddr); -+ len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); -+ fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); -+ -+ fprintf(fp, "FLAGS\n"); -+ -+ fprintf(fp, "%s %s ", -+ mkstring(ptebuf, len1, CENTER|RJUST, NULL), -+ mkstring(physbuf, len2, CENTER|RJUST, NULL)); -+ fprintf(fp, "("); -+ others = 0; -+ -+ if (pte) { -+ if (pte & _PAGE_PRESENT) -+ fprintf(fp, "%sPRESENT", others++ ? "|" : ""); -+ if (pte & _PAGE_RW) -+ fprintf(fp, "%sRW", others++ ? "|" : ""); -+ if (pte & _PAGE_USER) -+ fprintf(fp, "%sUSER", others++ ? "|" : ""); -+ if (pte & _PAGE_PWT) -+ fprintf(fp, "%sPWT", others++ ? "|" : ""); -+ if (pte & _PAGE_PCD) -+ fprintf(fp, "%sPCD", others++ ? "|" : ""); -+ if (pte & _PAGE_ACCESSED) -+ fprintf(fp, "%sACCESSED", others++ ? "|" : ""); -+ if (pte & _PAGE_DIRTY) -+ fprintf(fp, "%sDIRTY", others++ ? "|" : ""); -+ if ((pte & _PAGE_PSE) && (pte & _PAGE_PRESENT)) -+ fprintf(fp, "%sPSE", others++ ? "|" : ""); -+ if ((pte & _PAGE_PROTNONE) && !(pte & _PAGE_PRESENT)) -+ fprintf(fp, "%sPROTNONE", others++ ? "|" : ""); -+ if (pte & _PAGE_GLOBAL) -+ fprintf(fp, "%sGLOBAL", others++ ? "|" : ""); -+ if (pte & _PAGE_NX) -+ fprintf(fp, "%sNX", others++ ? "|" : ""); -+ } else { -+ fprintf(fp, "no mapping"); -+ } -+ -+ fprintf(fp, ")\n"); -+ -+ return (page_present); -+} -+ -+static char * -+x86_64_exception_stacks[7] = { -+ "STACKFAULT", -+ "DOUBLEFAULT", -+ "NMI", -+ "DEBUG", -+ "MCE", -+ "(unknown)", -+ "(unknown)" -+}; -+ -+/* -+ * Look for likely exception frames in a stack. 
-+ */ -+static int -+x86_64_eframe_search(struct bt_info *bt) -+{ -+ int i, c, cnt; -+ ulong estack, irqstack, stacksize; -+ ulong *up; -+ struct machine_specific *ms; -+ struct bt_info bt_local; -+ -+ if (bt->flags & BT_EFRAME_SEARCH2) { -+ BCOPY(bt, &bt_local, sizeof(struct bt_info)); -+ bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; -+ -+ ms = machdep->machspec; -+ -+ for (c = 0; c < kt->cpus; c++) { -+ if (ms->stkinfo.ibase[c] == 0) -+ break; -+ bt->hp->esp = ms->stkinfo.ibase[c]; -+ fprintf(fp, "CPU %d IRQ STACK:\n", c); -+ if ((cnt = x86_64_eframe_search(bt))) -+ fprintf(fp, "\n"); -+ else -+ fprintf(fp, "(none found)\n\n"); -+ } -+ -+ for (c = 0; c < kt->cpus; c++) { -+ for (i = 0; i < 7; i++) { -+ if (ms->stkinfo.ebase[c][i] == 0) -+ break; -+ bt->hp->esp = ms->stkinfo.ebase[c][i]; -+ fprintf(fp, "CPU %d %s EXCEPTION STACK:\n", -+ c, x86_64_exception_stacks[i]); -+ if ((cnt = x86_64_eframe_search(bt))) -+ fprintf(fp, "\n"); -+ else -+ fprintf(fp, "(none found)\n\n"); -+ } -+ } -+ -+ return 0; -+ } -+ -+ if (bt->hp && bt->hp->esp) { -+ ms = machdep->machspec; -+ bt->stkptr = bt->hp->esp; -+ if ((estack = x86_64_in_exception_stack(bt))) { -+ stacksize = ms->stkinfo.esize; -+ bt->stackbase = estack; -+ bt->stacktop = estack + ms->stkinfo.esize; -+ bt->stackbuf = ms->irqstack; -+ alter_stackbuf(bt); -+ } else if ((irqstack = x86_64_in_irqstack(bt))) { -+ stacksize = ms->stkinfo.isize; -+ bt->stackbase = irqstack; -+ bt->stacktop = irqstack + ms->stkinfo.isize; -+ bt->stackbuf = ms->irqstack; -+ alter_stackbuf(bt); -+ } else if (!INSTACK(bt->stkptr, bt)) -+ error(FATAL, -+ "unrecognized stack address for this task: %lx\n", -+ bt->hp->esp); -+ } -+ -+ stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); -+ -+ if (bt->stkptr) -+ i = (bt->stkptr - bt->stackbase)/sizeof(ulong); -+ else -+ i = 0; -+ -+ for (cnt = 0; i <= stacksize/sizeof(ulong); i++) { -+ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); -+ -+ if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT| -+ EFRAME_VERIFY, 0, (char *)up, bt, fp)) -+ cnt++; -+ } -+ -+ return cnt; -+} -+ -+static void -+x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp) -+{ -+ int i, u_idx; -+ ulong *up; -+ ulong words, addr; -+ -+ if (rsp < bt->frameptr) -+ return; -+ -+ words = (rsp - bt->frameptr) / sizeof(ulong) + 1; -+ -+ addr = bt->frameptr; -+ u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); -+ for (i = 0; i < words; i++, u_idx++) { -+ if (!(i & 1)) -+ fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); -+ -+ up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); -+ fprintf(ofp, "%016lx ", *up); -+ addr += sizeof(ulong); -+ } -+ fprintf(ofp, "\n"); -+} -+ -+/* -+ * Check a frame for a requested reference. 
-+ */ -+static void -+x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) -+{ -+ struct syment *sp; -+ ulong offset; -+ -+ if (!name) -+ sp = value_search(text, &offset); -+ else if (!text) -+ sp = symbol_search(name); -+ -+ switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) -+ { -+ case BT_REF_SYMBOL: -+ if (name) { -+ if (STREQ(name, bt->ref->str)) -+ bt->ref->cmdflags |= BT_REF_FOUND; -+ } else { -+ if (sp && !offset && STREQ(sp->name, bt->ref->str)) -+ bt->ref->cmdflags |= BT_REF_FOUND; -+ } -+ break; -+ -+ case BT_REF_HEXVAL: -+ if (text) { -+ if (bt->ref->hexval == text) -+ bt->ref->cmdflags |= BT_REF_FOUND; -+ } else if (sp && (bt->ref->hexval == sp->value)) -+ bt->ref->cmdflags |= BT_REF_FOUND; -+ else if (!name && !text && (bt->ref->hexval == 0)) -+ bt->ref->cmdflags |= BT_REF_FOUND; -+ break; -+ } -+} -+ -+/* -+ * Determine the function containing a .text.lock. reference. -+ */ -+static ulong -+text_lock_function(char *name, struct bt_info *bt, ulong locktext) -+{ -+ int c, reterror, instr, arg; -+ char buf[BUFSIZE]; -+ char *arglist[MAXARGS]; -+ char *p1; -+ ulong locking_func; -+ -+ instr = arg = -1; -+ locking_func = 0; -+ -+ open_tmpfile2(); -+ -+ if (STREQ(name, ".text.lock.spinlock")) -+ sprintf(buf, "x/4i 0x%lx", locktext); -+ else -+ sprintf(buf, "x/1i 0x%lx", locktext); -+ -+ if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { -+ close_tmpfile2(); -+ bt->flags |= BT_FRAMESIZE_DISABLE; -+ return 0; -+ } -+ -+ rewind(pc->tmpfile2); -+ while (fgets(buf, BUFSIZE, pc->tmpfile2)) { -+ c = parse_line(buf, arglist); -+ -+ if (instr == -1) { -+ /* -+ * Check whether are -+ * in the output string. -+ */ -+ if (LASTCHAR(arglist[0]) == ':') { -+ instr = 1; -+ arg = 2; -+ } else { -+ instr = 2; -+ arg = 3; -+ } -+ } -+ -+ if (c < (arg+1)) -+ break; -+ -+ if (STREQ(arglist[instr], "jmpq") || STREQ(arglist[instr], "jmp")) { -+ p1 = arglist[arg]; -+ reterror = 0; -+ locking_func = htol(p1, RETURN_ON_ERROR, &reterror); -+ if (reterror) -+ locking_func = 0; -+ break; -+ } -+ } -+ close_tmpfile2(); -+ -+ if (!locking_func) -+ bt->flags |= BT_FRAMESIZE_DISABLE; -+ -+ return locking_func; -+ -+} -+ -+ -+/* -+ * print one entry of a stack trace -+ */ -+#define BACKTRACE_COMPLETE (1) -+#define BACKTRACE_ENTRY_IGNORED (2) -+#define BACKTRACE_ENTRY_DISPLAYED (3) -+#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4) -+ -+static int -+x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, -+ int stkindex, ulong text) -+{ -+ ulong rsp, offset, locking_func; -+ struct syment *sp, *spl; -+ char *name; -+ int result; -+ long eframe_check; -+ char buf[BUFSIZE]; -+ -+ eframe_check = -1; -+ offset = 0; -+ sp = value_search(text, &offset); -+ if (!sp) -+ return BACKTRACE_ENTRY_IGNORED; -+ -+ name = sp->name; -+ -+ if (bt->flags & BT_TEXT_SYMBOLS) { -+ if (bt->flags & BT_EXCEPTION_FRAME) -+ rsp = bt->stkptr; -+ else -+ rsp = bt->stackbase + (stkindex * sizeof(long)); -+ fprintf(ofp, " [%s] %s at %lx\n", -+ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)), -+ name, text); -+ if (BT_REFERENCE_CHECK(bt)) -+ x86_64_do_bt_reference_check(bt, text, name); -+ return BACKTRACE_ENTRY_DISPLAYED; -+ } -+ -+ if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) && -+ !(bt->flags & BT_START)) { -+ if (STREQ(name, "child_rip")) { -+ if (symbol_exists("kernel_thread")) -+ name = "kernel_thread"; -+ else if (symbol_exists("arch_kernel_thread")) -+ name = "arch_kernel_thread"; -+ } -+ else if (!(bt->flags & BT_SCHEDULE)) { -+ if (STREQ(name, "error_exit")) -+ eframe_check 
= 8; -+ else { -+ if (CRASHDEBUG(2)) -+ fprintf(ofp, -+ "< ignoring text symbol with no offset: %s() >\n", -+ sp->name); -+ return BACKTRACE_ENTRY_IGNORED; -+ } -+ } -+ } -+ -+ if (bt->flags & BT_SCHEDULE) -+ name = "schedule"; -+ -+ if (STREQ(name, "child_rip")) { -+ if (symbol_exists("kernel_thread")) -+ name = "kernel_thread"; -+ else if (symbol_exists("arch_kernel_thread")) -+ name = "arch_kernel_thread"; -+ result = BACKTRACE_COMPLETE; -+ } else if (STREQ(name, "cpu_idle")) -+ result = BACKTRACE_COMPLETE; -+ else -+ result = BACKTRACE_ENTRY_DISPLAYED; -+ -+ if (bt->flags & BT_EXCEPTION_FRAME) -+ rsp = bt->stkptr; -+ else if (bt->flags & BT_START) -+ rsp = bt->stkptr; -+ else -+ rsp = bt->stackbase + (stkindex * sizeof(long)); -+ -+ if ((bt->flags & BT_FULL)) { -+ if (bt->frameptr) -+ x86_64_display_full_frame(bt, rsp, ofp); -+ bt->frameptr = rsp + sizeof(ulong); -+ } -+ -+ fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, -+ rsp, name, text); -+ -+ if (STREQ(name, "tracesys")) -+ fprintf(ofp, " (via system_call)"); -+ else if (STRNEQ(name, ".text.lock.")) { -+ if ((locking_func = text_lock_function(name, bt, text)) && -+ (spl = value_search(locking_func, &offset))) -+ fprintf(ofp, " (via %s)", spl->name); -+ } -+ -+ if (bt->flags & BT_FRAMESIZE_DISABLE) -+ fprintf(ofp, " *"); -+ -+ fprintf(ofp, "\n"); -+ -+ if (bt->flags & BT_LINE_NUMBERS) { -+ get_line_number(text, buf, FALSE); -+ if (strlen(buf)) -+ fprintf(ofp, " %s\n", buf); -+ } -+ -+ if (eframe_check >= 0) { -+ if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, -+ bt->stackbase + (stkindex*sizeof(long)) + eframe_check, -+ NULL, bt, ofp)) -+ result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED; -+ } -+ -+ if (BT_REFERENCE_CHECK(bt)) -+ x86_64_do_bt_reference_check(bt, text, name); -+ -+ bt->call_target = name; -+ -+ if (is_direct_call_target(bt)) { -+ if (CRASHDEBUG(2)) -+ fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", -+ bt->call_target); -+ bt->flags |= BT_CHECK_CALLER; -+ } else { -+ if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER)) -+ fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", -+ bt->call_target); -+ if (bt->flags & BT_CHECK_CALLER) { -+ if (CRASHDEBUG(2)) -+ fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n"); -+ bt->flags |= BT_NO_CHECK_CALLER; -+ } -+ bt->flags &= ~(ulonglong)BT_CHECK_CALLER; -+ } -+ -+ return result; -+} -+ -+/* -+ * Unroll a kernel stack. -+ */ -+static void -+x86_64_back_trace_cmd(struct bt_info *bt) -+{ -+ error(FATAL, "x86_64_back_trace_cmd: TBD\n"); -+} -+ -+ -+ -+/* -+ * Determine whether the initial stack pointer is located in one of the -+ * exception stacks. -+ */ -+static ulong -+x86_64_in_exception_stack(struct bt_info *bt) -+{ -+ int c, i; -+ ulong rsp; -+ ulong estack; -+ struct machine_specific *ms; -+ -+ rsp = bt->stkptr; -+ ms = machdep->machspec; -+ estack = 0; -+ -+ for (c = 0; !estack && (c < kt->cpus); c++) { -+ for (i = 0; i < 7; i++) { -+ if (ms->stkinfo.ebase[c][i] == 0) -+ break; -+ if ((rsp >= ms->stkinfo.ebase[c][i]) && -+ (rsp < (ms->stkinfo.ebase[c][i] + -+ ms->stkinfo.esize))) { -+ estack = ms->stkinfo.ebase[c][i]; -+ if (CRASHDEBUG(1) && (c != bt->tc->processor)) -+ error(INFO, -+ "task cpu: %d exception stack cpu: %d\n", -+ bt->tc->processor, c); -+ break; -+ } -+ } -+ } -+ -+ return estack; -+} -+ -+/* -+ * Determine whether the current stack pointer is in a cpu's irqstack. 
-+ */ -+static ulong -+x86_64_in_irqstack(struct bt_info *bt) -+{ -+ int c; -+ ulong rsp; -+ ulong irqstack; -+ struct machine_specific *ms; -+ -+ rsp = bt->stkptr; -+ ms = machdep->machspec; -+ irqstack = 0; -+ -+ for (c = 0; !irqstack && (c < kt->cpus); c++) { -+ if (ms->stkinfo.ibase[c] == 0) -+ break; -+ if ((rsp >= ms->stkinfo.ibase[c]) && -+ (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) { -+ irqstack = ms->stkinfo.ibase[c]; -+ if (CRASHDEBUG(1) && (c != bt->tc->processor)) -+ error(INFO, -+ "task cpu: %d IRQ stack cpu: %d\n", -+ bt->tc->processor, c); -+ break; -+ } -+ } -+ -+ return irqstack; -+} -+ -+#define STACK_TRANSITION_ERRMSG_E_I_P \ -+"cannot transition from exception stack to IRQ stack to current process stack:\n exception stack pointer: %lx\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" -+#define STACK_TRANSITION_ERRMSG_E_P \ -+"cannot transition from exception stack to current process stack:\n exception stack pointer: %lx\n process stack pointer: %lx\n current_stack_base: %lx\n" -+#define STACK_TRANSITION_ERRMSG_I_P \ -+"cannot transition from IRQ stack to current process stack:\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx" -+ -+/* -+ * Low-budget back tracer -- dump text return addresses, following call chain -+ * when possible, along with any verifiable exception frames. -+ */ -+static void -+x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in) -+{ -+ int i, level, done, framesize; -+ ulong rsp, offset, stacktop; -+ ulong *up; -+ long cs; -+ struct syment *sp, *spt; -+ FILE *ofp; -+ ulong estack, irqstack; -+ ulong irq_eframe; -+ struct bt_info bt_local, *bt; -+ struct machine_specific *ms; -+ ulong last_process_stack_eframe; -+ ulong user_mode_eframe; -+ -+ /* -+ * User may have made a run-time switch. -+ */ -+ if (kt->flags & DWARF_UNWIND) { -+ machdep->back_trace = x86_64_dwarf_back_trace_cmd; -+ x86_64_dwarf_back_trace_cmd(bt_in); -+ return; -+ } -+ -+ bt = &bt_local; -+ BCOPY(bt_in, bt, sizeof(struct bt_info)); -+ -+ if (bt->flags & BT_FRAMESIZE_DEBUG) { -+ x86_64_framesize_debug(bt); -+ return; -+ } -+ -+ level = 0; -+ done = FALSE; -+ irq_eframe = 0; -+ last_process_stack_eframe = 0; -+ bt->call_target = NULL; -+ rsp = bt->stkptr; -+ if (!rsp) { -+ error(INFO, "cannot determine starting stack pointer\n"); -+ return; -+ } -+ ms = machdep->machspec; -+ if (BT_REFERENCE_CHECK(bt)) -+ ofp = pc->nullfp; -+ else -+ ofp = fp; -+ -+ if (bt->flags & BT_TEXT_SYMBOLS) { -+ if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) -+ fprintf(ofp, "%sSTART: %s%s at %lx\n", -+ space(VADDR_PRLEN > 8 ? 14 : 6), -+ closest_symbol(bt->instptr), -+ STREQ(closest_symbol(bt->instptr), "thread_return") ? -+ " (schedule)" : "", -+ bt->instptr); -+ } else if (bt->flags & BT_START) { -+ x86_64_print_stack_entry(bt, ofp, level, -+ 0, bt->instptr); -+ bt->flags &= ~BT_START; -+ level++; -+ } -+ -+ -+ if ((estack = x86_64_in_exception_stack(bt))) { -+in_exception_stack: -+ bt->flags |= BT_EXCEPTION_STACK; -+ /* -+ * The stack buffer will have been loaded with the process -+ * stack, so switch to the indicated exception stack. -+ */ -+ bt->stackbase = estack; -+ bt->stacktop = estack + ms->stkinfo.esize; -+ bt->stackbuf = ms->irqstack; -+ -+ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -+ bt->stacktop - bt->stackbase, -+ bt->hp && (bt->hp->esp == bt->stkptr) ? 
-+ "irqstack contents via hook" : "irqstack contents", -+ RETURN_ON_ERROR)) -+ error(FATAL, "read of exception stack at %lx failed\n", -+ bt->stackbase); -+ -+ /* -+ * If irq_eframe is set, we've jumped back here from the -+ * IRQ stack dump below. Do basically the same thing as if -+ * had come from the processor stack, but presume that we -+ * must have been in kernel mode, i.e., took an exception -+ * while operating on an IRQ stack. (untested) -+ */ -+ if (irq_eframe) { -+ bt->flags |= BT_EXCEPTION_FRAME; -+ i = (irq_eframe - bt->stackbase)/sizeof(ulong); -+ x86_64_print_stack_entry(bt, ofp, level, i, -+ bt->instptr); -+ bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; -+ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -+ bt->stackbuf + (irq_eframe - bt->stackbase), -+ bt, ofp); -+ rsp += SIZE(pt_regs); /* guaranteed kernel mode */ -+ level++; -+ irq_eframe = 0; -+ } -+ -+ stacktop = bt->stacktop - SIZE(pt_regs); -+ -+ bt->flags &= ~BT_FRAMESIZE_DISABLE; -+ -+ for (i = (rsp - bt->stackbase)/sizeof(ulong); -+ !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { -+ -+ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); -+ -+ if (!is_kernel_text(*up)) -+ continue; -+ -+ switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) -+ { -+ case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -+ rsp += SIZE(pt_regs); -+ i += SIZE(pt_regs)/sizeof(ulong); -+ case BACKTRACE_ENTRY_DISPLAYED: -+ level++; -+ if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) { -+ rsp += framesize; -+ i += framesize/sizeof(ulong); -+ } -+ break; -+ case BACKTRACE_ENTRY_IGNORED: -+ break; -+ case BACKTRACE_COMPLETE: -+ done = TRUE; -+ break; -+ } -+ } -+ -+ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -+ bt->stackbuf + (bt->stacktop - bt->stackbase) - -+ SIZE(pt_regs), bt, ofp); -+ -+ if (!BT_REFERENCE_CHECK(bt)) -+ fprintf(fp, "--- ---\n"); -+ -+ /* -+ * stack = (unsigned long *) estack_end[-2]; -+ */ -+ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); -+ up -= 2; -+ rsp = bt->stkptr = *up; -+ up -= 3; -+ bt->instptr = *up; -+ if (cs & 3) -+ done = TRUE; /* user-mode exception */ -+ else -+ done = FALSE; /* kernel-mode exception */ -+ bt->frameptr = 0; -+ -+ /* -+ * Print the return values from the estack end. -+ */ -+ if (!done) { -+ bt->flags |= BT_START; -+ x86_64_print_stack_entry(bt, ofp, level, -+ 0, bt->instptr); -+ bt->flags &= ~(BT_START|BT_FRAMESIZE_DISABLE); -+ level++; -+ if ((framesize = x86_64_get_framesize(bt, bt->instptr)) >= 0) -+ rsp += framesize; -+ } -+ } -+ -+ /* -+ * IRQ stack entry always comes in via the process stack, regardless -+ * whether it happened while running in user or kernel space. -+ */ -+ if (!done && (irqstack = x86_64_in_irqstack(bt))) { -+ bt->flags |= BT_IRQSTACK; -+ /* -+ * Until coded otherwise, the stackbase will be pointing to -+ * either the exception stack or, more likely, the process -+ * stack base. Switch it to the IRQ stack. -+ */ -+ bt->stackbase = irqstack; -+ bt->stacktop = irqstack + ms->stkinfo.isize; -+ bt->stackbuf = ms->irqstack; -+ -+ if (!readmem(bt->stackbase, KVADDR, -+ bt->stackbuf, bt->stacktop - bt->stackbase, -+ bt->hp && (bt->hp->esp == bt_in->stkptr) ? 
-+ "irqstack contents via hook" : "irqstack contents", -+ RETURN_ON_ERROR)) -+ error(FATAL, "read of IRQ stack at %lx failed\n", -+ bt->stackbase); -+ -+ stacktop = bt->stacktop - 64; /* from kernel code */ -+ -+ bt->flags &= ~BT_FRAMESIZE_DISABLE; -+ -+ for (i = (rsp - bt->stackbase)/sizeof(ulong); -+ !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { -+ -+ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); -+ -+ if (!is_kernel_text(*up)) -+ continue; -+ -+ switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) -+ { -+ case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -+ rsp += SIZE(pt_regs); -+ i += SIZE(pt_regs)/sizeof(ulong); -+ case BACKTRACE_ENTRY_DISPLAYED: -+ level++; -+ if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) { -+ rsp += framesize; -+ i += framesize/sizeof(ulong); -+ } -+ break; -+ case BACKTRACE_ENTRY_IGNORED: -+ break; -+ case BACKTRACE_COMPLETE: -+ done = TRUE; -+ break; -+ } -+ } -+ -+ if (!BT_REFERENCE_CHECK(bt)) -+ fprintf(fp, "--- ---\n"); -+ -+ /* -+ * stack = (unsigned long *) (irqstack_end[-1]); -+ * (where irqstack_end is 64 bytes below page end) -+ */ -+ up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); -+ up -= 1; -+ irq_eframe = rsp = bt->stkptr = (*up) - ms->irq_eframe_link; -+ up -= 1; -+ bt->instptr = *up; -+ /* -+ * No exception frame when coming from call_softirq. -+ */ -+ if ((sp = value_search(bt->instptr, &offset)) && -+ STREQ(sp->name, "call_softirq")) -+ irq_eframe = 0; -+ bt->frameptr = 0; -+ done = FALSE; -+ } else -+ irq_eframe = 0; -+ -+ if (!done && (estack = x86_64_in_exception_stack(bt))) -+ goto in_exception_stack; -+ -+ if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { -+ /* -+ * Verify that the rsp pointer taken from either the -+ * exception or IRQ stack points into the process stack. -+ */ -+ bt->stackbase = GET_STACKBASE(bt->tc->task); -+ bt->stacktop = GET_STACKTOP(bt->tc->task); -+ -+ if (!INSTACK(rsp, bt)) { -+ switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) -+ { -+ case (BT_EXCEPTION_STACK|BT_IRQSTACK): -+ error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, -+ bt_in->stkptr, bt->stkptr, rsp, -+ bt->stackbase); -+ -+ case BT_EXCEPTION_STACK: -+ error(FATAL, STACK_TRANSITION_ERRMSG_E_P, -+ bt_in->stkptr, rsp, bt->stackbase); -+ -+ case BT_IRQSTACK: -+ error(FATAL, STACK_TRANSITION_ERRMSG_I_P, -+ bt_in->stkptr, rsp, bt->stackbase); -+ } -+ } -+ -+ /* -+ * Now fill the local stack buffer from the process stack. -+ */ -+ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -+ bt->stacktop - bt->stackbase, -+ "irqstack contents", RETURN_ON_ERROR)) -+ error(FATAL, "read of process stack at %lx failed\n", -+ bt->stackbase); -+ } -+ -+ /* -+ * For a normally blocked task, hand-create the first level. -+ */ -+ if (!done && -+ !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) && -+ STREQ(closest_symbol(bt->instptr), "thread_return")) { -+ bt->flags |= BT_SCHEDULE; -+ i = (rsp - bt->stackbase)/sizeof(ulong); -+ x86_64_print_stack_entry(bt, ofp, level, -+ i, bt->instptr); -+ bt->flags &= ~(ulonglong)BT_SCHEDULE; -+ rsp += sizeof(ulong); -+ level++; -+ } -+ -+ /* -+ * Dump the IRQ exception frame from the process stack. -+ * If the CS register indicates a user exception frame, -+ * then set done to TRUE to avoid the process stack walk-through. -+ * Otherwise, bump up the rsp past the kernel-mode eframe. 
-+ */ -+ if (irq_eframe) { -+ bt->flags |= BT_EXCEPTION_FRAME; -+ i = (irq_eframe - bt->stackbase)/sizeof(ulong); -+ x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); -+ bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; -+ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -+ bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); -+ if (cs & 3) -+ done = TRUE; /* IRQ from user-mode */ -+ else { -+ if (x86_64_print_eframe_location(rsp, level, ofp)) -+ level++; -+ rsp += SIZE(pt_regs); -+ irq_eframe = 0; -+ } -+ level++; -+ } -+ -+ /* -+ * Walk the process stack. -+ */ -+ -+ bt->flags &= ~BT_FRAMESIZE_DISABLE; -+ -+ for (i = (rsp - bt->stackbase)/sizeof(ulong); -+ !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { -+ -+ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); -+ -+ if (!is_kernel_text(*up)) -+ continue; -+ -+ if ((bt->flags & BT_CHECK_CALLER)) { -+ /* -+ * A non-zero offset value from the value_search() -+ * lets us know if it's a real text return address. -+ */ -+ spt = value_search(*up, &offset); -+ if (!offset && !(bt->flags & BT_FRAMESIZE_DISABLE)) -+ continue; -+ -+ /* -+ * sp gets the syment of the function that the text -+ * routine above called before leaving its return -+ * address on the stack -- if it can be determined. -+ */ -+ sp = x86_64_function_called_by((*up)-5); -+ -+ if (sp == NULL) { -+ /* -+ * We were unable to get the called function. -+ * If the text address had an offset, then -+ * it must have made an indirect call, and -+ * can't have called our target function. -+ */ -+ if (offset) { -+ if (CRASHDEBUG(1)) -+ fprintf(ofp, -+ "< ignoring %s() -- makes indirect call and NOT %s()>\n", -+ spt->name, -+ bt->call_target); -+ continue; -+ } -+ } else if ((machdep->flags & SCHED_TEXT) && -+ STREQ(bt->call_target, "schedule") && -+ STREQ(sp->name, "__sched_text_start")) { -+ ; /* bait and switch */ -+ } else if (!STREQ(sp->name, bt->call_target)) { -+ /* -+ * We got function called by the text routine, -+ * but it's not our target function. -+ */ -+ if (CRASHDEBUG(2)) -+ fprintf(ofp, -+ "< ignoring %s() -- calls %s() and NOT %s()>\n", -+ spt->name, sp->name, -+ bt->call_target); -+ continue; -+ } -+ } -+ -+ switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) -+ { -+ case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -+ last_process_stack_eframe = rsp + 8; -+ if (x86_64_print_eframe_location(last_process_stack_eframe, level, ofp)) -+ level++; -+ rsp += SIZE(pt_regs); -+ i += SIZE(pt_regs)/sizeof(ulong); -+ case BACKTRACE_ENTRY_DISPLAYED: -+ level++; -+ if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) { -+ rsp += framesize; -+ i += framesize/sizeof(ulong); -+ } -+ break; -+ case BACKTRACE_ENTRY_IGNORED: -+ break; -+ case BACKTRACE_COMPLETE: -+ done = TRUE; -+ break; -+ } -+ } -+ -+ if (!irq_eframe && !is_kernel_thread(bt->tc->task) && -+ (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { -+ user_mode_eframe = bt->stacktop - SIZE(pt_regs); -+ if (last_process_stack_eframe < user_mode_eframe) -+ x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + -+ (bt->stacktop - bt->stackbase) - SIZE(pt_regs), -+ bt, ofp); -+ } -+ -+ if (bt->flags & BT_TEXT_SYMBOLS) { -+ if (BT_REFERENCE_FOUND(bt)) { -+ print_task_header(fp, task_to_context(bt->task), 0); -+ BCOPY(bt_in, bt, sizeof(struct bt_info)); -+ bt->ref = NULL; -+ machdep->back_trace(bt); -+ fprintf(fp, "\n"); -+ } -+ } -+} -+ -+/* -+ * Use dwarf CFI encodings to correctly follow the call chain. 
-+ */ -+static void -+x86_64_dwarf_back_trace_cmd(struct bt_info *bt_in) -+{ -+ int i, level, done; -+ ulong rsp, offset, stacktop; -+ ulong *up; -+ long cs; -+ struct syment *sp; -+ FILE *ofp; -+ ulong estack, irqstack; -+ ulong irq_eframe; -+ struct bt_info bt_local, *bt; -+ struct machine_specific *ms; -+ ulong last_process_stack_eframe; -+ ulong user_mode_eframe; -+ -+ /* -+ * User may have made a run-time switch. -+ */ -+ if (!(kt->flags & DWARF_UNWIND)) { -+ machdep->back_trace = x86_64_low_budget_back_trace_cmd; -+ x86_64_low_budget_back_trace_cmd(bt_in); -+ return; -+ } -+ -+ bt = &bt_local; -+ BCOPY(bt_in, bt, sizeof(struct bt_info)); -+ -+ if (bt->flags & BT_FRAMESIZE_DEBUG) { -+ dwarf_debug(bt); -+ return; -+ } -+ -+ level = 0; -+ done = FALSE; -+ irq_eframe = 0; -+ last_process_stack_eframe = 0; -+ bt->call_target = NULL; -+ bt->bptr = 0; -+ rsp = bt->stkptr; -+ if (!rsp) { -+ error(INFO, "cannot determine starting stack pointer\n"); -+ return; -+ } -+ ms = machdep->machspec; -+ if (BT_REFERENCE_CHECK(bt)) -+ ofp = pc->nullfp; -+ else -+ ofp = fp; -+ -+ if (bt->flags & BT_TEXT_SYMBOLS) { -+ if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) -+ fprintf(ofp, "%sSTART: %s%s at %lx\n", -+ space(VADDR_PRLEN > 8 ? 14 : 6), -+ closest_symbol(bt->instptr), -+ STREQ(closest_symbol(bt->instptr), "thread_return") ? -+ " (schedule)" : "", -+ bt->instptr); -+ } else if (bt->flags & BT_START) { -+ x86_64_print_stack_entry(bt, ofp, level, -+ 0, bt->instptr); -+ bt->flags &= ~BT_START; -+ level++; -+ } -+ -+ -+ if ((estack = x86_64_in_exception_stack(bt))) { -+in_exception_stack: -+ bt->flags |= BT_EXCEPTION_STACK; -+ /* -+ * The stack buffer will have been loaded with the process -+ * stack, so switch to the indicated exception stack. -+ */ -+ bt->stackbase = estack; -+ bt->stacktop = estack + ms->stkinfo.esize; -+ bt->stackbuf = ms->irqstack; -+ -+ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -+ bt->stacktop - bt->stackbase, -+ bt->hp && (bt->hp->esp == bt->stkptr) ? -+ "irqstack contents via hook" : "irqstack contents", -+ RETURN_ON_ERROR)) -+ error(FATAL, "read of exception stack at %lx failed\n", -+ bt->stackbase); -+ -+ /* -+ * If irq_eframe is set, we've jumped back here from the -+ * IRQ stack dump below. Do basically the same thing as if -+ * had come from the processor stack, but presume that we -+ * must have been in kernel mode, i.e., took an exception -+ * while operating on an IRQ stack. 
(untested) -+ */ -+ if (irq_eframe) { -+ bt->flags |= BT_EXCEPTION_FRAME; -+ i = (irq_eframe - bt->stackbase)/sizeof(ulong); -+ x86_64_print_stack_entry(bt, ofp, level, i, -+ bt->instptr); -+ bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; -+ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -+ bt->stackbuf + (irq_eframe - bt->stackbase), -+ bt, ofp); -+ rsp += SIZE(pt_regs); /* guaranteed kernel mode */ -+ level++; -+ irq_eframe = 0; -+ } -+ -+ stacktop = bt->stacktop - SIZE(pt_regs); -+ -+ if (!done) { -+ level = dwarf_backtrace(bt, level, stacktop); -+ done = TRUE; -+ } -+ -+ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -+ bt->stackbuf + (bt->stacktop - bt->stackbase) - -+ SIZE(pt_regs), bt, ofp); -+ -+ if (!BT_REFERENCE_CHECK(bt)) -+ fprintf(fp, "--- ---\n"); -+ -+ /* -+ * stack = (unsigned long *) estack_end[-2]; -+ */ -+ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); -+ up -= 2; -+ rsp = bt->stkptr = *up; -+ up -= 3; -+ bt->instptr = *up; -+ if (cs & 3) -+ done = TRUE; /* user-mode exception */ -+ else -+ done = FALSE; /* kernel-mode exception */ -+ bt->frameptr = 0; -+ -+ /* -+ * Print the return values from the estack end. -+ */ -+ if (!done) { -+ bt->flags |= BT_START; -+ x86_64_print_stack_entry(bt, ofp, level, -+ 0, bt->instptr); -+ bt->flags &= ~BT_START; -+ level++; -+ } -+ } -+ -+ /* -+ * IRQ stack entry always comes in via the process stack, regardless -+ * whether it happened while running in user or kernel space. -+ */ -+ if (!done && (irqstack = x86_64_in_irqstack(bt))) { -+ bt->flags |= BT_IRQSTACK; -+ /* -+ * Until coded otherwise, the stackbase will be pointing to -+ * either the exception stack or, more likely, the process -+ * stack base. Switch it to the IRQ stack. -+ */ -+ bt->stackbase = irqstack; -+ bt->stacktop = irqstack + ms->stkinfo.isize; -+ bt->stackbuf = ms->irqstack; -+ -+ if (!readmem(bt->stackbase, KVADDR, -+ bt->stackbuf, bt->stacktop - bt->stackbase, -+ bt->hp && (bt->hp->esp == bt_in->stkptr) ? -+ "irqstack contents via hook" : "irqstack contents", -+ RETURN_ON_ERROR)) -+ error(FATAL, "read of IRQ stack at %lx failed\n", -+ bt->stackbase); -+ -+ stacktop = bt->stacktop - 64; /* from kernel code */ -+ -+ if (!done) { -+ level = dwarf_backtrace(bt, level, stacktop); -+ done = TRUE; -+ } -+ -+ if (!BT_REFERENCE_CHECK(bt)) -+ fprintf(fp, "--- ---\n"); -+ -+ /* -+ * stack = (unsigned long *) (irqstack_end[-1]); -+ * (where irqstack_end is 64 bytes below page end) -+ */ -+ up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); -+ up -= 1; -+ irq_eframe = rsp = bt->stkptr = (*up) - ms->irq_eframe_link; -+ up -= 1; -+ bt->instptr = *up; -+ /* -+ * No exception frame when coming from call_softirq. -+ */ -+ if ((sp = value_search(bt->instptr, &offset)) && -+ STREQ(sp->name, "call_softirq")) -+ irq_eframe = 0; -+ bt->frameptr = 0; -+ done = FALSE; -+ } else -+ irq_eframe = 0; -+ -+ if (!done && (estack = x86_64_in_exception_stack(bt))) -+ goto in_exception_stack; -+ -+ if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { -+ /* -+ * Verify that the rsp pointer taken from either the -+ * exception or IRQ stack points into the process stack. 
-+ */ -+ bt->stackbase = GET_STACKBASE(bt->tc->task); -+ bt->stacktop = GET_STACKTOP(bt->tc->task); -+ -+ if (!INSTACK(rsp, bt)) { -+ switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) -+ { -+ case (BT_EXCEPTION_STACK|BT_IRQSTACK): -+ error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, -+ bt_in->stkptr, bt->stkptr, rsp, -+ bt->stackbase); -+ -+ case BT_EXCEPTION_STACK: -+ error(FATAL, STACK_TRANSITION_ERRMSG_E_P, -+ bt_in->stkptr, rsp, bt->stackbase); -+ -+ case BT_IRQSTACK: -+ error(FATAL, STACK_TRANSITION_ERRMSG_I_P, -+ bt_in->stkptr, rsp, bt->stackbase); -+ } -+ } -+ -+ /* -+ * Now fill the local stack buffer from the process stack. -+ */ -+ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -+ bt->stacktop - bt->stackbase, -+ "irqstack contents", RETURN_ON_ERROR)) -+ error(FATAL, "read of process stack at %lx failed\n", -+ bt->stackbase); -+ } -+ -+ /* -+ * Dump the IRQ exception frame from the process stack. -+ * If the CS register indicates a user exception frame, -+ * then set done to TRUE to avoid the process stack walk-through. -+ * Otherwise, bump up the rsp past the kernel-mode eframe. -+ */ -+ if (irq_eframe) { -+ bt->flags |= BT_EXCEPTION_FRAME; -+ level = dwarf_print_stack_entry(bt, level); -+ bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; -+ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -+ bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); -+ if (cs & 3) -+ done = TRUE; /* IRQ from user-mode */ -+ else { -+ if (x86_64_print_eframe_location(rsp, level, ofp)) -+ level++; -+ rsp += SIZE(pt_regs); -+ irq_eframe = 0; -+ } -+ level++; -+ } -+ -+ /* -+ * Walk the process stack. -+ */ -+ if (!done) { -+ level = dwarf_backtrace(bt, level, bt->stacktop); -+ done = TRUE; -+ } -+ -+ if (!irq_eframe && !is_kernel_thread(bt->tc->task) && -+ (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { -+ user_mode_eframe = bt->stacktop - SIZE(pt_regs); -+ if (last_process_stack_eframe < user_mode_eframe) -+ x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + -+ (bt->stacktop - bt->stackbase) - SIZE(pt_regs), -+ bt, ofp); -+ } -+ -+ if (bt->flags & BT_TEXT_SYMBOLS) { -+ if (BT_REFERENCE_FOUND(bt)) { -+ print_task_header(fp, task_to_context(bt->task), 0); -+ BCOPY(bt_in, bt, sizeof(struct bt_info)); -+ bt->ref = NULL; -+ machdep->back_trace(bt); -+ fprintf(fp, "\n"); -+ } -+ } -+} -+ -+/* -+ * Functions that won't be called indirectly. -+ * Add more to this as they are discovered. 
-+ */ -+static const char *direct_call_targets[] = { -+ "schedule", -+ "schedule_timeout", -+ NULL -+}; -+ -+static int -+is_direct_call_target(struct bt_info *bt) -+{ -+ int i; -+ -+ if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER)) -+ return FALSE; -+ -+ if (strstr(bt->call_target, "schedule") && -+ is_task_active(bt->task)) -+ return FALSE; -+ -+ for (i = 0; direct_call_targets[i]; i++) { -+ if (STREQ(direct_call_targets[i], bt->call_target)) -+ return TRUE; -+ } -+ -+ return FALSE; -+} -+ -+static struct syment * -+x86_64_function_called_by(ulong rip) -+{ -+ struct syment *sp; -+ char buf[BUFSIZE], *p1; -+ ulong value, offset; -+ unsigned char byte; -+ -+ value = 0; -+ sp = NULL; -+ -+ if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte", -+ RETURN_ON_ERROR)) -+ return sp; -+ -+ if (byte != 0xe8) -+ return sp; -+ -+ sprintf(buf, "x/i 0x%lx", rip); -+ -+ open_tmpfile2(); -+ if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { -+ rewind(pc->tmpfile2); -+ while (fgets(buf, BUFSIZE, pc->tmpfile2)) { -+ if ((p1 = strstr(buf, "callq")) && -+ whitespace(*(p1-1))) { -+ if (extract_hex(p1, &value, NULLCHAR, TRUE)) -+ break; -+ } -+ } -+ } -+ close_tmpfile2(); -+ -+ if (value) -+ sp = value_search(value, &offset); -+ -+ /* -+ * Functions that jmp to schedule() or schedule_timeout(). -+ */ -+ if (sp) { -+ if ((STREQ(sp->name, "schedule_timeout_interruptible") || -+ STREQ(sp->name, "schedule_timeout_uninterruptible"))) -+ sp = symbol_search("schedule_timeout"); -+ -+ if (STREQ(sp->name, "__cond_resched")) -+ sp = symbol_search("schedule"); -+ } -+ -+ return sp; -+} -+ -+/* -+ * Unroll the kernel stack using a minimal amount of gdb services. -+ */ -+static void -+x86_64_back_trace(struct gnu_request *req, struct bt_info *bt) -+{ -+ error(FATAL, "x86_64_back_trace: unused\n"); -+} -+ -+ -+/* -+ * Print exception frame information for x86_64. 
-+ * -+ * Pid: 0, comm: swapper Not tainted 2.6.5-1.360phro.rootsmp -+ * RIP: 0010:[] {default_idle+36} -+ * RSP: 0018:ffffffff8048bfd8 EFLAGS: 00000246 -+ * RAX: 0000000000000000 RBX: ffffffff8010f510 RCX: 0000000000000018 -+ * RDX: 0000010001e37280 RSI: ffffffff803ac0a0 RDI: 000001007f43c400 -+ * RBP: 0000000000000000 R08: ffffffff8048a000 R09: 0000000000000000 -+ * R10: ffffffff80482188 R11: 0000000000000001 R12: 0000000000000000 -+ * R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 -+ * FS: 0000002a96e14fc0(0000) GS:ffffffff80481d80(0000) GS:0000000055578aa0 -+ * CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b -+ * CR2: 0000002a9556b000 CR3: 0000000000101000 CR4: 00000000000006e0 -+ * -+ */ -+ -+static long -+x86_64_exception_frame(ulong flags, ulong kvaddr, char *local, -+ struct bt_info *bt, FILE *ofp) -+{ -+ long rip, rsp, cs, ss, rflags, orig_rax, rbp; -+ long rax, rbx, rcx, rdx, rsi, rdi; -+ long r8, r9, r10, r11, r12, r13, r14, r15; -+ struct machine_specific *ms; -+ struct syment *sp; -+ ulong offset; -+ char *pt_regs_buf; -+ long verified; -+ int err; -+ -+ ms = machdep->machspec; -+ -+ if (!(machdep->flags & PT_REGS_INIT)) { -+ err = 0; -+ err |= ((ms->pto.r15 = MEMBER_OFFSET("pt_regs", "r15")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.r14 = MEMBER_OFFSET("pt_regs", "r14")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.r13 = MEMBER_OFFSET("pt_regs", "r13")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.r12 = MEMBER_OFFSET("pt_regs", "r12")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.r11 = MEMBER_OFFSET("pt_regs", "r11")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.r10 = MEMBER_OFFSET("pt_regs", "r10")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.r9 = MEMBER_OFFSET("pt_regs", "r9")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.orig_rax = -+ MEMBER_OFFSET("pt_regs", "orig_rax")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == -+ INVALID_OFFSET); -+ -+ if (err) -+ error(WARNING, "pt_regs structure has changed\n"); -+ -+ machdep->flags |= PT_REGS_INIT; -+ } -+ -+ if (kvaddr) { -+ pt_regs_buf = GETBUF(SIZE(pt_regs)); -+ readmem(kvaddr, KVADDR, pt_regs_buf, -+ SIZE(pt_regs), "pt_regs", FAULT_ON_ERROR); -+ } else -+ pt_regs_buf = local; -+ -+ rip = ULONG(pt_regs_buf + ms->pto.rip); -+ rsp = ULONG(pt_regs_buf + ms->pto.rsp); -+ cs = ULONG(pt_regs_buf + ms->pto.cs); -+ ss = ULONG(pt_regs_buf + ms->pto.ss); -+ rflags = ULONG(pt_regs_buf + ms->pto.eflags); -+ orig_rax = ULONG(pt_regs_buf + ms->pto.orig_rax); -+ rbp = ULONG(pt_regs_buf + ms->pto.rbp); -+ rax = ULONG(pt_regs_buf 
+ ms->pto.rax); -+ rbx = ULONG(pt_regs_buf + ms->pto.rbx); -+ rcx = ULONG(pt_regs_buf + ms->pto.rcx); -+ rdx = ULONG(pt_regs_buf + ms->pto.rdx); -+ rsi = ULONG(pt_regs_buf + ms->pto.rsi); -+ rdi = ULONG(pt_regs_buf + ms->pto.rdi); -+ r8 = ULONG(pt_regs_buf + ms->pto.r8); -+ r9 = ULONG(pt_regs_buf + ms->pto.r9); -+ r10 = ULONG(pt_regs_buf + ms->pto.r10); -+ r11 = ULONG(pt_regs_buf + ms->pto.r11); -+ r12 = ULONG(pt_regs_buf + ms->pto.r12); -+ r13 = ULONG(pt_regs_buf + ms->pto.r13); -+ r14 = ULONG(pt_regs_buf + ms->pto.r14); -+ r15 = ULONG(pt_regs_buf + ms->pto.r15); -+ -+ verified = x86_64_eframe_verify(bt, -+ kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase, -+ cs, ss, rip, rsp, rflags); -+ -+ /* -+ * If it's print-if-verified request, don't print bogus eframes. -+ */ -+ if (!verified && ((flags & (EFRAME_VERIFY|EFRAME_PRINT)) == -+ (EFRAME_VERIFY|EFRAME_PRINT))) -+ flags &= ~EFRAME_PRINT; -+ -+ if (CRASHDEBUG(2)) -+ fprintf(ofp, "< exception frame at: %lx >\n", kvaddr ? kvaddr : -+ (local - bt->stackbuf) + bt->stackbase); -+ -+ if (flags & EFRAME_PRINT) { -+ if (flags & EFRAME_SEARCH) { -+ fprintf(ofp, "\n %s-MODE EXCEPTION FRAME AT: %lx\n", -+ cs & 3 ? "USER" : "KERNEL", -+ kvaddr ? kvaddr : -+ (local - bt->stackbuf) + bt->stackbase); -+ } else if (!(cs & 3)) { -+ fprintf(ofp, " [exception RIP: "); -+ if ((sp = value_search(rip, &offset))) { -+ fprintf(ofp, "%s", sp->name); -+ if (offset) -+ fprintf(ofp, (output_radix == 16) ? -+ "+0x%lx" : "+%ld", offset); -+ } else -+ fprintf(ofp, "unknown or invalid address"); -+ fprintf(ofp, "]\n"); -+ } -+ fprintf(ofp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n", -+ rip, rsp, rflags); -+ fprintf(ofp, " RAX: %016lx RBX: %016lx RCX: %016lx\n", -+ rax, rbx, rcx); -+ fprintf(ofp, " RDX: %016lx RSI: %016lx RDI: %016lx\n", -+ rdx, rsi, rdi); -+ fprintf(ofp, " RBP: %016lx R8: %016lx R9: %016lx\n", -+ rbp, r8, r9); -+ fprintf(ofp, " R10: %016lx R11: %016lx R12: %016lx\n", -+ r10, r11, r12); -+ fprintf(ofp, " R13: %016lx R14: %016lx R15: %016lx\n", -+ r13, r14, r15); -+ fprintf(ofp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n", -+ orig_rax, cs, ss); -+ -+ if (!verified && CRASHDEBUG((pc->flags & RUNTIME) ? 
0 : 1)) -+ error(WARNING, "possibly bogus exception frame\n"); -+ } -+ -+ if ((flags & EFRAME_PRINT) && BT_REFERENCE_CHECK(bt)) { -+ x86_64_do_bt_reference_check(bt, rip, NULL); -+ x86_64_do_bt_reference_check(bt, rsp, NULL); -+ x86_64_do_bt_reference_check(bt, cs, NULL); -+ x86_64_do_bt_reference_check(bt, ss, NULL); -+ x86_64_do_bt_reference_check(bt, rflags, NULL); -+ x86_64_do_bt_reference_check(bt, orig_rax, NULL); -+ x86_64_do_bt_reference_check(bt, rbp, NULL); -+ x86_64_do_bt_reference_check(bt, rax, NULL); -+ x86_64_do_bt_reference_check(bt, rbx, NULL); -+ x86_64_do_bt_reference_check(bt, rcx, NULL); -+ x86_64_do_bt_reference_check(bt, rdx, NULL); -+ x86_64_do_bt_reference_check(bt, rsi, NULL); -+ x86_64_do_bt_reference_check(bt, rdi, NULL); -+ x86_64_do_bt_reference_check(bt, r8, NULL); -+ x86_64_do_bt_reference_check(bt, r9, NULL); -+ x86_64_do_bt_reference_check(bt, r10, NULL); -+ x86_64_do_bt_reference_check(bt, r11, NULL); -+ x86_64_do_bt_reference_check(bt, r12, NULL); -+ x86_64_do_bt_reference_check(bt, r13, NULL); -+ x86_64_do_bt_reference_check(bt, r14, NULL); -+ x86_64_do_bt_reference_check(bt, r15, NULL); -+ } -+ -+ /* Remember the rip and rsp for unwinding the process stack */ -+ if (kt->flags & DWARF_UNWIND){ -+ bt->instptr = rip; -+ bt->stkptr = rsp; -+ bt->bptr = rbp; -+ } -+ -+ if (kvaddr) -+ FREEBUF(pt_regs_buf); -+ -+ if (flags & EFRAME_CS) -+ return cs; -+ else if (flags & EFRAME_VERIFY) -+ return verified; -+ -+ return 0; -+} -+ -+static int -+x86_64_print_eframe_location(ulong eframe, int level, FILE *ofp) -+{ -+ return FALSE; -+ -+#ifdef NOTDEF -+ ulong rip; -+ char *pt_regs_buf; -+ struct machine_specific *ms; -+ struct syment *sp; -+ -+ ms = machdep->machspec; -+ -+ pt_regs_buf = GETBUF(SIZE(pt_regs)); -+ if (!readmem(eframe, KVADDR, pt_regs_buf, SIZE(pt_regs), -+ "pt_regs", RETURN_ON_ERROR|QUIET)) { -+ FREEBUF(pt_regs_buf); -+ return FALSE; -+ } -+ -+ rip = ULONG(pt_regs_buf + ms->pto.rip); -+ FREEBUF(pt_regs_buf); -+ -+ if (!(sp = value_search(rip, NULL))) -+ return FALSE; -+ -+ fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level+1, -+ eframe, sp->name, rip); -+ -+ return TRUE; -+#endif -+} -+ -+/* -+ * Check that the verifiable registers contain reasonable data. 
-+ */ -+#define RAZ_MASK 0xffffffffffc08028 /* return-as-zero bits */ -+ -+static int -+x86_64_eframe_verify(struct bt_info *bt, long kvaddr, long cs, long ss, -+ long rip, long rsp, long rflags) -+{ -+ if ((rflags & RAZ_MASK) || !(rflags & 0x2)) -+ return FALSE; -+ -+ if ((cs == 0x10) && (ss == 0x18)) { -+ if (is_kernel_text(rip) && IS_KVADDR(rsp)) -+ return TRUE; -+ -+ if (x86_64_is_module_addr(rip) && -+ IS_KVADDR(rsp) && -+ (rsp == (kvaddr + SIZE(pt_regs)))) -+ return TRUE; -+ } -+ -+ if ((cs == 0x10) && kvaddr) { -+ if (is_kernel_text(rip) && IS_KVADDR(rsp) && -+ (rsp == (kvaddr + SIZE(pt_regs) + 8))) -+ return TRUE; -+ } -+ -+ if ((cs == 0x10) && kvaddr) { -+ if (is_kernel_text(rip) && IS_KVADDR(rsp) && -+ (rsp == (kvaddr + SIZE(pt_regs)))) -+ return TRUE; -+ } -+ -+ if ((cs == 0x10) && kvaddr) { -+ if (is_kernel_text(rip) && IS_KVADDR(rsp) && -+ x86_64_in_exception_stack(bt)) -+ return TRUE; -+ } -+ -+ if ((cs == 0x33) && (ss == 0x2b)) { -+ if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc)) -+ return TRUE; -+ } -+ -+ if (XEN() && ((cs == 0x33) || (cs == 0xe033)) && -+ ((ss == 0x2b) || (ss == 0xe02b))) { -+ if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc)) -+ return TRUE; -+ } -+ -+ if (XEN() && ((cs == 0x10000e030) || (cs == 0xe030)) && -+ (ss == 0xe02b)) { -+ if (is_kernel_text(rip) && IS_KVADDR(rsp)) -+ return TRUE; -+ } -+ -+ /* -+ * 32-bit segments -+ */ -+ if ((cs == 0x23) && (ss == 0x2b)) { -+ if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc)) -+ return TRUE; -+ } -+ -+ return FALSE; -+} -+ -+/* -+ * Get a stack frame combination of pc and ra from the most relevent spot. -+ */ -+static void -+x86_64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) -+{ -+ if (bt->flags & BT_DUMPFILE_SEARCH) -+ return x86_64_get_dumpfile_stack_frame(bt, pcp, spp); -+ -+ if (pcp) -+ *pcp = x86_64_get_pc(bt); -+ if (spp) -+ *spp = x86_64_get_sp(bt); -+} -+ -+/* -+ * Get the starting point for the active cpus in a diskdump/netdump. -+ */ -+static void -+x86_64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *rip, ulong *rsp) -+{ -+ int panic_task; -+ int i, estack, panic, stage; -+ char *sym; -+ struct syment *sp; -+ ulong *up; -+ struct bt_info bt_local, *bt; -+ struct machine_specific *ms; -+ char *user_regs; -+ ulong ur_rip, ur_rsp; -+ ulong halt_rip, halt_rsp; -+ ulong crash_kexec_rip, crash_kexec_rsp; -+ -+ bt = &bt_local; -+ BCOPY(bt_in, bt, sizeof(struct bt_info)); -+ ms = machdep->machspec; -+ ur_rip = ur_rsp = 0; -+ halt_rip = halt_rsp = 0; -+ crash_kexec_rip = crash_kexec_rsp = 0; -+ stage = 0; -+ estack = -1; -+ -+ panic_task = tt->panic_task == bt->task ? TRUE : FALSE; -+ -+ if (panic_task && bt->machdep) { -+ user_regs = bt->machdep; -+ -+ if (x86_64_eframe_verify(bt, -+ 0, -+ ULONG(user_regs + OFFSET(user_regs_struct_cs)), -+ ULONG(user_regs + OFFSET(user_regs_struct_ss)), -+ ULONG(user_regs + OFFSET(user_regs_struct_rip)), -+ ULONG(user_regs + OFFSET(user_regs_struct_rsp)), -+ ULONG(user_regs + OFFSET(user_regs_struct_eflags)))) { -+ bt->stkptr = ULONG(user_regs + -+ OFFSET(user_regs_struct_rsp)); -+ if (x86_64_in_irqstack(bt)) { -+ ur_rip = ULONG(user_regs + -+ OFFSET(user_regs_struct_rip)); -+ ur_rsp = ULONG(user_regs + -+ OFFSET(user_regs_struct_rsp)); -+ goto skip_stage; -+ } -+ } -+ } -+ -+ panic = FALSE; -+ -+ /* -+ * Check the process stack first. 
-+ */ -+next_stack: -+ for (i = 0, up = (ulong *)bt->stackbuf; -+ i < (bt->stacktop - bt->stackbase)/sizeof(ulong); i++, up++) { -+ sym = closest_symbol(*up); -+ if (XEN_CORE_DUMPFILE()) { -+ if (STREQ(sym, "xen_machine_kexec")) { -+ *rip = *up; -+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ return; -+ } -+ } else if (STREQ(sym, "netconsole_netdump") || -+ STREQ(sym, "netpoll_start_netdump") || -+ STREQ(sym, "start_disk_dump") || -+ STREQ(sym, "disk_dump") || -+ STREQ(sym, "crash_kexec") || -+ STREQ(sym, "machine_kexec") || -+ STREQ(sym, "try_crashdump")) { -+ /* -+ * Use second instance of crash_kexec if it exists. -+ */ -+ if (!(bt->flags & BT_TEXT_SYMBOLS) && -+ STREQ(sym, "crash_kexec") && !crash_kexec_rip) { -+ crash_kexec_rip = *up; -+ crash_kexec_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ continue; -+ } -+ *rip = *up; -+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ return; -+ } -+ -+ if ((estack >= 0) && -+ (STREQ(sym, "nmi_watchdog_tick") || -+ STREQ(sym, "default_do_nmi"))) { -+ sp = x86_64_function_called_by((*up)-5); -+ if (!sp || !STREQ(sp->name, "die_nmi")) -+ continue; -+ *rip = *up; -+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ bt_in->flags |= BT_START; -+ *rip = symbol_value("die_nmi"); -+ *rsp = (*rsp) - (7*sizeof(ulong)); -+ return; -+ } -+ -+ if (STREQ(sym, "panic")) { -+ *rip = *up; -+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ panic = TRUE; -+ continue; /* keep looking for die */ -+ } -+ -+ if (STREQ(sym, "die")) { -+ *rip = *up; -+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ for (i++, up++; i < LONGS_PER_STACK; i++, up++) { -+ sym = closest_symbol(*up); -+ if (STREQ(sym, "sysrq_handle_crash")) -+ goto next_sysrq; -+ } -+ return; -+ } -+ -+ if (STREQ(sym, "sysrq_handle_crash")) { -+next_sysrq: -+ *rip = *up; -+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ pc->flags |= SYSRQ; -+ for (i++, up++; i < LONGS_PER_STACK; i++, up++) { -+ sym = closest_symbol(*up); -+ if (STREQ(sym, "sysrq_handle_crash")) -+ goto next_sysrq; -+ } -+ return; -+ } -+ -+ if (!panic_task && (stage > 0) && -+ STREQ(sym, "smp_call_function_interrupt")) { -+ *rip = *up; -+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ return; -+ } -+ -+ if (!panic_task && STREQ(sym, "crash_nmi_callback")) { -+ *rip = *up; -+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ return; -+ } -+ -+ if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && -+ (stage == 0) && STREQ(sym, "safe_halt")) { -+ halt_rip = *up; -+ halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ } -+ -+ if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && -+ !halt_rip && (stage == 0) && STREQ(sym, "xen_idle")) { -+ halt_rip = *up; -+ halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ } -+ -+ if (!XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && -+ !halt_rip && (stage == 0) && STREQ(sym, "cpu_idle")) { -+ halt_rip = *up; -+ halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -+ } -+ } -+ -+ if (panic) -+ return; -+ -+ if (crash_kexec_rip) { -+ *rip = crash_kexec_rip; -+ *rsp = crash_kexec_rsp; -+ return; -+ } -+ -+skip_stage: -+ switch (stage) -+ { -+ /* -+ * Now check the processor's interrupt stack. 
-+ */ -+ case 0: -+ bt->stackbase = ms->stkinfo.ibase[bt->tc->processor]; -+ bt->stacktop = ms->stkinfo.ibase[bt->tc->processor] + -+ ms->stkinfo.isize; -+ console("x86_64_get_dumpfile_stack_frame: searching IRQ stack at %lx\n", -+ bt->stackbase); -+ bt->stackbuf = ms->irqstack; -+ alter_stackbuf(bt); -+ stage = 1; -+ goto next_stack; -+ -+ /* -+ * Check the exception stacks. -+ */ -+ case 1: -+ if (++estack == 7) -+ break; -+ bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][estack]; -+ bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][estack] + -+ ms->stkinfo.esize; -+ console("x86_64_get_dumpfile_stack_frame: searching %s estack at %lx\n", -+ x86_64_exception_stacks[estack], bt->stackbase); -+ if (!(bt->stackbase)) -+ goto skip_stage; -+ bt->stackbuf = ms->irqstack; -+ alter_stackbuf(bt); -+ goto next_stack; -+ -+ } -+ -+ /* -+ * We didn't find what we were looking for, so just use what was -+ * passed in from the ELF header. -+ */ -+ if (ur_rip && ur_rsp) { -+ *rip = ur_rip; -+ *rsp = ur_rsp; -+ return; -+ } -+ -+ if (halt_rip && halt_rsp) { -+ *rip = halt_rip; -+ *rsp = halt_rsp; -+ return; -+ } -+ -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "x86_64_get_dumpfile_stack_frame: cannot find anything useful (task: %lx)\n", -+ bt->task); -+ -+ bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH; -+ -+ machdep->get_stack_frame(bt, rip, rsp); -+} -+ -+/* -+ * Get the saved RSP from the task's thread_struct. -+ */ -+static ulong -+x86_64_get_sp(struct bt_info *bt) -+{ -+ ulong offset, rsp; -+ -+ if (tt->flags & THREAD_INFO) { -+ readmem(bt->task + OFFSET(task_struct_thread) + -+ OFFSET(thread_struct_rsp), KVADDR, -+ &rsp, sizeof(void *), -+ "thread_struct rsp", FAULT_ON_ERROR); -+ return rsp; -+ } -+ -+ offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rsp); -+ -+ return GET_STACK_ULONG(offset); -+} -+ -+/* -+ * Get the saved PC from the task's thread_struct if it exists; -+ * otherwise just use the "thread_return" label value. -+ */ -+static ulong -+x86_64_get_pc(struct bt_info *bt) -+{ -+ ulong offset, rip; -+ -+ if (INVALID_MEMBER(thread_struct_rip)) -+ return symbol_value("thread_return"); -+ -+ if (tt->flags & THREAD_INFO) { -+ readmem(bt->task + OFFSET(task_struct_thread) + -+ OFFSET(thread_struct_rip), KVADDR, -+ &rip, sizeof(void *), -+ "thread_struct rip", FAULT_ON_ERROR); -+ return rip; -+ } -+ -+ offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rip); -+ -+ return GET_STACK_ULONG(offset); -+} -+ -+ -+/* -+ * Do the work for x86_64_get_sp() and x86_64_get_pc(). -+ */ -+static void -+get_x86_64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) -+{ -+ error(FATAL, "get_x86_64_frame: TBD\n"); -+} -+ -+/* -+ * Do the work for cmd_irq(). 
-+ */ -+static void -+x86_64_dump_irq(int irq) -+{ -+ if (symbol_exists("irq_desc")) { -+ machdep->dump_irq = generic_dump_irq; -+ return(generic_dump_irq(irq)); -+ } -+ -+ error(FATAL, "x86_64_dump_irq: irq_desc[] does not exist?\n"); - } - --static void --x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp) -+/* -+ * Do the work for irq -d -+ */ -+void -+x86_64_display_idt_table(void) - { -- int i, u_idx; -- ulong *up; -- ulong words, addr; -+ int i; -+ char *idt_table_buf; -+ char buf[BUFSIZE]; -+ ulong *ip; - -- words = (rsp - bt->frameptr) / sizeof(ulong) + 1; -+ idt_table_buf = GETBUF(SIZE(gate_struct) * 256); -+ readmem(symbol_value("idt_table"), KVADDR, idt_table_buf, -+ SIZE(gate_struct) * 256, "idt_table", FAULT_ON_ERROR); -+ ip = (ulong *)idt_table_buf; - -- addr = bt->frameptr; -- u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); -- for (i = 0; i < words; i++, u_idx++) { -- if (!(i & 1)) -- fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); -- -- up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); -- fprintf(ofp, "%016lx ", *up); -- addr += sizeof(ulong); -+ for (i = 0; i < 256; i++, ip += 2) { -+ if (i < 10) -+ fprintf(fp, " "); -+ else if (i < 100) -+ fprintf(fp, " "); -+ fprintf(fp, "[%d] %s\n", -+ i, x86_64_extract_idt_function(ip, buf, NULL)); - } -- fprintf(ofp, "\n"); -+ -+ FREEBUF(idt_table_buf); - } - - /* -- * Check a frame for a requested reference. -+ * Extract the function name out of the IDT entry. - */ --static void --x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) -+static char * -+x86_64_extract_idt_function(ulong *ip, char *buf, ulong *retaddr) - { -- struct syment *sp; -- ulong offset; -+ ulong i1, i2, addr; -+ char locbuf[BUFSIZE]; -+ physaddr_t phys; - -- if (!name) -- sp = value_search(text, &offset); -- else if (!text) -- sp = symbol_search(name); -+ if (buf) -+ BZERO(buf, BUFSIZE); - -- switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) -- { -- case BT_REF_SYMBOL: -- if (name) { -- if (STREQ(name, bt->ref->str)) -- bt->ref->cmdflags |= BT_REF_FOUND; -- } else { -- if (sp && !offset && STREQ(sp->name, bt->ref->str)) -- bt->ref->cmdflags |= BT_REF_FOUND; -+ i1 = *ip; -+ i2 = *(ip+1); -+ -+ i2 <<= 32; -+ addr = i2 & 0xffffffff00000000; -+ addr |= (i1 & 0xffff); -+ i1 >>= 32; -+ addr |= (i1 & 0xffff0000); -+ -+ if (retaddr) -+ *retaddr = addr; -+ -+ if (!buf) -+ return NULL; -+ -+ value_to_symstr(addr, locbuf, 0); -+ if (strlen(locbuf)) -+ sprintf(buf, locbuf); -+ else { -+ sprintf(buf, "%016lx", addr); -+ if (kvtop(NULL, addr, &phys, 0)) { -+ addr = machdep->kvbase + (ulong)phys; -+ if (value_to_symstr(addr, locbuf, 0)) { -+ strcat(buf, " <"); -+ strcat(buf, locbuf); -+ strcat(buf, ">"); -+ } - } -- break; -+ } - -- case BT_REF_HEXVAL: -- if (text) { -- if (bt->ref->hexval == text) -- bt->ref->cmdflags |= BT_REF_FOUND; -- } else if (sp && (bt->ref->hexval == sp->value)) -- bt->ref->cmdflags |= BT_REF_FOUND; -- else if (!name && !text && (bt->ref->hexval == 0)) -- bt->ref->cmdflags |= BT_REF_FOUND; -- break; -- } -+ return buf; - } - - /* -- * print one entry of a stack trace -+ * Filter disassembly output if the output radix is not gdb's default 10 - */ --#define BACKTRACE_COMPLETE (1) --#define BACKTRACE_ENTRY_IGNORED (2) --#define BACKTRACE_ENTRY_DISPLAYED (3) --#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4) -- --static int --x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, -- int stkindex, ulong text) -+static int -+x86_64_dis_filter(ulong vaddr, char *inbuf) - { -- ulong rsp, offset; -- 
struct syment *sp; -- char *name; -- int result; -- long eframe_check; -- char buf[BUFSIZE]; -+ char buf1[BUFSIZE]; -+ char buf2[BUFSIZE]; -+ char *colon, *p1; -+ int argc; -+ char *argv[MAXARGS]; -+ ulong value; - -- eframe_check = -1; -- offset = 0; -- sp = value_search(text, &offset); -- if (!sp) -- return BACKTRACE_ENTRY_IGNORED; -+ if (!inbuf) -+ return TRUE; -+/* -+ * For some reason gdb can go off into the weeds translating text addresses, -+ * (on alpha -- not necessarily seen on x86_64) so this routine both fixes the -+ * references as well as imposing the current output radix on the translations. -+ */ -+ console("IN: %s", inbuf); - -- name = sp->name; -+ colon = strstr(inbuf, ":"); - -- if (bt->flags & BT_TEXT_SYMBOLS) { -- if (bt->flags & BT_EXCEPTION_FRAME) -- rsp = bt->stkptr; -- else -- rsp = bt->stackbase + (stkindex * sizeof(long)); -- fprintf(ofp, " [%s] %s at %lx\n", -- mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)), -- name, text); -- if (BT_REFERENCE_CHECK(bt)) -- x86_64_do_bt_reference_check(bt, text, name); -- return BACKTRACE_ENTRY_DISPLAYED; -+ if (colon) { -+ sprintf(buf1, "0x%lx <%s>", vaddr, -+ value_to_symstr(vaddr, buf2, pc->output_radix)); -+ sprintf(buf2, "%s%s", buf1, colon); -+ strcpy(inbuf, buf2); - } - -- if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) && -- !(bt->flags & BT_START)) { -- if (STREQ(name, "child_rip")) { -- if (symbol_exists("kernel_thread")) -- name = "kernel_thread"; -- else if (symbol_exists("arch_kernel_thread")) -- name = "arch_kernel_thread"; -- } -- else if (!(bt->flags & BT_SCHEDULE)) { -- if (STREQ(name, "error_exit")) -- eframe_check = 8; -- else { -- if (CRASHDEBUG(2)) -- fprintf(ofp, -- "< ignoring text symbol with no offset: %s() >\n", -- sp->name); -- return BACKTRACE_ENTRY_IGNORED; -- } -- } -- } -+ strcpy(buf1, inbuf); -+ argc = parse_line(buf1, argv); - -- if (bt->flags & BT_SCHEDULE) -- name = "schedule"; -+ if ((FIRSTCHAR(argv[argc-1]) == '<') && -+ (LASTCHAR(argv[argc-1]) == '>')) { -+ p1 = rindex(inbuf, '<'); -+ while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) -+ p1--; - -- if (STREQ(name, "child_rip")) { -- if (symbol_exists("kernel_thread")) -- name = "kernel_thread"; -- else if (symbol_exists("arch_kernel_thread")) -- name = "arch_kernel_thread"; -- result = BACKTRACE_COMPLETE; -- } else if (STREQ(name, "cpu_idle")) -- result = BACKTRACE_COMPLETE; -- else -- result = BACKTRACE_ENTRY_DISPLAYED; -+ if (!STRNEQ(p1, " 0x")) -+ return FALSE; -+ p1++; - -- if (bt->flags & BT_EXCEPTION_FRAME) -- rsp = bt->stkptr; -- else if (bt->flags & BT_START) -- rsp = bt->stkptr; -- else -- rsp = bt->stackbase + (stkindex * sizeof(long)); -+ if (!extract_hex(p1, &value, NULLCHAR, TRUE)) -+ return FALSE; -+ -+ sprintf(buf1, "0x%lx <%s>\n", value, -+ value_to_symstr(value, buf2, pc->output_radix)); -+ -+ sprintf(p1, buf1); -+ -+ } else if (STREQ(argv[argc-2], "callq") && -+ hexadecimal(argv[argc-1], 0)) { -+ /* -+ * Update module code of the form: -+ * -+ * callq 0xffffffffa0017aa0 -+ * -+ * to show a bracketed direct call target. -+ */ -+ p1 = &LASTCHAR(inbuf); -+ -+ if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) { -+ sprintf(buf1, " <%s>\n", -+ value_to_symstr(value, buf2, -+ pc->output_radix)); -+ if (IS_MODULE_VADDR(value) && -+ !strstr(buf2, "+")) -+ sprintf(p1, buf1); -+ } -+ } -+ -+ console(" %s", inbuf); -+ -+ return TRUE; -+} -+ -+ -+/* -+ * Override smp_num_cpus if possible and necessary. 
-+ */ -+int -+x86_64_get_smp_cpus(void) -+{ -+ int i, cpus, nr_pda, cpunumber, _cpu_pda; -+ char *cpu_pda_buf; -+ ulong level4_pgt, cpu_pda_addr; -+ -+ if (!VALID_STRUCT(x8664_pda)) -+ return 1; - -- if ((bt->flags & BT_FULL)) { -- if (bt->frameptr) -- x86_64_display_full_frame(bt, rsp, ofp); -- bt->frameptr = rsp + sizeof(ulong); -+ cpu_pda_buf = GETBUF(SIZE(x8664_pda)); -+ -+ if (LKCD_KERNTYPES()) { -+ if (symbol_exists("_cpu_pda")) -+ _cpu_pda = TRUE; -+ else -+ _cpu_pda = FALSE; -+ nr_pda = get_cpus_possible(); -+ } else { -+ if (symbol_exists("_cpu_pda")) { -+ if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0))) -+ nr_pda = NR_CPUS; -+ _cpu_pda = TRUE; -+ } else { -+ if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) -+ nr_pda = NR_CPUS; -+ _cpu_pda = FALSE; -+ } -+ } -+ for (i = cpus = 0; i < nr_pda; i++) { -+ if (_cpu_pda) { -+ if (!_CPU_PDA_READ(i, cpu_pda_buf)) -+ break; -+ } else { -+ if (!CPU_PDA_READ(i, cpu_pda_buf)) -+ break; -+ } -+ if (VALID_MEMBER(x8664_pda_level4_pgt)) { -+ level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); -+ if (!VALID_LEVEL4_PGT_ADDR(level4_pgt)) -+ break; -+ } -+ cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber)); -+ if (cpunumber != cpus) -+ break; -+ cpus++; - } - -- fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level, -- rsp, name, text); -+ FREEBUF(cpu_pda_buf); - -- if (bt->flags & BT_LINE_NUMBERS) { -- get_line_number(text, buf, FALSE); -- if (strlen(buf)) -- fprintf(ofp, " %s\n", buf); -- } -+ return cpus; -+} - -- if (eframe_check >= 0) { -- if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, -- bt->stackbase + (stkindex*sizeof(long)) + eframe_check, -- NULL, bt, ofp)) -- result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED; -- } -+/* -+ * Machine dependent command. -+ */ -+void -+x86_64_cmd_mach(void) -+{ -+ int c; - -- if (BT_REFERENCE_CHECK(bt)) -- x86_64_do_bt_reference_check(bt, text, name); -+ while ((c = getopt(argcnt, args, "cm")) != EOF) { -+ switch(c) -+ { -+ case 'c': -+ x86_64_display_cpu_data(); -+ return; - -- bt->call_target = name; -+ case 'm': -+ x86_64_display_memmap(); -+ return; - -- if (is_direct_call_target(bt)) { -- if (CRASHDEBUG(2)) -- fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", -- bt->call_target); -- bt->flags |= BT_CHECK_CALLER; -- } else { -- if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER)) -- fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", -- bt->call_target); -- if (bt->flags & BT_CHECK_CALLER) { -- if (CRASHDEBUG(2)) -- fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n"); -- bt->flags |= BT_NO_CHECK_CALLER; -- } -- bt->flags &= ~(ulonglong)BT_CHECK_CALLER; -- } -+ default: -+ argerrs++; -+ break; -+ } -+ } - -- return result; -+ if (argerrs) -+ cmd_usage(pc->curcmd, SYNOPSIS); -+ -+ x86_64_display_machine_stats(); - } - - /* -- * Unroll a kernel stack. -+ * "mach" command output. 
- */ - static void --x86_64_back_trace_cmd(struct bt_info *bt) -+x86_64_display_machine_stats(void) - { -- error(FATAL, "x86_64_back_trace_cmd: TBD\n"); --} -+ struct new_utsname *uts; -+ char buf[BUFSIZE]; -+ ulong mhz; - -+ uts = &kt->utsname; - -+ fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); -+ fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); -+ fprintf(fp, " CPUS: %d\n", kt->cpus); -+ fprintf(fp, " PROCESSOR SPEED: "); -+ if ((mhz = machdep->processor_speed())) -+ fprintf(fp, "%ld Mhz\n", mhz); -+ else -+ fprintf(fp, "(unknown)\n"); -+ fprintf(fp, " HZ: %d\n", machdep->hz); -+ fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); -+ fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); -+ fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); -+ fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); -+ fprintf(fp, " KERNEL START MAP: %lx\n", __START_KERNEL_map); -+ fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR); -+ fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); -+} - - /* -- * Determine whether the initial stack pointer is located in one of the -- * exception stacks. -+ * "mach -c" - */ --static ulong --x86_64_in_exception_stack(struct bt_info *bt) -+static void -+x86_64_display_cpu_data(void) - { -- int c, i; -- ulong rsp; -- ulong estack; -- struct machine_specific *ms; -+ int cpu, cpus, boot_cpu, _cpu_pda; -+ ulong cpu_data; -+ ulong cpu_pda, cpu_pda_addr; - -- rsp = bt->stkptr; -- ms = machdep->machspec; -- estack = 0; -+ if (symbol_exists("cpu_data")) { -+ cpu_data = symbol_value("cpu_data"); -+ cpus = kt->cpus; -+ boot_cpu = FALSE; -+ } else if (symbol_exists("boot_cpu_data")) { -+ cpu_data = symbol_value("boot_cpu_data"); -+ boot_cpu = TRUE; -+ cpus = 1; -+ } -+ if (symbol_exists("_cpu_pda")) { -+ cpu_pda = symbol_value("_cpu_pda"); -+ _cpu_pda = TRUE; -+ } else if (symbol_exists("cpu_pda")) { -+ cpu_pda = symbol_value("cpu_pda"); -+ _cpu_pda = FALSE; -+ } - -- for (c = 0; !estack && (c < kt->cpus); c++) { -- for (i = 0; i < 7; i++) { -- if (ms->stkinfo.ebase[c][i] == 0) -- break; -- if ((rsp >= ms->stkinfo.ebase[c][i]) && -- (rsp < (ms->stkinfo.ebase[c][i] + -- ms->stkinfo.esize))) { -- estack = ms->stkinfo.ebase[c][i]; -- if (c != bt->tc->processor) -- error(INFO, -- "task cpu: %d exception stack cpu: %d\n", -- bt->tc->processor, c); -- break; -- } -+ for (cpu = 0; cpu < cpus; cpu++) { -+ if (boot_cpu) -+ fprintf(fp, "BOOT CPU:\n"); -+ else -+ fprintf(fp, "%sCPU %d:\n", cpu ? "\n" : "", cpu); -+ -+ dump_struct("cpuinfo_x86", cpu_data, 0); -+ fprintf(fp, "\n"); -+ -+ if (_cpu_pda) { -+ readmem(cpu_pda, KVADDR, &cpu_pda_addr, -+ sizeof(unsigned long), "_cpu_pda addr", FAULT_ON_ERROR); -+ dump_struct("x8664_pda", cpu_pda_addr, 0); -+ cpu_pda += sizeof(void *); -+ } else { -+ dump_struct("x8664_pda", cpu_pda, 0); -+ cpu_pda += SIZE(x8664_pda); - } -+ cpu_data += SIZE(cpuinfo_x86); - } -- -- return estack; - } - - /* -- * Determine whether the current stack pointer is in a cpu's irqstack. 
-+ * "mach -m" - */ --static ulong --x86_64_in_irqstack(struct bt_info *bt) -+static char *e820type[] = { -+ "(invalid type)", -+ "E820_RAM", -+ "E820_RESERVED", -+ "E820_ACPI", -+ "E820_NVS", -+}; -+ -+static void -+x86_64_display_memmap(void) - { -- int c; -- ulong rsp; -- ulong irqstack; -- struct machine_specific *ms; -+ ulong e820; -+ int nr_map, i; -+ char *buf, *e820entry_ptr; -+ ulonglong addr, size; -+ uint type; - -- rsp = bt->stkptr; -- ms = machdep->machspec; -- irqstack = 0; -+ e820 = symbol_value("e820"); -+ if (CRASHDEBUG(1)) -+ dump_struct("e820map", e820, RADIX(16)); -+ buf = (char *)GETBUF(SIZE(e820map)); - -- for (c = 0; !irqstack && (c < kt->cpus); c++) { -- if (ms->stkinfo.ibase[c] == 0) -- break; -- if ((rsp >= ms->stkinfo.ibase[c]) && -- (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) { -- irqstack = ms->stkinfo.ibase[c]; -- if (c != bt->tc->processor) -- error(INFO, -- "task cpu: %d IRQ stack cpu: %d\n", -- bt->tc->processor, c); -- break; -- } -+ readmem(e820, KVADDR, &buf[0], SIZE(e820map), -+ "e820map", FAULT_ON_ERROR); -+ -+ nr_map = INT(buf + OFFSET(e820map_nr_map)); -+ -+ fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); -+ -+ for (i = 0; i < nr_map; i++) { -+ e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i); -+ addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr)); -+ size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size)); -+ type = UINT(e820entry_ptr + OFFSET(e820entry_type)); -+ fprintf(fp, "%016llx - %016llx %s\n", addr, addr+size, -+ e820type[type]); - } -+} -+ -+ -+static const char *hook_files[] = { -+ "arch/x86_64/kernel/entry.S", -+ "arch/x86_64/kernel/head.S", -+ "arch/x86_64/kernel/semaphore.c" -+}; -+ -+#define ENTRY_S ((char **)&hook_files[0]) -+#define HEAD_S ((char **)&hook_files[1]) -+#define SEMAPHORE_C ((char **)&hook_files[2]) -+ -+static struct line_number_hook x86_64_line_number_hooks[] = { -+ {"ret_from_fork", ENTRY_S}, -+ {"system_call", ENTRY_S}, -+ {"int_ret_from_sys_call", ENTRY_S}, -+ {"ptregscall_common", ENTRY_S}, -+ {"stub_execve", ENTRY_S}, -+ {"stub_rt_sigreturn", ENTRY_S}, -+ {"common_interrupt", ENTRY_S}, -+ {"ret_from_intr", ENTRY_S}, -+ {"load_gs_index", ENTRY_S}, -+ {"arch_kernel_thread", ENTRY_S}, -+ {"execve", ENTRY_S}, -+ {"page_fault", ENTRY_S}, -+ {"coprocessor_error", ENTRY_S}, -+ {"simd_coprocessor_error", ENTRY_S}, -+ {"device_not_available", ENTRY_S}, -+ {"debug", ENTRY_S}, -+ {"nmi", ENTRY_S}, -+ {"int3", ENTRY_S}, -+ {"overflow", ENTRY_S}, -+ {"bounds", ENTRY_S}, -+ {"invalid_op", ENTRY_S}, -+ {"coprocessor_segment_overrun", ENTRY_S}, -+ {"reserved", ENTRY_S}, -+ {"double_fault", ENTRY_S}, -+ {"invalid_TSS", ENTRY_S}, -+ {"segment_not_present", ENTRY_S}, -+ {"stack_segment", ENTRY_S}, -+ {"general_protection", ENTRY_S}, -+ {"alignment_check", ENTRY_S}, -+ {"divide_error", ENTRY_S}, -+ {"spurious_interrupt_bug", ENTRY_S}, -+ {"machine_check", ENTRY_S}, -+ {"call_debug", ENTRY_S}, - -- return irqstack; -+ {NULL, NULL} /* list must be NULL-terminated */ -+}; -+ -+static void -+x86_64_dump_line_number(ulong callpc) -+{ -+ error(FATAL, "x86_64_dump_line_number: TBD\n"); - } - --#define STACK_TRANSITION_ERRMSG_E_I_P \ --"cannot transition from exception stack to IRQ stack to current process stack:\n exception stack pointer: %lx\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" --#define STACK_TRANSITION_ERRMSG_E_P \ --"cannot transition from exception stack to current process stack:\n exception stack pointer: %lx\n process stack pointer: %lx\n current_stack_base: %lx\n" 
--#define STACK_TRANSITION_ERRMSG_I_P \ --"cannot transition from IRQ stack to current process stack:\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx" -+void -+x86_64_compiler_warning_stub(void) -+{ -+ struct line_number_hook *lhp; -+ char **p; -+ -+ lhp = &x86_64_line_number_hooks[0]; lhp++; -+ p = ENTRY_S; -+ x86_64_back_trace(NULL, NULL); -+ get_x86_64_frame(NULL, NULL, NULL); -+ x86_64_dump_line_number(0); -+} - - /* -- * Low-budget back tracer -- dump text return addresses, following call chain -- * when possible, along with any verifiable exception frames. -+ * Force the VM address-range selection via: -+ * -+ * --machdep vm=orig -+ * --machdep vm=2.6.11 -+ * -+ * Force the phys_base address via: -+ * -+ * --machdep phys_base=
-+ * -+ * Force the IRQ stack back-link via: -+ * -+ * --machdep irq_eframe_link= - */ --static void --x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in) --{ -- int i, level, done; -- ulong rsp, offset, stacktop; -- ulong *up; -- long cs; -- struct syment *sp, *spt; -- FILE *ofp; -- ulong estack, irqstack; -- ulong irq_eframe; -- struct bt_info bt_local, *bt; -- struct machine_specific *ms; -- ulong last_process_stack_eframe; -- ulong user_mode_eframe; -- -- bt = &bt_local; -- BCOPY(bt_in, bt, sizeof(struct bt_info)); - -- level = 0; -- done = FALSE; -- irq_eframe = 0; -- last_process_stack_eframe = 0; -- bt->call_target = NULL; -- rsp = bt->stkptr; -- if (!rsp) { -- error(INFO, "cannot determine starting stack pointer\n"); -+void -+parse_cmdline_arg(void) -+{ -+ int i, c, errflag; -+ char *p; -+ char buf[BUFSIZE]; -+ char *arglist[MAXARGS]; -+ int megabytes; -+ int lines = 0; -+ int vm_flag; -+ ulong value; -+ -+ if (!strstr(machdep->cmdline_arg, "=")) { -+ error(WARNING, "ignoring --machdep option: %s\n\n", -+ machdep->cmdline_arg); - return; -- } -- ms = machdep->machspec; -- if (BT_REFERENCE_CHECK(bt)) -- ofp = pc->nullfp; -- else -- ofp = fp; -+ } - -- if (bt->flags & BT_TEXT_SYMBOLS) { -- fprintf(ofp, "%sSTART: %s%s at %lx\n", -- space(VADDR_PRLEN > 8 ? 14 : 6), -- closest_symbol(bt->instptr), -- STREQ(closest_symbol(bt->instptr), "thread_return") ? -- " (schedule)" : "", -- bt->instptr); -- } else if (bt->flags & BT_START) { -- x86_64_print_stack_entry(bt, ofp, level, -- 0, bt->instptr); -- bt->flags &= ~BT_START; -- level++; -- } -+ strcpy(buf, machdep->cmdline_arg); - -+ for (p = buf; *p; p++) { -+ if (*p == ',') -+ *p = ' '; -+ } - -- if ((estack = x86_64_in_exception_stack(bt))) { --in_exception_stack: -- bt->flags |= BT_EXCEPTION_STACK; -- /* -- * The stack buffer will have been loaded with the process -- * stack, so switch to the indicated exception stack. -- */ -- bt->stackbase = estack; -- bt->stacktop = estack + ms->stkinfo.esize; -- bt->stackbuf = ms->irqstack; -+ c = parse_line(buf, arglist); - -- if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -- bt->stacktop - bt->stackbase, -- bt->hp && (bt->hp->esp == bt->stkptr) ? -- "irqstack contents via hook" : "irqstack contents", -- RETURN_ON_ERROR)) -- error(FATAL, "read of exception stack at %lx failed\n", -- bt->stackbase); -+ for (i = vm_flag = 0; i < c; i++) { -+ errflag = 0; - -- /* -- * If irq_eframe is set, we've jumped back here from the -- * IRQ stack dump below. Do basically the same thing as if -- * had come from the processor stack, but presume that we -- * must have been in kernel mode, i.e., took an exception -- * while operating on an IRQ stack. 
(untested) -- */ -- if (irq_eframe) { -- bt->flags |= BT_EXCEPTION_FRAME; -- i = (irq_eframe - bt->stackbase)/sizeof(ulong); -- x86_64_print_stack_entry(bt, ofp, level, i, -- bt->instptr); -- bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; -- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -- bt->stackbuf + (irq_eframe - bt->stackbase), -- bt, ofp); -- rsp += SIZE(pt_regs); /* guaranteed kernel mode */ -- level++; -- irq_eframe = 0; -- } -+ if (STRNEQ(arglist[i], "vm=")) { -+ vm_flag++; -+ p = arglist[i] + strlen("vm="); -+ if (strlen(p)) { -+ if (STREQ(p, "orig")) { -+ machdep->flags |= VM_ORIG; -+ continue; -+ } else if (STREQ(p, "2.6.11")) { -+ machdep->flags |= VM_2_6_11; -+ continue; -+ } else if (STREQ(p, "xen")) { -+ machdep->flags |= VM_XEN; -+ continue; -+ } else if (STREQ(p, "xen-rhel4")) { -+ machdep->flags |= VM_XEN_RHEL4; -+ continue; -+ } -+ } -+ } else if (STRNEQ(arglist[i], "phys_base=")) { -+ megabytes = FALSE; -+ if ((LASTCHAR(arglist[i]) == 'm') || -+ (LASTCHAR(arglist[i]) == 'M')) { -+ LASTCHAR(arglist[i]) = NULLCHAR; -+ megabytes = TRUE; -+ } -+ p = arglist[i] + strlen("phys_base="); -+ if (strlen(p)) { -+ if (megabytes) { -+ value = dtol(p, RETURN_ON_ERROR|QUIET, -+ &errflag); -+ } else -+ value = htol(p, RETURN_ON_ERROR|QUIET, -+ &errflag); -+ if (!errflag) { -+ if (megabytes) -+ value = MEGABYTES(value); -+ machdep->machspec->phys_base = value; -+ error(NOTE, -+ "setting phys_base to: 0x%lx\n\n", -+ machdep->machspec->phys_base); -+ machdep->flags |= PHYS_BASE; -+ continue; -+ } -+ } -+ } else if (STRNEQ(arglist[i], "irq_eframe_link=")) { -+ p = arglist[i] + strlen("irq_eframe_link="); -+ if (strlen(p)) { -+ value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); -+ if (!errflag) { -+ machdep->machspec->irq_eframe_link = value; -+ continue; -+ } -+ } -+ } - -- stacktop = bt->stacktop - SIZE(pt_regs); -+ error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); -+ lines++; -+ } - -- for (i = (rsp - bt->stackbase)/sizeof(ulong); -- !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { -+ if (vm_flag) { -+ switch (machdep->flags & VM_FLAGS) -+ { -+ case 0: -+ break; -+ -+ case VM_ORIG: -+ error(NOTE, "using original x86_64 VM address ranges\n"); -+ lines++; -+ break; -+ -+ case VM_2_6_11: -+ error(NOTE, "using 2.6.11 x86_64 VM address ranges\n"); -+ lines++; -+ break; -+ -+ case VM_XEN: -+ error(NOTE, "using xen x86_64 VM address ranges\n"); -+ lines++; -+ break; - -- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); -+ case VM_XEN_RHEL4: -+ error(NOTE, "using RHEL4 xen x86_64 VM address ranges\n"); -+ lines++; -+ break; -+ -+ default: -+ error(WARNING, "cannot set multiple vm values\n"); -+ lines++; -+ machdep->flags &= ~VM_FLAGS; -+ break; -+ } -+ } - -- if (!is_kernel_text(*up)) -- continue; -+ if (lines) -+ fprintf(fp, "\n"); -+} - -- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) -- { -- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -- rsp += SIZE(pt_regs); -- i += SIZE(pt_regs)/sizeof(ulong); -- case BACKTRACE_ENTRY_DISPLAYED: -- level++; -- break; -- case BACKTRACE_ENTRY_IGNORED: -- break; -- case BACKTRACE_COMPLETE: -- done = TRUE; -- break; -- } -- } -+void -+x86_64_clear_machdep_cache(void) -+{ -+ machdep->machspec->last_upml_read = 0; -+} - -- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -- bt->stackbuf + (bt->stacktop - bt->stackbase) - -- SIZE(pt_regs), bt, ofp); -+static void -+x86_64_irq_eframe_link_init(void) -+{ -+ int c; -+ struct syment *sp, *spn; -+ char buf[BUFSIZE]; -+ char link_register[BUFSIZE]; -+ char *arglist[MAXARGS]; -+ 
ulong max_instructions; - -- if (!BT_REFERENCE_CHECK(bt)) -- fprintf(fp, "--- ---\n"); -+ if (machdep->machspec->irq_eframe_link == UNINITIALIZED) -+ machdep->machspec->irq_eframe_link = 0; -+ else -+ return; - -- /* -- * stack = (unsigned long *) estack_end[-2]; -- */ -- up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); -- up -= 2; -- rsp = bt->stkptr = *up; -- up -= 3; -- bt->instptr = *up; -- if (cs & 3) -- done = TRUE; /* user-mode exception */ -- else -- done = FALSE; /* kernel-mode exception */ -- bt->frameptr = 0; -+ if (THIS_KERNEL_VERSION < LINUX(2,6,9)) -+ return; - -- /* -- * Print the return values from the estack end. -- */ -- if (!done) { -- bt->flags |= BT_START; -- x86_64_print_stack_entry(bt, ofp, level, -- 0, bt->instptr); -- bt->flags &= ~BT_START; -- level++; -- } -+ if (!(sp = symbol_search("common_interrupt")) || -+ !(spn = next_symbol(NULL, sp))) { -+ return; - } - -- /* -- * IRQ stack entry always comes in via the process stack, regardless -- * whether it happened while running in user or kernel space. -- */ -- if (!done && (irqstack = x86_64_in_irqstack(bt))) { -- bt->flags |= BT_IRQSTACK; -- /* -- * Until coded otherwise, the stackbase will be pointing to -- * either the exception stack or, more likely, the process -- * stack base. Switch it to the IRQ stack. -- */ -- bt->stackbase = irqstack; -- bt->stacktop = irqstack + ms->stkinfo.isize; -- bt->stackbuf = ms->irqstack; -+ max_instructions = spn->value - sp->value; - -- if (!readmem(bt->stackbase, KVADDR, -- bt->stackbuf, bt->stacktop - bt->stackbase, -- bt->hp && (bt->hp->esp == bt_in->stkptr) ? -- "irqstack contents via hook" : "irqstack contents", -- RETURN_ON_ERROR)) -- error(FATAL, "read of IRQ stack at %lx failed\n", -- bt->stackbase); -+ open_tmpfile(); - -- stacktop = bt->stacktop - 64; /* from kernel code */ -+ sprintf(buf, "x/%ldi 0x%lx", -+ max_instructions, sp->value); - -- for (i = (rsp - bt->stackbase)/sizeof(ulong); -- !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { -+ if (!gdb_pass_through(buf, pc->tmpfile, GNU_RETURN_ON_ERROR)) -+ return; - -- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); -+ link_register[0] = NULLCHAR; - -- if (!is_kernel_text(*up)) -- continue; -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (!strstr(buf, sp->name)) -+ break; -+ if ((c = parse_line(buf, arglist)) < 4) -+ continue; -+ if (strstr(arglist[2], "push")) -+ strcpy(link_register, arglist[3]); -+ } -+ close_tmpfile(); - -- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) -- { -- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -- rsp += SIZE(pt_regs); -- i += SIZE(pt_regs)/sizeof(ulong); -- case BACKTRACE_ENTRY_DISPLAYED: -- level++; -- break; -- case BACKTRACE_ENTRY_IGNORED: -- break; -- case BACKTRACE_COMPLETE: -- done = TRUE; -- break; -- } -- } -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "IRQ stack link register: %s\n", -+ strlen(link_register) ? -+ link_register : "undetermined"); - -- if (!BT_REFERENCE_CHECK(bt)) -- fprintf(fp, "--- ---\n"); -+ if (STREQ(link_register, "%rbp")) -+ machdep->machspec->irq_eframe_link = 40; -+ -+} -+ -+#include "netdump.h" - -- /* -- * stack = (unsigned long *) (irqstack_end[-1]); -- * (where irqstack_end is 64 bytes below page end) -- */ -- up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); -- up -= 1; -- irq_eframe = rsp = bt->stkptr = *up; -- up -= 1; -- bt->instptr = *up; -- bt->frameptr = 0; -- done = FALSE; -- } else -- irq_eframe = 0; -+/* -+ * Determine the physical address base for relocatable kernels. 
-+ */ -+static void -+x86_64_calc_phys_base(void) -+{ -+ int i; -+ FILE *iomem; -+ char buf[BUFSIZE]; -+ char *p1; -+ ulong phys_base, text_start, kernel_code_start; -+ int errflag; -+ struct vmcore_data *vd; -+ Elf64_Phdr *phdr; - -- if (!done && (estack = x86_64_in_exception_stack(bt))) -- goto in_exception_stack; -+ if (machdep->flags & PHYS_BASE) /* --machdep override */ -+ return; - -- if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { -- /* -- * Verify that the rsp pointer taken from either the -- * exception or IRQ stack points into the process stack. -- */ -- bt->stackbase = GET_STACKBASE(bt->tc->task); -- bt->stacktop = GET_STACKTOP(bt->tc->task); -+ machdep->machspec->phys_base = 0; /* default/traditional */ - -- if (!INSTACK(rsp, bt)) { -- switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) -- { -- case (BT_EXCEPTION_STACK|BT_IRQSTACK): -- error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, -- bt_in->stkptr, bt->stkptr, rsp, -- bt->stackbase); -+ if (!kernel_symbol_exists("phys_base")) -+ return; - -- case BT_EXCEPTION_STACK: -- error(FATAL, STACK_TRANSITION_ERRMSG_E_P, -- bt_in->stkptr, rsp, bt->stackbase); -+ if (!symbol_exists("_text")) -+ return; -+ else -+ text_start = symbol_value("_text"); - -- case BT_IRQSTACK: -- error(FATAL, STACK_TRANSITION_ERRMSG_I_P, -- bt_in->stkptr, rsp, bt->stackbase); -+ if (ACTIVE()) { -+ if ((iomem = fopen("/proc/iomem", "r")) == NULL) -+ return; -+ -+ errflag = 1; -+ while (fgets(buf, BUFSIZE, iomem)) { -+ if (strstr(buf, ": Kernel code")) { -+ clean_line(buf); -+ errflag = 0; -+ break; - } - } -+ fclose(iomem); -+ -+ if (errflag) -+ return; -+ -+ if (!(p1 = strstr(buf, "-"))) -+ return; -+ else -+ *p1 = NULLCHAR; -+ -+ errflag = 0; -+ kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag); -+ if (errflag) -+ return; -+ -+ machdep->machspec->phys_base = kernel_code_start - -+ (text_start - __START_KERNEL_map); -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "_text: %lx ", text_start); -+ fprintf(fp, "Kernel code: %lx -> ", kernel_code_start); -+ fprintf(fp, "phys_base: %lx\n\n", -+ machdep->machspec->phys_base); -+ } - -- /* -- * Now fill the local stack buffer from the process stack. -- */ -- if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -- bt->stacktop - bt->stackbase, -- "irqstack contents", RETURN_ON_ERROR)) -- error(FATAL, "read of process stack at %lx failed\n", -- bt->stackbase); -- } -- -- /* -- * For a normally blocked task, hand-create the first level. -- */ -- if (!done && -- !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) && -- STREQ(closest_symbol(bt->instptr), "thread_return")) { -- bt->flags |= BT_SCHEDULE; -- i = (rsp - bt->stackbase)/sizeof(ulong); -- x86_64_print_stack_entry(bt, ofp, level, -- i, bt->instptr); -- bt->flags &= ~(ulonglong)BT_SCHEDULE; -- rsp += sizeof(ulong); -- level++; -+ return; - } - - /* -- * Dump the IRQ exception frame from the process stack. -- * If the CS register indicates a user exception frame, -- * then set done to TRUE to avoid the process stack walk-through. -- * Otherwise, bump up the rsp past the kernel-mode eframe. 
-- */ -- if (irq_eframe) { -- bt->flags |= BT_EXCEPTION_FRAME; -- i = (irq_eframe - bt->stackbase)/sizeof(ulong); -- x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); -- bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; -- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -- bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); -- if (cs & 3) -- done = TRUE; /* IRQ from user-mode */ -- else -- rsp += SIZE(pt_regs); -- level++; -- } -- -- /* -- * Walk the process stack. -+ * Get relocation value from whatever dumpfile format is being used. - */ -- for (i = (rsp - bt->stackbase)/sizeof(ulong); -- !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { -- -- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); -- -- if (!is_kernel_text(*up)) -- continue; - -- if ((bt->flags & BT_CHECK_CALLER)) { -- /* -- * A non-zero offset value from the value_search() -- * lets us know if it's a real text return address. -- */ -- spt = value_search(*up, &offset); -- /* -- * sp gets the syment of the function that the text -- * routine above called before leaving its return -- * address on the stack -- if it can be determined. -- */ -- sp = x86_64_function_called_by((*up)-5); -+ if (DISKDUMP_DUMPFILE()) { -+ if (diskdump_phys_base(&phys_base)) { -+ machdep->machspec->phys_base = phys_base; -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "compressed kdump: phys_base: %lx\n", -+ phys_base); -+ } -+ return; -+ } - -- if (sp == NULL) { -- /* -- * We were unable to get the called function. -- * If the text address had an offset, then -- * it must have made an indirect call, and -- * can't have called our target function. -- */ -- if (offset) { -- if (CRASHDEBUG(1)) -- fprintf(ofp, -- "< ignoring %s() -- makes indirect call and NOT %s()>\n", -- spt->name, -- bt->call_target); -- continue; -+ if ((vd = get_kdump_vmcore_data())) { -+ for (i = 0; i < vd->num_pt_load_segments; i++) { -+ phdr = vd->load64 + i; -+ if ((phdr->p_vaddr >= __START_KERNEL_map) && -+ !(IS_VMALLOC_ADDR(phdr->p_vaddr))) { -+ -+ machdep->machspec->phys_base = phdr->p_paddr - -+ (phdr->p_vaddr & ~(__START_KERNEL_map)); -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "p_vaddr: %lx p_paddr: %lx -> ", -+ phdr->p_vaddr, phdr->p_paddr); -+ fprintf(fp, "phys_base: %lx\n\n", -+ machdep->machspec->phys_base); - } -- } else if (!STREQ(sp->name, bt->call_target)) { -- /* -- * We got function called by the text routine, -- * but it's not our target function. 
-- */ -- if (CRASHDEBUG(2)) -- fprintf(ofp, -- "< ignoring %s() -- calls %s() and NOT %s()>\n", -- spt->name, sp->name, -- bt->call_target); -- continue; -+ break; - } - } - -- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) -- { -- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -- last_process_stack_eframe = rsp + 8; -- rsp += SIZE(pt_regs); -- i += SIZE(pt_regs)/sizeof(ulong); -- case BACKTRACE_ENTRY_DISPLAYED: -- level++; -- break; -- case BACKTRACE_ENTRY_IGNORED: -- break; -- case BACKTRACE_COMPLETE: -- done = TRUE; -- break; -- } -- } -- -- if (!irq_eframe && !is_kernel_thread(bt->tc->task) && -- (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { -- user_mode_eframe = bt->stacktop - SIZE(pt_regs); -- if (last_process_stack_eframe < user_mode_eframe) -- x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + -- (bt->stacktop - bt->stackbase) - SIZE(pt_regs), -- bt, ofp); -+ return; - } - -- if (bt->flags & BT_TEXT_SYMBOLS) { -- if (BT_REFERENCE_FOUND(bt)) { -- print_task_header(fp, task_to_context(bt->task), 0); -- BCOPY(bt_in, bt, sizeof(struct bt_info)); -- bt->ref = NULL; -- machdep->back_trace(bt); -- fprintf(fp, "\n"); -- } -+ if (XENDUMP_DUMPFILE() && (text_start == __START_KERNEL_map)) { -+ /* -+ * Xen kernels are not relocable (yet) and don't have the -+ * "phys_base" entry point, so this must be a xendump of a -+ * fully-virtualized relocatable kernel. No clues exist in -+ * the xendump header, so hardwire phys_base to 2MB and hope -+ * for the best. -+ */ -+ machdep->machspec->phys_base = 0x200000; -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "default relocatable default phys_base: %lx\n", -+ machdep->machspec->phys_base); - } - } - - /* -- * Functions that won't be called indirectly. -- * Add more to this as they are discovered. -+ * From the xen vmcore, create an index of mfns for each page that makes -+ * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. - */ --static const char *direct_call_targets[] = { -- "schedule", -- "schedule_timeout", -- NULL --}; -+ -+#define MAX_X86_64_FRAMES (512) -+#define MFNS_PER_FRAME (PAGESIZE()/sizeof(ulong)) - - static int --is_direct_call_target(struct bt_info *bt) -+x86_64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) - { -- int i; -+ int i, j; -+ ulong kvaddr; -+ ulong *up; -+ ulong frames; -+ ulong frame_mfn[MAX_X86_64_FRAMES] = { 0 }; -+ int mfns[MAX_X86_64_FRAMES] = { 0 }; - -- if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER)) -- return FALSE; -+ /* -+ * Temporarily read physical (machine) addresses from vmcore by -+ * going directly to read_netdump() instead of via read_kdump(). 
-+ */ -+ pc->readmem = read_netdump; - -- for (i = 0; direct_call_targets[i]; i++) { -- if (STREQ(direct_call_targets[i], bt->call_target)) -- return TRUE; -+ if (xkd->flags & KDUMP_CR3) -+ goto use_cr3; -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "x86_64_xen_kdump_p2m_create: p2m_mfn: %lx\n", -+ xkd->p2m_mfn); -+ -+ if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(), -+ "xen kdump p2m mfn page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump p2m mfn page\n"); -+ -+ if (CRASHDEBUG(2)) -+ x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list"); -+ -+ for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_64_FRAMES; i++, up++) -+ frame_mfn[i] = *up; -+ -+ for (i = 0; i < MAX_X86_64_FRAMES; i++) { -+ if (!frame_mfn[i]) -+ break; -+ -+ if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page, -+ PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump p2m mfn list page\n"); -+ -+ for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++) -+ if (*up) -+ mfns[i]++; -+ -+ xkd->p2m_frames += mfns[i]; -+ -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list page"); - } - -- return FALSE; -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames); -+ -+ if ((xkd->p2m_mfn_frame_list = (ulong *) -+ malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) -+ error(FATAL, "cannot malloc p2m_frame_index_list"); -+ -+ for (i = 0, frames = xkd->p2m_frames; frames; i++) { -+ if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, -+ &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], -+ mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", -+ RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump p2m mfn list page\n"); -+ -+ frames -= mfns[i]; -+ } -+ -+ if (CRASHDEBUG(2)) { -+ for (i = 0; i < xkd->p2m_frames; i++) -+ fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); -+ fprintf(fp, "\n"); -+ } -+ -+ pc->readmem = read_kdump; -+ return TRUE; -+ -+use_cr3: -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "x86_64_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3); -+ -+ if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->machspec->pml4, -+ PAGESIZE(), "xen kdump cr3 page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read xen kdump cr3 page\n"); -+ -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(fp, machdep->machspec->pml4, -+ "contents of PML4 page:"); -+ -+ kvaddr = symbol_value("end_pfn"); -+ if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page)) -+ return FALSE; -+ up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); -+ -+ xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + -+ ((*up%(PAGESIZE()/sizeof(ulong))) ? 
1 : 0); -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "end_pfn at %lx: %lx (%ld) -> %d p2m_frames\n", -+ kvaddr, *up, *up, xkd->p2m_frames); -+ -+ if ((xkd->p2m_mfn_frame_list = (ulong *) -+ malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) -+ error(FATAL, "cannot malloc p2m_frame_index_list"); -+ -+ kvaddr = symbol_value("phys_to_machine_mapping"); -+ if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page)) -+ return FALSE; -+ up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); -+ kvaddr = *up; -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr); -+ -+ machdep->last_pgd_read = BADADDR; -+ machdep->last_pmd_read = BADADDR; -+ machdep->last_ptbl_read = BADADDR; -+ -+ for (i = 0; i < xkd->p2m_frames; i++) { -+ xkd->p2m_mfn_frame_list[i] = x86_64_xen_kdump_page_mfn(kvaddr); -+ kvaddr += PAGESIZE(); -+ } -+ -+ if (CRASHDEBUG(1)) { -+ for (i = 0; i < xkd->p2m_frames; i++) -+ fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); -+ fprintf(fp, "\n"); -+ } -+ -+ machdep->last_pgd_read = 0; -+ machdep->last_ptbl_read = 0; -+ machdep->last_pmd_read = 0; -+ pc->readmem = read_kdump; -+ -+ return TRUE; - } - --static struct syment * --x86_64_function_called_by(ulong rip) -+static char * -+x86_64_xen_kdump_load_page(ulong kvaddr, char *pgbuf) - { -- struct syment *sp; -- char buf[BUFSIZE], *p1; -- ulong value, offset; -- unsigned char byte; -+ ulong mfn; -+ ulong *pml4, *pgd, *pmd, *ptep; - -- value = 0; -- sp = NULL; -+ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); -+ mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ if (CRASHDEBUG(3)) -+ fprintf(fp, -+ "[%lx] pml4: %lx mfn: %lx pml4_index: %lx\n", -+ kvaddr, *pml4, mfn, pml4_index(kvaddr)); -+ -+ if (!readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(), -+ "xen kdump pud page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read/find pud page\n"); -+ -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(fp, machdep->pgd, -+ "contents of page upper directory page:"); -+ -+ pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); -+ mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ if (CRASHDEBUG(3)) -+ fprintf(fp, -+ "[%lx] pgd: %lx mfn: %lx pgd_index: %lx\n", -+ kvaddr, *pgd, mfn, pgd_index(kvaddr)); -+ -+ if (!readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), -+ "xen kdump pmd page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read/find pmd page\n"); -+ -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(fp, machdep->pmd, -+ "contents of page middle directory page:"); -+ -+ pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); -+ mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ if (CRASHDEBUG(3)) -+ fprintf(fp, -+ "[%lx] pmd: %lx mfn: %lx pmd_index: %lx\n", -+ kvaddr, *pmd, mfn, pmd_index(kvaddr)); -+ -+ if (!readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), -+ "xen kdump page table page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read/find page table page\n"); - -- if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte", -- RETURN_ON_ERROR)) -- return sp; -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(fp, machdep->ptbl, -+ "contents of page table page:"); - -- if (byte != 0xe8) -- return sp; -+ ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); -+ mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); - -- sprintf(buf, "x/i 0x%lx", rip); -+ if (CRASHDEBUG(3)) -+ fprintf(fp, -+ "[%lx] ptep: %lx mfn: %lx pte_index: %lx\n", -+ kvaddr, *ptep, mfn, pte_index(kvaddr)); - -- open_tmpfile2(); -- if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { -- rewind(pc->tmpfile2); -- while (fgets(buf, 
BUFSIZE, pc->tmpfile2)) { -- if ((p1 = strstr(buf, "callq")) && -- whitespace(*(p1-1))) { -- if (extract_hex(p1, &value, NULLCHAR, TRUE)) -- break; -- } -- } -- } -- close_tmpfile2(); -+ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), -+ "xen kdump page table page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read/find pte page\n"); - -- if (value) -- sp = value_search(value, &offset); -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(fp, pgbuf, -+ "contents of page:"); - -- return sp; -+ return pgbuf; - } - --/* -- * Unroll the kernel stack using a minimal amount of gdb services. -- */ --static void --x86_64_back_trace(struct gnu_request *req, struct bt_info *bt) -+static ulong -+x86_64_xen_kdump_page_mfn(ulong kvaddr) - { -- error(FATAL, "x86_64_back_trace: unused\n"); -+ ulong mfn; -+ ulong *pml4, *pgd, *pmd, *ptep; -+ -+ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); -+ mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ if ((mfn != machdep->last_pgd_read) && -+ !readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(), -+ "xen kdump pud entry", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read/find pud page\n"); -+ machdep->last_pgd_read = mfn; -+ -+ pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); -+ mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ if ((mfn != machdep->last_pmd_read) && -+ !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), -+ "xen kdump pmd entry", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read/find pmd page\n"); -+ machdep->last_pmd_read = mfn; -+ -+ pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); -+ mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ if ((mfn != machdep->last_ptbl_read) && -+ !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), -+ "xen kdump page table page", RETURN_ON_ERROR)) -+ error(FATAL, "cannot read/find page table page\n"); -+ machdep->last_ptbl_read = mfn; -+ -+ ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); -+ mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ return mfn; - } - -+#include "xendump.h" - - /* -- * Print exception frame information for x86_64. -- * -- * Pid: 0, comm: swapper Not tainted 2.6.5-1.360phro.rootsmp -- * RIP: 0010:[] {default_idle+36} -- * RSP: 0018:ffffffff8048bfd8 EFLAGS: 00000246 -- * RAX: 0000000000000000 RBX: ffffffff8010f510 RCX: 0000000000000018 -- * RDX: 0000010001e37280 RSI: ffffffff803ac0a0 RDI: 000001007f43c400 -- * RBP: 0000000000000000 R08: ffffffff8048a000 R09: 0000000000000000 -- * R10: ffffffff80482188 R11: 0000000000000001 R12: 0000000000000000 -- * R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 -- * FS: 0000002a96e14fc0(0000) GS:ffffffff80481d80(0000) GS:0000000055578aa0 -- * CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b -- * CR2: 0000002a9556b000 CR3: 0000000000101000 CR4: 00000000000006e0 -- * -+ * Create an index of mfns for each page that makes up the -+ * kernel's complete phys_to_machine_mapping[max_pfn] array. 
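The load-page and page-mfn helpers above are plain four-level table walks: the virtual address contributes one 9-bit index per level plus a 12-bit page offset. A standalone sketch of that split, assuming the conventional x86_64 4KB-page layout (the real pml4_index()/pgd_index()/pmd_index()/pte_index() macros come from crash's own headers):

/* illustrative only: how a 48-bit x86_64 virtual address decomposes */
#include <stdio.h>

int main(void)
{
        unsigned long vaddr = 0xffffffff80323456UL;   /* arbitrary example */

        unsigned long pml4 = (vaddr >> 39) & 0x1ff;   /* top-level index         */
        unsigned long pud  = (vaddr >> 30) & 0x1ff;   /* upper directory index   */
        unsigned long pmd  = (vaddr >> 21) & 0x1ff;   /* middle directory index  */
        unsigned long pte  = (vaddr >> 12) & 0x1ff;   /* page table index        */
        unsigned long off  = vaddr & 0xfff;           /* byte offset in the page */

        printf("%lx -> pml4 %lu pud %lu pmd %lu pte %lu offset %#lx\n",
            vaddr, pml4, pud, pmd, pte, off);
        return 0;
}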
- */ -- --static long --x86_64_exception_frame(ulong flags, ulong kvaddr, char *local, -- struct bt_info *bt, FILE *ofp) -+static int -+x86_64_xendump_p2m_create(struct xendump_data *xd) - { -- long rip, rsp, cs, ss, rflags, orig_rax, rbp; -- long rax, rbx, rcx, rdx, rsi, rdi; -- long r8, r9, r10, r11, r12, r13, r14, r15; -- struct machine_specific *ms; -- char *pt_regs_buf; -- long verified; -- int err; -- -- ms = machdep->machspec; -- -- if (!(machdep->flags & PT_REGS_INIT)) { -- err = 0; -- err |= ((ms->pto.r15 = MEMBER_OFFSET("pt_regs", "r15")) == -- INVALID_OFFSET); -- err |= ((ms->pto.r14 = MEMBER_OFFSET("pt_regs", "r14")) == -- INVALID_OFFSET); -- err |= ((ms->pto.r13 = MEMBER_OFFSET("pt_regs", "r13")) == -- INVALID_OFFSET); -- err |= ((ms->pto.r12 = MEMBER_OFFSET("pt_regs", "r12")) == -- INVALID_OFFSET); -- err |= ((ms->pto.r11 = MEMBER_OFFSET("pt_regs", "r11")) == -- INVALID_OFFSET); -- err |= ((ms->pto.r10 = MEMBER_OFFSET("pt_regs", "r10")) == -- INVALID_OFFSET); -- err |= ((ms->pto.r9 = MEMBER_OFFSET("pt_regs", "r9")) == -- INVALID_OFFSET); -- err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == -- INVALID_OFFSET); -- err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) == -- INVALID_OFFSET); -- err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) == -- INVALID_OFFSET); -- err |= ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) == -- INVALID_OFFSET); -- err |= ((ms->pto.orig_rax = -- MEMBER_OFFSET("pt_regs", "orig_rax")) == -- INVALID_OFFSET); -- err |= ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == -- INVALID_OFFSET); -+ int i, idx; -+ ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset; -+ ulong *up; -+ off_t offset; - -- if (err) -- error(WARNING, "pt_regs structure has changed\n"); -+ if (!symbol_exists("phys_to_machine_mapping")) { -+ xd->flags |= XC_CORE_NO_P2M; -+ return TRUE; -+ } - -- machdep->flags |= PT_REGS_INIT; -- } -+ if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) == -+ INVALID_OFFSET) -+ error(FATAL, -+ "cannot determine vcpu_guest_context.ctrlreg offset\n"); -+ else if (CRASHDEBUG(1)) -+ fprintf(xd->ofp, -+ "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n", -+ ctrlreg_offset); -+ -+ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + -+ (off_t)ctrlreg_offset; -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ error(FATAL, "cannot lseek to xch_ctxt_offset\n"); -+ -+ if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != -+ sizeof(ctrlreg)) -+ error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n"); -+ -+ for (i = 0; CRASHDEBUG(1) && (i < 8); i++) -+ fprintf(xd->ofp, "ctrlreg[%d]: %lx\n", i, ctrlreg[i]); -+ -+ mfn = ctrlreg[3] >> PAGESHIFT(); -+ -+ if (!xc_core_mfn_to_page(mfn, machdep->machspec->pml4)) -+ error(FATAL, "cannot read/find cr3 page\n"); -+ -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(xd->ofp, machdep->machspec->pml4, -+ "contents of PML4 page:"); - -- if (kvaddr) { -- 
pt_regs_buf = GETBUF(SIZE(pt_regs)); -- readmem(kvaddr, KVADDR, pt_regs_buf, -- SIZE(pt_regs), "pt_regs", FAULT_ON_ERROR); -- } else -- pt_regs_buf = local; -+ kvaddr = symbol_value("end_pfn"); -+ if (!x86_64_xendump_load_page(kvaddr, xd)) -+ return FALSE; - -- rip = ULONG(pt_regs_buf + ms->pto.rip); -- rsp = ULONG(pt_regs_buf + ms->pto.rsp); -- cs = ULONG(pt_regs_buf + ms->pto.cs); -- ss = ULONG(pt_regs_buf + ms->pto.ss); -- rflags = ULONG(pt_regs_buf + ms->pto.eflags); -- orig_rax = ULONG(pt_regs_buf + ms->pto.orig_rax); -- rbp = ULONG(pt_regs_buf + ms->pto.rbp); -- rax = ULONG(pt_regs_buf + ms->pto.rax); -- rbx = ULONG(pt_regs_buf + ms->pto.rbx); -- rcx = ULONG(pt_regs_buf + ms->pto.rcx); -- rdx = ULONG(pt_regs_buf + ms->pto.rdx); -- rsi = ULONG(pt_regs_buf + ms->pto.rsi); -- rdi = ULONG(pt_regs_buf + ms->pto.rdi); -- r8 = ULONG(pt_regs_buf + ms->pto.r8); -- r9 = ULONG(pt_regs_buf + ms->pto.r9); -- r10 = ULONG(pt_regs_buf + ms->pto.r10); -- r11 = ULONG(pt_regs_buf + ms->pto.r11); -- r12 = ULONG(pt_regs_buf + ms->pto.r12); -- r13 = ULONG(pt_regs_buf + ms->pto.r13); -- r14 = ULONG(pt_regs_buf + ms->pto.r14); -- r15 = ULONG(pt_regs_buf + ms->pto.r15); -+ up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); -+ if (CRASHDEBUG(1)) -+ fprintf(xd->ofp, "end_pfn: %lx\n", *up); - -- verified = x86_64_eframe_verify(bt, -- kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase, -- cs, ss, rip, rsp, rflags); -+ xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + -+ ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0); - -- /* -- * If it's print-if-verified request, don't print bogus eframes. -- */ -- if (!verified && ((flags & (EFRAME_VERIFY|EFRAME_PRINT)) == -- (EFRAME_VERIFY|EFRAME_PRINT))) -- flags &= ~EFRAME_PRINT; -+ if ((xd->xc_core.p2m_frame_index_list = (ulong *) -+ malloc(xd->xc_core.p2m_frames * sizeof(ulong))) == NULL) -+ error(FATAL, "cannot malloc p2m_frame_list"); - -- if (CRASHDEBUG(2)) -- fprintf(ofp, "< exception frame at: %lx >\n", kvaddr ? kvaddr : -- (local - bt->stackbuf) + bt->stackbase); -+ kvaddr = symbol_value("phys_to_machine_mapping"); -+ if (!x86_64_xendump_load_page(kvaddr, xd)) -+ return FALSE; - -- if (flags & EFRAME_PRINT) { -- if (flags & EFRAME_SEARCH) { -- fprintf(ofp, "\n %s-MODE EXCEPTION FRAME AT: %lx\n", -- cs & 3 ? "USER" : "KERNEL", -- kvaddr ? 
kvaddr : -- (local - bt->stackbuf) + bt->stackbase); -- } -+ up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "phys_to_machine_mapping: %lx\n", *up); - -- fprintf(ofp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n", -- rip, rsp, rflags); -- fprintf(ofp, " RAX: %016lx RBX: %016lx RCX: %016lx\n", -- rax, rbx, rcx); -- fprintf(ofp, " RDX: %016lx RSI: %016lx RDI: %016lx\n", -- rdx, rsi, rdi); -- fprintf(ofp, " RBP: %016lx R8: %016lx R9: %016lx\n", -- rbp, r8, r9); -- fprintf(ofp, " R10: %016lx R11: %016lx R12: %016lx\n", -- r10, r11, r12); -- fprintf(ofp, " R13: %016lx R14: %016lx R15: %016lx\n", -- r13, r14, r15); -- fprintf(ofp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n", -- orig_rax, cs, ss); -+ kvaddr = *up; -+ machdep->last_ptbl_read = BADADDR; - -- if (!verified) -- error(WARNING, "possibly bogus exception frame\n"); -+ for (i = 0; i < xd->xc_core.p2m_frames; i++) { -+ if ((idx = x86_64_xendump_page_index(kvaddr, xd)) == MFN_NOT_FOUND) -+ return FALSE; -+ xd->xc_core.p2m_frame_index_list[i] = idx; -+ kvaddr += PAGESIZE(); - } - -- if ((flags & EFRAME_PRINT) && BT_REFERENCE_CHECK(bt)) { -- x86_64_do_bt_reference_check(bt, rip, NULL); -- x86_64_do_bt_reference_check(bt, rsp, NULL); -- x86_64_do_bt_reference_check(bt, cs, NULL); -- x86_64_do_bt_reference_check(bt, ss, NULL); -- x86_64_do_bt_reference_check(bt, rflags, NULL); -- x86_64_do_bt_reference_check(bt, orig_rax, NULL); -- x86_64_do_bt_reference_check(bt, rbp, NULL); -- x86_64_do_bt_reference_check(bt, rax, NULL); -- x86_64_do_bt_reference_check(bt, rbx, NULL); -- x86_64_do_bt_reference_check(bt, rcx, NULL); -- x86_64_do_bt_reference_check(bt, rdx, NULL); -- x86_64_do_bt_reference_check(bt, rsi, NULL); -- x86_64_do_bt_reference_check(bt, rdi, NULL); -- x86_64_do_bt_reference_check(bt, r8, NULL); -- x86_64_do_bt_reference_check(bt, r9, NULL); -- x86_64_do_bt_reference_check(bt, r10, NULL); -- x86_64_do_bt_reference_check(bt, r11, NULL); -- x86_64_do_bt_reference_check(bt, r12, NULL); -- x86_64_do_bt_reference_check(bt, r13, NULL); -- x86_64_do_bt_reference_check(bt, r14, NULL); -- x86_64_do_bt_reference_check(bt, r15, NULL); -- } -+ machdep->last_ptbl_read = 0; - -- if (kvaddr) -- FREEBUF(pt_regs_buf); -+ return TRUE; -+} - -- if (flags & EFRAME_CS) -- return cs; -- else if (flags & EFRAME_VERIFY) -- return verified; -+static void -+x86_64_debug_dump_page(FILE *ofp, char *page, char *name) -+{ -+ int i; -+ ulong *up; - -- return 0; -+ fprintf(ofp, "%s\n", name); -+ -+ up = (ulong *)page; -+ for (i = 0; i < 256; i++) { -+ fprintf(ofp, "%016lx: %016lx %016lx\n", -+ (ulong)((i * 2) * sizeof(ulong)), -+ *up, *(up+1)); -+ up += 2; -+ } - } - - /* -- * Check that the verifiable registers contain reasonable data. -+ * Find the page associate with the kvaddr, and read its contents -+ * into the passed-in buffer. 
- */ --#define RAZ_MASK 0xffffffffffc08028 /* return-as-zero bits */ -- --static int --x86_64_eframe_verify(struct bt_info *bt, long kvaddr, long cs, long ss, -- long rip, long rsp, long rflags) -+static char * -+x86_64_xendump_load_page(ulong kvaddr, struct xendump_data *xd) - { -- if ((rflags & RAZ_MASK) || !(rflags & 0x2)) -- return FALSE; -+ ulong mfn; -+ ulong *pml4, *pgd, *pmd, *ptep; - -- if ((cs == 0x10) && (ss == 0x18)) { -- if (is_kernel_text(rip) && IS_KVADDR(rsp)) -- return TRUE; -- } -+ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); -+ mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); - -- if ((cs == 0x10) && kvaddr) { -- if (is_kernel_text(rip) && IS_KVADDR(rsp) && -- (rsp == (kvaddr + SIZE(pt_regs) + 8))) -- return TRUE; -- } -+ if (CRASHDEBUG(3)) -+ fprintf(xd->ofp, -+ "[%lx] pml4: %lx mfn: %lx pml4_index: %lx\n", -+ kvaddr, *pml4, mfn, pml4_index(kvaddr)); - -- if ((cs == 0x10) && kvaddr) { -- if (is_kernel_text(rip) && IS_KVADDR(rsp) && -- (rsp == (kvaddr + SIZE(pt_regs)))) -- return TRUE; -- } -+ if (!xc_core_mfn_to_page(mfn, machdep->pgd)) -+ error(FATAL, "cannot read/find pud page\n"); - -- if ((cs == 0x33) && (ss == 0x2b)) { -- if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc)) -- return TRUE; -- } -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(xd->ofp, machdep->pgd, -+ "contents of page upper directory page:"); -+ -+ pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); -+ mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ if (CRASHDEBUG(3)) -+ fprintf(xd->ofp, -+ "[%lx] pgd: %lx mfn: %lx pgd_index: %lx\n", -+ kvaddr, *pgd, mfn, pgd_index(kvaddr)); -+ -+ if (!xc_core_mfn_to_page(mfn, machdep->pmd)) -+ error(FATAL, "cannot read/find pmd page\n"); -+ -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(xd->ofp, machdep->pmd, -+ "contents of page middle directory page:"); -+ -+ pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); -+ mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ -+ if (CRASHDEBUG(3)) -+ fprintf(xd->ofp, -+ "[%lx] pmd: %lx mfn: %lx pmd_index: %lx\n", -+ kvaddr, *pmd, mfn, pmd_index(kvaddr)); -+ -+ if (!xc_core_mfn_to_page(mfn, machdep->ptbl)) -+ error(FATAL, "cannot read/find page table page\n"); -+ -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(xd->ofp, machdep->ptbl, -+ "contents of page table page:"); -+ -+ ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); -+ mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); - -- return FALSE; --} -+ if (CRASHDEBUG(3)) -+ fprintf(xd->ofp, -+ "[%lx] ptep: %lx mfn: %lx pte_index: %lx\n", -+ kvaddr, *ptep, mfn, pte_index(kvaddr)); - --/* -- * Get a stack frame combination of pc and ra from the most relevent spot. -- */ --static void --x86_64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) --{ -- if (bt->flags & BT_DUMPFILE_SEARCH) -- return x86_64_get_dumpfile_stack_frame(bt, pcp, spp); -+ if (!xc_core_mfn_to_page(mfn, xd->page)) -+ error(FATAL, "cannot read/find pte page\n"); - -- if (pcp) -- *pcp = x86_64_get_pc(bt); -- if (spp) -- *spp = x86_64_get_sp(bt); -+ if (CRASHDEBUG(7)) -+ x86_64_debug_dump_page(xd->ofp, xd->page, -+ "contents of page:"); -+ -+ return xd->page; - } - - /* -- * Get the starting point for the active cpus in a diskdump/netdump. -+ * Find the dumpfile page index associated with the kvaddr. 
- */ --static void --x86_64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *rip, ulong *rsp) -+static int -+x86_64_xendump_page_index(ulong kvaddr, struct xendump_data *xd) - { -- int panic_task; -- int i, panic, stage; -- char *sym; -- struct syment *sp; -- ulong *up; -- struct bt_info bt_local, *bt; -- struct machine_specific *ms; -- char *user_regs; -- ulong ur_rip; -- ulong ur_rsp; -+ int idx; -+ ulong mfn; -+ ulong *pml4, *pgd, *pmd, *ptep; - -- bt = &bt_local; -- BCOPY(bt_in, bt, sizeof(struct bt_info)); -- ms = machdep->machspec; -- ur_rip = ur_rsp = 0; -- stage = 0; -+ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); -+ mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); - -- panic_task = tt->panic_task == bt->task ? TRUE : FALSE; -+ if ((mfn != machdep->last_pgd_read) && -+ !xc_core_mfn_to_page(mfn, machdep->pgd)) -+ error(FATAL, "cannot read/find pud page\n"); -+ machdep->last_pgd_read = mfn; - -- if (panic_task && bt->machdep) { -- user_regs = bt->machdep; -+ pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); -+ mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); - -- if (x86_64_eframe_verify(bt, -- 0, -- ULONG(user_regs + OFFSET(user_regs_struct_cs)), -- ULONG(user_regs + OFFSET(user_regs_struct_ss)), -- ULONG(user_regs + OFFSET(user_regs_struct_rip)), -- ULONG(user_regs + OFFSET(user_regs_struct_rsp)), -- ULONG(user_regs + OFFSET(user_regs_struct_eflags)))) { -- bt->stkptr = ULONG(user_regs + -- OFFSET(user_regs_struct_rsp)); -- if (x86_64_in_irqstack(bt)) { -- ur_rip = ULONG(user_regs + -- OFFSET(user_regs_struct_rip)); -- ur_rsp = ULONG(user_regs + -- OFFSET(user_regs_struct_rsp)); -- goto skip_stage; -- } -- } -- } -+ if ((mfn != machdep->last_pmd_read) && -+ !xc_core_mfn_to_page(mfn, machdep->pmd)) -+ error(FATAL, "cannot read/find pmd page\n"); - -- panic = FALSE; -+ machdep->last_pmd_read = mfn; - -- /* -- * Check the process stack first. 
-- */ --next_stack: -- for (i = 0, up = (ulong *)bt->stackbuf; -- i < (bt->stacktop - bt->stackbase)/sizeof(ulong); i++, up++) { -- sym = closest_symbol(*up); -+ pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); -+ mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); - -- if (STREQ(sym, "netconsole_netdump") || -- STREQ(sym, "netpoll_start_netdump") || -- STREQ(sym, "start_disk_dump") || -- STREQ(sym, "disk_dump") || -- STREQ(sym, "try_crashdump")) { -- *rip = *up; -- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -- return; -- } -+ if ((mfn != machdep->last_ptbl_read) && -+ !xc_core_mfn_to_page(mfn, machdep->ptbl)) -+ error(FATAL, "cannot read/find page table page\n"); -+ machdep->last_ptbl_read = mfn; - -- if ((stage == 2) && -- (STREQ(sym, "nmi_watchdog_tick") || -- STREQ(sym, "default_do_nmi"))) { -- sp = x86_64_function_called_by((*up)-5); -- if (!sp || !STREQ(sp->name, "die_nmi")) -- continue; -- *rip = *up; -- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -- bt_in->flags |= BT_START; -- *rip = symbol_value("die_nmi"); -- *rsp = (*rsp) - (7*sizeof(ulong)); -- return; -- } -+ ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); -+ mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); - -- if (STREQ(sym, "panic")) { -- *rip = *up; -- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -- panic = TRUE; -- continue; /* keep looking for die */ -- } -+ if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) -+ error(INFO, "cannot determine page index for %lx\n", -+ kvaddr); - -- if (STREQ(sym, "die")) { -- *rip = *up; -- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -- for (i++, up++; i < LONGS_PER_STACK; i++, up++) { -- sym = closest_symbol(*up); -- if (STREQ(sym, "sysrq_handle_crash")) -- goto next_sysrq; -- } -- return; -- } -+ return idx; -+} - -- if (STREQ(sym, "sysrq_handle_crash")) { --next_sysrq: -- *rip = *up; -- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -- machdep->flags |= SYSRQ; -- for (i++, up++; i < LONGS_PER_STACK; i++, up++) { -- sym = closest_symbol(*up); -- if (STREQ(sym, "sysrq_handle_crash")) -- goto next_sysrq; -- } -- return; -- } -+/* -+ * Pull the rsp from the cpu_user_regs struct in the header -+ * turn it into a task, and match it with the active_set. -+ * Unfortunately, the registers in the vcpu_guest_context -+ * are not necessarily those of the panic task, so for now -+ * let get_active_set_panic_task() get the right task. 
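The matching described in the comment above only needs a way to get from a saved stack pointer back to the task that owns the stack. A simplified standalone model follows, assuming size-aligned kernel stacks; crash's real stkptr_to_task() works from its task table rather than masking, so treat this purely as an illustration:

/* illustrative only: a simplified model, not crash's stkptr_to_task() */
#include <stdio.h>

#define STACK_SIZE 8192UL                    /* assumed size-aligned stacks */
#define NR_CPUS    4

static unsigned long active_set[NR_CPUS] = { /* invented per-cpu stack bases */
        0xffff810001e20000UL, 0xffff810001e36000UL, 0, 0
};

int main(void)
{
        unsigned long rsp  = 0xffff810001e37eb8UL;      /* example saved rsp */
        unsigned long base = rsp & ~(STACK_SIZE - 1);   /* owning stack base */
        int i;

        for (i = 0; i < NR_CPUS; i++)
                if (base == active_set[i])
                        printf("rsp %lx -> active task/stack on cpu %d\n", rsp, i);
        return 0;
}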
-+ */ -+static ulong -+x86_64_xendump_panic_task(struct xendump_data *xd) -+{ -+ int i; -+ ulong rsp; -+ off_t offset; -+ ulong task; - -- if (!panic_task && (stage > 0) && -- STREQ(sym, "smp_call_function_interrupt")) { -- *rip = *up; -- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); -- return; -- } -- } -+ if (INVALID_MEMBER(vcpu_guest_context_user_regs) || -+ INVALID_MEMBER(cpu_user_regs_esp)) -+ return NO_TASK; -+ -+ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + -+ (off_t)OFFSET(vcpu_guest_context_user_regs) + -+ (off_t)OFFSET(cpu_user_regs_rsp); -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ return NO_TASK; -+ -+ if (read(xd->xfd, &rsp, sizeof(ulong)) != sizeof(ulong)) -+ return NO_TASK; -+ -+ if (IS_KVADDR(rsp) && (task = stkptr_to_task(rsp))) { -+ -+ for (i = 0; i < NR_CPUS; i++) { -+ if (task == tt->active_set[i]) { -+ if (CRASHDEBUG(0)) -+ error(INFO, -+ "x86_64_xendump_panic_task: rsp: %lx -> task: %lx\n", -+ rsp, task); -+ return task; -+ } -+ } - -- if (panic) -- return; -+ error(WARNING, -+ "x86_64_xendump_panic_task: rsp: %lx -> task: %lx (not active)\n", -+ rsp); -+ } - --skip_stage: -- switch (stage) -- { -- /* -- * Now check the processor's interrupt stack. -- */ -- case 0: -- bt->stackbase = ms->stkinfo.ibase[bt->tc->processor]; -- bt->stacktop = ms->stkinfo.ibase[bt->tc->processor] + -- ms->stkinfo.isize; -- bt->stackbuf = ms->irqstack; -- alter_stackbuf(bt); -- stage = 1; -- goto next_stack; -+ return NO_TASK; -+} - -- /* -- * Check the NMI exception stack. -- */ -- case 1: -- bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][NMI_STACK]; -- bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][NMI_STACK] + -- ms->stkinfo.esize; -- bt->stackbuf = ms->irqstack; -- alter_stackbuf(bt); -- stage = 2; -- goto next_stack; -+/* -+ * Because of an off-by-one vcpu bug in early xc_domain_dumpcore() -+ * instantiations, the registers in the vcpu_guest_context are not -+ * necessarily those of the panic task. Furthermore, the rsp is -+ * seemingly unassociated with the task, presumably due a hypervisor -+ * callback, so only accept the contents if they retfer to the panic -+ * task's stack. -+ */ -+static void -+x86_64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp) -+{ -+ ulong task, xrip, xrsp; -+ off_t offset; -+ struct syment *sp; -+ int cpu; - -- case 2: -- break; -+ if (INVALID_MEMBER(vcpu_guest_context_user_regs) || -+ INVALID_MEMBER(cpu_user_regs_rip) || -+ INVALID_MEMBER(cpu_user_regs_rsp)) -+ goto generic; -+ -+ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + -+ (off_t)OFFSET(vcpu_guest_context_user_regs) + -+ (off_t)OFFSET(cpu_user_regs_rsp); -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ goto generic; -+ if (read(xd->xfd, &xrsp, sizeof(ulong)) != sizeof(ulong)) -+ goto generic; -+ -+ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + -+ (off_t)OFFSET(vcpu_guest_context_user_regs) + -+ (off_t)OFFSET(cpu_user_regs_rip); -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ goto generic; -+ if (read(xd->xfd, &xrip, sizeof(ulong)) != sizeof(ulong)) -+ goto generic; -+ -+ /* -+ * This works -- comes from smp_send_stop call in panic. -+ * But xendump_panic_hook() will forestall this function -+ * from being called (for now). 
-+ */ -+ if (IS_KVADDR(xrsp) && (task = stkptr_to_task(xrsp)) && -+ (task == bt->task)) { -+ if (CRASHDEBUG(1)) -+ fprintf(xd->ofp, -+ "hooks from vcpu_guest_context: rip: %lx rsp: %lx\n", xrip, xrsp); -+ *rip = xrip; -+ *rsp = xrsp; -+ return; - } - -+generic: -+ -+ machdep->get_stack_frame(bt, rip, rsp); -+ - /* -- * We didn't find what we were looking for, so just use what was -- * passed in from the ELF header. -+ * If this is an active task showing itself in schedule(), -+ * then the thread_struct rsp is stale. It has to be coming -+ * from a callback via the interrupt stack. - */ -- if (ur_rip && ur_rsp) { -- *rip = ur_rip; -- *rsp = ur_rsp; -+ if (is_task_active(bt->task) && (symbol_value("thread_return") == *rip)) { -+ cpu = bt->tc->processor; -+ xrsp = machdep->machspec->stkinfo.ibase[cpu] + -+ machdep->machspec->stkinfo.isize - sizeof(ulong); -+ -+ while (readmem(xrsp, KVADDR, &xrip, -+ sizeof(ulong), "xendump rsp", RETURN_ON_ERROR)) { -+ if ((sp = value_search(xrip, (ulong *)&offset)) && -+ STREQ(sp->name, "smp_really_stop_cpu") && offset) { -+ *rip = xrip; -+ *rsp = xrsp; -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "switch thread_return to smp_call_function_interrupt\n"); -+ break; -+ } -+ xrsp -= sizeof(ulong); -+ if (xrsp <= machdep->machspec->stkinfo.ibase[cpu]) -+ break; -+ } - } -+} - -- console("x86_64_get_dumpfile_stack_frame: cannot find anything useful\n"); -+/* for XEN Hypervisor analysis */ - -- bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH; -+static int -+x86_64_is_kvaddr_hyper(ulong addr) -+{ -+ return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); -+} - -- machdep->get_stack_frame(bt, rip, rsp); -+static ulong -+x86_64_get_stackbase_hyper(ulong task) -+{ -+ struct xen_hyper_vcpu_context *vcc; -+ struct xen_hyper_pcpu_context *pcc; -+ ulong rsp0, base; -+ -+ /* task means vcpu here */ -+ vcc = xen_hyper_vcpu_to_vcpu_context(task); -+ if (!vcc) -+ error(FATAL, "invalid vcpu\n"); -+ -+ pcc = xen_hyper_id_to_pcpu_context(vcc->processor); -+ if (!pcc) -+ error(FATAL, "invalid pcpu number\n"); -+ -+ rsp0 = pcc->sp.rsp0; -+ base = rsp0 & (~(STACKSIZE() - 1)); -+ return base; - } - --/* -- * Get the saved RSP from the task's thread_struct. -- */ - static ulong --x86_64_get_sp(struct bt_info *bt) -+x86_64_get_stacktop_hyper(ulong task) - { -- ulong offset, rsp; -+ return x86_64_get_stackbase_hyper(task) + STACKSIZE(); -+} - -- if (tt->flags & THREAD_INFO) { -- readmem(bt->task + OFFSET(task_struct_thread) + -- OFFSET(thread_struct_rsp), KVADDR, -- &rsp, sizeof(void *), -- "thread_struct rsp", FAULT_ON_ERROR); -- return rsp; -- } -+#define EXCEPTION_STACKSIZE_HYPER (1024UL) - -- offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rsp); -+static ulong -+x86_64_in_exception_stack_hyper(ulong vcpu, ulong rsp) -+{ -+ struct xen_hyper_vcpu_context *vcc; -+ struct xen_hyper_pcpu_context *pcc; -+ int i; -+ ulong stackbase; - -- return GET_STACK_ULONG(offset); -+ vcc = xen_hyper_vcpu_to_vcpu_context(vcpu); -+ if (!vcc) -+ error(FATAL, "invalid vcpu\n"); -+ -+ pcc = xen_hyper_id_to_pcpu_context(vcc->processor); -+ if (!pcc) -+ error(FATAL, "invalid pcpu number\n"); -+ -+ for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++) { -+ if (pcc->ist[i] == 0) { -+ continue; -+ } -+ stackbase = pcc->ist[i] - EXCEPTION_STACKSIZE_HYPER; -+ if ((rsp & ~(EXCEPTION_STACKSIZE_HYPER - 1)) == stackbase) { -+ return stackbase; -+ } -+ } -+ -+ return 0; - } - --/* -- * Get the saved PC from the task's thread_struct if it exists; -- * otherwise just use the "thread_return" label value. 
-- */ --static ulong --x86_64_get_pc(struct bt_info *bt) -+static void -+x86_64_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp) - { -- ulong offset, rip; -+ struct xen_hyper_vcpu_context *vcc; -+ int pcpu; -+ ulong *regs; -+ ulong rsp, rip; -+ -+ /* task means vcpu here */ -+ vcc = xen_hyper_vcpu_to_vcpu_context(bt->task); -+ if (!vcc) -+ error(FATAL, "invalid vcpu\n"); -+ -+ pcpu = vcc->processor; -+ if (!xen_hyper_test_pcpu_id(pcpu)) { -+ error(FATAL, "invalid pcpu number\n"); -+ } -+ -+ if (bt->flags & BT_TEXT_SYMBOLS_ALL) { -+ if (spp) -+ *spp = x86_64_get_stackbase_hyper(bt->task); -+ if (pcp) -+ *pcp = 0; -+ bt->flags &= ~BT_TEXT_SYMBOLS_ALL; -+ return; -+ } - -- if (INVALID_MEMBER(thread_struct_rip)) -- return symbol_value("thread_return"); -+ regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr; -+ rsp = XEN_HYPER_X86_64_NOTE_RSP(regs); -+ rip = XEN_HYPER_X86_64_NOTE_RIP(regs); -+ -+ if (spp) { -+ if (x86_64_in_exception_stack_hyper(bt->task, rsp)) -+ *spp = rsp; -+ else if (rsp < x86_64_get_stackbase_hyper(bt->task) || -+ rsp >= x86_64_get_stacktop_hyper(bt->task)) -+ *spp = x86_64_get_stackbase_hyper(bt->task); -+ else -+ *spp = rsp; -+ } -+ if (pcp) { -+ if (is_kernel_text(rip)) -+ *pcp = rip; -+ else -+ *pcp = 0; -+ } -+} - -- if (tt->flags & THREAD_INFO) { -- readmem(bt->task + OFFSET(task_struct_thread) + -- OFFSET(thread_struct_rip), KVADDR, -- &rip, sizeof(void *), -- "thread_struct rip", FAULT_ON_ERROR); -- return rip; -- } -+static int -+x86_64_print_stack_entry_hyper(struct bt_info *bt, FILE *ofp, int level, -+ int stkindex, ulong text) -+{ -+ ulong rsp, offset; -+ struct syment *sp; -+ char *name; -+ int result; -+ char buf[BUFSIZE]; - -- offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rip); -+ offset = 0; -+ sp = value_search(text, &offset); -+ if (!sp) -+ return BACKTRACE_ENTRY_IGNORED; - -- return GET_STACK_ULONG(offset); --} -+ name = sp->name; -+ -+ if (STREQ(name, "syscall_enter")) -+ result = BACKTRACE_COMPLETE; -+ else -+ result = BACKTRACE_ENTRY_DISPLAYED; -+ -+ rsp = bt->stackbase + (stkindex * sizeof(long)); -+ -+ if ((bt->flags & BT_FULL)) { -+ if (bt->frameptr) -+ x86_64_display_full_frame(bt, rsp, ofp); -+ bt->frameptr = rsp + sizeof(ulong); -+ } -+ -+ fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level, -+ rsp, name, text); -+ -+ if (bt->flags & BT_LINE_NUMBERS) { -+ get_line_number(text, buf, FALSE); -+ if (strlen(buf)) -+ fprintf(ofp, " %s\n", buf); -+ } - -+ if (BT_REFERENCE_CHECK(bt)) -+ x86_64_do_bt_reference_check(bt, text, name); - --/* -- * Do the work for x86_64_get_sp() and x86_64_get_pc(). -- */ --static void --get_x86_64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) --{ -- error(FATAL, "get_x86_64_frame: TBD\n"); -+ return result; - } - --/* -- * Do the work for cmd_irq(). 
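Each text address the entry printer accepts is reported as symbol plus offset. A standalone sketch of that resolution step, with an invented symbol table standing in for value_search():

/* illustrative only: nearest symbol at or below an address */
#include <stdio.h>

struct sym { unsigned long value; const char *name; };

static struct sym symtab[] = {                 /* invented, sorted by address */
        { 0xffff828c80112000UL, "do_softirq" },
        { 0xffff828c80112480UL, "smp_send_stop" },
        { 0xffff828c80112900UL, "panic" },
};

static struct sym *lookup(unsigned long addr, unsigned long *offset)
{
        int i, n = sizeof(symtab)/sizeof(symtab[0]);
        struct sym *best = NULL;

        for (i = 0; i < n && symtab[i].value <= addr; i++)
                best = &symtab[i];
        if (best && offset)
                *offset = addr - best->value;
        return best;
}

int main(void)
{
        unsigned long offset, text = 0xffff828c801124d2UL;
        struct sym *sp = lookup(text, &offset);

        if (sp)
                printf("%lx -> %s+%#lx\n", text, sp->name, offset);
        return 0;
}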
-- */ --static void --x86_64_dump_irq(int irq) -+static void -+x86_64_print_eframe_regs_hyper(struct bt_info *bt) - { -- if (symbol_exists("irq_desc")) { -- machdep->dump_irq = generic_dump_irq; -- return(generic_dump_irq(irq)); -- } -+ ulong *up; -+ ulong offset; -+ struct syment *sp; - -- error(FATAL, "ia64_dump_irq: irq_desc[] does not exist?\n"); --} - --/* -- * Do the work for irq -d -- */ --void --x86_64_display_idt_table(void) --{ -- int i; -- char *idt_table_buf; -- char buf[BUFSIZE]; -- ulong *ip; -+ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); -+ up -= 21; - -- idt_table_buf = GETBUF(SIZE(gate_struct) * 256); -- readmem(symbol_value("idt_table"), KVADDR, idt_table_buf, -- SIZE(gate_struct) * 256, "idt_table", FAULT_ON_ERROR); -- ip = (ulong *)idt_table_buf; -+ fprintf(fp, " [exception RIP: "); -+ if ((sp = value_search(up[16], &offset))) { -+ fprintf(fp, "%s", sp->name); -+ if (offset) -+ fprintf(fp, (output_radix == 16) ? -+ "+0x%lx" : "+%ld", offset); -+ } else -+ fprintf(fp, "unknown or invalid address"); -+ fprintf(fp, "]\n"); - -- for (i = 0; i < 256; i++, ip += 2) { -- if (i < 10) -- fprintf(fp, " "); -- else if (i < 100) -- fprintf(fp, " "); -- fprintf(fp, "[%d] %s\n", -- i, x86_64_extract_idt_function(ip, buf, NULL)); -- } -+ fprintf(fp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n", -+ up[16], up[19], up[18]); -+ fprintf(fp, " RAX: %016lx RBX: %016lx RCX: %016lx\n", -+ up[10], up[5], up[11]); -+ fprintf(fp, " RDX: %016lx RSI: %016lx RDI: %016lx\n", -+ up[11], up[13], up[14]); -+ fprintf(fp, " RBP: %016lx R8: %016lx R9: %016lx\n", -+ up[4], up[9], up[8]); -+ fprintf(fp, " R10: %016lx R11: %016lx R12: %016lx\n", -+ up[7], up[6], up[3]); -+ fprintf(fp, " R13: %016lx R14: %016lx R15: %016lx\n", -+ up[2], up[1], up[0]); -+ fprintf(fp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n", -+ up[15], up[17], up[20]); - -- FREEBUF(idt_table_buf); -+ fprintf(fp, "--- ---\n"); - } - - /* -- * Extract the function name out of the IDT entry. -+ * simple back tracer for xen hypervisor -+ * irq stack does not exist. so relative easy. 
- */ --static char * --x86_64_extract_idt_function(ulong *ip, char *buf, ulong *retaddr) -+static void -+x86_64_simple_back_trace_cmd_hyper(struct bt_info *bt_in) - { -- ulong i1, i2, addr; -- char locbuf[BUFSIZE]; -- physaddr_t phys; -+ int i, level, done; -+ ulong rsp, estack, stacktop; -+ ulong *up; -+ FILE *ofp; -+ struct bt_info bt_local, *bt; -+ char ebuf[EXCEPTION_STACKSIZE_HYPER]; - -- if (buf) -- BZERO(buf, BUFSIZE); -+ bt = &bt_local; -+ BCOPY(bt_in, bt, sizeof(struct bt_info)); - -- i1 = *ip; -- i2 = *(ip+1); -+ if (bt->flags & BT_FRAMESIZE_DEBUG) { -+ error(INFO, "-F not support\n"); -+ return; -+ } - -- i2 <<= 32; -- addr = i2 & 0xffffffff00000000; -- addr |= (i1 & 0xffff); -- i1 >>= 32; -- addr |= (i1 & 0xffff0000); -+ level = 0; -+ done = FALSE; -+ bt->call_target = NULL; -+ rsp = bt->stkptr; -+ if (!rsp) { -+ error(INFO, "cannot determine starting stack pointer\n"); -+ return; -+ } -+ if (BT_REFERENCE_CHECK(bt)) -+ ofp = pc->nullfp; -+ else -+ ofp = fp; - -- if (retaddr) -- *retaddr = addr; -+ while ((estack = x86_64_in_exception_stack_hyper(bt->task, rsp))) { -+ bt->flags |= BT_EXCEPTION_STACK; -+ bt->stackbase = estack; -+ bt->stacktop = estack + EXCEPTION_STACKSIZE_HYPER; -+ bt->stackbuf = ebuf; - -- if (!buf) -- return NULL; -+ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -+ bt->stacktop - bt->stackbase, "exception stack contents", -+ RETURN_ON_ERROR)) -+ error(FATAL, "read of exception stack at %lx failed\n", -+ bt->stackbase); - -- value_to_symstr(addr, locbuf, 0); -- if (strlen(locbuf)) -- sprintf(buf, locbuf); -- else { -- sprintf(buf, "%016lx", addr); -- if (kvtop(NULL, addr, &phys, 0)) { -- addr = machdep->kvbase + (ulong)phys; -- if (value_to_symstr(addr, locbuf, 0)) { -- strcat(buf, " <"); -- strcat(buf, locbuf); -- strcat(buf, ">"); -- } -- } -- } -+ stacktop = bt->stacktop - 168; - -- return buf; --} -+ for (i = (rsp - bt->stackbase)/sizeof(ulong); -+ !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { -+ -+ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); - --/* -- * Filter disassembly output if the output radix is not gdb's default 10 -- */ --static int --x86_64_dis_filter(ulong vaddr, char *inbuf) --{ -- char buf1[BUFSIZE]; -- char buf2[BUFSIZE]; -- char *colon, *p1; -- int argc; -- char *argv[MAXARGS]; -- ulong value; -+ if (!is_kernel_text(*up)) -+ continue; - -- if (!inbuf) -- return TRUE; --/* -- * For some reason gdb can go off into the weeds translating text addresses, -- * (on alpha -- not necessarily seen on x86_64) so this routine both fixes the -- * references as well as imposing the current output radix on the translations. 
-- */ -- console("IN: %s", inbuf); -+ switch (x86_64_print_stack_entry_hyper(bt, ofp, level, i,*up)) -+ { -+ case BACKTRACE_ENTRY_DISPLAYED: -+ level++; -+ break; -+ case BACKTRACE_ENTRY_IGNORED: -+ break; -+ case BACKTRACE_COMPLETE: -+ done = TRUE; -+ break; -+ } -+ } - -- colon = strstr(inbuf, ":"); -+ if (!BT_REFERENCE_CHECK(bt)) -+ x86_64_print_eframe_regs_hyper(bt); - -- if (colon) { -- sprintf(buf1, "0x%lx <%s>", vaddr, -- value_to_symstr(vaddr, buf2, pc->output_radix)); -- sprintf(buf2, "%s%s", buf1, colon); -- strcpy(inbuf, buf2); -+ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); -+ up -= 2; -+ rsp = bt->stkptr = *up; -+ up -= 3; -+ bt->instptr = *up; -+ done = FALSE; -+ bt->frameptr = 0; - } - -- strcpy(buf1, inbuf); -- argc = parse_line(buf1, argv); -+ if (bt->flags & BT_EXCEPTION_STACK) { -+ bt->flags &= ~BT_EXCEPTION_STACK; -+ bt->stackbase = bt_in->stackbase; -+ bt->stacktop = bt_in->stacktop; -+ bt->stackbuf = bt_in->stackbuf; -+ } - -- if ((FIRSTCHAR(argv[argc-1]) == '<') && -- (LASTCHAR(argv[argc-1]) == '>')) { -- p1 = rindex(inbuf, '<'); -- while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) -- p1--; -+ for (i = (rsp - bt->stackbase)/sizeof(ulong); -+ !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { - -- if (!STRNEQ(p1, " 0x")) -- return FALSE; -- p1++; -+ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); - -- if (!extract_hex(p1, &value, NULLCHAR, TRUE)) -- return FALSE; -+ if (!is_kernel_text(*up)) -+ continue; - -- sprintf(buf1, "0x%lx <%s>\n", value, -- value_to_symstr(value, buf2, pc->output_radix)); -+ switch (x86_64_print_stack_entry_hyper(bt, ofp, level, i,*up)) -+ { -+ case BACKTRACE_ENTRY_DISPLAYED: -+ level++; -+ break; -+ case BACKTRACE_ENTRY_IGNORED: -+ break; -+ case BACKTRACE_COMPLETE: -+ done = TRUE; -+ break; -+ } -+ } -+} - -- sprintf(p1, buf1); -- -- } else if (STREQ(argv[argc-2], "callq") && -- hexadecimal(argv[argc-1], 0)) { -- /* -- * Update module code of the form: -- * -- * callq 0xffffffffa0017aa0 -- * -- * to show a bracketed direct call target. 
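The hypervisor back tracer above is deliberately simple: with no separate IRQ stack to chase, it scans saved stack words and treats anything that falls inside kernel text as a possible return address. A standalone sketch of that scan, using an invented text range and stack contents:

/* illustrative only: scan stack words for text-range addresses */
#include <stdio.h>

#define TEXT_START 0xffff828c80100000UL
#define TEXT_END   0xffff828c80200000UL

static int is_kernel_text(unsigned long addr)
{
        return addr >= TEXT_START && addr < TEXT_END;
}

int main(void)
{
        unsigned long stack[] = {              /* invented stack contents */
                0x0000000000000282UL,          /* saved flags: ignored         */
                0xffff828c801124d2UL,          /* looks like a return address  */
                0xffff830000fe0000UL,          /* data pointer: ignored        */
                0xffff828c8011a010UL,          /* another text address         */
        };
        int i, level = 0;

        for (i = 0; i < (int)(sizeof(stack)/sizeof(stack[0])); i++)
                if (is_kernel_text(stack[i]))
                        printf("#%d  possible return address: %lx\n",
                            level++, stack[i]);
        return 0;
}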
-- */ -- p1 = &LASTCHAR(inbuf); -+static void -+x86_64_init_hyper(int when) -+{ -+ switch (when) -+ { -+ case PRE_SYMTAB: -+ machdep->verify_symbol = x86_64_verify_symbol; -+ machdep->machspec = &x86_64_machine_specific; -+ if (pc->flags & KERNEL_DEBUG_QUERY) -+ return; -+ machdep->pagesize = memory_page_size(); -+ machdep->pageshift = ffs(machdep->pagesize) - 1; -+ machdep->pageoffset = machdep->pagesize - 1; -+ machdep->pagemask = ~((ulonglong)machdep->pageoffset); -+ machdep->stacksize = machdep->pagesize * 2; -+ if ((machdep->machspec->upml = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc upml space."); -+ if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc pgd space."); -+ if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc pmd space."); -+ if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, "cannot malloc ptbl space."); -+ if ((machdep->machspec->pml4 = -+ (char *)malloc(PAGESIZE()*2)) == NULL) -+ error(FATAL, "cannot malloc pml4 space."); -+ machdep->machspec->last_upml_read = 0; -+ machdep->machspec->last_pml4_read = 0; -+ machdep->last_pgd_read = 0; -+ machdep->last_pmd_read = 0; -+ machdep->last_ptbl_read = 0; -+ machdep->verify_paddr = generic_verify_paddr; -+ machdep->ptrs_per_pgd = PTRS_PER_PGD; -+ if (machdep->cmdline_arg) -+ parse_cmdline_arg(); -+ break; - -- if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) { -- sprintf(buf1, " <%s>\n", -- value_to_symstr(value, buf2, -- pc->output_radix)); -- if (IS_MODULE_VADDR(value) && -- !strstr(buf2, "+")) -- sprintf(p1, buf1); -- } -- } -+ case PRE_GDB: -+ machdep->machspec->page_offset = PAGE_OFFSET_XEN_HYPER; -+ machdep->kvbase = (ulong)HYPERVISOR_VIRT_START; -+ machdep->identity_map_base = (ulong)PAGE_OFFSET_XEN_HYPER; -+ machdep->is_kvaddr = x86_64_is_kvaddr_hyper; -+ machdep->is_uvaddr = x86_64_is_uvaddr; -+ machdep->eframe_search = x86_64_eframe_search; -+ machdep->back_trace = x86_64_simple_back_trace_cmd_hyper; -+ machdep->processor_speed = x86_64_processor_speed; -+ machdep->kvtop = x86_64_kvtop; -+ machdep->get_task_pgd = x86_64_get_task_pgd; -+ machdep->get_stack_frame = x86_64_get_stack_frame_hyper; -+ machdep->get_stackbase = x86_64_get_stackbase_hyper; -+ machdep->get_stacktop = x86_64_get_stacktop_hyper; -+ machdep->translate_pte = x86_64_translate_pte; -+ machdep->memory_size = xen_hyper_x86_memory_size; /* KAK add */ -+ machdep->is_task_addr = x86_64_is_task_addr; -+ machdep->dis_filter = x86_64_dis_filter; -+ machdep->cmd_mach = x86_64_cmd_mach; -+ machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus; /* KAK add */ -+ machdep->line_number_hooks = x86_64_line_number_hooks; -+ machdep->value_to_symbol = generic_machdep_value_to_symbol; -+ machdep->init_kernel_pgd = x86_64_init_kernel_pgd; -+ machdep->clear_machdep_cache = x86_64_clear_machdep_cache; -+ -+ /* machdep table for Xen Hypervisor */ -+ xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init; -+ break; -+ -+ case POST_GDB: -+ XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); -+ XEN_HYPER_STRUCT_SIZE_INIT(tss_struct, "tss_struct"); -+ XEN_HYPER_ASSIGN_OFFSET(tss_struct_rsp0) = MEMBER_OFFSET("tss_struct", "__blh") + sizeof(short unsigned int); -+ XEN_HYPER_MEMBER_OFFSET_INIT(tss_struct_ist, "tss_struct", "ist"); -+ if (symbol_exists("cpu_data")) { -+ xht->cpu_data_address = symbol_value("cpu_data"); -+ } -+/* KAK Can this be calculated? 
*/ -+ if (!machdep->hz) { -+ machdep->hz = XEN_HYPER_HZ; -+ } -+ break; -+ -+ case POST_INIT: -+ break; -+ } -+} - -- console(" %s", inbuf); - -- return TRUE; --} -+struct framesize_cache { -+ ulong textaddr; -+ int framesize; -+}; - -+static struct framesize_cache *x86_64_framesize_cache = NULL; -+static int framesize_cache_entries = 0; - --/* -- * Override smp_num_cpus if possible and necessary. -- */ --int --x86_64_get_smp_cpus(void) --{ -- int i, cpus, nr_pda, cpunumber; -- char *cpu_pda_buf; -- ulong level4_pgt; -+#define FRAMESIZE_QUERY (1) -+#define FRAMESIZE_ENTER (2) -+#define FRAMESIZE_DUMP (3) - -- if (!VALID_STRUCT(x8664_pda)) -- return 1; -+#define FRAMESIZE_CACHE_INCR (50) - -- cpu_pda_buf = GETBUF(SIZE(x8664_pda)); -+static int -+x86_64_framesize_cache_resize(void) -+{ -+ int i; -+ struct framesize_cache *new_fc, *fc; - -- if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) -- nr_pda = NR_CPUS; -+ if ((new_fc = realloc(x86_64_framesize_cache, -+ (framesize_cache_entries+FRAMESIZE_CACHE_INCR) * -+ sizeof(struct framesize_cache))) == NULL) { -+ error(INFO, "cannot realloc x86_64_framesize_cache space!\n"); -+ return FALSE; -+ } - -- for (i = cpus = 0; i < nr_pda; i++) { -- if (!CPU_PDA_READ(i, cpu_pda_buf)) -- break; -- level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); -- cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber)); -- if (!VALID_LEVEL4_PGT_ADDR(level4_pgt) || (cpunumber != cpus)) -- break; -- cpus++; -- } -+ fc = new_fc + framesize_cache_entries; -+ for (i = framesize_cache_entries; -+ i < (framesize_cache_entries+FRAMESIZE_CACHE_INCR); -+ fc++, i++) { -+ fc->textaddr = 0; -+ fc->framesize = 0; -+ } - -- FREEBUF(cpu_pda_buf); -+ x86_64_framesize_cache = new_fc; -+ framesize_cache_entries += FRAMESIZE_CACHE_INCR; - -- return cpus; -+ return TRUE; - } - --/* -- * Machine dependent command. -- */ --void --x86_64_cmd_mach(void) -+static int -+x86_64_framesize_cache_func(int cmd, ulong textaddr, int *framesize) - { -- int c; -+ int i; -+ struct framesize_cache *fc; -+ char buf[BUFSIZE]; - -- while ((c = getopt(argcnt, args, "cm")) != EOF) { -- switch(c) -- { -- case 'c': -- x86_64_display_cpu_data(); -- return; -+ if (!x86_64_framesize_cache) { -+ framesize_cache_entries = FRAMESIZE_CACHE_INCR; -+ if ((x86_64_framesize_cache = calloc(framesize_cache_entries, -+ sizeof(struct framesize_cache))) == NULL) -+ error(FATAL, -+ "cannot calloc x86_64_framesize_cache space!\n"); -+ } - -- case 'm': -- x86_64_display_memmap(); -- return; -+ switch (cmd) -+ { -+ case FRAMESIZE_QUERY: -+ fc = &x86_64_framesize_cache[0]; -+ for (i = 0; i < framesize_cache_entries; i++, fc++) { -+ if (fc->textaddr == textaddr) { -+ *framesize = fc->framesize; -+ return TRUE; -+ } -+ } -+ return FALSE; - -- default: -- argerrs++; -- break; -- } -- } -+ case FRAMESIZE_ENTER: -+retry: -+ fc = &x86_64_framesize_cache[0]; -+ for (i = 0; i < framesize_cache_entries; i++, fc++) { -+ if ((fc->textaddr == 0) || -+ (fc->textaddr == textaddr)) { -+ fc->textaddr = textaddr; -+ fc->framesize = *framesize; -+ return fc->framesize; -+ } -+ } - -- if (argerrs) -- cmd_usage(pc->curcmd, SYNOPSIS); -+ if (x86_64_framesize_cache_resize()) -+ goto retry; - -- x86_64_display_machine_stats(); --} -+ return *framesize; - --/* -- * "mach" command output. 
-- */ --static void --x86_64_display_machine_stats(void) --{ -- struct new_utsname *uts; -- char buf[BUFSIZE]; -- ulong mhz; -+ case FRAMESIZE_DUMP: -+ fc = &x86_64_framesize_cache[0]; -+ for (i = 0; i < framesize_cache_entries; i++, fc++) { -+ if (fc->textaddr == 0) { -+ if (i < (framesize_cache_entries-1)) { -+ fprintf(fp, "[%d-%d]: (unused)\n", -+ i, framesize_cache_entries-1); -+ } -+ break; -+ } - -- uts = &kt->utsname; -+ fprintf(fp, "[%3d]: %lx %3d (%s)\n", i, -+ fc->textaddr, fc->framesize, -+ value_to_symstr(fc->textaddr, buf, 0)); -+ } -+ break; -+ } - -- fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); -- fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); -- fprintf(fp, " CPUS: %d\n", kt->cpus); -- fprintf(fp, " PROCESSOR SPEED: "); -- if ((mhz = machdep->processor_speed())) -- fprintf(fp, "%ld Mhz\n", mhz); -- else -- fprintf(fp, "(unknown)\n"); -- fprintf(fp, " HZ: %d\n", machdep->hz); -- fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); -- fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); -- fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); -- fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); -- fprintf(fp, " KERNEL START MAP: %lx\n", __START_KERNEL_map); -- fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR); -- fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); -+ return TRUE; - } - --/* -- * "mach -c" -- */ --static void --x86_64_display_cpu_data(void) -+#define BT_FRAMESIZE_IGNORE_MASK \ -+ (BT_OLD_BACK_TRACE|BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_ALL|BT_FRAMESIZE_DISABLE) -+ -+static int -+x86_64_get_framesize(struct bt_info *bt, ulong textaddr) - { -- int cpu, cpus, boot_cpu; -- ulong cpu_data; -- ulong cpu_pda; -- -- if (symbol_exists("cpu_data")) { -- cpu_data = symbol_value("cpu_data"); -- cpus = kt->cpus; -- boot_cpu = FALSE; -- } else if (symbol_exists("boot_cpu_data")) { -- cpu_data = symbol_value("boot_cpu_data"); -- boot_cpu = TRUE; -- cpus = 1; -+ int c, framesize, instr, arg; -+ struct syment *sp; -+ long max_instructions; -+ ulong offset; -+ char buf[BUFSIZE]; -+ char buf2[BUFSIZE]; -+ char *arglist[MAXARGS]; -+ ulong locking_func, textaddr_save, current; -+ char *p1, *p2; -+ int reterror; -+ -+ if (!(bt->flags & BT_FRAMESIZE_DEBUG)) { -+ if ((bt->flags & BT_FRAMESIZE_IGNORE_MASK) || -+ (kt->flags & USE_OLD_BT)) -+ return 0; -+ } -+ -+ if (!(sp = value_search(textaddr, &offset))) { -+ if (!(bt->flags & BT_FRAMESIZE_DEBUG)) -+ bt->flags |= BT_FRAMESIZE_DISABLE; -+ return 0; -+ } -+ -+ if (!(bt->flags & BT_FRAMESIZE_DEBUG) && -+ x86_64_framesize_cache_func(FRAMESIZE_QUERY, textaddr, &framesize)) { -+ if (framesize == -1) -+ bt->flags |= BT_FRAMESIZE_DISABLE; -+ return framesize; - } -- cpu_pda = symbol_value("cpu_pda"); -- -- for (cpu = 0; cpu < cpus; cpu++) { -- if (boot_cpu) -- fprintf(fp, "BOOT CPU:\n"); -- else -- fprintf(fp, "%sCPU %d:\n", cpu ? "\n" : "", cpu); - -- dump_struct("cpuinfo_x86", cpu_data, 0); -- fprintf(fp, "\n"); -- dump_struct("x8664_pda", cpu_pda, 0); -- -- cpu_data += SIZE(cpuinfo_x86); -- cpu_pda += SIZE(x8664_pda); -- } --} -+ /* -+ * Bait and switch an incoming .text.lock address -+ * with the containing function's address. 
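The cache above lets a function's frame size be computed from its disassembly only once per session; later backtraces hit the table instead. A compact standalone sketch of the same query/enter/grow pattern, with illustrative names and sizes:

/* illustrative only: a simplified stand-in for the framesize cache above */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_INCR 50                  /* grow in fixed-size chunks */

struct fs_ent { unsigned long textaddr; int framesize; };

static struct fs_ent *cache;
static int cache_entries;

static int cache_query(unsigned long addr, int *framesize)
{
        int i;

        for (i = 0; i < cache_entries; i++)
                if (cache[i].textaddr == addr) {
                        *framesize = cache[i].framesize;
                        return 1;
                }
        return 0;
}

static void cache_enter(unsigned long addr, int framesize)
{
        int i;

        for (i = 0; i < cache_entries; i++)
                if (!cache[i].textaddr || cache[i].textaddr == addr) {
                        cache[i].textaddr = addr;
                        cache[i].framesize = framesize;
                        return;
                }

        /* table full: grow it, zero-fill the new slots, and retry */
        cache = realloc(cache, (cache_entries + CACHE_INCR) * sizeof(*cache));
        if (!cache)
                exit(1);
        memset(cache + cache_entries, 0, CACHE_INCR * sizeof(*cache));
        cache_entries += CACHE_INCR;
        cache_enter(addr, framesize);
}

int main(void)
{
        int framesize;

        if (!(cache = calloc(cache_entries = CACHE_INCR, sizeof(*cache))))
                return 1;
        cache_enter(0xffffffff80110000UL, 24);     /* invented address/size */
        if (cache_query(0xffffffff80110000UL, &framesize))
                printf("cached framesize: %d\n", framesize);
        free(cache);
        return 0;
}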
-+ */ -+ if (STRNEQ(sp->name, ".text.lock.") && -+ (locking_func = text_lock_function(sp->name, bt, textaddr))) { -+ if (!(sp = value_search(locking_func, &offset))) { -+ bt->flags |= BT_FRAMESIZE_DISABLE; -+ return 0; -+ } -+ textaddr_save = textaddr; -+ textaddr = locking_func; -+ } else -+ textaddr_save = 0; - --/* -- * "mach -m" -- */ --static char *e820type[] = { -- "(invalid type)", -- "E820_RAM", -- "E820_RESERVED", -- "E820_ACPI", -- "E820_NVS", --}; -+ framesize = 0; -+ max_instructions = textaddr - sp->value; -+ instr = arg = -1; - --static void --x86_64_display_memmap(void) --{ -- ulong e820; -- int nr_map, i; -- char *buf, *e820entry_ptr; -- ulonglong addr, size; -- uint type; -+ open_tmpfile2(); - -- e820 = symbol_value("e820"); -- if (CRASHDEBUG(1)) -- dump_struct("e820map", e820, RADIX(16)); -- buf = (char *)GETBUF(SIZE(e820map)); -+ sprintf(buf, "x/%ldi 0x%lx", -+ max_instructions, sp->value); - -- readmem(e820, KVADDR, &buf[0], SIZE(e820map), -- "e820map", FAULT_ON_ERROR); -+ if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { -+ close_tmpfile2(); -+ bt->flags |= BT_FRAMESIZE_DISABLE; -+ return 0; -+ } - -- nr_map = INT(buf + OFFSET(e820map_nr_map)); -+ rewind(pc->tmpfile2); -+ while (fgets(buf, BUFSIZE, pc->tmpfile2)) { -+ strcpy(buf2, buf); - -- fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); -+ if (CRASHDEBUG(3)) -+ fprintf(pc->saved_fp, buf2); - -- for (i = 0; i < nr_map; i++) { -- e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i); -- addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr)); -- size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size)); -- type = UINT(e820entry_ptr + OFFSET(e820entry_type)); -- fprintf(fp, "%016llx - %016llx %s\n", addr, addr+size, -- e820type[type]); -- } --} -+ c = parse_line(buf, arglist); - -+ if (instr == -1) { -+ /* -+ * Check whether are -+ * in the output string. 
-+ */ -+ if (LASTCHAR(arglist[0]) == ':') { -+ instr = 1; -+ arg = 2; -+ } else { -+ instr = 2; -+ arg = 3; -+ } -+ } - --static const char *hook_files[] = { -- "arch/x86_64/kernel/entry.S", -- "arch/x86_64/kernel/head.S", -- "arch/x86_64/kernel/semaphore.c" --}; -+ if (c < (arg+1)) -+ continue; - --#define ENTRY_S ((char **)&hook_files[0]) --#define HEAD_S ((char **)&hook_files[1]) --#define SEMAPHORE_C ((char **)&hook_files[2]) -+ reterror = 0; -+ current = htol(strip_ending_char(arglist[0], ':'), -+ RETURN_ON_ERROR, &reterror); -+ if (reterror) -+ continue; -+ if (current >= textaddr) -+ break; - --static struct line_number_hook x86_64_line_number_hooks[] = { -- {"ret_from_fork", ENTRY_S}, -- {"system_call", ENTRY_S}, -- {"int_ret_from_sys_call", ENTRY_S}, -- {"ptregscall_common", ENTRY_S}, -- {"stub_execve", ENTRY_S}, -- {"stub_rt_sigreturn", ENTRY_S}, -- {"common_interrupt", ENTRY_S}, -- {"ret_from_intr", ENTRY_S}, -- {"load_gs_index", ENTRY_S}, -- {"arch_kernel_thread", ENTRY_S}, -- {"execve", ENTRY_S}, -- {"page_fault", ENTRY_S}, -- {"coprocessor_error", ENTRY_S}, -- {"simd_coprocessor_error", ENTRY_S}, -- {"device_not_available", ENTRY_S}, -- {"debug", ENTRY_S}, -- {"nmi", ENTRY_S}, -- {"int3", ENTRY_S}, -- {"overflow", ENTRY_S}, -- {"bounds", ENTRY_S}, -- {"invalid_op", ENTRY_S}, -- {"coprocessor_segment_overrun", ENTRY_S}, -- {"reserved", ENTRY_S}, -- {"double_fault", ENTRY_S}, -- {"invalid_TSS", ENTRY_S}, -- {"segment_not_present", ENTRY_S}, -- {"stack_segment", ENTRY_S}, -- {"general_protection", ENTRY_S}, -- {"alignment_check", ENTRY_S}, -- {"divide_error", ENTRY_S}, -- {"spurious_interrupt_bug", ENTRY_S}, -- {"machine_check", ENTRY_S}, -- {"call_debug", ENTRY_S}, -+ if (STRNEQ(arglist[instr], "push")) { -+ framesize += 8; -+ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) -+ fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", -+ strip_linefeeds(buf2), framesize); -+ } else if (STRNEQ(arglist[instr], "pop")) { -+ framesize -= 8; -+ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) -+ fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", -+ strip_linefeeds(buf2), framesize); -+ } else if (STRNEQ(arglist[instr], "add") && -+ (p1 = strstr(arglist[arg], ",%rsp"))) { -+ *p1 = NULLCHAR; -+ p2 = arglist[arg]; -+ reterror = 0; -+ offset = htol(p2+1, RETURN_ON_ERROR, &reterror); -+ if (reterror) -+ continue; -+ framesize -= offset; -+ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) -+ fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", -+ strip_linefeeds(buf2), framesize); -+ } else if (STRNEQ(arglist[instr], "sub") && -+ (p1 = strstr(arglist[arg], ",%rsp"))) { -+ *p1 = NULLCHAR; -+ p2 = arglist[arg]; -+ reterror = 0; -+ offset = htol(p2+1, RETURN_ON_ERROR, &reterror); -+ if (reterror) -+ continue; -+ framesize += offset; -+ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) -+ fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", -+ strip_linefeeds(buf2), framesize); -+ } else if (STRNEQ(arglist[instr], "retq")) { -+ bt->flags |= BT_FRAMESIZE_DISABLE; -+ framesize = -1; -+ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) -+ fprintf(pc->saved_fp, "%s\t[framesize: DISABLED]\n", -+ strip_linefeeds(buf2)); -+ break; -+ } -+ } -+ close_tmpfile2(); - -- {NULL, NULL} /* list must be NULL-terminated */ --}; -+ if (textaddr_save) -+ textaddr = textaddr_save; - --static void --x86_64_dump_line_number(ulong callpc) --{ -- error(FATAL, "x86_64_dump_line_number: TBD\n"); -+ return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize)); - } - --void 
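The disassembly scan reduces to simple bookkeeping: each push grows the frame by 8, each pop shrinks it by 8, and explicit %rsp adjustments add or subtract their immediate operand, while hitting a retq makes the code give up and mark the entry disabled. A standalone sketch of that accounting over an invented instruction list:

/* illustrative only: the instruction strings are invented, not gdb output */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *insns[] = {            /* simplified prologue/epilogue */
                "push   %rbp",
                "push   %rbx",
                "sub    $0x28,%rsp",
                "add    $0x28,%rsp",
                "pop    %rbx",
        };
        int i, framesize = 0;

        for (i = 0; i < (int)(sizeof(insns)/sizeof(insns[0])); i++) {
                const char *in = insns[i];

                if (!strncmp(in, "push", 4))
                        framesize += 8;
                else if (!strncmp(in, "pop", 3))
                        framesize -= 8;
                else if (!strncmp(in, "sub", 3) && strstr(in, ",%rsp"))
                        framesize += strtol(strchr(in, '$') + 1, NULL, 16);
                else if (!strncmp(in, "add", 3) && strstr(in, ",%rsp"))
                        framesize -= strtol(strchr(in, '$') + 1, NULL, 16);

                printf("%-20s framesize: %d\n", in, framesize);
        }
        return 0;
}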
--x86_64_compiler_warning_stub(void) -+static void -+x86_64_framesize_debug(struct bt_info *bt) - { -- struct line_number_hook *lhp; -- char **p; -+ int framesize; - -- lhp = &x86_64_line_number_hooks[0]; lhp++; -- p = ENTRY_S; -- x86_64_back_trace(NULL, NULL); -- get_x86_64_frame(NULL, NULL, NULL); -- x86_64_dump_line_number(0); --} -+ switch (bt->hp->esp) -+ { -+ case 1: /* "dump" */ -+ if (bt->hp->eip) { -+ framesize = 1; -+ x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, -+ &framesize); -+ } else -+ x86_64_framesize_cache_func(FRAMESIZE_DUMP, 0, NULL); -+ break; -+ -+ case 0: -+ if (bt->hp->eip) { -+ framesize = 0; -+ x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, -+ &framesize); -+ } else /* "clear" */ -+ BZERO(&x86_64_framesize_cache[0], -+ sizeof(struct framesize_cache)*framesize_cache_entries); -+ break; -+ -+ case -1: -+ if (!bt->hp->eip) -+ error(INFO, "x86_64_framesize_debug: ignoring command\n"); -+ else -+ x86_64_get_framesize(bt, bt->hp->eip); -+ break; - -+ default: -+ if (bt->hp->esp > 1) { -+ framesize = bt->hp->esp; -+ if (bt->hp->eip) -+ x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, -+ &framesize); -+ } else -+ error(INFO, "x86_64_framesize_debug: ignoring command\n"); -+ break; -+ } -+} - #endif /* X86_64 */ ---- crash/extensions.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/extensions.c 2007-05-24 16:26:41.000000000 -0400 -@@ -18,9 +18,6 @@ - #include "defs.h" - #include - --static void load_extension(char *); --static void unload_extension(char *); -- - #define DUMP_EXTENSIONS (0) - #define LOAD_EXTENSION (1) - #define UNLOAD_EXTENSION (2) -@@ -110,6 +107,7 @@ - void - dump_extension_table(int verbose) - { -+ int i; - struct extension_table *ext; - struct command_table_entry *cp; - char buf[BUFSIZE]; -@@ -120,23 +118,37 @@ - - if (verbose) { - for (ext = extension_table; ext; ext = ext->next) { -- fprintf(fp, " filename: %s\n", ext->filename); -- fprintf(fp, " handle: %lx\n", (ulong)ext->handle); -- fprintf(fp, "command_table: %lx (", -- (ulong)ext->command_table); -- for (others = 0, cp = ext->command_table; cp->name;cp++) -- fprintf(fp, "%s%s%s", others++ ? " " : "", -- cp->name, cp->help_data ? "*" : ""); -- fprintf(fp, ")\n"); -- fprintf(fp, " flags: %lx (", ext->flags); -+ fprintf(fp, " filename: %s\n", ext->filename); -+ fprintf(fp, " handle: %lx\n", (ulong)ext->handle); -+ -+ -+ fprintf(fp, " flags: %lx (", ext->flags); - others = 0; - if (ext->flags & REGISTERED) - fprintf(fp, "%sREGISTERED", others++ ? - "|" : ""); - fprintf(fp, ")\n"); -- fprintf(fp, " next: %lx\n", (ulong)ext->next); -- fprintf(fp, " prev: %lx\n%s", -- (ulong)ext->prev, ext->next ? "\n" : ""); -+ fprintf(fp, " next: %lx\n", (ulong)ext->next); -+ fprintf(fp, " prev: %lx\n", (ulong)ext->prev); -+ -+ for (i = 0, cp = ext->command_table; cp->name; cp++, i++) { -+ fprintf(fp, "command_table[%d]: %lx\n", i, (ulong)cp); -+ fprintf(fp, " name: %s\n", cp->name); -+ fprintf(fp, " func: %lx\n", (ulong)cp->func); -+ fprintf(fp, " help_data: %lx\n", (ulong)cp->help_data); -+ fprintf(fp, " flags: %lx (", cp->flags); -+ others = 0; -+ if (cp->flags & CLEANUP) -+ fprintf(fp, "%sCLEANUP", others++ ? "|" : ""); -+ if (cp->flags & REFRESH_TASK_TABLE) -+ fprintf(fp, "%sREFRESH_TASK_TABLE", others++ ? "|" : ""); -+ if (cp->flags & HIDDEN_COMMAND) -+ fprintf(fp, "%sHIDDEN_COMMAND", others++ ? "|" : ""); -+ fprintf(fp, ")\n"); -+ } -+ -+ if (ext->next) -+ fprintf(fp, "\n"); - } - return; - } -@@ -171,7 +183,7 @@ - /* - * Load an extension library. 
- */ --static void -+void - load_extension(char *lib) - { - struct extension_table *ext; -@@ -208,7 +220,7 @@ - * _init() function before dlopen() returns below. - */ - pc->curext = ext; -- ext->handle = dlopen(ext->filename, RTLD_NOW); -+ ext->handle = dlopen(ext->filename, RTLD_NOW|RTLD_GLOBAL); - - if (!ext->handle) { - strcpy(buf, dlerror()); -@@ -252,7 +264,7 @@ - /* - * Unload all, or as specified, extension libraries. - */ --static void -+void - unload_extension(char *lib) - { - struct extension_table *ext; ---- crash/va_server.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/va_server.c 2006-10-11 09:14:36.000000000 -0400 -@@ -1,8 +1,8 @@ - /* va_server.c - kernel crash dump file translation library - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. + * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson + * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by -@@ -57,13 +57,15 @@ +@@ -35,7 +35,7 @@ + #ifndef _DUMP_H + #define _DUMP_H - extern int monitor_memory(long *, long *, long *, long *); +-#include ++//#include --int Page_Size = PAGE_SIZE; /* temporary setting until disk header is read */ -+int Page_Size; - ulong vas_debug = 0; - - extern void *malloc(size_t); - - int va_server_init(char *crash_file, u_long *start, u_long *end, u_long *stride) - { -+ Page_Size = getpagesize(); /* temporary setting until disk header is read */ -+ - if(read_map(crash_file)) { - if(va_server_init_v1(crash_file, start, end, stride)) - return -1; ---- crash/symbols.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/symbols.c 2007-07-31 16:05:08.000000000 -0400 -@@ -1,8 +1,8 @@ - /* symbols.c - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -21,6 +21,8 @@ - - static void store_symbols(bfd *, int, void *, long, unsigned int); - static void store_sysmap_symbols(void); -+static ulong relocate(ulong, char *, int); -+static int relocate_force(ulong, char *); - static void strip_module_symbol_end(char *s); - static int compare_syms(const void *, const void *); - static int compare_mods(const void *, const void *); -@@ -36,7 +38,9 @@ - static int load_module_index(struct syment *); - static void section_header_info(bfd *, asection *, void *); - static void store_section_data(struct load_module *, bfd *, asection *); --static void calculate_load_order(struct load_module *, bfd *); -+static void calculate_load_order_v1(struct load_module *, bfd *); -+static void calculate_load_order_v2(struct load_module *, bfd *, int, -+ void *, long, unsigned int); - static void check_insmod_builtin(struct load_module *, int, ulong *); - static int is_insmod_builtin(struct load_module *, struct syment *); - struct load_module; -@@ -61,12 +65,16 @@ - struct elf_common; - static void Elf32_Sym_to_common(Elf32_Sym *, struct elf_common *); - static void Elf64_Sym_to_common(Elf64_Sym *, struct elf_common *); -+static void cmd_datatype_common(ulong); -+static int display_per_cpu_info(struct syment *); - - - #define KERNEL_SECTIONS (void *)(1) - #define MODULE_SECTIONS (void *)(2) - #define VERIFY_SECTIONS (void *)(3) - -+#define EV_DWARFEXTRACT 101010101 -+ - #define PARSE_FOR_DATA (1) - #define PARSE_FOR_DECLARATION (2) - static void parse_for_member(struct datatype_member *, ulong); -@@ -96,6 +104,7 @@ - #define SHOW_OFFSET (0x10000) - #define IN_UNION (0x20000) - #define IN_STRUCT (0x40000) -+#define DATATYPE_QUERY (0x80000) - - #define INTEGER_TYPE (UINT8|INT8|UINT16|INT16|UINT32|INT32|UINT64|INT64) - -@@ -139,6 +148,12 @@ - if (!bfd_check_format_matches(st->bfd, bfd_object, &matching)) - error(FATAL, "cannot determine object file format: %s\n", - pc->namelist); -+ /* -+ * Check whether the namelist is a kerntypes file built by -+ * dwarfextract, which places a magic number in e_version. -+ */ -+ if (file_elf_version(pc->namelist) == EV_DWARFEXTRACT) -+ pc->flags |= KERNTYPES; - - if (pc->flags & SYSMAP) { - bfd_map_over_sections(st->bfd, section_header_info, -@@ -153,13 +168,16 @@ - } - store_sysmap_symbols(); - return; -- } -+ } else if (LKCD_KERNTYPES()) -+ error(FATAL, "%s: use of kerntypes requires a system map\n", -+ pc->namelist); - - /* - * Pull a bait-and-switch on st->bfd if we've got a separate -- * .gnu_debuglink file that matches the CRC. -+ * .gnu_debuglink file that matches the CRC. Not done for kerntypes. 
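/*
 * Illustrative sketch (not part of the patch itself): the kerntypes check
 * above keys off a magic value that dwarfextract stores in the ELF header's
 * e_version field (EV_DWARFEXTRACT, as defined in the hunk).  A minimal
 * standalone detector could look like this; the helper name is hypothetical
 * and only the 64-bit ELF header layout is read.
 */
#include <elf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define EV_DWARFEXTRACT 101010101	/* magic e_version set by dwarfextract */

static int
is_kerntypes_file(const char *path)
{
	Elf64_Ehdr ehdr;
	int fd, ret = 0;

	if ((fd = open(path, O_RDONLY)) < 0)
		return 0;
	if (read(fd, &ehdr, sizeof(ehdr)) == sizeof(ehdr) &&
	    memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0 &&
	    ehdr.e_version == EV_DWARFEXTRACT)
		ret = 1;
	close(fd);
	return ret;
}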
- */ -- if (!(bfd_get_file_flags(st->bfd) & HAS_SYMS)) { -+ if (!(LKCD_KERNTYPES()) && -+ !(bfd_get_file_flags(st->bfd) & HAS_SYMS)) { - if (!check_gnu_debuglink(st->bfd)) - no_debugging_data(FATAL); - } -@@ -471,6 +489,11 @@ - kt->stext_init = (ulong)bfd_get_section_vma(st->bfd, section); - kt->etext_init = kt->stext_init + - (ulong)bfd_section_size(st->bfd, section); -+ -+ if (kt->relocate) { -+ kt->stext_init -= kt->relocate; -+ kt->etext_init -= kt->relocate; -+ } - } - - /* -@@ -486,6 +509,7 @@ - bfd_byte *from, *fromend; - symbol_info syminfo; - struct syment *sp; -+ int first; - - if ((store = bfd_make_empty_symbol(abfd)) == NULL) - error(FATAL, "bfd_make_empty_symbol() failed\n"); -@@ -505,6 +529,13 @@ - st->symcnt = 0; - sp = st->symtable; - -+ if (machine_type("X86")) { -+ if (!(kt->flags & RELOC_SET)) -+ kt->flags |= RELOC_FORCE; -+ } else -+ kt->flags &= ~RELOC_SET; -+ -+ first = 0; - from = (bfd_byte *) minisyms; - fromend = from + symcount * size; - for (; from < fromend; from += size) -@@ -516,7 +547,11 @@ - bfd_get_symbol_info(abfd, sym, &syminfo); - if (machdep->verify_symbol(syminfo.name, syminfo.value, - syminfo.type)) { -- sp->value = syminfo.value; -+ if (kt->flags & (RELOC_SET|RELOC_FORCE)) -+ sp->value = relocate(syminfo.value, -+ (char *)syminfo.name, !(first++)); -+ else -+ sp->value = syminfo.value; - sp->type = syminfo.type; - namespace_ctl(NAMESPACE_INSTALL, &st->namespace, - sp, (char *)syminfo.name); -@@ -540,7 +575,7 @@ - static void - store_sysmap_symbols(void) - { -- int c; -+ int c, first; - long symcount; - char buf[BUFSIZE]; - FILE *map; -@@ -564,6 +599,10 @@ - error(FATAL, "symbol table namespace malloc: %s\n", - strerror(errno)); - -+ if (!machine_type("X86")) -+ kt->flags &= ~RELOC_SET; -+ -+ first = 0; - st->syment_size = symcount * sizeof(struct syment); - st->symcnt = 0; - sp = st->symtable; -@@ -580,7 +619,11 @@ - - if (machdep->verify_symbol(syment.name, syment.value, - syment.type)) { -- sp->value = syment.value; -+ if (kt->flags & RELOC_SET) -+ sp->value = relocate(syment.value, -+ syment.name, !(first++)); -+ else -+ sp->value = syment.value; - sp->type = syment.type; - namespace_ctl(NAMESPACE_INSTALL, &st->namespace, - sp, syment.name); -@@ -603,6 +646,96 @@ - } - - /* -+ * Handle x86 kernels configured such that the vmlinux symbols -+ * are not as loaded into the kernel (not unity-mapped). -+ */ -+static ulong -+relocate(ulong symval, char *symname, int first_symbol) -+{ -+ switch (kt->flags & (RELOC_SET|RELOC_FORCE)) -+ { -+ case RELOC_SET: -+ break; -+ -+ case RELOC_FORCE: -+ if (first_symbol && !relocate_force(symval, symname)) -+ kt->flags &= ~RELOC_FORCE; -+ break; -+ } -+ -+ return (symval - kt->relocate); -+} -+ -+/* -+ * If no --reloc argument was passed, try to figure it out -+ * by comparing the first vmlinux kernel symbol with the -+ * first /proc/kallsyms symbol. (should be "_text") -+ * -+ * Live system only (at least for now). -+ */ -+static int -+relocate_force(ulong symval, char *symname) -+{ -+ FILE *kp; -+ char buf[BUFSIZE]; -+ char *kallsyms[MAXARGS]; -+ ulong first; -+ -+ if (!ACTIVE() || !file_exists("/proc/kallsyms", NULL)) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "cannot determine relocation value: %s\n", -+ !ACTIVE() ? 
"not a live system" : -+ "/proc/kallsyms does not exist"); -+ return FALSE; -+ } -+ -+ if ((kp = fopen("/proc/kallsyms", "r")) == NULL) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "cannot open /proc/kallsyms to determine relocation\n"); -+ return FALSE; -+ } -+ -+ if (!fgets(buf, BUFSIZE, kp) || -+ (parse_line(buf, kallsyms) != 3) || -+ !hexadecimal(kallsyms[0], 0)) { -+ fclose(kp); -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "malformed /proc/kallsyms: cannot determine relocation value\n"); -+ return FALSE; -+ } -+ fclose(kp); -+ -+ first = htol(kallsyms[0], RETURN_ON_ERROR, NULL); -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "RELOCATE: %s @ %lx %s\n" -+ " %s @ %lx /proc/kallsyms\n", -+ symname, symval, pc->namelist, -+ kallsyms[2], first); -+ -+ /* -+ * If the symbols match and have different values, -+ * force the relocation. -+ */ -+ if (STREQ(symname, kallsyms[2])) { -+ if (symval > first) { -+ kt->relocate = symval - first; -+ return TRUE; -+ } -+ } -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "cannot determine relocation value from first symbol\n"); -+ -+ return FALSE; -+} -+ -+/* - * Install all static kernel symbol values into the symval_hash. - */ - static void -@@ -1159,7 +1292,7 @@ - mod_name); - strncpy(lm->mod_name, mod_name, MAX_MOD_NAME-1); - } -- if (CRASHDEBUG(1)) -+ if (CRASHDEBUG(3)) - fprintf(fp, - "%lx (%lx): %s syms: %d gplsyms: %d ksyms: %ld\n", - mod, lm->mod_base, lm->mod_name, nsyms, -@@ -2121,22 +2254,13 @@ - fprintf(fp, "%sFORCE_DEBUGINFO", others++ ? "|" : ""); - if (st->flags & CRC_MATCHES) - fprintf(fp, "%sCRC_MATCHES", others++ ? "|" : ""); -+ if (st->flags & ADD_SYMBOL_FILE) -+ fprintf(fp, "%sADD_SYMBOL_FILE", others++ ? "|" : ""); -+ if (st->flags & USE_OLD_ADD_SYM) -+ fprintf(fp, "%sUSE_OLD_ADD_SYM", others++ ? "|" : ""); - fprintf(fp, ")\n"); - - fprintf(fp, " bfd: %lx\n", (ulong)st->bfd); -- -- sec = (asection **)st->sections; -- fprintf(fp, " sections: %s\n", sec ? "" : "(not in use)"); -- for (i = 0; sec && (i < st->bfd->section_count); i++, sec++) { -- asection *section; -- -- section = *sec; -- fprintf(fp, "%25s vma: %.*lx size: %ld\n", -- section->name, VADDR_PRLEN, -- (ulong)bfd_get_section_vma(st->bfd, section), -- (ulong)bfd_section_size(st->bfd, section)); -- } -- - fprintf(fp, " symtable: %lx\n", (ulong)st->symtable); - fprintf(fp, " symend: %lx\n", (ulong)st->symend); - fprintf(fp, " symcnt: %ld\n", st->symcnt); -@@ -2320,6 +2444,24 @@ - } - } - } -+ -+ fprintf(fp, "\n"); -+ fprintf(fp, "dwarf_eh_frame_file_offset: %llx\n", -+ (unsigned long long)st->dwarf_eh_frame_file_offset); -+ fprintf(fp, " dwarf_eh_frame_size: %ld\n", st->dwarf_eh_frame_size); -+ fprintf(fp, "\n"); -+ -+ sec = (asection **)st->sections; -+ fprintf(fp, " sections: %s\n", sec ? "" : "(not in use)"); -+ for (i = 0; sec && (i < st->bfd->section_count); i++, sec++) { -+ asection *section; -+ -+ section = *sec; -+ fprintf(fp, "%25s vma: %.*lx size: %ld\n", -+ section->name, VADDR_PRLEN, -+ (ulong)bfd_get_section_vma(st->bfd, section), -+ (ulong)bfd_section_size(st->bfd, section)); -+ } - } - - -@@ -2427,7 +2569,7 @@ - goto not_system_map; - if (parse_line(buf, mapitems) != 3) - goto not_system_map; -- if ((strlen(mapitems[0]) != MAX_HEXADDR_STRLEN) || -+ if ((strlen(mapitems[0]) > MAX_HEXADDR_STRLEN) || - !hexadecimal(mapitems[0], 0) || (strlen(mapitems[1]) > 1)) - goto not_system_map; - } -@@ -3463,6 +3605,22 @@ - } - - /* -+ * Same as above, but allow for failure. 
-+ */ -+int -+try_get_symbol_data(char *symbol, long size, void *local) -+{ -+ struct syment *sp; -+ -+ if ((sp = symbol_search(symbol)) && -+ readmem(sp->value, KVADDR, local, -+ size, symbol, RETURN_ON_ERROR|QUIET)) -+ return TRUE; -+ -+ return FALSE; -+} -+ -+/* - * Return the value of a given symbol. - */ - ulong -@@ -3477,6 +3635,34 @@ - } - - /* -+ * Return the value of a symbol from a specific module. -+ */ -+ulong -+symbol_value_module(char *symbol, char *module) -+{ -+ int i; -+ struct syment *sp, *sp_end; -+ struct load_module *lm; -+ -+ for (i = 0; i < st->mods_installed; i++) { -+ lm = &st->load_modules[i]; -+ -+ if (!STREQ(module, lm->mod_name)) -+ continue; -+ -+ sp = lm->mod_symtable; -+ sp_end = lm->mod_symend; -+ -+ for ( ; sp < sp_end; sp++) { -+ if (STREQ(symbol, sp->name)) -+ return(sp->value); -+ } -+ } -+ -+ return 0; -+} -+ -+/* - * Return the symbol name of a given value, with no allowance for offsets. - * Returns NULL on failure to allow for testing of a value. - */ -@@ -3748,6 +3934,7 @@ - dm->size = size; - dm->member_size = member_size; - dm->member_typecode = member_typecode; -+ dm->member_offset = offset; - if (req->is_typedef) { - dm->flags |= TYPEDEF; - } -@@ -3928,25 +4115,59 @@ - void - cmd_struct(void) - { -- int c; -+ cmd_datatype_common(STRUCT_REQUEST); -+} -+/* -+ * This command displays either a union definition, or a formatted display -+ * of the contents of a union at a specified address. If no address is -+ * specified, the union size and the file in which the union is defined -+ * are also displayed. A union member may be appended to the union -+ * name (in a "union.member" format) in order to limit the scope of the data -+ * displayed to that particular member. Structure data is shown in hexadecimal -+ * format. The raw data in a union may be dumped with the -r flag. -+ */ -+void -+cmd_union(void) -+{ -+ cmd_datatype_common(UNION_REQUEST); -+} -+ -+/* -+ * After determining what type of data type follows the *, this routine -+ * has the identical functionality as cmd_struct() or cmd_union(). 
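/*
 * Illustrative sketch of the heuristic implemented by relocate_force()
 * above: on a live x86 system the first /proc/kallsyms entry (normally
 * "_text") is compared with the same symbol's vmlinux value, and the
 * difference becomes the relocation offset subtracted from every symbol.
 * Error reporting and crash-internal helpers are omitted; the function
 * name below is invented for the example.
 */
#include <stdio.h>
#include <string.h>

static unsigned long
guess_kernel_relocation(const char *symname, unsigned long symval)
{
	FILE *kp;
	char line[256], name[128], type;
	unsigned long live;

	if ((kp = fopen("/proc/kallsyms", "r")) == NULL)
		return 0;
	if (!fgets(line, sizeof(line), kp) ||
	    sscanf(line, "%lx %c %127s", &live, &type, name) != 3) {
		fclose(kp);
		return 0;
	}
	fclose(kp);

	/* same symbol, with vmlinux linked higher than the running kernel */
	if (strcmp(name, symname) == 0 && symval > live)
		return symval - live;

	return 0;
}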
-+ */ -+void -+cmd_pointer(void) -+{ -+ cmd_datatype_common(0); -+} -+ -+static void -+cmd_datatype_common(ulong flags) -+{ -+ int i, c; - ulong addr, aflag; - struct syment *sp; - int rawdata; - long len; -- ulong flags; - ulong list_head_offset; - int count; -- struct datatype_member struct_member, *sm; -+ int argc_members; -+ int optind_save; -+ struct datatype_member datatype_member, *dm; -+ char *separator; -+ char *structname, *members; -+ char *memberlist[MAXARGS]; - -- sm = &struct_member; -- count = 1; -+ dm = &datatype_member; -+ count = 0xdeadbeef; - rawdata = 0; - aflag = 0; -- list_head_offset = 0; -- flags = STRUCT_REQUEST; -+ list_head_offset = 0; -+ argc_members = 0; - -- while ((c = getopt(argcnt, args, "c:rvol:")) != EOF) { -- switch(c) -+ while ((c = getopt(argcnt, args, "fuc:rvol:")) != EOF) { -+ switch (c) - { - case 'c': - count = atoi(optarg); -@@ -3969,8 +4190,22 @@ - list_head_offset = stol(optarg, - FAULT_ON_ERROR, NULL); - else if (arg_to_datatype(optarg, -- sm, RETURN_ON_ERROR) > 1) -- list_head_offset = sm->member_offset; -+ dm, RETURN_ON_ERROR) > 1) -+ list_head_offset = dm->member_offset; -+ else -+ error(FATAL, "invalid -l option: %s\n", -+ optarg); -+ break; -+ -+ case 'f': -+ if (!pc->dumpfile) -+ error(FATAL, -+ "-f option requires a dumpfile\n"); -+ pc->curcmd_flags |= MEMTYPE_FILEADDR; -+ break; -+ -+ case 'u': -+ pc->curcmd_flags |= MEMTYPE_UVADDR; - break; - - default: -@@ -3982,35 +4217,42 @@ - if (argerrs || !args[optind]) - cmd_usage(pc->curcmd, SYNOPSIS); - -- if ((arg_to_datatype(args[optind++], sm, FAULT_ON_ERROR) > 1) && -- rawdata) -- error(FATAL, "member-specific output not allowed with -r\n"); -- -- if ((len = sm->size) < 0) { -- error(INFO, "structure not found: %s\n", sm->name); -- cmd_usage(pc->curcmd, SYNOPSIS); -- } -- -- if (!args[optind]) { -- do_datatype_declaration(sm, flags | (sm->flags & TYPEDEF)); -- return; -- } -+ if ((count_chars(args[optind], ',')+1) > MAXARGS) -+ error(FATAL, "too many members in comma-separated list!\n"); -+ -+ if ((count_chars(args[optind], '.') > 1) || -+ (LASTCHAR(args[optind]) == ',') || -+ (LASTCHAR(args[optind]) == '.')) -+ error(FATAL, "invalid format: %s\n", args[optind]); -+ -+ optind_save = optind; -+ -+ /* -+ * Take care of address and count (array). 
-+ */ -+ while (args[++optind]) { -+ if (aflag && (count != 0xdeadbeef)) -+ error(FATAL, "too many arguments!\n"); - -- while (args[optind]) { - if (clean_arg() && IS_A_NUMBER(args[optind])) { - if (aflag) - count = stol(args[optind], - FAULT_ON_ERROR, NULL); - else { -- if (!IS_KVADDR(addr = htol(args[optind], -+ if (pc->curcmd_flags & MEMTYPE_FILEADDR) -+ pc->curcmd_private = stoll(args[optind], -+ FAULT_ON_ERROR, NULL); -+ else if (pc->curcmd_flags & MEMTYPE_UVADDR) { -+ addr = htol(args[optind], FAULT_ON_ERROR, -+ NULL); -+ } else if (!IS_KVADDR(addr = htol(args[optind], - FAULT_ON_ERROR, NULL))) - error(FATAL, - "invalid kernel virtual address: %s\n", - args[optind]); - aflag++; - } -- } -- else if ((sp = symbol_search(args[optind]))) { -+ } else if ((sp = symbol_search(args[optind]))) { - addr = sp->value; - aflag++; - } else { -@@ -4018,298 +4260,134 @@ - fprintf(fp, "possible aternatives:\n"); - if (!symbol_query(args[optind], " ", NULL)) - fprintf(fp, " (none found)\n"); -- return; -- } -- optind++; -- } -- -- if (!aflag) -- error(FATAL, "no kernel virtual address argument entered\n"); -- -- if (list_head_offset) -- addr -= list_head_offset; -- -- if (count < 0) { -- addr -= len * abs(count); -- addr += len; -- } -- -- for (c = 0; c < abs(count); c++, addr += len) { -- if (rawdata) -- raw_data_dump(addr, len, flags & STRUCT_VERBOSE); -- else { -- if (sm->member) -- open_tmpfile(); -- -- print_struct(sm->name, addr); -- -- if (sm->member) { -- parse_for_member(sm, PARSE_FOR_DATA); -- close_tmpfile(); -- } -- } -- } --} -- --/* -- * After determining what type of data type follows the *, this routine -- * has the identical functionality as cmd_struct() or cmd_union(). -- */ --void --cmd_pointer(void) --{ -- int c; -- ulong addr, aflag; -- struct syment *sp; -- int rawdata; -- long len; -- ulong flags; -- int count; -- struct datatype_member datatype_member, *dm; -- -- dm = &datatype_member; -- rawdata = 0; -- flags = 0; -- aflag = 0; -- count = 1; -- -- while ((c = getopt(argcnt, args, "c:rvo")) != EOF) { -- switch(c) -- { -- case 'c': -- count = atoi(optarg); -- break; -- -- case 'r': -- rawdata = 1; -- break; -- -- case 'v': -- flags |= STRUCT_VERBOSE; -- break; -- -- case 'o': -- flags |= SHOW_OFFSET; -- break; -- -- default: -- argerrs++; -- break; -+ goto freebuf; - } - } - -- if (argerrs || !args[optind]) -- cmd_usage(pc->curcmd, SYNOPSIS); -+ optind = optind_save; - -- if ((arg_to_datatype(args[optind++], dm, FAULT_ON_ERROR) > 1) && -- rawdata) -- error(FATAL, "member-specific output not allowed with -r\n"); -+ if (count == 0xdeadbeef) -+ count = 1; -+ else if (!aflag) -+ error(FATAL, "no kernel virtual address argument entered\n"); - -- if ((len = dm->size) < 0) { -- error(INFO, "structure or union not found: %s\n", dm->name); -- cmd_usage(pc->curcmd, SYNOPSIS); -+ if ((flags & SHOW_OFFSET) && aflag) { -+ error(INFO, "-o option not valid with an address argument\n"); -+ flags &= ~SHOW_OFFSET; - } - -- flags |= dm->type; -- -- if (!args[optind]) { -- do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF)); -- return; -- } -+ if (list_head_offset) -+ addr -= list_head_offset; - -- while (args[optind]) { -- if (clean_arg() && IS_A_NUMBER(args[optind])) { -- if (aflag) -- count = stol(args[optind], -- FAULT_ON_ERROR, NULL); -- else { -- if (!IS_KVADDR(addr = htol(args[optind], -- FAULT_ON_ERROR, NULL))) -- error(FATAL, -- "invalid kernel virtual address: %s\n", -- args[optind]); -- aflag++; -- } -- } -- else if ((sp = symbol_search(args[optind]))) { -- addr = sp->value; -- 
aflag++; -- } else { -- fprintf(fp, "symbol not found: %s\n", args[optind]); -- fprintf(fp, "possible aternatives:\n"); -- if (!symbol_query(args[optind], " ", NULL)) -- fprintf(fp, " (none found)\n"); -- return; -- } -- optind++; -+ /* -+ * Handle struct.member[,member] argument format. -+ */ -+ if (strstr(args[optind], ".")) { -+ structname = GETBUF(strlen(args[optind])+1); -+ strcpy(structname, args[optind]); -+ separator = strstr(structname, "."); -+ -+ members = GETBUF(strlen(args[optind])+1); -+ strcpy(members, separator+1); -+ replace_string(members, ",", ' '); -+ argc_members = parse_line(members, memberlist); -+ } else -+ structname = args[optind]; -+ -+ if ((arg_to_datatype(structname, dm, DATATYPE_QUERY|RETURN_ON_ERROR) < 1)) -+ error(FATAL, "invalid data structure reference: %s\n", structname); -+ -+ if ((argc_members > 1) && !aflag) { -+ error(INFO, flags & SHOW_OFFSET ? -+ "-o option not valid with multiple member format\n" : -+ "multiple member format not supported in this syntax\n"); -+ *separator = NULLCHAR; -+ argc_members = 0; -+ flags |= SHOW_OFFSET; - } - -- if (!(flags & (UNION_REQUEST|STRUCT_REQUEST))) -- error(FATAL, "invalid argument!"); -- -- if (!aflag) -- error(FATAL, "no kernel virtual address argument entered\n"); -+ len = dm->size; - - if (count < 0) { - addr -= len * abs(count); - addr += len; - } - -- for (c = 0; c < abs(count); c++, addr += len) { -- if (rawdata) -- raw_data_dump(addr, len, flags & STRUCT_VERBOSE); -- else { -- if (dm->member) -- open_tmpfile(); -- -- if (flags & UNION_REQUEST) -- print_union(dm->name, addr); -- else if (flags & STRUCT_REQUEST) -- print_struct(dm->name, addr); -- -- if (dm->member) { -- parse_for_member(dm, PARSE_FOR_DATA); -- close_tmpfile(); -- } -- } -- } --} -- --/* -- * This command displays either a union definition, or a formatted display -- * of the contents of a union at a specified address. If no address is -- * specified, the union size and the file in which the union is defined -- * are also displayed. A union member may be appended to the union -- * name (in a "union.member" format) in order to limit the scope of the data -- * displayed to that particular member. Structure data is shown in hexadecimal -- * format. The raw data in a union may be dumped with the -r flag. 
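/*
 * Illustrative sketch of the "struct.member[,member]" split performed by
 * cmd_datatype_common() above, using plain libc calls instead of crash's
 * GETBUF()/replace_string()/parse_line() helpers.  The function name and
 * member limit are invented for the example.
 */
#include <string.h>

#define MAX_MEMBERS 16

/* e.g. "task_struct.pid,comm" -> struct "task_struct", members "pid" "comm" */
static int
split_datatype_arg(char *arg, char **members, int max)
{
	char *dot, *tok;
	int n = 0;

	if ((dot = strchr(arg, '.')) == NULL)
		return 0;			/* no member list given */

	*dot = '\0';				/* arg now holds just the struct name */
	for (tok = strtok(dot + 1, ","); tok && n < max; tok = strtok(NULL, ","))
		members[n++] = tok;

	return n;
}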
-- */ --void --cmd_union(void) --{ -- int c; -- ulong addr, aflag; -- struct syment *sp; -- int rawdata; -- long len; -- ulong flags; -- int count; -- struct datatype_member union_member, *um; -- ulong list_head_offset; -- -- um = &union_member; -- count = 1; -- rawdata = 0; -- aflag = 0; -- list_head_offset = 0; -- flags = UNION_REQUEST; -- -- while ((c = getopt(argcnt, args, "c:rvol:")) != EOF) { -- switch(c) -- { -- case 'c': -- count = atoi(optarg); -- break; -- -- case 'r': -- rawdata = 1; -- break; -- -- case 'v': -- flags |= STRUCT_VERBOSE; -- break; -+ if (pc->curcmd_flags & MEMTYPE_FILEADDR) -+ addr = 0; /* unused, but parsed by gdb */ - -- case 'o': -- flags |= SHOW_OFFSET; -- break; -- -- case 'l': -- if (IS_A_NUMBER(optarg)) -- list_head_offset = stol(optarg, -- FAULT_ON_ERROR, NULL); -- else if (arg_to_datatype(optarg, -- um, RETURN_ON_ERROR) > 1) -- list_head_offset = um->member_offset; -- break; -+ for (c = 0; c < abs(count); c++, addr += len, pc->curcmd_private += len) { -+ if (c) -+ fprintf(fp,"\n"); - -- default: -- argerrs++; -- break; -- } -- } -+ i = 0; -+ do { -+ if (argc_members) { -+ *separator = '.'; -+ strcpy(separator+1, memberlist[i]); -+ } - -- if (argerrs || !args[optind]) -- cmd_usage(pc->curcmd, SYNOPSIS); -+ switch (arg_to_datatype(structname, dm, RETURN_ON_ERROR)) -+ { -+ case 0: error(FATAL, "invalid data structure reference: %s\n", -+ structname); -+ break; -+ case 1: break; -+ case 2: if (rawdata) -+ error(FATAL, -+ "member-specific output not allowed with -r\n"); -+ break; -+ } - -- if ((arg_to_datatype(args[optind++], um, FAULT_ON_ERROR) > 1) && -- rawdata) -- error(FATAL, "member-specific output not allowed with -r\n"); -+ if (!(dm->flags & TYPEDEF)) { -+ if (flags &(STRUCT_REQUEST|UNION_REQUEST) ) { -+ if ((flags & (STRUCT_REQUEST|UNION_REQUEST)) != dm->type) -+ goto freebuf; -+ } else -+ flags |= dm->type; -+ } - -- if ((len = um->size) < 0) { -- error(INFO, "union not found: %s\n", um->name); -- cmd_usage(pc->curcmd, SYNOPSIS); -- } -+ /* -+ * No address was passed -- dump the structure/member declaration. -+ */ -+ if (!aflag) { -+ do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF)); -+ goto freebuf; -+ } -+ -+ if (!(flags & (UNION_REQUEST|STRUCT_REQUEST))) -+ error(FATAL, "invalid argument"); -+ -+ /* -+ * Display data. 
-+ */ -+ if (rawdata) -+ raw_data_dump(addr, len, flags & STRUCT_VERBOSE); -+ else { -+ if (dm->member) -+ open_tmpfile(); - -- if (!args[optind]) { -- do_datatype_declaration(um, flags | (um->flags & TYPEDEF)); -- return; -- } -- -- while (args[optind]) { -- if (clean_arg() && IS_A_NUMBER(args[optind])) { -- if (aflag) -- count = stol(args[optind], -- FAULT_ON_ERROR, NULL); -- else { -- if (!IS_KVADDR(addr = htol(args[optind], -- FAULT_ON_ERROR, NULL))) -- error(FATAL, -- "invalid kernel virtual address: %s\n", -- args[optind]); -- aflag++; -- } -- } -- else if ((sp = symbol_search(args[optind]))) { -- addr = sp->value; -- aflag++; -- } else { -- fprintf(fp, "symbol not found: %s\n", args[optind]); -- fprintf(fp, "possible aternatives:\n"); -- if (!symbol_query(args[optind], " ", NULL)) -- fprintf(fp, " (none found)\n"); -- return; -- } -- optind++; -- } -- -- if (!aflag) -- error(FATAL, "no kernel virtual address argument entered\n"); -- -- if (list_head_offset) -- addr -= list_head_offset; -- -- if (count < 0) { -- addr -= len * abs(count); -- addr += len; -- } -+ if (flags & UNION_REQUEST) -+ print_union(dm->name, addr); -+ else if (flags & STRUCT_REQUEST) -+ print_struct(dm->name, addr); -+ -+ if (dm->member) { -+ parse_for_member(dm, PARSE_FOR_DATA); -+ close_tmpfile(); -+ } -+ } -+ } while (++i < argc_members); -+ } - -- for (c = 0; c < abs(count); c++, addr += len) { -- if (rawdata) -- raw_data_dump(addr, len, flags & STRUCT_VERBOSE); -- else { -- if (um->member) -- open_tmpfile(); -- -- print_union(um->name, addr); -- -- if (um->member) { -- parse_for_member(um, PARSE_FOR_DATA); -- close_tmpfile(); -- } -- } -+freebuf: -+ if (argc_members) { -+ FREEBUF(structname); -+ FREEBUF(members); - } - } - -+ - /* - * Generic function for dumping data structure declarations, with a small - * fixup for typedefs, sizes and member offsets. -@@ -4405,7 +4483,10 @@ - - if (!(p1 = strstr(s, "."))) - both = FALSE; -- else { -+ else if (flags & DATATYPE_QUERY) { -+ *p1 = NULLCHAR; -+ both = FALSE; -+ } else { - if ((p1 == s) || !strlen(p1+1)) - goto datatype_member_fatal; - *p1 = NULLCHAR; -@@ -4634,6 +4715,27 @@ - } - - /* -+ * Given the name of an enum, return its value. -+ */ -+int -+enumerator_value(char *e, long *value) -+{ -+ struct datatype_member datatype_member, *dm; -+ -+ dm = &datatype_member; -+ -+ if (arg_to_datatype(e, dm, RETURN_ON_ERROR)) { -+ if ((dm->size >= 0) && -+ (dm->type == ENUM) && dm->tagname) { -+ *value = dm->value; -+ return TRUE; -+ } -+ } -+ -+ return FALSE; -+} -+ -+/* - * Verify that a datatype exists, but return on error. - */ - int -@@ -4705,6 +4807,8 @@ - cmd_usage(pc->curcmd, SYNOPSIS); - - if ((sp = symbol_search(args[optind])) && !args[optind+1]) { -+ if (STRNEQ(sp->name, "per_cpu__") && display_per_cpu_info(sp)) -+ return; - sprintf(buf2, "%s = ", args[optind]); - leader = strlen(buf2); - if (module_symbol(sp->value, NULL, NULL, NULL, output_radix)) -@@ -4758,6 +4862,39 @@ - } - - /* -+ * Display the datatype of the per_cpu__xxx symbol and -+ * the addresses of each its per-cpu instances. 
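/*
 * Sketch of the address arithmetic used by display_per_cpu_info() below:
 * each CPU's copy of a per_cpu__xxx symbol lives at the symbol value plus
 * that CPU's entry in __per_cpu_offset.  The offset array and CPU count
 * are plain parameters here rather than crash's kt-> fields.
 */
#include <stdio.h>

static void
show_per_cpu_addresses(unsigned long symval,
		       const unsigned long *per_cpu_offset, int cpus)
{
	int c;

	for (c = 0; c < cpus; c++)
		printf("  [%d]: %lx\n", c, symval + per_cpu_offset[c]);
}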
-+ */ -+static int -+display_per_cpu_info(struct syment *sp) -+{ -+ int c; -+ ulong addr; -+ char buf[BUFSIZE]; -+ -+ if (((kt->flags & (SMP|PER_CPU_OFF)) != (SMP|PER_CPU_OFF)) || -+ (sp->value < symbol_value("__per_cpu_start")) || -+ (sp->value >= symbol_value("__per_cpu_end")) || -+ !((sp->type == 'd') || (sp->type == 'D'))) -+ return FALSE; -+ -+ fprintf(fp, "PER-CPU DATA TYPE:\n "); -+ sprintf(buf, "whatis %s", sp->name); -+ if (!gdb_pass_through(buf, pc->nullfp, GNU_RETURN_ON_ERROR)) -+ fprintf(fp, "[undetermined type] %s;\n", sp->name); -+ else -+ whatis_variable(sp); -+ -+ fprintf(fp, "PER-CPU ADDRESSES:\n"); -+ for (c = 0; c < kt->cpus; c++) { -+ addr = sp->value + kt->__per_cpu_offset[c]; -+ fprintf(fp, " [%d]: %lx\n", c, addr); -+ } -+ -+ return TRUE; -+} -+ -+/* - * As a latch ditch effort before a command is thrown away by exec_command(), - * args[0] is checked to see whether it's the name of a variable, structure, - * union, or typedef. If so, args[0] is changed to the appropriate command, -@@ -4793,9 +4930,9 @@ - command = "whatis"; - else if (!datatype_exists(args[0])) - return FALSE; -- else if (!arg_to_datatype(buf, dm, RETURN_ON_ERROR)) { -+ else if (!arg_to_datatype(buf, dm, RETURN_ON_ERROR|DATATYPE_QUERY)) - return FALSE; -- } else { -+ else { - if (is_gdb_command(FALSE, RETURN_ON_ERROR)) { - pc->curcmd = pc->program_name; - error(FATAL, -@@ -5056,6 +5193,8 @@ - fprintf(ofp, "%sSTRUCT_VERBOSE", others++ ? "|" : ""); - if (flags & SHOW_OFFSET) - fprintf(ofp, "%sSHOW_OFFSET", others++ ? "|" : ""); -+ if (flags & DATATYPE_QUERY) -+ fprintf(ofp, "%sDATATYPE_QUERY", others++ ? "|" : ""); - fprintf(ofp, ")\n"); - } - -@@ -5079,7 +5218,8 @@ - - s = dm->member; - indent = 0; -- on = array = FALSE; -+ array = FALSE; -+ on = 0; - rewind(pc->tmpfile); - - switch (flag) -@@ -5090,7 +5230,7 @@ - next_item: - while (fgets(buf, BUFSIZE, pc->tmpfile)) { - if (STRNEQ(buf, lookfor1) || STRNEQ(buf, lookfor2)) { -- on = TRUE; -+ on++; - if (strstr(buf, "= {")) - indent = count_leading_spaces(buf); - if (strstr(buf, "[")) -@@ -5098,16 +5238,22 @@ - } - - if (on) { -+ if ((indent && (on > 1) && (count_leading_spaces(buf) == indent) && -+ !strstr(buf, "}")) || (buf[0] == '}')) { -+ break; -+ } - fprintf(pc->saved_fp, buf); - if (!indent) - break; - if (strstr(buf, "}") && - (count_leading_spaces(buf) == indent)) - break; -+ on++; - } - } - if (array) { - on = array = FALSE; -+ on = 0; - goto next_item; - } - break; -@@ -5174,7 +5320,7 @@ - { - int i, c, len; - long offset; -- char *target; -+ char *t1, *target; - char *arglist[MAXARGS]; - char buf1[BUFSIZE]; - char fmt[BUFSIZE]; -@@ -5186,6 +5332,9 @@ - return FALSE; - } - -+ if (STRNEQ(inbuf, " ")) -+ goto do_empty_offset; -+ - if (STRNEQ(inbuf, " union {")) - dm->flags |= IN_UNION; - if (STRNEQ(inbuf, " struct {")) -@@ -5215,9 +5364,20 @@ - } - } - } else if (c) { -- target = arglist[c-1]; -- if (!strstr(target, ";")) -- target = NULL; -+ for (i = 0; i < c; i++) { -+ if (STRNEQ(arglist[i], "(*")) { -+ target = arglist[i]+2; -+ if (!(t1 = strstr(target, ")"))) -+ continue; -+ *t1 = NULLCHAR; -+ break; -+ } -+ } -+ if (i == c) { -+ target = arglist[c-1]; -+ if (!strstr(target, ";")) -+ target = NULL; -+ } - } - - if (!target) -@@ -5307,7 +5467,8 @@ - if ((retval = builtin_array_length(s, 0, two_dim))) - return retval; - -- if (symbol_search(s)) { -+ /* symbol_search cannot be done with just kernel type information */ -+ if (!(LKCD_KERNTYPES()) && symbol_search(s)) { - if (!two_dim) { - req = &gnu_request; - if ((get_symbol_type(copy, NULL, 
req) == -@@ -5417,6 +5578,23 @@ - } - - /* -+ * Get and store the size of a "known" array. -+ * A wrapper for get_array_length(), for cases in which -+ * the name of the result to be stored is different from the -+ * structure.member to be evaluated. -+ */ -+int -+get_array_length_alt(char *name, char *s, int *two_dim, long entry_size) -+{ -+ int retval; -+ -+ retval = get_array_length(s, two_dim, entry_size); -+ if (retval) -+ retval = builtin_array_length(name, retval, two_dim); -+ return retval; -+} -+ -+/* - * Designed for use by non-debug kernels, but used by all. - */ - int -@@ -5433,6 +5611,8 @@ - lenptr = &array_table.kmem_cache_s_c_name; - else if (STREQ(s, "kmem_cache_s.array")) - lenptr = &array_table.kmem_cache_s_array; -+ else if (STREQ(s, "kmem_cache.array")) -+ lenptr = &array_table.kmem_cache_s_array; - else if (STREQ(s, "kmem_cache_s.cpudata")) - lenptr = &array_table.kmem_cache_s_cpudata; - else if (STREQ(s, "log_buf")) -@@ -5469,6 +5649,8 @@ - lenptr = &array_table.prio_array_queue; - else if (STREQ(s, "height_to_maxindex")) - lenptr = &array_table.height_to_maxindex; -+ else if (STREQ(s, "pid_hash")) -+ lenptr = &array_table.pid_hash; - else if (STREQ(s, "free_area")) { - lenptr = &array_table.free_area; - if (two_dim) -@@ -5608,6 +5790,10 @@ - OFFSET(task_struct_timestamp)); - fprintf(fp, " task_struct_thread_info: %ld\n", - OFFSET(task_struct_thread_info)); -+ fprintf(fp, " task_struct_nsproxy: %ld\n", -+ OFFSET(task_struct_nsproxy)); -+ fprintf(fp, " task_struct_rlim: %ld\n", -+ OFFSET(task_struct_rlim)); - - fprintf(fp, " thread_info_task: %ld\n", - OFFSET(thread_info_task)); -@@ -5618,6 +5804,13 @@ - fprintf(fp, " thread_info_previous_esp: %ld\n", - OFFSET(thread_info_previous_esp)); - -+ fprintf(fp, " nsproxy_mnt_ns: %ld\n", -+ OFFSET(nsproxy_mnt_ns)); -+ fprintf(fp, " mnt_namespace_root: %ld\n", -+ OFFSET(mnt_namespace_root)); -+ fprintf(fp, " mnt_namespace_list: %ld\n", -+ OFFSET(mnt_namespace_list)); -+ - fprintf(fp, " pid_link_pid: %ld\n", - OFFSET(pid_link_pid)); - fprintf(fp, " pid_hash_chain: %ld\n", -@@ -5647,6 +5840,11 @@ - OFFSET(signal_struct_count)); - fprintf(fp, " signal_struct_action: %ld\n", - OFFSET(signal_struct_action)); -+ fprintf(fp, " signal_struct_shared_pending: %ld\n", -+ OFFSET(signal_struct_shared_pending)); -+ fprintf(fp, " signal_struct_rlim: %ld\n", -+ OFFSET(signal_struct_rlim)); -+ - fprintf(fp, " task_struct_start_time: %ld\n", - OFFSET(task_struct_start_time)); - fprintf(fp, " task_struct_times: %ld\n", -@@ -5766,10 +5964,20 @@ - OFFSET(mm_struct_pgd)); - fprintf(fp, " mm_struct_rss: %ld\n", - OFFSET(mm_struct_rss)); -+ fprintf(fp, " mm_struct_anon_rss: %ld\n", -+ OFFSET(mm_struct_anon_rss)); - fprintf(fp, " mm_struct_total_vm: %ld\n", - OFFSET(mm_struct_total_vm)); - fprintf(fp, " mm_struct_start_code: %ld\n", - OFFSET(mm_struct_start_code)); -+ fprintf(fp, " mm_struct_arg_start: %ld\n", -+ OFFSET(mm_struct_arg_start)); -+ fprintf(fp, " mm_struct_arg_end: %ld\n", -+ OFFSET(mm_struct_arg_end)); -+ fprintf(fp, " mm_struct_env_start: %ld\n", -+ OFFSET(mm_struct_env_start)); -+ fprintf(fp, " mm_struct_env_end: %ld\n", -+ OFFSET(mm_struct_env_end)); - - fprintf(fp, " vm_area_struct_vm_mm: %ld\n", - OFFSET(vm_area_struct_vm_mm)); -@@ -5922,6 +6130,8 @@ - OFFSET(irq_desc_t_status)); - fprintf(fp, " irq_desc_t_handler: %ld\n", - OFFSET(irq_desc_t_handler)); -+ fprintf(fp, " irq_desc_t_chip: %ld\n", -+ OFFSET(irq_desc_t_chip)); - fprintf(fp, " irq_desc_t_action: %ld\n", - OFFSET(irq_desc_t_action)); - fprintf(fp, " irq_desc_t_depth: 
%ld\n", -@@ -5967,11 +6177,52 @@ - fprintf(fp, "hw_interrupt_type_set_affinity: %ld\n", - OFFSET(hw_interrupt_type_set_affinity)); - -+ fprintf(fp, " irq_chip_typename: %ld\n", -+ OFFSET(irq_chip_typename)); -+ fprintf(fp, " irq_chip_startup: %ld\n", -+ OFFSET(irq_chip_startup)); -+ fprintf(fp, " irq_chip_shutdown: %ld\n", -+ OFFSET(irq_chip_shutdown)); -+ fprintf(fp, " irq_chip_enable: %ld\n", -+ OFFSET(irq_chip_enable)); -+ fprintf(fp, " irq_chip_disable: %ld\n", -+ OFFSET(irq_chip_disable)); -+ fprintf(fp, " irq_chip_ack: %ld\n", -+ OFFSET(irq_chip_ack)); -+ fprintf(fp, " irq_chip_mask: %ld\n", -+ OFFSET(irq_chip_mask)); -+ fprintf(fp, " irq_chip_mask_ack: %ld\n", -+ OFFSET(irq_chip_mask_ack)); -+ fprintf(fp, " irq_chip_unmask: %ld\n", -+ OFFSET(irq_chip_unmask)); -+ fprintf(fp, " irq_chip_eoi: %ld\n", -+ OFFSET(irq_chip_eoi)); -+ fprintf(fp, " irq_chip_end: %ld\n", -+ OFFSET(irq_chip_end)); -+ fprintf(fp, " irq_chip_set_affinity: %ld\n", -+ OFFSET(irq_chip_set_affinity)); -+ fprintf(fp, " irq_chip_retrigger: %ld\n", -+ OFFSET(irq_chip_retrigger)); -+ fprintf(fp, " irq_chip_set_type: %ld\n", -+ OFFSET(irq_chip_set_type)); -+ fprintf(fp, " irq_chip_set_wake: %ld\n", -+ OFFSET(irq_chip_set_wake)); -+ - fprintf(fp, "irq_cpustat_t___softirq_active: %ld\n", - OFFSET(irq_cpustat_t___softirq_active)); - fprintf(fp, " irq_cpustat_t___softirq_mask: %ld\n", - OFFSET(irq_cpustat_t___softirq_mask)); - -+ fprintf(fp, " files_struct_fdt: %ld\n", -+ OFFSET(files_struct_fdt)); -+ fprintf(fp, " fdtable_max_fds: %ld\n", -+ OFFSET(fdtable_max_fds)); -+ fprintf(fp, " fdtable_max_fdset: %ld\n", -+ OFFSET(fdtable_max_fdset)); -+ fprintf(fp, " fdtable_open_fds: %ld\n", -+ OFFSET(fdtable_open_fds)); -+ fprintf(fp, " fdtable_fd: %ld\n", -+ OFFSET(fdtable_fd)); - fprintf(fp, " files_struct_max_fds: %ld\n", - OFFSET(files_struct_max_fds)); - fprintf(fp, " files_struct_max_fdset: %ld\n", -@@ -5988,6 +6239,12 @@ - OFFSET(file_f_vfsmnt)); - fprintf(fp, " file_f_count: %ld\n", - OFFSET(file_f_count)); -+ fprintf(fp, " file_f_path: %ld\n", -+ OFFSET(file_f_path)); -+ fprintf(fp, " path_mnt: %ld\n", -+ OFFSET(path_mnt)); -+ fprintf(fp, " path_dentry: %ld\n", -+ OFFSET(path_dentry)); - fprintf(fp, " fs_struct_root: %ld\n", - OFFSET(fs_struct_root)); - fprintf(fp, " fs_struct_pwd: %ld\n", -@@ -6217,6 +6474,11 @@ - fprintf(fp, " inet_opt_num: %ld\n", - OFFSET(inet_opt_num)); - -+ fprintf(fp, " ipv6_pinfo_rcv_saddr: %ld\n", -+ OFFSET(ipv6_pinfo_rcv_saddr)); -+ fprintf(fp, " ipv6_pinfo_daddr: %ld\n", -+ OFFSET(ipv6_pinfo_daddr)); -+ - fprintf(fp, " timer_list_list: %ld\n", - OFFSET(timer_list_list)); - fprintf(fp, " timer_list_next: %ld\n", -@@ -6291,6 +6553,8 @@ - OFFSET(zone_struct_size)); - fprintf(fp, " zone_struct_memsize: %ld\n", - OFFSET(zone_struct_memsize)); -+ fprintf(fp, " zone_struct_zone_start_pfn: %ld\n", -+ OFFSET(zone_struct_zone_start_pfn)); - fprintf(fp, " zone_struct_zone_start_paddr: %ld\n", - OFFSET(zone_struct_zone_start_paddr)); - fprintf(fp, " zone_struct_zone_start_mapnr: %ld\n", -@@ -6332,6 +6596,8 @@ - OFFSET(zone_pages_low)); - fprintf(fp, " zone_pages_high: %ld\n", - OFFSET(zone_pages_high)); -+ fprintf(fp, " zone_vm_stat: %ld\n", -+ OFFSET(zone_vm_stat)); - - fprintf(fp, " neighbour_next: %ld\n", - OFFSET(neighbour_next)); -@@ -6471,10 +6737,55 @@ - OFFSET(x8664_pda_irqstackptr)); - fprintf(fp, " x8664_pda_level4_pgt: %ld\n", - OFFSET(x8664_pda_level4_pgt)); -+ fprintf(fp, " x8664_pda_me: %ld\n", -+ OFFSET(x8664_pda_me)); - - fprintf(fp, " tss_struct_ist: %ld\n", - OFFSET(tss_struct_ist)); -+ 
fprintf(fp, " mem_section_section_mem_map: %ld\n", -+ OFFSET(mem_section_section_mem_map)); - -+ fprintf(fp, " vcpu_guest_context_user_regs: %ld\n", -+ OFFSET(vcpu_guest_context_user_regs)); -+ fprintf(fp, " cpu_user_regs_eip: %ld\n", -+ OFFSET(cpu_user_regs_eip)); -+ fprintf(fp, " cpu_user_regs_esp: %ld\n", -+ OFFSET(cpu_user_regs_esp)); -+ fprintf(fp, " cpu_user_regs_rip: %ld\n", -+ OFFSET(cpu_user_regs_rip)); -+ fprintf(fp, " cpu_user_regs_rsp: %ld\n", -+ OFFSET(cpu_user_regs_rsp)); -+ fprintf(fp, " unwind_table_core: %ld\n", -+ OFFSET(unwind_table_core)); -+ fprintf(fp, " unwind_table_init: %ld\n", -+ OFFSET(unwind_table_init)); -+ fprintf(fp, " unwind_table_address: %ld\n", -+ OFFSET(unwind_table_address)); -+ fprintf(fp, " unwind_table_size: %ld\n", -+ OFFSET(unwind_table_size)); -+ fprintf(fp, " unwind_table_link: %ld\n", -+ OFFSET(unwind_table_link)); -+ fprintf(fp, " unwind_table_name: %ld\n", -+ OFFSET(unwind_table_name)); -+ -+ fprintf(fp, " rq_cfs: %ld\n", -+ OFFSET(rq_cfs)); -+ fprintf(fp, " rq_rt: %ld\n", -+ OFFSET(rq_rt)); -+ fprintf(fp, " rq_nr_running: %ld\n", -+ OFFSET(rq_nr_running)); -+ fprintf(fp, " task_struct_se: %ld\n", -+ OFFSET(task_struct_se)); -+ fprintf(fp, " sched_entity_run_node: %ld\n", -+ OFFSET(sched_entity_run_node)); -+ fprintf(fp, " cfs_rq_nr_running: %ld\n", -+ OFFSET(cfs_rq_nr_running)); -+ fprintf(fp, " cfs_rq_rb_leftmost: %ld\n", -+ OFFSET(cfs_rq_rb_leftmost)); -+ fprintf(fp, " cfs_rq_tasks_timeline: %ld\n", -+ OFFSET(cfs_rq_tasks_timeline)); -+ fprintf(fp, " rt_rq_active: %ld\n", -+ OFFSET(rt_rq_active)); - - fprintf(fp, "\n size_table:\n"); - fprintf(fp, " page: %ld\n", SIZE(page)); -@@ -6512,6 +6823,7 @@ - fprintf(fp, " fs_struct: %ld\n", SIZE(fs_struct)); - fprintf(fp, " files_struct: %ld\n", - SIZE(files_struct)); -+ fprintf(fp, " fdtable: %ld\n", SIZE(fdtable)); - fprintf(fp, " file: %ld\n", SIZE(file)); - fprintf(fp, " inode: %ld\n", SIZE(inode)); - fprintf(fp, " vfsmount: %ld\n", SIZE(vfsmount)); -@@ -6546,8 +6858,11 @@ - fprintf(fp, " sock: %ld\n", SIZE(sock)); - fprintf(fp, " inet_sock: %ld\n", SIZE(inet_sock)); - fprintf(fp, " socket: %ld\n", SIZE(socket)); -+ fprintf(fp, " in6_addr: %ld\n", SIZE(in6_addr)); - fprintf(fp, " signal_struct: %ld\n", - SIZE(signal_struct)); -+ fprintf(fp, " sigpending_signal: %ld\n", -+ SIZE(sigpending_signal)); - fprintf(fp, " signal_queue: %ld\n", - SIZE(signal_queue)); - fprintf(fp, " sigqueue: %ld\n", SIZE(sigqueue)); -@@ -6601,6 +6916,8 @@ - - fprintf(fp, " x8664_pda: %ld\n", - SIZE(x8664_pda)); -+ fprintf(fp, " ppc64_paca: %ld\n", -+ SIZE(ppc64_paca)); - fprintf(fp, " gate_struct: %ld\n", - SIZE(gate_struct)); - fprintf(fp, " tss_struct: %ld\n", -@@ -6609,7 +6926,14 @@ - SIZE(task_struct_start_time)); - fprintf(fp, " cputime_t: %ld\n", - SIZE(cputime_t)); -- -+ fprintf(fp, " mem_section: %ld\n", -+ SIZE(mem_section)); -+ fprintf(fp, " pid_link: %ld\n", -+ SIZE(pid_link)); -+ fprintf(fp, " unwind_table: %ld\n", -+ SIZE(unwind_table)); -+ fprintf(fp, " rlimit: %ld\n", -+ SIZE(rlimit)); - - fprintf(fp, "\n array_table:\n"); - /* -@@ -6663,6 +6987,8 @@ - get_array_length("prio_array.queue", NULL, SIZE(list_head))); - fprintf(fp, " height_to_maxindex: %d\n", - ARRAY_LENGTH(height_to_maxindex)); -+ fprintf(fp, " pid_hash: %d\n", -+ ARRAY_LENGTH(pid_hash)); - - if (spec) { - int in_size_table, in_array_table, arrays, offsets, sizes; -@@ -6890,6 +7216,10 @@ - SEC_HAS_CONTENTS)) - st->flags |= NO_SEC_CONTENTS; - } -+ if (STREQ(bfd_get_section_name(bfd, section), ".eh_frame")) { -+ st->dwarf_eh_frame_file_offset 
= (off_t)section->filepos; -+ st->dwarf_eh_frame_size = (ulong)bfd_section_size(bfd, section); -+ } - break; - - case (uint)MODULE_SECTIONS: -@@ -6906,6 +7236,10 @@ - SEC_HAS_CONTENTS)) - st->flags |= NO_SEC_CONTENTS; - } -+ if (STREQ(bfd_get_section_name(bfd, section), ".eh_frame")) { -+ st->dwarf_eh_frame_file_offset = (off_t)section->filepos; -+ st->dwarf_eh_frame_size = (ulong)bfd_section_size(bfd, section); -+ } - break; - - default: -@@ -6960,8 +7294,9 @@ - i = lm->mod_sections; - lm->mod_section_data[i].section = section; - lm->mod_section_data[i].priority = prio; -- lm->mod_section_data[i].flags = section->flags; -+ lm->mod_section_data[i].flags = section->flags & ~SEC_FOUND; - lm->mod_section_data[i].size = bfd_section_size(bfd, section); -+ lm->mod_section_data[i].offset = 0; - if (strlen(name) < MAX_MOD_SEC_NAME) - strcpy(lm->mod_section_data[i].name, name); - else -@@ -7013,7 +7348,7 @@ - */ - - static void --calculate_load_order(struct load_module *lm, bfd *bfd) -+calculate_load_order_v1(struct load_module *lm, bfd *bfd) - { - int i; - asection *section; -@@ -7073,6 +7408,131 @@ - } - - /* -+ * Later versions of kmod no longer get the help from insmod, -+ * and while the heuristics might work, it's relatively -+ * straightforward to just try to match the sections in the object file -+ * with exported symbols. -+ * -+ * This works well if kallsyms is set, but may not work so well in other -+ * instances. -+ */ -+static void -+calculate_load_order_v2(struct load_module *lm, bfd *bfd, int dynamic, -+ void *minisyms, long symcount, unsigned int size) -+{ -+ struct syment *s1, *s2; -+ ulong sec_start, sec_end; -+ bfd_byte *from, *fromend; -+ asymbol *store; -+ asymbol *sym; -+ symbol_info syminfo; -+ char *secname; -+ int i; -+ -+ s1 = lm->mod_symtable; -+ s2 = lm->mod_symend; -+ while (s1 < s2) { -+ ulong sym_offset = s1->value - lm->mod_base; -+ if (MODULE_PSEUDO_SYMBOL(s1)) { -+ s1++; -+ continue; -+ } -+ -+ /* Skip over symbols whose sections have been identified. */ -+ for (i = 0; i < lm->mod_sections; i++) { -+ if ((lm->mod_section_data[i].flags & SEC_FOUND) == 0) -+ continue; -+ if (sym_offset >= lm->mod_section_data[i].offset -+ && sym_offset < lm->mod_section_data[i].offset -+ + lm->mod_section_data[i].size) { -+ break; -+ } -+ } -+ -+ /* Matched one of the sections. Skip symbol. */ -+ if (i < lm->mod_sections) { -+ if (CRASHDEBUG(2)) { -+ fprintf(fp, "skip %lx %s %s\n", s1->value, s1->name, -+ lm->mod_section_data[i].name); -+ } -+ s1++; -+ continue; -+ } -+ -+ /* Find the symbol in the object file. */ -+ from = (bfd_byte *) minisyms; -+ fromend = from + symcount * size; -+ secname = NULL; -+ for (; from < fromend; from += size) { -+ if ((sym = bfd_minisymbol_to_symbol(bfd, dynamic, from, -+ store)) == NULL) -+ error(FATAL, -+ "bfd_minisymbol_to_symbol() failed\n"); -+ -+ bfd_get_symbol_info(bfd, sym, &syminfo); -+ if (CRASHDEBUG(3)) { -+ fprintf(fp,"matching sym %s %lx against bfd %s %lx\n", -+ s1->name, (long) s1->value, syminfo.name, -+ (long) syminfo.value); -+ } -+ if (strcmp(syminfo.name, s1->name) == 0) { -+ secname = (char *)bfd_get_section_name(bfd, sym->section); -+ break; -+ } -+ -+ } -+ if (secname == NULL) { -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "symbol %s not found in module\n", s1->name); -+ } -+ s1++; -+ continue; -+ } -+ -+ /* Match the section it came in. */ -+ for (i = 0; i < lm->mod_sections; i++) { -+ if (STREQ(lm->mod_section_data[i].name, secname)) { -+ break; -+ } -+ } -+ -+ if (i == lm->mod_sections) { -+ fprintf(fp, "?? 
Section %s not found for symbol %s\n", -+ secname, s1->name); -+ s1++; -+ continue; -+ } -+ -+ /* Update the offset information for the section */ -+ sec_start = s1->value - syminfo.value; -+ sec_end = sec_start + lm->mod_section_data[i].size; -+ lm->mod_section_data[i].offset = sec_start - lm->mod_base; -+ lm->mod_section_data[i].flags |= SEC_FOUND; -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "update sec offset sym %s @ %lx val %lx section %s\n", -+ s1->name, s1->value, syminfo.value, secname); -+ } -+ -+ if (strcmp(secname, ".text") == 0) -+ lm->mod_text_start = sec_start; -+ -+ if (strcmp(secname, ".bss") == 0) -+ lm->mod_bss_start = sec_start; -+ -+ if (strcmp(secname, ".data") == 0) -+ lm->mod_data_start = sec_start; -+ -+ if (strcmp(secname, ".data") == 0) -+ lm->mod_data_start = sec_start; -+ -+ if (strcmp(secname, ".rodata") == 0) -+ lm->mod_rodata_start = sec_start; -+ s1++; -+ } -+} -+ -+/* - * Later versons of insmod store basic address information of each - * module in a format that looks like the following example of the - * nfsd module: -@@ -7185,8 +7645,8 @@ - } - - if (CRASHDEBUG(1)) -- fprintf(fp, "load_module_symbols: %s %s %lx\n", -- modref, namelist, base_addr); -+ fprintf(fp, "load_module_symbols: %s %s %lx %lx\n", -+ modref, namelist, base_addr, kt->flags); - - switch (kt->flags & (KMOD_V1|KMOD_V2)) - { -@@ -7199,7 +7659,8 @@ - strcpy(lm->mod_namelist, namelist); - else - strncpy(lm->mod_namelist, namelist, MAX_MOD_NAMELIST-1); -- goto add_symbols; -+ if (st->flags & USE_OLD_ADD_SYM) -+ goto add_symbols; - } - - if ((mbfd = bfd_openr(namelist, NULL)) == NULL) -@@ -7219,6 +7680,10 @@ - else if (symcount == 0) - error(FATAL, "no symbols in object file: %s\n", namelist); - -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "%ld symbols found in obj file %s\n", symcount, -+ namelist); -+ } - sort_x = bfd_make_empty_symbol(mbfd); - sort_y = bfd_make_empty_symbol(mbfd); - if (sort_x == NULL || sort_y == NULL) -@@ -7261,7 +7726,9 @@ - if (!CRASHDEBUG(1)) - req->fp = pc->nullfp; - -+ st->flags |= ADD_SYMBOL_FILE; - gdb_interface(req); -+ st->flags &= ~ADD_SYMBOL_FILE; - - sprintf(buf, "set complaints 0"); - gdb_pass_through(buf, NULL, 0); -@@ -7382,7 +7849,12 @@ - - bfd_map_over_sections(bfd, section_header_info, MODULE_SECTIONS); - -- calculate_load_order(lm, bfd); -+ if (kt->flags & KMOD_V1) -+ calculate_load_order_v1(lm, bfd); -+ else -+ calculate_load_order_v2(lm, bfd, dynamic, minisyms, -+ symcount, size); -+ - - from = (bfd_byte *) minisyms; - fromend = from + symcount * size; -@@ -7395,104 +7867,112 @@ - bfd_get_symbol_info(bfd, sym, &syminfo); - - secname = (char *)bfd_get_section_name(bfd, sym->section); -+ found = 0; - -- switch (syminfo.type) -- { -- case 'b': -- case 'B': -- if (CRASHDEBUG(2)) -- fprintf(fp, "%08lx (%c) [%s] %s\n", -- (ulong)syminfo.value, -- syminfo.type, secname, syminfo.name); -+ if (kt->flags & KMOD_V1) { -+ switch (syminfo.type) -+ { -+ case 'b': -+ case 'B': -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%08lx (%c) [%s] %s\n", -+ (ulong)syminfo.value, -+ syminfo.type, secname, syminfo.name); - -- syminfo.value += lm->mod_bss_start; -- strcpy(name, syminfo.name); -- strip_module_symbol_end(name); -+ if (!lm->mod_bss_start) -+ break; - -- if (machdep->verify_symbol(name, syminfo.value, -- syminfo.type)) { -- sp->value = syminfo.value; -- sp->type = syminfo.type; -- -- namespace_ctl(NAMESPACE_INSTALL, -- &lm->mod_load_namespace, sp, name); -+ syminfo.value += lm->mod_bss_start; -+ found = 1; -+ break; - -- if (CRASHDEBUG(1)) -- fprintf(fp, "%08lx %s\n", sp->value, -- 
name); -+ case 'd': -+ case 'D': -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%08lx (%c) [%s] %s\n", -+ (ulong)syminfo.value, -+ syminfo.type, secname, syminfo.name); -+ -+ if (STREQ(secname, ".rodata")) { -+ if (!lm->mod_rodata_start) -+ break; -+ syminfo.value += lm->mod_rodata_start; -+ } else { -+ if (!lm->mod_data_start) -+ break; -+ syminfo.value += lm->mod_data_start; -+ } -+ found = 1; -+ break; - -- sp++; -- lm->mod_load_symcnt++; -- } -- break; -+ case 't': -+ case 'T': -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%08lx (%c) [%s] %s\n", -+ (ulong)syminfo.value, -+ syminfo.type, secname, syminfo.name); - -- case 'd': -- case 'D': -- if (CRASHDEBUG(2)) -- fprintf(fp, "%08lx (%c) [%s] %s\n", -- (ulong)syminfo.value, -- syminfo.type, secname, syminfo.name); -+ if (! lm->mod_text_start) { -+ break; -+ } - -- if (STREQ(secname, ".rodata")) -- syminfo.value += lm->mod_rodata_start; -- else -- syminfo.value += lm->mod_data_start; -+ if ((st->flags & INSMOD_BUILTIN) && -+ (STREQ(name, "init_module") || -+ STREQ(name, "cleanup_module"))) -+ break; - -+ syminfo.value += lm->mod_text_start; -+ found = 1; -+ break; -+ -+ default: -+ break; -+ } -+ -+ } else { -+ /* Match the section it came in. */ -+ for (i = 0; i < lm->mod_sections; i++) { -+ if (STREQ(lm->mod_section_data[i].name, secname) -+ && (lm->mod_section_data[i].flags & SEC_FOUND)) { -+ break; -+ } -+ } -+ if (i < lm->mod_sections) { -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%08lx (%c) [%s] %s\n", -+ (ulong)syminfo.value, -+ syminfo.type, secname, syminfo.name); -+ -+ if ((st->flags & INSMOD_BUILTIN) && -+ (STREQ(name, "init_module") || -+ STREQ(name, "cleanup_module"))) { -+ found = 0; -+ } else { -+ syminfo.value += lm->mod_section_data[i].offset + lm->mod_base; -+ found = 1; -+ } -+ } -+ } -+ -+ if (found) { - strcpy(name, syminfo.name); - strip_module_symbol_end(name); - -- if (machdep->verify_symbol(name, syminfo.value, -- syminfo.type)) { -+ if (machdep->verify_symbol(name, syminfo.value, -+ syminfo.type)) { - sp->value = syminfo.value; -- sp->type = syminfo.type; -+ sp->type = syminfo.type; - namespace_ctl(NAMESPACE_INSTALL, -- &lm->mod_load_namespace, sp, name); -+ &lm->mod_load_namespace, sp, name); - - if (CRASHDEBUG(1)) -- fprintf(fp, "%08lx %s\n", sp->value, -+ fprintf(fp, "installing %c %08lx %s\n", syminfo.type, sp->value, - name); - - sp++; - lm->mod_load_symcnt++; - } -- break; -- -- case 't': -- case 'T': -- if (CRASHDEBUG(2)) -- fprintf(fp, "%08lx (%c) [%s] %s\n", -- (ulong)syminfo.value, -- syminfo.type, secname, syminfo.name); -- -- syminfo.value += lm->mod_text_start; -- strcpy(name, syminfo.name); -- strip_module_symbol_end(name); -- -- if ((st->flags & INSMOD_BUILTIN) && -- (STREQ(name, "init_module") || -- STREQ(name, "cleanup_module"))) -- break; -- -- if (machdep->verify_symbol(name, syminfo.value, -- syminfo.type)) { -- sp->value = syminfo.value; -- sp->type = syminfo.type; -- namespace_ctl(NAMESPACE_INSTALL, -- &lm->mod_load_namespace, sp, name); -- -- if (CRASHDEBUG(1)) -- fprintf(fp, "%08lx %s\n", sp->value, -- name); -- -- sp++; -- lm->mod_load_symcnt++; -- } -- -- break; -- -- default: -- break; -- } -+ } - } - - lm->mod_load_symend = &lm->mod_load_symtable[lm->mod_load_symcnt]; -@@ -7713,7 +8193,7 @@ - ulong start, end; - char *modbuf; - ulong maxchunk, alloc; -- long offset; -+ long offset = 0; - - start = roundup(lm->mod_size_of_struct, sizeof(long)) + lm->mod_base; - end = lm->mod_base + lm->mod_size; -@@ -8089,6 +8569,10 @@ - struct syment *sp_array[200], *sp; - - if (req->name == PATCH_KERNEL_SYMBOLS_START) { -+ 
if (kt->flags & RELOC_FORCE) -+ error(WARNING, -+ "\nkernel relocated [%ldMB]: patching %ld gdb minimal_symbol values\n", -+ kt->relocate >> 20, st->symcnt); - fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : - "\nplease wait... (patching %ld gdb minimal_symbol values) ", - st->symcnt); ---- crash/cmdline.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/cmdline.c 2007-07-18 14:41:55.000000000 -0400 + /* define TRUE and FALSE for use in our dump modules */ + #ifndef FALSE +--- crash/cmdline.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/cmdline.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* cmdline.c - core analysis suite * @@ -32268,7 +562,7 @@ if (!(pc->readline = readline(pc->prompt))) { args[0] = NULL; fprintf(fp, "\n"); -@@ -276,6 +306,106 @@ +@@ -276,6 +306,108 @@ } /* @@ -32366,6 +660,8 @@ + CRASHPAGER_argv[i]); + } + break; ++ default: ++ return NULL; + } + + return buf; @@ -32375,7 +671,7 @@ * Parse the command line for pipe or redirect characters: * * 1. if a "|" character is found, popen() what comes after it, and -@@ -415,6 +545,9 @@ +@@ -415,6 +547,9 @@ return REDIRECT_FAILURE; } @@ -32385,7 +681,7 @@ if ((ofile = fopen(p, append ? "a+" : "w+")) == NULL) { error(INFO, "unable to open %s\n", p); -@@ -464,10 +597,13 @@ +@@ -464,10 +599,13 @@ switch (pc->scroll_command) { case SCROLL_LESS: @@ -32401,7 +697,7 @@ break; } -@@ -839,13 +975,15 @@ +@@ -839,13 +977,15 @@ restore_sanity(void) { int fd, waitstatus; @@ -32418,7 +714,7 @@ } pc->stdpipe_pid = 0; } -@@ -855,12 +993,16 @@ +@@ -855,12 +995,16 @@ console("wait for redirect %d->%d to finish...\n", pc->pipe_shell_pid, pc->pipe_pid); if (pc->pipe_pid) @@ -32437,7 +733,7 @@ pc->pipe_pid = 0; } if (pc->ifile_pipe) { -@@ -872,12 +1014,16 @@ +@@ -872,12 +1016,16 @@ (FROM_INPUT_FILE|REDIRECT_TO_PIPE|REDIRECT_PID_KNOWN))) { console("wait for redirect %d->%d to finish...\n", pc->pipe_shell_pid, pc->pipe_pid); @@ -32456,7 +752,7 @@ if (pc->redirect & (REDIRECT_MULTI_PIPE)) wait_for_children(ALL_CHILDREN); } -@@ -918,13 +1064,20 @@ +@@ -918,13 +1066,20 @@ wait_for_children(ZOMBIES_ONLY); @@ -32478,7 +774,7 @@ restore_gdb_sanity(); -@@ -942,6 +1095,16 @@ +@@ -942,6 +1097,16 @@ clear_vma_cache(); clear_active_set(); @@ -32495,7 +791,7 @@ if (CRASHDEBUG(4)) { dump_filesys_table(0); dump_vma_cache(0); -@@ -961,6 +1124,8 @@ +@@ -961,6 +1126,8 @@ { int fd; @@ -32504,7 +800,7 @@ if (pc->ifile_pipe) { close(fileno(pc->ifile_pipe)); pc->ifile_pipe = NULL; -@@ -1076,7 +1241,6 @@ +@@ -1076,7 +1243,6 @@ } else this = 0; @@ -32512,7 +808,7 @@ if (pc->flags & RUNTIME_IFILE) { error(INFO, "embedded input files not allowed!\n"); return; -@@ -1111,6 +1275,28 @@ +@@ -1111,6 +1277,28 @@ pc->flags |= RUNTIME_IFILE; incoming_fp = fp; @@ -32541,7 +837,7 @@ while (fgets(buf, BUFSIZE-1, pc->ifile)) { /* * Restore normal environment. -@@ -1120,6 +1306,8 @@ +@@ -1120,6 +1308,8 @@ BZERO(pc->command_line, BUFSIZE); BZERO(pc->orig_line, BUFSIZE); @@ -32550,7 +846,7 @@ if (STRNEQ(buf, "#") || STREQ(buf, "\n")) continue; -@@ -1168,6 +1356,10 @@ +@@ -1168,6 +1358,10 @@ fclose(pc->ifile); pc->ifile = NULL; pc->flags &= ~RUNTIME_IFILE; @@ -32561,7 +857,7 @@ } /* -@@ -1706,15 +1898,20 @@ +@@ -1706,15 +1900,20 @@ error(FATAL, "scrolling must be turned off when repeating an input file\n"); @@ -32583,7 +879,7 @@ if (delay) sleep(delay); -@@ -1829,19 +2026,6 @@ +@@ -1829,19 +2028,6 @@ * Set up the standard output pipe using whichever was selected during init. 
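/*
 * Hedged sketch of the CRASHPAGER support visible above: the environment
 * variable is split once into an argv[] vector (CRASHPAGER_argv in the
 * patch) so the named pager can later be exec'd just like the built-in
 * less/more paths.  Buffer sizes, names and the metacharacter filter below
 * are illustrative assumptions, not the patch's exact code.
 */
#include <stdlib.h>
#include <string.h>

#define MAX_PAGER_ARGS 32

static int
build_crashpager_argv(char **argv, int max)
{
	static char buf[1024];
	char *env, *tok;
	int argc = 0;

	if ((env = getenv("CRASHPAGER")) == NULL || *env == '\0')
		return 0;
	if (strpbrk(env, "|;&`") != NULL)
		return 0;			/* refuse shell metacharacters */

	strncpy(buf, env, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	for (tok = strtok(buf, " \t"); tok && argc < max - 1;
	     tok = strtok(NULL, " \t"))
		argv[argc++] = tok;
	argv[argc] = NULL;

	return argc;
}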
*/ @@ -32603,7 +899,7 @@ static int setup_stdpipe(void) { -@@ -1877,6 +2061,9 @@ +@@ -1877,6 +2063,9 @@ case SCROLL_MORE: strcpy(pc->pipe_command, more_argv[0]); break; @@ -32613,7 +909,7 @@ } if (CRASHDEBUG(2)) -@@ -1905,10 +2092,16 @@ +@@ -1905,10 +2094,16 @@ path = more_argv[0]; execv(path, more_argv); break; @@ -32632,7620 +928,37 @@ } } -@@ -1939,5 +2132,6 @@ +@@ -1939,5 +2134,6 @@ fprintf(fp, "wait_for_children: reaped %d\n", pid); break; } + stall(1000); } } ---- crash/lkcd_common.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/lkcd_common.c 2007-01-17 10:14:26.000000000 -0500 -@@ -3,8 +3,8 @@ - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. - * Copyright (C) 2002 Silicon Graphics, Inc. - * Copyright (C) 2002 Free Software Foundation, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -53,6 +53,8 @@ - - struct lkcd_environment lkcd_environment = { 0 }; - struct lkcd_environment *lkcd = &lkcd_environment; -+static int uncompress_errloc; -+static int uncompress_recover(unsigned char *, ulong, unsigned char *, ulong); - - ulonglong - fix_lkcd_address(ulonglong addr) -@@ -208,6 +210,7 @@ - - case LKCD_DUMP_V8: - case LKCD_DUMP_V9: -+ case LKCD_DUMP_V10: - lkcd->version = LKCD_DUMP_V8; - return TRUE; - -@@ -623,6 +626,10 @@ - { - static int i = 0; - -+ if (pc->flags & SILENT) { -+ return; -+ } -+ - switch (++i%4) { - case 0: - lkcd_print("|\b"); -@@ -667,6 +674,8 @@ - { - uint64_t zone, page; - int ii, ret; -+ int max_zones; -+ struct physmem_zone *zones; - - zone = paddr & lkcd->zone_mask; - -@@ -693,6 +702,7 @@ - lkcd->num_zones++; - } - -+retry: - /* find the zone */ - for (ii=0; ii < lkcd->num_zones; ii++) { - if (lkcd->zones[ii].start == zone) { -@@ -734,8 +744,20 @@ - ret = 1; - lkcd->num_zones++; - } else { -- lkcd_print("fixme, need to add more zones (ZONE_ALLOC)\n"); -- exit(1); -+ /* need to expand zone */ -+ max_zones = lkcd->max_zones * 2; -+ zones = malloc(max_zones * sizeof(struct physmem_zone)); -+ if (!zones) { -+ return -1; /* This should be fatal */ -+ } -+ BZERO(zones, max_zones * sizeof(struct physmem_zone)); -+ memcpy(zones, lkcd->zones, -+ lkcd->max_zones * sizeof(struct physmem_zone)); -+ free(lkcd->zones); -+ -+ lkcd->zones = zones; -+ lkcd->max_zones = max_zones; -+ goto retry; - } - } - -@@ -769,7 +791,7 @@ - int - lkcd_lseek(physaddr_t paddr) - { -- long i; -+ long i = 0; - int err; - int eof; - void *dp; -@@ -814,7 +836,7 @@ - lseek(lkcd->fd, lkcd->page_offset_max, SEEK_SET); - eof = FALSE; - while (!eof) { -- if( (i%2048) == 0) { -+ if( (i++%2048) == 0) { - lkcd_speedo(); - } - -@@ -1164,40 +1186,103 @@ - return 1; - } - -+/* Returns the bit offset if it's able to correct, or negative if not */ -+static int -+uncompress_recover(unsigned char *dest, ulong destlen, -+ unsigned char *source, ulong sourcelen) -+{ -+ int byte, bit; -+ ulong retlen = destlen; -+ int good_decomp = 0, good_rv = -1; -+ -+ /* Generate all single bit errors */ -+ if (sourcelen > 16384) { -+ lkcd_print("uncompress_recover: sourcelen %ld too long\n", -+ sourcelen); -+ return(-1); -+ } -+ for (byte = 0; byte < sourcelen; byte++) { -+ for (bit = 0; bit < 8; bit++) { -+ source[byte] 
^= (1 << bit); -+ -+ if (uncompress(dest, &retlen, source, sourcelen) == Z_OK && -+ retlen == destlen) { -+ good_decomp++; -+ lkcd_print("good for flipping byte %d bit %d\n", -+ byte, bit); -+ good_rv = bit + byte * 8; -+ } -+ -+ /* Put it back */ -+ source[byte] ^= (1 << bit); -+ } -+ } -+ if (good_decomp == 0) { -+ lkcd_print("Could not correct gzip errors.\n"); -+ return -2; -+ } else if (good_decomp > 1) { -+ lkcd_print("Too many valid gzip decompressions: %d.\n", good_decomp); -+ return -3; -+ } else { -+ source[good_rv >> 8] ^= 1 << (good_rv % 8); -+ uncompress(dest, &retlen, source, sourcelen); -+ source[good_rv >> 8] ^= 1 << (good_rv % 8); -+ return good_rv; -+ } -+} -+ -+ - /* - * Uncompress a gzip'd buffer. -+ * -+ * Returns FALSE on error. If set, then -+ * a non-negative value of uncompress_errloc indicates the location of -+ * a single-bit error, and the data may be used. - */ - static int - lkcd_uncompress_gzip(unsigned char *dest, ulong destlen, - unsigned char *source, ulong sourcelen) - { - ulong retlen = destlen; -+ int rc; - - switch (uncompress(dest, &retlen, source, sourcelen)) - { - case Z_OK: - if (retlen == destlen) -- return TRUE; -+ rc = TRUE; -+ break; - - lkcd_print("uncompress: returned length not page size: %ld\n", - retlen); -- return FALSE; -+ rc = FALSE; -+ break; - - case Z_MEM_ERROR: - lkcd_print("uncompress: Z_MEM_ERROR (not enough memory)\n"); -- return FALSE; -+ rc = FALSE; -+ break; - - case Z_BUF_ERROR: - lkcd_print("uncompress: " - "Z_BUF_ERROR (not enough room in output buffer)\n"); -- return FALSE; -+ rc = FALSE; -+ break; - - case Z_DATA_ERROR: - lkcd_print("uncompress: Z_DATA_ERROR (input data corrupted)\n"); -- return FALSE; -+ rc = FALSE; -+ break; -+ default: -+ rc = FALSE; -+ break; - } - -- return FALSE; -+ if (rc == FALSE) { -+ uncompress_errloc = -+ uncompress_recover(dest, destlen, source, sourcelen); -+ } -+ return rc; - } - - -@@ -1252,8 +1337,9 @@ - dp_flags = lkcd->get_dp_flags(); - dp_address = lkcd->get_dp_address(); - -- if (dp_flags & LKCD_DUMP_END) -+ if (dp_flags & LKCD_DUMP_END) { - return LKCD_DUMPFILE_END; -+ } - - if ((lkcd->flags & LKCD_VALID) && (page > lkcd->total_pages)) - lkcd->total_pages = page; ---- crash/lkcd_v7.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/lkcd_v7.c 2005-11-10 15:25:45.000000000 -0500 -@@ -89,7 +89,11 @@ - ifd = 0; - - #ifdef LKCD_INDEX_FILE -- lkcd->memory_pages = (dh->dh_memory_size * (getpagesize()/lkcd->page_size)) * 2; -+ if (dh->dh_memory_end < 0x1000000000LL) { -+ lkcd->memory_pages = dh->dh_memory_end / lkcd->page_size + 1; -+ } else { -+ lkcd->memory_pages = (dh->dh_memory_size * (getpagesize()/lkcd->page_size)) * 2; -+ } - dump_index_size = (lkcd->memory_pages * sizeof(off_t)); - lkcd->page_offsets = 0; - strcpy(dumpfile_index_name, dumpfile); ---- crash/lkcd_v8.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/lkcd_v8.c 2005-12-15 15:19:21.000000000 -0500 -@@ -26,6 +26,7 @@ - // static dump_header_asm_t dump_header_asm_v8 = { 0 }; - static dump_page_t dump_page = { 0 }; - static void mclx_cache_page_headers_v8(void); -+static off_t lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE; - - /* - * Verify and initialize the LKCD environment, storing the common data -@@ -56,10 +57,13 @@ - if (read(lkcd->fd, dh, sizeof(dump_header_t)) != - sizeof(dump_header_t)) - return FALSE; -- if ((dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9) -+ if ((dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9){ - if (read(lkcd->fd, &dh_dump_buffer_size, 
sizeof(dh_dump_buffer_size)) != - sizeof(dh_dump_buffer_size)) - return FALSE; -+ lkcd_offset_to_first_page = dh_dump_buffer_size; -+ } else -+ lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE; - - lkcd->dump_page = dp; - lkcd->dump_header = dh; -@@ -146,7 +150,7 @@ - lkcd->compression = dh->dh_dump_compress; - lkcd->page_header_size = sizeof(dump_page_t); - -- lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET); -+ lseek(lkcd->fd, lkcd_offset_to_first_page, SEEK_SET); - - /* - * Read all of the pages and save the page offsets for lkcd_lseek(). -@@ -483,7 +487,7 @@ - /* - * Determine the granularity between offsets. - */ -- if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, -+ if (lseek(lkcd->fd, page_headers[0] + lkcd_offset_to_first_page, - SEEK_SET) == -1) - return; - if (read(lkcd->fd, dp, lkcd->page_header_size) != -@@ -491,7 +495,7 @@ - return; - physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; - -- if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE, -+ if (lseek(lkcd->fd, page_headers[1] + lkcd_offset_to_first_page, - SEEK_SET) == -1) - return; - if (read(lkcd->fd, dp, lkcd->page_header_size) -@@ -508,7 +512,7 @@ - for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { - if (!page_headers[i]) - break; -- lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE; -+ lkcd->curhdroffs = page_headers[i] + lkcd_offset_to_first_page; - set_mb_benchmark((granularity * (i+1))/lkcd->page_size); - } - } ---- crash/s390_dump.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/s390_dump.c 2006-10-11 09:14:35.000000000 -0400 +--- crash/lkcd_dump_v5.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_dump_v5.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ - /* s390_dump.c - core analysis suite + /* lkcd_dump_v5.h - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. - * Copyright (C) 2002, 2003, 2004, 2005 David Anderson - * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. + * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson + * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. - * Copyright (C) 2005 Michael Holzheu, IBM Corporation * * This program is free software; you can redistribute it and/or modify -@@ -16,7 +16,7 @@ - * GNU General Public License for more details. - */ - #include "defs.h" --#include -+//#include - #include "ibm_common.h" - - static FILE * s390_file; -@@ -69,10 +69,13 @@ - return WRITE_ERROR; - } - -+#define S390_PAGE_SHIFT 12 -+#define S390_PAGE_SIZE (1UL << S390_PAGE_SHIFT) -+ - uint - s390_page_size(void) - { -- return PAGE_SIZE; -+ return S390_PAGE_SIZE; - } - - int ---- crash/lkcd_x86_trace.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/lkcd_x86_trace.c 2007-08-23 17:02:54.000000000 -0400 -@@ -5,8 +5,8 @@ - /* - * lkcd_x86_trace.c - * -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. 
- * - * Adapted as noted from the following LKCD files: - * -@@ -21,6 +21,9 @@ - - #include "lkcd_x86_trace.h" - -+#undef XEN_HYPER_MODE -+static int XEN_HYPER_MODE(void) { return (pc->flags & XEN_HYPER) != 0; } -+ - static void *kl_alloc_block(int, int); - static void kl_free_block(void *); - static void GET_BLOCK(kaddr_t, unsigned, void *); -@@ -47,11 +50,13 @@ - static int setup_trace_rec(kaddr_t, kaddr_t, int, trace_t *); - static int valid_ra(kaddr_t); - static int valid_ra_function(kaddr_t, char *); -+static int eframe_incr(kaddr_t, char *); - static int find_trace(kaddr_t, kaddr_t, kaddr_t, kaddr_t, trace_t *, int); - static void dump_stack_frame(trace_t *, sframe_t *, FILE *); - static void print_trace(trace_t *, int, FILE *); - struct pt_regs; - static int eframe_type(struct pt_regs *); -+char *funcname_display(char *); - static void print_eframe(FILE *, struct pt_regs *); - static void trace_banner(FILE *); - static void print_kaddr(kaddr_t, FILE *, int); -@@ -505,7 +510,7 @@ - { "receive_chars", NULL, - COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 48 }, - { "default_idle", NULL, -- COMPILER_VERSION_START, GCC(3,3,2), 0, -4, 0 }, -+ COMPILER_VERSION_START, GCC(2,96,0), 0, -4, 0 }, - { NULL, NULL, 0, 0, 0, 0, 0 }, - }; - -@@ -1118,7 +1123,9 @@ - } - - #include -+#ifndef REDHAT - #include -+#endif - #define KERNEL_EFRAME 0 - #define USER_EFRAME 1 - #define KERNEL_EFRAME_SZ 13 /* no ss and esp */ -@@ -1153,6 +1160,9 @@ - else if (((regs->xcs & 0xffff) == 0x60) && - ((regs->xds & 0xffff) == 0x7b)) - return KERNEL_EFRAME; -+ else if (XEN() && ((regs->xcs & 0xffff) == 0x61) && -+ ((regs->xds & 0xffff) == 0x7b)) -+ return KERNEL_EFRAME; - #endif - else if (((regs->xcs & 0xffff) == __USER_CS) && - ((regs->xds & 0xffff) == __USER_DS)) -@@ -1206,6 +1216,93 @@ - } \ - } - #endif -+ -+/* -+ * Determine how much to increment the stack pointer to find the -+ * exception frame associated with a generic "error_code" or "nmi" -+ * exception. -+ * -+ * The incoming addr is that of the call to the generic error_code -+ * or nmi exception handler function. Until later 2.6 kernels, the next -+ * instruction had always been an "addl $8,%esp". However, with later -+ * 2.6 kernels, that esp adjustment is no long valid, and there will be -+ * an immediate "jmp" instruction. Returns 4 or 12, whichever is appropriate. -+ * Cache the value the first time, and allow for future changes or additions. -+ */ -+ -+#define NMI_ADJ (0) -+#define ERROR_CODE_ADJ (1) -+#define EFRAME_ADJUSTS (ERROR_CODE_ADJ+1) -+ -+static int eframe_adjust[EFRAME_ADJUSTS] = { 0 }; -+ -+static int -+eframe_incr(kaddr_t addr, char *funcname) -+{ -+ instr_rec_t irp; -+ kaddr_t next; -+ int size, adj, val; -+ -+ if (STRNEQ(funcname, "nmi")) { -+ adj = NMI_ADJ; -+ val = eframe_adjust[NMI_ADJ]; -+ } else if (strstr(funcname, "error_code")) { -+ adj = ERROR_CODE_ADJ; -+ val = eframe_adjust[ERROR_CODE_ADJ]; -+ } else { -+ adj = -1; -+ val = 0; -+ error(INFO, -+ "unexpected exception frame marker: %lx (%s)\n", -+ addr, funcname); -+ } -+ -+ if (val) { -+ console("eframe_incr(%lx, %s): eframe_adjust[%d]: %d\n", -+ addr, funcname, adj, val); -+ return val; -+ } -+ -+ console("eframe_incr(%lx, %s): TBD:\n", addr, funcname); -+ -+ bzero(&irp, sizeof(irp)); -+ irp.aflag = 1; -+ irp.dflag = 1; -+ if (!(size = get_instr_info(addr, &irp))) { -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n", -+ addr, funcname, addr); -+ return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 
4 : 12); -+ } -+ console(" addr: %lx size: %d opcode: 0x%x insn: \"%s\"\n", -+ addr, size, irp.opcode, irp.opcodep->name); -+ -+ next = addr + size; -+ bzero(&irp, sizeof(irp)); -+ irp.aflag = 1; -+ irp.dflag = 1; -+ if (!(size = get_instr_info(next, &irp))) { -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n", -+ addr, funcname, next); -+ return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 4 : 12); -+ } -+ console(" next: %lx size: %d opcode: 0x%x insn: \"%s\"\n", -+ next, size, irp.opcode, irp.opcodep->name); -+ -+ if (STREQ(irp.opcodep->name, "jmp")) -+ val = 4; -+ else -+ val = 12; -+ -+ if (adj >= 0) -+ eframe_adjust[adj] = val; -+ -+ return val; -+} -+ - /* - * find_trace() - * -@@ -1253,6 +1350,7 @@ - int flag; - int interrupted_system_call = FALSE; - struct bt_info *bt = trace->bt; -+ struct pt_regs *pt; - #endif - sbp = trace->stack[curstkidx].ptr; - sbase = trace->stack[curstkidx].addr; -@@ -1322,7 +1420,17 @@ - } - } - asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); -+ - #ifdef REDHAT -+ if (XEN_HYPER_MODE()) { -+ func_name = kl_funcname(pc); -+ if (STREQ(func_name, "idle_loop") || STREQ(func_name, "hypercall") -+ || STREQ(func_name, "handle_exception")) { -+ UPDATE_FRAME(func_name, pc, 0, sp, bp, asp, 0, 0, bp - sp, 0); -+ return(trace->nframes); -+ } -+ } -+ - ra = GET_STACK_ULONG(bp + 4); - /* - * HACK: The get_framesize() function can return the proper -@@ -1447,7 +1555,8 @@ - bp = curframe->fp + frame_size; - } - #endif -- if ((func_name = kl_funcname(pc))) { -+ func_name = kl_funcname(pc); -+ if (func_name && !XEN_HYPER_MODE()) { - if (strstr(func_name, "kernel_thread")) { - ra = 0; - bp = saddr - 4; -@@ -1503,12 +1612,13 @@ - return(trace->nframes); - #ifdef REDHAT - } else if (strstr(func_name, "error_code") -+ || STREQ(func_name, "nmi_stack_correct") - || STREQ(func_name, "nmi")) { - #else - } else if (strstr(func_name, "error_code")) { - #endif - /* an exception frame */ -- sp = curframe->fp+12; -+ sp = curframe->fp + eframe_incr(pc, func_name); - - bp = sp + (KERNEL_EFRAME_SZ-1)*4; - asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - -@@ -1571,6 +1681,46 @@ - } - } - } -+ if (func_name && XEN_HYPER_MODE()) { -+ if (STREQ(func_name, "continue_nmi") || -+ STREQ(func_name, "vmx_asm_vmexit_handler") || -+ STREQ(func_name, "deferred_nmi")) { -+ /* Interrupt frame */ -+ sp = curframe->fp + 4; -+ asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - -+ (saddr - sp))); -+ bp = curframe->fp + (12 * 4); -+ curframe = alloc_sframe(trace, flags); -+ ra = *(asp + 9); -+ UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, -+ 0, 0, curframe->fp - curframe->sp+4, 12 * 4); -+ -+ /* contunue next frame */ -+ pc = ra; -+ sp = curframe->fp + 4; -+ bp = sp + get_framesize(pc, bt); -+ func_name = kl_funcname(pc); -+ if (!func_name) -+ return trace->nframes; -+ continue; -+ } -+ } -+ -+ /* -+ * Check for hypervisor_callback from user-space. -+ */ -+ if ((bt->flags & BT_XEN_STOP_THIS_CPU) && bt->tc->mm_struct && -+ STREQ(kl_funcname(curframe->pc), "hypervisor_callback")) { -+ pt = (struct pt_regs *)(curframe->asp+1); -+ if (eframe_type(pt) == USER_EFRAME) { -+ if (program_context.debug >= 1) /* pc above */ -+ error(INFO, -+ "hypervisor_callback from user space\n"); -+ curframe->asp++; -+ curframe->flag |= EX_FRAME; -+ return(trace->nframes); -+ } -+ } - - /* Make sure our next frame pointer is valid (in the stack). 
- */ -@@ -1684,8 +1834,15 @@ - (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ))) - return; - -- print_stack_entry(trace->bt, -- trace->bt->flags & BT_BUMP_FRAME_LEVEL ? -+ if ((frmp->level == 0) && (bt->flags & BT_XEN_STOP_THIS_CPU)) { -+ print_stack_entry(trace->bt, 0, trace->bt->stkptr, -+ symbol_value("stop_this_cpu"), -+ value_symbol(symbol_value("stop_this_cpu")), -+ frmp, ofp); -+ } -+ -+ print_stack_entry(trace->bt, (trace->bt->flags & -+ (BT_BUMP_FRAME_LEVEL|BT_XEN_STOP_THIS_CPU)) ? - frmp->level + 1 : frmp->level, - fp ? (ulong)fp : trace->bt->stkptr, - (ulong)frmp->pc, frmp->funcname, frmp, ofp); -@@ -1708,6 +1865,10 @@ - #endif - if (frmp->flag & EX_FRAME) { - pt = (struct pt_regs *)frmp->asp; -+ if (CRASHDEBUG(1)) -+ fprintf(ofp, -+ " EXCEPTION FRAME: %lx\n", -+ (unsigned long)frmp->sp); - print_eframe(ofp, pt); - } - #ifdef REDHAT -@@ -1789,6 +1950,114 @@ - if (kt->flags & RA_SEEK) - bt->flags |= BT_SPECULATE; - -+ if (XENDUMP_DUMPFILE() && XEN() && is_task_active(bt->task) && -+ STREQ(kl_funcname(bt->instptr), "stop_this_cpu")) { -+ /* -+ * bt->instptr of "stop_this_cpu" is not a return -+ * address -- replace it with the actual return -+ * address found at the bt->stkptr location. -+ */ -+ if (readmem((ulong)bt->stkptr, KVADDR, &eip, -+ sizeof(ulong), "xendump eip", RETURN_ON_ERROR)) -+ bt->instptr = eip; -+ bt->flags |= BT_XEN_STOP_THIS_CPU; -+ if (CRASHDEBUG(1)) -+ error(INFO, "replacing stop_this_cpu with %s\n", -+ kl_funcname(bt->instptr)); -+ } -+ -+ if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) && -+ is_task_active(bt->task) && -+ !(kt->xen_flags & XEN_SUSPEND) && -+ STREQ(kl_funcname(bt->instptr), "schedule")) { -+ /* -+ * This is an invalid (stale) schedule reference -+ * left in the task->thread. Move down the stack -+ * until the smp_call_function_interrupt return -+ * address is found. -+ */ -+ saddr = bt->stkptr; -+ while (readmem(saddr, KVADDR, &eip, -+ sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { -+ if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) { -+ bt->instptr = eip; -+ bt->stkptr = saddr; -+ bt->flags |= BT_XEN_STOP_THIS_CPU; -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "switch schedule to smp_call_function_interrupt\n"); -+ break; -+ } -+ saddr -= sizeof(void *); -+ if (saddr <= bt->stackbase) -+ break; -+ } -+ } -+ -+ if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) && -+ is_task_active(bt->task) && -+ (kt->xen_flags & XEN_SUSPEND) && -+ STREQ(kl_funcname(bt->instptr), "schedule")) { -+ int framesize = 0; -+ /* -+ * This is an invalid (stale) schedule reference -+ * left in the task->thread. Move down the stack -+ * until the hypercall_page() return address is -+ * found, and fix up its framesize as we go. 
-+ */ -+ saddr = bt->stacktop; -+ while (readmem(saddr, KVADDR, &eip, -+ sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { -+ -+ if (STREQ(kl_funcname(eip), "xen_idle")) -+ framesize += sizeof(ulong); -+ else if (framesize) -+ framesize += sizeof(ulong); -+ -+ if (STREQ(kl_funcname(eip), "hypercall_page")) { -+ int framesize = 24; -+ bt->instptr = eip; -+ bt->stkptr = saddr; -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "switch schedule to hypercall_page (framesize: %d)\n", -+ framesize); -+ FRAMESIZE_CACHE_ENTER(eip, &framesize); -+ break; -+ } -+ saddr -= sizeof(void *); -+ if (saddr <= bt->stackbase) -+ break; -+ } -+ } -+ -+ if (XENDUMP_DUMPFILE() && XEN() && !is_idle_thread(bt->task) && -+ is_task_active(bt->task) && -+ STREQ(kl_funcname(bt->instptr), "schedule")) { -+ /* -+ * This is an invalid (stale) schedule reference -+ * left in the task->thread. Move down the stack -+ * until the smp_call_function_interrupt return -+ * address is found. -+ */ -+ saddr = bt->stacktop; -+ while (readmem(saddr, KVADDR, &eip, -+ sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { -+ if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) { -+ bt->instptr = eip; -+ bt->stkptr = saddr; -+ bt->flags |= BT_XEN_STOP_THIS_CPU; -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "switch schedule to smp_call_function_interrupt\n"); -+ break; -+ } -+ saddr -= sizeof(void *); -+ if (saddr <= bt->stackbase) -+ break; -+ } -+ } -+ - if (!verify_back_trace(bt) && !recoverable(bt, ofp) && - !BT_REFERENCE_CHECK(bt)) - error(INFO, "cannot resolve stack trace:\n"); -@@ -1797,12 +2066,14 @@ - return(0); - #endif - -- if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) { -- return(1); -- } -- if (kl_get_task_struct(task, 2, tsp)) { -- kl_free_block(tsp); -- return(1); -+ if (!XEN_HYPER_MODE()) { -+ if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) { -+ return(1); -+ } -+ if (kl_get_task_struct(task, 2, tsp)) { -+ kl_free_block(tsp); -+ return(1); -+ } - } - trace = (trace_t *)alloc_trace_rec(C_TEMP); - if (!trace) { -@@ -1874,7 +2145,9 @@ - #endif - print_trace(trace, flags, ofp); - } -- kl_free_block(tsp); -+ if (!XEN_HYPER_MODE()) -+ kl_free_block(tsp); -+ - free_trace_rec(trace); - #ifdef REDHAT - if (KL_ERROR == KLE_PRINT_TRACE_ERROR) { -@@ -1901,13 +2174,15 @@ - errcnt = 0; - KL_ERROR = 0; - -- if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) -- return FALSE; -- -- if (kl_get_task_struct(bt->task, 2, tsp)) { -- kl_free_block(tsp); -- return FALSE; -- } -+ if (!XEN_HYPER_MODE()) { -+ if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) -+ return FALSE; -+ -+ if (kl_get_task_struct(bt->task, 2, tsp)) { -+ kl_free_block(tsp); -+ return FALSE; -+ } -+ } - - trace = (trace_t *)alloc_trace_rec(C_TEMP); - if (!trace) -@@ -1952,7 +2227,9 @@ - } while (frmp != trace->frame); - } - -- kl_free_block(tsp); -+ if (!XEN_HYPER_MODE()) -+ kl_free_block(tsp); -+ - free_trace_rec(trace); - return (errcnt ? FALSE : TRUE); - } -@@ -2192,11 +2469,12 @@ - else - buf[0] = NULLCHAR; - -- if ((sp = eframe_label(funcname, eip))) -+ if ((sp = eframe_label(funcname, eip))) - funcname = sp->name; - - fprintf(ofp, "%s#%d [%8lx] %s%s at %lx\n", -- level < 10 ? " " : "", level, esp, funcname, -+ level < 10 ? " " : "", level, esp, -+ funcname_display(funcname), - strlen(buf) ? buf : "", eip); - - if (bt->flags & BT_LINE_NUMBERS) { -@@ -2236,6 +2514,9 @@ - struct eframe_labels *efp; - struct syment *sp; - -+ if (XEN_HYPER_MODE()) -+ return NULL; /* ODA: need support ? 
*/ -+ - efp = &eframe_labels; - - if (!efp->init) { -@@ -2325,6 +2606,25 @@ - } - - /* -+ * If it makes sense to display a different function/label name -+ * in a stack entry, it can be done here. Unlike eframe_label(), -+ * this routine won't cause the passed-in function name pointer -+ * to be changed -- this is strictly for display purposes only. -+ */ -+char * -+funcname_display(char *funcname) -+{ -+ struct syment *sp; -+ -+ if (STREQ(funcname, "nmi_stack_correct") && -+ (sp = symbol_search("nmi"))) -+ return sp->name; -+ -+ return funcname; -+} -+ -+ -+/* - * Cache 2k starting from the passed-in text address. This sits on top - * of the instrbuf 256-byte cache, but we don't want to extend its size - * because we can run off the end of a module segment -- if this routine -@@ -4858,6 +5158,8 @@ - } else { - codeptr++; - } -+ if (STREQ(op->name, "ud2a")) -+ codeptr += kt->BUG_bytes; - } else { - opcode = *codeptr; - op = &op_386[*codeptr]; ---- crash/netdump.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/netdump.c 2007-07-20 11:50:42.000000000 -0400 -@@ -1,7 +1,7 @@ - /* netdump.c - * -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. - * - * This software may be freely redistributed under the terms of the - * GNU General Public License. -@@ -16,35 +16,9 @@ - #include "defs.h" - #include "netdump.h" - --struct pt_load_segment { -- off_t file_offset; -- physaddr_t phys_start; -- physaddr_t phys_end; --}; -- --struct netdump_data { -- ulong flags; -- int ndfd; -- FILE *ofp; -- uint header_size; -- char *netdump_header; -- uint num_pt_load_segments; -- struct pt_load_segment *pt_load_segments; -- Elf32_Ehdr *elf32; -- Elf32_Phdr *notes32; -- Elf32_Phdr *load32; -- Elf64_Ehdr *elf64; -- Elf64_Phdr *notes64; -- Elf64_Phdr *load64; -- void *nt_prstatus; -- void *nt_prpsinfo; -- void *nt_taskstruct; -- ulong task_struct; -- ulong switch_stack; --}; -- --static struct netdump_data netdump_data = { 0 }; --static struct netdump_data *nd = &netdump_data; -+static struct vmcore_data vmcore_data = { 0 }; -+static struct vmcore_data *nd = &vmcore_data; -+static struct xen_kdump_data xen_kdump_data = { 0 }; - static void netdump_print(char *, ...); - static void dump_Elf32_Ehdr(Elf32_Ehdr *); - static void dump_Elf32_Phdr(Elf32_Phdr *, int); -@@ -52,19 +26,20 @@ - static void dump_Elf64_Ehdr(Elf64_Ehdr *); - static void dump_Elf64_Phdr(Elf64_Phdr *, int); - static size_t dump_Elf64_Nhdr(Elf64_Off offset, int); --static void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); --static void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); - static void get_netdump_regs_ppc64(struct bt_info *, ulong *, ulong *); -+static physaddr_t xen_kdump_p2m(physaddr_t); - - #define ELFSTORE 1 - #define ELFREAD 0 -+ -+#define MIN_PAGE_SIZE (4096) - - /* -- * Determine whether a file is a netdump creation, and if TRUE, -- * initialize the netdump_data structure. -+ * Determine whether a file is a netdump/diskdump/kdump creation, -+ * and if TRUE, initialize the vmcore_data structure. 
- */ - int --is_netdump(char *file, ulong source) -+is_netdump(char *file, ulong source_query) - { - int i; - int fd; -@@ -77,6 +52,8 @@ - size_t size, len, tot; - Elf32_Off offset32; - Elf64_Off offset64; -+ ulong tmp_flags; -+ char *tmp_elf_header; - - if ((fd = open(file, O_RDWR)) < 0) { - if ((fd = open(file, O_RDONLY)) < 0) { -@@ -99,11 +76,24 @@ - goto bailout; - } - -+ tmp_flags = 0; - elf32 = (Elf32_Ehdr *)&header[0]; - elf64 = (Elf64_Ehdr *)&header[0]; - - /* -- * Verify the ELF header -+ * Verify the ELF header, and determine the dumpfile format. -+ * -+ * For now, kdump vmcores differ from netdump/diskdump like so: -+ * -+ * 1. The first kdump PT_LOAD segment is packed just after -+ * the ELF header, whereas netdump/diskdump page-align -+ * the first PT_LOAD segment. -+ * 2. Each kdump PT_LOAD segment has a p_align field of zero, -+ * whereas netdump/diskdump have their p_align fields set -+ * to the system page-size. -+ * -+ * If either kdump difference is seen, presume kdump -- this -+ * is obviously subject to change. - */ - if (STRNEQ(elf32->e_ident, ELFMAG) && - (elf32->e_ident[EI_CLASS] == ELFCLASS32) && -@@ -120,10 +110,16 @@ - default: - goto bailout; - } -- nd->flags |= NETDUMP_ELF32; -+ - load32 = (Elf32_Phdr *) - &header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; - size = (size_t)load32->p_offset; -+ -+ if ((load32->p_offset & (MIN_PAGE_SIZE-1)) && -+ (load32->p_align == 0)) -+ tmp_flags |= KDUMP_ELF32; -+ else -+ tmp_flags |= NETDUMP_ELF32; - } else if (STRNEQ(elf64->e_ident, ELFMAG) && - (elf64->e_ident[EI_CLASS] == ELFCLASS64) && - (elf64->e_ident[EI_VERSION] == EV_CURRENT) && -@@ -153,35 +149,68 @@ - else - goto bailout; - -+ case EM_386: -+ if ((elf64->e_ident[EI_DATA] == ELFDATA2LSB) && -+ machine_type("X86")) -+ break; -+ else -+ goto bailout; -+ - default: - goto bailout; - } -- nd->flags |= NETDUMP_ELF64; -+ - load64 = (Elf64_Phdr *) - &header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; - size = (size_t)load64->p_offset; -+ if ((load64->p_offset & (MIN_PAGE_SIZE-1)) && -+ (load64->p_align == 0)) -+ tmp_flags |= KDUMP_ELF64; -+ else -+ tmp_flags |= NETDUMP_ELF64; - } else - goto bailout; - -- if ((nd->netdump_header = (char *)malloc(size)) == NULL) { -- fprintf(stderr, "cannot malloc netdump header buffer\n"); -+ switch (DUMPFILE_FORMAT(tmp_flags)) -+ { -+ case NETDUMP_ELF32: -+ case NETDUMP_ELF64: -+ if (source_query & (NETDUMP_LOCAL|NETDUMP_REMOTE)) -+ break; -+ else -+ goto bailout; -+ -+ case KDUMP_ELF32: -+ case KDUMP_ELF64: -+ if (source_query & KDUMP_LOCAL) -+ break; -+ else -+ goto bailout; -+ } -+ -+ if ((tmp_elf_header = (char *)malloc(size)) == NULL) { -+ fprintf(stderr, "cannot malloc ELF header buffer\n"); - clean_exit(1); - } - -- if (read(fd, nd->netdump_header, size) != size) { -+ if (read(fd, tmp_elf_header, size) != size) { - sprintf(buf, "%s: read", file); - perror(buf); -+ free(tmp_elf_header); - goto bailout; - } - - nd->ndfd = fd; -- nd->flags |= source; -+ nd->elf_header = tmp_elf_header; -+ nd->flags = tmp_flags; -+ nd->flags |= source_query; - -- switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64)) -+ switch (DUMPFILE_FORMAT(nd->flags)) - { - case NETDUMP_ELF32: -+ case KDUMP_ELF32: - nd->header_size = load32->p_offset; -- nd->elf32 = (Elf32_Ehdr *)&nd->netdump_header[0]; -+ nd->elf32 = (Elf32_Ehdr *)&nd->elf_header[0]; - nd->num_pt_load_segments = nd->elf32->e_phnum - 1; - if ((nd->pt_load_segments = (struct pt_load_segment *) - malloc(sizeof(struct pt_load_segment) * -@@ -190,9 +219,11 @@ - clean_exit(1); - } - nd->notes32 = (Elf32_Phdr *) -- 
&nd->netdump_header[sizeof(Elf32_Ehdr)]; -+ &nd->elf_header[sizeof(Elf32_Ehdr)]; - nd->load32 = (Elf32_Phdr *) -- &nd->netdump_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; -+ &nd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; -+ if (DUMPFILE_FORMAT(nd->flags) == NETDUMP_ELF32) -+ nd->page_size = (uint)nd->load32->p_align; - dump_Elf32_Ehdr(nd->elf32); - dump_Elf32_Phdr(nd->notes32, ELFREAD); - for (i = 0; i < nd->num_pt_load_segments; i++) -@@ -205,8 +236,9 @@ - break; - - case NETDUMP_ELF64: -+ case KDUMP_ELF64: - nd->header_size = load64->p_offset; -- nd->elf64 = (Elf64_Ehdr *)&nd->netdump_header[0]; -+ nd->elf64 = (Elf64_Ehdr *)&nd->elf_header[0]; - nd->num_pt_load_segments = nd->elf64->e_phnum - 1; - if ((nd->pt_load_segments = (struct pt_load_segment *) - malloc(sizeof(struct pt_load_segment) * -@@ -215,9 +247,11 @@ - clean_exit(1); - } - nd->notes64 = (Elf64_Phdr *) -- &nd->netdump_header[sizeof(Elf64_Ehdr)]; -+ &nd->elf_header[sizeof(Elf64_Ehdr)]; - nd->load64 = (Elf64_Phdr *) -- &nd->netdump_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; -+ &nd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; -+ if (DUMPFILE_FORMAT(nd->flags) == NETDUMP_ELF64) -+ nd->page_size = (uint)nd->load64->p_align; - dump_Elf64_Ehdr(nd->elf64); - dump_Elf64_Phdr(nd->notes64, ELFREAD); - for (i = 0; i < nd->num_pt_load_segments; i++) -@@ -230,6 +264,9 @@ - break; - } - -+ if (CRASHDEBUG(1)) -+ netdump_memory_dump(fp); -+ - return nd->header_size; - - bailout: -@@ -238,12 +275,57 @@ - } - - /* -+ * Return the e_version number of an ELF file -+ * (or -1 if its not readable ELF file) -+ */ -+int -+file_elf_version(char *file) -+{ -+ int fd, size; -+ Elf32_Ehdr *elf32; -+ Elf64_Ehdr *elf64; -+ char header[MIN_NETDUMP_ELF_HEADER_SIZE]; -+ char buf[BUFSIZE]; -+ -+ if ((fd = open(file, O_RDONLY)) < 0) { -+ sprintf(buf, "%s: open", file); -+ perror(buf); -+ return -1; -+ } -+ -+ size = MIN_NETDUMP_ELF_HEADER_SIZE; -+ if (read(fd, header, size) != size) { -+ sprintf(buf, "%s: read", file); -+ perror(buf); -+ close(fd); -+ return -1; -+ } -+ close(fd); -+ -+ elf32 = (Elf32_Ehdr *)&header[0]; -+ elf64 = (Elf64_Ehdr *)&header[0]; -+ -+ if (STRNEQ(elf32->e_ident, ELFMAG) && -+ (elf32->e_ident[EI_CLASS] == ELFCLASS32) && -+ (elf32->e_ident[EI_DATA] == ELFDATA2LSB) && -+ (elf32->e_ident[EI_VERSION] == EV_CURRENT)) { -+ return (elf32->e_version); -+ } else if (STRNEQ(elf64->e_ident, ELFMAG) && -+ (elf64->e_ident[EI_CLASS] == ELFCLASS64) && -+ (elf64->e_ident[EI_VERSION] == EV_CURRENT)) { -+ return (elf64->e_version); -+ } -+ -+ return -1; -+} -+ -+/* - * Perform any post-dumpfile determination stuff here. - */ - int - netdump_init(char *unused, FILE *fptr) - { -- if (!NETDUMP_VALID()) -+ if (!VMCORE_VALID()) - return FALSE; - - nd->ofp = fptr; -@@ -263,19 +345,19 @@ - /* - * The Elf32_Phdr has 32-bit fields for p_paddr, p_filesz and - * p_memsz, so for now, multiple PT_LOAD segment support is -- * restricted to 64-bit machines. Until a "standard" becomes -- * available in the future that deals with physical memory -- * segments that start at greater then 4GB, or memory segments -- * sizes that are greater than 4GB (kexec?), then this feature -- * is restricted to 64-bit machines. -+ * restricted to 64-bit machines for netdump/diskdump vmcores. -+ * However, kexec/kdump has introduced the optional use of a -+ * 64-bit ELF header for 32-bit processors. 
- */ -- switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64)) -+ switch (DUMPFILE_FORMAT(nd->flags)) - { - case NETDUMP_ELF32: - offset = (off_t)paddr + (off_t)nd->header_size; - break; - - case NETDUMP_ELF64: -+ case KDUMP_ELF32: -+ case KDUMP_ELF64: - if (nd->num_pt_load_segments == 1) { - offset = (off_t)paddr + (off_t)nd->header_size; - break; -@@ -289,6 +371,11 @@ - pls->file_offset; - break; - } -+ if (pls->zero_fill && (paddr >= pls->phys_end) && -+ (paddr < pls->zero_fill)) { -+ memset(bufptr, 0, cnt); -+ return cnt; -+ } - } - - if (!offset) -@@ -302,24 +389,57 @@ - - if (read(nd->ndfd, bufptr, cnt) != cnt) - return READ_ERROR; -+ - return cnt; - } - - /* -- * Write to a netdump-created dumpfile. -+ * Write to a netdump-created dumpfile. Note that cmd_wr() does not -+ * allow writes to dumpfiles, so you can't get here from there. -+ * But, if it would ever be helpful, here it is... - */ - int - write_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) - { - off_t offset; -+ struct pt_load_segment *pls; -+ int i; - -- offset = (off_t)paddr + (off_t)nd->header_size; -+ switch (DUMPFILE_FORMAT(nd->flags)) -+ { -+ case NETDUMP_ELF32: -+ offset = (off_t)paddr + (off_t)nd->header_size; -+ break; -+ -+ case NETDUMP_ELF64: -+ case KDUMP_ELF32: -+ case KDUMP_ELF64: -+ if (nd->num_pt_load_segments == 1) { -+ offset = (off_t)paddr + (off_t)nd->header_size; -+ break; -+ } - -- if (lseek(nd->ndfd, offset, SEEK_SET) != offset) -+ for (i = offset = 0; i < nd->num_pt_load_segments; i++) { -+ pls = &nd->pt_load_segments[i]; -+ if ((paddr >= pls->phys_start) && -+ (paddr < pls->phys_end)) { -+ offset = (off_t)(paddr - pls->phys_start) + -+ pls->file_offset; -+ break; -+ } -+ } -+ -+ if (!offset) -+ return READ_ERROR; -+ -+ break; -+ } -+ -+ if (lseek(nd->ndfd, offset, SEEK_SET) == -1) - return SEEK_ERROR; - - if (write(nd->ndfd, bufptr, cnt) != cnt) -- return WRITE_ERROR; -+ return READ_ERROR; - - return cnt; - } -@@ -330,7 +450,7 @@ - FILE * - set_netdump_fp(FILE *fp) - { -- if (!NETDUMP_VALID()) -+ if (!VMCORE_VALID()) - return NULL; - - nd->ofp = fp; -@@ -346,7 +466,7 @@ - char buf[BUFSIZE]; - va_list ap; - -- if (!fmt || !strlen(fmt) || !NETDUMP_VALID()) -+ if (!fmt || !strlen(fmt) || !VMCORE_VALID()) - return; - - va_start(ap, fmt); -@@ -362,33 +482,21 @@ - uint - netdump_page_size(void) - { -- uint pagesz; -- -- if (!NETDUMP_VALID()) -+ if (!VMCORE_VALID()) - return 0; - -- switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64)) -- { -- case NETDUMP_ELF32: -- pagesz = (uint)nd->load32->p_align; -- break; -- case NETDUMP_ELF64: -- pagesz = (uint)nd->load64->p_align; -- break; -- } -- -- return pagesz; -+ return nd->page_size; - } - - int - netdump_free_memory(void) - { -- return (NETDUMP_VALID() ? 0 : 0); -+ return (VMCORE_VALID() ? 0 : 0); - } - - int netdump_memory_used(void) - { -- return (NETDUMP_VALID() ? 0 : 0); -+ return (VMCORE_VALID() ? 
0 : 0); - } - - /* -@@ -414,21 +522,57 @@ - #ifdef DAEMON - return nd->task_struct; - #else -- int i; -+ int i, crashing_cpu; - size_t len; - char *user_regs; - ulong ebp, esp, task; - -- if (!NETDUMP_VALID() || !get_active_set()) -- return NO_TASK; -+ if (!VMCORE_VALID() || !get_active_set()) -+ goto panic_task_undetermined; - -- if (nd->task_struct) -+ if (nd->task_struct) { -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "get_netdump_panic_task: NT_TASKSTRUCT: %lx\n", -+ nd->task_struct); - return nd->task_struct; -+ } -+ -+ switch (DUMPFILE_FORMAT(nd->flags)) -+ { -+ case NETDUMP_ELF32: -+ case NETDUMP_ELF64: -+ crashing_cpu = -1; -+ break; -+ -+ case KDUMP_ELF32: -+ case KDUMP_ELF64: -+ crashing_cpu = -1; -+ if (symbol_exists("crashing_cpu")) { -+ get_symbol_data("crashing_cpu", sizeof(int), &i); -+ if ((i >= 0) && (i < nd->num_prstatus_notes)) { -+ crashing_cpu = i; -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "get_netdump_panic_task: crashing_cpu: %d\n", -+ crashing_cpu); -+ } -+ } -+ -+ if ((nd->num_prstatus_notes > 1) && (crashing_cpu == -1)) -+ goto panic_task_undetermined; -+ break; -+ } -+ -+ if (nd->elf32 && (nd->elf32->e_machine == EM_386)) { -+ Elf32_Nhdr *note32; -+ -+ if ((nd->num_prstatus_notes > 1) && (crashing_cpu != -1)) -+ note32 = (Elf32_Nhdr *) -+ nd->nt_prstatus_percpu[crashing_cpu]; -+ else -+ note32 = (Elf32_Nhdr *)nd->nt_prstatus; - -- if (nd->elf32 && nd->elf32->e_machine == EM_386) { -- Elf32_Nhdr *note32 = (Elf32_Nhdr *) -- ((char *)nd->elf32 + nd->notes32->p_offset); -- - len = sizeof(Elf32_Nhdr); - len = roundup(len + note32->n_namesz, 4); - len = roundup(len + note32->n_descsz, 4); -@@ -437,14 +581,15 @@ - - SIZE(user_regs_struct) - sizeof(int); - ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); - esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); -+check_ebp_esp: - if (CRASHDEBUG(1)) -- fprintf(fp, -- "get_netdump_panic_task: esp: %lx ebp: %lx\n", -+ error(INFO, -+ "get_netdump_panic_task: NT_PRSTATUS esp: %lx ebp: %lx\n", - esp, ebp); - if (IS_KVADDR(esp)) { - task = stkptr_to_task(esp); - if (CRASHDEBUG(1)) -- fprintf(fp, -+ error(INFO, - "get_netdump_panic_task: esp: %lx -> task: %lx\n", - esp, task); - for (i = 0; task && (i < NR_CPUS); i++) { -@@ -455,7 +600,7 @@ - if (IS_KVADDR(ebp)) { - task = stkptr_to_task(ebp); - if (CRASHDEBUG(1)) -- fprintf(fp, -+ error(INFO, - "get_netdump_panic_task: ebp: %lx -> task: %lx\n", - ebp, task); - for (i = 0; task && (i < NR_CPUS); i++) { -@@ -464,25 +609,37 @@ - } - } - } else if (nd->elf64) { -- Elf64_Nhdr *note64 = (Elf64_Nhdr *) -- ((char *)nd->elf64 + nd->notes64->p_offset); -- -+ Elf64_Nhdr *note64; -+ -+ if ((nd->num_prstatus_notes > 1) && (crashing_cpu != -1)) -+ note64 = (Elf64_Nhdr *) -+ nd->nt_prstatus_percpu[crashing_cpu]; -+ else -+ note64 = (Elf64_Nhdr *)nd->nt_prstatus; -+ - len = sizeof(Elf64_Nhdr); - len = roundup(len + note64->n_namesz, 4); - user_regs = (char *)((char *)note64 + len + - MEMBER_OFFSET("elf_prstatus", "pr_reg")); -+ -+ if (nd->elf64->e_machine == EM_386) { -+ ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); -+ esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); -+ goto check_ebp_esp; -+ } -+ - if (nd->elf64->e_machine == EM_PPC64) { - /* - * Get the GPR1 register value. 
- */ - esp = *(ulong *)((char *)user_regs + 8); - if (CRASHDEBUG(1)) -- fprintf(fp, -- "get_netdump_panic_task: esp: %lx\n", esp); -+ error(INFO, -+ "get_netdump_panic_task: NT_PRSTATUS esp: %lx\n", esp); - if (IS_KVADDR(esp)) { - task = stkptr_to_task(esp); - if (CRASHDEBUG(1)) -- fprintf(fp, -+ error(INFO, - "get_netdump_panic_task: esp: %lx -> task: %lx\n", - esp, task); - for (i = 0; task && (i < NR_CPUS); i++) { -@@ -493,8 +650,10 @@ - } - } - -+panic_task_undetermined: -+ - if (CRASHDEBUG(1)) -- fprintf(fp, "get_netdump_panic_task: returning NO_TASK\n"); -+ error(INFO, "get_netdump_panic_task: failed\n"); - - return NO_TASK; - #endif -@@ -512,7 +671,7 @@ - return nd->switch_stack; - return 0; - #else -- if (!NETDUMP_VALID() || !get_active_set()) -+ if (!VMCORE_VALID() || !get_active_set()) - return 0; - - if (nd->task_struct == task) -@@ -525,33 +684,75 @@ - int - netdump_memory_dump(FILE *fp) - { -- int i, others; -+ int i, others, wrap, flen; - size_t len, tot; - FILE *fpsave; - Elf32_Off offset32; - Elf32_Off offset64; - struct pt_load_segment *pls; - -- if (!NETDUMP_VALID()) -+ if (!VMCORE_VALID()) - return FALSE; - - fpsave = nd->ofp; - nd->ofp = fp; - -- netdump_print("netdump_data: \n"); -+ netdump_print("vmcore_data: \n"); - netdump_print(" flags: %lx (", nd->flags); - others = 0; - if (nd->flags & NETDUMP_LOCAL) - netdump_print("%sNETDUMP_LOCAL", others++ ? "|" : ""); -+ if (nd->flags & KDUMP_LOCAL) -+ netdump_print("%sKDUMP_LOCAL", others++ ? "|" : ""); - if (nd->flags & NETDUMP_REMOTE) - netdump_print("%sNETDUMP_REMOTE", others++ ? "|" : ""); - if (nd->flags & NETDUMP_ELF32) - netdump_print("%sNETDUMP_ELF32", others++ ? "|" : ""); - if (nd->flags & NETDUMP_ELF64) - netdump_print("%sNETDUMP_ELF64", others++ ? "|" : ""); -+ if (nd->flags & KDUMP_ELF32) -+ netdump_print("%sKDUMP_ELF32", others++ ? "|" : ""); -+ if (nd->flags & KDUMP_ELF64) -+ netdump_print("%sKDUMP_ELF64", others++ ? "|" : ""); - if (nd->flags & PARTIAL_DUMP) - netdump_print("%sPARTIAL_DUMP", others++ ? "|" : ""); - netdump_print(")\n"); -+ if ((pc->flags & RUNTIME) && symbol_exists("dump_level")) { -+ int dump_level; -+ if (readmem(symbol_value("dump_level"), KVADDR, &dump_level, -+ sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) { -+ netdump_print(" dump_level: %d (0x%x) %s", -+ dump_level, dump_level, -+ dump_level > 0 ? "(" : ""); -+ -+#define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/ -+#define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */ -+#define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */ -+#define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */ -+#define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */ -+ -+ others = 0; -+ if (dump_level & DUMP_EXCLUDE_CACHE) -+ netdump_print("%sDUMP_EXCLUDE_CACHE", -+ others++ ? "|" : ""); -+ if (dump_level & DUMP_EXCLUDE_CLEAN) -+ netdump_print("%sDUMP_EXCLUDE_CLEAN", -+ others++ ? "|" : ""); -+ if (dump_level & DUMP_EXCLUDE_FREE) -+ netdump_print("%sDUMP_EXCLUDE_FREE", -+ others++ ? "|" : ""); -+ if (dump_level & DUMP_EXCLUDE_ANON) -+ netdump_print("%sDUMP_EXCLUDE_ANON", -+ others++ ? "|" : ""); -+ if (dump_level & DUMP_SAVE_PRIVATE) -+ netdump_print("%sDUMP_SAVE_PRIVATE", -+ others++ ? "|" : ""); -+ netdump_print("%s\n", dump_level > 0 ? 
")" : ""); -+ } else -+ netdump_print(" dump_level: (unknown)\n"); -+ } else if (!(pc->flags & RUNTIME) && symbol_exists("dump_level")) -+ netdump_print(" dump_level: (undetermined)\n"); -+ - netdump_print(" ndfd: %d\n", nd->ndfd); - netdump_print(" ofp: %lx\n", nd->ofp); - netdump_print(" header_size: %d\n", nd->header_size); -@@ -565,8 +766,10 @@ - pls->phys_start); - netdump_print(" phys_end: %llx\n", - pls->phys_end); -+ netdump_print(" zero_fill: %llx\n", -+ pls->zero_fill); - } -- netdump_print(" netdump_header: %lx\n", nd->netdump_header); -+ netdump_print(" elf_header: %lx\n", nd->elf_header); - netdump_print(" elf32: %lx\n", nd->elf32); - netdump_print(" notes32: %lx\n", nd->notes32); - netdump_print(" load32: %lx\n", nd->load32); -@@ -577,11 +780,66 @@ - netdump_print(" nt_prpsinfo: %lx\n", nd->nt_prpsinfo); - netdump_print(" nt_taskstruct: %lx\n", nd->nt_taskstruct); - netdump_print(" task_struct: %lx\n", nd->task_struct); -- netdump_print(" switch_stack: %lx\n\n", nd->switch_stack); -+ netdump_print(" page_size: %d\n", nd->page_size); -+ netdump_print(" switch_stack: %lx\n", nd->switch_stack); -+ netdump_print(" xen_kdump_data: %s\n", -+ XEN_CORE_DUMPFILE() ? " " : "(unused)"); -+ if (XEN_CORE_DUMPFILE()) { -+ netdump_print(" flags: %lx (", nd->xen_kdump_data->flags); -+ others = 0; -+ if (nd->xen_kdump_data->flags & KDUMP_P2M_INIT) -+ netdump_print("%sKDUMP_P2M_INIT", others++ ? "|" : ""); -+ if (nd->xen_kdump_data->flags & KDUMP_CR3) -+ netdump_print("%sKDUMP_CR3", others++ ? "|" : ""); -+ if (nd->xen_kdump_data->flags & KDUMP_MFN_LIST) -+ netdump_print("%sKDUMP_MFN_LIST", others++ ? "|" : ""); -+ netdump_print(")\n"); -+ netdump_print(" p2m_mfn: %lx\n", -+ nd->xen_kdump_data->p2m_mfn); -+ netdump_print(" cr3: %lx\n", -+ nd->xen_kdump_data->cr3); -+ netdump_print(" last_mfn_read: %lx\n", -+ nd->xen_kdump_data->last_mfn_read); -+ netdump_print(" last_pmd_read: %lx\n", -+ nd->xen_kdump_data->last_pmd_read); -+ netdump_print(" page: %lx\n", -+ nd->xen_kdump_data->page); -+ netdump_print(" accesses: %ld\n", -+ nd->xen_kdump_data->accesses); -+ netdump_print(" cache_hits: %ld ", -+ nd->xen_kdump_data->cache_hits); -+ if (nd->xen_kdump_data->accesses) -+ netdump_print("(%ld%%)", -+ nd->xen_kdump_data->cache_hits * 100 / nd->xen_kdump_data->accesses); -+ netdump_print("\n p2m_frames: %d\n", -+ nd->xen_kdump_data->p2m_frames); -+ netdump_print(" p2m_mfn_frame_list: %lx\n", -+ nd->xen_kdump_data->p2m_mfn_frame_list); -+ for (i = 0; i < nd->xen_kdump_data->p2m_frames; i++) -+ netdump_print("%lx ", -+ nd->xen_kdump_data->p2m_mfn_frame_list[i]); -+ if (i) netdump_print("\n"); -+ } -+ netdump_print(" num_prstatus_notes: %d\n", nd->num_prstatus_notes); -+ netdump_print(" nt_prstatus_percpu: "); -+ wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; -+ flen = sizeof(void *) == SIZEOF_32BIT ? 
8 : 16; -+ if (nd->num_prstatus_notes == 1) -+ netdump_print("%.*lx\n", flen, nd->nt_prstatus_percpu[0]); -+ else { -+ for (i = 0; i < nd->num_prstatus_notes; i++) { -+ if ((i % wrap) == 0) -+ netdump_print("\n "); -+ netdump_print("%.*lx ", flen, -+ nd->nt_prstatus_percpu[i]); -+ } -+ } -+ netdump_print("\n\n"); - -- switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64)) -+ switch (DUMPFILE_FORMAT(nd->flags)) - { - case NETDUMP_ELF32: -+ case KDUMP_ELF32: - dump_Elf32_Ehdr(nd->elf32); - dump_Elf32_Phdr(nd->notes32, ELFREAD); - for (i = 0; i < nd->num_pt_load_segments; i++) -@@ -594,6 +852,7 @@ - break; - - case NETDUMP_ELF64: -+ case KDUMP_ELF64: - dump_Elf64_Ehdr(nd->elf64); - dump_Elf64_Phdr(nd->notes64, ELFREAD); - for (i = 0; i < nd->num_pt_load_segments; i++) -@@ -865,6 +1124,9 @@ - netdump_print(" e_machine: %d ", elf->e_machine); - switch (elf->e_machine) - { -+ case EM_386: -+ netdump_print("(EM_386)\n"); -+ break; - case EM_IA_64: - netdump_print("(EM_IA_64)\n"); - break; -@@ -961,8 +1223,11 @@ - pls->phys_start = prog->p_paddr; - netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, - prog->p_filesz); -- if (store_pt_load_data) -+ if (store_pt_load_data) { - pls->phys_end = pls->phys_start + prog->p_filesz; -+ pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? -+ 0 : pls->phys_start + prog->p_memsz; -+ } - netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, - prog->p_memsz); - netdump_print(" p_flags: %lx (", prog->p_flags); -@@ -1040,8 +1305,11 @@ - pls->phys_start = prog->p_paddr; - netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, - prog->p_filesz); -- if (store_pt_load_data) -+ if (store_pt_load_data) { - pls->phys_end = pls->phys_start + prog->p_filesz; -+ pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? -+ 0 : pls->phys_start + prog->p_memsz; -+ } - netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, - prog->p_memsz); - netdump_print(" p_flags: %lx (", prog->p_flags); -@@ -1061,20 +1329,22 @@ - */ - - static size_t --dump_Elf32_Nhdr(Elf32_Off offset, int store_addresses) -+dump_Elf32_Nhdr(Elf32_Off offset, int store) - { -- int i, lf; -+ int i, lf, words; - Elf32_Nhdr *note; - size_t len; - char buf[BUFSIZE]; - char *ptr; - ulong *uptr; -+ int xen_core; - - note = (Elf32_Nhdr *)((char *)nd->elf32 + offset); - - netdump_print("Elf32_Nhdr:\n"); - netdump_print(" n_namesz: %ld ", note->n_namesz); - BZERO(buf, BUFSIZE); -+ xen_core = FALSE; - ptr = (char *)note + sizeof(Elf32_Nhdr); - BCOPY(ptr, buf, note->n_namesz); - netdump_print("(\"%s\")\n", buf); -@@ -1085,17 +1355,26 @@ - { - case NT_PRSTATUS: - netdump_print("(NT_PRSTATUS)\n"); -- if (store_addresses) -- nd->nt_prstatus = (void *)note; -+ if (store) { -+ if (!nd->nt_prstatus) -+ nd->nt_prstatus = (void *)note; -+ for (i = 0; i < NR_CPUS; i++) { -+ if (!nd->nt_prstatus_percpu[i]) { -+ nd->nt_prstatus_percpu[i] = (void *)note; -+ nd->num_prstatus_notes++; -+ break; -+ } -+ } -+ } - break; - case NT_PRPSINFO: - netdump_print("(NT_PRPSINFO)\n"); -- if (store_addresses) -+ if (store) - nd->nt_prpsinfo = (void *)note; - break; - case NT_TASKSTRUCT: - netdump_print("(NT_TASKSTRUCT)\n"); -- if (store_addresses) { -+ if (store) { - nd->nt_taskstruct = (void *)note; - nd->task_struct = *((ulong *)(ptr + note->n_namesz)); - nd->switch_stack = *((ulong *) -@@ -1105,14 +1384,103 @@ - case NT_DISKDUMP: - netdump_print("(NT_DISKDUMP)\n"); - uptr = (ulong *)(ptr + note->n_namesz); -- if (*uptr) -+ if (*uptr && store) - nd->flags |= PARTIAL_DUMP; - break; -+#ifdef NOTDEF -+ /* -+ * Note: Based upon the original, 
abandoned, proposal for -+ * its contents -- keep around for potential future use. -+ */ -+ case NT_KDUMPINFO: -+ netdump_print("(NT_KDUMPINFO)\n"); -+ if (store) { -+ uptr = (note->n_namesz == 5) ? -+ (ulong *)(ptr + ((note->n_namesz + 3) & ~3)) : -+ (ulong *)(ptr + note->n_namesz); -+ nd->page_size = (uint)(1 << *uptr); -+ uptr++; -+ nd->task_struct = *uptr; -+ } -+ break; -+#endif - default: -- netdump_print("(?)\n"); -+ xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); -+ if (xen_core) { -+ netdump_print("(unknown Xen n_type)\n"); -+ if (store) -+ error(WARNING, "unknown Xen n_type: %lx\n\n", -+ note->n_type); -+ } else -+ netdump_print("(?)\n"); -+ break; -+ -+ case NT_XEN_KDUMP_CR3: -+ netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); -+ if (store) -+ error(WARNING, -+ "obsolete Xen n_type: %lx (NT_XEN_KDUMP_CR3)\n\n", -+ note->n_type); -+ /* FALL THROUGH */ -+ -+ case XEN_ELFNOTE_CRASH_INFO: -+ /* -+ * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure -+ */ -+ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) -+ netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); -+ xen_core = TRUE; -+ if (store) { -+ pc->flags |= XEN_CORE; -+ nd->xen_kdump_data = &xen_kdump_data; -+ nd->xen_kdump_data->last_mfn_read = UNINITIALIZED; -+ nd->xen_kdump_data->last_pmd_read = UNINITIALIZED; -+ -+ if ((note->n_type == NT_XEN_KDUMP_CR3) && -+ ((note->n_descsz/sizeof(ulong)) == 1)) { -+ nd->xen_kdump_data->flags |= KDUMP_CR3; -+ /* -+ * Use the first cr3 found. -+ */ -+ if (!nd->xen_kdump_data->cr3) { -+ uptr = (ulong *)(ptr + note->n_namesz); -+ uptr = (ulong *)roundup((ulong)uptr, 4); -+ nd->xen_kdump_data->cr3 = *uptr; -+ } -+ } else { -+ nd->xen_kdump_data->flags |= KDUMP_MFN_LIST; -+ uptr = (ulong *)(ptr + note->n_namesz); -+ uptr = (ulong *)roundup((ulong)uptr, 4); -+ words = note->n_descsz/sizeof(ulong); -+ /* -+ * If already set, overridden with --pfm_mfn -+ */ -+ if (!nd->xen_kdump_data->p2m_mfn) -+ nd->xen_kdump_data->p2m_mfn = *(uptr+(words-1)); -+ } -+ } -+ break; -+ -+ case XEN_ELFNOTE_CRASH_REGS: -+ /* -+ * x86 and x86_64: cr0, cr2, cr3, cr4 -+ */ -+ xen_core = TRUE; -+ netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); -+ break; - } - - uptr = (ulong *)(ptr + note->n_namesz); -+ -+ /* -+ * kdumps are off-by-1, because their n_namesz is 5 for "CORE". 
-+ */ -+ if ((nd->flags & KDUMP_ELF32) && (note->n_namesz == 5)) -+ uptr = (ulong *)(ptr + ((note->n_namesz + 3) & ~3)); -+ -+ if (xen_core) -+ uptr = (ulong *)roundup((ulong)uptr, 4); -+ - for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { - if (((i%4)==0)) { - netdump_print("%s ", -@@ -1123,7 +1491,7 @@ - netdump_print("%08lx ", *uptr++); - } - if (!lf || (note->n_type == NT_TASKSTRUCT) || -- (note->n_type == NT_DISKDUMP)) -+ (note->n_type == NT_DISKDUMP) || xen_core) - netdump_print("\n"); - - len = sizeof(Elf32_Nhdr); -@@ -1135,15 +1503,17 @@ - - - static size_t --dump_Elf64_Nhdr(Elf64_Off offset, int store_addresses) -+dump_Elf64_Nhdr(Elf64_Off offset, int store) - { -- int i, lf; -+ int i, lf, words; - Elf64_Nhdr *note; - size_t len; - char buf[BUFSIZE]; - char *ptr; - ulonglong *uptr; - int *iptr; -+ ulong *up; -+ int xen_core; - - note = (Elf64_Nhdr *)((char *)nd->elf64 + offset); - -@@ -1151,6 +1521,7 @@ - netdump_print(" n_namesz: %ld ", note->n_namesz); - BZERO(buf, BUFSIZE); - ptr = (char *)note + sizeof(Elf64_Nhdr); -+ xen_core = FALSE; - BCOPY(ptr, buf, note->n_namesz); - netdump_print("(\"%s\")\n", buf); - -@@ -1160,17 +1531,26 @@ - { - case NT_PRSTATUS: - netdump_print("(NT_PRSTATUS)\n"); -- if (store_addresses) -- nd->nt_prstatus = (void *)note; -+ if (store) { -+ if (!nd->nt_prstatus) -+ nd->nt_prstatus = (void *)note; -+ for (i = 0; i < NR_CPUS; i++) { -+ if (!nd->nt_prstatus_percpu[i]) { -+ nd->nt_prstatus_percpu[i] = (void *)note; -+ nd->num_prstatus_notes++; -+ break; -+ } -+ } -+ } - break; - case NT_PRPSINFO: - netdump_print("(NT_PRPSINFO)\n"); -- if (store_addresses) -+ if (store) - nd->nt_prpsinfo = (void *)note; - break; - case NT_TASKSTRUCT: - netdump_print("(NT_TASKSTRUCT)\n"); -- if (store_addresses) { -+ if (store) { - nd->nt_taskstruct = (void *)note; - nd->task_struct = *((ulong *)(ptr + note->n_namesz)); - nd->switch_stack = *((ulong *) -@@ -1180,24 +1560,137 @@ - case NT_DISKDUMP: - netdump_print("(NT_DISKDUMP)\n"); - iptr = (int *)(ptr + note->n_namesz); -- if (*iptr) -+ if (*iptr && store) - nd->flags |= PARTIAL_DUMP; - if (note->n_descsz < sizeof(ulonglong)) - netdump_print(" %08x", *iptr); - break; -+#ifdef NOTDEF -+ /* -+ * Note: Based upon the original, abandoned, proposal for -+ * its contents -- keep around for potential future use. -+ */ -+ case NT_KDUMPINFO: -+ netdump_print("(NT_KDUMPINFO)\n"); -+ if (store) { -+ uint32_t *u32ptr; -+ -+ if (nd->elf64->e_machine == EM_386) { -+ u32ptr = (note->n_namesz == 5) ? -+ (uint *)(ptr + ((note->n_namesz + 3) & ~3)) : -+ (uint *)(ptr + note->n_namesz); -+ nd->page_size = 1 << *u32ptr; -+ u32ptr++; -+ nd->task_struct = *u32ptr; -+ } else { -+ uptr = (note->n_namesz == 5) ? 
-+ (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)) : -+ (ulonglong *)(ptr + note->n_namesz); -+ nd->page_size = (uint)(1 << *uptr); -+ uptr++; -+ nd->task_struct = *uptr; -+ } -+ } -+ break; -+#endif - default: -- netdump_print("(?)\n"); -+ xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); -+ if (xen_core) { -+ netdump_print("(unknown Xen n_type)\n"); -+ if (store) -+ error(WARNING, -+ "unknown Xen n_type: %lx\n\n", note->n_type); -+ } else -+ netdump_print("(?)\n"); -+ break; -+ -+ case NT_XEN_KDUMP_CR3: -+ netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); -+ if (store) -+ error(WARNING, -+ "obsolete Xen n_type: %lx (NT_XEN_KDUMP_CR3)\n\n", -+ note->n_type); -+ /* FALL THROUGH */ -+ -+ case XEN_ELFNOTE_CRASH_INFO: -+ /* -+ * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure -+ */ -+ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) -+ netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); -+ xen_core = TRUE; -+ if (store) { -+ pc->flags |= XEN_CORE; -+ nd->xen_kdump_data = &xen_kdump_data; -+ nd->xen_kdump_data->last_mfn_read = UNINITIALIZED; -+ nd->xen_kdump_data->last_pmd_read = UNINITIALIZED; -+ -+ if ((note->n_type == NT_XEN_KDUMP_CR3) && -+ ((note->n_descsz/sizeof(ulong)) == 1)) { -+ nd->xen_kdump_data->flags |= KDUMP_CR3; -+ /* -+ * Use the first cr3 found. -+ */ -+ if (!nd->xen_kdump_data->cr3) { -+ up = (ulong *)(ptr + note->n_namesz); -+ up = (ulong *)roundup((ulong)up, 4); -+ nd->xen_kdump_data->cr3 = *up; -+ } -+ } else { -+ nd->xen_kdump_data->flags |= KDUMP_MFN_LIST; -+ up = (ulong *)(ptr + note->n_namesz); -+ up = (ulong *)roundup((ulong)up, 4); -+ words = note->n_descsz/sizeof(ulong); -+ /* -+ * If already set, overridden with --p2m_mfn -+ */ -+ if (!nd->xen_kdump_data->p2m_mfn) -+ nd->xen_kdump_data->p2m_mfn = *(up+(words-1)); -+ } -+ } -+ break; -+ -+ case XEN_ELFNOTE_CRASH_REGS: -+ /* -+ * x86 and x86_64: cr0, cr2, cr3, cr4 -+ */ -+ xen_core = TRUE; -+ netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); -+ break; - } - - uptr = (ulonglong *)(ptr + note->n_namesz); -- for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) { -- if (((i%2)==0)) { -- netdump_print("%s ", -- i ? "\n" : ""); -- lf++; -- } else -- lf = 0; -- netdump_print("%016llx ", *uptr++); -+ -+ /* -+ * kdumps are off-by-1, because their n_namesz is 5 for "CORE". -+ */ -+ if ((nd->flags & KDUMP_ELF64) && (note->n_namesz == 5)) -+ uptr = (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)); -+ -+ if (xen_core) -+ uptr = (ulonglong *)roundup((ulong)uptr, 4); -+ -+ if (BITS32() && (xen_core || (note->n_type == NT_PRSTATUS))) { -+ iptr = (int *)uptr; -+ for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { -+ if (((i%4)==0)) { -+ netdump_print("%s ", -+ i ? "\n" : ""); -+ lf++; -+ } else -+ lf = 0; -+ netdump_print("%08lx ", *iptr++); -+ } -+ } else { -+ for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) { -+ if (((i%2)==0)) { -+ netdump_print("%s ", -+ i ? 
"\n" : ""); -+ lf++; -+ } else -+ lf = 0; -+ netdump_print("%016llx ", *uptr++); -+ } - } - if (!lf) - netdump_print("\n"); -@@ -1251,39 +1744,71 @@ - - default: - error(FATAL, -- "netdump support for ELF machine type %d not available\n", -+ "support for ELF machine type %d not available\n", - e_machine); - } - } - --static void -+struct x86_64_user_regs_struct { -+ unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; -+ unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; -+ unsigned long rip,cs,eflags; -+ unsigned long rsp,ss; -+ unsigned long fs_base, gs_base; -+ unsigned long ds,es,fs,gs; -+}; -+#define offsetof(TYPE, MEMBER) ((ulong)&((TYPE *)0)->MEMBER) -+ -+void - get_netdump_regs_x86_64(struct bt_info *bt, ulong *ripp, ulong *rspp) - { - Elf64_Nhdr *note; - size_t len; - char *user_regs; -- ulong rsp, rip; -+ ulong regs_size, rsp_offset, rip_offset; - - if (is_task_active(bt->task)) - bt->flags |= BT_DUMPFILE_SEARCH; - -- if (VALID_STRUCT(user_regs_struct) && (bt->task == tt->panic_task)) { -- note = (Elf64_Nhdr *)nd->nt_prstatus; -+ if (((NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) && -+ VALID_STRUCT(user_regs_struct) && (bt->task == tt->panic_task)) || -+ (KDUMP_DUMPFILE() && (kt->flags & DWARF_UNWIND) && -+ (bt->flags & BT_DUMPFILE_SEARCH))) { -+ if (nd->num_prstatus_notes > 1) -+ note = (Elf64_Nhdr *) -+ nd->nt_prstatus_percpu[bt->tc->processor]; -+ else -+ note = (Elf64_Nhdr *)nd->nt_prstatus; - - len = sizeof(Elf64_Nhdr); - len = roundup(len + note->n_namesz, 4); - len = roundup(len + note->n_descsz, 4); - -- user_regs = ((char *)note + len) -- - SIZE(user_regs_struct) - sizeof(long); -+ regs_size = VALID_STRUCT(user_regs_struct) ? -+ SIZE(user_regs_struct) : -+ sizeof(struct x86_64_user_regs_struct); -+ rsp_offset = VALID_MEMBER(user_regs_struct_rsp) ? -+ OFFSET(user_regs_struct_rsp) : -+ offsetof(struct x86_64_user_regs_struct, rsp); -+ rip_offset = VALID_MEMBER(user_regs_struct_rip) ? -+ OFFSET(user_regs_struct_rip) : -+ offsetof(struct x86_64_user_regs_struct, rip); -+ -+ user_regs = ((char *)note + len) - regs_size - sizeof(long); - -- if (CRASHDEBUG(1)) { -- rsp = ULONG(user_regs + OFFSET(user_regs_struct_rsp)); -- rip = ULONG(user_regs + OFFSET(user_regs_struct_rip)); -+ if (CRASHDEBUG(1)) - netdump_print("ELF prstatus rsp: %lx rip: %lx\n", -- rsp, rip); -- } -+ ULONG(user_regs + rsp_offset), -+ ULONG(user_regs + rip_offset)); -+ -+ if (KDUMP_DUMPFILE()) { -+ *rspp = ULONG(user_regs + rsp_offset); -+ *ripp = ULONG(user_regs + rip_offset); - -+ if (*ripp && *rspp) -+ return; -+ } -+ - bt->machdep = (void *)user_regs; - } - -@@ -1295,13 +1820,14 @@ - * the raw stack for some reasonable hooks. - */ - --static void -+void - get_netdump_regs_x86(struct bt_info *bt, ulong *eip, ulong *esp) - { -- int i, search, panic; -+ int i, search, panic, panic_task; - char *sym; - ulong *up; - ulong ipintr_eip, ipintr_esp, ipintr_func; -+ ulong halt_eip, halt_esp; - int check_hardirq, check_softirq; - - if (!is_task_active(bt->task)) { -@@ -1309,17 +1835,31 @@ - return; - } - -+ panic_task = tt->panic_task == bt->task ? TRUE : FALSE; -+ - ipintr_eip = ipintr_esp = ipintr_func = panic = 0; -+ halt_eip = halt_esp = 0; - check_hardirq = check_softirq = tt->flags & IRQSTACKS ? 
TRUE : FALSE; - search = ((bt->flags & BT_TEXT_SYMBOLS) && (tt->flags & TASK_INIT_DONE)) - || (machdep->flags & OMIT_FRAME_PTR); -- - retry: - for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){ - sym = closest_symbol(*up); -- if (STREQ(sym, "netconsole_netdump") || -+ -+ if (XEN_CORE_DUMPFILE()) { -+ if (STREQ(sym, "xen_machine_kexec")) { -+ *eip = *up; -+ *esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); -+ return; -+ } -+ if (STREQ(sym, "crash_kexec")) { -+ halt_eip = *up; -+ halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); -+ } -+ } else if (STREQ(sym, "netconsole_netdump") || - STREQ(sym, "netpoll_start_netdump") || - STREQ(sym, "start_disk_dump") || -+ STREQ(sym, "crash_kexec") || - STREQ(sym, "disk_dump")) { - *eip = *up; - *esp = search ? -@@ -1354,7 +1894,7 @@ - next_sysrq: - *eip = *up; - *esp = bt->stackbase + ((char *)(up+4) - bt->stackbuf); -- machdep->flags |= SYSRQ; -+ pc->flags |= SYSRQ; - for (i++, up++; i < LONGS_PER_STACK; i++, up++) { - sym = closest_symbol(*up); - if (STREQ(sym, "sysrq_handle_crash")) -@@ -1371,7 +1911,15 @@ - *esp = search ? - bt->stackbase + ((char *)(up+1) - bt->stackbuf) : - *(up-1); -- machdep->flags |= SYSRQ; -+ pc->flags |= SYSRQ; -+ return; -+ } -+ -+ if (STREQ(sym, "crash_nmi_callback")) { -+ *eip = *up; -+ *esp = search ? -+ bt->stackbase + ((char *)(up+1) - bt->stackbuf) : -+ *(up-1); - return; - } - -@@ -1385,6 +1933,18 @@ - bt->stackbase + ((char *)(up-1) - bt->stackbuf); - ipintr_func = *(up - 2); - } -+ -+ if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && -+ STREQ(sym, "safe_halt")) { -+ halt_eip = *up; -+ halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); -+ } -+ -+ if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && -+ !halt_eip && STREQ(sym, "xen_idle")) { -+ halt_eip = *up; -+ halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); -+ } - } - - if (ipintr_eip) { -@@ -1418,7 +1978,15 @@ - goto retry; - } - -- console("get_netdump_regs_x86: cannot find anything useful\n"); -+ if (halt_eip && halt_esp) { -+ *eip = halt_eip; -+ *esp = halt_esp; -+ return; -+ } -+ -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "get_netdump_regs_x86: cannot find anything useful (task: %lx)\n", bt->task); - - machdep->get_stack_frame(bt, eip, esp); - } -@@ -1429,8 +1997,18 @@ - Elf64_Nhdr *note; - size_t len; - -- if (bt->task == tt->panic_task) { -- note = (Elf64_Nhdr *)nd->nt_prstatus; -+ if ((bt->task == tt->panic_task) || -+ (is_task_active(bt->task) && nd->num_prstatus_notes > 1)) { -+ /* -+ * Registers are saved during the dump process for the -+ * panic task. Whereas in kdump, regs are captured for all -+ * CPUs if they responded to an IPI. -+ */ -+ if (nd->num_prstatus_notes > 1) -+ note = (Elf64_Nhdr *) -+ nd->nt_prstatus_percpu[bt->tc->processor]; -+ else -+ note = (Elf64_Nhdr *)nd->nt_prstatus; - - len = sizeof(Elf64_Nhdr); - len = roundup(len + note->n_namesz, 4); -@@ -1446,3 +2024,205 @@ - { - return (nd->flags & PARTIAL_DUMP ? TRUE : FALSE); - } -+ -+ -+/* -+ * kexec/kdump generated vmcore files are similar enough in -+ * nature to netdump/diskdump such that most vmcore access -+ * functionality may be borrowed from the equivalent netdump -+ * function. If not, re-work them here. 
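A side note on the stack-scan heuristic in get_netdump_regs_x86() above: every saved stack word is resolved to its closest symbol and compared against a set of known dump-entry routines (crash_kexec, xen_machine_kexec, sysrq_handle_crash, crash_nmi_callback), with safe_halt/xen_idle hits kept only as a fallback for idle vcpus. The standalone sketch below mirrors the general pattern (it does not reproduce the Xen-specific ordering); closest_symbol_stub() and the addresses are illustrative stand-ins for crash's closest_symbol(), not code from the patch.

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for crash's closest_symbol(). */
    static const char *closest_symbol_stub(unsigned long addr)
    {
        if (addr == 0xc0101000UL) return "crash_kexec";
        if (addr == 0xc0102000UL) return "safe_halt";
        return "unknown";
    }

    /*
     * Walk the saved stack words; return the first address whose symbol is a
     * primary dump-entry point, falling back to a halt/idle hit if none match.
     */
    static int find_dump_entry(const unsigned long *stack, int nwords,
                               unsigned long *eip, unsigned long *esp_index)
    {
        unsigned long halt_eip = 0, halt_idx = 0;
        int i;

        for (i = 0; i < nwords; i++) {
            const char *sym = closest_symbol_stub(stack[i]);

            if (!strcmp(sym, "crash_kexec") ||
                !strcmp(sym, "sysrq_handle_crash") ||
                !strcmp(sym, "crash_nmi_callback")) {
                *eip = stack[i];
                *esp_index = i + 1;    /* stack slot just above the hit */
                return 1;
            }
            if (!strcmp(sym, "safe_halt") || !strcmp(sym, "xen_idle")) {
                halt_eip = stack[i];
                halt_idx = i + 1;
            }
        }
        if (halt_eip) {
            *eip = halt_eip;
            *esp_index = halt_idx;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned long stack[] = { 0x1234, 0xc0102000UL, 0x5678, 0xc0101000UL };
        unsigned long eip, idx;

        if (find_dump_entry(stack, 4, &eip, &idx))
            printf("eip %lx found at stack slot %lu\n", eip, idx);
        return 0;
    }
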
-+ */ -+int -+is_kdump(char *file, ulong source_query) -+{ -+ return is_netdump(file, source_query); -+} -+ -+int -+kdump_init(char *unused, FILE *fptr) -+{ -+ return netdump_init(unused, fptr); -+} -+ -+ulong -+get_kdump_panic_task(void) -+{ -+ return get_netdump_panic_task(); -+} -+ -+int -+read_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) -+{ -+ if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) { -+ if (!(nd->xen_kdump_data->flags & KDUMP_P2M_INIT)) { -+ if (!machdep->xen_kdump_p2m_create) -+ error(FATAL, -+ "xen kdump dumpfiles not supported on this architecture\n"); -+ -+ if ((nd->xen_kdump_data->page = -+ (char *)malloc(PAGESIZE())) == NULL) -+ error(FATAL, -+ "cannot malloc xen kdump data page\n"); -+ -+ if (!machdep->xen_kdump_p2m_create(nd->xen_kdump_data)) -+ error(FATAL, -+ "cannot create xen kdump pfn-to-mfn mapping\n"); -+ -+ nd->xen_kdump_data->flags |= KDUMP_P2M_INIT; -+ } -+ -+ if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE) -+ return READ_ERROR; -+ } -+ -+ return read_netdump(fd, bufptr, cnt, addr, paddr); -+} -+ -+int -+write_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) -+{ -+ return write_netdump(fd, bufptr, cnt, addr, paddr); -+} -+ -+void -+get_kdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) -+{ -+ get_netdump_regs(bt, eip, esp); -+} -+ -+uint -+kdump_page_size(void) -+{ -+ uint pagesz; -+ -+ if (!VMCORE_VALID()) -+ return 0; -+ -+ if (!(pagesz = nd->page_size)) -+ pagesz = (uint)getpagesize(); -+ -+ return pagesz; -+} -+ -+int -+kdump_free_memory(void) -+{ -+ return netdump_free_memory(); -+} -+ -+int -+kdump_memory_used(void) -+{ -+ return netdump_memory_used(); -+} -+ -+int -+kdump_memory_dump(FILE *fp) -+{ -+ return netdump_memory_dump(fp); -+} -+ -+/* -+ * Translate a xen domain's pseudo-physical address into the -+ * xen machine address. Since there's no compression involved, -+ * just the last phys_to_machine_mapping[] page read is cached, -+ * which essentially caches 1024 p2m translations. -+ */ -+static physaddr_t -+xen_kdump_p2m(physaddr_t pseudo) -+{ -+ ulong pfn, mfn_frame; -+ ulong *mfnptr; -+ ulong mfn_idx, frame_idx; -+ physaddr_t paddr; -+ struct xen_kdump_data *xkd = nd->xen_kdump_data; -+ -+ if (pc->curcmd_flags & XEN_MACHINE_ADDR) -+ return pseudo; -+ -+#ifdef IA64 -+ return ia64_xen_kdump_p2m(xkd, pseudo); -+#endif -+ -+ xkd->accesses++; -+ -+ pfn = (ulong)BTOP(pseudo); -+ mfn_idx = pfn / (PAGESIZE()/sizeof(ulong)); -+ frame_idx = pfn % (PAGESIZE()/sizeof(ulong)); -+ if (mfn_idx >= xkd->p2m_frames) -+ return P2M_FAILURE; -+ mfn_frame = xkd->p2m_mfn_frame_list[mfn_idx]; -+ -+ if (mfn_frame == xkd->last_mfn_read) -+ xkd->cache_hits++; -+ else if (read_netdump(0, xkd->page, PAGESIZE(), 0, -+ (physaddr_t)PTOB(mfn_frame)) != PAGESIZE()) -+ return P2M_FAILURE; -+ -+ xkd->last_mfn_read = mfn_frame; -+ -+ mfnptr = ((ulong *)(xkd->page)) + frame_idx; -+ paddr = (physaddr_t)PTOB((ulonglong)(*mfnptr)); -+ paddr |= PAGEOFFSET(pseudo); -+ -+ if (CRASHDEBUG(7)) -+ fprintf(fp, -+ "xen_dump_p2m(%llx): mfn_idx: %ld frame_idx: %ld" -+ " mfn_frame: %lx mfn: %lx => %llx\n", -+ (ulonglong)pseudo, mfn_idx, frame_idx, -+ mfn_frame, *mfnptr, (ulonglong)paddr); -+ -+ return paddr; -+} -+ -+struct vmcore_data * -+get_kdump_vmcore_data(void) -+{ -+ if (!VMCORE_VALID() || !KDUMP_DUMPFILE()) -+ return NULL; -+ -+ return &vmcore_data; -+} -+ -+/* -+ * Override the dom0 p2m mfn in the XEN_ELFNOTE_CRASH_INFO note -+ * in order to initiate a crash session of a guest kernel. 
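To make the translation above concrete: xen_kdump_p2m() splits a guest pseudo-physical pfn into a frame-list index and a slot within that frame, so a single cached phys_to_machine_mapping[] page serves PAGESIZE()/sizeof(ulong) consecutive translations (1024 with 4-byte longs, as the comment notes, or 512 with 8-byte longs). A minimal sketch of that arithmetic, assuming a 4096-byte page; the names here are illustrative:

    #include <stdio.h>

    #define PAGE_SZ     4096UL
    #define PTRS_PER_PG (PAGE_SZ / sizeof(unsigned long))  /* 512 with 8-byte longs */

    /* Split a pfn the way xen_kdump_p2m() does before consulting the frame list. */
    static void p2m_indices(unsigned long pfn,
                            unsigned long *mfn_idx, unsigned long *frame_idx)
    {
        *mfn_idx   = pfn / PTRS_PER_PG;    /* which p2m frame holds the entry */
        *frame_idx = pfn % PTRS_PER_PG;    /* slot within that frame */
    }

    int main(void)
    {
        unsigned long pfn = 0x12345, mfn_idx, frame_idx;

        p2m_indices(pfn, &mfn_idx, &frame_idx);
        printf("pfn %#lx -> p2m frame %lu, entry %lu\n", pfn, mfn_idx, frame_idx);
        /* Neighboring pfns with the same mfn_idx are served from the cached page. */
        return 0;
    }
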
-+ */ -+void -+xen_kdump_p2m_mfn(char *arg) -+{ -+ ulong value; -+ int errflag; -+ -+ errflag = 0; -+ value = htol(arg, RETURN_ON_ERROR|QUIET, &errflag); -+ if (!errflag) { -+ xen_kdump_data.p2m_mfn = value; -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "xen_kdump_data.p2m_mfn override: %lx\n", -+ value); -+ } else -+ error(WARNING, "invalid p2m_mfn argument: %s\n", arg); -+} -+ -+/* -+ * Fujitsu dom0/HV sadump-generated dumpfile, which requires -+ * the --p2m_mfn command line argument. -+ */ -+int -+is_sadump_xen(void) -+{ -+ if (xen_kdump_data.p2m_mfn) { -+ if (!XEN_CORE_DUMPFILE()) { -+ pc->flags |= XEN_CORE; -+ nd->xen_kdump_data = &xen_kdump_data; -+ nd->xen_kdump_data->last_mfn_read = UNINITIALIZED; -+ nd->xen_kdump_data->last_pmd_read = UNINITIALIZED; -+ nd->xen_kdump_data->flags |= KDUMP_MFN_LIST; -+ } -+ return TRUE; -+ } -+ -+ return FALSE; -+} ---- crash/diskdump.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/diskdump.c 2007-04-10 10:20:29.000000000 -0400 -@@ -1,16 +1,16 @@ - /* - * diskdump.c - * -- * NOTE: The Red Hat diskdump module currently creates -- * vmcore dumpfiles that are identical to those made -- * by the Red Hat netdump module, and therefore the -- * dumpfile is recognized as such. But just in case -- * there's ever a divergence, this file is being kept -- * in place, along with the DISKDUMP-related #define's -- * and their usage throughout the crash sources. -+ * The diskdump module optionally creates either ELF vmcore -+ * dumpfiles, or compressed dumpfiles derived from the LKCD format. -+ * In the case of ELF vmcore files, since they are identical to -+ * netdump dumpfiles, the facilities in netdump.c are used. For -+ * compressed dumpfiles, the facilities in this file are used. - * -- * Copyright (C) 2004, 2005 David Anderson -- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2005 FUJITSU LIMITED -+ * Copyright (C) 2005 NEC Corporation - * - * This software may be freely redistributed under the terms of the - * GNU General Public License. -@@ -18,22 +18,261 @@ - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -- * -- * Author: David Anderson - */ - - #include "defs.h" - #include "diskdump.h" - -+#define BITMAP_SECT_LEN 4096 -+ - struct diskdump_data { - ulong flags; /* DISKDUMP_LOCAL, plus anything else... 
*/ - int dfd; /* dumpfile file descriptor */ - FILE *ofp; /* fprintf(dd->ofp, "xxx"); */ - int machine_type; /* machine type identifier */ -+ -+ /* header */ -+ struct disk_dump_header *header; -+ struct disk_dump_sub_header *sub_header; -+ struct kdump_sub_header *sub_header_kdump; -+ -+ size_t data_offset; -+ int block_size; -+ int block_shift; -+ char *bitmap; -+ int bitmap_len; -+ char *dumpable_bitmap; -+ int byte, bit; -+ char *compressed_page; /* copy of compressed page data */ -+ char *curbufptr; /* ptr to uncompressed page buffer */ -+ -+ /* page cache */ -+ struct page_cache_hdr { /* header for each cached page */ -+ uint32_t pg_flags; -+ uint64_t pg_addr; -+ char *pg_bufptr; -+ ulong pg_hit_count; -+ } page_cache_hdr[DISKDUMP_CACHED_PAGES]; -+ char *page_cache_buf; /* base of cached buffer pages */ -+ int evict_index; /* next page to evict */ -+ ulong evictions; /* total evictions done */ -+ ulong cached_reads; -+ ulong *valid_pages; -+ ulong accesses; - }; - - static struct diskdump_data diskdump_data = { 0 }; - static struct diskdump_data *dd = &diskdump_data; -+static int get_dump_level(void); -+ -+ulong *diskdump_flags = &diskdump_data.flags; -+ -+static inline int get_bit(char *map, int byte, int bit) -+{ -+ return map[byte] & (1<bitmap, nr >> 3, nr & 7); -+} -+ -+static inline int page_is_dumpable(unsigned int nr) -+{ -+ return dd->dumpable_bitmap[nr>>3] & (1 << (nr & 7)); -+} -+ -+static inline int dump_is_partial(const struct disk_dump_header *header) -+{ -+ return header->bitmap_blocks >= -+ divideup(divideup(header->max_mapnr, 8), dd->block_size) * 2; -+} -+ -+static int open_dump_file(char *file) -+{ -+ int fd; -+ -+ fd = open(file, O_RDONLY); -+ if (fd < 0) { -+ error(INFO, "diskdump: unable to open dump file %s", file); -+ return FALSE; -+ } -+ dd->dfd = fd; -+ return TRUE; -+} -+ -+static int read_dump_header(void) -+{ -+ struct disk_dump_header *header = NULL; -+ struct disk_dump_sub_header *sub_header = NULL; -+ struct kdump_sub_header *sub_header_kdump = NULL; -+ int bitmap_len; -+ const int block_size = (int)sysconf(_SC_PAGESIZE); -+ off_t offset; -+ const off_t failed = (off_t)-1; -+ ulong pfn; -+ int i, j, max_sect_len; -+ -+ if (block_size < 0) -+ return FALSE; -+ -+ if ((header = malloc(block_size)) == NULL) -+ error(FATAL, "diskdump: cannot malloc block_size buffer\n"); -+ -+ if (lseek(dd->dfd, 0, SEEK_SET) == failed) { -+ if (CRASHDEBUG(1)) -+ error(INFO, "diskdump: cannot lseek dump header\n"); -+ goto err; -+ } -+ -+ if (read(dd->dfd, header, block_size) < block_size) { -+ if (CRASHDEBUG(1)) -+ error(INFO, "diskdump: cannot read dump header\n"); -+ goto err; -+ } -+ -+ /* validate dump header */ -+ if (!memcmp(header->signature, DISK_DUMP_SIGNATURE, -+ sizeof(header->signature))) { -+ dd->flags |= DISKDUMP_LOCAL; -+ } else if (!memcmp(header->signature, KDUMP_SIGNATURE, -+ sizeof(header->signature))) { -+ dd->flags |= KDUMP_CMPRS_LOCAL; -+ if (header->header_version >= 1) -+ dd->flags |= ERROR_EXCLUDED; -+ } else { -+ if (CRASHDEBUG(1)) -+ error(INFO, "diskdump: dump does not have panic dump header\n"); -+ goto err; -+ } -+ -+ if (header->block_size != block_size) { -+ error(INFO, "diskdump: block size in the dump header does not match" -+ " with system page size\n"); -+ goto err; -+ } -+ dd->block_size = block_size; -+ dd->block_shift = ffs(block_size) - 1; -+ -+ if (sizeof(*header) + sizeof(void *) * header->nr_cpus > block_size || -+ header->nr_cpus <= 0) { -+ error(INFO, "diskdump: invalid nr_cpus value: %d\n", header->nr_cpus); -+ goto err; -+ } -+ 
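For reference, the bitmap helpers defined above address page nr as byte nr >> 3, bit nr & 7, and a partial dump stores the RAM bitmap and the dumpable bitmap back to back (hence the factor of two that dump_is_partial() checks). A small standalone sketch of the bit convention; the helper names are illustrative, not from the patch:

    #include <stdio.h>

    /* Same addressing as get_bit()/page_is_dumpable(): byte nr >> 3, bit nr & 7. */
    static int test_page_bit(const unsigned char *map, unsigned long nr)
    {
        return (map[nr >> 3] >> (nr & 7)) & 1;
    }

    static void set_page_bit(unsigned char *map, unsigned long nr)
    {
        map[nr >> 3] |= 1 << (nr & 7);
    }

    int main(void)
    {
        unsigned char bitmap[16] = { 0 };

        set_page_bit(bitmap, 10);    /* mark pfn 10 present/dumpable */
        printf("pfn 10: %d  pfn 11: %d\n",
               test_page_bit(bitmap, 10), test_page_bit(bitmap, 11));
        return 0;
    }
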
-+ /* read sub header */ -+ offset = (off_t)block_size; -+ if (lseek(dd->dfd, offset, SEEK_SET) == failed) { -+ error(INFO, "diskdump: cannot lseek dump sub header\n"); -+ goto err; -+ } -+ -+ if (DISKDUMP_VALID()) { -+ if ((sub_header = malloc(block_size)) == NULL) -+ error(FATAL, "diskdump: cannot malloc sub_header buffer\n"); -+ -+ if (read(dd->dfd, sub_header, block_size) -+ < block_size) { -+ error(INFO, "diskdump: cannot read dump sub header\n"); -+ goto err; -+ } -+ dd->sub_header = sub_header; -+ } else if (KDUMP_CMPRS_VALID()) { -+ if ((sub_header_kdump = malloc(block_size)) == NULL) -+ error(FATAL, "diskdump: cannot malloc sub_header_kdump buffer\n"); -+ -+ if (read(dd->dfd, sub_header_kdump, block_size) -+ < block_size) { -+ error(INFO, "diskdump: cannot read dump sub header\n"); -+ goto err; -+ } -+ dd->sub_header_kdump = sub_header_kdump; -+ } -+ -+ /* read memory bitmap */ -+ bitmap_len = block_size * header->bitmap_blocks; -+ dd->bitmap_len = bitmap_len; -+ -+ offset = (off_t)block_size * (1 + header->sub_hdr_size); -+ if (lseek(dd->dfd, offset, SEEK_SET) == failed) { -+ error(INFO, "diskdump: cannot lseek memory bitmap\n"); -+ goto err; -+ } -+ -+ if ((dd->bitmap = malloc(bitmap_len)) == NULL) -+ error(FATAL, "diskdump: cannot malloc bitmap buffer\n"); -+ dd->dumpable_bitmap = calloc(bitmap_len, 1); -+ if (read(dd->dfd, dd->bitmap, bitmap_len) < bitmap_len) { -+ error(INFO, "diskdump: cannot read memory bitmap\n"); -+ goto err; -+ } -+ -+ if (dump_is_partial(header)) -+ memcpy(dd->dumpable_bitmap, dd->bitmap + bitmap_len/2, -+ bitmap_len/2); -+ else -+ memcpy(dd->dumpable_bitmap, dd->bitmap, bitmap_len); -+ -+ dd->data_offset -+ = (1 + header->sub_hdr_size + header->bitmap_blocks) -+ * header->block_size; -+ -+ dd->header = header; -+ -+ if (machine_type("X86")) -+ dd->machine_type = EM_386; -+ else if (machine_type("X86_64")) -+ dd->machine_type = EM_X86_64; -+ else if (machine_type("IA64")) -+ dd->machine_type = EM_IA_64; -+ else if (machine_type("PPC64")) -+ dd->machine_type = EM_PPC64; -+ else { -+ error(INFO, "diskdump: unsupported machine type: %s\n", MACHINE_TYPE); -+ goto err; -+ } -+ -+ max_sect_len = divideup(header->max_mapnr, BITMAP_SECT_LEN); -+ -+ dd->valid_pages = calloc(sizeof(ulong), max_sect_len + 1); -+ pfn = 0; -+ for (i = 1; i < max_sect_len + 1; i++) { -+ dd->valid_pages[i] = dd->valid_pages[i - 1]; -+ for (j = 0; j < BITMAP_SECT_LEN; j++, pfn++) -+ if (page_is_dumpable(pfn)) -+ dd->valid_pages[i]++; -+ } -+ -+ return TRUE; -+ -+err: -+ free(header); -+ if (sub_header) -+ free(sub_header); -+ if (sub_header_kdump) -+ free(sub_header_kdump); -+ if (dd->bitmap) -+ free(dd->bitmap); -+ if (dd->dumpable_bitmap) -+ free(dd->dumpable_bitmap); -+ dd->flags &= ~(DISKDUMP_LOCAL|KDUMP_CMPRS_LOCAL); -+ return FALSE; -+} -+ -+static int -+pfn_to_pos(ulong pfn) -+{ -+ int desc_pos, j, valid; -+ -+ valid = dd->valid_pages[pfn / BITMAP_SECT_LEN]; -+ -+ for (j = round(pfn, BITMAP_SECT_LEN), desc_pos = valid; j <= pfn; j++) -+ if (page_is_dumpable(j)) -+ desc_pos++; -+ -+ return desc_pos; -+} -+ - - /* - * Determine whether a file is a diskdump creation, and if TRUE, -@@ -43,7 +282,26 @@ - int - is_diskdump(char *file) - { -- return FALSE; -+ int sz, i; -+ -+ if (!open_dump_file(file) || !read_dump_header()) -+ return FALSE; -+ -+ sz = dd->block_size * (DISKDUMP_CACHED_PAGES); -+ if ((dd->page_cache_buf = malloc(sz)) == NULL) -+ error(FATAL, "diskdump: cannot malloc compressed page_cache_buf\n"); -+ -+ for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) -+ 
dd->page_cache_hdr[i].pg_bufptr = -+ &dd->page_cache_buf[i * dd->block_size]; -+ -+ if ((dd->compressed_page = (char *)malloc(dd->block_size)) == NULL) -+ error(FATAL, "diskdump: cannot malloc compressed page space\n"); -+ -+ if (CRASHDEBUG(1)) -+ diskdump_memory_dump(fp); -+ -+ return TRUE; - } - - /* -@@ -53,11 +311,139 @@ - int - diskdump_init(char *unused, FILE *fptr) - { -- if (!DISKDUMP_VALID()) -- return FALSE; -+ if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) -+ return FALSE; - -- dd->ofp = fptr; -- return TRUE; -+ dd->ofp = fptr; -+ return TRUE; -+} -+ -+/* -+ * Get the relocational offset from the sub header of kdump. -+ */ -+int -+diskdump_phys_base(unsigned long *phys_base) -+{ -+ if (KDUMP_CMPRS_VALID()) { -+ *phys_base = dd->sub_header_kdump->phys_base; -+ return TRUE; -+ } -+ -+ return FALSE; -+} -+ -+/* -+ * Check whether paddr is already cached. -+ */ -+static int -+page_is_cached(physaddr_t paddr) -+{ -+ int i; -+ struct page_cache_hdr *pgc; -+ -+ dd->accesses++; -+ -+ for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) { -+ -+ pgc = &dd->page_cache_hdr[i]; -+ -+ if (!DISKDUMP_VALID_PAGE(pgc->pg_flags)) -+ continue; -+ -+ if (pgc->pg_addr == paddr) { -+ pgc->pg_hit_count++; -+ dd->curbufptr = pgc->pg_bufptr; -+ dd->cached_reads++; -+ return TRUE; -+ } -+ } -+ return FALSE; -+} -+ -+/* -+ * Cache the page's data. -+ * -+ * If an empty page cache location is available, take it. Otherwise, evict -+ * the entry indexed by evict_index, and then bump evict index. The hit_count -+ * is only gathered for dump_diskdump_environment(). -+ * -+ * If the page is compressed, uncompress it into the selected page cache entry. -+ * If the page is raw, just copy it into the selected page cache entry. -+ * If all works OK, update diskdump->curbufptr to point to the page's -+ * uncompressed data. 
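The slot selection that cache_page() performs below reduces to: reuse the first invalid page-cache slot, otherwise evict the slot at evict_index and advance it round-robin. A stripped-down sketch of just that step, using a hypothetical slot array in place of the diskdump page cache:

    #include <stdio.h>

    #define CACHED_PAGES 16
    #define SLOT_VALID   0x1

    struct slot {
        unsigned int       flags;
        unsigned long long addr;
        unsigned long      hits;
    };

    static struct slot   cache[CACHED_PAGES];
    static int           evict_index;
    static unsigned long evictions;

    /* Pick a cache slot: first invalid one, else round-robin eviction. */
    static int pick_slot(unsigned long long paddr)
    {
        int i, found = 0;

        for (i = 0; i < CACHED_PAGES; i++) {
            if (!(cache[i].flags & SLOT_VALID)) {
                found = 1;
                break;
            }
        }

        if (!found) {                /* all slots valid: evict round-robin */
            i = evict_index;
            cache[i].hits = 0;
            evict_index = (evict_index + 1) % CACHED_PAGES;
            evictions++;
        }

        cache[i].flags = 0;          /* marked valid only after a good read */
        cache[i].addr = paddr;
        cache[i].hits++;
        return i;
    }

    int main(void)
    {
        int s = pick_slot(0x1000);

        cache[s].flags |= SLOT_VALID;
        printf("page 0x1000 cached in slot %d (evictions: %lu)\n", s, evictions);
        return 0;
    }
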
-+ */ -+static int -+cache_page(physaddr_t paddr) -+{ -+ int i, ret; -+ int found; -+ ulong pfn; -+ int desc_pos; -+ off_t seek_offset; -+ page_desc_t pd; -+ const int block_size = dd->block_size; -+ const off_t failed = (off_t)-1; -+ ulong retlen; -+ -+ for (i = found = 0; i < DISKDUMP_CACHED_PAGES; i++) { -+ if (DISKDUMP_VALID_PAGE(dd->page_cache_hdr[i].pg_flags)) -+ continue; -+ found = TRUE; -+ break; -+ } -+ -+ if (!found) { -+ i = dd->evict_index; -+ dd->page_cache_hdr[i].pg_hit_count = 0; -+ dd->evict_index = -+ (dd->evict_index+1) % DISKDUMP_CACHED_PAGES; -+ dd->evictions++; -+ } -+ -+ dd->page_cache_hdr[i].pg_flags = 0; -+ dd->page_cache_hdr[i].pg_addr = paddr; -+ dd->page_cache_hdr[i].pg_hit_count++; -+ -+ /* find page descriptor */ -+ pfn = paddr >> dd->block_shift; -+ desc_pos = pfn_to_pos(pfn); -+ seek_offset = dd->data_offset -+ + (off_t)(desc_pos - 1)*sizeof(page_desc_t); -+ lseek(dd->dfd, seek_offset, SEEK_SET); -+ -+ /* read page descriptor */ -+ if (read(dd->dfd, &pd, sizeof(pd)) != sizeof(pd)) -+ return READ_ERROR; -+ -+ /* sanity check */ -+ if (pd.size > block_size) -+ return READ_ERROR; -+ -+ if (lseek(dd->dfd, pd.offset, SEEK_SET) == failed) -+ return SEEK_ERROR; -+ -+ /* read page data */ -+ if (read(dd->dfd, dd->compressed_page, pd.size) != pd.size) -+ return READ_ERROR; -+ -+ if (pd.flags & DUMP_DH_COMPRESSED) { -+ retlen = block_size; -+ ret = uncompress((unsigned char *)dd->page_cache_hdr[i].pg_bufptr, -+ &retlen, -+ (unsigned char *)dd->compressed_page, -+ pd.size); -+ if ((ret != Z_OK) || (retlen != block_size)) { -+ error(INFO, "diskdump: uncompress failed: %d\n", ret); -+ return READ_ERROR; -+ } -+ } else -+ memcpy(dd->page_cache_hdr[i].pg_bufptr, -+ dd->compressed_page, block_size); -+ -+ dd->page_cache_hdr[i].pg_flags |= PAGE_VALID; -+ dd->curbufptr = dd->page_cache_hdr[i].pg_bufptr; -+ -+ return TRUE; - } - - /* -@@ -66,7 +452,31 @@ - int - read_diskdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) - { -- return 0; -+ int ret; -+ physaddr_t curpaddr; -+ ulong pfn, page_offset; -+ -+ pfn = paddr >> dd->block_shift; -+ curpaddr = paddr & ~((physaddr_t)(dd->block_size-1)); -+ page_offset = paddr & ((physaddr_t)(dd->block_size-1)); -+ -+ if ((pfn >= dd->header->max_mapnr) || !page_is_ram(pfn)) -+ return SEEK_ERROR; -+ if (!page_is_dumpable(pfn)) { -+ if ((dd->flags & (ZERO_EXCLUDED|ERROR_EXCLUDED)) == -+ ERROR_EXCLUDED) -+ return PAGE_EXCLUDED; -+ memset(bufptr, 0, cnt); -+ return cnt; -+ } -+ -+ if (!page_is_cached(curpaddr)) -+ if ((ret = cache_page(curpaddr)) < 0) -+ return ret; -+ -+ memcpy(bufptr, dd->curbufptr + page_offset, cnt); -+ -+ return cnt; - } - - /* -@@ -81,7 +491,23 @@ - ulong - get_diskdump_panic_task(void) - { -- return NO_TASK; -+ if ((!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) -+ || !get_active_set()) -+ return NO_TASK; -+ -+ return (ulong)dd->header->tasks[dd->header->current_cpu]; -+} -+ -+extern void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); -+extern void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); -+ -+static void -+get_diskdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp) -+{ -+ if ((bt->task == tt->panic_task) && DISKDUMP_VALID()) -+ bt->machdep = &dd->sub_header->elf_regs; -+ -+ machdep->get_stack_frame(bt, eip, esp); - } - - /* -@@ -91,12 +517,32 @@ - void - get_diskdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) - { -- switch (dd->machine_type) -- { -- default: -- error(FATAL, -- "diskdump support for this machine type is not available\n"); -- } -+ switch 
(dd->machine_type) -+ { -+ case EM_386: -+ return get_netdump_regs_x86(bt, eip, esp); -+ break; -+ -+ case EM_IA_64: -+ /* For normal backtraces, this information will be obtained -+ * frome the switch_stack structure, which is pointed to by -+ * the thread.ksp field of the task_struct. But it's still -+ * needed by the "bt -t" option. -+ */ -+ machdep->get_stack_frame(bt, eip, esp); -+ break; -+ -+ case EM_PPC64: -+ return get_diskdump_regs_ppc64(bt, eip, esp); -+ break; -+ -+ case EM_X86_64: -+ return get_netdump_regs_x86_64(bt, eip, esp); -+ break; -+ -+ default: -+ error(FATAL, "diskdump: unsupported machine type: %s\n", MACHINE_TYPE); -+ } - } - - /* -@@ -105,7 +551,10 @@ - uint - diskdump_page_size(void) - { -- return 0; -+ if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) -+ return 0; -+ -+ return dd->header->block_size; - } - - /* -@@ -131,6 +580,197 @@ - int - diskdump_memory_dump(FILE *fp) - { -+ int i, others, dump_level; -+ struct disk_dump_header *dh; -+ struct disk_dump_sub_header *dsh; -+ struct kdump_sub_header *kdsh; -+ ulong *tasks; -+ -+ fprintf(fp, "diskdump_data: \n"); -+ fprintf(fp, " flags: %lx (", dd->flags); -+ others = 0; -+ if (dd->flags & DISKDUMP_LOCAL) -+ fprintf(fp, "%sDISKDUMP_LOCAL", others++ ? "|" : ""); -+ if (dd->flags & KDUMP_CMPRS_LOCAL) -+ fprintf(fp, "%sKDUMP_CMPRS_LOCAL", others++ ? "|" : ""); -+ if (dd->flags & ERROR_EXCLUDED) -+ fprintf(fp, "%sERROR_EXCLUDED", others++ ? "|" : ""); -+ if (dd->flags & ZERO_EXCLUDED) -+ fprintf(fp, "%sZERO_EXCLUDED", others++ ? "|" : ""); -+ fprintf(fp, ")\n"); -+ fprintf(fp, " dfd: %d\n", dd->dfd); -+ fprintf(fp, " ofp: %lx\n", (ulong)dd->ofp); -+ fprintf(fp, " machine_type: %d ", dd->machine_type); -+ switch (dd->machine_type) -+ { -+ case EM_386: -+ fprintf(fp, "(EM_386)\n"); break; -+ case EM_X86_64: -+ fprintf(fp, "(EM_X86_64)\n"); break; -+ case EM_IA_64: -+ fprintf(fp, "(EM_IA_64)\n"); break; -+ case EM_PPC64: -+ fprintf(fp, "(EM_PPC64)\n"); break; -+ default: -+ fprintf(fp, "(unknown)\n"); break; -+ } -+ -+ fprintf(fp, "\n header: %lx\n", (ulong)dd->header); -+ dh = dd->header; -+ fprintf(fp, " signature: \""); -+ for (i = 0; i < SIG_LEN; i++) -+ if (dh->signature[i]) -+ fprintf(fp, "%c", dh->signature[i]); -+ fprintf(fp, "\"\n"); -+ fprintf(fp, " header_version: %d\n", dh->header_version); -+ fprintf(fp, " utsname:\n"); -+ fprintf(fp, " sysname: %s\n", dh->utsname.sysname); -+ fprintf(fp, " nodename: %s\n", dh->utsname.nodename); -+ fprintf(fp, " release: %s\n", dh->utsname.release); -+ fprintf(fp, " version: %s\n", dh->utsname.version); -+ fprintf(fp, " machine: %s\n", dh->utsname.machine); -+ fprintf(fp, " domainname: %s\n", dh->utsname.domainname); -+ fprintf(fp, " timestamp:\n"); -+ fprintf(fp, " tv_sec: %lx\n", dh->timestamp.tv_sec); -+ fprintf(fp, " tv_usec: %lx\n", dh->timestamp.tv_usec); -+ fprintf(fp, " status: %x (", dh->status); -+ others = 0; -+ if (dh->status & DUMP_HEADER_COMPLETED) -+ fprintf(fp, "%sDUMP_HEADER_COMPLETED", others++ ? "|" : ""); -+ if (dh->status & DUMP_HEADER_INCOMPLETED) -+ fprintf(fp, "%sDUMP_HEADER_INCOMPLETED", others++ ? "|" : ""); -+ if (dh->status & DUMP_HEADER_COMPRESSED) -+ fprintf(fp, "%sDUMP_HEADER_COMPRESSED", others++ ? 
"|" : ""); -+ fprintf(fp, ")\n"); -+ fprintf(fp, " block_size: %d\n", dh->block_size); -+ fprintf(fp, " sub_hdr_size: %d\n", dh->sub_hdr_size); -+ fprintf(fp, " bitmap_blocks: %u\n", dh->bitmap_blocks); -+ fprintf(fp, " max_mapnr: %u\n", dh->max_mapnr); -+ fprintf(fp, " total_ram_blocks: %u\n", dh->total_ram_blocks); -+ fprintf(fp, " device_blocks: %u\n", dh->device_blocks); -+ fprintf(fp, " written_blocks: %u\n", dh->written_blocks); -+ fprintf(fp, " current_cpu: %u\n", dh->current_cpu); -+ fprintf(fp, " nr_cpus: %d\n", dh->nr_cpus); -+ tasks = (ulong *)&dh->tasks[0]; -+ fprintf(fp, " tasks[nr_cpus]: %lx\n", *tasks); -+ for (tasks++, i = 1; i < dh->nr_cpus; i++) { -+ fprintf(fp, " %lx\n", *tasks); -+ tasks++; -+ } -+ fprintf(fp, "\n"); -+ fprintf(fp, " sub_header: %lx ", (ulong)dd->sub_header); -+ if ((dsh = dd->sub_header)) { -+ fprintf(fp, "\n elf_regs: %lx\n", -+ (ulong)&dsh->elf_regs); -+ fprintf(fp, " dump_level: "); -+ if ((pc->flags & RUNTIME) && -+ ((dump_level = get_dump_level()) >= 0)) { -+ fprintf(fp, "%d (0x%x) %s", dump_level, dump_level, -+ dump_level ? "(" : ""); -+ -+#define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/ -+#define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */ -+#define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */ -+#define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */ -+#define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */ -+ -+ others = 0; -+ if (dump_level & DUMP_EXCLUDE_CACHE) -+ fprintf(fp, "%sDUMP_EXCLUDE_CACHE", -+ others++ ? "|" : ""); -+ if (dump_level & DUMP_EXCLUDE_CLEAN) -+ fprintf(fp, "%sDUMP_EXCLUDE_CLEAN", -+ others++ ? "|" : ""); -+ if (dump_level & DUMP_EXCLUDE_FREE) -+ fprintf(fp, "%sDUMP_EXCLUDE_FREE", -+ others++ ? "|" : ""); -+ if (dump_level & DUMP_EXCLUDE_ANON) -+ fprintf(fp, "%sDUMP_EXCLUDE_ANON", -+ others++ ? "|" : ""); -+ if (dump_level & DUMP_SAVE_PRIVATE) -+ fprintf(fp, "%sDUMP_SAVE_PRIVATE", -+ others++ ? "|" : ""); -+ fprintf(fp, "%s\n\n", dump_level ? ")" : ""); -+ } else -+ fprintf(fp, "%s\n\n", pc->flags & RUNTIME ? -+ "(unknown)" : "(undetermined)"); -+ -+ } else -+ fprintf(fp, "(n/a)\n\n"); -+ -+ fprintf(fp, " sub_header_kdump: %lx ", (ulong)dd->sub_header_kdump); -+ if ((kdsh = dd->sub_header_kdump)) { -+ fprintf(fp, "\n phys_base: %lx\n", -+ (ulong)kdsh->phys_base); -+ fprintf(fp, " dump_level: "); -+ if ((dump_level = get_dump_level()) >= 0) { -+ fprintf(fp, "%d (0x%x) %s", dump_level, dump_level, -+ dump_level ? "(" : ""); -+ -+#define DL_EXCLUDE_ZERO (0x001) /* Exclude Pages filled with Zeros */ -+#define DL_EXCLUDE_CACHE (0x002) /* Exclude Cache Pages without Private Pages */ -+#define DL_EXCLUDE_CACHE_PRI (0x004) /* Exclude Cache Pages with Private Pages */ -+#define DL_EXCLUDE_USER_DATA (0x008) /* Exclude UserProcessData Pages */ -+#define DL_EXCLUDE_FREE (0x010) /* Exclude Free Pages */ -+ -+ if (dump_level & DL_EXCLUDE_ZERO) -+ fprintf(fp, "%sDUMP_EXCLUDE_ZERO", -+ others++ ? "|" : ""); -+ if (dump_level & DL_EXCLUDE_CACHE) -+ fprintf(fp, "%sDUMP_EXCLUDE_CACHE", -+ others++ ? "|" : ""); -+ if (dump_level & DL_EXCLUDE_CACHE_PRI) -+ fprintf(fp, "%sDUMP_EXCLUDE_CACHE_PRI", -+ others++ ? "|" : ""); -+ if (dump_level & DL_EXCLUDE_USER_DATA) -+ fprintf(fp, "%sDUMP_EXCLUDE_USER_DATA", -+ others++ ? "|" : ""); -+ if (dump_level & DL_EXCLUDE_FREE) -+ fprintf(fp, "%sDUMP_EXCLUDE_FREE", -+ others++ ? "|" : ""); -+ others = 0; -+ -+ fprintf(fp, "%s\n\n", dump_level ? 
")" : ""); -+ } else -+ fprintf(fp, "(unknown)\n\n"); -+ } else -+ fprintf(fp, "(n/a)\n\n"); -+ -+ fprintf(fp, " data_offset: %lx\n", (ulong)dd->data_offset); -+ fprintf(fp, " block_size: %d\n", dd->block_size); -+ fprintf(fp, " block_shift: %d\n", dd->block_shift); -+ fprintf(fp, " bitmap: %lx\n", (ulong)dd->bitmap); -+ fprintf(fp, " bitmap_len: %d\n", dd->bitmap_len); -+ fprintf(fp, " dumpable_bitmap: %lx\n", (ulong)dd->dumpable_bitmap); -+ fprintf(fp, " byte: %d\n", dd->byte); -+ fprintf(fp, " bit: %d\n", dd->bit); -+ fprintf(fp, " compressed_page: %lx\n", (ulong)dd->compressed_page); -+ fprintf(fp, " curbufptr: %lx\n\n", (ulong)dd->curbufptr); -+ -+ for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) { -+ fprintf(fp, "%spage_cache_hdr[%d]:\n", i < 10 ? " " : "", i); -+ fprintf(fp, " pg_flags: %x (", dd->page_cache_hdr[i].pg_flags); -+ others = 0; -+ if (dd->page_cache_hdr[i].pg_flags & PAGE_VALID) -+ fprintf(fp, "%sPAGE_VALID", others++ ? "|" : ""); -+ fprintf(fp, ")\n"); -+ fprintf(fp, " pg_addr: %llx\n", (ulonglong)dd->page_cache_hdr[i].pg_addr); -+ fprintf(fp, " pg_bufptr: %lx\n", (ulong)dd->page_cache_hdr[i].pg_bufptr); -+ fprintf(fp, " pg_hit_count: %ld\n", dd->page_cache_hdr[i].pg_hit_count); -+ } -+ -+ fprintf(fp, "\n page_cache_buf: %lx\n", (ulong)dd->page_cache_buf); -+ fprintf(fp, " evict_index: %d\n", dd->evict_index); -+ fprintf(fp, " evictions: %ld\n", dd->evictions); -+ fprintf(fp, " accesses: %ld\n", dd->accesses); -+ fprintf(fp, " cached_reads: %ld ", dd->cached_reads); -+ if (dd->accesses) -+ fprintf(fp, "(%ld%%)\n", -+ dd->cached_reads * 100 / dd->accesses); -+ else -+ fprintf(fp, "\n"); -+ fprintf(fp, " valid_pages: %lx\n", (ulong)dd->valid_pages); -+ - return 0; - } - -@@ -142,3 +782,36 @@ - { - return 0; - } -+ -+/* -+ * Versions of disk_dump that support it contain the "dump_level" symbol. -+ * Version 1 and later compressed kdump dumpfiles contain the dump level -+ * in an additional field of the sub_header_kdump structure. -+ */ -+static int -+get_dump_level(void) -+{ -+ int dump_level; -+ -+ if (DISKDUMP_VALID()) { -+ if (symbol_exists("dump_level") && -+ readmem(symbol_value("dump_level"), KVADDR, &dump_level, -+ sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) -+ return dump_level; -+ } else if (KDUMP_CMPRS_VALID()) { -+ if (dd->header->header_version >= 1) -+ return dd->sub_header_kdump->dump_level; -+ } -+ -+ return -1; -+} -+ -+/* -+ * Used by the "sys" command to display [PARTIAL DUMP] -+ * after the dumpfile name. -+ */ -+int -+is_partial_diskdump(void) -+{ -+ return (get_dump_level() > 0 ? TRUE : FALSE); -+} ---- crash/xendump.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/xendump.c 2007-03-23 14:10:12.000000000 -0500 -@@ -0,0 +1,2829 @@ -+/* -+ * xendump.c -+ * -+ * Copyright (C) 2006, 2007 David Anderson -+ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved. -+ * -+ * This software may be freely redistributed under the terms of the -+ * GNU General Public License. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include "defs.h" -+#include "xendump.h" -+ -+static struct xendump_data xendump_data = { 0 }; -+static struct xendump_data *xd = &xendump_data; -+ -+static int xc_save_verify(char *); -+static int xc_core_verify(char *); -+static int xc_save_read(void *, int, ulong, physaddr_t); -+static int xc_core_read(void *, int, ulong, physaddr_t); -+static int xc_core_mfns(ulong, FILE *); -+ -+static void poc_store(ulong, off_t); -+static off_t poc_get(ulong, int *); -+ -+static void xen_dump_vmconfig(FILE *); -+ -+static void xc_core_create_pfn_tables(void); -+static ulong xc_core_pfn_to_page_index(ulong); -+static int xc_core_pfn_valid(ulong); -+ -+static void xendump_print(char *fmt, ...); -+ -+static int xc_core_elf_verify(char *); -+static void xc_core_elf_dump(void); -+static char *xc_core_elf_mfn_to_page(ulong, char *); -+static int xc_core_elf_mfn_to_page_index(ulong); -+static ulong xc_core_elf_pfn_valid(ulong); -+static ulong xc_core_elf_pfn_to_page_index(ulong); -+static void xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *); -+static void xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *); -+static void xc_core_dump_Elf32_Shdr(Elf32_Off offset, int); -+static void xc_core_dump_Elf64_Shdr(Elf64_Off offset, int); -+static char *xc_core_strtab(uint32_t, char *); -+static void xc_core_dump_elfnote(off_t, size_t, int); -+static void xc_core_elf_pfn_init(void); -+ -+#define ELFSTORE 1 -+#define ELFREAD 0 -+ -+/* -+ * Determine whether a file is a xendump creation, and if TRUE, -+ * initialize the xendump_data structure. -+ */ -+int -+is_xendump(char *file) -+{ -+ int verified; -+ char buf[BUFSIZE]; -+ -+ if ((xd->xfd = open(file, O_RDWR)) < 0) { -+ if ((xd->xfd = open(file, O_RDONLY)) < 0) { -+ sprintf(buf, "%s: open", file); -+ perror(buf); -+ return FALSE; -+ } -+ } -+ -+ if (read(xd->xfd, buf, BUFSIZE) != BUFSIZE) -+ return FALSE; -+ -+ if (machine_type("X86") || machine_type("X86_64")) -+ xd->page_size = 4096; -+ else if (machine_type("IA64") && !machdep->pagesize) -+ xd->page_size = 16384; -+ else -+ xd->page_size = machdep->pagesize; -+ -+ verified = xc_save_verify(buf) || xc_core_verify(buf); -+ -+ if (!verified) -+ close(xd->xfd); -+ -+ return (verified); -+} -+ -+/* -+ * Verify whether the dump was created by the xc_domain_dumpcore() -+ * library function in libxc/xc_core.c. 
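A brief sketch of the page-size guess that is_xendump() makes above: x86 and x86_64 guests use 4K pages, IA64 defaults to 16K when the host page size is not yet known, and anything else falls back to the detected machdep page size. The helper below is illustrative only:

    #include <stdio.h>
    #include <string.h>

    static unsigned int xendump_guess_page_size(const char *mtype,
                                                unsigned int machdep_pagesize)
    {
        if (!strcmp(mtype, "X86") || !strcmp(mtype, "X86_64"))
            return 4096;
        if (!strcmp(mtype, "IA64") && !machdep_pagesize)
            return 16384;
        return machdep_pagesize;      /* already detected by the architecture code */
    }

    int main(void)
    {
        printf("IA64 default: %u\n", xendump_guess_page_size("IA64", 0));
        printf("X86_64:       %u\n", xendump_guess_page_size("X86_64", 4096));
        return 0;
    }
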
-+ */ -+static int -+xc_core_verify(char *buf) -+{ -+ struct xc_core_header *xcp; -+ -+ xcp = (struct xc_core_header *)buf; -+ -+ if (xc_core_elf_verify(buf)) -+ return TRUE; -+ -+ if ((xcp->xch_magic != XC_CORE_MAGIC) && -+ (xcp->xch_magic != XC_CORE_MAGIC_HVM)) -+ return FALSE; -+ -+ if (!xcp->xch_nr_vcpus) { -+ error(INFO, -+ "faulty xc_core dump file header: xch_nr_vcpus is 0\n\n"); -+ -+ fprintf(stderr, " xch_magic: %x\n", xcp->xch_magic); -+ fprintf(stderr, " xch_nr_vcpus: %d\n", xcp->xch_nr_vcpus); -+ fprintf(stderr, " xch_nr_pages: %d\n", xcp->xch_nr_pages); -+ fprintf(stderr, " xch_ctxt_offset: %d\n", xcp->xch_ctxt_offset); -+ fprintf(stderr, " xch_index_offset: %d\n", xcp->xch_index_offset); -+ fprintf(stderr, " xch_pages_offset: %d\n\n", xcp->xch_pages_offset); -+ -+ clean_exit(1); -+ } -+ -+ BCOPY(xcp, &xd->xc_core.header, -+ sizeof(struct xc_core_header)); -+ -+ xd->flags |= (XENDUMP_LOCAL | XC_CORE_ORIG | XC_CORE_P2M_CREATE); -+ -+ if (xc_core_mfns(XC_CORE_64BIT_HOST, stderr)) -+ xd->flags |= XC_CORE_64BIT_HOST; -+ -+ if (!xd->page_size) -+ error(FATAL, -+ "unknown page size: use -p command line option\n"); -+ -+ if (!(xd->page = (char *)malloc(xd->page_size))) -+ error(FATAL, "cannot malloc page space."); -+ -+ if (!(xd->poc = (struct pfn_offset_cache *)calloc -+ (PFN_TO_OFFSET_CACHE_ENTRIES, -+ sizeof(struct pfn_offset_cache)))) -+ error(FATAL, "cannot malloc pfn_offset_cache\n"); -+ xd->last_pfn = ~(0UL); -+ -+ if (CRASHDEBUG(1)) -+ xendump_memory_dump(stderr); -+ -+ return TRUE; -+} -+ -+/* -+ * Do the work for read_xendump() for the XC_CORE dumpfile format. -+ */ -+static int -+xc_core_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr) -+{ -+ ulong pfn, page_index; -+ off_t offset; -+ int redundant; -+ -+ if (xd->flags & (XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE)) -+ xc_core_create_pfn_tables(); -+ -+ pfn = (ulong)BTOP(paddr); -+ -+ if ((offset = poc_get(pfn, &redundant))) { -+ if (!redundant) { -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ return SEEK_ERROR; -+ if (read(xd->xfd, xd->page, xd->page_size) != -+ xd->page_size) -+ return READ_ERROR; -+ xd->last_pfn = pfn; -+ } -+ -+ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); -+ return cnt; -+ } -+ -+ if ((page_index = xc_core_pfn_to_page_index(pfn)) == -+ PFN_NOT_FOUND) -+ return READ_ERROR; -+ -+ offset = (off_t)xd->xc_core.header.xch_pages_offset + -+ ((off_t)(page_index) * (off_t)xd->page_size); -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ return SEEK_ERROR; -+ -+ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) -+ return READ_ERROR; -+ -+ poc_store(pfn, offset); -+ -+ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); -+ -+ return cnt; -+} -+ -+/* -+ * Verify whether the dumpfile was created by the "xm save" facility. -+ * This gets started by the "save" function in XendCheckpoint.py, and -+ * then by xc_save.c, with the work done in the xc_linux_save() library -+ * function in libxc/xc_linux_save.c. -+ */ -+ -+#define MAX_BATCH_SIZE 1024 -+/* -+ * Number of P2M entries in a page. -+ */ -+#define ULPP (xd->page_size/sizeof(unsigned long)) -+/* -+ * Number of P2M entries in the pfn_to_mfn_frame_list. -+ */ -+#define P2M_FL_ENTRIES (((xd->xc_save.nr_pfns)+ULPP-1)/ULPP) -+/* -+ * Size in bytes of the pfn_to_mfn_frame_list. 
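To make the frame-list sizing concrete: ULPP is the number of p2m entries per page, P2M_FL_ENTRIES rounds nr_pfns up to whole frames, and P2M_FL_SIZE (defined just below) is that count in bytes. A sketch of the same arithmetic, assuming a 4096-byte page and 8-byte unsigned long:

    #include <stdio.h>

    #define PAGE_SZ 4096UL
    #define ULPP    (PAGE_SZ / sizeof(unsigned long))   /* p2m entries per page */

    /* Frames needed to hold one p2m entry per guest pfn, rounded up. */
    static unsigned long p2m_fl_entries(unsigned long nr_pfns)
    {
        return (nr_pfns + ULPP - 1) / ULPP;
    }

    int main(void)
    {
        unsigned long nr_pfns = 65536;    /* e.g. a 256MB guest with 4K pages */
        unsigned long entries = p2m_fl_entries(nr_pfns);

        printf("nr_pfns %lu -> %lu frame-list entries, %lu bytes\n",
               nr_pfns, entries,
               (unsigned long)(entries * sizeof(unsigned long)));
        return 0;
    }
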
-+ */ -+#define P2M_FL_SIZE ((P2M_FL_ENTRIES)*sizeof(unsigned long)) -+ -+#define XTAB (0xf<<28) /* invalid page */ -+#define LTAB_MASK XTAB -+ -+static int -+xc_save_verify(char *buf) -+{ -+ int i, batch_count, done_batch, *intptr; -+ ulong flags, *ulongptr; -+ ulong batch_index, total_pages_read; -+ ulong N; -+ -+ if (!STRNEQ(buf, XC_SAVE_SIGNATURE)) -+ return FALSE; -+ -+ if (lseek(xd->xfd, strlen(XC_SAVE_SIGNATURE), SEEK_SET) == -1) -+ return FALSE; -+ -+ flags = XC_SAVE; -+ -+ if (CRASHDEBUG(1)) { -+ fprintf(stderr, "\"%s\"\n", buf); -+ fprintf(stderr, "endian: %d %s\n", __BYTE_ORDER, -+ __BYTE_ORDER == __BIG_ENDIAN ? "__BIG_ENDIAN" : -+ (__BYTE_ORDER == __LITTLE_ENDIAN ? -+ "__LITTLE_ENDIAN" : "???")); -+ } -+ -+ /* -+ * size of vmconfig data structure (big-endian) -+ */ -+ if (read(xd->xfd, buf, sizeof(int)) != sizeof(int)) -+ return FALSE; -+ -+ intptr = (int *)buf; -+ -+ if (CRASHDEBUG(1) && BYTE_SWAP_REQUIRED(__BIG_ENDIAN)) { -+ fprintf(stderr, "byte-swap required for this:\n"); -+ for (i = 0; i < sizeof(int); i++) -+ fprintf(stderr, "[%x]", buf[i] & 0xff); -+ fprintf(stderr, ": %x -> ", *intptr); -+ } -+ -+ xd->xc_save.vmconfig_size = swab32(*intptr); -+ -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, "%x\n", xd->xc_save.vmconfig_size); -+ -+ if (!(xd->xc_save.vmconfig_buf = (char *)malloc -+ (xd->xc_save.vmconfig_size))) -+ error(FATAL, "cannot malloc xc_save vmconfig space."); -+ -+ if (!xd->page_size) -+ error(FATAL, -+ "unknown page size: use -p command line option\n"); -+ -+ if (!(xd->page = (char *)malloc(xd->page_size))) -+ error(FATAL, "cannot malloc page space."); -+ -+ if (!(xd->poc = (struct pfn_offset_cache *)calloc -+ (PFN_TO_OFFSET_CACHE_ENTRIES, -+ sizeof(struct pfn_offset_cache)))) -+ error(FATAL, "cannot malloc pfn_offset_cache\n"); -+ xd->last_pfn = ~(0UL); -+ -+ if (!(xd->xc_save.region_pfn_type = (ulong *)calloc -+ (MAX_BATCH_SIZE, sizeof(ulong)))) -+ error(FATAL, "cannot malloc region_pfn_type\n"); -+ -+ if (read(xd->xfd, xd->xc_save.vmconfig_buf, -+ xd->xc_save.vmconfig_size) != xd->xc_save.vmconfig_size) -+ goto xc_save_bailout; -+ -+ /* -+ * nr_pfns (native byte order) -+ */ -+ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) -+ goto xc_save_bailout; -+ -+ ulongptr = (ulong *)buf; -+ -+ if (CRASHDEBUG(1)) { -+ for (i = 0; i < sizeof(ulong); i++) -+ fprintf(stderr, "[%x]", buf[i] & 0xff); -+ fprintf(stderr, ": %lx (nr_pfns)\n", *ulongptr); -+ } -+ -+ xd->xc_save.nr_pfns = *ulongptr; -+ -+ if (machine_type("IA64")) -+ goto xc_save_ia64; -+ -+ /* -+ * Get a local copy of the live_P2M_frame_list -+ */ -+ if (!(xd->xc_save.p2m_frame_list = (unsigned long *)malloc(P2M_FL_SIZE))) -+ error(FATAL, "cannot allocate p2m_frame_list array"); -+ -+ if (!(xd->xc_save.batch_offsets = (off_t *)calloc((size_t)P2M_FL_ENTRIES, -+ sizeof(off_t)))) -+ error(FATAL, "cannot allocate batch_offsets array"); -+ -+ xd->xc_save.batch_count = P2M_FL_ENTRIES; -+ -+ if (read(xd->xfd, xd->xc_save.p2m_frame_list, P2M_FL_SIZE) != -+ P2M_FL_SIZE) -+ goto xc_save_bailout; -+ -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, "pre-batch file pointer: %lld\n", -+ (ulonglong)lseek(xd->xfd, 0L, SEEK_CUR)); -+ -+ /* -+ * ... -+ * int batch_count -+ * ulong region pfn_type[batch_count] -+ * page 0 -+ * page 1 -+ * ... 
-+ * page batch_count-1 -+ * (repeat) -+ */ -+ -+ total_pages_read = 0; -+ batch_index = 0; -+ done_batch = FALSE; -+ -+ while (!done_batch) { -+ -+ xd->xc_save.batch_offsets[batch_index] = (off_t) -+ lseek(xd->xfd, 0L, SEEK_CUR); -+ -+ if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int)) -+ goto xc_save_bailout; -+ -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, "batch[%ld]: %d ", -+ batch_index, batch_count); -+ -+ batch_index++; -+ -+ if (batch_index >= P2M_FL_ENTRIES) { -+ fprintf(stderr, "more than %ld batches encountered?\n", -+ P2M_FL_ENTRIES); -+ goto xc_save_bailout; -+ } -+ -+ switch (batch_count) -+ { -+ case 0: -+ if (CRASHDEBUG(1)) { -+ fprintf(stderr, -+ ": Batch work is done: %ld pages read (P2M_FL_ENTRIES: %ld)\n", -+ total_pages_read, P2M_FL_ENTRIES); -+ } -+ done_batch = TRUE; -+ continue; -+ -+ case -1: -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, ": Entering page verify mode\n"); -+ continue; -+ -+ default: -+ if (batch_count > MAX_BATCH_SIZE) { -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, -+ ": Max batch size exceeded. Giving up.\n"); -+ done_batch = TRUE; -+ continue; -+ } -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, "\n"); -+ break; -+ } -+ -+ if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != -+ batch_count * sizeof(ulong)) -+ goto xc_save_bailout; -+ -+ for (i = 0; i < batch_count; i++) { -+ unsigned long pagetype; -+ unsigned long pfn; -+ -+ pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK; -+ pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK; -+ -+ if (pagetype == XTAB) -+ /* a bogus/unmapped page: skip it */ -+ continue; -+ -+ if (pfn > xd->xc_save.nr_pfns) { -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, -+ "batch_count: %d pfn %ld out of range", -+ batch_count, pfn); -+ } -+ -+ if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) -+ goto xc_save_bailout; -+ -+ total_pages_read++; -+ } -+ } -+ -+ /* -+ * Get the list of PFNs that are not in the psuedo-phys map -+ */ -+ if (read(xd->xfd, &xd->xc_save.pfns_not, -+ sizeof(xd->xc_save.pfns_not)) != sizeof(xd->xc_save.pfns_not)) -+ goto xc_save_bailout; -+ -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, "PFNs not in pseudo-phys map: %d\n", -+ xd->xc_save.pfns_not); -+ -+ if ((total_pages_read + xd->xc_save.pfns_not) != -+ xd->xc_save.nr_pfns) -+ error(WARNING, -+ "nr_pfns: %ld != (total pages: %ld + pages not saved: %d)\n", -+ xd->xc_save.nr_pfns, total_pages_read, -+ xd->xc_save.pfns_not); -+ -+ xd->xc_save.pfns_not_offset = lseek(xd->xfd, 0L, SEEK_CUR); -+ -+ if (lseek(xd->xfd, sizeof(ulong) * xd->xc_save.pfns_not, SEEK_CUR) == -1) -+ goto xc_save_bailout; -+ -+ xd->xc_save.vcpu_ctxt_offset = lseek(xd->xfd, 0L, SEEK_CUR); -+ -+ lseek(xd->xfd, 0, SEEK_END); -+ lseek(xd->xfd, -((off_t)(xd->page_size)), SEEK_CUR); -+ -+ xd->xc_save.shared_info_page_offset = lseek(xd->xfd, 0L, SEEK_CUR); -+ -+ xd->flags |= (XENDUMP_LOCAL | flags); -+ kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); -+ -+ if (CRASHDEBUG(1)) -+ xendump_memory_dump(stderr); -+ -+ return TRUE; -+ -+xc_save_ia64: -+ -+ /* -+ * Completely different format for ia64: -+ * -+ * ... -+ * pfn # -+ * page data -+ * pfn # -+ * page data -+ * ... 
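For clarity, each region_pfn_type[] word in a batch packs the page type into the LTAB_MASK bits and the pfn into the remaining bits; XTAB entries mark bogus/unmapped pages that have no data stored. A self-contained decoding sketch:

    #include <stdio.h>

    #define XTAB      (0xfUL << 28)   /* invalid/unmapped page marker */
    #define LTAB_MASK XTAB

    /* Decode one region_pfn_type[] word from an "xm save" batch. */
    static void decode_entry(unsigned long entry,
                             unsigned long *pfn, unsigned long *pagetype)
    {
        *pfn      = entry & ~LTAB_MASK;
        *pagetype = entry &  LTAB_MASK;
    }

    int main(void)
    {
        unsigned long pfn, type;

        decode_entry(0x12abUL, &pfn, &type);
        printf("pfn %#lx %s\n", pfn,
               type == XTAB ? "(unmapped, skipped)" : "(page data follows)");

        decode_entry(XTAB, &pfn, &type);
        printf("pfn %#lx %s\n", pfn,
               type == XTAB ? "(unmapped, skipped)" : "(page data follows)");
        return 0;
    }
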
-+ */ -+ free(xd->poc); -+ xd->poc = NULL; -+ free(xd->xc_save.region_pfn_type); -+ xd->xc_save.region_pfn_type = NULL; -+ -+ if (!(xd->xc_save.ia64_page_offsets = -+ (ulong *)calloc(xd->xc_save.nr_pfns, sizeof(off_t)))) -+ error(FATAL, "cannot allocate ia64_page_offsets array"); -+ -+ /* -+ * version -+ */ -+ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) -+ goto xc_save_bailout; -+ -+ xd->xc_save.ia64_version = *((ulong *)buf); -+ -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, "ia64 version: %lx\n", -+ xd->xc_save.ia64_version); -+ -+ /* -+ * xen_domctl_arch_setup structure -+ */ -+ if (read(xd->xfd, buf, sizeof(xen_domctl_arch_setup_t)) != -+ sizeof(xen_domctl_arch_setup_t)) -+ goto xc_save_bailout; -+ -+ if (CRASHDEBUG(1)) { -+ xen_domctl_arch_setup_t *setup = -+ (xen_domctl_arch_setup_t *)buf; -+ -+ fprintf(stderr, "xen_domctl_arch_setup:\n"); -+ fprintf(stderr, " flags: %lx\n", (ulong)setup->flags); -+ fprintf(stderr, " bp: %lx\n", (ulong)setup->bp); -+ fprintf(stderr, " maxmem: %lx\n", (ulong)setup->maxmem); -+ fprintf(stderr, " xsi_va: %lx\n", (ulong)setup->xsi_va); -+ fprintf(stderr, "hypercall_imm: %x\n", setup->hypercall_imm); -+ } -+ -+ for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { -+ if (read(xd->xfd, &N, sizeof(N)) != sizeof(N)) -+ goto xc_save_bailout; -+ -+ if (N < xd->xc_save.nr_pfns) -+ xd->xc_save.ia64_page_offsets[N] = -+ lseek(xd->xfd, 0, SEEK_CUR); -+ else -+ error(WARNING, -+ "[%d]: pfn of %lx (0x%lx) in ia64 canonical page list exceeds %ld\n", -+ i, N, N, xd->xc_save.nr_pfns); -+ -+ if (CRASHDEBUG(1)) { -+ if ((i < 10) || (N >= (xd->xc_save.nr_pfns-10))) -+ fprintf(stderr, "[%d]: %ld\n%s", i, N, -+ i == 9 ? "...\n" : ""); -+ } -+ -+ if ((N+1) >= xd->xc_save.nr_pfns) -+ break; -+ -+ if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) -+ goto xc_save_bailout; -+ } -+ -+ if (CRASHDEBUG(1)) { -+ for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { -+ if (!xd->xc_save.ia64_page_offsets[i]) -+ N++; -+ } -+ fprintf(stderr, "%ld out of %ld pfns not dumped\n", -+ N, xd->xc_save.nr_pfns); -+ } -+ -+ xd->flags |= (XENDUMP_LOCAL | flags | XC_SAVE_IA64); -+ kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); -+ -+ if (CRASHDEBUG(1)) -+ xendump_memory_dump(stderr); -+ -+ return TRUE; -+ -+xc_save_bailout: -+ -+ error(INFO, -+ "xc_save_verify: \"LinuxGuestRecord\" file handling/format error\n"); -+ -+ if (xd->xc_save.p2m_frame_list) { -+ free(xd->xc_save.p2m_frame_list); -+ xd->xc_save.p2m_frame_list = NULL; -+ } -+ if (xd->xc_save.batch_offsets) { -+ free(xd->xc_save.batch_offsets); -+ xd->xc_save.batch_offsets = NULL; -+ } -+ if (xd->xc_save.vmconfig_buf) { -+ free(xd->xc_save.vmconfig_buf); -+ xd->xc_save.vmconfig_buf = NULL; -+ } -+ if (xd->page) { -+ free(xd->page); -+ xd->page = NULL; -+ } -+ -+ return FALSE; -+} -+ -+/* -+ * Do the work for read_xendump() for the XC_SAVE dumpfile format. 
-+ */ -+static int -+xc_save_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr) -+{ -+ int b, i, redundant; -+ ulong reqpfn; -+ int batch_count; -+ off_t file_offset; -+ -+ reqpfn = (ulong)BTOP(paddr); -+ -+ if (CRASHDEBUG(8)) -+ fprintf(xd->ofp, -+ "xc_save_read(bufptr: %lx cnt: %d addr: %lx paddr: %llx (%ld, 0x%lx)\n", -+ (ulong)bufptr, cnt, addr, (ulonglong)paddr, reqpfn, reqpfn); -+ -+ if (xd->flags & XC_SAVE_IA64) { -+ if (reqpfn >= xd->xc_save.nr_pfns) { -+ if (CRASHDEBUG(1)) -+ fprintf(xd->ofp, -+ "xc_save_read: pfn %lx too large: nr_pfns: %lx\n", -+ reqpfn, xd->xc_save.nr_pfns); -+ return SEEK_ERROR; -+ } -+ -+ file_offset = xd->xc_save.ia64_page_offsets[reqpfn]; -+ if (!file_offset) { -+ if (CRASHDEBUG(1)) -+ fprintf(xd->ofp, -+ "xc_save_read: pfn %lx not stored in xendump\n", -+ reqpfn); -+ return SEEK_ERROR; -+ } -+ -+ if (reqpfn != xd->last_pfn) { -+ if (lseek(xd->xfd, file_offset, SEEK_SET) == -1) -+ return SEEK_ERROR; -+ -+ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) -+ return READ_ERROR; -+ } else { -+ xd->redundant++; -+ xd->cache_hits++; -+ } -+ -+ xd->accesses++; -+ xd->last_pfn = reqpfn; -+ -+ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); -+ return cnt; -+ } -+ -+ if ((file_offset = poc_get(reqpfn, &redundant))) { -+ if (!redundant) { -+ if (lseek(xd->xfd, file_offset, SEEK_SET) == -1) -+ return SEEK_ERROR; -+ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) -+ return READ_ERROR; -+ xd->last_pfn = reqpfn; -+ } else if (CRASHDEBUG(1)) -+ console("READ %ld (0x%lx) skipped!\n", reqpfn, reqpfn); -+ -+ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); -+ return cnt; -+ } -+ -+ /* -+ * ... -+ * int batch_count -+ * ulong region pfn_type[batch_count] -+ * page 0 -+ * page 1 -+ * ... -+ * page batch_count-1 -+ * (repeat) -+ */ -+ for (b = 0; b < xd->xc_save.batch_count; b++) { -+ -+ if (lseek(xd->xfd, xd->xc_save.batch_offsets[b], SEEK_SET) == -1) -+ return SEEK_ERROR; -+ -+ if (CRASHDEBUG(8)) -+ fprintf(xd->ofp, "check batch[%d]: offset: %llx\n", -+ b, (ulonglong)xd->xc_save.batch_offsets[b]); -+ -+ if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int)) -+ return READ_ERROR; -+ -+ switch (batch_count) -+ { -+ case 0: -+ if (CRASHDEBUG(1)) { -+ fprintf(xd->ofp, -+ "batch[%d]: has count of zero -- bailing out on pfn %ld\n", -+ b, reqpfn); -+ } -+ return READ_ERROR; -+ -+ case -1: -+ return READ_ERROR; -+ -+ default: -+ if (CRASHDEBUG(8)) -+ fprintf(xd->ofp, -+ "batch[%d]: offset: %llx batch count: %d\n", -+ b, (ulonglong)xd->xc_save.batch_offsets[b], -+ batch_count); -+ break; -+ } -+ -+ if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != -+ batch_count * sizeof(ulong)) -+ return READ_ERROR; -+ -+ for (i = 0; i < batch_count; i++) { -+ unsigned long pagetype; -+ unsigned long pfn; -+ -+ pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK; -+ pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK; -+ -+ if (pagetype == XTAB) -+ /* a bogus/unmapped page: skip it */ -+ continue; -+ -+ if (pfn > xd->xc_save.nr_pfns) { -+ if (CRASHDEBUG(1)) -+ fprintf(stderr, -+ "batch_count: %d pfn %ld out of range", -+ batch_count, pfn); -+ } -+ -+ if (pfn == reqpfn) { -+ file_offset = lseek(xd->xfd, 0, SEEK_CUR); -+ poc_store(pfn, file_offset); -+ -+ if (read(xd->xfd, xd->page, xd->page_size) != -+ xd->page_size) -+ return READ_ERROR; -+ -+ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); -+ return cnt; -+ } -+ -+ if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) -+ return SEEK_ERROR; -+ } -+ } -+ -+ return READ_ERROR; -+} -+ 
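Since xc_save_read() above locates a page by walking the recorded batches, the per-batch arithmetic is worth spelling out: page data follows the pfn_type array, only non-XTAB entries have a page on disk, so a matching pfn's page sits (number of stored pages before it) * page_size into the batch's data. A standalone sketch of that accounting, assuming the batch layout described in the format comments:

    #include <stdio.h>

    #define XTAB      (0xfUL << 28)
    #define LTAB_MASK XTAB

    /*
     * Return which stored page (0-based) within one batch corresponds to the
     * requested pfn, or -1 if it is not in this batch.
     */
    static int page_slot_in_batch(const unsigned long *pfn_type, int batch_count,
                                  unsigned long reqpfn)
    {
        int i, stored = 0;

        for (i = 0; i < batch_count; i++) {
            unsigned long pfn  = pfn_type[i] & ~LTAB_MASK;
            unsigned long type = pfn_type[i] &  LTAB_MASK;

            if (type == XTAB)
                continue;        /* unmapped: no page data on disk */
            if (pfn == reqpfn)
                return stored;   /* page is at data_start + stored * page_size */
            stored++;
        }
        return -1;
    }

    int main(void)
    {
        unsigned long batch[] = { 0x10, XTAB, 0x11, 0x12 };

        printf("pfn 0x12 is stored page #%d of this batch\n",
               page_slot_in_batch(batch, 4, 0x12));
        return 0;
    }
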
-+/* -+ * Stash a pfn's offset. If they're all in use, put it in the -+ * least-used slot that's closest to the beginning of the array. -+ */ -+static void -+poc_store(ulong pfn, off_t file_offset) -+{ -+ int i; -+ struct pfn_offset_cache *poc, *plow; -+ ulong curlow; -+ -+ curlow = ~(0UL); -+ plow = NULL; -+ poc = xd->poc; -+ -+ for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { -+ if (poc->cnt == 0) { -+ poc->cnt = 1; -+ poc->pfn = pfn; -+ poc->file_offset = file_offset; -+ xd->last_pfn = pfn; -+ return; -+ } -+ -+ if (poc->cnt < curlow) { -+ curlow = poc->cnt; -+ plow = poc; -+ } -+ } -+ -+ plow->cnt = 1; -+ plow->pfn = pfn; -+ plow->file_offset = file_offset; -+ xd->last_pfn = pfn; -+} -+ -+/* -+ * Check whether a pfn's offset has been cached. -+ */ -+static off_t -+poc_get(ulong pfn, int *redundant) -+{ -+ int i; -+ struct pfn_offset_cache *poc; -+ -+ xd->accesses++; -+ -+ if (pfn == xd->last_pfn) { -+ xd->redundant++; -+ *redundant = TRUE; -+ return 1; -+ } else -+ *redundant = FALSE; -+ -+ poc = xd->poc; -+ -+ for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { -+ if (poc->cnt && (poc->pfn == pfn)) { -+ poc->cnt++; -+ xd->cache_hits++; -+ return poc->file_offset; -+ } -+ } -+ -+ return 0; -+} -+ -+ -+/* -+ * Perform any post-dumpfile determination stuff here. -+ */ -+int -+xendump_init(char *unused, FILE *fptr) -+{ -+ if (!XENDUMP_VALID()) -+ return FALSE; -+ -+ xd->ofp = fptr; -+ return TRUE; -+} -+ -+int -+read_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) -+{ -+ if (pc->curcmd_flags & XEN_MACHINE_ADDR) -+ return READ_ERROR; -+ -+ switch (xd->flags & (XC_SAVE|XC_CORE_ORIG|XC_CORE_ELF)) -+ { -+ case XC_SAVE: -+ return xc_save_read(bufptr, cnt, addr, paddr); -+ -+ case XC_CORE_ORIG: -+ case XC_CORE_ELF: -+ return xc_core_read(bufptr, cnt, addr, paddr); -+ -+ default: -+ return READ_ERROR; -+ } -+} -+ -+int -+read_xendump_hyper(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) -+{ -+ ulong pfn, page_index; -+ off_t offset; -+ -+ pfn = (ulong)BTOP(paddr); -+ -+ /* ODA: pfn == mfn !!! */ -+ if ((page_index = xc_core_mfn_to_page_index(pfn)) == PFN_NOT_FOUND) -+ return READ_ERROR; -+ -+ offset = (off_t)xd->xc_core.header.xch_pages_offset + -+ ((off_t)(page_index) * (off_t)xd->page_size); -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ return SEEK_ERROR; -+ -+ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) -+ return READ_ERROR; -+ -+ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); -+ -+ return cnt; -+} -+ -+int -+write_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) -+{ -+ return WRITE_ERROR; -+} -+ -+uint -+xendump_page_size(void) -+{ -+ if (!XENDUMP_VALID()) -+ return 0; -+ -+ return xd->page_size; -+} -+ -+/* -+ * xendump_free_memory(), and xendump_memory_used() -+ * are debug only, and typically unnecessary to implement. -+ */ -+int -+xendump_free_memory(void) -+{ -+ return 0; -+} -+ -+int -+xendump_memory_used(void) -+{ -+ return 0; -+} -+ -+/* -+ * This function is dump-type independent, used here to -+ * to dump the xendump_data structure contents. -+ */ -+int -+xendump_memory_dump(FILE *fp) -+{ -+ int i, linefeed, used, others; -+ ulong *ulongptr; -+ Elf32_Off offset32; -+ Elf64_Off offset64; -+ FILE *fpsave; -+ -+ fprintf(fp, " flags: %lx (", xd->flags); -+ others = 0; -+ if (xd->flags & XENDUMP_LOCAL) -+ fprintf(fp, "%sXENDUMP_LOCAL", others++ ? "|" : ""); -+ if (xd->flags & XC_SAVE) -+ fprintf(fp, "%sXC_SAVE", others++ ? 
"|" : ""); -+ if (xd->flags & XC_CORE_ORIG) -+ fprintf(fp, "%sXC_CORE_ORIG", others++ ? "|" : ""); -+ if (xd->flags & XC_CORE_ELF) -+ fprintf(fp, "%sXC_CORE_ELF", others++ ? "|" : ""); -+ if (xd->flags & XC_CORE_P2M_CREATE) -+ fprintf(fp, "%sXC_CORE_P2M_CREATE", others++ ? "|" : ""); -+ if (xd->flags & XC_CORE_PFN_CREATE) -+ fprintf(fp, "%sXC_CORE_PFN_CREATE", others++ ? "|" : ""); -+ if (xd->flags & XC_CORE_NO_P2M) -+ fprintf(fp, "%sXC_CORE_NO_P2M", others++ ? "|" : ""); -+ if (xd->flags & XC_SAVE_IA64) -+ fprintf(fp, "%sXC_SAVE_IA64", others++ ? "|" : ""); -+ if (xd->flags & XC_CORE_64BIT_HOST) -+ fprintf(fp, "%sXC_CORE_64BIT_HOST", others++ ? "|" : ""); -+ fprintf(fp, ")\n"); -+ fprintf(fp, " xfd: %d\n", xd->xfd); -+ fprintf(fp, " page_size: %d\n", xd->page_size); -+ fprintf(fp, " ofp: %lx\n", (ulong)xd->ofp); -+ fprintf(fp, " page: %lx\n", (ulong)xd->page); -+ fprintf(fp, " panic_pc: %lx\n", xd->panic_pc); -+ fprintf(fp, " panic_sp: %lx\n", xd->panic_sp); -+ fprintf(fp, " accesses: %ld\n", (ulong)xd->accesses); -+ fprintf(fp, " cache_hits: %ld ", (ulong)xd->cache_hits); -+ if (xd->accesses) -+ fprintf(fp, "(%ld%%)\n", xd->cache_hits * 100 / xd->accesses); -+ else -+ fprintf(fp, "\n"); -+ fprintf(fp, " last_pfn: %ld\n", xd->last_pfn); -+ fprintf(fp, " redundant: %ld ", (ulong)xd->redundant); -+ if (xd->accesses) -+ fprintf(fp, "(%ld%%)\n", xd->redundant * 100 / xd->accesses); -+ else -+ fprintf(fp, "\n"); -+ for (i = used = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) -+ if (xd->poc && xd->poc[i].cnt) -+ used++; -+ if (xd->poc) -+ fprintf(fp, " poc[%d]: %lx %s", PFN_TO_OFFSET_CACHE_ENTRIES, -+ (ulong)xd->poc, xd->poc ? "" : "(none)"); -+ else -+ fprintf(fp, " poc[0]: (unused)\n"); -+ for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) { -+ if (!xd->poc) -+ break; -+ if (!xd->poc[i].cnt) { -+ if (!i) -+ fprintf(fp, "(none used)\n"); -+ break; -+ } else if (!i) -+ fprintf(fp, "(%d used)\n", used); -+ if (CRASHDEBUG(2)) -+ fprintf(fp, -+ " [%d]: pfn: %ld (0x%lx) count: %ld file_offset: %llx\n", -+ i, -+ xd->poc[i].pfn, -+ xd->poc[i].pfn, -+ xd->poc[i].cnt, -+ (ulonglong)xd->poc[i].file_offset); -+ } -+ if (!xd->poc) -+ fprintf(fp, "\n"); -+ -+ fprintf(fp, "\n xc_save:\n"); -+ fprintf(fp, " nr_pfns: %ld (0x%lx)\n", -+ xd->xc_save.nr_pfns, xd->xc_save.nr_pfns); -+ fprintf(fp, " vmconfig_size: %d (0x%x)\n", xd->xc_save.vmconfig_size, -+ xd->xc_save.vmconfig_size); -+ fprintf(fp, " vmconfig_buf: %lx\n", (ulong)xd->xc_save.vmconfig_buf); -+ if (xd->flags & XC_SAVE) -+ xen_dump_vmconfig(fp); -+ fprintf(fp, " p2m_frame_list: %lx ", (ulong)xd->xc_save.p2m_frame_list); -+ if ((xd->flags & XC_SAVE) && xd->xc_save.p2m_frame_list) { -+ fprintf(fp, "\n"); -+ ulongptr = xd->xc_save.p2m_frame_list; -+ for (i = 0; i < P2M_FL_ENTRIES; i++, ulongptr++) -+ fprintf(fp, "%ld ", *ulongptr); -+ fprintf(fp, "\n"); -+ } else -+ fprintf(fp, "(none)\n"); -+ fprintf(fp, " pfns_not: %d\n", xd->xc_save.pfns_not); -+ fprintf(fp, " pfns_not_offset: %lld\n", -+ (ulonglong)xd->xc_save.pfns_not_offset); -+ fprintf(fp, " vcpu_ctxt_offset: %lld\n", -+ (ulonglong)xd->xc_save.vcpu_ctxt_offset); -+ fprintf(fp, " shared_info_page_offset: %lld\n", -+ (ulonglong)xd->xc_save.shared_info_page_offset); -+ fprintf(fp, " region_pfn_type: %lx\n", (ulong)xd->xc_save.region_pfn_type); -+ fprintf(fp, " batch_count: %ld\n", (ulong)xd->xc_save.batch_count); -+ fprintf(fp, " batch_offsets: %lx %s\n", -+ (ulong)xd->xc_save.batch_offsets, -+ xd->xc_save.batch_offsets ? 
"" : "(none)"); -+ for (i = linefeed = 0; i < xd->xc_save.batch_count; i++) { -+ fprintf(fp, "[%d]: %llx ", i, -+ (ulonglong)xd->xc_save.batch_offsets[i]); -+ if (((i+1)%4) == 0) { -+ fprintf(fp, "\n"); -+ linefeed = FALSE; -+ } else -+ linefeed = TRUE; -+ } -+ if (linefeed) -+ fprintf(fp, "\n"); -+ fprintf(fp, " ia64_version: %ld\n", (ulong)xd->xc_save.ia64_version); -+ fprintf(fp, " ia64_page_offsets: %lx ", (ulong)xd->xc_save.ia64_page_offsets); -+ if (xd->xc_save.ia64_page_offsets) -+ fprintf(fp, "(%ld entries)\n\n", xd->xc_save.nr_pfns); -+ else -+ fprintf(fp, "(none)\n\n"); -+ -+ fprintf(fp, " xc_core:\n"); -+ fprintf(fp, " header:\n"); -+ fprintf(fp, " xch_magic: %x ", -+ xd->xc_core.header.xch_magic); -+ if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC) -+ fprintf(fp, "(XC_CORE_MAGIC)\n"); -+ else if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC_HVM) -+ fprintf(fp, "(XC_CORE_MAGIC_HVM)\n"); -+ else -+ fprintf(fp, "(unknown)\n"); -+ fprintf(fp, " xch_nr_vcpus: %d\n", -+ xd->xc_core.header.xch_nr_vcpus); -+ fprintf(fp, " xch_nr_pages: %d (0x%x)\n", -+ xd->xc_core.header.xch_nr_pages, -+ xd->xc_core.header.xch_nr_pages); -+ fprintf(fp, " xch_ctxt_offset: %d (0x%x)\n", -+ xd->xc_core.header.xch_ctxt_offset, -+ xd->xc_core.header.xch_ctxt_offset); -+ fprintf(fp, " xch_index_offset: %d (0x%x)\n", -+ xd->xc_core.header.xch_index_offset, -+ xd->xc_core.header.xch_index_offset); -+ fprintf(fp, " xch_pages_offset: %d (0x%x)\n", -+ xd->xc_core.header.xch_pages_offset, -+ xd->xc_core.header.xch_pages_offset); -+ -+ fprintf(fp, " elf_class: %s\n", xd->xc_core.elf_class == ELFCLASS64 ? "ELFCLASS64" : -+ xd->xc_core.elf_class == ELFCLASS32 ? "ELFCLASS32" : "n/a"); -+ fprintf(fp, " elf_strtab_offset: %lld (0x%llx)\n", -+ (ulonglong)xd->xc_core.elf_strtab_offset, -+ (ulonglong)xd->xc_core.elf_strtab_offset); -+ fprintf(fp, " format_version: %016llx\n", -+ (ulonglong)xd->xc_core.format_version); -+ fprintf(fp, " shared_info_offset: %lld (0x%llx)\n", -+ (ulonglong)xd->xc_core.shared_info_offset, -+ (ulonglong)xd->xc_core.shared_info_offset); -+ if (machine_type("IA64")) -+ fprintf(fp, " ia64_mapped_regs_offset: %lld (0x%llx)\n", -+ (ulonglong)xd->xc_core.ia64_mapped_regs_offset, -+ (ulonglong)xd->xc_core.ia64_mapped_regs_offset); -+ fprintf(fp, " elf_index_pfn[%d]: %s", INDEX_PFN_COUNT, -+ xd->xc_core.elf_class ? "\n" : "(none used)\n"); -+ if (xd->xc_core.elf_class) { -+ for (i = 0; i < INDEX_PFN_COUNT; i++) { -+ fprintf(fp, "%ld:%ld ", -+ xd->xc_core.elf_index_pfn[i].index, -+ xd->xc_core.elf_index_pfn[i].pfn); -+ } -+ fprintf(fp, "\n"); -+ } -+ fprintf(fp, " last_batch:\n"); -+ fprintf(fp, " index: %ld (%ld - %ld)\n", -+ xd->xc_core.last_batch.index, -+ xd->xc_core.last_batch.start, xd->xc_core.last_batch.end); -+ fprintf(fp, " accesses: %ld\n", -+ xd->xc_core.last_batch.accesses); -+ fprintf(fp, " duplicates: %ld ", -+ xd->xc_core.last_batch.duplicates); -+ if (xd->xc_core.last_batch.accesses) -+ fprintf(fp, "(%ld%%)\n", -+ xd->xc_core.last_batch.duplicates * 100 / -+ xd->xc_core.last_batch.accesses); -+ else -+ fprintf(fp, "\n"); -+ -+ fprintf(fp, " elf32: %lx\n", (ulong)xd->xc_core.elf32); -+ fprintf(fp, " elf64: %lx\n", (ulong)xd->xc_core.elf64); -+ -+ fprintf(fp, " p2m_frames: %d\n", -+ xd->xc_core.p2m_frames); -+ fprintf(fp, " p2m_frame_index_list: %s\n", -+ (xd->flags & (XC_CORE_NO_P2M|XC_SAVE)) ? "(not used)" : ""); -+ for (i = 0; i < xd->xc_core.p2m_frames; i++) { -+ fprintf(fp, "%ld ", -+ xd->xc_core.p2m_frame_index_list[i]); -+ } -+ fprintf(fp, xd->xc_core.p2m_frames ? 
"\n" : ""); -+ -+ if ((xd->flags & XC_CORE_ORIG) && CRASHDEBUG(8)) -+ xc_core_mfns(XENDUMP_LOCAL, fp); -+ -+ switch (xd->xc_core.elf_class) -+ { -+ case ELFCLASS32: -+ fpsave = xd->ofp; -+ xd->ofp = fp; -+ xc_core_elf_dump(); -+ offset32 = xd->xc_core.elf32->e_shoff; -+ for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { -+ xc_core_dump_Elf32_Shdr(offset32, ELFREAD); -+ offset32 += xd->xc_core.elf32->e_shentsize; -+ } -+ xendump_print("\n"); -+ xd->ofp = fpsave; -+ break; -+ -+ case ELFCLASS64: -+ fpsave = xd->ofp; -+ xd->ofp = fp; -+ xc_core_elf_dump(); -+ offset64 = xd->xc_core.elf64->e_shoff; -+ for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { -+ xc_core_dump_Elf64_Shdr(offset64, ELFREAD); -+ offset64 += xd->xc_core.elf64->e_shentsize; -+ } -+ xendump_print("\n"); -+ xd->ofp = fpsave; -+ break; -+ } -+ -+ return 0; -+} -+ -+static void -+xen_dump_vmconfig(FILE *fp) -+{ -+ int i, opens, closes; -+ char *p; -+ -+ opens = closes = 0; -+ p = xd->xc_save.vmconfig_buf; -+ for (i = 0; i < xd->xc_save.vmconfig_size; i++, p++) { -+ if (ascii(*p)) -+ fprintf(fp, "%c", *p); -+ else -+ fprintf(fp, "<%x>", *p); -+ -+ if (*p == '(') -+ opens++; -+ else if (*p == ')') -+ closes++; -+ } -+ fprintf(fp, "\n"); -+ -+ if (opens != closes) -+ error(WARNING, "invalid vmconfig contents?\n"); -+} -+ -+/* -+ * Looking at the active set, try to determine who panicked, -+ * or who was the "suspend" kernel thread. -+ */ -+ulong get_xendump_panic_task(void) -+{ -+ int i; -+ ulong task; -+ struct task_context *tc; -+ -+ switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) -+ { -+ case XC_CORE_ORIG: -+ case XC_CORE_ELF: -+ if (machdep->xendump_panic_task) -+ return (machdep->xendump_panic_task((void *)xd)); -+ break; -+ -+ case XC_SAVE: -+ for (i = 0; i < NR_CPUS; i++) { -+ if (!(task = tt->active_set[i])) -+ continue; -+ tc = task_to_context(task); -+ if (is_kernel_thread(task) && -+ STREQ(tc->comm, "suspend")) -+ return tc->task; -+ } -+ break; -+ } -+ -+ return NO_TASK; -+} -+ -+/* -+ * Figure out the back trace hooks. -+ */ -+void get_xendump_regs(struct bt_info *bt, ulong *pc, ulong *sp) -+{ -+ int i; -+ ulong *up; -+ -+ if ((tt->panic_task == bt->task) && -+ (xd->panic_pc && xd->panic_sp)) { -+ *pc = xd->panic_pc; -+ *sp = xd->panic_sp; -+ return; -+ } -+ -+ switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) -+ { -+ case XC_CORE_ORIG: -+ case XC_CORE_ELF: -+ if (machdep->get_xendump_regs) -+ return (machdep->get_xendump_regs(xd, bt, pc, sp)); -+ break; -+ -+ case XC_SAVE: -+ if (tt->panic_task != bt->task) -+ break; -+ -+ for (i = 0, up = (ulong *)bt->stackbuf; -+ i < LONGS_PER_STACK; i++, up++) { -+ if (is_kernel_text(*up) && -+ (STREQ(closest_symbol(*up), -+ "__do_suspend"))) { -+ *pc = *up; -+ *sp = tt->flags & THREAD_INFO ? -+ bt->tc->thread_info + -+ (i * sizeof(long)) : -+ bt->task + -+ (i * sizeof(long)); -+ xd->panic_pc = *pc; -+ xd->panic_sp = *sp; -+ return; -+ } -+ } -+ } -+ -+ machdep->get_stack_frame(bt, pc, sp); -+} -+ -+/* -+ * Farm out most of the work to the proper architecture to create -+ * the p2m table. For ELF core dumps, create the index;pfn table. 
-+ */ -+static void -+xc_core_create_pfn_tables(void) -+{ -+ if (xd->flags & XC_CORE_P2M_CREATE) { -+ if (!machdep->xendump_p2m_create) -+ error(FATAL, -+ "xen xc_core dumpfiles not supported on this architecture"); -+ -+ if (!machdep->xendump_p2m_create((void *)xd)) -+ error(FATAL, -+ "cannot create xen pfn-to-mfn mapping\n"); -+ } -+ -+ if (xd->flags & XC_CORE_ELF) -+ xc_core_elf_pfn_init(); -+ -+ xd->flags &= ~(XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE); -+ -+ if (CRASHDEBUG(1)) -+ xendump_memory_dump(xd->ofp); -+} -+ -+/* -+ * Find the page index containing the mfn, and read the -+ * machine page into the buffer. -+ */ -+char * -+xc_core_mfn_to_page(ulong mfn, char *pgbuf) -+{ -+ int i, b, idx, done; -+ ulong tmp[MAX_BATCH_SIZE]; -+ off_t offset; -+ uint nr_pages; -+ -+ if (xd->flags & XC_CORE_ELF) -+ return xc_core_elf_mfn_to_page(mfn, pgbuf); -+ -+ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset, -+ SEEK_SET) == -1) { -+ error(INFO, "cannot lseek to page index\n"); -+ return NULL; -+ } -+ -+ nr_pages = xd->xc_core.header.xch_nr_pages; -+ if (xd->flags & XC_CORE_64BIT_HOST) -+ nr_pages *= 2; -+ -+ for (b = 0, idx = -1, done = FALSE; -+ !done && (b < nr_pages); b += MAX_BATCH_SIZE) { -+ -+ if (read(xd->xfd, tmp, sizeof(ulong) * MAX_BATCH_SIZE) != -+ (MAX_BATCH_SIZE * sizeof(ulong))) { -+ error(INFO, "cannot read index page %d\n", b); -+ return NULL; -+ } -+ -+ for (i = 0; i < MAX_BATCH_SIZE; i++) { -+ if ((b+i) >= nr_pages) { -+ done = TRUE; -+ break; -+ } -+ if (tmp[i] == mfn) { -+ idx = i+b; -+ if (CRASHDEBUG(4)) -+ fprintf(xd->ofp, -+ "page: found mfn 0x%lx (%ld) at index %d\n", -+ mfn, mfn, idx); -+ done = TRUE; -+ } -+ } -+ } -+ -+ if (idx == -1) { -+ error(INFO, "cannot find mfn %ld (0x%lx) in page index\n", -+ mfn, mfn); -+ return NULL; -+ } -+ -+ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset, -+ SEEK_SET) == -1) { -+ error(INFO, "cannot lseek to xch_pages_offset\n"); -+ return NULL; -+ } -+ -+ offset = (off_t)(idx) * (off_t)xd->page_size; -+ -+ if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { -+ error(INFO, "cannot lseek to mfn-specified page\n"); -+ return NULL; -+ } -+ -+ if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) { -+ error(INFO, "cannot read mfn-specified page\n"); -+ return NULL; -+ } -+ -+ return pgbuf; -+} -+ -+/* -+ * Find the page index containing the mfn, and read the -+ * machine page into the buffer. 
-+ */ -+static char * -+xc_core_elf_mfn_to_page(ulong mfn, char *pgbuf) -+{ -+ int i, b, idx, done; -+ off_t offset; -+ size_t size; -+ uint nr_pages; -+ ulong tmp; -+ struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; -+ -+ offset = xd->xc_core.header.xch_index_offset; -+ size = sizeof(struct xen_dumpcore_p2m) * MAX_BATCH_SIZE; -+ nr_pages = xd->xc_core.header.xch_nr_pages; -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ error(FATAL, "cannot lseek to page index\n"); -+ -+ for (b = 0, idx = -1, done = FALSE; -+ !done && (b < nr_pages); b += MAX_BATCH_SIZE) { -+ -+ if (read(xd->xfd, &p2m_batch[0], size) != size) { -+ error(INFO, "cannot read index page %d\n", b); -+ return NULL; -+ } -+ -+ for (i = 0; i < MAX_BATCH_SIZE; i++) { -+ if ((b+i) >= nr_pages) { -+ done = TRUE; -+ break; -+ } -+ -+ tmp = (ulong)p2m_batch[i].gmfn; -+ -+ if (tmp == mfn) { -+ idx = i+b; -+ if (CRASHDEBUG(4)) -+ fprintf(xd->ofp, -+ "page: found mfn 0x%lx (%ld) at index %d\n", -+ mfn, mfn, idx); -+ done = TRUE; -+ } -+ } -+ } -+ -+ if (idx == -1) { -+ error(INFO, "cannot find mfn %ld (0x%lx) in page index\n", -+ mfn, mfn); -+ return NULL; -+ } -+ -+ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset, -+ SEEK_SET) == -1) -+ error(FATAL, "cannot lseek to xch_pages_offset\n"); -+ -+ offset = (off_t)(idx) * (off_t)xd->page_size; -+ -+ if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { -+ error(INFO, "cannot lseek to mfn-specified page\n"); -+ return NULL; -+ } -+ -+ if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) { -+ error(INFO, "cannot read mfn-specified page\n"); -+ return NULL; -+ } -+ -+ return pgbuf; -+} -+ -+ -+/* -+ * Find and return the page index containing the mfn. -+ */ -+int -+xc_core_mfn_to_page_index(ulong mfn) -+{ -+ int i, b; -+ ulong tmp[MAX_BATCH_SIZE]; -+ uint nr_pages; -+ -+ if (xd->flags & XC_CORE_ELF) -+ return xc_core_elf_mfn_to_page_index(mfn); -+ -+ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset, -+ SEEK_SET) == -1) { -+ error(INFO, "cannot lseek to page index\n"); -+ return MFN_NOT_FOUND; -+ } -+ -+ nr_pages = xd->xc_core.header.xch_nr_pages; -+ if (xd->flags & XC_CORE_64BIT_HOST) -+ nr_pages *= 2; -+ -+ for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { -+ -+ if (read(xd->xfd, tmp, sizeof(ulong) * MAX_BATCH_SIZE) != -+ (MAX_BATCH_SIZE * sizeof(ulong))) { -+ error(INFO, "cannot read index page %d\n", b); -+ return MFN_NOT_FOUND; -+ } -+ -+ for (i = 0; i < MAX_BATCH_SIZE; i++) { -+ if ((b+i) >= nr_pages) -+ break; -+ -+ if (tmp[i] == mfn) { -+ if (CRASHDEBUG(4)) -+ fprintf(xd->ofp, -+ "index: batch: %d found mfn %ld (0x%lx) at index %d\n", -+ b/MAX_BATCH_SIZE, mfn, mfn, i+b); -+ return (i+b); -+ } -+ } -+ } -+ -+ return MFN_NOT_FOUND; -+} -+ -+/* -+ * Find and return the page index containing the mfn. 
-+ */ -+static int -+xc_core_elf_mfn_to_page_index(ulong mfn) -+{ -+ int i, b; -+ off_t offset; -+ size_t size; -+ uint nr_pages; -+ ulong tmp; -+ struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; -+ -+ offset = xd->xc_core.header.xch_index_offset; -+ size = sizeof(struct xen_dumpcore_p2m) * MAX_BATCH_SIZE; -+ nr_pages = xd->xc_core.header.xch_nr_pages; -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ error(FATAL, "cannot lseek to page index\n"); -+ -+ for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { -+ -+ if (read(xd->xfd, &p2m_batch[0], size) != size) { -+ error(INFO, "cannot read index page %d\n", b); -+ return MFN_NOT_FOUND; -+ } -+ -+ for (i = 0; i < MAX_BATCH_SIZE; i++) { -+ if ((b+i) >= nr_pages) -+ break; -+ -+ tmp = (ulong)p2m_batch[i].gmfn; -+ -+ if (tmp == mfn) { -+ if (CRASHDEBUG(4)) -+ fprintf(xd->ofp, -+ "index: batch: %d found mfn %ld (0x%lx) at index %d\n", -+ b/MAX_BATCH_SIZE, mfn, mfn, i+b); -+ return (i+b); -+ } -+ } -+ } -+ -+ return MFN_NOT_FOUND; -+} -+ -+ -+/* -+ * XC_CORE mfn-related utility function. -+ */ -+static int -+xc_core_mfns(ulong arg, FILE *ofp) -+{ -+ int i, b; -+ uint nr_pages; -+ ulong tmp[MAX_BATCH_SIZE]; -+ ulonglong tmp64[MAX_BATCH_SIZE]; -+ -+ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset, -+ SEEK_SET) == -1) { -+ error(INFO, "cannot lseek to page index\n"); -+ return FALSE; -+ } -+ -+ switch (arg) -+ { -+ case XC_CORE_64BIT_HOST: -+ /* -+ * Determine whether this is a 32-bit guest xendump that -+ * was taken on a 64-bit xen host. -+ */ -+ if (machine_type("X86_64") || machine_type("IA64")) -+ return FALSE; -+check_next_4: -+ if (read(xd->xfd, tmp, sizeof(ulong) * 4) != (4 * sizeof(ulong))) { -+ error(INFO, "cannot read index pages\n"); -+ return FALSE; -+ } -+ -+ if ((tmp[0] == 0xffffffff) || (tmp[1] == 0xffffffff) || -+ (tmp[2] == 0xffffffff) || (tmp[3] == 0xffffffff) || -+ (!tmp[0] && !tmp[1]) || (!tmp[2] && !tmp[3])) -+ goto check_next_4; -+ -+ if (CRASHDEBUG(2)) -+ fprintf(ofp, "mfns: %08lx %08lx %08lx %08lx\n", -+ tmp[0], tmp[1], tmp[2], tmp[3]); -+ -+ if (tmp[0] && !tmp[1] && tmp[2] && !tmp[3]) -+ return TRUE; -+ else -+ return FALSE; -+ -+ case XENDUMP_LOCAL: -+ if (BITS64() || (xd->flags & XC_CORE_64BIT_HOST)) -+ goto show_64bit_mfns; -+ -+ fprintf(ofp, "xch_index_offset mfn list:\n"); -+ -+ nr_pages = xd->xc_core.header.xch_nr_pages; -+ -+ for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { -+ if (read(xd->xfd, tmp, sizeof(ulong) * MAX_BATCH_SIZE) != -+ (MAX_BATCH_SIZE * sizeof(ulong))) { -+ error(INFO, "cannot read index page %d\n", b); -+ return FALSE; -+ } -+ -+ if (b) fprintf(ofp, "\n"); -+ -+ for (i = 0; i < MAX_BATCH_SIZE; i++) { -+ if ((b+i) >= nr_pages) -+ break; -+ if ((i%8) == 0) -+ fprintf(ofp, "%s[%d]:", -+ i ? "\n" : "", b+i); -+ if (tmp[i] == 0xffffffff) -+ fprintf(ofp, " INVALID"); -+ else -+ fprintf(ofp, " %lx", tmp[i]); -+ } -+ } -+ -+ fprintf(ofp, "\nxch_nr_pages: %d\n", -+ xd->xc_core.header.xch_nr_pages); -+ return TRUE; -+ -+show_64bit_mfns: -+ fprintf(ofp, "xch_index_offset mfn list: %s\n", -+ BITS32() ? "(64-bit mfns)" : ""); -+ -+ nr_pages = xd->xc_core.header.xch_nr_pages; -+ -+ for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { -+ if (read(xd->xfd, tmp64, sizeof(ulonglong) * MAX_BATCH_SIZE) != -+ (MAX_BATCH_SIZE * sizeof(ulonglong))) { -+ error(INFO, "cannot read index page %d\n", b); -+ return FALSE; -+ } -+ -+ if (b) fprintf(ofp, "\n"); -+ -+ for (i = 0; i < MAX_BATCH_SIZE; i++) { -+ if ((b+i) >= nr_pages) -+ break; -+ if ((i%8) == 0) -+ fprintf(ofp, "%s[%d]:", -+ i ? 
"\n" : "", b+i); -+ if (tmp64[i] == 0xffffffffffffffffULL) -+ fprintf(ofp, " INVALID"); -+ else -+ fprintf(ofp, " %llx", tmp64[i]); -+ } -+ } -+ -+ fprintf(ofp, "\nxch_nr_pages: %d\n", nr_pages); -+ return TRUE; -+ -+ default: -+ return FALSE; -+ } -+} -+ -+/* -+ * Given a normal kernel pfn, determine the page index in the dumpfile. -+ * -+ * - First determine which of the pages making up the -+ * phys_to_machine_mapping[] array would contain the pfn. -+ * - From the phys_to_machine_mapping page, determine the mfn. -+ * - Find the mfn in the dumpfile page index. -+ */ -+#define PFNS_PER_PAGE (xd->page_size/sizeof(unsigned long)) -+ -+static ulong -+xc_core_pfn_to_page_index(ulong pfn) -+{ -+ ulong idx, p2m_idx, mfn_idx; -+ ulong *up, mfn; -+ off_t offset; -+ -+ /* -+ * This function does not apply when there's no p2m -+ * mapping and/or if this is an ELF format dumpfile. -+ */ -+ switch (xd->flags & (XC_CORE_NO_P2M|XC_CORE_ELF)) -+ { -+ case (XC_CORE_NO_P2M|XC_CORE_ELF): -+ return xc_core_elf_pfn_valid(pfn); -+ -+ case XC_CORE_NO_P2M: -+ return(xc_core_pfn_valid(pfn) ? pfn : PFN_NOT_FOUND); -+ -+ case XC_CORE_ELF: -+ return xc_core_elf_pfn_to_page_index(pfn); -+ } -+ -+ idx = pfn/PFNS_PER_PAGE; -+ -+ if (idx >= xd->xc_core.p2m_frames) { -+ error(INFO, "pfn: %lx is too large for dumpfile\n", -+ pfn); -+ return PFN_NOT_FOUND; -+ } -+ -+ p2m_idx = xd->xc_core.p2m_frame_index_list[idx]; -+ -+ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset, -+ SEEK_SET) == -1) { -+ error(INFO, "cannot lseek to xch_pages_offset\n"); -+ return PFN_NOT_FOUND; -+ } -+ -+ offset = (off_t)(p2m_idx) * (off_t)xd->page_size; -+ -+ if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { -+ error(INFO, "cannot lseek to pfn-specified page\n"); -+ return PFN_NOT_FOUND; -+ } -+ -+ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) { -+ error(INFO, "cannot read pfn-specified page\n"); -+ return PFN_NOT_FOUND; -+ } -+ -+ up = (ulong *)xd->page; -+ up += (pfn%PFNS_PER_PAGE); -+ -+ mfn = *up; -+ -+ if ((mfn_idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) { -+ error(INFO, "cannot find mfn in page index\n"); -+ return PFN_NOT_FOUND; -+ } -+ -+ return mfn_idx; -+} -+ -+ -+/* -+ * Search the .xen_p2m array for the target pfn, starting at a -+ * higher batch if appropriate. This presumes that the pfns -+ * are laid out in ascending order. -+ */ -+static ulong -+xc_core_elf_pfn_to_page_index(ulong pfn) -+{ -+ int i, b, start_index; -+ off_t offset; -+ size_t size; -+ uint nr_pages; -+ ulong tmp; -+ struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; -+ -+ offset = xd->xc_core.header.xch_index_offset; -+ size = sizeof(struct xen_dumpcore_p2m) * MAX_BATCH_SIZE; -+ nr_pages = xd->xc_core.header.xch_nr_pages; -+ -+ /* -+ * Initialize the start_index. 
-+ */ -+ xd->xc_core.last_batch.accesses++; -+ -+ if ((pfn >= xd->xc_core.last_batch.start) && -+ (pfn <= xd->xc_core.last_batch.end)) { -+ xd->xc_core.last_batch.duplicates++; -+ start_index = xd->xc_core.last_batch.index; -+ } else { -+ for (i = 0; i <= INDEX_PFN_COUNT; i++) { -+ if ((i == INDEX_PFN_COUNT) || -+ (pfn < xd->xc_core.elf_index_pfn[i].pfn)) { -+ if (--i < 0) -+ i = 0; -+ start_index = xd->xc_core.elf_index_pfn[i].index; -+ break; -+ } -+ } -+ } -+ -+ offset += (start_index * sizeof(struct xen_dumpcore_p2m)); -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ error(FATAL, "cannot lseek to page index\n"); -+ -+ for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) { -+ -+ if (read(xd->xfd, &p2m_batch[0], size) != size) { -+ error(INFO, "cannot read index page %d\n", b); -+ return PFN_NOT_FOUND; -+ } -+ -+ for (i = 0; i < MAX_BATCH_SIZE; i++) { -+ if ((b+i) >= nr_pages) -+ break; -+ -+ tmp = (ulong)p2m_batch[i].pfn; -+ -+ if (tmp == pfn) { -+ if (CRASHDEBUG(4)) -+ fprintf(xd->ofp, -+ "index: batch: %d found pfn %ld (0x%lx) at index %d\n", -+ b/MAX_BATCH_SIZE, pfn, pfn, i+b); -+ -+ if ((b+MAX_BATCH_SIZE) < nr_pages) { -+ xd->xc_core.last_batch.index = b; -+ xd->xc_core.last_batch.start = p2m_batch[0].pfn; -+ xd->xc_core.last_batch.end = p2m_batch[MAX_BATCH_SIZE-1].pfn; -+ } -+ -+ return (i+b); -+ } -+ } -+ } -+ -+ return PFN_NOT_FOUND; -+} -+ -+/* -+ * In xendumps containing INVALID_MFN markers in the page index, -+ * return the validity of the pfn. -+ */ -+static int -+xc_core_pfn_valid(ulong pfn) -+{ -+ ulong mfn; -+ off_t offset; -+ -+ if (pfn >= (ulong)xd->xc_core.header.xch_nr_pages) -+ return FALSE; -+ -+ offset = (off_t)xd->xc_core.header.xch_index_offset; -+ -+ if (xd->flags & XC_CORE_64BIT_HOST) -+ offset += (off_t)(pfn * sizeof(ulonglong)); -+ else -+ offset += (off_t)(pfn * sizeof(ulong)); -+ -+ /* -+ * The lseek and read should never fail, so report -+ * any errors unconditionally. -+ */ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) { -+ error(INFO, -+ "xendump: cannot lseek to page index for pfn %lx\n", -+ pfn); -+ return FALSE; -+ } -+ -+ if (read(xd->xfd, &mfn, sizeof(ulong)) != sizeof(ulong)) { -+ error(INFO, -+ "xendump: cannot read index page for pfn %lx\n", -+ pfn); -+ return FALSE; -+ } -+ -+ /* -+ * If it's an invalid mfn, let the caller decide whether -+ * to display an error message (unless debugging). -+ */ -+ if (mfn == INVALID_MFN) { -+ if (CRASHDEBUG(1)) -+ error(INFO, -+ "xendump: pfn %lx contains INVALID_MFN\n", -+ pfn); -+ return FALSE; -+ } -+ -+ return TRUE; -+} -+ -+/* -+ * Return the index into the .xen_pfn array containing the pfn. -+ * If not found, return PFN_NOT_FOUND. -+ */ -+static ulong -+xc_core_elf_pfn_valid(ulong pfn) -+{ -+ int i, b, start_index; -+ off_t offset; -+ size_t size; -+ uint nr_pages; -+ ulong tmp; -+ uint64_t pfn_batch[MAX_BATCH_SIZE]; -+ -+ offset = xd->xc_core.header.xch_index_offset; -+ size = sizeof(uint64_t) * MAX_BATCH_SIZE; -+ nr_pages = xd->xc_core.header.xch_nr_pages; -+ -+ /* -+ * Initialize the start_index. 
-+ */ -+ xd->xc_core.last_batch.accesses++; -+ -+ if ((pfn >= xd->xc_core.last_batch.start) && -+ (pfn <= xd->xc_core.last_batch.end)) { -+ xd->xc_core.last_batch.duplicates++; -+ start_index = xd->xc_core.last_batch.index; -+ } else { -+ for (i = 0; i <= INDEX_PFN_COUNT; i++) { -+ if ((i == INDEX_PFN_COUNT) || -+ (pfn < xd->xc_core.elf_index_pfn[i].pfn)) { -+ if (--i < 0) -+ i = 0; -+ start_index = xd->xc_core.elf_index_pfn[i].index; -+ break; -+ } -+ } -+ } -+ -+ offset += (start_index * sizeof(uint64_t)); -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ error(FATAL, "cannot lseek to page index\n"); -+ -+ for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) { -+ -+ if (read(xd->xfd, &pfn_batch[0], size) != size) { -+ error(INFO, "cannot read index page %d\n", b); -+ return PFN_NOT_FOUND; -+ } -+ -+ for (i = 0; i < MAX_BATCH_SIZE; i++) { -+ if ((b+i) >= nr_pages) -+ break; -+ -+ tmp = (ulong)pfn_batch[i]; -+ -+ if (tmp == pfn) { -+ if (CRASHDEBUG(4)) -+ fprintf(xd->ofp, -+ "index: batch: %d found pfn %ld (0x%lx) at index %d\n", -+ b/MAX_BATCH_SIZE, pfn, pfn, i+b); -+ -+ if ((b+MAX_BATCH_SIZE) < nr_pages) { -+ xd->xc_core.last_batch.index = b; -+ xd->xc_core.last_batch.start = (ulong)pfn_batch[0]; -+ xd->xc_core.last_batch.end = (ulong)pfn_batch[MAX_BATCH_SIZE-1]; -+ } -+ -+ return (i+b); -+ } -+ } -+ } -+ -+ return PFN_NOT_FOUND; -+} -+ -+/* -+ * Store the panic task's stack hooks from where it was found -+ * in get_active_set_panic_task(). -+ */ -+void -+xendump_panic_hook(char *stack) -+{ -+ int i, err, argc; -+ char *arglist[MAXARGS]; -+ char buf[BUFSIZE]; -+ ulong value, *sp; -+ -+ if (machine_type("IA64")) /* needs switch_stack address */ -+ return; -+ -+ strcpy(buf, stack); -+ -+ argc = parse_line(buf, arglist); -+ -+ if ((value = htol(strip_ending_char(arglist[0], ':'), -+ RETURN_ON_ERROR, &err)) == BADADDR) -+ return; -+ for (sp = (ulong *)value, i = 1; i < argc; i++, sp++) { -+ if (strstr(arglist[i], "xen_panic_event")) { -+ if (!readmem((ulong)sp, KVADDR, &value, -+ sizeof(ulong), "xen_panic_event address", -+ RETURN_ON_ERROR)) -+ return; -+ -+ xd->panic_sp = (ulong)sp; -+ xd->panic_pc = value; -+ } else if (strstr(arglist[i], "panic") && !xd->panic_sp) { -+ if (!readmem((ulong)sp, KVADDR, &value, -+ sizeof(ulong), "xen_panic_event address", -+ RETURN_ON_ERROR)) -+ return; -+ -+ xd->panic_sp = (ulong)sp; -+ xd->panic_pc = value; -+ } -+ } -+} -+ -+static void -+xendump_print(char *fmt, ...) -+{ -+ char buf[BUFSIZE]; -+ va_list ap; -+ -+ if (!fmt || !strlen(fmt)) -+ return; -+ -+ va_start(ap, fmt); -+ (void)vsnprintf(buf, BUFSIZE, fmt, ap); -+ va_end(ap); -+ -+ if (xd->ofp) -+ fprintf(xd->ofp, buf); -+ else if (!XENDUMP_VALID() && CRASHDEBUG(7)) -+ fprintf(stderr, buf); -+ -+} -+ -+/* -+ * Support for xc_core ELF dumpfile format. 
-+ */ -+static int -+xc_core_elf_verify(char *buf) -+{ -+ int i; -+ Elf32_Ehdr *elf32; -+ Elf64_Ehdr *elf64; -+ Elf32_Off offset32; -+ Elf64_Off offset64; -+ -+ elf32 = (Elf32_Ehdr *)buf; -+ elf64 = (Elf64_Ehdr *)buf; -+ -+ if (STRNEQ(elf32->e_ident, ELFMAG) && -+ (elf32->e_ident[EI_CLASS] == ELFCLASS32) && -+ (elf32->e_ident[EI_DATA] == ELFDATA2LSB) && -+ (elf32->e_ident[EI_VERSION] == EV_CURRENT) && -+ (elf32->e_type == ET_CORE) && -+ (elf32->e_version == EV_CURRENT) && -+ (elf32->e_shnum > 0)) { -+ switch (elf32->e_machine) -+ { -+ case EM_386: -+ if (machine_type("X86")) -+ break; -+ default: -+ goto bailout; -+ } -+ -+ xd->xc_core.elf_class = ELFCLASS32; -+ if ((xd->xc_core.elf32 = (Elf32_Ehdr *)malloc(sizeof(Elf32_Ehdr))) == NULL) { -+ fprintf(stderr, "cannot malloc ELF header buffer\n"); -+ clean_exit(1); -+ } -+ BCOPY(buf, xd->xc_core.elf32, sizeof(Elf32_Ehdr)); -+ -+ } else if (STRNEQ(elf64->e_ident, ELFMAG) && -+ (elf64->e_ident[EI_CLASS] == ELFCLASS64) && -+ (elf64->e_ident[EI_VERSION] == EV_CURRENT) && -+ (elf64->e_type == ET_CORE) && -+ (elf64->e_version == EV_CURRENT) && -+ (elf64->e_shnum > 0)) { -+ switch (elf64->e_machine) -+ { -+ case EM_IA_64: -+ if ((elf64->e_ident[EI_DATA] == ELFDATA2LSB) && -+ machine_type("IA64")) -+ break; -+ else -+ goto bailout; -+ -+ case EM_X86_64: -+ if ((elf64->e_ident[EI_DATA] == ELFDATA2LSB) && -+ machine_type("X86_64")) -+ break; -+ else -+ goto bailout; -+ -+ case EM_386: -+ if ((elf64->e_ident[EI_DATA] == ELFDATA2LSB) && -+ machine_type("X86")) -+ break; -+ else -+ goto bailout; -+ -+ default: -+ goto bailout; -+ } -+ -+ xd->xc_core.elf_class = ELFCLASS64; -+ if ((xd->xc_core.elf64 = (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr))) == NULL) { -+ fprintf(stderr, "cannot malloc ELF header buffer\n"); -+ clean_exit(1); -+ } -+ BCOPY(buf, xd->xc_core.elf64, sizeof(Elf64_Ehdr)); -+ -+ } else { -+ if (CRASHDEBUG(1)) -+ error(INFO, "xc_core_elf_verify: not a xen ELF core file\n"); -+ goto bailout; -+ } -+ -+ xc_core_elf_dump(); -+ -+ switch (xd->xc_core.elf_class) -+ { -+ case ELFCLASS32: -+ offset32 = xd->xc_core.elf32->e_shoff; -+ for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { -+ xc_core_dump_Elf32_Shdr(offset32, ELFSTORE); -+ offset32 += xd->xc_core.elf32->e_shentsize; -+ } -+ xendump_print("\n"); -+ break; -+ -+ case ELFCLASS64: -+ offset64 = xd->xc_core.elf64->e_shoff; -+ for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { -+ xc_core_dump_Elf64_Shdr(offset64, ELFSTORE); -+ offset64 += xd->xc_core.elf64->e_shentsize; -+ } -+ xendump_print("\n"); -+ break; -+ } -+ -+ xd->flags |= (XENDUMP_LOCAL | XC_CORE_ELF); -+ -+ if (!xd->page_size) -+ error(FATAL, -+ "unknown page size: use -p command line option\n"); -+ -+ if (!(xd->page = (char *)malloc(xd->page_size))) -+ error(FATAL, "cannot malloc page space."); -+ -+ if (!(xd->poc = (struct pfn_offset_cache *)calloc -+ (PFN_TO_OFFSET_CACHE_ENTRIES, -+ sizeof(struct pfn_offset_cache)))) -+ error(FATAL, "cannot malloc pfn_offset_cache\n"); -+ xd->last_pfn = ~(0UL); -+ -+ for (i = 0; i < INDEX_PFN_COUNT; i++) -+ xd->xc_core.elf_index_pfn[i].pfn = ~0UL; -+ -+ if (CRASHDEBUG(1)) -+ xendump_memory_dump(fp); -+ -+ return TRUE; -+ -+bailout: -+ return FALSE; -+} -+ -+/* -+ * Dump the relevant ELF header. 
-+ */ -+static void -+xc_core_elf_dump(void) -+{ -+ switch (xd->xc_core.elf_class) -+ { -+ case ELFCLASS32: -+ xc_core_dump_Elf32_Ehdr(xd->xc_core.elf32); -+ break; -+ case ELFCLASS64: -+ xc_core_dump_Elf64_Ehdr(xd->xc_core.elf64); -+ break; -+ } -+} -+ -+ -+/* -+ * Dump the 32-bit ELF header, and grab a pointer to the strtab section. -+ */ -+static void -+xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *elf) -+{ -+ char buf[BUFSIZE]; -+ Elf32_Off offset32; -+ Elf32_Shdr shdr; -+ -+ BZERO(buf, BUFSIZE); -+ BCOPY(elf->e_ident, buf, SELFMAG); -+ xendump_print("\nElf32_Ehdr:\n"); -+ xendump_print(" e_ident: \\%o%s\n", buf[0], -+ &buf[1]); -+ xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); -+ switch (elf->e_ident[EI_CLASS]) -+ { -+ case ELFCLASSNONE: -+ xendump_print("(ELFCLASSNONE)"); -+ break; -+ case ELFCLASS32: -+ xendump_print("(ELFCLASS32)\n"); -+ break; -+ case ELFCLASS64: -+ xendump_print("(ELFCLASS64)\n"); -+ break; -+ case ELFCLASSNUM: -+ xendump_print("(ELFCLASSNUM)\n"); -+ break; -+ default: -+ xendump_print("(?)\n"); -+ break; -+ } -+ xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); -+ switch (elf->e_ident[EI_DATA]) -+ { -+ case ELFDATANONE: -+ xendump_print("(ELFDATANONE)\n"); -+ break; -+ case ELFDATA2LSB: -+ xendump_print("(ELFDATA2LSB)\n"); -+ break; -+ case ELFDATA2MSB: -+ xendump_print("(ELFDATA2MSB)\n"); -+ break; -+ case ELFDATANUM: -+ xendump_print("(ELFDATANUM)\n"); -+ break; -+ default: -+ xendump_print("(?)\n"); -+ } -+ xendump_print(" e_ident[EI_VERSION]: %d ", -+ elf->e_ident[EI_VERSION]); -+ if (elf->e_ident[EI_VERSION] == EV_CURRENT) -+ xendump_print("(EV_CURRENT)\n"); -+ else -+ xendump_print("(?)\n"); -+ xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); -+ switch (elf->e_ident[EI_OSABI]) -+ { -+ case ELFOSABI_SYSV: -+ xendump_print("(ELFOSABI_SYSV)\n"); -+ break; -+ case ELFOSABI_HPUX: -+ xendump_print("(ELFOSABI_HPUX)\n"); -+ break; -+ case ELFOSABI_ARM: -+ xendump_print("(ELFOSABI_ARM)\n"); -+ break; -+ case ELFOSABI_STANDALONE: -+ xendump_print("(ELFOSABI_STANDALONE)\n"); -+ break; -+ default: -+ xendump_print("(?)\n"); -+ } -+ xendump_print(" e_ident[EI_ABIVERSION]: %d\n", -+ elf->e_ident[EI_ABIVERSION]); -+ -+ xendump_print(" e_type: %d ", elf->e_type); -+ switch (elf->e_type) -+ { -+ case ET_NONE: -+ xendump_print("(ET_NONE)\n"); -+ break; -+ case ET_REL: -+ xendump_print("(ET_REL)\n"); -+ break; -+ case ET_EXEC: -+ xendump_print("(ET_EXEC)\n"); -+ break; -+ case ET_DYN: -+ xendump_print("(ET_DYN)\n"); -+ break; -+ case ET_CORE: -+ xendump_print("(ET_CORE)\n"); -+ break; -+ case ET_NUM: -+ xendump_print("(ET_NUM)\n"); -+ break; -+ case ET_LOOS: -+ xendump_print("(ET_LOOS)\n"); -+ break; -+ case ET_HIOS: -+ xendump_print("(ET_HIOS)\n"); -+ break; -+ case ET_LOPROC: -+ xendump_print("(ET_LOPROC)\n"); -+ break; -+ case ET_HIPROC: -+ xendump_print("(ET_HIPROC)\n"); -+ break; -+ default: -+ xendump_print("(?)\n"); -+ } -+ -+ xendump_print(" e_machine: %d ", elf->e_machine); -+ switch (elf->e_machine) -+ { -+ case EM_386: -+ xendump_print("(EM_386)\n"); -+ break; -+ default: -+ xendump_print("(unsupported)\n"); -+ break; -+ } -+ -+ xendump_print(" e_version: %ld ", (ulong)elf->e_version); -+ xendump_print("%s\n", elf->e_version == EV_CURRENT ? 
-+ "(EV_CURRENT)" : ""); -+ -+ xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry); -+ xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff); -+ xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff); -+ xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags); -+ xendump_print(" e_ehsize: %x\n", elf->e_ehsize); -+ xendump_print(" e_phentsize: %x\n", elf->e_phentsize); -+ xendump_print(" e_phnum: %x\n", elf->e_phnum); -+ xendump_print(" e_shentsize: %x\n", elf->e_shentsize); -+ xendump_print(" e_shnum: %x\n", elf->e_shnum); -+ xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx); -+ -+ /* Determine the strtab location. */ -+ -+ offset32 = elf->e_shoff + -+ (elf->e_shstrndx * elf->e_shentsize); -+ -+ if (lseek(xd->xfd, offset32, SEEK_SET) != offset32) -+ error(FATAL, -+ "xc_core_dump_Elf32_Ehdr: cannot seek to strtab Elf32_Shdr\n"); -+ if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) -+ error(FATAL, -+ "xc_core_dump_Elf32_Ehdr: cannot read strtab Elf32_Shdr\n"); -+ -+ xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset; -+} -+ -+/* -+ * Dump the 64-bit ELF header, and grab a pointer to the strtab section. -+ */ -+static void -+xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *elf) -+{ -+ char buf[BUFSIZE]; -+ Elf64_Off offset64; -+ Elf64_Shdr shdr; -+ -+ BZERO(buf, BUFSIZE); -+ BCOPY(elf->e_ident, buf, SELFMAG); -+ xendump_print("\nElf64_Ehdr:\n"); -+ xendump_print(" e_ident: \\%o%s\n", buf[0], -+ &buf[1]); -+ xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); -+ switch (elf->e_ident[EI_CLASS]) -+ { -+ case ELFCLASSNONE: -+ xendump_print("(ELFCLASSNONE)"); -+ break; -+ case ELFCLASS32: -+ xendump_print("(ELFCLASS32)\n"); -+ break; -+ case ELFCLASS64: -+ xendump_print("(ELFCLASS64)\n"); -+ break; -+ case ELFCLASSNUM: -+ xendump_print("(ELFCLASSNUM)\n"); -+ break; -+ default: -+ xendump_print("(?)\n"); -+ break; -+ } -+ xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); -+ switch (elf->e_ident[EI_DATA]) -+ { -+ case ELFDATANONE: -+ xendump_print("(ELFDATANONE)\n"); -+ break; -+ case ELFDATA2LSB: -+ xendump_print("(ELFDATA2LSB)\n"); -+ break; -+ case ELFDATA2MSB: -+ xendump_print("(ELFDATA2MSB)\n"); -+ break; -+ case ELFDATANUM: -+ xendump_print("(ELFDATANUM)\n"); -+ break; -+ default: -+ xendump_print("(?)\n"); -+ } -+ xendump_print(" e_ident[EI_VERSION]: %d ", -+ elf->e_ident[EI_VERSION]); -+ if (elf->e_ident[EI_VERSION] == EV_CURRENT) -+ xendump_print("(EV_CURRENT)\n"); -+ else -+ xendump_print("(?)\n"); -+ xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); -+ switch (elf->e_ident[EI_OSABI]) -+ { -+ case ELFOSABI_SYSV: -+ xendump_print("(ELFOSABI_SYSV)\n"); -+ break; -+ case ELFOSABI_HPUX: -+ xendump_print("(ELFOSABI_HPUX)\n"); -+ break; -+ case ELFOSABI_ARM: -+ xendump_print("(ELFOSABI_ARM)\n"); -+ break; -+ case ELFOSABI_STANDALONE: -+ xendump_print("(ELFOSABI_STANDALONE)\n"); -+ break; -+ default: -+ xendump_print("(?)\n"); -+ } -+ xendump_print(" e_ident[EI_ABIVERSION]: %d\n", -+ elf->e_ident[EI_ABIVERSION]); -+ -+ xendump_print(" e_type: %d ", elf->e_type); -+ switch (elf->e_type) -+ { -+ case ET_NONE: -+ xendump_print("(ET_NONE)\n"); -+ break; -+ case ET_REL: -+ xendump_print("(ET_REL)\n"); -+ break; -+ case ET_EXEC: -+ xendump_print("(ET_EXEC)\n"); -+ break; -+ case ET_DYN: -+ xendump_print("(ET_DYN)\n"); -+ break; -+ case ET_CORE: -+ xendump_print("(ET_CORE)\n"); -+ break; -+ case ET_NUM: -+ xendump_print("(ET_NUM)\n"); -+ break; -+ case ET_LOOS: -+ xendump_print("(ET_LOOS)\n"); -+ break; -+ case ET_HIOS: -+ 
xendump_print("(ET_HIOS)\n"); -+ break; -+ case ET_LOPROC: -+ xendump_print("(ET_LOPROC)\n"); -+ break; -+ case ET_HIPROC: -+ xendump_print("(ET_HIPROC)\n"); -+ break; -+ default: -+ xendump_print("(?)\n"); -+ } -+ -+ xendump_print(" e_machine: %d ", elf->e_machine); -+ switch (elf->e_machine) -+ { -+ case EM_386: -+ xendump_print("(EM_386)\n"); -+ break; -+ case EM_IA_64: -+ xendump_print("(EM_IA_64)\n"); -+ break; -+ case EM_PPC64: -+ xendump_print("(EM_PPC64)\n"); -+ break; -+ case EM_X86_64: -+ xendump_print("(EM_X86_64)\n"); -+ break; -+ default: -+ xendump_print("(unsupported)\n"); -+ break; -+ } -+ -+ xendump_print(" e_version: %ld ", (ulong)elf->e_version); -+ xendump_print("%s\n", elf->e_version == EV_CURRENT ? -+ "(EV_CURRENT)" : ""); -+ -+ xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry); -+ xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff); -+ xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff); -+ xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags); -+ xendump_print(" e_ehsize: %x\n", elf->e_ehsize); -+ xendump_print(" e_phentsize: %x\n", elf->e_phentsize); -+ xendump_print(" e_phnum: %x\n", elf->e_phnum); -+ xendump_print(" e_shentsize: %x\n", elf->e_shentsize); -+ xendump_print(" e_shnum: %x\n", elf->e_shnum); -+ xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx); -+ -+ /* Determine the strtab location. */ -+ -+ offset64 = elf->e_shoff + -+ (elf->e_shstrndx * elf->e_shentsize); -+ -+ if (lseek(xd->xfd, offset64, SEEK_SET) != offset64) -+ error(FATAL, -+ "xc_core_dump_Elf64_Ehdr: cannot seek to strtab Elf32_Shdr\n"); -+ if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) -+ error(FATAL, -+ "xc_core_dump_Elf64_Ehdr: cannot read strtab Elf32_Shdr\n"); -+ -+ xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset; -+} -+ -+/* -+ * Dump each 32-bit section header and the data that they reference. 
-+ */ -+static void -+xc_core_dump_Elf32_Shdr(Elf32_Off offset, int store) -+{ -+ Elf32_Shdr shdr; -+ char name[BUFSIZE]; -+ int i; -+ char c; -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) != offset) -+ error(FATAL, -+ "xc_core_dump_Elf32_Shdr: cannot seek to Elf32_Shdr\n"); -+ if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) -+ error(FATAL, -+ "xc_core_dump_Elf32_Shdr: cannot read Elf32_Shdr\n"); -+ -+ xendump_print("\nElf32_Shdr:\n"); -+ xendump_print(" sh_name: %lx ", shdr.sh_name); -+ xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name)); -+ xendump_print(" sh_type: %lx ", shdr.sh_type); -+ switch (shdr.sh_type) -+ { -+ case SHT_NULL: -+ xendump_print("(SHT_NULL)\n"); -+ break; -+ case SHT_PROGBITS: -+ xendump_print("(SHT_PROGBITS)\n"); -+ break; -+ case SHT_STRTAB: -+ xendump_print("(SHT_STRTAB)\n"); -+ break; -+ case SHT_NOTE: -+ xendump_print("(SHT_NOTE)\n"); -+ break; -+ default: -+ xendump_print("\n"); -+ break; -+ } -+ xendump_print(" sh_flags: %lx\n", shdr.sh_flags); -+ xendump_print(" sh_addr: %lx\n", shdr.sh_addr); -+ xendump_print(" sh_offset: %lx\n", shdr.sh_offset); -+ xendump_print(" sh_size: %lx\n", shdr.sh_size); -+ xendump_print(" sh_link: %lx\n", shdr.sh_link); -+ xendump_print(" sh_info: %lx\n", shdr.sh_info); -+ xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign); -+ xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize); -+ -+ if (STREQ(name, ".shstrtab")) { -+ if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset, SEEK_SET) != -+ xd->xc_core.elf_strtab_offset) -+ error(FATAL, -+ "xc_core_dump_Elf32_Shdr: cannot seek to strtab data\n"); -+ -+ xendump_print(" "); -+ for (i = 0; i < shdr.sh_size; i++) { -+ if (read(xd->xfd, &c, sizeof(char)) != sizeof(char)) -+ error(FATAL, -+ "xc_core_dump_Elf32_Shdr: cannot read strtab data\n"); -+ if (i && !c) -+ xendump_print("\n "); -+ else -+ xendump_print("%c", c); -+ } -+ } -+ -+ if (STREQ(name, ".note.Xen")) -+ xc_core_dump_elfnote((off_t)shdr.sh_offset, -+ (size_t)shdr.sh_size, store); -+ -+ if (!store) -+ return; -+ -+ if (STREQ(name, ".xen_prstatus")) -+ xd->xc_core.header.xch_ctxt_offset = -+ (unsigned int)shdr.sh_offset; -+ -+ if (STREQ(name, ".xen_shared_info")) -+ xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset; -+ -+ if (STREQ(name, ".xen_pfn")) { -+ xd->xc_core.header.xch_index_offset = shdr.sh_offset; -+ xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE); -+ } -+ -+ if (STREQ(name, ".xen_p2m")) { -+ xd->xc_core.header.xch_index_offset = shdr.sh_offset; -+ xd->flags |= XC_CORE_P2M_CREATE; -+ } -+ -+ if (STREQ(name, ".xen_pages")) -+ xd->xc_core.header.xch_pages_offset = -+ (unsigned int)shdr.sh_offset; -+ -+ if (STREQ(name, ".xen_ia64_mapped_regs")) -+ xd->xc_core.ia64_mapped_regs_offset = -+ (off_t)shdr.sh_offset; -+} -+ -+/* -+ * Dump each 64-bit section header and the data that they reference. 
-+ */ -+static void -+xc_core_dump_Elf64_Shdr(Elf64_Off offset, int store) -+{ -+ Elf64_Shdr shdr; -+ char name[BUFSIZE]; -+ int i; -+ char c; -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) != offset) -+ error(FATAL, -+ "xc_core_dump_Elf64_Shdr: cannot seek to Elf64_Shdr\n"); -+ if (read(xd->xfd, &shdr, sizeof(Elf64_Shdr)) != sizeof(Elf64_Shdr)) -+ error(FATAL, -+ "xc_core_dump_Elf64_Shdr: cannot read Elf64_Shdr\n"); -+ -+ xendump_print("\nElf64_Shdr:\n"); -+ xendump_print(" sh_name: %x ", shdr.sh_name); -+ xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name)); -+ xendump_print(" sh_type: %x ", shdr.sh_type); -+ switch (shdr.sh_type) -+ { -+ case SHT_NULL: -+ xendump_print("(SHT_NULL)\n"); -+ break; -+ case SHT_PROGBITS: -+ xendump_print("(SHT_PROGBITS)\n"); -+ break; -+ case SHT_STRTAB: -+ xendump_print("(SHT_STRTAB)\n"); -+ break; -+ case SHT_NOTE: -+ xendump_print("(SHT_NOTE)\n"); -+ break; -+ default: -+ xendump_print("\n"); -+ break; -+ } -+ xendump_print(" sh_flags: %lx\n", shdr.sh_flags); -+ xendump_print(" sh_addr: %lx\n", shdr.sh_addr); -+ xendump_print(" sh_offset: %lx\n", shdr.sh_offset); -+ xendump_print(" sh_size: %lx\n", shdr.sh_size); -+ xendump_print(" sh_link: %x\n", shdr.sh_link); -+ xendump_print(" sh_info: %x\n", shdr.sh_info); -+ xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign); -+ xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize); -+ -+ if (STREQ(name, ".shstrtab")) { -+ if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset, SEEK_SET) != -+ xd->xc_core.elf_strtab_offset) -+ error(FATAL, -+ "xc_core_dump_Elf64_Shdr: cannot seek to strtab data\n"); -+ -+ xendump_print(" "); -+ for (i = 0; i < shdr.sh_size; i++) { -+ if (read(xd->xfd, &c, sizeof(char)) != sizeof(char)) -+ error(FATAL, -+ "xc_core_dump_Elf64_Shdr: cannot read strtab data\n"); -+ if (i && !c) -+ xendump_print("\n "); -+ else -+ xendump_print("%c", c); -+ } -+ } -+ -+ if (STREQ(name, ".note.Xen")) -+ xc_core_dump_elfnote((off_t)shdr.sh_offset, -+ (size_t)shdr.sh_size, store); -+ -+ if (!store) -+ return; -+ -+ if (STREQ(name, ".xen_prstatus")) -+ xd->xc_core.header.xch_ctxt_offset = -+ (unsigned int)shdr.sh_offset; -+ -+ if (STREQ(name, ".xen_shared_info")) -+ xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset; -+ -+ if (STREQ(name, ".xen_pfn")) { -+ xd->xc_core.header.xch_index_offset = shdr.sh_offset; -+ xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE); -+ } -+ -+ if (STREQ(name, ".xen_p2m")) { -+ xd->xc_core.header.xch_index_offset = shdr.sh_offset; -+ xd->flags |= XC_CORE_P2M_CREATE; -+ } -+ -+ if (STREQ(name, ".xen_pages")) -+ xd->xc_core.header.xch_pages_offset = -+ (unsigned int)shdr.sh_offset; -+ -+ if (STREQ(name, ".xen_ia64_mapped_regs")) -+ xd->xc_core.ia64_mapped_regs_offset = -+ (off_t)shdr.sh_offset; -+} -+ -+/* -+ * Return the string found at the specified index into -+ * the dumpfile's strtab. -+ */ -+static char * -+xc_core_strtab(uint32_t index, char *buf) -+{ -+ off_t offset; -+ int i; -+ -+ offset = xd->xc_core.elf_strtab_offset + index; -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) != offset) -+ error(FATAL, -+ "xc_core_strtab: cannot seek to Elf64_Shdr\n"); -+ -+ BZERO(buf, BUFSIZE); -+ i = 0; -+ -+ while (read(xd->xfd, &buf[i], sizeof(char)) == sizeof(char)) { -+ if (buf[i] == NULLCHAR) -+ break; -+ i++; -+ } -+ -+ return buf; -+} -+ -+ -+/* -+ * Dump the array of elfnote structures, storing relevant info -+ * when requested during initialization. This function is -+ * common to both 32-bit and 64-bit ELF files. 
-+ */ -+static void -+xc_core_dump_elfnote(off_t sh_offset, size_t sh_size, int store) -+{ -+ int i, lf, index; -+ char *notes_buffer; -+ struct elfnote *elfnote; -+ ulonglong *data; -+ struct xen_dumpcore_elfnote_header_desc *elfnote_header; -+ struct xen_dumpcore_elfnote_format_version_desc *format_version; -+ -+ elfnote_header = NULL; -+ -+ if (!(notes_buffer = (char *)malloc(sh_size))) -+ error(FATAL, "cannot malloc notes space."); -+ -+ if (lseek(xd->xfd, sh_offset, SEEK_SET) != sh_offset) -+ error(FATAL, -+ "xc_core_dump_elfnote: cannot seek to sh_offset\n"); -+ -+ if (read(xd->xfd, notes_buffer, sh_size) != sh_size) -+ error(FATAL, -+ "xc_core_dump_elfnote: cannot read elfnote data\n"); -+ -+ for (index = 0; index < sh_size; ) { -+ elfnote = (struct elfnote *)¬es_buffer[index]; -+ xendump_print(" namesz: %d\n", elfnote->namesz); -+ xendump_print(" descz: %d\n", elfnote->descsz); -+ xendump_print(" type: %x ", elfnote->type); -+ switch (elfnote->type) -+ { -+ case XEN_ELFNOTE_DUMPCORE_NONE: -+ xendump_print("(XEN_ELFNOTE_DUMPCORE_NONE)\n"); -+ break; -+ case XEN_ELFNOTE_DUMPCORE_HEADER: -+ xendump_print("(XEN_ELFNOTE_DUMPCORE_HEADER)\n"); -+ elfnote_header = (struct xen_dumpcore_elfnote_header_desc *) -+ (elfnote+1); -+ break; -+ case XEN_ELFNOTE_DUMPCORE_XEN_VERSION: -+ xendump_print("(XEN_ELFNOTE_DUMPCORE_XEN_VERSION)\n"); -+ break; -+ case XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION: -+ xendump_print("(XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION)\n"); -+ format_version = (struct xen_dumpcore_elfnote_format_version_desc *) -+ (elfnote+1); -+ break; -+ default: -+ xendump_print("(unknown)\n"); -+ break; -+ } -+ xendump_print(" name: %s\n", elfnote->name); -+ -+ data = (ulonglong *)(elfnote+1); -+ for (i = lf = 0; i < elfnote->descsz/sizeof(ulonglong); i++) { -+ if (((i%2)==0)) { -+ xendump_print("%s ", -+ i ? "\n" : ""); -+ lf++; -+ } else -+ lf = 0; -+ xendump_print("%016llx ", *data++); -+ } -+ if (!elfnote->descsz) -+ xendump_print(" (empty)"); -+ xendump_print("\n"); -+ -+ index += sizeof(struct elfnote) + elfnote->descsz; -+ } -+ -+ if (!store) -+ return; -+ -+ if (elfnote_header) { -+ xd->xc_core.header.xch_magic = elfnote_header->xch_magic; -+ xd->xc_core.header.xch_nr_vcpus = elfnote_header->xch_nr_vcpus; -+ xd->xc_core.header.xch_nr_pages = elfnote_header->xch_nr_pages; -+ xd->page_size = elfnote_header->xch_page_size; -+ } -+ -+ if (format_version) { -+ switch (format_version->version) -+ { -+ case FORMAT_VERSION_0000000000000001: -+ break; -+ default: -+ error(WARNING, -+ "unsupported xen dump-core format version: %016llx\n", -+ format_version->version); -+ } -+ xd->xc_core.format_version = format_version->version; -+ } -+ -+} -+ -+/* -+ * Initialize the batching list for the .xen_p2m or .xen_pfn -+ * arrays. 
-+ */ -+static void -+xc_core_elf_pfn_init(void) -+{ -+ int i, c, chunk; -+ off_t offset; -+ struct xen_dumpcore_p2m p2m; -+ uint64_t pfn; -+ -+ switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) -+ { -+ case (XC_CORE_ELF|XC_CORE_NO_P2M): -+ chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; -+ -+ for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { -+ offset = (off_t)xd->xc_core.header.xch_index_offset + -+ (off_t)(c * sizeof(uint64_t)); -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ error(FATAL, -+ "cannot lseek to page index %d\n", c); -+ if (read(xd->xfd, &pfn, sizeof(uint64_t)) != -+ sizeof(uint64_t)) -+ error(FATAL, -+ "cannot read page index %d\n", c); -+ -+ xd->xc_core.elf_index_pfn[i].index = c; -+ xd->xc_core.elf_index_pfn[i].pfn = (ulong)pfn; -+ } -+ break; -+ -+ case XC_CORE_ELF: -+ chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; -+ -+ for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { -+ offset = (off_t)xd->xc_core.header.xch_index_offset + -+ (off_t)(c * sizeof(struct xen_dumpcore_p2m)); -+ -+ if (lseek(xd->xfd, offset, SEEK_SET) == -1) -+ error(FATAL, -+ "cannot lseek to page index %d\n", c); -+ if (read(xd->xfd, &p2m, sizeof(struct xen_dumpcore_p2m)) != -+ sizeof(struct xen_dumpcore_p2m)) -+ error(FATAL, -+ "cannot read page index %d\n", c); -+ -+ xd->xc_core.elf_index_pfn[i].index = c; -+ xd->xc_core.elf_index_pfn[i].pfn = (ulong)p2m.pfn; -+ } -+ break; -+ } -+} -+ ---- crash/unwind.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/unwind.c 2007-03-15 08:16:14.000000000 -0500 -@@ -6,8 +6,8 @@ - /* - * unwind.c - * -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * - * Adapted from: - * -@@ -36,6 +36,7 @@ - /* #include can't include this -- it's changing over time! */ - - #include "defs.h" -+#include "xen_hyper_defs.h" - - typedef unsigned char u8; - typedef unsigned long long u64; -@@ -64,6 +65,8 @@ - struct bt_info *); - static int unw_switch_from_osinit_v2(struct unw_frame_info *, - struct bt_info *); -+static int unw_switch_from_osinit_v3(struct unw_frame_info *, -+ struct bt_info *, char *); - static unsigned long get_init_stack_ulong(unsigned long addr); - static void unw_init_frame_info(struct unw_frame_info *, - struct bt_info *, ulong); -@@ -1397,9 +1400,22 @@ - req = &request; - - if (get_symbol_type("unw", "tables", req) == TYPE_CODE_UNDEF) { -- error(WARNING, "cannot determine unw.tables offset\n"); -- machdep->flags |= UNW_OUT_OF_SYNC; -- } else { -+ /* -+ * KLUDGE ALERT: -+ * If unw.tables cannot be ascertained by gdb, try unw.save_order, -+ * given that it is the field just after unw.tables. 
-+ */ -+ if (get_symbol_type("unw", "save_order", req) == TYPE_CODE_UNDEF) { -+ error(WARNING, "cannot determine unw.tables offset\n"); -+ machdep->flags |= UNW_OUT_OF_SYNC; -+ } else -+ req->member_offset -= BITS_PER_BYTE * sizeof(void *); -+ -+ if (CRASHDEBUG(1)) -+ error(WARNING, "using unw.save_order to determine unw.tables\n"); -+ } -+ -+ if (!(machdep->flags & UNW_OUT_OF_SYNC)) { - machdep->machspec->unw_tables_offset = - req->member_offset/BITS_PER_BYTE; - -@@ -1658,8 +1674,13 @@ - unw_get_sp(info, &sp); - unw_get_bsp(info, &bsp); - -- if (ip < GATE_ADDR + PAGE_SIZE) -- break; -+ if (XEN_HYPER_MODE()) { -+ if (!IS_KVADDR(ip)) -+ break; -+ } else { -+ if (ip < GATE_ADDR + PAGE_SIZE) -+ break; -+ } - - if ((sm = value_search(ip, NULL))) - name = sm->name; -@@ -1720,11 +1741,29 @@ - * ia64_init_handler. - */ - if (STREQ(name, "ia64_init_handler")) { -- unw_switch_from_osinit_v2(info, bt); -- frame++; -- goto restart; -+ if (symbol_exists("ia64_mca_modify_original_stack")) { -+ /* -+ * 2.6.14 or later kernels no longer keep -+ * minstate info in pt_regs/switch_stack. -+ * unw_switch_from_osinit_v3() will try -+ * to find the interrupted task and restart -+ * backtrace itself. -+ */ -+ if (unw_switch_from_osinit_v3(info, bt, "INIT") == FALSE) -+ break; -+ } else { -+ if (unw_switch_from_osinit_v2(info, bt) == FALSE) -+ break; -+ frame++; -+ goto restart; -+ } - } - -+ if (STREQ(name, "ia64_mca_handler") && -+ symbol_exists("ia64_mca_modify_original_stack")) -+ if (unw_switch_from_osinit_v3(info, bt, "MCA") == FALSE) -+ break; -+ - frame++; - - } while (unw_unwind(info) >= 0); -@@ -1844,8 +1883,13 @@ - ulong sw; - - sw = SWITCH_STACK_ADDR(bt->task); -- if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw)) -- return FALSE; -+ if (XEN_HYPER_MODE()) { -+ if (!INSTACK(sw, bt) && !ia64_in_mca_stack_hyper(sw, bt)) -+ return FALSE; -+ } else { -+ if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw)) -+ return FALSE; -+ } - - unw_init_frame_info(info, bt, sw); - return TRUE; -@@ -1967,6 +2011,124 @@ - return TRUE; - } - -+/* CPL (current privilege level) is 2-bit field */ -+#define IA64_PSR_CPL0_BIT 32 -+#define IA64_PSR_CPL_MASK (3UL << IA64_PSR_CPL0_BIT) -+ -+static int -+user_mode(struct bt_info *bt, unsigned long pt) -+{ -+ unsigned long cr_ipsr; -+ -+ cr_ipsr = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, cr_ipsr)); -+ if (cr_ipsr & IA64_PSR_CPL_MASK) -+ return 1; -+ return 0; -+} -+ -+/* -+ * Cope with INIT/MCA stack for the kernel 2.6.14 or later -+ * -+ * Returns FALSE if no more unwinding is needed. -+ */ -+#define ALIGN16(x) ((x)&~15) -+static int -+unw_switch_from_osinit_v3(struct unw_frame_info *info, struct bt_info *bt, -+ char *type) -+{ -+ unsigned long pt, sw, pid; -+ int processor; -+ char *p, *q; -+ struct task_context *tc = NULL; -+ struct bt_info clone_bt; -+ -+ /* -+ * The structure of INIT/MCA stack -+ * -+ * +---------------------------+ <-------- IA64_STK_OFFSET -+ * | pt_regs | -+ * +---------------------------+ -+ * | switch_stack | -+ * +---------------------------+ -+ * | SAL/OS state | -+ * +---------------------------+ -+ * | 16 byte scratch area | -+ * +---------------------------+ <-------- SP at start of C handler -+ * | ..... | -+ * +---------------------------+ -+ * | RBS for MCA/INIT handler | -+ * +---------------------------+ -+ * | struct task for MCA/INIT | -+ * +---------------------------+ <-------- bt->task -+ */ -+ pt = ALIGN16(bt->task + IA64_STK_OFFSET - STRUCT_SIZE("pt_regs")); -+ sw = ALIGN16(pt - STRUCT_SIZE("switch_stack")); -+ -+ /* -+ * 1. 
Try to find interrupted task from comm -+ * -+ * comm format of INIT/MCA task: -+ * - " " -+ * - " " -+ * where "" is either "INIT" or "MCA". -+ * The latter form is chosen if PID is 0. -+ * -+ * See ia64_mca_modify_comm() in arch/ia64/kernel/mca.c -+ */ -+ if (!bt->tc || !bt->tc->comm) -+ goto find_exframe; -+ -+ if ((p = strstr(bt->tc->comm, type))) { -+ p += strlen(type); -+ if (*p != ' ') -+ goto find_exframe; -+ if ((q = strchr(++p, ' '))) { -+ /* " " */ -+ if (sscanf(++q, "%d", &processor) > 0) { -+ tc = pid_to_context(0); -+ while (tc) { -+ if (tc != bt->tc && -+ tc->processor == processor) -+ break; -+ tc = tc->tc_next; -+ } -+ } -+ } else if (sscanf(p, "%lu", &pid) > 0) -+ /* " " */ -+ tc = pid_to_context(pid); -+ } -+ -+ if (tc) { -+ /* Clone bt_info and do backtrace */ -+ clone_bt_info(bt, &clone_bt, tc); -+ if (!BT_REFERENCE_CHECK(&clone_bt)) { -+ fprintf(fp, "(%s) INTERRUPTED TASK\n", type); -+ print_task_header(fp, tc, 0); -+ } -+ if (!user_mode(bt, pt)) -+ back_trace(&clone_bt); -+ else if (!BT_REFERENCE_CHECK(bt)) { -+ fprintf(fp, " #0 [interrupted in user space]\n"); -+ /* at least show the incomplete exception frame */ -+ bt->flags |= BT_INCOMPLETE_USER_EFRAME; -+ ia64_exception_frame(pt, bt); -+ } -+ return FALSE; -+ } -+ -+ /* task matching with INIT/MCA task's comm is not found */ -+ -+find_exframe: -+ /* -+ * 2. If step 1 doesn't work, try best to find exception frame -+ */ -+ unw_init_from_interruption(info, bt, pt, sw); -+ if (!BT_REFERENCE_CHECK(bt)) -+ ia64_exception_frame(pt, bt); -+ -+ return TRUE; -+} -+ - static void - unw_init_frame_info (struct unw_frame_info *info, struct bt_info *bt, ulong sw) - { ---- crash/unwind_x86_32_64.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/unwind_x86_32_64.c 2006-11-15 14:44:58.000000000 -0500 -@@ -0,0 +1,1220 @@ -+#if defined(X86_64) -+/* -+ * Support for genarating DWARF CFI based backtraces. 
-+ * Borrowed heavily from the kernel's implementation of unwinding using the -+ * DWARF CFI written by Jan Beulich -+ */ -+ -+#ifdef X86_64 -+#include "unwind_x86_64.h" -+#endif -+#ifdef X86 -+#include "unwind_x86.h" -+#endif -+ -+#include "defs.h" -+ -+#define MAX_STACK_DEPTH 8 -+ -+static struct local_unwind_table { -+ struct { -+ unsigned long pc; -+ unsigned long range; -+ } core, init; -+ void *address; -+ unsigned long size; -+} *local_unwind_tables, default_unwind_table; -+ -+static int gather_in_memory_unwind_tables(void); -+static int populate_local_tables(ulong, char *); -+static int unwind_tables_cnt = 0; -+static struct local_unwind_table *find_table(unsigned long); -+static void dump_local_unwind_tables(void); -+ -+static const struct { -+ unsigned offs:BITS_PER_LONG / 2; -+ unsigned width:BITS_PER_LONG / 2; -+} reg_info[] = { -+ UNW_REGISTER_INFO -+}; -+ -+#undef PTREGS_INFO -+#undef EXTRA_INFO -+ -+#ifndef REG_INVALID -+#define REG_INVALID(r) (reg_info[r].width == 0) -+#endif -+ -+#define DW_CFA_nop 0x00 -+#define DW_CFA_set_loc 0x01 -+#define DW_CFA_advance_loc1 0x02 -+#define DW_CFA_advance_loc2 0x03 -+#define DW_CFA_advance_loc4 0x04 -+#define DW_CFA_offset_extended 0x05 -+#define DW_CFA_restore_extended 0x06 -+#define DW_CFA_undefined 0x07 -+#define DW_CFA_same_value 0x08 -+#define DW_CFA_register 0x09 -+#define DW_CFA_remember_state 0x0a -+#define DW_CFA_restore_state 0x0b -+#define DW_CFA_def_cfa 0x0c -+#define DW_CFA_def_cfa_register 0x0d -+#define DW_CFA_def_cfa_offset 0x0e -+#define DW_CFA_def_cfa_expression 0x0f -+#define DW_CFA_expression 0x10 -+#define DW_CFA_offset_extended_sf 0x11 -+#define DW_CFA_def_cfa_sf 0x12 -+#define DW_CFA_def_cfa_offset_sf 0x13 -+#define DW_CFA_val_offset 0x14 -+#define DW_CFA_val_offset_sf 0x15 -+#define DW_CFA_val_expression 0x16 -+#define DW_CFA_lo_user 0x1c -+#define DW_CFA_GNU_window_save 0x2d -+#define DW_CFA_GNU_args_size 0x2e -+#define DW_CFA_GNU_negative_offset_extended 0x2f -+#define DW_CFA_hi_user 0x3f -+ -+#define DW_EH_PE_FORM 0x07 -+#define DW_EH_PE_native 0x00 -+#define DW_EH_PE_leb128 0x01 -+#define DW_EH_PE_data2 0x02 -+#define DW_EH_PE_data4 0x03 -+#define DW_EH_PE_data8 0x04 -+#define DW_EH_PE_signed 0x08 -+#define DW_EH_PE_ADJUST 0x70 -+#define DW_EH_PE_abs 0x00 -+#define DW_EH_PE_pcrel 0x10 -+#define DW_EH_PE_textrel 0x20 -+#define DW_EH_PE_datarel 0x30 -+#define DW_EH_PE_funcrel 0x40 -+#define DW_EH_PE_aligned 0x50 -+#define DW_EH_PE_indirect 0x80 -+#define DW_EH_PE_omit 0xff -+ -+#define min(x,y) ({ \ -+ typeof(x) _x = (x); \ -+ typeof(y) _y = (y); \ -+ (void) (&_x == &_y); \ -+ _x < _y ? _x : _y; }) -+ -+#define max(x,y) ({ \ -+ typeof(x) _x = (x); \ -+ typeof(y) _y = (y); \ -+ (void) (&_x == &_y); \ -+ _x > _y ? 
_x : _y; }) -+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) -+ -+typedef unsigned long uleb128_t; -+typedef signed long sleb128_t; -+ -+struct unwind_item { -+ enum item_location { -+ Nowhere, -+ Memory, -+ Register, -+ Value -+ } where; -+ uleb128_t value; -+}; -+ -+struct unwind_state { -+ uleb128_t loc, org; -+ const u8 *cieStart, *cieEnd; -+ uleb128_t codeAlign; -+ sleb128_t dataAlign; -+ struct cfa { -+ uleb128_t reg, offs; -+ } cfa; -+ struct unwind_item regs[ARRAY_SIZE(reg_info)]; -+ unsigned stackDepth:8; -+ unsigned version:8; -+ const u8 *label; -+ const u8 *stack[MAX_STACK_DEPTH]; -+}; -+ -+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 }; -+ -+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) -+{ -+ const u8 *cur = *pcur; -+ uleb128_t value; -+ unsigned shift; -+ -+ for (shift = 0, value = 0; cur < end; shift += 7) { -+ if (shift + 7 > 8 * sizeof(value) -+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { -+ cur = end + 1; -+ break; -+ } -+ value |= (uleb128_t)(*cur & 0x7f) << shift; -+ if (!(*cur++ & 0x80)) -+ break; -+ } -+ *pcur = cur; -+ -+ return value; -+} -+ -+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) -+{ -+ const u8 *cur = *pcur; -+ sleb128_t value; -+ unsigned shift; -+ -+ for (shift = 0, value = 0; cur < end; shift += 7) { -+ if (shift + 7 > 8 * sizeof(value) -+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { -+ cur = end + 1; -+ break; -+ } -+ value |= (sleb128_t)(*cur & 0x7f) << shift; -+ if (!(*cur & 0x80)) { -+ value |= -(*cur++ & 0x40) << shift; -+ break; -+ } -+ } -+ *pcur = cur; -+ -+ return value; -+} -+ -+static unsigned long read_pointer(const u8 **pLoc, -+ const void *end, -+ signed ptrType) -+{ -+ unsigned long value = 0; -+ union { -+ const u8 *p8; -+ const u16 *p16u; -+ const s16 *p16s; -+ const u32 *p32u; -+ const s32 *p32s; -+ const unsigned long *pul; -+ } ptr; -+ -+ if (ptrType < 0 || ptrType == DW_EH_PE_omit) -+ return 0; -+ ptr.p8 = *pLoc; -+ switch(ptrType & DW_EH_PE_FORM) { -+ case DW_EH_PE_data2: -+ if (end < (const void *)(ptr.p16u + 1)) -+ return 0; -+ if(ptrType & DW_EH_PE_signed) -+ value = get_unaligned(ptr.p16s++); -+ else -+ value = get_unaligned(ptr.p16u++); -+ break; -+ case DW_EH_PE_data4: -+#ifdef CONFIG_64BIT -+ if (end < (const void *)(ptr.p32u + 1)) -+ return 0; -+ if(ptrType & DW_EH_PE_signed) -+ value = get_unaligned(ptr.p32s++); -+ else -+ value = get_unaligned(ptr.p32u++); -+ break; -+ case DW_EH_PE_data8: -+ BUILD_BUG_ON(sizeof(u64) != sizeof(value)); -+#else -+ BUILD_BUG_ON(sizeof(u32) != sizeof(value)); -+#endif -+ case DW_EH_PE_native: -+ if (end < (const void *)(ptr.pul + 1)) -+ return 0; -+ value = get_unaligned(ptr.pul++); -+ break; -+ case DW_EH_PE_leb128: -+ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value)); -+ value = ptrType & DW_EH_PE_signed -+ ? 
get_sleb128(&ptr.p8, end) -+ : get_uleb128(&ptr.p8, end); -+ if ((const void *)ptr.p8 > end) -+ return 0; -+ break; -+ default: -+ return 0; -+ } -+ switch(ptrType & DW_EH_PE_ADJUST) { -+ case DW_EH_PE_abs: -+ break; -+ case DW_EH_PE_pcrel: -+ value += (unsigned long)*pLoc; -+ break; -+ default: -+ return 0; -+ } -+ -+/* TBD -+ if ((ptrType & DW_EH_PE_indirect) -+ && __get_user(value, (unsigned long *)value)) -+ return 0; -+*/ -+ *pLoc = ptr.p8; -+ -+ return value; -+} -+ -+static signed fde_pointer_type(const u32 *cie) -+{ -+ const u8 *ptr = (const u8 *)(cie + 2); -+ unsigned version = *ptr; -+ -+ if (version != 1) -+ return -1; /* unsupported */ -+ if (*++ptr) { -+ const char *aug; -+ const u8 *end = (const u8 *)(cie + 1) + *cie; -+ uleb128_t len; -+ -+ /* check if augmentation size is first (and thus present) */ -+ if (*ptr != 'z') -+ return -1; -+ /* check if augmentation string is nul-terminated */ -+ if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL) -+ return -1; -+ ++ptr; /* skip terminator */ -+ get_uleb128(&ptr, end); /* skip code alignment */ -+ get_sleb128(&ptr, end); /* skip data alignment */ -+ /* skip return address column */ -+ version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end); -+ len = get_uleb128(&ptr, end); /* augmentation length */ -+ if (ptr + len < ptr || ptr + len > end) -+ return -1; -+ end = ptr + len; -+ while (*++aug) { -+ if (ptr >= end) -+ return -1; -+ switch(*aug) { -+ case 'L': -+ ++ptr; -+ break; -+ case 'P': { -+ signed ptrType = *ptr++; -+ -+ if (!read_pointer(&ptr, end, ptrType) || ptr > end) -+ return -1; -+ } -+ break; -+ case 'R': -+ return *ptr; -+ default: -+ return -1; -+ } -+ } -+ } -+ return DW_EH_PE_native|DW_EH_PE_abs; -+} -+ -+static int advance_loc(unsigned long delta, struct unwind_state *state) -+{ -+ state->loc += delta * state->codeAlign; -+ -+ return delta > 0; -+} -+ -+static void set_rule(uleb128_t reg, -+ enum item_location where, -+ uleb128_t value, -+ struct unwind_state *state) -+{ -+ if (reg < ARRAY_SIZE(state->regs)) { -+ state->regs[reg].where = where; -+ state->regs[reg].value = value; -+ } -+} -+ -+static int processCFI(const u8 *start, -+ const u8 *end, -+ unsigned long targetLoc, -+ signed ptrType, -+ struct unwind_state *state) -+{ -+ union { -+ const u8 *p8; -+ const u16 *p16; -+ const u32 *p32; -+ } ptr; -+ int result = 1; -+ -+ if (start != state->cieStart) { -+ state->loc = state->org; -+ result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state); -+ if (targetLoc == 0 && state->label == NULL) -+ return result; -+ } -+ for (ptr.p8 = start; result && ptr.p8 < end; ) { -+ switch(*ptr.p8 >> 6) { -+ uleb128_t value; -+ -+ case 0: -+ switch(*ptr.p8++) { -+ case DW_CFA_nop: -+ break; -+ case DW_CFA_set_loc: -+ if ((state->loc = read_pointer(&ptr.p8, end, -+ ptrType)) == 0) -+ result = 0; -+ break; -+ case DW_CFA_advance_loc1: -+ result = ptr.p8 < end && advance_loc(*ptr.p8++, state); -+ break; -+ case DW_CFA_advance_loc2: -+ result = ptr.p8 <= end + 2 -+ && advance_loc(*ptr.p16++, state); -+ break; -+ case DW_CFA_advance_loc4: -+ result = ptr.p8 <= end + 4 -+ && advance_loc(*ptr.p32++, state); -+ break; -+ case DW_CFA_offset_extended: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Memory, -+ get_uleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_val_offset: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Value, -+ get_uleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_offset_extended_sf: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Memory, -+ 
get_sleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_val_offset_sf: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Value, -+ get_sleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_restore_extended: -+ case DW_CFA_undefined: -+ case DW_CFA_same_value: -+ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state); -+ break; -+ case DW_CFA_register: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Register, -+ get_uleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_remember_state: -+ if (ptr.p8 == state->label) { -+ state->label = NULL; -+ return 1; -+ } -+ if (state->stackDepth >= MAX_STACK_DEPTH) -+ return 0; -+ state->stack[state->stackDepth++] = ptr.p8; -+ break; -+ case DW_CFA_restore_state: -+ if (state->stackDepth) { -+ const uleb128_t loc = state->loc; -+ const u8 *label = state->label; -+ -+ state->label = state->stack[state->stackDepth - 1]; -+ memcpy(&state->cfa, &badCFA, sizeof(state->cfa)); -+ memset(state->regs, 0, sizeof(state->regs)); -+ state->stackDepth = 0; -+ result = processCFI(start, end, 0, ptrType, state); -+ state->loc = loc; -+ state->label = label; -+ } else -+ return 0; -+ break; -+ case DW_CFA_def_cfa: -+ state->cfa.reg = get_uleb128(&ptr.p8, end); -+ /*nobreak*/ -+ case DW_CFA_def_cfa_offset: -+ state->cfa.offs = get_uleb128(&ptr.p8, end); -+ break; -+ case DW_CFA_def_cfa_sf: -+ state->cfa.reg = get_uleb128(&ptr.p8, end); -+ /*nobreak*/ -+ case DW_CFA_def_cfa_offset_sf: -+ state->cfa.offs = get_sleb128(&ptr.p8, end) -+ * state->dataAlign; -+ break; -+ case DW_CFA_def_cfa_register: -+ state->cfa.reg = get_uleb128(&ptr.p8, end); -+ break; -+ /*todo case DW_CFA_def_cfa_expression: */ -+ /*todo case DW_CFA_expression: */ -+ /*todo case DW_CFA_val_expression: */ -+ case DW_CFA_GNU_args_size: -+ get_uleb128(&ptr.p8, end); -+ break; -+ case DW_CFA_GNU_negative_offset_extended: -+ value = get_uleb128(&ptr.p8, end); -+ set_rule(value, Memory, (uleb128_t)0 - -+ get_uleb128(&ptr.p8, end), state); -+ break; -+ case DW_CFA_GNU_window_save: -+ default: -+ result = 0; -+ break; -+ } -+ break; -+ case 1: -+ result = advance_loc(*ptr.p8++ & 0x3f, state); -+ break; -+ case 2: -+ value = *ptr.p8++ & 0x3f; -+ set_rule(value, Memory, get_uleb128(&ptr.p8, end), -+ state); -+ break; -+ case 3: -+ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state); -+ break; -+ } -+ if (ptr.p8 > end) -+ result = 0; -+ if (result && targetLoc != 0 && targetLoc < state->loc) -+ return 1; -+ } -+ -+ return result -+ && ptr.p8 == end -+ && (targetLoc == 0 -+ || (/*todo While in theory this should apply, gcc in practice omits -+ everything past the function prolog, and hence the location -+ never reaches the end of the function. -+ targetLoc < state->loc &&*/ state->label == NULL)); -+} -+ -+ -+/* Unwind to previous to frame. Returns 0 if successful, negative -+ * number in case of an error. 
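-+ *
-+ * In outline (a summary of the code below, not extra behavior):
-+ * find_table() picks the unwind table whose core/init ranges cover
-+ * UNW_PC(frame), falling back to the default table built from
-+ * .eh_frame or __start_unwind; the FDE covering the PC and its CIE
-+ * are located and validated; processCFI() interprets the CFI
-+ * program up to the PC, yielding a CFA rule plus a save location
-+ * per register; finally the CFA is computed and registers recorded
-+ * as saved in memory are read back via readmem(), leaving the
-+ * caller's PC/SP in *frame.
-+ *
-+ * A typical caller loop (see dwarf_backtrace() further down) is:
-+ *
-+ *	UNW_SP(frame) = bt->stkptr;
-+ *	UNW_PC(frame) = bt->instptr;
-+ *	while ((UNW_SP(frame) < stacktop) && !unwind(frame) &&
-+ *	    UNW_PC(frame))
-+ *		... print the frame ...
-+ *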
*/ -+int -+unwind(struct unwind_frame_info *frame) -+{ -+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs]) -+ const u32 *fde = NULL, *cie = NULL; -+ const u8 *ptr = NULL, *end = NULL; -+ unsigned long startLoc = 0, endLoc = 0, cfa; -+ unsigned i; -+ signed ptrType = -1; -+ uleb128_t retAddrReg = 0; -+// struct unwind_table *table; -+ void *unwind_table; -+ struct local_unwind_table *table; -+ struct unwind_state state; -+ u64 reg_ptr = 0; -+ -+ -+ if (UNW_PC(frame) == 0) -+ return -EINVAL; -+ -+ if ((table = find_table(UNW_PC(frame)))) { -+// unsigned long tableSize = unwind_table_size; -+ unsigned long tableSize = table->size; -+ -+ unwind_table = table->address; -+ -+ for (fde = unwind_table; -+ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; -+ tableSize -= sizeof(*fde) + *fde, -+ fde += 1 + *fde / sizeof(*fde)) { -+ if (!*fde || (*fde & (sizeof(*fde) - 1))) -+ break; -+ if (!fde[1]) -+ continue; /* this is a CIE */ -+ if ((fde[1] & (sizeof(*fde) - 1)) -+ || fde[1] > (unsigned long)(fde + 1) -+ - (unsigned long)unwind_table) -+ continue; /* this is not a valid FDE */ -+ cie = fde + 1 - fde[1] / sizeof(*fde); -+ if (*cie <= sizeof(*cie) + 4 -+ || *cie >= fde[1] - sizeof(*fde) -+ || (*cie & (sizeof(*cie) - 1)) -+ || cie[1] -+ || (ptrType = fde_pointer_type(cie)) < 0) { -+ cie = NULL; /* this is not a (valid) CIE */ -+ continue; -+ } -+ ptr = (const u8 *)(fde + 2); -+ startLoc = read_pointer(&ptr, -+ (const u8 *)(fde + 1) + *fde, -+ ptrType); -+ endLoc = startLoc -+ + read_pointer(&ptr, -+ (const u8 *)(fde + 1) + *fde, -+ ptrType & DW_EH_PE_indirect -+ ? ptrType -+ : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); -+ if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc) -+ break; -+ cie = NULL; -+ } -+ } -+ if (cie != NULL) { -+ memset(&state, 0, sizeof(state)); -+ state.cieEnd = ptr; /* keep here temporarily */ -+ ptr = (const u8 *)(cie + 2); -+ end = (const u8 *)(cie + 1) + *cie; -+ if ((state.version = *ptr) != 1) -+ cie = NULL; /* unsupported version */ -+ else if (*++ptr) { -+ /* check if augmentation size is first (and thus present) */ -+ if (*ptr == 'z') { -+ /* check for ignorable (or already handled) -+ * nul-terminated augmentation string */ -+ while (++ptr < end && *ptr) -+ if (strchr("LPR", *ptr) == NULL) -+ break; -+ } -+ if (ptr >= end || *ptr) -+ cie = NULL; -+ } -+ ++ptr; -+ } -+ if (cie != NULL) { -+ /* get code aligment factor */ -+ state.codeAlign = get_uleb128(&ptr, end); -+ /* get data aligment factor */ -+ state.dataAlign = get_sleb128(&ptr, end); -+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end) -+ cie = NULL; -+ else { -+ retAddrReg = state.version <= 1 ? 
*ptr++ : get_uleb128(&ptr, end); -+ /* skip augmentation */ -+ if (((const char *)(cie + 2))[1] == 'z') -+ ptr += get_uleb128(&ptr, end); -+ if (ptr > end -+ || retAddrReg >= ARRAY_SIZE(reg_info) -+ || REG_INVALID(retAddrReg) -+ || reg_info[retAddrReg].width != sizeof(unsigned long)) -+ cie = NULL; -+ } -+ } -+ if (cie != NULL) { -+ state.cieStart = ptr; -+ ptr = state.cieEnd; -+ state.cieEnd = end; -+ end = (const u8 *)(fde + 1) + *fde; -+ /* skip augmentation */ -+ if (((const char *)(cie + 2))[1] == 'z') { -+ uleb128_t augSize = get_uleb128(&ptr, end); -+ -+ if ((ptr += augSize) > end) -+ fde = NULL; -+ } -+ } -+ if (cie == NULL || fde == NULL) -+ return -ENXIO; -+ -+ state.org = startLoc; -+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); -+ /* process instructions */ -+ if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state) -+ || state.loc > endLoc -+ || state.regs[retAddrReg].where == Nowhere -+ || state.cfa.reg >= ARRAY_SIZE(reg_info) -+ || reg_info[state.cfa.reg].width != sizeof(unsigned long) -+ || state.cfa.offs % sizeof(unsigned long)) { -+ return -EIO; -+ } -+ /* update frame */ -+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs; -+ startLoc = min((unsigned long)UNW_SP(frame), cfa); -+ endLoc = max((unsigned long)UNW_SP(frame), cfa); -+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) { -+ startLoc = min(STACK_LIMIT(cfa), cfa); -+ endLoc = max(STACK_LIMIT(cfa), cfa); -+ } -+#ifndef CONFIG_64BIT -+# define CASES CASE(8); CASE(16); CASE(32) -+#else -+# define CASES CASE(8); CASE(16); CASE(32); CASE(64) -+#endif -+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { -+ if (REG_INVALID(i)) { -+ if (state.regs[i].where == Nowhere) -+ continue; -+ return -EIO; -+ } -+ switch(state.regs[i].where) { -+ default: -+ break; -+ case Register: -+ if (state.regs[i].value >= ARRAY_SIZE(reg_info) -+ || REG_INVALID(state.regs[i].value) -+ || reg_info[i].width > reg_info[state.regs[i].value].width){ -+ return -EIO; -+ } -+ switch(reg_info[state.regs[i].value].width) { -+#define CASE(n) \ -+ case sizeof(u##n): \ -+ state.regs[i].value = FRAME_REG(state.regs[i].value, \ -+ const u##n); \ -+ break -+ CASES; -+#undef CASE -+ default: -+ return -EIO; -+ } -+ break; -+ } -+ } -+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { -+ if (REG_INVALID(i)) -+ continue; -+ switch(state.regs[i].where) { -+ case Nowhere: -+ if (reg_info[i].width != sizeof(UNW_SP(frame)) -+ || &FRAME_REG(i, __typeof__(UNW_SP(frame))) -+ != &UNW_SP(frame)) -+ continue; -+ UNW_SP(frame) = cfa; -+ break; -+ case Register: -+ switch(reg_info[i].width) { -+#define CASE(n) case sizeof(u##n): \ -+ FRAME_REG(i, u##n) = state.regs[i].value; \ -+ break -+ CASES; -+#undef CASE -+ default: -+ return -EIO; -+ } -+ break; -+ case Value: -+ if (reg_info[i].width != sizeof(unsigned long)){ -+ return -EIO;} -+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value -+ * state.dataAlign; -+ break; -+ case Memory: { -+ unsigned long addr = cfa + state.regs[i].value -+ * state.dataAlign; -+ if ((state.regs[i].value * state.dataAlign) -+ % sizeof(unsigned long) -+ || addr < startLoc -+ || addr + sizeof(unsigned long) < addr -+ || addr + sizeof(unsigned long) > endLoc){ -+ return -EIO;} -+ switch(reg_info[i].width) { -+#define CASE(n) case sizeof(u##n): \ -+ readmem(addr, KVADDR, ®_ptr,sizeof(u##n), "register", RETURN_ON_ERROR|QUIET); \ -+ FRAME_REG(i, u##n) = (u##n)reg_ptr;\ -+ break -+ CASES; -+#undef CASE -+ default: -+ return -EIO; -+ } -+ } -+ break; -+ } -+ } -+ return 0; -+#undef CASES -+#undef FRAME_REG -+} -+ -+/* -+ * Initialize 
the unwind table(s) in the best-case order: -+ * -+ * 1. Use the in-memory kernel and module unwind tables. -+ * 2. Use the in-memory kernel-only .eh_frame data. (possible?) -+ * 3. Use the kernel-only .eh_frame data from the vmlinux file. -+ */ -+void -+init_unwind_table(void) -+{ -+ ulong unwind_table_size; -+ void *unwind_table; -+ -+ kt->flags &= ~DWARF_UNWIND; -+ -+ if (gather_in_memory_unwind_tables()) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY (%d tables)\n", -+ unwind_tables_cnt); -+ -+ kt->flags |= DWARF_UNWIND_MEMORY; -+ if (unwind_tables_cnt > 1) -+ kt->flags |= DWARF_UNWIND_MODULES; -+ if (!(kt->flags & NO_DWARF_UNWIND)) -+ kt->flags |= DWARF_UNWIND; -+ -+ return; -+ } -+ -+ if (symbol_exists("__start_unwind") && -+ symbol_exists("__end_unwind")) { -+ unwind_table_size = symbol_value("__end_unwind") - -+ symbol_value("__start_unwind"); -+ -+ if (!(unwind_table = malloc(unwind_table_size))) { -+ error(WARNING, "cannot malloc unwind table space\n"); -+ goto try_eh_frame; -+ } -+ -+ if (!readmem(symbol_value("__start_unwind"), KVADDR, unwind_table, -+ unwind_table_size, "unwind table", RETURN_ON_ERROR)) { -+ error(WARNING, "cannot read unwind table data\n"); -+ free(unwind_table); -+ goto try_eh_frame; -+ } -+ -+ kt->flags |= DWARF_UNWIND_MEMORY; -+ if (!(kt->flags & NO_DWARF_UNWIND)) -+ kt->flags |= DWARF_UNWIND; -+ -+ default_unwind_table.size = unwind_table_size; -+ default_unwind_table.address = unwind_table; -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY\n"); -+ -+ return; -+ } -+ -+try_eh_frame: -+ -+ if (st->dwarf_eh_frame_size) { -+ int fd; -+ -+ unwind_table_size = st->dwarf_eh_frame_size; -+ -+ if (!(unwind_table = malloc(unwind_table_size))) { -+ error(WARNING, "cannot malloc unwind table space\n"); -+ return; -+ } -+ -+ if ((fd = open(pc->namelist, O_RDONLY)) < 0) { -+ error(WARNING, "cannot open %s for .eh_frame data\n", -+ pc->namelist); -+ free(unwind_table); -+ return; -+ } -+ -+ lseek(fd, st->dwarf_eh_frame_file_offset, SEEK_SET); -+ -+ if (read(fd, unwind_table, st->dwarf_eh_frame_size) != -+ st->dwarf_eh_frame_size) { -+ error(WARNING, "cannot read .eh_frame data from %s\n", -+ pc->namelist); -+ free(unwind_table); -+ return; -+ } -+ -+ close(fd); -+ -+ default_unwind_table.size = unwind_table_size; -+ default_unwind_table.address = unwind_table; -+ -+ kt->flags |= DWARF_UNWIND_EH_FRAME; -+ if (!(kt->flags & NO_DWARF_UNWIND)) -+ kt->flags |= DWARF_UNWIND; -+ -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "init_unwind_table: DWARF_UNWIND_EH_FRAME\n"); -+ -+ return; -+ } -+} -+ -+/* -+ * Find the appropriate kernel-only "root_table" unwind_table, -+ * and pass it to populate_local_tables() to do the heavy lifting. 
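-+ *
-+ * The member offsets gathered below assume the kernel's
-+ * unwind_table layout, which looks roughly like this (subject to
-+ * change between kernel versions):
-+ *
-+ *	struct unwind_table {
-+ *		struct { unsigned long pc, range; } core, init;
-+ *		const void *address;        -- DWARF CFI data
-+ *		unsigned long size;
-+ *		struct unwind_table *link;  -- list of module tables
-+ *		const char *name;           -- "kernel" for the root table
-+ *	};
-+ *
-+ * If any of those members cannot be resolved, this function quietly
-+ * returns 0 and init_unwind_table() falls back to the
-+ * __start_unwind/.eh_frame paths.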
-+ */ -+static int -+gather_in_memory_unwind_tables(void) -+{ -+ int i, cnt, found; -+ struct syment *sp, *root_tables[10]; -+ char *root_table_buf; -+ char buf[BUFSIZE]; -+ ulong name; -+ -+ STRUCT_SIZE_INIT(unwind_table, "unwind_table"); -+ MEMBER_OFFSET_INIT(unwind_table_core, "unwind_table", "core"); -+ MEMBER_OFFSET_INIT(unwind_table_init, "unwind_table", "init"); -+ MEMBER_OFFSET_INIT(unwind_table_address, "unwind_table", "address"); -+ MEMBER_OFFSET_INIT(unwind_table_size, "unwind_table", "size"); -+ MEMBER_OFFSET_INIT(unwind_table_link, "unwind_table", "link"); -+ MEMBER_OFFSET_INIT(unwind_table_name, "unwind_table", "name"); -+ -+ if (INVALID_SIZE(unwind_table) || -+ INVALID_MEMBER(unwind_table_core) || -+ INVALID_MEMBER(unwind_table_init) || -+ INVALID_MEMBER(unwind_table_address) || -+ INVALID_MEMBER(unwind_table_size) || -+ INVALID_MEMBER(unwind_table_link) || -+ INVALID_MEMBER(unwind_table_name)) { -+ if (CRASHDEBUG(1)) -+ error(NOTE, -+ "unwind_table structure has changed, or does not exist in this kernel\n"); -+ return 0; -+ } -+ -+ /* -+ * Unfortunately there are two kernel root_table symbols. -+ */ -+ if (!(cnt = get_syment_array("root_table", root_tables, 10))) -+ return 0; -+ -+ root_table_buf = GETBUF(SIZE(unwind_table)); -+ for (i = found = 0; i < cnt; i++) { -+ sp = root_tables[i]; -+ if (!readmem(sp->value, KVADDR, root_table_buf, -+ SIZE(unwind_table), "root unwind_table", -+ RETURN_ON_ERROR|QUIET)) -+ goto gather_failed; -+ -+ name = ULONG(root_table_buf + OFFSET(unwind_table_name)); -+ if (read_string(name, buf, strlen("kernel")+1) && -+ STREQ("kernel", buf)) { -+ found++; -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "root_table name: %lx [%s]\n", -+ name, buf); -+ break; -+ } -+ } -+ -+ if (!found) -+ goto gather_failed; -+ -+ cnt = populate_local_tables(sp->value, root_table_buf); -+ -+ FREEBUF(root_table_buf); -+ return cnt; -+ -+gather_failed: -+ -+ FREEBUF(root_table_buf); -+ return 0; -+} -+ -+/* -+ * Transfer the relevant data from the kernel and module unwind_table -+ * structures to the local_unwind_table structures. -+ */ -+static int -+populate_local_tables(ulong root, char *buf) -+{ -+ struct list_data list_data, *ld; -+ int i, cnt; -+ ulong *table_list; -+ ulong vaddr; -+ struct local_unwind_table *tp; -+ -+ ld = &list_data; -+ BZERO(ld, sizeof(struct list_data)); -+ ld->start = root; -+ ld->member_offset = OFFSET(unwind_table_link); -+ if (CRASHDEBUG(1)) -+ ld->flags |= VERBOSE; -+ -+ hq_open(); -+ cnt = do_list(ld); -+ table_list = (ulong *)GETBUF(cnt * sizeof(ulong)); -+ cnt = retrieve_list(table_list, cnt); -+ hq_close(); -+ -+ if (!(local_unwind_tables = -+ malloc(sizeof(struct local_unwind_table) * cnt))) { -+ error(WARNING, "cannot malloc unwind_table space (%d tables)\n", -+ cnt); -+ FREEBUF(table_list); -+ return 0; -+ } -+ -+ for (i = 0; i < cnt; i++, tp++) { -+ -+ if (!readmem(table_list[i], KVADDR, buf, -+ SIZE(unwind_table), "unwind_table", -+ RETURN_ON_ERROR|QUIET)) { -+ error(WARNING, "cannot read unwind_table\n"); -+ goto failed; -+ } -+ -+ tp = &local_unwind_tables[i]; -+ -+ /* -+ * Copy the required table info for find_table(). -+ */ -+ BCOPY(buf + OFFSET(unwind_table_core), -+ (char *)&tp->core.pc, sizeof(ulong)*2); -+ BCOPY(buf + OFFSET(unwind_table_init), -+ (char *)&tp->init.pc, sizeof(ulong)*2); -+ BCOPY(buf + OFFSET(unwind_table_size), -+ (char *)&tp->size, sizeof(ulong)); -+ -+ /* -+ * Then read the DWARF CFI data. 
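-+		 *
-+		 * The address member points at the table's CFI blob in
-+		 * kernel virtual memory; it is copied into locally
-+		 * malloc'd space so that unwind()/processCFI() can walk
-+		 * it without issuing a readmem() per access, i.e.
-+		 * (mirroring the two steps below):
-+		 *
-+		 *	tp->address = malloc(tp->size);
-+		 *	readmem(vaddr, KVADDR, tp->address, tp->size, ...);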
-+ */ -+ vaddr = ULONG(buf + OFFSET(unwind_table_address)); -+ -+ if (!(tp->address = malloc(tp->size))) { -+ error(WARNING, "cannot malloc unwind_table space\n"); -+ goto failed; -+ break; -+ } -+ if (!readmem(vaddr, KVADDR, tp->address, -+ tp->size, "DWARF CFI data", RETURN_ON_ERROR|QUIET)) { -+ error(WARNING, "cannot read unwind_table data\n"); -+ goto failed; -+ } -+ } -+ -+ unwind_tables_cnt = cnt; -+ -+ if (CRASHDEBUG(7)) -+ dump_local_unwind_tables(); -+ -+failed: -+ -+ FREEBUF(table_list); -+ return unwind_tables_cnt; -+} -+ -+/* -+ * Find the unwind_table containing a pc. -+ */ -+static struct local_unwind_table * -+find_table(unsigned long pc) -+{ -+ int i; -+ struct local_unwind_table *tp, *table; -+ -+ table = &default_unwind_table; -+ -+ for (i = 0; i < unwind_tables_cnt; i++, tp++) { -+ tp = &local_unwind_tables[i]; -+ if ((pc >= tp->core.pc -+ && pc < tp->core.pc + tp->core.range) -+ || (pc >= tp->init.pc -+ && pc < tp->init.pc + tp->init.range)) { -+ table = tp; -+ break; -+ } -+ } -+ -+ return table; -+} -+ -+static void -+dump_local_unwind_tables(void) -+{ -+ int i, others; -+ struct local_unwind_table *tp; -+ -+ others = 0; -+ fprintf(fp, "DWARF flags: ("); -+ if (kt->flags & DWARF_UNWIND) -+ fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : ""); -+ if (kt->flags & NO_DWARF_UNWIND) -+ fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : ""); -+ if (kt->flags & DWARF_UNWIND_MEMORY) -+ fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : ""); -+ if (kt->flags & DWARF_UNWIND_EH_FRAME) -+ fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : ""); -+ if (kt->flags & DWARF_UNWIND_MODULES) -+ fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? "|" : ""); -+ fprintf(fp, ")\n\n"); -+ -+ fprintf(fp, "default_unwind_table:\n"); -+ fprintf(fp, " address: %lx\n", -+ (ulong)default_unwind_table.address); -+ fprintf(fp, " size: %ld\n\n", -+ (ulong)default_unwind_table.size); -+ -+ fprintf(fp, "local_unwind_tables[%d]:\n", unwind_tables_cnt); -+ for (i = 0; i < unwind_tables_cnt; i++, tp++) { -+ tp = &local_unwind_tables[i]; -+ fprintf(fp, "[%d]\n", i); -+ fprintf(fp, " core: pc: %lx\n", tp->core.pc); -+ fprintf(fp, " range: %ld\n", tp->core.range); -+ fprintf(fp, " init: pc: %lx\n", tp->init.pc); -+ fprintf(fp, " range: %ld\n", tp->init.range); -+ fprintf(fp, " address: %lx\n", (ulong)tp->address); -+ fprintf(fp, " size: %ld\n", tp->size); -+ } -+} -+ -+ -+int -+dwarf_backtrace(struct bt_info *bt, int level, ulong stacktop) -+{ -+ unsigned long bp, offset; -+ struct syment *sp; -+ char *name; -+ struct unwind_frame_info *frame; -+ -+ frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); -+// frame->regs.rsp = bt->stkptr; -+// frame->regs.rip = bt->instptr; -+ UNW_SP(frame) = bt->stkptr; -+ UNW_PC(frame) = bt->instptr; -+ -+ /* read rbp from stack for non active tasks */ -+ if (!(bt->flags & BT_DUMPFILE_SEARCH) && !bt->bptr) { -+// readmem(frame->regs.rsp, KVADDR, &bp, -+ readmem(UNW_SP(frame), KVADDR, &bp, -+ sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); -+ frame->regs.rbp = bp; /* fixme for x86 */ -+ } -+ -+ sp = value_search(UNW_PC(frame), &offset); -+ if (!sp) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", -+ UNW_PC(frame)); -+ goto bailout; -+ } -+ -+ /* -+ * If offset is zero, it means we have crossed over to the next -+ * function. 
Recalculate by adjusting the text address -+ */ -+ if (!offset) { -+ sp = value_search(UNW_PC(frame) - 1, &offset); -+ if (!sp) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "unwind: cannot find symbol for PC: %lx\n", -+ UNW_PC(frame)-1); -+ goto bailout; -+ } -+ } -+ -+ -+ -+ name = sp->name; -+ fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), -+ UNW_PC(frame), frame->regs.rbp); -+ -+ while ((UNW_SP(frame) < stacktop) -+ && !unwind(frame) && UNW_PC(frame)) { -+ /* To prevent rip pushed on IRQ stack being reported both -+ * both on the IRQ and process stacks -+ */ -+ if ((bt->flags & BT_IRQSTACK) && (UNW_SP(frame) >= stacktop - 16)) -+ break; -+ level++; -+ sp = value_search(UNW_PC(frame), &offset); -+ if (!sp) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "unwind: cannot find symbol for PC: %lx\n", -+ UNW_PC(frame)); -+ break; -+ } -+ -+ /* -+ * If offset is zero, it means we have crossed over to the next -+ * function. Recalculate by adjusting the text address -+ */ -+ if (!offset) { -+ sp = value_search(UNW_PC(frame) - 1, &offset); -+ if (!sp) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "unwind: cannot find symbol for PC: %lx\n", -+ UNW_PC(frame)-1); -+ goto bailout; -+ } -+ } -+ name = sp->name; -+ fprintf(fp, "%s#%d [%016lx] %s at %016lx \n", level < 10 ? " " : "", -+ level, UNW_SP(frame), name, UNW_PC(frame)); -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), -+ UNW_PC(frame), frame->regs.rbp); -+ } -+ -+bailout: -+ FREEBUF(frame); -+ return ++level; -+} -+ -+int -+dwarf_print_stack_entry(struct bt_info *bt, int level) -+{ -+ unsigned long offset; -+ struct syment *sp; -+ char *name; -+ struct unwind_frame_info *frame; -+ -+ frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); -+ UNW_SP(frame) = bt->stkptr; -+ UNW_PC(frame) = bt->instptr; -+ -+ sp = value_search(UNW_PC(frame), &offset); -+ if (!sp) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", -+ UNW_PC(frame)); -+ goto bailout; -+ } -+ -+ /* -+ * If offset is zero, it means we have crossed over to the next -+ * function. Recalculate by adjusting the text address -+ */ -+ if (!offset) { -+ sp = value_search(UNW_PC(frame) - 1, &offset); -+ if (!sp) { -+ if (CRASHDEBUG(1)) -+ fprintf(fp, -+ "unwind: cannot find symbol for PC: %lx\n", -+ UNW_PC(frame)-1); -+ goto bailout; -+ } -+ } -+ name = sp->name; -+ fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); -+ -+bailout: -+ FREEBUF(frame); -+ return level; -+} -+ -+void -+dwarf_debug(struct bt_info *bt) -+{ -+ struct unwind_frame_info *frame; -+ ulong bp; -+ -+ if (!bt->hp->eip) { -+ dump_local_unwind_tables(); -+ return; -+ } -+ -+ if (!(kt->flags & DWARF_UNWIND_CAPABLE)) { -+ error(INFO, "not DWARF capable\n"); -+ return; -+ } -+ -+ frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); -+ -+ /* -+ * XXX: This only works for the first PC/SP pair seen in a normal -+ * backtrace, so it's not particularly helpful. Ideally it should -+ * be capable to take any PC/SP pair in a stack, but it appears to -+ * related to the rbp value. 
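-+	 *
-+	 * With no PC supplied this just dumps the gathered unwind
-+	 * tables; otherwise it seeds the frame from the given PC/SP
-+	 * pair, performs one unwind() step, and reports the delta back
-+	 * to the original SP as a rough frame size, i.e. (sketch):
-+	 *
-+	 *	UNW_PC(frame) = eip;  UNW_SP(frame) = esp;
-+	 *	unwind(frame);
-+	 *	fprintf(fp, "frame size: %lx\n", UNW_SP(frame) - esp);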
-+ */ -+ -+ UNW_PC(frame) = bt->hp->eip; -+ UNW_SP(frame) = bt->hp->esp; -+ -+ readmem(UNW_SP(frame), KVADDR, &bp, -+ sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); -+ frame->regs.rbp = bp; /* fixme for x86 */ -+ -+ unwind(frame); -+ -+ fprintf(fp, "frame size: %lx (%lx)\n", -+ (ulong)UNW_SP(frame), (ulong)UNW_SP(frame) - bt->hp->esp); -+ -+ FREEBUF(frame); -+} -+ -+ -+#endif ---- crash/xen_hyper.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/xen_hyper.c 2007-08-23 17:02:54.000000000 -0400 + * it under the terms of the GNU General Public License as published by +@@ -35,7 +35,7 @@ + #ifndef _DUMP_H + #define _DUMP_H + +-#include ++//#include + + /* define TRUE and FALSE for use in our dump modules */ + #ifndef FALSE +--- crash/xen_hyper.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/xen_hyper.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1975 @@ +/* + * xen_hyper.c @@ -42222,8 +2935,58957 @@ + fprintf(out, "PCPU: %2d VCPU: %lx\n", vcc->processor, vcpu); +} +#endif ---- crash/xen_hyper_command.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/xen_hyper_command.c 2007-08-23 17:02:54.000000000 -0400 +--- crash/netdump.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/netdump.c 2008-01-16 10:00:24.000000000 -0500 +@@ -1,7 +1,7 @@ + /* netdump.c + * +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. + * + * This software may be freely redistributed under the terms of the + * GNU General Public License. +@@ -13,38 +13,14 @@ + * Author: David Anderson + */ + ++#define _LARGEFILE64_SOURCE 1 /* stat64() */ ++ + #include "defs.h" + #include "netdump.h" + +-struct pt_load_segment { +- off_t file_offset; +- physaddr_t phys_start; +- physaddr_t phys_end; +-}; +- +-struct netdump_data { +- ulong flags; +- int ndfd; +- FILE *ofp; +- uint header_size; +- char *netdump_header; +- uint num_pt_load_segments; +- struct pt_load_segment *pt_load_segments; +- Elf32_Ehdr *elf32; +- Elf32_Phdr *notes32; +- Elf32_Phdr *load32; +- Elf64_Ehdr *elf64; +- Elf64_Phdr *notes64; +- Elf64_Phdr *load64; +- void *nt_prstatus; +- void *nt_prpsinfo; +- void *nt_taskstruct; +- ulong task_struct; +- ulong switch_stack; +-}; +- +-static struct netdump_data netdump_data = { 0 }; +-static struct netdump_data *nd = &netdump_data; ++static struct vmcore_data vmcore_data = { 0 }; ++static struct vmcore_data *nd = &vmcore_data; ++static struct xen_kdump_data xen_kdump_data = { 0 }; + static void netdump_print(char *, ...); + static void dump_Elf32_Ehdr(Elf32_Ehdr *); + static void dump_Elf32_Phdr(Elf32_Phdr *, int); +@@ -52,31 +28,34 @@ + static void dump_Elf64_Ehdr(Elf64_Ehdr *); + static void dump_Elf64_Phdr(Elf64_Phdr *, int); + static size_t dump_Elf64_Nhdr(Elf64_Off offset, int); +-static void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); +-static void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); + static void get_netdump_regs_ppc64(struct bt_info *, ulong *, ulong *); ++static physaddr_t xen_kdump_p2m(physaddr_t); ++static void check_dumpfile_size(char *); + + #define ELFSTORE 1 + #define ELFREAD 0 ++ ++#define MIN_PAGE_SIZE (4096) + + /* +- * Determine whether a file is a netdump creation, and if TRUE, +- * initialize the netdump_data structure. 
++ * Determine whether a file is a netdump/diskdump/kdump creation, ++ * and if TRUE, initialize the vmcore_data structure. + */ + int +-is_netdump(char *file, ulong source) ++is_netdump(char *file, ulong source_query) + { +- int i; +- int fd; ++ int i, fd, swap; + Elf32_Ehdr *elf32; + Elf32_Phdr *load32; + Elf64_Ehdr *elf64; + Elf64_Phdr *load64; +- char header[MIN_NETDUMP_ELF_HEADER_SIZE]; ++ char eheader[MIN_NETDUMP_ELF_HEADER_SIZE]; + char buf[BUFSIZE]; + size_t size, len, tot; + Elf32_Off offset32; + Elf64_Off offset64; ++ ulong tmp_flags; ++ char *tmp_elf_header; + + if ((fd = open(file, O_RDWR)) < 0) { + if ((fd = open(file, O_RDONLY)) < 0) { +@@ -87,7 +66,7 @@ + } + + size = MIN_NETDUMP_ELF_HEADER_SIZE; +- if (read(fd, header, size) != size) { ++ if (read(fd, eheader, size) != size) { + sprintf(buf, "%s: read", file); + perror(buf); + goto bailout; +@@ -99,89 +78,163 @@ + goto bailout; + } + +- elf32 = (Elf32_Ehdr *)&header[0]; +- elf64 = (Elf64_Ehdr *)&header[0]; ++ tmp_flags = 0; ++ elf32 = (Elf32_Ehdr *)&eheader[0]; ++ elf64 = (Elf64_Ehdr *)&eheader[0]; + + /* +- * Verify the ELF header ++ * Verify the ELF header, and determine the dumpfile format. ++ * ++ * For now, kdump vmcores differ from netdump/diskdump like so: ++ * ++ * 1. The first kdump PT_LOAD segment is packed just after ++ * the ELF header, whereas netdump/diskdump page-align ++ * the first PT_LOAD segment. ++ * 2. Each kdump PT_LOAD segment has a p_align field of zero, ++ * whereas netdump/diskdump have their p_align fields set ++ * to the system page-size. ++ * ++ * If either kdump difference is seen, presume kdump -- this ++ * is obviously subject to change. + */ +- if (STRNEQ(elf32->e_ident, ELFMAG) && +- (elf32->e_ident[EI_CLASS] == ELFCLASS32) && +- (elf32->e_ident[EI_DATA] == ELFDATA2LSB) && +- (elf32->e_ident[EI_VERSION] == EV_CURRENT) && +- (elf32->e_type == ET_CORE) && +- (elf32->e_version == EV_CURRENT) && +- (elf32->e_phnum >= 2)) { +- switch (elf32->e_machine) ++ ++ if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT) ++ goto bailout; ++ ++ swap = (((eheader[EI_DATA] == ELFDATA2LSB) && ++ (__BYTE_ORDER == __BIG_ENDIAN)) || ++ ((eheader[EI_DATA] == ELFDATA2MSB) && ++ (__BYTE_ORDER == __LITTLE_ENDIAN))); ++ ++ if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) && ++ (swap16(elf32->e_type, swap) == ET_CORE) && ++ (swap32(elf32->e_version, swap) == EV_CURRENT) && ++ (swap16(elf32->e_phnum, swap) >= 2)) { ++ switch (swap16(elf32->e_machine, swap)) + { + case EM_386: +- if (machine_type("X86")) +- break; ++ if (machine_type_mismatch(file, "X86", NULL, ++ source_query)) ++ goto bailout; ++ break; ++ + default: +- goto bailout; ++ if (machine_type_mismatch(file, "(unknown)", NULL, ++ source_query)) ++ goto bailout; + } +- nd->flags |= NETDUMP_ELF32; ++ ++ if (endian_mismatch(file, elf32->e_ident[EI_DATA], ++ source_query)) ++ goto bailout; ++ + load32 = (Elf32_Phdr *) +- &header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; ++ &eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; + size = (size_t)load32->p_offset; +- } else if (STRNEQ(elf64->e_ident, ELFMAG) && +- (elf64->e_ident[EI_CLASS] == ELFCLASS64) && +- (elf64->e_ident[EI_VERSION] == EV_CURRENT) && +- (elf64->e_type == ET_CORE) && +- (elf64->e_version == EV_CURRENT) && +- (elf64->e_phnum >= 2)) { +- switch (elf64->e_machine) ++ ++ if ((load32->p_offset & (MIN_PAGE_SIZE-1)) && ++ (load32->p_align == 0)) ++ tmp_flags |= KDUMP_ELF32; ++ else ++ tmp_flags |= NETDUMP_ELF32; ++ } else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) && ++ (swap16(elf64->e_type, swap) 
== ET_CORE) && ++ (swap32(elf64->e_version, swap) == EV_CURRENT) && ++ (swap16(elf64->e_phnum, swap) >= 2)) { ++ switch (swap16(elf64->e_machine, swap)) + { + case EM_IA_64: +- if ((elf64->e_ident[EI_DATA] == ELFDATA2LSB) && +- machine_type("IA64")) +- break; +- else ++ if (machine_type_mismatch(file, "IA64", NULL, ++ source_query)) + goto bailout; ++ break; + + case EM_PPC64: +- if ((elf64->e_ident[EI_DATA] == ELFDATA2MSB) && +- machine_type("PPC64")) +- break; +- else ++ if (machine_type_mismatch(file, "PPC64", NULL, ++ source_query)) + goto bailout; ++ break; + + case EM_X86_64: +- if ((elf64->e_ident[EI_DATA] == ELFDATA2LSB) && +- machine_type("X86_64")) +- break; +- else ++ if (machine_type_mismatch(file, "X86_64", NULL, ++ source_query)) + goto bailout; ++ break; ++ ++ case EM_386: ++ if (machine_type_mismatch(file, "X86", NULL, ++ source_query)) ++ goto bailout; ++ break; + + default: +- goto bailout; ++ if (machine_type_mismatch(file, "(unknown)", NULL, ++ source_query)) ++ goto bailout; + } +- nd->flags |= NETDUMP_ELF64; ++ ++ if (endian_mismatch(file, elf64->e_ident[EI_DATA], ++ source_query)) ++ goto bailout; ++ + load64 = (Elf64_Phdr *) +- &header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; ++ &eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; + size = (size_t)load64->p_offset; +- } else ++ if ((load64->p_offset & (MIN_PAGE_SIZE-1)) && ++ (load64->p_align == 0)) ++ tmp_flags |= KDUMP_ELF64; ++ else ++ tmp_flags |= NETDUMP_ELF64; ++ } else { ++ if (CRASHDEBUG(2)) ++ error(INFO, "%s: not a %s ELF dumpfile\n", ++ file, source_query == NETDUMP_LOCAL ? ++ "netdump" : "kdump"); ++ ++ + goto bailout; ++ } ++ ++ switch (DUMPFILE_FORMAT(tmp_flags)) ++ { ++ case NETDUMP_ELF32: ++ case NETDUMP_ELF64: ++ if (source_query & (NETDUMP_LOCAL|NETDUMP_REMOTE)) ++ break; ++ else ++ goto bailout; + +- if ((nd->netdump_header = (char *)malloc(size)) == NULL) { +- fprintf(stderr, "cannot malloc netdump header buffer\n"); ++ case KDUMP_ELF32: ++ case KDUMP_ELF64: ++ if (source_query & KDUMP_LOCAL) ++ break; ++ else ++ goto bailout; ++ } ++ ++ if ((tmp_elf_header = (char *)malloc(size)) == NULL) { ++ fprintf(stderr, "cannot malloc ELF header buffer\n"); + clean_exit(1); + } + +- if (read(fd, nd->netdump_header, size) != size) { ++ if (read(fd, tmp_elf_header, size) != size) { + sprintf(buf, "%s: read", file); + perror(buf); ++ free(tmp_elf_header); + goto bailout; + } + + nd->ndfd = fd; +- nd->flags |= source; ++ nd->elf_header = tmp_elf_header; ++ nd->flags = tmp_flags; ++ nd->flags |= source_query; + +- switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64)) ++ switch (DUMPFILE_FORMAT(nd->flags)) + { + case NETDUMP_ELF32: ++ case KDUMP_ELF32: + nd->header_size = load32->p_offset; +- nd->elf32 = (Elf32_Ehdr *)&nd->netdump_header[0]; ++ nd->elf32 = (Elf32_Ehdr *)&nd->elf_header[0]; + nd->num_pt_load_segments = nd->elf32->e_phnum - 1; + if ((nd->pt_load_segments = (struct pt_load_segment *) + malloc(sizeof(struct pt_load_segment) * +@@ -190,9 +243,11 @@ + clean_exit(1); + } + nd->notes32 = (Elf32_Phdr *) +- &nd->netdump_header[sizeof(Elf32_Ehdr)]; ++ &nd->elf_header[sizeof(Elf32_Ehdr)]; + nd->load32 = (Elf32_Phdr *) +- &nd->netdump_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; ++ &nd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; ++ if (DUMPFILE_FORMAT(nd->flags) == NETDUMP_ELF32) ++ nd->page_size = (uint)nd->load32->p_align; + dump_Elf32_Ehdr(nd->elf32); + dump_Elf32_Phdr(nd->notes32, ELFREAD); + for (i = 0; i < nd->num_pt_load_segments; i++) +@@ -205,8 +260,9 @@ + break; + + case NETDUMP_ELF64: ++ case 
KDUMP_ELF64: + nd->header_size = load64->p_offset; +- nd->elf64 = (Elf64_Ehdr *)&nd->netdump_header[0]; ++ nd->elf64 = (Elf64_Ehdr *)&nd->elf_header[0]; + nd->num_pt_load_segments = nd->elf64->e_phnum - 1; + if ((nd->pt_load_segments = (struct pt_load_segment *) + malloc(sizeof(struct pt_load_segment) * +@@ -215,9 +271,11 @@ + clean_exit(1); + } + nd->notes64 = (Elf64_Phdr *) +- &nd->netdump_header[sizeof(Elf64_Ehdr)]; ++ &nd->elf_header[sizeof(Elf64_Ehdr)]; + nd->load64 = (Elf64_Phdr *) +- &nd->netdump_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; ++ &nd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; ++ if (DUMPFILE_FORMAT(nd->flags) == NETDUMP_ELF64) ++ nd->page_size = (uint)nd->load64->p_align; + dump_Elf64_Ehdr(nd->elf64); + dump_Elf64_Phdr(nd->notes64, ELFREAD); + for (i = 0; i < nd->num_pt_load_segments; i++) +@@ -230,6 +288,9 @@ + break; + } + ++ if (CRASHDEBUG(1)) ++ netdump_memory_dump(fp); ++ + return nd->header_size; + + bailout: +@@ -238,15 +299,97 @@ + } + + /* ++ * Return the e_version number of an ELF file ++ * (or -1 if its not readable ELF file) ++ */ ++int ++file_elf_version(char *file) ++{ ++ int fd, size; ++ Elf32_Ehdr *elf32; ++ Elf64_Ehdr *elf64; ++ char header[MIN_NETDUMP_ELF_HEADER_SIZE]; ++ char buf[BUFSIZE]; ++ ++ if ((fd = open(file, O_RDONLY)) < 0) { ++ sprintf(buf, "%s: open", file); ++ perror(buf); ++ return -1; ++ } ++ ++ size = MIN_NETDUMP_ELF_HEADER_SIZE; ++ if (read(fd, header, size) != size) { ++ sprintf(buf, "%s: read", file); ++ perror(buf); ++ close(fd); ++ return -1; ++ } ++ close(fd); ++ ++ elf32 = (Elf32_Ehdr *)&header[0]; ++ elf64 = (Elf64_Ehdr *)&header[0]; ++ ++ if (STRNEQ(elf32->e_ident, ELFMAG) && ++ (elf32->e_ident[EI_CLASS] == ELFCLASS32) && ++ (elf32->e_ident[EI_DATA] == ELFDATA2LSB) && ++ (elf32->e_ident[EI_VERSION] == EV_CURRENT)) { ++ return (elf32->e_version); ++ } else if (STRNEQ(elf64->e_ident, ELFMAG) && ++ (elf64->e_ident[EI_CLASS] == ELFCLASS64) && ++ (elf64->e_ident[EI_VERSION] == EV_CURRENT)) { ++ return (elf64->e_version); ++ } ++ ++ return -1; ++} ++ ++/* ++ * Check whether any PT_LOAD segment goes beyond the file size. ++ */ ++static void ++check_dumpfile_size(char *file) ++{ ++ int i; ++ struct stat64 stat; ++ struct pt_load_segment *pls; ++ uint64_t segment_end; ++ ++ if (stat64(file, &stat) < 0) ++ return; ++ ++ for (i = 0; i < nd->num_pt_load_segments; i++) { ++ pls = &nd->pt_load_segments[i]; ++ ++ segment_end = pls->file_offset + ++ (pls->phys_end - pls->phys_start); ++ ++ if (segment_end > stat.st_size) { ++ error(WARNING, "%s: may be truncated or incomplete\n" ++ " PT_LOAD p_offset: %lld\n" ++ " p_filesz: %lld\n" ++ " bytes required: %lld\n" ++ " dumpfile size: %lld\n\n", ++ file, pls->file_offset, ++ pls->phys_end - pls->phys_start, ++ segment_end, stat.st_size); ++ return; ++ } ++ } ++} ++ ++/* + * Perform any post-dumpfile determination stuff here. + */ + int + netdump_init(char *unused, FILE *fptr) + { +- if (!NETDUMP_VALID()) ++ if (!VMCORE_VALID()) + return FALSE; + + nd->ofp = fptr; ++ ++ check_dumpfile_size(pc->dumpfile); ++ + return TRUE; + } + +@@ -263,19 +406,19 @@ + /* + * The Elf32_Phdr has 32-bit fields for p_paddr, p_filesz and + * p_memsz, so for now, multiple PT_LOAD segment support is +- * restricted to 64-bit machines. Until a "standard" becomes +- * available in the future that deals with physical memory +- * segments that start at greater then 4GB, or memory segments +- * sizes that are greater than 4GB (kexec?), then this feature +- * is restricted to 64-bit machines. 
++ * restricted to 64-bit machines for netdump/diskdump vmcores. ++ * However, kexec/kdump has introduced the optional use of a ++ * 64-bit ELF header for 32-bit processors. + */ +- switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64)) ++ switch (DUMPFILE_FORMAT(nd->flags)) + { + case NETDUMP_ELF32: + offset = (off_t)paddr + (off_t)nd->header_size; + break; + + case NETDUMP_ELF64: ++ case KDUMP_ELF32: ++ case KDUMP_ELF64: + if (nd->num_pt_load_segments == 1) { + offset = (off_t)paddr + (off_t)nd->header_size; + break; +@@ -289,6 +432,11 @@ + pls->file_offset; + break; + } ++ if (pls->zero_fill && (paddr >= pls->phys_end) && ++ (paddr < pls->zero_fill)) { ++ memset(bufptr, 0, cnt); ++ return cnt; ++ } + } + + if (!offset) +@@ -302,24 +450,57 @@ + + if (read(nd->ndfd, bufptr, cnt) != cnt) + return READ_ERROR; ++ + return cnt; + } + + /* +- * Write to a netdump-created dumpfile. ++ * Write to a netdump-created dumpfile. Note that cmd_wr() does not ++ * allow writes to dumpfiles, so you can't get here from there. ++ * But, if it would ever be helpful, here it is... + */ + int + write_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) + { + off_t offset; ++ struct pt_load_segment *pls; ++ int i; + +- offset = (off_t)paddr + (off_t)nd->header_size; ++ switch (DUMPFILE_FORMAT(nd->flags)) ++ { ++ case NETDUMP_ELF32: ++ offset = (off_t)paddr + (off_t)nd->header_size; ++ break; + +- if (lseek(nd->ndfd, offset, SEEK_SET) != offset) ++ case NETDUMP_ELF64: ++ case KDUMP_ELF32: ++ case KDUMP_ELF64: ++ if (nd->num_pt_load_segments == 1) { ++ offset = (off_t)paddr + (off_t)nd->header_size; ++ break; ++ } ++ ++ for (i = offset = 0; i < nd->num_pt_load_segments; i++) { ++ pls = &nd->pt_load_segments[i]; ++ if ((paddr >= pls->phys_start) && ++ (paddr < pls->phys_end)) { ++ offset = (off_t)(paddr - pls->phys_start) + ++ pls->file_offset; ++ break; ++ } ++ } ++ ++ if (!offset) ++ return READ_ERROR; ++ ++ break; ++ } ++ ++ if (lseek(nd->ndfd, offset, SEEK_SET) == -1) + return SEEK_ERROR; + + if (write(nd->ndfd, bufptr, cnt) != cnt) +- return WRITE_ERROR; ++ return READ_ERROR; + + return cnt; + } +@@ -330,7 +511,7 @@ + FILE * + set_netdump_fp(FILE *fp) + { +- if (!NETDUMP_VALID()) ++ if (!VMCORE_VALID()) + return NULL; + + nd->ofp = fp; +@@ -346,7 +527,7 @@ + char buf[BUFSIZE]; + va_list ap; + +- if (!fmt || !strlen(fmt) || !NETDUMP_VALID()) ++ if (!fmt || !strlen(fmt) || !VMCORE_VALID()) + return; + + va_start(ap, fmt); +@@ -362,33 +543,21 @@ + uint + netdump_page_size(void) + { +- uint pagesz; +- +- if (!NETDUMP_VALID()) ++ if (!VMCORE_VALID()) + return 0; + +- switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64)) +- { +- case NETDUMP_ELF32: +- pagesz = (uint)nd->load32->p_align; +- break; +- case NETDUMP_ELF64: +- pagesz = (uint)nd->load64->p_align; +- break; +- } +- +- return pagesz; ++ return nd->page_size; + } + + int + netdump_free_memory(void) + { +- return (NETDUMP_VALID() ? 0 : 0); ++ return (VMCORE_VALID() ? 0 : 0); + } + + int netdump_memory_used(void) + { +- return (NETDUMP_VALID() ? 0 : 0); ++ return (VMCORE_VALID() ? 
0 : 0); + } + + /* +@@ -414,21 +583,57 @@ + #ifdef DAEMON + return nd->task_struct; + #else +- int i; ++ int i, crashing_cpu; + size_t len; + char *user_regs; + ulong ebp, esp, task; + +- if (!NETDUMP_VALID() || !get_active_set()) +- return NO_TASK; ++ if (!VMCORE_VALID() || !get_active_set()) ++ goto panic_task_undetermined; + +- if (nd->task_struct) ++ if (nd->task_struct) { ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "get_netdump_panic_task: NT_TASKSTRUCT: %lx\n", ++ nd->task_struct); + return nd->task_struct; ++ } ++ ++ switch (DUMPFILE_FORMAT(nd->flags)) ++ { ++ case NETDUMP_ELF32: ++ case NETDUMP_ELF64: ++ crashing_cpu = -1; ++ break; ++ ++ case KDUMP_ELF32: ++ case KDUMP_ELF64: ++ crashing_cpu = -1; ++ if (symbol_exists("crashing_cpu")) { ++ get_symbol_data("crashing_cpu", sizeof(int), &i); ++ if ((i >= 0) && (i < nd->num_prstatus_notes)) { ++ crashing_cpu = i; ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "get_netdump_panic_task: crashing_cpu: %d\n", ++ crashing_cpu); ++ } ++ } ++ ++ if ((nd->num_prstatus_notes > 1) && (crashing_cpu == -1)) ++ goto panic_task_undetermined; ++ break; ++ } ++ ++ if (nd->elf32 && (nd->elf32->e_machine == EM_386)) { ++ Elf32_Nhdr *note32; ++ ++ if ((nd->num_prstatus_notes > 1) && (crashing_cpu != -1)) ++ note32 = (Elf32_Nhdr *) ++ nd->nt_prstatus_percpu[crashing_cpu]; ++ else ++ note32 = (Elf32_Nhdr *)nd->nt_prstatus; + +- if (nd->elf32 && nd->elf32->e_machine == EM_386) { +- Elf32_Nhdr *note32 = (Elf32_Nhdr *) +- ((char *)nd->elf32 + nd->notes32->p_offset); +- + len = sizeof(Elf32_Nhdr); + len = roundup(len + note32->n_namesz, 4); + len = roundup(len + note32->n_descsz, 4); +@@ -437,14 +642,15 @@ + - SIZE(user_regs_struct) - sizeof(int); + ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); + esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); ++check_ebp_esp: + if (CRASHDEBUG(1)) +- fprintf(fp, +- "get_netdump_panic_task: esp: %lx ebp: %lx\n", ++ error(INFO, ++ "get_netdump_panic_task: NT_PRSTATUS esp: %lx ebp: %lx\n", + esp, ebp); + if (IS_KVADDR(esp)) { + task = stkptr_to_task(esp); + if (CRASHDEBUG(1)) +- fprintf(fp, ++ error(INFO, + "get_netdump_panic_task: esp: %lx -> task: %lx\n", + esp, task); + for (i = 0; task && (i < NR_CPUS); i++) { +@@ -455,7 +661,7 @@ + if (IS_KVADDR(ebp)) { + task = stkptr_to_task(ebp); + if (CRASHDEBUG(1)) +- fprintf(fp, ++ error(INFO, + "get_netdump_panic_task: ebp: %lx -> task: %lx\n", + ebp, task); + for (i = 0; task && (i < NR_CPUS); i++) { +@@ -464,25 +670,37 @@ + } + } + } else if (nd->elf64) { +- Elf64_Nhdr *note64 = (Elf64_Nhdr *) +- ((char *)nd->elf64 + nd->notes64->p_offset); +- ++ Elf64_Nhdr *note64; ++ ++ if ((nd->num_prstatus_notes > 1) && (crashing_cpu != -1)) ++ note64 = (Elf64_Nhdr *) ++ nd->nt_prstatus_percpu[crashing_cpu]; ++ else ++ note64 = (Elf64_Nhdr *)nd->nt_prstatus; ++ + len = sizeof(Elf64_Nhdr); + len = roundup(len + note64->n_namesz, 4); + user_regs = (char *)((char *)note64 + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); ++ ++ if (nd->elf64->e_machine == EM_386) { ++ ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); ++ esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); ++ goto check_ebp_esp; ++ } ++ + if (nd->elf64->e_machine == EM_PPC64) { + /* + * Get the GPR1 register value. 
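			 *
			 * The ppc64 ELF ABI designates GPR1 as the stack
			 * pointer, and pr_reg lays the GPRs out in order,
			 * so gpr[1] is the second unsigned long in the
			 * register dump:
			 *
			 *	pr_reg: gpr[0] @ +0, gpr[1] (SP) @ +8, ...
			 *
			 * hence the "+ 8" below.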
+ */ + esp = *(ulong *)((char *)user_regs + 8); + if (CRASHDEBUG(1)) +- fprintf(fp, +- "get_netdump_panic_task: esp: %lx\n", esp); ++ error(INFO, ++ "get_netdump_panic_task: NT_PRSTATUS esp: %lx\n", esp); + if (IS_KVADDR(esp)) { + task = stkptr_to_task(esp); + if (CRASHDEBUG(1)) +- fprintf(fp, ++ error(INFO, + "get_netdump_panic_task: esp: %lx -> task: %lx\n", + esp, task); + for (i = 0; task && (i < NR_CPUS); i++) { +@@ -493,8 +711,10 @@ + } + } + ++panic_task_undetermined: ++ + if (CRASHDEBUG(1)) +- fprintf(fp, "get_netdump_panic_task: returning NO_TASK\n"); ++ error(INFO, "get_netdump_panic_task: failed\n"); + + return NO_TASK; + #endif +@@ -512,7 +732,7 @@ + return nd->switch_stack; + return 0; + #else +- if (!NETDUMP_VALID() || !get_active_set()) ++ if (!VMCORE_VALID() || !get_active_set()) + return 0; + + if (nd->task_struct == task) +@@ -525,33 +745,75 @@ + int + netdump_memory_dump(FILE *fp) + { +- int i, others; ++ int i, others, wrap, flen; + size_t len, tot; + FILE *fpsave; + Elf32_Off offset32; + Elf32_Off offset64; + struct pt_load_segment *pls; + +- if (!NETDUMP_VALID()) ++ if (!VMCORE_VALID()) + return FALSE; + + fpsave = nd->ofp; + nd->ofp = fp; + +- netdump_print("netdump_data: \n"); ++ netdump_print("vmcore_data: \n"); + netdump_print(" flags: %lx (", nd->flags); + others = 0; + if (nd->flags & NETDUMP_LOCAL) + netdump_print("%sNETDUMP_LOCAL", others++ ? "|" : ""); ++ if (nd->flags & KDUMP_LOCAL) ++ netdump_print("%sKDUMP_LOCAL", others++ ? "|" : ""); + if (nd->flags & NETDUMP_REMOTE) + netdump_print("%sNETDUMP_REMOTE", others++ ? "|" : ""); + if (nd->flags & NETDUMP_ELF32) + netdump_print("%sNETDUMP_ELF32", others++ ? "|" : ""); + if (nd->flags & NETDUMP_ELF64) + netdump_print("%sNETDUMP_ELF64", others++ ? "|" : ""); ++ if (nd->flags & KDUMP_ELF32) ++ netdump_print("%sKDUMP_ELF32", others++ ? "|" : ""); ++ if (nd->flags & KDUMP_ELF64) ++ netdump_print("%sKDUMP_ELF64", others++ ? "|" : ""); + if (nd->flags & PARTIAL_DUMP) + netdump_print("%sPARTIAL_DUMP", others++ ? "|" : ""); + netdump_print(")\n"); ++ if ((pc->flags & RUNTIME) && symbol_exists("dump_level")) { ++ int dump_level; ++ if (readmem(symbol_value("dump_level"), KVADDR, &dump_level, ++ sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) { ++ netdump_print(" dump_level: %d (0x%x) %s", ++ dump_level, dump_level, ++ dump_level > 0 ? "(" : ""); ++ ++#define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/ ++#define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */ ++#define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */ ++#define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */ ++#define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */ ++ ++ others = 0; ++ if (dump_level & DUMP_EXCLUDE_CACHE) ++ netdump_print("%sDUMP_EXCLUDE_CACHE", ++ others++ ? "|" : ""); ++ if (dump_level & DUMP_EXCLUDE_CLEAN) ++ netdump_print("%sDUMP_EXCLUDE_CLEAN", ++ others++ ? "|" : ""); ++ if (dump_level & DUMP_EXCLUDE_FREE) ++ netdump_print("%sDUMP_EXCLUDE_FREE", ++ others++ ? "|" : ""); ++ if (dump_level & DUMP_EXCLUDE_ANON) ++ netdump_print("%sDUMP_EXCLUDE_ANON", ++ others++ ? "|" : ""); ++ if (dump_level & DUMP_SAVE_PRIVATE) ++ netdump_print("%sDUMP_SAVE_PRIVATE", ++ others++ ? "|" : ""); ++ netdump_print("%s\n", dump_level > 0 ? 
")" : ""); ++ } else ++ netdump_print(" dump_level: (unknown)\n"); ++ } else if (!(pc->flags & RUNTIME) && symbol_exists("dump_level")) ++ netdump_print(" dump_level: (undetermined)\n"); ++ + netdump_print(" ndfd: %d\n", nd->ndfd); + netdump_print(" ofp: %lx\n", nd->ofp); + netdump_print(" header_size: %d\n", nd->header_size); +@@ -565,8 +827,10 @@ + pls->phys_start); + netdump_print(" phys_end: %llx\n", + pls->phys_end); ++ netdump_print(" zero_fill: %llx\n", ++ pls->zero_fill); + } +- netdump_print(" netdump_header: %lx\n", nd->netdump_header); ++ netdump_print(" elf_header: %lx\n", nd->elf_header); + netdump_print(" elf32: %lx\n", nd->elf32); + netdump_print(" notes32: %lx\n", nd->notes32); + netdump_print(" load32: %lx\n", nd->load32); +@@ -577,11 +841,66 @@ + netdump_print(" nt_prpsinfo: %lx\n", nd->nt_prpsinfo); + netdump_print(" nt_taskstruct: %lx\n", nd->nt_taskstruct); + netdump_print(" task_struct: %lx\n", nd->task_struct); +- netdump_print(" switch_stack: %lx\n\n", nd->switch_stack); ++ netdump_print(" page_size: %d\n", nd->page_size); ++ netdump_print(" switch_stack: %lx\n", nd->switch_stack); ++ netdump_print(" xen_kdump_data: %s\n", ++ XEN_CORE_DUMPFILE() ? " " : "(unused)"); ++ if (XEN_CORE_DUMPFILE()) { ++ netdump_print(" flags: %lx (", nd->xen_kdump_data->flags); ++ others = 0; ++ if (nd->xen_kdump_data->flags & KDUMP_P2M_INIT) ++ netdump_print("%sKDUMP_P2M_INIT", others++ ? "|" : ""); ++ if (nd->xen_kdump_data->flags & KDUMP_CR3) ++ netdump_print("%sKDUMP_CR3", others++ ? "|" : ""); ++ if (nd->xen_kdump_data->flags & KDUMP_MFN_LIST) ++ netdump_print("%sKDUMP_MFN_LIST", others++ ? "|" : ""); ++ netdump_print(")\n"); ++ netdump_print(" p2m_mfn: %lx\n", ++ nd->xen_kdump_data->p2m_mfn); ++ netdump_print(" cr3: %lx\n", ++ nd->xen_kdump_data->cr3); ++ netdump_print(" last_mfn_read: %lx\n", ++ nd->xen_kdump_data->last_mfn_read); ++ netdump_print(" last_pmd_read: %lx\n", ++ nd->xen_kdump_data->last_pmd_read); ++ netdump_print(" page: %lx\n", ++ nd->xen_kdump_data->page); ++ netdump_print(" accesses: %ld\n", ++ nd->xen_kdump_data->accesses); ++ netdump_print(" cache_hits: %ld ", ++ nd->xen_kdump_data->cache_hits); ++ if (nd->xen_kdump_data->accesses) ++ netdump_print("(%ld%%)", ++ nd->xen_kdump_data->cache_hits * 100 / nd->xen_kdump_data->accesses); ++ netdump_print("\n p2m_frames: %d\n", ++ nd->xen_kdump_data->p2m_frames); ++ netdump_print(" p2m_mfn_frame_list: %lx\n", ++ nd->xen_kdump_data->p2m_mfn_frame_list); ++ for (i = 0; i < nd->xen_kdump_data->p2m_frames; i++) ++ netdump_print("%lx ", ++ nd->xen_kdump_data->p2m_mfn_frame_list[i]); ++ if (i) netdump_print("\n"); ++ } ++ netdump_print(" num_prstatus_notes: %d\n", nd->num_prstatus_notes); ++ netdump_print(" nt_prstatus_percpu: "); ++ wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; ++ flen = sizeof(void *) == SIZEOF_32BIT ? 
8 : 16; ++ if (nd->num_prstatus_notes == 1) ++ netdump_print("%.*lx\n", flen, nd->nt_prstatus_percpu[0]); ++ else { ++ for (i = 0; i < nd->num_prstatus_notes; i++) { ++ if ((i % wrap) == 0) ++ netdump_print("\n "); ++ netdump_print("%.*lx ", flen, ++ nd->nt_prstatus_percpu[i]); ++ } ++ } ++ netdump_print("\n\n"); + +- switch (nd->flags & (NETDUMP_ELF32|NETDUMP_ELF64)) ++ switch (DUMPFILE_FORMAT(nd->flags)) + { + case NETDUMP_ELF32: ++ case KDUMP_ELF32: + dump_Elf32_Ehdr(nd->elf32); + dump_Elf32_Phdr(nd->notes32, ELFREAD); + for (i = 0; i < nd->num_pt_load_segments; i++) +@@ -594,6 +913,7 @@ + break; + + case NETDUMP_ELF64: ++ case KDUMP_ELF64: + dump_Elf64_Ehdr(nd->elf64); + dump_Elf64_Phdr(nd->notes64, ELFREAD); + for (i = 0; i < nd->num_pt_load_segments; i++) +@@ -865,6 +1185,9 @@ + netdump_print(" e_machine: %d ", elf->e_machine); + switch (elf->e_machine) + { ++ case EM_386: ++ netdump_print("(EM_386)\n"); ++ break; + case EM_IA_64: + netdump_print("(EM_IA_64)\n"); + break; +@@ -961,8 +1284,11 @@ + pls->phys_start = prog->p_paddr; + netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, + prog->p_filesz); +- if (store_pt_load_data) ++ if (store_pt_load_data) { + pls->phys_end = pls->phys_start + prog->p_filesz; ++ pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? ++ 0 : pls->phys_start + prog->p_memsz; ++ } + netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, + prog->p_memsz); + netdump_print(" p_flags: %lx (", prog->p_flags); +@@ -1030,19 +1356,22 @@ + netdump_print("(?)\n"); + } + +- netdump_print(" p_offset: %ld (%lx)\n", prog->p_offset, ++ netdump_print(" p_offset: %lld (%llx)\n", prog->p_offset, + prog->p_offset); + if (store_pt_load_data) + pls->file_offset = prog->p_offset; +- netdump_print(" p_vaddr: %lx\n", prog->p_vaddr); +- netdump_print(" p_paddr: %lx\n", prog->p_paddr); ++ netdump_print(" p_vaddr: %llx\n", prog->p_vaddr); ++ netdump_print(" p_paddr: %llx\n", prog->p_paddr); + if (store_pt_load_data) + pls->phys_start = prog->p_paddr; +- netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, ++ netdump_print(" p_filesz: %llu (%llx)\n", prog->p_filesz, + prog->p_filesz); +- if (store_pt_load_data) ++ if (store_pt_load_data) { + pls->phys_end = pls->phys_start + prog->p_filesz; +- netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, ++ pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? ++ 0 : pls->phys_start + prog->p_memsz; ++ } ++ netdump_print(" p_memsz: %llu (%llx)\n", prog->p_memsz, + prog->p_memsz); + netdump_print(" p_flags: %lx (", prog->p_flags); + others = 0; +@@ -1053,7 +1382,7 @@ + if (prog->p_flags & PF_R) + netdump_print("%sPF_R", others++ ? 
"|" : ""); + netdump_print(")\n"); +- netdump_print(" p_align: %ld\n", prog->p_align); ++ netdump_print(" p_align: %lld\n", prog->p_align); + } + + /* +@@ -1061,20 +1390,22 @@ + */ + + static size_t +-dump_Elf32_Nhdr(Elf32_Off offset, int store_addresses) ++dump_Elf32_Nhdr(Elf32_Off offset, int store) + { +- int i, lf; ++ int i, lf, words; + Elf32_Nhdr *note; + size_t len; + char buf[BUFSIZE]; + char *ptr; + ulong *uptr; ++ int xen_core, vmcoreinfo; + + note = (Elf32_Nhdr *)((char *)nd->elf32 + offset); + + netdump_print("Elf32_Nhdr:\n"); + netdump_print(" n_namesz: %ld ", note->n_namesz); + BZERO(buf, BUFSIZE); ++ xen_core = vmcoreinfo = FALSE; + ptr = (char *)note + sizeof(Elf32_Nhdr); + BCOPY(ptr, buf, note->n_namesz); + netdump_print("(\"%s\")\n", buf); +@@ -1085,17 +1416,26 @@ + { + case NT_PRSTATUS: + netdump_print("(NT_PRSTATUS)\n"); +- if (store_addresses) +- nd->nt_prstatus = (void *)note; ++ if (store) { ++ if (!nd->nt_prstatus) ++ nd->nt_prstatus = (void *)note; ++ for (i = 0; i < NR_CPUS; i++) { ++ if (!nd->nt_prstatus_percpu[i]) { ++ nd->nt_prstatus_percpu[i] = (void *)note; ++ nd->num_prstatus_notes++; ++ break; ++ } ++ } ++ } + break; + case NT_PRPSINFO: + netdump_print("(NT_PRPSINFO)\n"); +- if (store_addresses) ++ if (store) + nd->nt_prpsinfo = (void *)note; + break; + case NT_TASKSTRUCT: + netdump_print("(NT_TASKSTRUCT)\n"); +- if (store_addresses) { ++ if (store) { + nd->nt_taskstruct = (void *)note; + nd->task_struct = *((ulong *)(ptr + note->n_namesz)); + nd->switch_stack = *((ulong *) +@@ -1105,25 +1445,128 @@ + case NT_DISKDUMP: + netdump_print("(NT_DISKDUMP)\n"); + uptr = (ulong *)(ptr + note->n_namesz); +- if (*uptr) ++ if (*uptr && store) + nd->flags |= PARTIAL_DUMP; + break; ++#ifdef NOTDEF ++ /* ++ * Note: Based upon the original, abandoned, proposal for ++ * its contents -- keep around for potential future use. ++ */ ++ case NT_KDUMPINFO: ++ netdump_print("(NT_KDUMPINFO)\n"); ++ if (store) { ++ uptr = (note->n_namesz == 5) ? ++ (ulong *)(ptr + ((note->n_namesz + 3) & ~3)) : ++ (ulong *)(ptr + note->n_namesz); ++ nd->page_size = (uint)(1 << *uptr); ++ uptr++; ++ nd->task_struct = *uptr; ++ } ++ break; ++#endif + default: +- netdump_print("(?)\n"); ++ xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); ++ vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); ++ if (xen_core) { ++ netdump_print("(unknown Xen n_type)\n"); ++ if (store) ++ error(WARNING, "unknown Xen n_type: %lx\n\n", ++ note->n_type); ++ } else if (vmcoreinfo) ++ netdump_print("(unused)\n"); ++ else ++ netdump_print("(?)\n"); ++ break; ++ ++ case NT_XEN_KDUMP_CR3: ++ netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); ++ if (store) ++ error(WARNING, ++ "obsolete Xen n_type: %lx (NT_XEN_KDUMP_CR3)\n\n", ++ note->n_type); ++ /* FALL THROUGH */ ++ ++ case XEN_ELFNOTE_CRASH_INFO: ++ /* ++ * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure ++ */ ++ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) ++ netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); ++ xen_core = TRUE; ++ if (store) { ++ pc->flags |= XEN_CORE; ++ nd->xen_kdump_data = &xen_kdump_data; ++ nd->xen_kdump_data->last_mfn_read = UNINITIALIZED; ++ nd->xen_kdump_data->last_pmd_read = UNINITIALIZED; ++ ++ if ((note->n_type == NT_XEN_KDUMP_CR3) && ++ ((note->n_descsz/sizeof(ulong)) == 1)) { ++ nd->xen_kdump_data->flags |= KDUMP_CR3; ++ /* ++ * Use the first cr3 found. 
++ */ ++ if (!nd->xen_kdump_data->cr3) { ++ uptr = (ulong *)(ptr + note->n_namesz); ++ uptr = (ulong *)roundup((ulong)uptr, 4); ++ nd->xen_kdump_data->cr3 = *uptr; ++ } ++ } else { ++ nd->xen_kdump_data->flags |= KDUMP_MFN_LIST; ++ uptr = (ulong *)(ptr + note->n_namesz); ++ uptr = (ulong *)roundup((ulong)uptr, 4); ++ words = note->n_descsz/sizeof(ulong); ++ /* ++ * If already set, overridden with --pfm_mfn ++ */ ++ if (!nd->xen_kdump_data->p2m_mfn) ++ nd->xen_kdump_data->p2m_mfn = *(uptr+(words-1)); ++ } ++ } ++ break; ++ ++ case XEN_ELFNOTE_CRASH_REGS: ++ /* ++ * x86 and x86_64: cr0, cr2, cr3, cr4 ++ */ ++ xen_core = TRUE; ++ netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); ++ break; + } + + uptr = (ulong *)(ptr + note->n_namesz); +- for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { +- if (((i%4)==0)) { +- netdump_print("%s ", +- i ? "\n" : ""); +- lf++; +- } else +- lf = 0; +- netdump_print("%08lx ", *uptr++); ++ ++ /* ++ * kdumps are off-by-1, because their n_namesz is 5 for "CORE". ++ */ ++ if ((nd->flags & KDUMP_ELF32) && (note->n_namesz == 5)) ++ uptr = (ulong *)(ptr + ((note->n_namesz + 3) & ~3)); ++ ++ if (xen_core) ++ uptr = (ulong *)roundup((ulong)uptr, 4); ++ ++ if (vmcoreinfo) { ++ netdump_print(" "); ++ ptr += note->n_namesz + 1; ++ for (i = 0; i < note->n_descsz; i++, ptr++) { ++ netdump_print("%c", *ptr); ++ if (*ptr == '\n') ++ netdump_print(" "); ++ } ++ lf = 0; ++ } else { ++ for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { ++ if (((i%4)==0)) { ++ netdump_print("%s ", ++ i ? "\n" : ""); ++ lf++; ++ } else ++ lf = 0; ++ netdump_print("%08lx ", *uptr++); ++ } + } + if (!lf || (note->n_type == NT_TASKSTRUCT) || +- (note->n_type == NT_DISKDUMP)) ++ (note->n_type == NT_DISKDUMP) || xen_core) + netdump_print("\n"); + + len = sizeof(Elf32_Nhdr); +@@ -1135,15 +1578,17 @@ + + + static size_t +-dump_Elf64_Nhdr(Elf64_Off offset, int store_addresses) ++dump_Elf64_Nhdr(Elf64_Off offset, int store) + { +- int i, lf; ++ int i, lf, words; + Elf64_Nhdr *note; + size_t len; + char buf[BUFSIZE]; + char *ptr; + ulonglong *uptr; + int *iptr; ++ ulong *up; ++ int xen_core, vmcoreinfo; + + note = (Elf64_Nhdr *)((char *)nd->elf64 + offset); + +@@ -1151,6 +1596,7 @@ + netdump_print(" n_namesz: %ld ", note->n_namesz); + BZERO(buf, BUFSIZE); + ptr = (char *)note + sizeof(Elf64_Nhdr); ++ xen_core = vmcoreinfo = FALSE; + BCOPY(ptr, buf, note->n_namesz); + netdump_print("(\"%s\")\n", buf); + +@@ -1160,17 +1606,26 @@ + { + case NT_PRSTATUS: + netdump_print("(NT_PRSTATUS)\n"); +- if (store_addresses) +- nd->nt_prstatus = (void *)note; ++ if (store) { ++ if (!nd->nt_prstatus) ++ nd->nt_prstatus = (void *)note; ++ for (i = 0; i < NR_CPUS; i++) { ++ if (!nd->nt_prstatus_percpu[i]) { ++ nd->nt_prstatus_percpu[i] = (void *)note; ++ nd->num_prstatus_notes++; ++ break; ++ } ++ } ++ } + break; + case NT_PRPSINFO: + netdump_print("(NT_PRPSINFO)\n"); +- if (store_addresses) ++ if (store) + nd->nt_prpsinfo = (void *)note; + break; + case NT_TASKSTRUCT: + netdump_print("(NT_TASKSTRUCT)\n"); +- if (store_addresses) { ++ if (store) { + nd->nt_taskstruct = (void *)note; + nd->task_struct = *((ulong *)(ptr + note->n_namesz)); + nd->switch_stack = *((ulong *) +@@ -1180,24 +1635,149 @@ + case NT_DISKDUMP: + netdump_print("(NT_DISKDUMP)\n"); + iptr = (int *)(ptr + note->n_namesz); +- if (*iptr) ++ if (*iptr && store) + nd->flags |= PARTIAL_DUMP; + if (note->n_descsz < sizeof(ulonglong)) + netdump_print(" %08x", *iptr); + break; ++#ifdef NOTDEF ++ /* ++ * Note: Based upon the original, abandoned, proposal for 
++ * its contents -- keep around for potential future use. ++ */ ++ case NT_KDUMPINFO: ++ netdump_print("(NT_KDUMPINFO)\n"); ++ if (store) { ++ uint32_t *u32ptr; ++ ++ if (nd->elf64->e_machine == EM_386) { ++ u32ptr = (note->n_namesz == 5) ? ++ (uint *)(ptr + ((note->n_namesz + 3) & ~3)) : ++ (uint *)(ptr + note->n_namesz); ++ nd->page_size = 1 << *u32ptr; ++ u32ptr++; ++ nd->task_struct = *u32ptr; ++ } else { ++ uptr = (note->n_namesz == 5) ? ++ (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)) : ++ (ulonglong *)(ptr + note->n_namesz); ++ nd->page_size = (uint)(1 << *uptr); ++ uptr++; ++ nd->task_struct = *uptr; ++ } ++ } ++ break; ++#endif + default: +- netdump_print("(?)\n"); ++ xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); ++ vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); ++ if (xen_core) { ++ netdump_print("(unknown Xen n_type)\n"); ++ if (store) ++ error(WARNING, ++ "unknown Xen n_type: %lx\n\n", note->n_type); ++ } else if (vmcoreinfo) ++ netdump_print("(unused)\n"); ++ else ++ netdump_print("(?)\n"); ++ break; ++ ++ case NT_XEN_KDUMP_CR3: ++ netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); ++ if (store) ++ error(WARNING, ++ "obsolete Xen n_type: %lx (NT_XEN_KDUMP_CR3)\n\n", ++ note->n_type); ++ /* FALL THROUGH */ ++ ++ case XEN_ELFNOTE_CRASH_INFO: ++ /* ++ * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure ++ */ ++ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) ++ netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); ++ xen_core = TRUE; ++ if (store) { ++ pc->flags |= XEN_CORE; ++ nd->xen_kdump_data = &xen_kdump_data; ++ nd->xen_kdump_data->last_mfn_read = UNINITIALIZED; ++ nd->xen_kdump_data->last_pmd_read = UNINITIALIZED; ++ ++ if ((note->n_type == NT_XEN_KDUMP_CR3) && ++ ((note->n_descsz/sizeof(ulong)) == 1)) { ++ nd->xen_kdump_data->flags |= KDUMP_CR3; ++ /* ++ * Use the first cr3 found. ++ */ ++ if (!nd->xen_kdump_data->cr3) { ++ up = (ulong *)(ptr + note->n_namesz); ++ up = (ulong *)roundup((ulong)up, 4); ++ nd->xen_kdump_data->cr3 = *up; ++ } ++ } else { ++ nd->xen_kdump_data->flags |= KDUMP_MFN_LIST; ++ up = (ulong *)(ptr + note->n_namesz); ++ up = (ulong *)roundup((ulong)up, 4); ++ words = note->n_descsz/sizeof(ulong); ++ /* ++ * If already set, overridden with --p2m_mfn ++ */ ++ if (!nd->xen_kdump_data->p2m_mfn) ++ nd->xen_kdump_data->p2m_mfn = *(up+(words-1)); ++ } ++ } ++ break; ++ ++ case XEN_ELFNOTE_CRASH_REGS: ++ /* ++ * x86 and x86_64: cr0, cr2, cr3, cr4 ++ */ ++ xen_core = TRUE; ++ netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); ++ break; + } + + uptr = (ulonglong *)(ptr + note->n_namesz); +- for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) { +- if (((i%2)==0)) { +- netdump_print("%s ", +- i ? "\n" : ""); +- lf++; +- } else +- lf = 0; +- netdump_print("%016llx ", *uptr++); ++ ++ /* ++ * kdumps are off-by-1, because their n_namesz is 5 for "CORE". ++ */ ++ if ((nd->flags & KDUMP_ELF64) && (note->n_namesz == 5)) ++ uptr = (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)); ++ ++ if (xen_core) ++ uptr = (ulonglong *)roundup((ulong)uptr, 4); ++ ++ if (BITS32() && (xen_core || (note->n_type == NT_PRSTATUS))) { ++ iptr = (int *)uptr; ++ for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { ++ if (((i%4)==0)) { ++ netdump_print("%s ", ++ i ? 
"\n" : ""); ++ lf++; ++ } else ++ lf = 0; ++ netdump_print("%08lx ", *iptr++); ++ } ++ } else if (vmcoreinfo) { ++ netdump_print(" "); ++ ptr += note->n_namesz + 1; ++ for (i = 0; i < note->n_descsz; i++, ptr++) { ++ netdump_print("%c", *ptr); ++ if (*ptr == '\n') ++ netdump_print(" "); ++ } ++ lf = 0; ++ } else { ++ for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) { ++ if (((i%2)==0)) { ++ netdump_print("%s ", ++ i ? "\n" : ""); ++ lf++; ++ } else ++ lf = 0; ++ netdump_print("%016llx ", *uptr++); ++ } + } + if (!lf) + netdump_print("\n"); +@@ -1251,39 +1831,70 @@ + + default: + error(FATAL, +- "netdump support for ELF machine type %d not available\n", ++ "support for ELF machine type %d not available\n", + e_machine); + } + } + +-static void ++struct x86_64_user_regs_struct { ++ unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; ++ unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; ++ unsigned long rip,cs,eflags; ++ unsigned long rsp,ss; ++ unsigned long fs_base, gs_base; ++ unsigned long ds,es,fs,gs; ++}; ++ ++void + get_netdump_regs_x86_64(struct bt_info *bt, ulong *ripp, ulong *rspp) + { + Elf64_Nhdr *note; + size_t len; + char *user_regs; +- ulong rsp, rip; ++ ulong regs_size, rsp_offset, rip_offset; + + if (is_task_active(bt->task)) + bt->flags |= BT_DUMPFILE_SEARCH; + +- if (VALID_STRUCT(user_regs_struct) && (bt->task == tt->panic_task)) { +- note = (Elf64_Nhdr *)nd->nt_prstatus; ++ if (((NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) && ++ VALID_STRUCT(user_regs_struct) && (bt->task == tt->panic_task)) || ++ (KDUMP_DUMPFILE() && (kt->flags & DWARF_UNWIND) && ++ (bt->flags & BT_DUMPFILE_SEARCH))) { ++ if (nd->num_prstatus_notes > 1) ++ note = (Elf64_Nhdr *) ++ nd->nt_prstatus_percpu[bt->tc->processor]; ++ else ++ note = (Elf64_Nhdr *)nd->nt_prstatus; + + len = sizeof(Elf64_Nhdr); + len = roundup(len + note->n_namesz, 4); + len = roundup(len + note->n_descsz, 4); + +- user_regs = ((char *)note + len) +- - SIZE(user_regs_struct) - sizeof(long); ++ regs_size = VALID_STRUCT(user_regs_struct) ? ++ SIZE(user_regs_struct) : ++ sizeof(struct x86_64_user_regs_struct); ++ rsp_offset = VALID_MEMBER(user_regs_struct_rsp) ? ++ OFFSET(user_regs_struct_rsp) : ++ offsetof(struct x86_64_user_regs_struct, rsp); ++ rip_offset = VALID_MEMBER(user_regs_struct_rip) ? ++ OFFSET(user_regs_struct_rip) : ++ offsetof(struct x86_64_user_regs_struct, rip); ++ ++ user_regs = ((char *)note + len) - regs_size - sizeof(long); + +- if (CRASHDEBUG(1)) { +- rsp = ULONG(user_regs + OFFSET(user_regs_struct_rsp)); +- rip = ULONG(user_regs + OFFSET(user_regs_struct_rip)); ++ if (CRASHDEBUG(1)) + netdump_print("ELF prstatus rsp: %lx rip: %lx\n", +- rsp, rip); +- } ++ ULONG(user_regs + rsp_offset), ++ ULONG(user_regs + rip_offset)); + ++ if (KDUMP_DUMPFILE()) { ++ *rspp = ULONG(user_regs + rsp_offset); ++ *ripp = ULONG(user_regs + rip_offset); ++ ++ if (*ripp && *rspp) ++ return; ++ } ++ + bt->machdep = (void *)user_regs; + } + +@@ -1295,13 +1906,14 @@ + * the raw stack for some reasonable hooks. + */ + +-static void ++void + get_netdump_regs_x86(struct bt_info *bt, ulong *eip, ulong *esp) + { +- int i, search, panic; ++ int i, search, panic, panic_task; + char *sym; + ulong *up; + ulong ipintr_eip, ipintr_esp, ipintr_func; ++ ulong halt_eip, halt_esp; + int check_hardirq, check_softirq; + + if (!is_task_active(bt->task)) { +@@ -1309,17 +1921,31 @@ + return; + } + ++ panic_task = tt->panic_task == bt->task ? 
TRUE : FALSE; ++ + ipintr_eip = ipintr_esp = ipintr_func = panic = 0; ++ halt_eip = halt_esp = 0; + check_hardirq = check_softirq = tt->flags & IRQSTACKS ? TRUE : FALSE; + search = ((bt->flags & BT_TEXT_SYMBOLS) && (tt->flags & TASK_INIT_DONE)) + || (machdep->flags & OMIT_FRAME_PTR); +- + retry: + for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){ + sym = closest_symbol(*up); +- if (STREQ(sym, "netconsole_netdump") || ++ ++ if (XEN_CORE_DUMPFILE()) { ++ if (STREQ(sym, "xen_machine_kexec")) { ++ *eip = *up; ++ *esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); ++ return; ++ } ++ if (STREQ(sym, "crash_kexec")) { ++ halt_eip = *up; ++ halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); ++ } ++ } else if (STREQ(sym, "netconsole_netdump") || + STREQ(sym, "netpoll_start_netdump") || + STREQ(sym, "start_disk_dump") || ++ STREQ(sym, "crash_kexec") || + STREQ(sym, "disk_dump")) { + *eip = *up; + *esp = search ? +@@ -1354,7 +1980,7 @@ + next_sysrq: + *eip = *up; + *esp = bt->stackbase + ((char *)(up+4) - bt->stackbuf); +- machdep->flags |= SYSRQ; ++ pc->flags |= SYSRQ; + for (i++, up++; i < LONGS_PER_STACK; i++, up++) { + sym = closest_symbol(*up); + if (STREQ(sym, "sysrq_handle_crash")) +@@ -1371,7 +1997,15 @@ + *esp = search ? + bt->stackbase + ((char *)(up+1) - bt->stackbuf) : + *(up-1); +- machdep->flags |= SYSRQ; ++ pc->flags |= SYSRQ; ++ return; ++ } ++ ++ if (STREQ(sym, "crash_nmi_callback")) { ++ *eip = *up; ++ *esp = search ? ++ bt->stackbase + ((char *)(up+1) - bt->stackbuf) : ++ *(up-1); + return; + } + +@@ -1385,6 +2019,18 @@ + bt->stackbase + ((char *)(up-1) - bt->stackbuf); + ipintr_func = *(up - 2); + } ++ ++ if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && ++ STREQ(sym, "safe_halt")) { ++ halt_eip = *up; ++ halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); ++ } ++ ++ if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && ++ !halt_eip && STREQ(sym, "xen_idle")) { ++ halt_eip = *up; ++ halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); ++ } + } + + if (ipintr_eip) { +@@ -1393,6 +2039,12 @@ + return; + } + ++ if (halt_eip && halt_esp) { ++ *eip = halt_eip; ++ *esp = halt_esp; ++ return; ++ } ++ + if (panic) + return; + +@@ -1418,7 +2070,9 @@ + goto retry; + } + +- console("get_netdump_regs_x86: cannot find anything useful\n"); ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "get_netdump_regs_x86: cannot find anything useful (task: %lx)\n", bt->task); + + machdep->get_stack_frame(bt, eip, esp); + } +@@ -1429,8 +2083,18 @@ + Elf64_Nhdr *note; + size_t len; + +- if (bt->task == tt->panic_task) { +- note = (Elf64_Nhdr *)nd->nt_prstatus; ++ if ((bt->task == tt->panic_task) || ++ (is_task_active(bt->task) && nd->num_prstatus_notes > 1)) { ++ /* ++ * Registers are saved during the dump process for the ++ * panic task. Whereas in kdump, regs are captured for all ++ * CPUs if they responded to an IPI. ++ */ ++ if (nd->num_prstatus_notes > 1) ++ note = (Elf64_Nhdr *) ++ nd->nt_prstatus_percpu[bt->tc->processor]; ++ else ++ note = (Elf64_Nhdr *)nd->nt_prstatus; + + len = sizeof(Elf64_Nhdr); + len = roundup(len + note->n_namesz, 4); +@@ -1446,3 +2110,205 @@ + { + return (nd->flags & PARTIAL_DUMP ? TRUE : FALSE); + } ++ ++ ++/* ++ * kexec/kdump generated vmcore files are similar enough in ++ * nature to netdump/diskdump such that most vmcore access ++ * functionality may be borrowed from the equivalent netdump ++ * function. If not, re-work them here. 
++ */ ++int ++is_kdump(char *file, ulong source_query) ++{ ++ return is_netdump(file, source_query); ++} ++ ++int ++kdump_init(char *unused, FILE *fptr) ++{ ++ return netdump_init(unused, fptr); ++} ++ ++ulong ++get_kdump_panic_task(void) ++{ ++ return get_netdump_panic_task(); ++} ++ ++int ++read_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) ++{ ++ if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) { ++ if (!(nd->xen_kdump_data->flags & KDUMP_P2M_INIT)) { ++ if (!machdep->xen_kdump_p2m_create) ++ error(FATAL, ++ "xen kdump dumpfiles not supported on this architecture\n"); ++ ++ if ((nd->xen_kdump_data->page = ++ (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, ++ "cannot malloc xen kdump data page\n"); ++ ++ if (!machdep->xen_kdump_p2m_create(nd->xen_kdump_data)) ++ error(FATAL, ++ "cannot create xen kdump pfn-to-mfn mapping\n"); ++ ++ nd->xen_kdump_data->flags |= KDUMP_P2M_INIT; ++ } ++ ++ if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE) ++ return READ_ERROR; ++ } ++ ++ return read_netdump(fd, bufptr, cnt, addr, paddr); ++} ++ ++int ++write_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) ++{ ++ return write_netdump(fd, bufptr, cnt, addr, paddr); ++} ++ ++void ++get_kdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) ++{ ++ get_netdump_regs(bt, eip, esp); ++} ++ ++uint ++kdump_page_size(void) ++{ ++ uint pagesz; ++ ++ if (!VMCORE_VALID()) ++ return 0; ++ ++ if (!(pagesz = nd->page_size)) ++ pagesz = (uint)getpagesize(); ++ ++ return pagesz; ++} ++ ++int ++kdump_free_memory(void) ++{ ++ return netdump_free_memory(); ++} ++ ++int ++kdump_memory_used(void) ++{ ++ return netdump_memory_used(); ++} ++ ++int ++kdump_memory_dump(FILE *fp) ++{ ++ return netdump_memory_dump(fp); ++} ++ ++/* ++ * Translate a xen domain's pseudo-physical address into the ++ * xen machine address. Since there's no compression involved, ++ * just the last phys_to_machine_mapping[] page read is cached, ++ * which essentially caches 1024 p2m translations. ++ */ ++static physaddr_t ++xen_kdump_p2m(physaddr_t pseudo) ++{ ++ ulong pfn, mfn_frame; ++ ulong *mfnptr; ++ ulong mfn_idx, frame_idx; ++ physaddr_t paddr; ++ struct xen_kdump_data *xkd = nd->xen_kdump_data; ++ ++ if (pc->curcmd_flags & XEN_MACHINE_ADDR) ++ return pseudo; ++ ++#ifdef IA64 ++ return ia64_xen_kdump_p2m(xkd, pseudo); ++#endif ++ ++ xkd->accesses++; ++ ++ pfn = (ulong)BTOP(pseudo); ++ mfn_idx = pfn / (PAGESIZE()/sizeof(ulong)); ++ frame_idx = pfn % (PAGESIZE()/sizeof(ulong)); ++ if (mfn_idx >= xkd->p2m_frames) ++ return P2M_FAILURE; ++ mfn_frame = xkd->p2m_mfn_frame_list[mfn_idx]; ++ ++ if (mfn_frame == xkd->last_mfn_read) ++ xkd->cache_hits++; ++ else if (read_netdump(0, xkd->page, PAGESIZE(), 0, ++ (physaddr_t)PTOB(mfn_frame)) != PAGESIZE()) ++ return P2M_FAILURE; ++ ++ xkd->last_mfn_read = mfn_frame; ++ ++ mfnptr = ((ulong *)(xkd->page)) + frame_idx; ++ paddr = (physaddr_t)PTOB((ulonglong)(*mfnptr)); ++ paddr |= PAGEOFFSET(pseudo); ++ ++ if (CRASHDEBUG(7)) ++ fprintf(fp, ++ "xen_dump_p2m(%llx): mfn_idx: %ld frame_idx: %ld" ++ " mfn_frame: %lx mfn: %lx => %llx\n", ++ (ulonglong)pseudo, mfn_idx, frame_idx, ++ mfn_frame, *mfnptr, (ulonglong)paddr); ++ ++ return paddr; ++} ++ ++struct vmcore_data * ++get_kdump_vmcore_data(void) ++{ ++ if (!VMCORE_VALID() || !KDUMP_DUMPFILE()) ++ return NULL; ++ ++ return &vmcore_data; ++} ++ ++/* ++ * Override the dom0 p2m mfn in the XEN_ELFNOTE_CRASH_INFO note ++ * in order to initiate a crash session of a guest kernel. 
++ */ ++void ++xen_kdump_p2m_mfn(char *arg) ++{ ++ ulong value; ++ int errflag; ++ ++ errflag = 0; ++ value = htol(arg, RETURN_ON_ERROR|QUIET, &errflag); ++ if (!errflag) { ++ xen_kdump_data.p2m_mfn = value; ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "xen_kdump_data.p2m_mfn override: %lx\n", ++ value); ++ } else ++ error(WARNING, "invalid p2m_mfn argument: %s\n", arg); ++} ++ ++/* ++ * Fujitsu dom0/HV sadump-generated dumpfile, which requires ++ * the --p2m_mfn command line argument. ++ */ ++int ++is_sadump_xen(void) ++{ ++ if (xen_kdump_data.p2m_mfn) { ++ if (!XEN_CORE_DUMPFILE()) { ++ pc->flags |= XEN_CORE; ++ nd->xen_kdump_data = &xen_kdump_data; ++ nd->xen_kdump_data->last_mfn_read = UNINITIALIZED; ++ nd->xen_kdump_data->last_pmd_read = UNINITIALIZED; ++ nd->xen_kdump_data->flags |= KDUMP_MFN_LIST; ++ } ++ return TRUE; ++ } ++ ++ return FALSE; ++} +--- crash/x86_64.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/x86_64.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,7 +1,7 @@ + /* x86_64.c -- core analysis suite + * +- * Copyright (C) 2004, 2005 David Anderson +- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2004, 2005, 2006, 2007 David Anderson ++ * Copyright (C) 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -14,11 +14,16 @@ + * GNU General Public License for more details. + */ + #include "defs.h" ++#include "xen_hyper_defs.h" + + #ifdef X86_64 + + static int x86_64_kvtop(struct task_context *, ulong, physaddr_t *, int); ++static int x86_64_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); + static int x86_64_uvtop(struct task_context *, ulong, physaddr_t *, int); ++static int x86_64_uvtop_level4(struct task_context *, ulong, physaddr_t *, int); ++static int x86_64_uvtop_level4_xen_wpt(struct task_context *, ulong, physaddr_t *, int); ++static int x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *, ulong, physaddr_t *, int); + static ulong x86_64_vmalloc_start(void); + static int x86_64_is_task_addr(ulong); + static int x86_64_verify_symbol(const char *, ulong, char); +@@ -32,14 +37,17 @@ + #define EFRAME_VERIFY (0x2) + #define EFRAME_CS (0x4) + #define EFRAME_SEARCH (0x8) ++static int x86_64_print_eframe_location(ulong, int, FILE *); + static void x86_64_back_trace_cmd(struct bt_info *); + static ulong x86_64_in_exception_stack(struct bt_info *); + static ulong x86_64_in_irqstack(struct bt_info *); + static void x86_64_low_budget_back_trace_cmd(struct bt_info *); ++static void x86_64_dwarf_back_trace_cmd(struct bt_info *); + static void x86_64_get_dumpfile_stack_frame(struct bt_info *, ulong *, ulong *); + static struct syment *x86_64_function_called_by(ulong); + static int is_direct_call_target(struct bt_info *); + static void get_x86_64_frame(struct bt_info *, ulong *, ulong *); ++static ulong text_lock_function(char *, struct bt_info *, ulong); + static int x86_64_print_stack_entry(struct bt_info *, FILE *, int, int, ulong); + static void x86_64_display_full_frame(struct bt_info *, ulong, FILE *); + static void x86_64_do_bt_reference_check(struct bt_info *, ulong,char *); +@@ -56,6 +64,8 @@ + static void x86_64_display_memmap(void); + static void x86_64_dump_line_number(ulong); + static struct line_number_hook x86_64_line_number_hooks[]; ++static void x86_64_calc_phys_base(void); ++static int x86_64_is_module_addr(ulong); + static int x86_64_is_kvaddr(ulong); + 
static int x86_64_is_uvaddr(ulong, struct task_context *); + void x86_64_compiler_warning_stub(void); +@@ -63,7 +73,25 @@ + static void x86_64_cpu_pda_init(void); + static void x86_64_ist_init(void); + static void x86_64_post_init(void); +- ++static void parse_cmdline_arg(void); ++static void x86_64_clear_machdep_cache(void); ++static void x86_64_irq_eframe_link_init(void); ++static int x86_64_xendump_p2m_create(struct xendump_data *); ++static char *x86_64_xendump_load_page(ulong, struct xendump_data *); ++static int x86_64_xendump_page_index(ulong, struct xendump_data *); ++static int x86_64_xen_kdump_p2m_create(struct xen_kdump_data *); ++static char *x86_64_xen_kdump_load_page(ulong, char *); ++static ulong x86_64_xen_kdump_page_mfn(ulong); ++static void x86_64_debug_dump_page(FILE *, char *, char *); ++static void x86_64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); ++static ulong x86_64_xendump_panic_task(struct xendump_data *); ++static void x86_64_init_hyper(int); ++static ulong x86_64_get_stackbase_hyper(ulong); ++static ulong x86_64_get_stacktop_hyper(ulong); ++static int x86_64_framesize_cache_resize(void); ++static int x86_64_framesize_cache_func(int, ulong, int *); ++static int x86_64_get_framesize(struct bt_info *, ulong); ++static void x86_64_framesize_debug(struct bt_info *); + + struct machine_specific x86_64_machine_specific = { 0 }; + +@@ -74,6 +102,11 @@ + void + x86_64_init(int when) + { ++ if (XEN_HYPER_MODE()) { ++ x86_64_init_hyper(when); ++ return; ++ } ++ + switch (when) + { + case PRE_SYMTAB: +@@ -86,6 +119,8 @@ + machdep->pageoffset = machdep->pagesize - 1; + machdep->pagemask = ~((ulonglong)machdep->pageoffset); + machdep->stacksize = machdep->pagesize * 2; ++ if ((machdep->machspec->upml = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc upml space."); + if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc pgd space."); + if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) +@@ -93,17 +128,91 @@ + if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc ptbl space."); + if ((machdep->machspec->pml4 = +- (char *)malloc(PAGESIZE())) == NULL) ++ (char *)malloc(PAGESIZE()*2)) == NULL) + error(FATAL, "cannot malloc pml4 space."); ++ machdep->machspec->last_upml_read = 0; ++ machdep->machspec->last_pml4_read = 0; + machdep->last_pgd_read = 0; + machdep->last_pmd_read = 0; + machdep->last_ptbl_read = 0; + machdep->verify_paddr = generic_verify_paddr; + machdep->ptrs_per_pgd = PTRS_PER_PGD; + machdep->flags |= MACHDEP_BT_TEXT; ++ machdep->flags |= FRAMESIZE_DEBUG; ++ machdep->machspec->irq_eframe_link = UNINITIALIZED; ++ if (machdep->cmdline_arg) ++ parse_cmdline_arg(); + break; + + case PRE_GDB: ++ if (!(machdep->flags & VM_FLAGS)) { ++ if (symbol_exists("xen_start_info")) { ++ if (symbol_exists("low_pml4") && ++ symbol_exists("swap_low_mappings")) ++ machdep->flags |= VM_XEN_RHEL4; ++ else ++ machdep->flags |= VM_XEN; ++ } else if (symbol_exists("boot_vmalloc_pgt")) ++ machdep->flags |= VM_ORIG; ++ else ++ machdep->flags |= VM_2_6_11; ++ } ++ ++ switch (machdep->flags & VM_FLAGS) ++ { ++ case VM_ORIG: ++ /* pre-2.6.11 layout */ ++ machdep->machspec->userspace_top = USERSPACE_TOP_ORIG; ++ machdep->machspec->page_offset = PAGE_OFFSET_ORIG; ++ machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_ORIG; ++ machdep->machspec->vmalloc_end = VMALLOC_END_ORIG; ++ machdep->machspec->modules_vaddr = MODULES_VADDR_ORIG; ++ machdep->machspec->modules_end = 
MODULES_END_ORIG; ++ ++ free(machdep->machspec->upml); ++ machdep->machspec->upml = NULL; ++ ++ machdep->uvtop = x86_64_uvtop; ++ break; ++ ++ case VM_2_6_11: ++ /* 2.6.11 layout */ ++ machdep->machspec->userspace_top = USERSPACE_TOP_2_6_11; ++ machdep->machspec->page_offset = PAGE_OFFSET_2_6_11; ++ machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_2_6_11; ++ machdep->machspec->vmalloc_end = VMALLOC_END_2_6_11; ++ machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_11; ++ machdep->machspec->modules_end = MODULES_END_2_6_11; ++ ++ /* 2.6.24 layout */ ++ machdep->machspec->vmemmap_vaddr = VMEMMAP_VADDR_2_6_24; ++ machdep->machspec->vmemmap_end = VMEMMAP_END_2_6_24; ++ if (symbol_exists("vmemmap_populate")) ++ machdep->flags |= VMEMMAP; ++ ++ machdep->uvtop = x86_64_uvtop_level4; ++ break; ++ ++ case VM_XEN: ++ /* Xen layout */ ++ machdep->machspec->userspace_top = USERSPACE_TOP_XEN; ++ machdep->machspec->page_offset = PAGE_OFFSET_XEN; ++ machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN; ++ machdep->machspec->vmalloc_end = VMALLOC_END_XEN; ++ machdep->machspec->modules_vaddr = MODULES_VADDR_XEN; ++ machdep->machspec->modules_end = MODULES_END_XEN; ++ break; ++ ++ case VM_XEN_RHEL4: ++ /* RHEL4 Xen layout */ ++ machdep->machspec->userspace_top = USERSPACE_TOP_XEN_RHEL4; ++ machdep->machspec->page_offset = PAGE_OFFSET_XEN_RHEL4; ++ machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN_RHEL4; ++ machdep->machspec->vmalloc_end = VMALLOC_END_XEN_RHEL4; ++ machdep->machspec->modules_vaddr = MODULES_VADDR_XEN_RHEL4; ++ machdep->machspec->modules_end = MODULES_END_XEN_RHEL4; ++ break; ++ } + machdep->kvbase = (ulong)PAGE_OFFSET; + machdep->identity_map_base = (ulong)PAGE_OFFSET; + machdep->is_kvaddr = x86_64_is_kvaddr; +@@ -111,7 +220,6 @@ + machdep->eframe_search = x86_64_eframe_search; + machdep->back_trace = x86_64_low_budget_back_trace_cmd; + machdep->processor_speed = x86_64_processor_speed; +- machdep->uvtop = x86_64_uvtop; + machdep->kvtop = x86_64_kvtop; + machdep->get_task_pgd = x86_64_get_task_pgd; + machdep->get_stack_frame = x86_64_get_stack_frame; +@@ -126,6 +234,12 @@ + machdep->line_number_hooks = x86_64_line_number_hooks; + machdep->value_to_symbol = generic_machdep_value_to_symbol; + machdep->init_kernel_pgd = x86_64_init_kernel_pgd; ++ machdep->clear_machdep_cache = x86_64_clear_machdep_cache; ++ machdep->xendump_p2m_create = x86_64_xendump_p2m_create; ++ machdep->get_xendump_regs = x86_64_get_xendump_regs; ++ machdep->xen_kdump_p2m_create = x86_64_xen_kdump_p2m_create; ++ machdep->xendump_panic_task = x86_64_xendump_panic_task; ++ x86_64_calc_phys_base(); + break; + + case POST_GDB: +@@ -158,16 +272,49 @@ + if ((machdep->machspec->irqstack = (char *) + malloc(machdep->machspec->stkinfo.isize)) == NULL) + error(FATAL, "cannot malloc irqstack space."); +- if (symbol_exists("irq_desc")) +- ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, +- "irq_desc", NULL, 0); +- else +- machdep->nr_irqs = 224; /* NR_IRQS (at least) */ ++ if (symbol_exists("irq_desc")) { ++ if (LKCD_KERNTYPES()) ++ ARRAY_LENGTH_INIT_ALT(machdep->nr_irqs, ++ "irq_desc", "kernel_stat.irqs", NULL, 0); ++ else ++ ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, ++ "irq_desc", NULL, 0); ++ } else ++ machdep->nr_irqs = 224; /* NR_IRQS (at least) */ + machdep->vmalloc_start = x86_64_vmalloc_start; + machdep->dump_irq = x86_64_dump_irq; +- machdep->hz = HZ; +- if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) +- machdep->hz = 1000; ++ if (!machdep->hz) { ++ machdep->hz = HZ; ++ if (THIS_KERNEL_VERSION >= 
LINUX(2,6,0)) ++ machdep->hz = 1000; ++ } ++ machdep->section_size_bits = _SECTION_SIZE_BITS; ++ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; ++ if (XEN()) { ++ if (kt->xen_flags & WRITABLE_PAGE_TABLES) { ++ switch (machdep->flags & VM_FLAGS) ++ { ++ case VM_XEN: ++ machdep->uvtop = x86_64_uvtop_level4_xen_wpt; ++ break; ++ case VM_XEN_RHEL4: ++ machdep->uvtop = x86_64_uvtop_level4_rhel4_xen_wpt; ++ break; ++ } ++ } else ++ machdep->uvtop = x86_64_uvtop_level4; ++ MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs, ++ "vcpu_guest_context", "user_regs"); ++ ASSIGN_OFFSET(cpu_user_regs_rsp) = ++ MEMBER_OFFSET("cpu_user_regs", "ss") - sizeof(ulong); ++ ASSIGN_OFFSET(cpu_user_regs_rip) = ++ MEMBER_OFFSET("cpu_user_regs", "cs") - sizeof(ulong); ++ } ++ x86_64_irq_eframe_link_init(); ++ break; ++ ++ case POST_VM: ++ init_unwind_table(); + break; + + case POST_INIT: +@@ -191,10 +338,26 @@ + fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); + if (machdep->flags & PT_REGS_INIT) + fprintf(fp, "%sPT_REGS_INIT", others++ ? "|" : ""); +- if (machdep->flags & SYSRQ) +- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); + if (machdep->flags & MACHDEP_BT_TEXT) + fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); ++ if (machdep->flags & VM_ORIG) ++ fprintf(fp, "%sVM_ORIG", others++ ? "|" : ""); ++ if (machdep->flags & VM_2_6_11) ++ fprintf(fp, "%sVM_2_6_11", others++ ? "|" : ""); ++ if (machdep->flags & VM_XEN) ++ fprintf(fp, "%sVM_XEN", others++ ? "|" : ""); ++ if (machdep->flags & VM_XEN_RHEL4) ++ fprintf(fp, "%sVM_XEN_RHEL4", others++ ? "|" : ""); ++ if (machdep->flags & VMEMMAP) ++ fprintf(fp, "%sVMEMMAP", others++ ? "|" : ""); ++ if (machdep->flags & NO_TSS) ++ fprintf(fp, "%sNO_TSS", others++ ? "|" : ""); ++ if (machdep->flags & SCHED_TEXT) ++ fprintf(fp, "%sSCHED_TEXT", others++ ? "|" : ""); ++ if (machdep->flags & PHYS_BASE) ++ fprintf(fp, "%sPHYS_BASE", others++ ? "|" : ""); ++ if (machdep->flags & FRAMESIZE_DEBUG) ++ fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : ""); + fprintf(fp, ")\n"); + + fprintf(fp, " kvbase: %lx\n", machdep->kvbase); +@@ -215,13 +378,32 @@ + fprintf(fp, " back_trace: x86_64_back_trace_cmd()\n"); + else if (machdep->back_trace == x86_64_low_budget_back_trace_cmd) + fprintf(fp, +- " back_trace: x86_64_low_budget_back_trace_cmd()\n"); ++ " back_trace: x86_64_low_budget_back_trace_cmd() %s\n", ++ kt->flags & DWARF_UNWIND ? ++ "-> x86_64_dwarf_back_trace_cmd()" : ""); ++ else if (machdep->back_trace == x86_64_dwarf_back_trace_cmd) ++ fprintf(fp, ++ " back_trace: x86_64_dwarf_back_trace_cmd() %s\n", ++ kt->flags & DWARF_UNWIND ? 
++ "" : "->x86_64_low_budget_back_trace_cmd()"); + else + fprintf(fp, " back_trace: %lx\n", + (ulong)machdep->back_trace); + fprintf(fp, " processor_speed: x86_64_processor_speed()\n"); +- fprintf(fp, " uvtop: x86_64_uvtop()\n"); +- fprintf(fp, " kvtop: x86_64_kvtop()\n"); ++ if (machdep->uvtop == x86_64_uvtop) ++ fprintf(fp, " uvtop: x86_64_uvtop()\n"); ++ else if (machdep->uvtop == x86_64_uvtop_level4) ++ fprintf(fp, " uvtop: x86_64_uvtop_level4()\n"); ++ else if (machdep->uvtop == x86_64_uvtop_level4_xen_wpt) ++ fprintf(fp, " uvtop: x86_64_uvtop_level4_xen_wpt()\n"); ++ else if (machdep->uvtop == x86_64_uvtop_level4_rhel4_xen_wpt) ++ fprintf(fp, " uvtop: x86_64_uvtop_level4_rhel4_xen_wpt()\n"); ++ else ++ fprintf(fp, " uvtop: %lx\n", (ulong)machdep->uvtop); ++ fprintf(fp, " kvtop: x86_64_kvtop()"); ++ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) ++ fprintf(fp, " -> x86_64_kvtop_xen_wpt()"); ++ fprintf(fp, "\n"); + fprintf(fp, " get_task_pgd: x86_64_get_task_pgd()\n"); + fprintf(fp, " dump_irq: x86_64_dump_irq()\n"); + fprintf(fp, " get_stack_frame: x86_64_get_stack_frame()\n"); +@@ -239,6 +421,11 @@ + fprintf(fp, " is_uvaddr: x86_64_is_uvaddr()\n"); + fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); + fprintf(fp, " init_kernel_pgd: x86_64_init_kernel_pgd()\n"); ++ fprintf(fp, "clear_machdep_cache: x86_64_clear_machdep_cache()\n"); ++ fprintf(fp, " xendump_p2m_create: x86_64_xendump_p2m_create()\n"); ++ fprintf(fp, " get_xendump_regs: x86_64_get_xendump_regs()\n"); ++ fprintf(fp, " xendump_panic_task: x86_64_xendump_panic_task()\n"); ++ fprintf(fp, "xen_kdump_p2m_create: x86_64_xen_kdump_p2m_create()\n"); + fprintf(fp, " line_number_hooks: x86_64_line_number_hooks\n"); + fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); + fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); +@@ -248,9 +435,33 @@ + fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); + fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); + fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); +- fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); ++ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); ++ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); ++ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); ++ ++ fprintf(fp, " machspec: %016lx\n", (ulong)machdep->machspec); ++ fprintf(fp, " userspace_top: %016lx\n", (ulong)ms->userspace_top); ++ fprintf(fp, " page_offset: %016lx\n", (ulong)ms->page_offset); ++ fprintf(fp, " vmalloc_start_addr: %016lx\n", (ulong)ms->vmalloc_start_addr); ++ fprintf(fp, " vmalloc_end: %016lx\n", (ulong)ms->vmalloc_end); ++ fprintf(fp, " modules_vaddr: %016lx\n", (ulong)ms->modules_vaddr); ++ fprintf(fp, " modules_end: %016lx\n", (ulong)ms->modules_end); ++ fprintf(fp, " vmemmap_vaddr: %016lx %s\n", (ulong)ms->vmemmap_vaddr, ++ machdep->flags & VMEMMAP ? "" : "(unused)"); ++ fprintf(fp, " vmemmap_end: %016lx %s\n", (ulong)ms->vmemmap_end, ++ machdep->flags & VMEMMAP ? 
"" : "(unused)"); ++ fprintf(fp, " phys_base: %lx\n", (ulong)ms->phys_base); + fprintf(fp, " pml4: %lx\n", (ulong)ms->pml4); ++ fprintf(fp, " last_pml4_read: %lx\n", (ulong)ms->last_pml4_read); ++ if (ms->upml) { ++ fprintf(fp, " upml: %lx\n", (ulong)ms->upml); ++ fprintf(fp, " last_upml_read: %lx\n", (ulong)ms->last_upml_read); ++ } else { ++ fprintf(fp, " upml: (unused)\n"); ++ fprintf(fp, " last_upml_read: (unused)\n"); ++ } + fprintf(fp, " irqstack: %lx\n", (ulong)ms->irqstack); ++ fprintf(fp, " irq_eframe_link: %ld\n", ms->irq_eframe_link); + fprintf(fp, " pto: %s", + machdep->flags & PT_REGS_INIT ? "\n" : "(uninitialized)\n"); + if (machdep->flags & PT_REGS_INIT) { +@@ -276,8 +487,10 @@ + fprintf(fp, " rsp: %ld\n", ms->pto.rsp); + fprintf(fp, " ss: %ld\n", ms->pto.ss); + } +- fprintf(fp, " stkinfo: esize: %d isize: %d\n", +- ms->stkinfo.esize, ms->stkinfo.isize); ++ fprintf(fp, " stkinfo: esize: %d%sisize: %d\n", ++ ms->stkinfo.esize, ++ machdep->flags & NO_TSS ? " (NO TSS) " : " ", ++ ms->stkinfo.isize); + fprintf(fp, " ebase[%s][7]:", + arg ? "NR_CPUS" : "cpus"); + cpus = arg ? NR_CPUS : kt->cpus; +@@ -306,9 +519,9 @@ + static void + x86_64_cpu_pda_init(void) + { +- int i, cpus, nr_pda, cpunumber; ++ int i, cpus, nr_pda, cpunumber, _cpu_pda; + char *cpu_pda_buf; +- ulong level4_pgt, data_offset; ++ ulong level4_pgt, data_offset, cpu_pda_addr; + struct syment *sp, *nsp; + ulong offset, istacksize; + +@@ -320,18 +533,44 @@ + MEMBER_OFFSET_INIT(x8664_pda_irqstackptr, "x8664_pda", "irqstackptr"); + MEMBER_OFFSET_INIT(x8664_pda_level4_pgt, "x8664_pda", "level4_pgt"); + MEMBER_OFFSET_INIT(x8664_pda_cpunumber, "x8664_pda", "cpunumber"); ++ MEMBER_OFFSET_INIT(x8664_pda_me, "x8664_pda", "me"); + + cpu_pda_buf = GETBUF(SIZE(x8664_pda)); + +- if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) +- nr_pda = NR_CPUS; ++ if (LKCD_KERNTYPES()) { ++ if (symbol_exists("_cpu_pda")) ++ _cpu_pda = TRUE; ++ else ++ _cpu_pda = FALSE; ++ nr_pda = get_cpus_possible(); ++ } else { ++ if (symbol_exists("_cpu_pda")) { ++ if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0))) ++ nr_pda = NR_CPUS; ++ _cpu_pda = TRUE; ++ } else { ++ if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) ++ nr_pda = NR_CPUS; ++ _cpu_pda = FALSE; ++ } ++ } + + for (i = cpus = 0; i < nr_pda; i++) { +- if (!CPU_PDA_READ(i, cpu_pda_buf)) +- break; +- level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); ++ if (_cpu_pda) { ++ if (!_CPU_PDA_READ(i, cpu_pda_buf)) ++ break; ++ } else { ++ if (!CPU_PDA_READ(i, cpu_pda_buf)) ++ break; ++ } ++ ++ if (VALID_MEMBER(x8664_pda_level4_pgt)) { ++ level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); ++ if (!VALID_LEVEL4_PGT_ADDR(level4_pgt)) ++ break; ++ } + cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber)); +- if (!VALID_LEVEL4_PGT_ADDR(level4_pgt) || (cpunumber != cpus)) ++ if (cpunumber != cpus) + break; + cpus++; + +@@ -351,8 +590,8 @@ + i, level4_pgt, data_offset); + } + +- +- if ((i = get_array_length("boot_cpu_stack", NULL, 0))) { ++ if (!LKCD_KERNTYPES() && ++ (i = get_array_length("boot_cpu_stack", NULL, 0))) { + istacksize = i; + } else if ((sp = symbol_search("boot_cpu_stack")) && + (nsp = next_symbol(NULL, sp))) { +@@ -381,8 +620,9 @@ + * the address of &boot_cpu_stack[0]. 
+ */ + sp = value_search(machdep->machspec->stkinfo.ibase[0], &offset); +- if (!sp || offset || !STREQ(sp->name, "boot_cpu_stack")) { +- if (symbol_value("boot_cpu_stack")) { ++ nsp = symbol_search("boot_cpu_stack"); ++ if (!sp || offset || !nsp || (sp->value != nsp->value)) { ++ if (symbol_exists("boot_cpu_stack")) { + error(WARNING, + "cpu 0 IRQ stack: %lx\n boot_cpu_stack: %lx\n\n", + machdep->machspec->stkinfo.ibase[0], +@@ -448,6 +688,13 @@ + if (ms->stkinfo.ebase[c][0] == 0) + break; + } ++ } else if (!symbol_exists("boot_exception_stacks")) { ++ machdep->flags |= NO_TSS; ++ ++ if (CRASHDEBUG(1)) ++ error(NOTE, "CONFIG_X86_NO_TSS\n"); ++ ++ return; + } + + if (ms->stkinfo.ebase[0][0] && ms->stkinfo.ebase[0][1]) +@@ -535,6 +782,10 @@ + if (clues >= 2) + kt->cpu_flags[c] |= NMI; + } ++ ++ if (symbol_exists("__sched_text_start") && ++ (symbol_value("__sched_text_start") == symbol_value("schedule"))) ++ machdep->flags |= SCHED_TEXT; + } + + /* +@@ -576,7 +827,7 @@ + ulong x86_64_VTOP(ulong vaddr) + { + if (vaddr >= __START_KERNEL_map) +- return ((vaddr) - (ulong)__START_KERNEL_map); ++ return ((vaddr) - (ulong)__START_KERNEL_map + machdep->machspec->phys_base); + else + return ((vaddr) - PAGE_OFFSET); + } +@@ -584,12 +835,21 @@ + /* + * Include both vmalloc'd and module address space as VMALLOC space. + */ +-int x86_64_IS_VMALLOC_ADDR(ulong vaddr) ++int ++x86_64_IS_VMALLOC_ADDR(ulong vaddr) + { + return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END) || ++ ((machdep->flags & VMEMMAP) && ++ (vaddr >= VMEMMAP_VADDR && vaddr <= VMEMMAP_END)) || + (vaddr >= MODULES_VADDR && vaddr <= MODULES_END)); + } + ++static int ++x86_64_is_module_addr(ulong vaddr) ++{ ++ return (vaddr >= MODULES_VADDR && vaddr <= MODULES_END); ++} ++ + /* + * Refining this may cause more problems than just doing it this way. 
+ */ +@@ -616,43 +876,52 @@ + */ + + static int +-x86_64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) ++x86_64_uvtop_level4(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) + { +- ulong mm; +- ulong *pgd; ++ ulong mm; ++ ulong *pml; ++ ulong pml_paddr; ++ ulong pml_pte; ++ ulong *pgd; + ulong pgd_paddr; + ulong pgd_pte; + ulong *pmd; + ulong pmd_paddr; + ulong pmd_pte; +- ulong *ptep; +- ulong pte_paddr; +- ulong pte; +- physaddr_t physpage; ++ ulong *ptep; ++ ulong pte_paddr; ++ ulong pte; ++ physaddr_t physpage; + +- if (!tc) +- error(FATAL, "current context invalid\n"); ++ if (!tc) ++ error(FATAL, "current context invalid\n"); + +- *paddr = 0; ++ *paddr = 0; + +- if (IS_KVADDR(uvaddr)) +- return x86_64_kvtop(tc, uvaddr, paddr, verbose); ++ if (IS_KVADDR(uvaddr)) ++ return x86_64_kvtop(tc, uvaddr, paddr, verbose); + +- /* +- * pgd = pgd_offset(mm, address); +- */ +- if ((mm = task_mm(tc->task, TRUE))) +- pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); +- else +- readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, +- sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); ++ if ((mm = task_mm(tc->task, TRUE))) ++ pml = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); ++ else ++ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pml, ++ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); + +- pgd_paddr = x86_64_VTOP((ulong)pgd); +- FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); ++ pml_paddr = x86_64_VTOP((ulong)pml); ++ FILL_UPML(pml_paddr, PHYSADDR, PAGESIZE()); ++ pml = ((ulong *)pml_paddr) + pml4_index(uvaddr); ++ pml_pte = ULONG(machdep->machspec->upml + PAGEOFFSET(pml)); ++ if (verbose) ++ fprintf(fp, " PML: %lx => %lx\n", (ulong)pml, pml_pte); ++ if (!(pml_pte & _PAGE_PRESENT)) ++ goto no_upage; ++ ++ pgd_paddr = pml_pte & PHYSICAL_PAGE_MASK; ++ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); + pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); + pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); +- if (verbose) +- fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd, pgd_pte); ++ if (verbose) ++ fprintf(fp, " PUD: %lx => %lx\n", (ulong)pgd, pgd_pte); + if (!(pgd_pte & _PAGE_PRESENT)) + goto no_upage; + +@@ -682,29 +951,31 @@ + + /* + * ptep = pte_offset_map(pmd, address); +- * pte = *ptep; ++ * pte = *ptep; + */ +- pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; +- FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); +- ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); +- pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); +- if (verbose) +- fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); +- if (!(pte & (_PAGE_PRESENT))) { +- if (pte && verbose) { +- fprintf(fp, "\n"); +- x86_64_translate_pte(pte, 0, 0); +- } +- goto no_upage; +- } ++ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; ++ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ++ ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); ++ pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); ++ if (!(pte & (_PAGE_PRESENT))) { ++ *paddr = pte; ++ ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ x86_64_translate_pte(pte, 0, 0); ++ } ++ goto no_upage; ++ } + +- *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); ++ *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); + +- if (verbose) { +- fprintf(fp, " PAGE: %lx\n\n", ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx\n\n", + PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); +- x86_64_translate_pte(pte, 0, 0); +- } ++ x86_64_translate_pte(pte, 0, 0); ++ } + + return TRUE; + +@@ -713,1982 
+984,4815 @@ + return FALSE; + } + +- +-/* +- * Translates a kernel virtual address to its physical address. cmd_vtop() +- * sets the verbose flag so that the pte translation gets displayed; all +- * other callers quietly accept the translation. +- */ + static int +-x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) ++x86_64_uvtop_level4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) + { +- ulong *pml4; +- ulong *pgd; ++ ulong mm; ++ ulong *pml; ++ ulong pml_paddr; ++ ulong pml_pte; ++ ulong *pgd; + ulong pgd_paddr; + ulong pgd_pte; + ulong *pmd; + ulong pmd_paddr; + ulong pmd_pte; ++ ulong pseudo_pmd_pte; + ulong *ptep; + ulong pte_paddr; + ulong pte; ++ ulong pseudo_pte; + physaddr_t physpage; ++ char buf[BUFSIZE]; + +- if (!IS_KVADDR(kvaddr)) +- return FALSE; ++ if (!tc) ++ error(FATAL, "current context invalid\n"); + +- if (!vt->vmalloc_start) { +- *paddr = x86_64_VTOP(kvaddr); +- return TRUE; +- } ++ *paddr = 0; + +- if (!IS_VMALLOC_ADDR(kvaddr)) { +- *paddr = x86_64_VTOP(kvaddr); +- if (!verbose) +- return TRUE; +- } +- +- /* +- * pgd = pgd_offset_k(addr); +- */ +- FILL_PML4(); +- pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); +- if (verbose) { +- fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); +- fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4); +- } +- if (!(*pml4) & _PAGE_PRESENT) +- goto no_kpage; +- pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; ++ if (IS_KVADDR(uvaddr)) ++ return x86_64_kvtop(tc, uvaddr, paddr, verbose); ++ ++ if ((mm = task_mm(tc->task, TRUE))) ++ pml = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); ++ else ++ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pml, ++ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); ++ ++ pml_paddr = x86_64_VTOP((ulong)pml); ++ FILL_UPML(pml_paddr, PHYSADDR, PAGESIZE()); ++ pml = ((ulong *)pml_paddr) + pml4_index(uvaddr); ++ pml_pte = ULONG(machdep->machspec->upml + PAGEOFFSET(pml)); ++ if (verbose) ++ fprintf(fp, " PML: %lx => %lx [machine]\n", (ulong)pml, pml_pte); ++ if (!(pml_pte & _PAGE_PRESENT)) ++ goto no_upage; ++ ++ pgd_paddr = pml_pte & PHYSICAL_PAGE_MASK; ++ pgd_paddr = xen_m2p(pgd_paddr); ++ if (verbose) ++ fprintf(fp, " PML: %lx\n", pgd_paddr); + FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); +- pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); ++ pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); + pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); +- if (verbose) +- fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd, pgd_pte); ++ if (verbose) ++ fprintf(fp, " PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); + if (!(pgd_pte & _PAGE_PRESENT)) +- goto no_kpage; ++ goto no_upage; + + /* +- * pmd = pmd_offset(pgd, addr); ++ * pmd = pmd_offset(pgd, address); + */ + pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; ++ pmd_paddr = xen_m2p(pmd_paddr); ++ if (verbose) ++ fprintf(fp, " PUD: %lx\n", pmd_paddr); + FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); +- pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr); ++ pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); + pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); + if (verbose) +- fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); ++ fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); + if (!(pmd_pte & _PAGE_PRESENT)) +- goto no_kpage; +- if (pmd_pte & _PAGE_PSE) { +- if (verbose) { +- fprintf(fp, " PAGE: %lx (2MB)\n\n", ++ goto no_upage; ++ if (pmd_pte & _PAGE_PSE) { ++ if (verbose) ++ fprintf(fp, " PAGE: %lx (2MB) [machine]\n", + PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); +- 
x86_64_translate_pte(pmd_pte, 0, 0); ++ ++ pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); ++ ++ if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { ++ if (verbose) ++ fprintf(fp, " PAGE: page not available\n"); ++ *paddr = PADDR_NOT_AVAILABLE; ++ return FALSE; + } + +- physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + +- (kvaddr & ~_2MB_PAGE_MASK); ++ pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s (2MB)\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(PAGEBASE(pseudo_pmd_pte) & ++ PHYSICAL_PAGE_MASK))); ++ ++ x86_64_translate_pte(pseudo_pmd_pte, 0, 0); ++ } ++ ++ physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + ++ (uvaddr & ~_2MB_PAGE_MASK); ++ + *paddr = physpage; + return TRUE; +- } ++ } + +- /* +- * ptep = pte_offset_map(pmd, addr); ++ /* ++ * ptep = pte_offset_map(pmd, address); + * pte = *ptep; + */ + pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; ++ pte_paddr = xen_m2p(pte_paddr); ++ if (verbose) ++ fprintf(fp, " PMD: %lx\n", pte_paddr); + FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); +- ptep = ((ulong *)pte_paddr) + pte_index(kvaddr); ++ ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); + pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); +- if (verbose) +- fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); +- if (!(pte & (_PAGE_PRESENT))) { +- if (pte && verbose) { +- fprintf(fp, "\n"); +- x86_64_translate_pte(pte, 0, 0); +- } +- goto no_kpage; +- } +- +- *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); +- +- if (verbose) { +- fprintf(fp, " PAGE: %lx\n\n", ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); ++ if (!(pte & (_PAGE_PRESENT))) { ++ *paddr = pte; ++ ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ x86_64_translate_pte(pte, 0, 0); ++ } ++ goto no_upage; ++ } ++ ++ pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); ++ if (verbose) ++ fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); ++ ++ *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx [machine]\n", ++ PAGEBASE(pte) & PHYSICAL_PAGE_MASK); ++ fprintf(fp, " PAGE: %lx\n\n", + PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); +- x86_64_translate_pte(pte, 0, 0); +- } ++ x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); ++ } + +- return TRUE; ++ return TRUE; + +-no_kpage: +- return FALSE; +-} ++no_upage: + +-/* +- * Determine where vmalloc'd memory starts. +- */ +-static ulong +-x86_64_vmalloc_start(void) +-{ +- return ((ulong)VMALLOC_START); ++ return FALSE; + } + +-/* +- * thread_info implementation makes for less accurate results here. +- */ + static int +-x86_64_is_task_addr(ulong task) ++x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) + { +- if (tt->flags & THREAD_INFO) +- return IS_KVADDR(task); +- else +- return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); +-} +- ++ ulong mm; ++ ulong *pgd; ++ ulong pgd_paddr; ++ ulong pgd_pte; ++ ulong *pmd; ++ ulong pmd_paddr; ++ ulong pmd_pte; ++ ulong pseudo_pmd_pte; ++ ulong *ptep; ++ ulong pte_paddr; ++ ulong pte; ++ ulong pseudo_pte; ++ physaddr_t physpage; ++ char buf[BUFSIZE]; + +-/* +- * easy enough... 
+- */ +-static ulong +-x86_64_processor_speed(void) +-{ +- unsigned long cpu_khz; ++ if (!tc) ++ error(FATAL, "current context invalid\n"); + +- if (machdep->mhz) +- return (machdep->mhz); ++ *paddr = 0; + +- if (symbol_exists("cpu_khz")) { +- get_symbol_data("cpu_khz", sizeof(long), &cpu_khz); +- if (cpu_khz) +- return(machdep->mhz = cpu_khz/1000); +- } ++ if (IS_KVADDR(uvaddr)) ++ return x86_64_kvtop(tc, uvaddr, paddr, verbose); + +- return 0; +-} ++ if ((mm = task_mm(tc->task, TRUE))) ++ pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); ++ else ++ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, ++ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); + ++ pgd_paddr = x86_64_VTOP((ulong)pgd); ++ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); ++ pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); ++ if (verbose) ++ fprintf(fp, " PGD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); ++ if (!(pgd_pte & _PAGE_PRESENT)) ++ goto no_upage; + +-/* +- * Accept or reject a symbol from the kernel namelist. +- */ +-static int +-x86_64_verify_symbol(const char *name, ulong value, char type) +-{ +- if (STREQ(name, "_text") || STREQ(name, "_stext")) +- machdep->flags |= KSYMS_START; ++ /* ++ * pmd = pmd_offset(pgd, address); ++ */ ++ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; ++ pmd_paddr = xen_m2p(pmd_paddr); ++ if (verbose) ++ fprintf(fp, " PGD: %lx\n", pmd_paddr); ++ FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); ++ pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); ++ if (!(pmd_pte & _PAGE_PRESENT)) ++ goto no_upage; ++ if (pmd_pte & _PAGE_PSE) { ++ if (verbose) ++ fprintf(fp, " PAGE: %lx (2MB) [machine]\n", ++ PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); + +- if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) +- return FALSE; ++ pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); ++ ++ if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { ++ if (verbose) ++ fprintf(fp, " PAGE: page not available\n"); ++ *paddr = PADDR_NOT_AVAILABLE; ++ return FALSE; ++ } ++ ++ pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s (2MB)\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(PAGEBASE(pseudo_pmd_pte) & ++ PHYSICAL_PAGE_MASK))); ++ ++ x86_64_translate_pte(pseudo_pmd_pte, 0, 0); ++ } ++ ++ physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + ++ (uvaddr & ~_2MB_PAGE_MASK); ++ ++ *paddr = physpage; ++ return TRUE; ++ } ++ ++ /* ++ * ptep = pte_offset_map(pmd, address); ++ * pte = *ptep; ++ */ ++ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; ++ pte_paddr = xen_m2p(pte_paddr); ++ if (verbose) ++ fprintf(fp, " PMD: %lx\n", pte_paddr); ++ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ++ ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); ++ pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); ++ if (!(pte & (_PAGE_PRESENT))) { ++ *paddr = pte; ++ ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ x86_64_translate_pte(pte, 0, 0); ++ } ++ goto no_upage; ++ } ++ ++ pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); ++ if (verbose) ++ fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); ++ ++ *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx [machine]\n", ++ PAGEBASE(pte) & PHYSICAL_PAGE_MASK); ++ fprintf(fp, " PAGE: %lx\n\n", ++ PAGEBASE(*paddr) & 
PHYSICAL_PAGE_MASK); ++ x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); ++ } + + return TRUE; +-} + ++no_upage: + +-/* +- * Get the relevant page directory pointer from a task structure. +- */ +-static ulong +-x86_64_get_task_pgd(ulong task) +-{ +- return (error(FATAL, "x86_64_get_task_pgd: N/A\n")); ++ return FALSE; + } + +- +-/* +- * Translate a PTE, returning TRUE if the page is present. +- * If a physaddr pointer is passed in, don't print anything. +- */ + static int +-x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused) ++x86_64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) + { +- int c, others, len1, len2, len3; +- ulong paddr; +- char buf[BUFSIZE]; +- char buf2[BUFSIZE]; +- char buf3[BUFSIZE]; +- char ptebuf[BUFSIZE]; +- char physbuf[BUFSIZE]; +- char *arglist[MAXARGS]; +- int page_present; ++ ulong mm; ++ ulong *pgd; ++ ulong pgd_paddr; ++ ulong pgd_pte; ++ ulong *pmd; ++ ulong pmd_paddr; ++ ulong pmd_pte; ++ ulong *ptep; ++ ulong pte_paddr; ++ ulong pte; ++ physaddr_t physpage; + +- paddr = pte & PHYSICAL_PAGE_MASK; +- page_present = pte & _PAGE_PRESENT; ++ if (!tc) ++ error(FATAL, "current context invalid\n"); + +- if (physaddr) { +- *((ulong *)physaddr) = paddr; +- return page_present; +- } +- +- sprintf(ptebuf, "%lx", pte); +- len1 = MAX(strlen(ptebuf), strlen("PTE")); +- fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); ++ *paddr = 0; + +- if (!page_present && pte) { +- swap_location(pte, buf); +- if ((c = parse_line(buf, arglist)) != 3) +- error(FATAL, "cannot determine swap location\n"); ++ if (IS_KVADDR(uvaddr)) ++ return x86_64_kvtop(tc, uvaddr, paddr, verbose); + +- len2 = MAX(strlen(arglist[0]), strlen("SWAP")); +- len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); ++ /* ++ * pgd = pgd_offset(mm, address); ++ */ ++ if ((mm = task_mm(tc->task, TRUE))) ++ pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); ++ else ++ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, ++ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); + +- fprintf(fp, "%s %s\n", +- mkstring(buf2, len2, CENTER|LJUST, "SWAP"), +- mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); ++ pgd_paddr = x86_64_VTOP((ulong)pgd); ++ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); ++ pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); ++ if (verbose) ++ fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd, pgd_pte); ++ if (!(pgd_pte & _PAGE_PRESENT)) ++ goto no_upage; + +- strcpy(buf2, arglist[0]); +- strcpy(buf3, arglist[2]); +- fprintf(fp, "%s %s %s\n", +- mkstring(ptebuf, len1, CENTER|RJUST, NULL), +- mkstring(buf2, len2, CENTER|RJUST, NULL), +- mkstring(buf3, len3, CENTER|RJUST, NULL)); ++ /* ++ * pmd = pmd_offset(pgd, address); ++ */ ++ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; ++ FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); ++ pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); ++ if (!(pmd_pte & _PAGE_PRESENT)) ++ goto no_upage; ++ if (pmd_pte & _PAGE_PSE) { ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx (2MB)\n\n", ++ PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); ++ x86_64_translate_pte(pmd_pte, 0, 0); ++ } + +- return page_present; ++ physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + ++ (uvaddr & ~_2MB_PAGE_MASK); ++ *paddr = physpage; ++ return TRUE; + } + +- sprintf(physbuf, "%lx", paddr); +- len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); +- fprintf(fp, "%s ", mkstring(buf, len2, 
CENTER|LJUST, "PHYSICAL")); ++ /* ++ * ptep = pte_offset_map(pmd, address); ++ * pte = *ptep; ++ */ ++ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; ++ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ++ ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); ++ pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); ++ if (!(pte & (_PAGE_PRESENT))) { ++ *paddr = pte; + +- fprintf(fp, "FLAGS\n"); ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ x86_64_translate_pte(pte, 0, 0); ++ } ++ goto no_upage; ++ } + +- fprintf(fp, "%s %s ", +- mkstring(ptebuf, len1, CENTER|RJUST, NULL), +- mkstring(physbuf, len2, CENTER|RJUST, NULL)); +- fprintf(fp, "("); +- others = 0; ++ *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); + +- if (pte) { +- if (pte & _PAGE_PRESENT) +- fprintf(fp, "%sPRESENT", others++ ? "|" : ""); +- if (pte & _PAGE_RW) +- fprintf(fp, "%sRW", others++ ? "|" : ""); +- if (pte & _PAGE_USER) +- fprintf(fp, "%sUSER", others++ ? "|" : ""); +- if (pte & _PAGE_PWT) +- fprintf(fp, "%sPWT", others++ ? "|" : ""); +- if (pte & _PAGE_PCD) +- fprintf(fp, "%sPCD", others++ ? "|" : ""); +- if (pte & _PAGE_ACCESSED) +- fprintf(fp, "%sACCESSED", others++ ? "|" : ""); +- if (pte & _PAGE_DIRTY) +- fprintf(fp, "%sDIRTY", others++ ? "|" : ""); +- if ((pte & _PAGE_PSE) && (pte & _PAGE_PRESENT)) +- fprintf(fp, "%sPSE", others++ ? "|" : ""); +- if ((pte & _PAGE_PROTNONE) && !(pte & _PAGE_PRESENT)) +- fprintf(fp, "%sPROTNONE", others++ ? "|" : ""); +- if (pte & _PAGE_GLOBAL) +- fprintf(fp, "%sGLOBAL", others++ ? "|" : ""); +- if (pte & _PAGE_NX) +- fprintf(fp, "%sNX", others++ ? "|" : ""); +- } else { +- fprintf(fp, "no mapping"); ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx\n\n", ++ PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); ++ x86_64_translate_pte(pte, 0, 0); + } + +- fprintf(fp, ")\n"); ++ return TRUE; + +- return (page_present); ++no_upage: ++ ++ return FALSE; + } + +-static char * +-x86_64_exception_stacks[7] = { +- "STACKFAULT", +- "DOUBLEFAULT", +- "NMI", +- "DEBUG", +- "MCE", +- "(unknown)", +- "(unknown)" +-}; + + /* +- * Look for likely exception frames in a stack. ++ * Translates a kernel virtual address to its physical address. cmd_vtop() ++ * sets the verbose flag so that the pte translation gets displayed; all ++ * other callers quietly accept the translation. 
+ */ +-static int +-x86_64_eframe_search(struct bt_info *bt) ++static int ++x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) + { +- int i, c, cnt; +- ulong estack, irqstack, stacksize; +- ulong *up; +- struct machine_specific *ms; +- struct bt_info bt_local; ++ ulong *pml4; ++ ulong *pgd; ++ ulong pgd_paddr; ++ ulong pgd_pte; ++ ulong *pmd; ++ ulong pmd_paddr; ++ ulong pmd_pte; ++ ulong *ptep; ++ ulong pte_paddr; ++ ulong pte; ++ physaddr_t physpage; + +- if (bt->flags & BT_EFRAME_SEARCH2) { +- BCOPY(bt, &bt_local, sizeof(struct bt_info)); +- bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; ++ if (!IS_KVADDR(kvaddr)) ++ return FALSE; + +- ms = machdep->machspec; ++ if (XEN_HYPER_MODE()) { ++ if (DIRECTMAP_VIRT_ADDR(kvaddr)) { ++ *paddr = kvaddr - DIRECTMAP_VIRT_START; ++ return TRUE; ++ } ++ FILL_PML4_HYPER(); ++ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); ++ if (verbose) { ++ fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4); ++ } ++ } else { ++ if (!vt->vmalloc_start) { ++ *paddr = x86_64_VTOP(kvaddr); ++ return TRUE; ++ } + +- for (c = 0; c < kt->cpus; c++) { +- if (ms->stkinfo.ibase[c] == 0) +- break; +- bt->hp->esp = ms->stkinfo.ibase[c]; +- fprintf(fp, "CPU %d IRQ STACK:\n", c); +- if ((cnt = x86_64_eframe_search(bt))) +- fprintf(fp, "\n"); +- else +- fprintf(fp, "(none found)\n\n"); ++ if (!IS_VMALLOC_ADDR(kvaddr)) { ++ *paddr = x86_64_VTOP(kvaddr); ++ if (!verbose) ++ return TRUE; ++ } ++ ++ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) ++ return (x86_64_kvtop_xen_wpt(tc, kvaddr, paddr, verbose)); ++ ++ /* ++ * pgd = pgd_offset_k(addr); ++ */ ++ FILL_PML4(); ++ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); ++ if (verbose) { ++ fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4); ++ } ++ } ++ if (!(*pml4) & _PAGE_PRESENT) ++ goto no_kpage; ++ pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; ++ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); ++ pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); ++ if (verbose) ++ fprintf(fp, " PUD: %lx => %lx\n", (ulong)pgd, pgd_pte); ++ if (!(pgd_pte & _PAGE_PRESENT)) ++ goto no_kpage; ++ ++ /* ++ * pmd = pmd_offset(pgd, addr); ++ */ ++ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; ++ FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); ++ pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); ++ if (!(pmd_pte & _PAGE_PRESENT)) ++ goto no_kpage; ++ if (pmd_pte & _PAGE_PSE) { ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx (2MB)\n\n", ++ PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); ++ x86_64_translate_pte(pmd_pte, 0, 0); + } + +- for (c = 0; c < kt->cpus; c++) { +- for (i = 0; i < 7; i++) { +- if (ms->stkinfo.ebase[c][i] == 0) +- break; +- bt->hp->esp = ms->stkinfo.ebase[c][i]; +- fprintf(fp, "CPU %d %s EXCEPTION STACK:\n", +- c, x86_64_exception_stacks[i]); +- if ((cnt = x86_64_eframe_search(bt))) +- fprintf(fp, "\n"); +- else +- fprintf(fp, "(none found)\n\n"); +- } +- } ++ physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + ++ (kvaddr & ~_2MB_PAGE_MASK); ++ *paddr = physpage; ++ return TRUE; ++ } + +- return 0; ++ /* ++ * ptep = pte_offset_map(pmd, addr); ++ * pte = *ptep; ++ */ ++ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; ++ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ++ ptep = ((ulong *)pte_paddr) + pte_index(kvaddr); ++ pte = 
ULONG(machdep->ptbl + PAGEOFFSET(ptep)); ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); ++ if (!(pte & (_PAGE_PRESENT))) { ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ x86_64_translate_pte(pte, 0, 0); ++ } ++ goto no_kpage; + } + +- if (bt->hp && bt->hp->esp) { +- ms = machdep->machspec; +- bt->stkptr = bt->hp->esp; +- if ((estack = x86_64_in_exception_stack(bt))) { +- stacksize = ms->stkinfo.esize; +- bt->stackbase = estack; +- bt->stacktop = estack + ms->stkinfo.esize; +- bt->stackbuf = ms->irqstack; +- alter_stackbuf(bt); +- } else if ((irqstack = x86_64_in_irqstack(bt))) { +- stacksize = ms->stkinfo.isize; +- bt->stackbase = irqstack; +- bt->stacktop = irqstack + ms->stkinfo.isize; +- bt->stackbuf = ms->irqstack; +- alter_stackbuf(bt); +- } else if (!INSTACK(bt->stkptr, bt)) +- error(FATAL, +- "unrecognized stack address for this task: %lx\n", +- bt->hp->esp); +- } ++ *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); + +- stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx\n\n", ++ PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); ++ x86_64_translate_pte(pte, 0, 0); ++ } + +- if (bt->stkptr) +- i = (bt->stkptr - bt->stackbase)/sizeof(ulong); +- else +- i = 0; ++ return TRUE; + +- for (cnt = 0; i <= stacksize/sizeof(ulong); i++) { +- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++no_kpage: ++ return FALSE; ++} + +- if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT| +- EFRAME_VERIFY, 0, (char *)up, bt, fp)) +- cnt++; ++ ++static int ++x86_64_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) ++{ ++ ulong *pml4; ++ ulong *pgd; ++ ulong pgd_paddr; ++ ulong pgd_pte; ++ ulong *pmd; ++ ulong pmd_paddr; ++ ulong pmd_pte; ++ ulong pseudo_pmd_pte; ++ ulong *ptep; ++ ulong pte_paddr; ++ ulong pte; ++ ulong pseudo_pte; ++ physaddr_t physpage; ++ char buf[BUFSIZE]; ++ ++ /* ++ * pgd = pgd_offset_k(addr); ++ */ ++ FILL_PML4(); ++ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); ++ if (verbose) { ++ fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); ++ fprintf(fp, "PAGE DIRECTORY: %lx [machine]\n", *pml4); + } ++ if (!(*pml4) & _PAGE_PRESENT) ++ goto no_kpage; ++ pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; ++ pgd_paddr = xen_m2p(pgd_paddr); ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd_paddr); ++ FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); ++ pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); ++ if (verbose) ++ fprintf(fp, " PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); ++ if (!(pgd_pte & _PAGE_PRESENT)) ++ goto no_kpage; + +- return cnt; +-} ++ /* ++ * pmd = pmd_offset(pgd, addr); ++ */ ++ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; ++ pmd_paddr = xen_m2p(pmd_paddr); ++ if (verbose) ++ fprintf(fp, " PUD: %lx\n", pmd_paddr); ++ FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); ++ pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); ++ if (!(pmd_pte & _PAGE_PRESENT)) ++ goto no_kpage; ++ if (pmd_pte & _PAGE_PSE) { ++ if (verbose) ++ fprintf(fp, " PAGE: %lx (2MB) [machine]\n", ++ PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); + +-static void +-x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp) +-{ +- int i, u_idx; +- ulong *up; +- ulong words, addr; ++ pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); + +- words = (rsp - bt->frameptr) / sizeof(ulong) + 1; ++ if 
(pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { ++ if (verbose) ++ fprintf(fp, " PAGE: page not available\n"); ++ *paddr = PADDR_NOT_AVAILABLE; ++ return FALSE; ++ } + +- addr = bt->frameptr; +- u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); +- for (i = 0; i < words; i++, u_idx++) { +- if (!(i & 1)) +- fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); +- +- up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); +- fprintf(ofp, "%016lx ", *up); +- addr += sizeof(ulong); ++ pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s (2MB)\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(PAGEBASE(pseudo_pmd_pte) & ++ PHYSICAL_PAGE_MASK))); ++ ++ x86_64_translate_pte(pseudo_pmd_pte, 0, 0); ++ } ++ ++ physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + ++ (kvaddr & ~_2MB_PAGE_MASK); ++ ++ *paddr = physpage; ++ return TRUE; + } +- fprintf(ofp, "\n"); +-} + +-/* +- * Check a frame for a requested reference. +- */ +-static void +-x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) +-{ +- struct syment *sp; +- ulong offset; ++ /* ++ * ptep = pte_offset_map(pmd, addr); ++ * pte = *ptep; ++ */ ++ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; ++ pte_paddr = xen_m2p(pte_paddr); ++ if (verbose) ++ fprintf(fp, " PMD: %lx\n", pte_paddr); ++ FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ++ ptep = ((ulong *)pte_paddr) + pte_index(kvaddr); ++ pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); ++ if (!(pte & (_PAGE_PRESENT))) { ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ x86_64_translate_pte(pte, 0, 0); ++ } ++ goto no_kpage; ++ } + +- if (!name) +- sp = value_search(text, &offset); +- else if (!text) +- sp = symbol_search(name); ++ pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); ++ if (verbose) ++ fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); + +- switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) +- { +- case BT_REF_SYMBOL: +- if (name) { +- if (STREQ(name, bt->ref->str)) +- bt->ref->cmdflags |= BT_REF_FOUND; +- } else { +- if (sp && !offset && STREQ(sp->name, bt->ref->str)) +- bt->ref->cmdflags |= BT_REF_FOUND; +- } +- break; ++ *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); + +- case BT_REF_HEXVAL: +- if (text) { +- if (bt->ref->hexval == text) +- bt->ref->cmdflags |= BT_REF_FOUND; +- } else if (sp && (bt->ref->hexval == sp->value)) +- bt->ref->cmdflags |= BT_REF_FOUND; +- else if (!name && !text && (bt->ref->hexval == 0)) +- bt->ref->cmdflags |= BT_REF_FOUND; +- break; ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx [machine]\n", ++ PAGEBASE(pte) & PHYSICAL_PAGE_MASK); ++ fprintf(fp, " PAGE: %lx\n\n", ++ PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); ++ x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); + } ++ ++ return TRUE; ++ ++no_kpage: ++ return FALSE; + } + ++ + /* +- * print one entry of a stack trace ++ * Determine where vmalloc'd memory starts. 
+ */ +-#define BACKTRACE_COMPLETE (1) +-#define BACKTRACE_ENTRY_IGNORED (2) +-#define BACKTRACE_ENTRY_DISPLAYED (3) +-#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4) +- +-static int +-x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, +- int stkindex, ulong text) ++static ulong ++x86_64_vmalloc_start(void) + { +- ulong rsp, offset; +- struct syment *sp; +- char *name; +- int result; +- long eframe_check; +- char buf[BUFSIZE]; +- +- eframe_check = -1; +- offset = 0; +- sp = value_search(text, &offset); +- if (!sp) +- return BACKTRACE_ENTRY_IGNORED; +- +- name = sp->name; +- +- if (bt->flags & BT_TEXT_SYMBOLS) { +- if (bt->flags & BT_EXCEPTION_FRAME) +- rsp = bt->stkptr; +- else +- rsp = bt->stackbase + (stkindex * sizeof(long)); +- fprintf(ofp, " [%s] %s at %lx\n", +- mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)), +- name, text); +- if (BT_REFERENCE_CHECK(bt)) +- x86_64_do_bt_reference_check(bt, text, name); +- return BACKTRACE_ENTRY_DISPLAYED; +- } +- +- if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) && +- !(bt->flags & BT_START)) { +- if (STREQ(name, "child_rip")) { +- if (symbol_exists("kernel_thread")) +- name = "kernel_thread"; +- else if (symbol_exists("arch_kernel_thread")) +- name = "arch_kernel_thread"; +- } +- else if (!(bt->flags & BT_SCHEDULE)) { +- if (STREQ(name, "error_exit")) +- eframe_check = 8; +- else { +- if (CRASHDEBUG(2)) +- fprintf(ofp, +- "< ignoring text symbol with no offset: %s() >\n", +- sp->name); +- return BACKTRACE_ENTRY_IGNORED; +- } +- } +- } +- +- if (bt->flags & BT_SCHEDULE) +- name = "schedule"; +- +- if (STREQ(name, "child_rip")) { +- if (symbol_exists("kernel_thread")) +- name = "kernel_thread"; +- else if (symbol_exists("arch_kernel_thread")) +- name = "arch_kernel_thread"; +- result = BACKTRACE_COMPLETE; +- } else if (STREQ(name, "cpu_idle")) +- result = BACKTRACE_COMPLETE; +- else +- result = BACKTRACE_ENTRY_DISPLAYED; +- +- if (bt->flags & BT_EXCEPTION_FRAME) +- rsp = bt->stkptr; +- else if (bt->flags & BT_START) +- rsp = bt->stkptr; +- else +- rsp = bt->stackbase + (stkindex * sizeof(long)); +- +- if ((bt->flags & BT_FULL)) { +- if (bt->frameptr) +- x86_64_display_full_frame(bt, rsp, ofp); +- bt->frameptr = rsp + sizeof(ulong); +- } +- +- fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level, +- rsp, name, text); +- +- if (bt->flags & BT_LINE_NUMBERS) { +- get_line_number(text, buf, FALSE); +- if (strlen(buf)) +- fprintf(ofp, " %s\n", buf); +- } +- +- if (eframe_check >= 0) { +- if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, +- bt->stackbase + (stkindex*sizeof(long)) + eframe_check, +- NULL, bt, ofp)) +- result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED; +- } +- +- if (BT_REFERENCE_CHECK(bt)) +- x86_64_do_bt_reference_check(bt, text, name); +- +- bt->call_target = name; +- +- if (is_direct_call_target(bt)) { +- if (CRASHDEBUG(2)) +- fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", +- bt->call_target); +- bt->flags |= BT_CHECK_CALLER; +- } else { +- if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER)) +- fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", +- bt->call_target); +- if (bt->flags & BT_CHECK_CALLER) { +- if (CRASHDEBUG(2)) +- fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n"); +- bt->flags |= BT_NO_CHECK_CALLER; +- } +- bt->flags &= ~(ulonglong)BT_CHECK_CALLER; +- } +- +- return result; ++ return ((ulong)VMALLOC_START); + } + + /* +- * Unroll a kernel stack. ++ * thread_info implementation makes for less accurate results here. 
+ */ +-static void +-x86_64_back_trace_cmd(struct bt_info *bt) ++static int ++x86_64_is_task_addr(ulong task) + { +- error(FATAL, "x86_64_back_trace_cmd: TBD\n"); ++ if (tt->flags & THREAD_INFO) ++ return IS_KVADDR(task); ++ else ++ return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); + } + + +- + /* +- * Determine whether the initial stack pointer is located in one of the +- * exception stacks. ++ * easy enough... + */ + static ulong +-x86_64_in_exception_stack(struct bt_info *bt) ++x86_64_processor_speed(void) + { +- int c, i; +- ulong rsp; +- ulong estack; +- struct machine_specific *ms; ++ unsigned long cpu_khz = 0; + +- rsp = bt->stkptr; +- ms = machdep->machspec; +- estack = 0; ++ if (machdep->mhz) ++ return (machdep->mhz); + +- for (c = 0; !estack && (c < kt->cpus); c++) { +- for (i = 0; i < 7; i++) { +- if (ms->stkinfo.ebase[c][i] == 0) +- break; +- if ((rsp >= ms->stkinfo.ebase[c][i]) && +- (rsp < (ms->stkinfo.ebase[c][i] + +- ms->stkinfo.esize))) { +- estack = ms->stkinfo.ebase[c][i]; +- if (c != bt->tc->processor) +- error(INFO, +- "task cpu: %d exception stack cpu: %d\n", +- bt->tc->processor, c); +- break; +- } +- } ++ if (symbol_exists("cpu_khz")) { ++ get_symbol_data("cpu_khz", sizeof(int), &cpu_khz); ++ if (cpu_khz) ++ return(machdep->mhz = cpu_khz/1000); + } + +- return estack; ++ return 0; + } + ++ + /* +- * Determine whether the current stack pointer is in a cpu's irqstack. ++ * Accept or reject a symbol from the kernel namelist. + */ +-static ulong +-x86_64_in_irqstack(struct bt_info *bt) ++static int ++x86_64_verify_symbol(const char *name, ulong value, char type) + { +- int c; +- ulong rsp; +- ulong irqstack; +- struct machine_specific *ms; ++ if (STREQ(name, "_text") || STREQ(name, "_stext")) ++ machdep->flags |= KSYMS_START; + +- rsp = bt->stkptr; +- ms = machdep->machspec; +- irqstack = 0; ++ if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) ++ return FALSE; ++ return TRUE; ++} + +- for (c = 0; !irqstack && (c < kt->cpus); c++) { +- if (ms->stkinfo.ibase[c] == 0) +- break; +- if ((rsp >= ms->stkinfo.ibase[c]) && +- (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) { +- irqstack = ms->stkinfo.ibase[c]; +- if (c != bt->tc->processor) +- error(INFO, +- "task cpu: %d IRQ stack cpu: %d\n", +- bt->tc->processor, c); +- break; +- } +- } + +- return irqstack; ++/* ++ * Get the relevant page directory pointer from a task structure. ++ */ ++static ulong ++x86_64_get_task_pgd(ulong task) ++{ ++ return (error(FATAL, "x86_64_get_task_pgd: N/A\n")); + } + +-#define STACK_TRANSITION_ERRMSG_E_I_P \ +-"cannot transition from exception stack to IRQ stack to current process stack:\n exception stack pointer: %lx\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" +-#define STACK_TRANSITION_ERRMSG_E_P \ +-"cannot transition from exception stack to current process stack:\n exception stack pointer: %lx\n process stack pointer: %lx\n current_stack_base: %lx\n" +-#define STACK_TRANSITION_ERRMSG_I_P \ +-"cannot transition from IRQ stack to current process stack:\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx" + + /* +- * Low-budget back tracer -- dump text return addresses, following call chain +- * when possible, along with any verifiable exception frames. ++ * Translate a PTE, returning TRUE if the page is present. ++ * If a physaddr pointer is passed in, don't print anything. 
+ */ +-static void +-x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in) ++static int ++x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused) + { +- int i, level, done; +- ulong rsp, offset, stacktop; +- ulong *up; +- long cs; +- struct syment *sp, *spt; +- FILE *ofp; +- ulong estack, irqstack; +- ulong irq_eframe; +- struct bt_info bt_local, *bt; +- struct machine_specific *ms; +- ulong last_process_stack_eframe; +- ulong user_mode_eframe; ++ int c, others, len1, len2, len3; ++ ulong paddr; ++ char buf[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char buf3[BUFSIZE]; ++ char ptebuf[BUFSIZE]; ++ char physbuf[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ int page_present; + +- bt = &bt_local; +- BCOPY(bt_in, bt, sizeof(struct bt_info)); ++ paddr = pte & PHYSICAL_PAGE_MASK; ++ page_present = pte & _PAGE_PRESENT; + +- level = 0; +- done = FALSE; +- irq_eframe = 0; +- last_process_stack_eframe = 0; +- bt->call_target = NULL; +- rsp = bt->stkptr; +- if (!rsp) { +- error(INFO, "cannot determine starting stack pointer\n"); +- return; ++ if (physaddr) { ++ *((ulong *)physaddr) = paddr; ++ return page_present; + } +- ms = machdep->machspec; +- if (BT_REFERENCE_CHECK(bt)) +- ofp = pc->nullfp; +- else +- ofp = fp; ++ ++ sprintf(ptebuf, "%lx", pte); ++ len1 = MAX(strlen(ptebuf), strlen("PTE")); ++ fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); + +- if (bt->flags & BT_TEXT_SYMBOLS) { +- fprintf(ofp, "%sSTART: %s%s at %lx\n", +- space(VADDR_PRLEN > 8 ? 14 : 6), +- closest_symbol(bt->instptr), +- STREQ(closest_symbol(bt->instptr), "thread_return") ? +- " (schedule)" : "", +- bt->instptr); +- } else if (bt->flags & BT_START) { +- x86_64_print_stack_entry(bt, ofp, level, +- 0, bt->instptr); +- bt->flags &= ~BT_START; +- level++; +- } ++ if (!page_present && pte) { ++ swap_location(pte, buf); ++ if ((c = parse_line(buf, arglist)) != 3) ++ error(FATAL, "cannot determine swap location\n"); + ++ len2 = MAX(strlen(arglist[0]), strlen("SWAP")); ++ len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); + +- if ((estack = x86_64_in_exception_stack(bt))) { +-in_exception_stack: +- bt->flags |= BT_EXCEPTION_STACK; +- /* +- * The stack buffer will have been loaded with the process +- * stack, so switch to the indicated exception stack. +- */ +- bt->stackbase = estack; +- bt->stacktop = estack + ms->stkinfo.esize; +- bt->stackbuf = ms->irqstack; ++ fprintf(fp, "%s %s\n", ++ mkstring(buf2, len2, CENTER|LJUST, "SWAP"), ++ mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); + +- if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, +- bt->stacktop - bt->stackbase, +- bt->hp && (bt->hp->esp == bt->stkptr) ? +- "irqstack contents via hook" : "irqstack contents", +- RETURN_ON_ERROR)) +- error(FATAL, "read of exception stack at %lx failed\n", +- bt->stackbase); ++ strcpy(buf2, arglist[0]); ++ strcpy(buf3, arglist[2]); ++ fprintf(fp, "%s %s %s\n", ++ mkstring(ptebuf, len1, CENTER|RJUST, NULL), ++ mkstring(buf2, len2, CENTER|RJUST, NULL), ++ mkstring(buf3, len3, CENTER|RJUST, NULL)); + +- /* +- * If irq_eframe is set, we've jumped back here from the +- * IRQ stack dump below. Do basically the same thing as if +- * had come from the processor stack, but presume that we +- * must have been in kernel mode, i.e., took an exception +- * while operating on an IRQ stack. 
(untested) +- */ +- if (irq_eframe) { +- bt->flags |= BT_EXCEPTION_FRAME; +- i = (irq_eframe - bt->stackbase)/sizeof(ulong); +- x86_64_print_stack_entry(bt, ofp, level, i, +- bt->instptr); +- bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; +- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, +- bt->stackbuf + (irq_eframe - bt->stackbase), +- bt, ofp); +- rsp += SIZE(pt_regs); /* guaranteed kernel mode */ +- level++; +- irq_eframe = 0; +- } ++ return page_present; ++ } + +- stacktop = bt->stacktop - SIZE(pt_regs); ++ sprintf(physbuf, "%lx", paddr); ++ len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); ++ fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); + +- for (i = (rsp - bt->stackbase)/sizeof(ulong); +- !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { ++ fprintf(fp, "FLAGS\n"); + +- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ fprintf(fp, "%s %s ", ++ mkstring(ptebuf, len1, CENTER|RJUST, NULL), ++ mkstring(physbuf, len2, CENTER|RJUST, NULL)); ++ fprintf(fp, "("); ++ others = 0; + +- if (!is_kernel_text(*up)) +- continue; ++ if (pte) { ++ if (pte & _PAGE_PRESENT) ++ fprintf(fp, "%sPRESENT", others++ ? "|" : ""); ++ if (pte & _PAGE_RW) ++ fprintf(fp, "%sRW", others++ ? "|" : ""); ++ if (pte & _PAGE_USER) ++ fprintf(fp, "%sUSER", others++ ? "|" : ""); ++ if (pte & _PAGE_PWT) ++ fprintf(fp, "%sPWT", others++ ? "|" : ""); ++ if (pte & _PAGE_PCD) ++ fprintf(fp, "%sPCD", others++ ? "|" : ""); ++ if (pte & _PAGE_ACCESSED) ++ fprintf(fp, "%sACCESSED", others++ ? "|" : ""); ++ if (pte & _PAGE_DIRTY) ++ fprintf(fp, "%sDIRTY", others++ ? "|" : ""); ++ if ((pte & _PAGE_PSE) && (pte & _PAGE_PRESENT)) ++ fprintf(fp, "%sPSE", others++ ? "|" : ""); ++ if ((pte & _PAGE_PROTNONE) && !(pte & _PAGE_PRESENT)) ++ fprintf(fp, "%sPROTNONE", others++ ? "|" : ""); ++ if (pte & _PAGE_GLOBAL) ++ fprintf(fp, "%sGLOBAL", others++ ? "|" : ""); ++ if (pte & _PAGE_NX) ++ fprintf(fp, "%sNX", others++ ? "|" : ""); ++ } else { ++ fprintf(fp, "no mapping"); ++ } + +- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) +- { +- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: +- rsp += SIZE(pt_regs); +- i += SIZE(pt_regs)/sizeof(ulong); +- case BACKTRACE_ENTRY_DISPLAYED: +- level++; +- break; +- case BACKTRACE_ENTRY_IGNORED: +- break; +- case BACKTRACE_COMPLETE: +- done = TRUE; +- break; +- } +- } ++ fprintf(fp, ")\n"); + +- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, +- bt->stackbuf + (bt->stacktop - bt->stackbase) - +- SIZE(pt_regs), bt, ofp); ++ return (page_present); ++} + +- if (!BT_REFERENCE_CHECK(bt)) +- fprintf(fp, "--- ---\n"); ++static char * ++x86_64_exception_stacks[7] = { ++ "STACKFAULT", ++ "DOUBLEFAULT", ++ "NMI", ++ "DEBUG", ++ "MCE", ++ "(unknown)", ++ "(unknown)" ++}; + +- /* +- * stack = (unsigned long *) estack_end[-2]; +- */ +- up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); +- up -= 2; +- rsp = bt->stkptr = *up; +- up -= 3; +- bt->instptr = *up; +- if (cs & 3) +- done = TRUE; /* user-mode exception */ +- else +- done = FALSE; /* kernel-mode exception */ +- bt->frameptr = 0; ++/* ++ * Look for likely exception frames in a stack. ++ */ ++static int ++x86_64_eframe_search(struct bt_info *bt) ++{ ++ int i, c, cnt; ++ ulong estack, irqstack, stacksize; ++ ulong *up; ++ struct machine_specific *ms; ++ struct bt_info bt_local; + +- /* +- * Print the return values from the estack end. 
+- */ +- if (!done) { +- bt->flags |= BT_START; +- x86_64_print_stack_entry(bt, ofp, level, +- 0, bt->instptr); +- bt->flags &= ~BT_START; +- level++; +- } +- } ++ if (bt->flags & BT_EFRAME_SEARCH2) { ++ BCOPY(bt, &bt_local, sizeof(struct bt_info)); ++ bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; + +- /* +- * IRQ stack entry always comes in via the process stack, regardless +- * whether it happened while running in user or kernel space. +- */ +- if (!done && (irqstack = x86_64_in_irqstack(bt))) { +- bt->flags |= BT_IRQSTACK; +- /* +- * Until coded otherwise, the stackbase will be pointing to +- * either the exception stack or, more likely, the process +- * stack base. Switch it to the IRQ stack. +- */ +- bt->stackbase = irqstack; +- bt->stacktop = irqstack + ms->stkinfo.isize; +- bt->stackbuf = ms->irqstack; ++ ms = machdep->machspec; + +- if (!readmem(bt->stackbase, KVADDR, +- bt->stackbuf, bt->stacktop - bt->stackbase, +- bt->hp && (bt->hp->esp == bt_in->stkptr) ? +- "irqstack contents via hook" : "irqstack contents", +- RETURN_ON_ERROR)) +- error(FATAL, "read of IRQ stack at %lx failed\n", +- bt->stackbase); ++ for (c = 0; c < kt->cpus; c++) { ++ if (ms->stkinfo.ibase[c] == 0) ++ break; ++ bt->hp->esp = ms->stkinfo.ibase[c]; ++ fprintf(fp, "CPU %d IRQ STACK:\n", c); ++ if ((cnt = x86_64_eframe_search(bt))) ++ fprintf(fp, "\n"); ++ else ++ fprintf(fp, "(none found)\n\n"); ++ } + +- stacktop = bt->stacktop - 64; /* from kernel code */ ++ for (c = 0; c < kt->cpus; c++) { ++ for (i = 0; i < 7; i++) { ++ if (ms->stkinfo.ebase[c][i] == 0) ++ break; ++ bt->hp->esp = ms->stkinfo.ebase[c][i]; ++ fprintf(fp, "CPU %d %s EXCEPTION STACK:\n", ++ c, x86_64_exception_stacks[i]); ++ if ((cnt = x86_64_eframe_search(bt))) ++ fprintf(fp, "\n"); ++ else ++ fprintf(fp, "(none found)\n\n"); ++ } ++ } + +- for (i = (rsp - bt->stackbase)/sizeof(ulong); +- !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { ++ return 0; ++ } + +- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ if (bt->hp && bt->hp->esp) { ++ ms = machdep->machspec; ++ bt->stkptr = bt->hp->esp; ++ if ((estack = x86_64_in_exception_stack(bt))) { ++ stacksize = ms->stkinfo.esize; ++ bt->stackbase = estack; ++ bt->stacktop = estack + ms->stkinfo.esize; ++ bt->stackbuf = ms->irqstack; ++ alter_stackbuf(bt); ++ } else if ((irqstack = x86_64_in_irqstack(bt))) { ++ stacksize = ms->stkinfo.isize; ++ bt->stackbase = irqstack; ++ bt->stacktop = irqstack + ms->stkinfo.isize; ++ bt->stackbuf = ms->irqstack; ++ alter_stackbuf(bt); ++ } else if (!INSTACK(bt->stkptr, bt)) ++ error(FATAL, ++ "unrecognized stack address for this task: %lx\n", ++ bt->hp->esp); ++ } + +- if (!is_kernel_text(*up)) +- continue; ++ stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); + +- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) +- { +- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: +- rsp += SIZE(pt_regs); +- i += SIZE(pt_regs)/sizeof(ulong); +- case BACKTRACE_ENTRY_DISPLAYED: +- level++; +- break; +- case BACKTRACE_ENTRY_IGNORED: +- break; +- case BACKTRACE_COMPLETE: +- done = TRUE; +- break; ++ if (bt->stkptr) ++ i = (bt->stkptr - bt->stackbase)/sizeof(ulong); ++ else ++ i = 0; ++ ++ for (cnt = 0; i <= stacksize/sizeof(ulong); i++) { ++ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ ++ if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT| ++ EFRAME_VERIFY, 0, (char *)up, bt, fp)) ++ cnt++; ++ } ++ ++ return cnt; ++} ++ ++static void ++x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp) ++{ ++ int i, u_idx; ++ ulong *up; ++ ulong words, 
addr; ++ ++ if (rsp < bt->frameptr) ++ return; ++ ++ words = (rsp - bt->frameptr) / sizeof(ulong) + 1; ++ ++ addr = bt->frameptr; ++ u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); ++ for (i = 0; i < words; i++, u_idx++) { ++ if (!(i & 1)) ++ fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); ++ ++ up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); ++ fprintf(ofp, "%016lx ", *up); ++ addr += sizeof(ulong); ++ } ++ fprintf(ofp, "\n"); ++} ++ ++/* ++ * Check a frame for a requested reference. ++ */ ++static void ++x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) ++{ ++ struct syment *sp; ++ ulong offset; ++ ++ if (!name) ++ sp = value_search(text, &offset); ++ else if (!text) ++ sp = symbol_search(name); ++ ++ switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) ++ { ++ case BT_REF_SYMBOL: ++ if (name) { ++ if (STREQ(name, bt->ref->str)) ++ bt->ref->cmdflags |= BT_REF_FOUND; ++ } else { ++ if (sp && !offset && STREQ(sp->name, bt->ref->str)) ++ bt->ref->cmdflags |= BT_REF_FOUND; ++ } ++ break; ++ ++ case BT_REF_HEXVAL: ++ if (text) { ++ if (bt->ref->hexval == text) ++ bt->ref->cmdflags |= BT_REF_FOUND; ++ } else if (sp && (bt->ref->hexval == sp->value)) ++ bt->ref->cmdflags |= BT_REF_FOUND; ++ else if (!name && !text && (bt->ref->hexval == 0)) ++ bt->ref->cmdflags |= BT_REF_FOUND; ++ break; ++ } ++} ++ ++/* ++ * Determine the function containing a .text.lock. reference. ++ */ ++static ulong ++text_lock_function(char *name, struct bt_info *bt, ulong locktext) ++{ ++ int c, reterror, instr, arg; ++ char buf[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ char *p1; ++ ulong locking_func; ++ ++ instr = arg = -1; ++ locking_func = 0; ++ ++ open_tmpfile2(); ++ ++ if (STREQ(name, ".text.lock.spinlock")) ++ sprintf(buf, "x/4i 0x%lx", locktext); ++ else ++ sprintf(buf, "x/1i 0x%lx", locktext); ++ ++ if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { ++ close_tmpfile2(); ++ bt->flags |= BT_FRAMESIZE_DISABLE; ++ return 0; ++ } ++ ++ rewind(pc->tmpfile2); ++ while (fgets(buf, BUFSIZE, pc->tmpfile2)) { ++ c = parse_line(buf, arglist); ++ ++ if (instr == -1) { ++ /* ++ * Check whether are ++ * in the output string. ++ */ ++ if (LASTCHAR(arglist[0]) == ':') { ++ instr = 1; ++ arg = 2; ++ } else { ++ instr = 2; ++ arg = 3; + } + } + +- if (!BT_REFERENCE_CHECK(bt)) +- fprintf(fp, "--- ---\n"); ++ if (c < (arg+1)) ++ break; + +- /* +- * stack = (unsigned long *) (irqstack_end[-1]); +- * (where irqstack_end is 64 bytes below page end) +- */ +- up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); +- up -= 1; +- irq_eframe = rsp = bt->stkptr = *up; +- up -= 1; +- bt->instptr = *up; +- bt->frameptr = 0; +- done = FALSE; +- } else +- irq_eframe = 0; ++ if (STREQ(arglist[instr], "jmpq") || STREQ(arglist[instr], "jmp")) { ++ p1 = arglist[arg]; ++ reterror = 0; ++ locking_func = htol(p1, RETURN_ON_ERROR, &reterror); ++ if (reterror) ++ locking_func = 0; ++ break; ++ } ++ } ++ close_tmpfile2(); + +- if (!done && (estack = x86_64_in_exception_stack(bt))) +- goto in_exception_stack; ++ if (!locking_func) ++ bt->flags |= BT_FRAMESIZE_DISABLE; + +- if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { +- /* +- * Verify that the rsp pointer taken from either the +- * exception or IRQ stack points into the process stack. 
+- */ +- bt->stackbase = GET_STACKBASE(bt->tc->task); +- bt->stacktop = GET_STACKTOP(bt->tc->task); ++ return locking_func; + +- if (!INSTACK(rsp, bt)) { +- switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) +- { +- case (BT_EXCEPTION_STACK|BT_IRQSTACK): +- error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, +- bt_in->stkptr, bt->stkptr, rsp, +- bt->stackbase); ++} + +- case BT_EXCEPTION_STACK: +- error(FATAL, STACK_TRANSITION_ERRMSG_E_P, +- bt_in->stkptr, rsp, bt->stackbase); + +- case BT_IRQSTACK: +- error(FATAL, STACK_TRANSITION_ERRMSG_I_P, +- bt_in->stkptr, rsp, bt->stackbase); ++/* ++ * print one entry of a stack trace ++ */ ++#define BACKTRACE_COMPLETE (1) ++#define BACKTRACE_ENTRY_IGNORED (2) ++#define BACKTRACE_ENTRY_DISPLAYED (3) ++#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4) ++ ++static int ++x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, ++ int stkindex, ulong text) ++{ ++ ulong rsp, offset, locking_func; ++ struct syment *sp, *spl; ++ char *name; ++ int result; ++ long eframe_check; ++ char buf[BUFSIZE]; ++ ++ eframe_check = -1; ++ offset = 0; ++ sp = value_search(text, &offset); ++ if (!sp) ++ return BACKTRACE_ENTRY_IGNORED; ++ ++ name = sp->name; ++ ++ if (bt->flags & BT_TEXT_SYMBOLS) { ++ if (bt->flags & BT_EXCEPTION_FRAME) ++ rsp = bt->stkptr; ++ else ++ rsp = bt->stackbase + (stkindex * sizeof(long)); ++ fprintf(ofp, " [%s] %s at %lx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)), ++ name, text); ++ if (BT_REFERENCE_CHECK(bt)) ++ x86_64_do_bt_reference_check(bt, text, name); ++ return BACKTRACE_ENTRY_DISPLAYED; ++ } ++ ++ if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) && ++ !(bt->flags & BT_START)) { ++ if (STREQ(name, "child_rip")) { ++ if (symbol_exists("kernel_thread")) ++ name = "kernel_thread"; ++ else if (symbol_exists("arch_kernel_thread")) ++ name = "arch_kernel_thread"; ++ } ++ else if (!(bt->flags & BT_SCHEDULE)) { ++ if (STREQ(name, "error_exit")) ++ eframe_check = 8; ++ else { ++ if (CRASHDEBUG(2)) ++ fprintf(ofp, ++ "< ignoring text symbol with no offset: %s() >\n", ++ sp->name); ++ return BACKTRACE_ENTRY_IGNORED; + } + } ++ } + +- /* +- * Now fill the local stack buffer from the process stack. +- */ +- if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, +- bt->stacktop - bt->stackbase, +- "irqstack contents", RETURN_ON_ERROR)) +- error(FATAL, "read of process stack at %lx failed\n", +- bt->stackbase); ++ if (bt->flags & BT_SCHEDULE) ++ name = "schedule"; ++ ++ if (STREQ(name, "child_rip")) { ++ if (symbol_exists("kernel_thread")) ++ name = "kernel_thread"; ++ else if (symbol_exists("arch_kernel_thread")) ++ name = "arch_kernel_thread"; ++ result = BACKTRACE_COMPLETE; ++ } else if (STREQ(name, "cpu_idle")) ++ result = BACKTRACE_COMPLETE; ++ else ++ result = BACKTRACE_ENTRY_DISPLAYED; ++ ++ if (bt->flags & BT_EXCEPTION_FRAME) ++ rsp = bt->stkptr; ++ else if (bt->flags & BT_START) ++ rsp = bt->stkptr; ++ else ++ rsp = bt->stackbase + (stkindex * sizeof(long)); ++ ++ if ((bt->flags & BT_FULL)) { ++ if (bt->frameptr) ++ x86_64_display_full_frame(bt, rsp, ofp); ++ bt->frameptr = rsp + sizeof(ulong); + } + +- /* +- * For a normally blocked task, hand-create the first level. 
+- */ +- if (!done && +- !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) && +- STREQ(closest_symbol(bt->instptr), "thread_return")) { +- bt->flags |= BT_SCHEDULE; +- i = (rsp - bt->stackbase)/sizeof(ulong); +- x86_64_print_stack_entry(bt, ofp, level, +- i, bt->instptr); +- bt->flags &= ~(ulonglong)BT_SCHEDULE; +- rsp += sizeof(ulong); +- level++; ++ fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, ++ rsp, name, text); ++ ++ if (STREQ(name, "tracesys")) ++ fprintf(ofp, " (via system_call)"); ++ else if (STRNEQ(name, ".text.lock.")) { ++ if ((locking_func = text_lock_function(name, bt, text)) && ++ (spl = value_search(locking_func, &offset))) ++ fprintf(ofp, " (via %s)", spl->name); + } + +- /* +- * Dump the IRQ exception frame from the process stack. +- * If the CS register indicates a user exception frame, +- * then set done to TRUE to avoid the process stack walk-through. ++ if (bt->flags & BT_FRAMESIZE_DISABLE) ++ fprintf(ofp, " *"); ++ ++ fprintf(ofp, "\n"); ++ ++ if (bt->flags & BT_LINE_NUMBERS) { ++ get_line_number(text, buf, FALSE); ++ if (strlen(buf)) ++ fprintf(ofp, " %s\n", buf); ++ } ++ ++ if (eframe_check >= 0) { ++ if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, ++ bt->stackbase + (stkindex*sizeof(long)) + eframe_check, ++ NULL, bt, ofp)) ++ result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED; ++ } ++ ++ if (BT_REFERENCE_CHECK(bt)) ++ x86_64_do_bt_reference_check(bt, text, name); ++ ++ bt->call_target = name; ++ ++ if (is_direct_call_target(bt)) { ++ if (CRASHDEBUG(2)) ++ fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", ++ bt->call_target); ++ bt->flags |= BT_CHECK_CALLER; ++ } else { ++ if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER)) ++ fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", ++ bt->call_target); ++ if (bt->flags & BT_CHECK_CALLER) { ++ if (CRASHDEBUG(2)) ++ fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n"); ++ bt->flags |= BT_NO_CHECK_CALLER; ++ } ++ bt->flags &= ~(ulonglong)BT_CHECK_CALLER; ++ } ++ ++ return result; ++} ++ ++/* ++ * Unroll a kernel stack. ++ */ ++static void ++x86_64_back_trace_cmd(struct bt_info *bt) ++{ ++ error(FATAL, "x86_64_back_trace_cmd: TBD\n"); ++} ++ ++ ++ ++/* ++ * Determine whether the initial stack pointer is located in one of the ++ * exception stacks. ++ */ ++static ulong ++x86_64_in_exception_stack(struct bt_info *bt) ++{ ++ int c, i; ++ ulong rsp; ++ ulong estack; ++ struct machine_specific *ms; ++ ++ rsp = bt->stkptr; ++ ms = machdep->machspec; ++ estack = 0; ++ ++ for (c = 0; !estack && (c < kt->cpus); c++) { ++ for (i = 0; i < 7; i++) { ++ if (ms->stkinfo.ebase[c][i] == 0) ++ break; ++ if ((rsp >= ms->stkinfo.ebase[c][i]) && ++ (rsp < (ms->stkinfo.ebase[c][i] + ++ ms->stkinfo.esize))) { ++ estack = ms->stkinfo.ebase[c][i]; ++ if (CRASHDEBUG(1) && (c != bt->tc->processor)) ++ error(INFO, ++ "task cpu: %d exception stack cpu: %d\n", ++ bt->tc->processor, c); ++ break; ++ } ++ } ++ } ++ ++ return estack; ++} ++ ++/* ++ * Determine whether the current stack pointer is in a cpu's irqstack. 
++ */ ++static ulong ++x86_64_in_irqstack(struct bt_info *bt) ++{ ++ int c; ++ ulong rsp; ++ ulong irqstack; ++ struct machine_specific *ms; ++ ++ rsp = bt->stkptr; ++ ms = machdep->machspec; ++ irqstack = 0; ++ ++ for (c = 0; !irqstack && (c < kt->cpus); c++) { ++ if (ms->stkinfo.ibase[c] == 0) ++ break; ++ if ((rsp >= ms->stkinfo.ibase[c]) && ++ (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) { ++ irqstack = ms->stkinfo.ibase[c]; ++ if (CRASHDEBUG(1) && (c != bt->tc->processor)) ++ error(INFO, ++ "task cpu: %d IRQ stack cpu: %d\n", ++ bt->tc->processor, c); ++ break; ++ } ++ } ++ ++ return irqstack; ++} ++ ++#define STACK_TRANSITION_ERRMSG_E_I_P \ ++"cannot transition from exception stack to IRQ stack to current process stack:\n exception stack pointer: %lx\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" ++#define STACK_TRANSITION_ERRMSG_E_P \ ++"cannot transition from exception stack to current process stack:\n exception stack pointer: %lx\n process stack pointer: %lx\n current_stack_base: %lx\n" ++#define STACK_TRANSITION_ERRMSG_I_P \ ++"cannot transition from IRQ stack to current process stack:\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx" ++ ++/* ++ * Low-budget back tracer -- dump text return addresses, following call chain ++ * when possible, along with any verifiable exception frames. ++ */ ++static void ++x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in) ++{ ++ int i, level, done, framesize; ++ ulong rsp, offset, stacktop; ++ ulong *up; ++ long cs; ++ struct syment *sp, *spt; ++ FILE *ofp; ++ ulong estack, irqstack; ++ ulong irq_eframe; ++ struct bt_info bt_local, *bt; ++ struct machine_specific *ms; ++ ulong last_process_stack_eframe; ++ ulong user_mode_eframe; ++ ++ /* ++ * User may have made a run-time switch. ++ */ ++ if (kt->flags & DWARF_UNWIND) { ++ machdep->back_trace = x86_64_dwarf_back_trace_cmd; ++ x86_64_dwarf_back_trace_cmd(bt_in); ++ return; ++ } ++ ++ bt = &bt_local; ++ BCOPY(bt_in, bt, sizeof(struct bt_info)); ++ ++ if (bt->flags & BT_FRAMESIZE_DEBUG) { ++ x86_64_framesize_debug(bt); ++ return; ++ } ++ ++ level = 0; ++ done = FALSE; ++ irq_eframe = 0; ++ last_process_stack_eframe = 0; ++ bt->call_target = NULL; ++ rsp = bt->stkptr; ++ if (!rsp) { ++ error(INFO, "cannot determine starting stack pointer\n"); ++ return; ++ } ++ ms = machdep->machspec; ++ if (BT_REFERENCE_CHECK(bt)) ++ ofp = pc->nullfp; ++ else ++ ofp = fp; ++ ++ if (bt->flags & BT_TEXT_SYMBOLS) { ++ if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) ++ fprintf(ofp, "%sSTART: %s%s at %lx\n", ++ space(VADDR_PRLEN > 8 ? 14 : 6), ++ closest_symbol(bt->instptr), ++ STREQ(closest_symbol(bt->instptr), "thread_return") ? ++ " (schedule)" : "", ++ bt->instptr); ++ } else if (bt->flags & BT_START) { ++ x86_64_print_stack_entry(bt, ofp, level, ++ 0, bt->instptr); ++ bt->flags &= ~BT_START; ++ level++; ++ } ++ ++ ++ if ((estack = x86_64_in_exception_stack(bt))) { ++in_exception_stack: ++ bt->flags |= BT_EXCEPTION_STACK; ++ /* ++ * The stack buffer will have been loaded with the process ++ * stack, so switch to the indicated exception stack. ++ */ ++ bt->stackbase = estack; ++ bt->stacktop = estack + ms->stkinfo.esize; ++ bt->stackbuf = ms->irqstack; ++ ++ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, ++ bt->stacktop - bt->stackbase, ++ bt->hp && (bt->hp->esp == bt->stkptr) ? 
++ "irqstack contents via hook" : "irqstack contents", ++ RETURN_ON_ERROR)) ++ error(FATAL, "read of exception stack at %lx failed\n", ++ bt->stackbase); ++ ++ /* ++ * If irq_eframe is set, we've jumped back here from the ++ * IRQ stack dump below. Do basically the same thing as if ++ * had come from the processor stack, but presume that we ++ * must have been in kernel mode, i.e., took an exception ++ * while operating on an IRQ stack. (untested) ++ */ ++ if (irq_eframe) { ++ bt->flags |= BT_EXCEPTION_FRAME; ++ i = (irq_eframe - bt->stackbase)/sizeof(ulong); ++ x86_64_print_stack_entry(bt, ofp, level, i, ++ bt->instptr); ++ bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; ++ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, ++ bt->stackbuf + (irq_eframe - bt->stackbase), ++ bt, ofp); ++ rsp += SIZE(pt_regs); /* guaranteed kernel mode */ ++ level++; ++ irq_eframe = 0; ++ } ++ ++ stacktop = bt->stacktop - SIZE(pt_regs); ++ ++ bt->flags &= ~BT_FRAMESIZE_DISABLE; ++ ++ for (i = (rsp - bt->stackbase)/sizeof(ulong); ++ !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { ++ ++ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ ++ if (!is_kernel_text(*up)) ++ continue; ++ ++ switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) ++ { ++ case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: ++ rsp += SIZE(pt_regs); ++ i += SIZE(pt_regs)/sizeof(ulong); ++ case BACKTRACE_ENTRY_DISPLAYED: ++ level++; ++ if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) { ++ rsp += framesize; ++ i += framesize/sizeof(ulong); ++ } ++ break; ++ case BACKTRACE_ENTRY_IGNORED: ++ break; ++ case BACKTRACE_COMPLETE: ++ done = TRUE; ++ break; ++ } ++ } ++ ++ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, ++ bt->stackbuf + (bt->stacktop - bt->stackbase) - ++ SIZE(pt_regs), bt, ofp); ++ ++ if (!BT_REFERENCE_CHECK(bt)) ++ fprintf(fp, "--- ---\n"); ++ ++ /* ++ * stack = (unsigned long *) estack_end[-2]; ++ */ ++ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); ++ up -= 2; ++ rsp = bt->stkptr = *up; ++ up -= 3; ++ bt->instptr = *up; ++ if (cs & 3) ++ done = TRUE; /* user-mode exception */ ++ else ++ done = FALSE; /* kernel-mode exception */ ++ bt->frameptr = 0; ++ ++ /* ++ * Print the return values from the estack end. ++ */ ++ if (!done) { ++ bt->flags |= BT_START; ++ x86_64_print_stack_entry(bt, ofp, level, ++ 0, bt->instptr); ++ bt->flags &= ~(BT_START|BT_FRAMESIZE_DISABLE); ++ level++; ++ if ((framesize = x86_64_get_framesize(bt, bt->instptr)) >= 0) ++ rsp += framesize; ++ } ++ } ++ ++ /* ++ * IRQ stack entry always comes in via the process stack, regardless ++ * whether it happened while running in user or kernel space. ++ */ ++ if (!done && (irqstack = x86_64_in_irqstack(bt))) { ++ bt->flags |= BT_IRQSTACK; ++ /* ++ * Until coded otherwise, the stackbase will be pointing to ++ * either the exception stack or, more likely, the process ++ * stack base. Switch it to the IRQ stack. ++ */ ++ bt->stackbase = irqstack; ++ bt->stacktop = irqstack + ms->stkinfo.isize; ++ bt->stackbuf = ms->irqstack; ++ ++ if (!readmem(bt->stackbase, KVADDR, ++ bt->stackbuf, bt->stacktop - bt->stackbase, ++ bt->hp && (bt->hp->esp == bt_in->stkptr) ? 
++ "irqstack contents via hook" : "irqstack contents", ++ RETURN_ON_ERROR)) ++ error(FATAL, "read of IRQ stack at %lx failed\n", ++ bt->stackbase); ++ ++ stacktop = bt->stacktop - 64; /* from kernel code */ ++ ++ bt->flags &= ~BT_FRAMESIZE_DISABLE; ++ ++ for (i = (rsp - bt->stackbase)/sizeof(ulong); ++ !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { ++ ++ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ ++ if (!is_kernel_text(*up)) ++ continue; ++ ++ switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) ++ { ++ case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: ++ rsp += SIZE(pt_regs); ++ i += SIZE(pt_regs)/sizeof(ulong); ++ case BACKTRACE_ENTRY_DISPLAYED: ++ level++; ++ if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) { ++ rsp += framesize; ++ i += framesize/sizeof(ulong); ++ } ++ break; ++ case BACKTRACE_ENTRY_IGNORED: ++ break; ++ case BACKTRACE_COMPLETE: ++ done = TRUE; ++ break; ++ } ++ } ++ ++ if (!BT_REFERENCE_CHECK(bt)) ++ fprintf(fp, "--- ---\n"); ++ ++ /* ++ * stack = (unsigned long *) (irqstack_end[-1]); ++ * (where irqstack_end is 64 bytes below page end) ++ */ ++ up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); ++ up -= 1; ++ irq_eframe = rsp = bt->stkptr = (*up) - ms->irq_eframe_link; ++ up -= 1; ++ bt->instptr = *up; ++ /* ++ * No exception frame when coming from call_softirq. ++ */ ++ if ((sp = value_search(bt->instptr, &offset)) && ++ STREQ(sp->name, "call_softirq")) ++ irq_eframe = 0; ++ bt->frameptr = 0; ++ done = FALSE; ++ } else ++ irq_eframe = 0; ++ ++ if (!done && (estack = x86_64_in_exception_stack(bt))) ++ goto in_exception_stack; ++ ++ if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { ++ /* ++ * Verify that the rsp pointer taken from either the ++ * exception or IRQ stack points into the process stack. ++ */ ++ bt->stackbase = GET_STACKBASE(bt->tc->task); ++ bt->stacktop = GET_STACKTOP(bt->tc->task); ++ ++ if (!INSTACK(rsp, bt)) { ++ switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) ++ { ++ case (BT_EXCEPTION_STACK|BT_IRQSTACK): ++ error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, ++ bt_in->stkptr, bt->stkptr, rsp, ++ bt->stackbase); ++ ++ case BT_EXCEPTION_STACK: ++ error(FATAL, STACK_TRANSITION_ERRMSG_E_P, ++ bt_in->stkptr, rsp, bt->stackbase); ++ ++ case BT_IRQSTACK: ++ error(FATAL, STACK_TRANSITION_ERRMSG_I_P, ++ bt_in->stkptr, rsp, bt->stackbase); ++ } ++ } ++ ++ /* ++ * Now fill the local stack buffer from the process stack. ++ */ ++ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, ++ bt->stacktop - bt->stackbase, ++ "irqstack contents", RETURN_ON_ERROR)) ++ error(FATAL, "read of process stack at %lx failed\n", ++ bt->stackbase); ++ } ++ ++ /* ++ * For a normally blocked task, hand-create the first level. ++ */ ++ if (!done && ++ !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) && ++ STREQ(closest_symbol(bt->instptr), "thread_return")) { ++ bt->flags |= BT_SCHEDULE; ++ i = (rsp - bt->stackbase)/sizeof(ulong); ++ x86_64_print_stack_entry(bt, ofp, level, ++ i, bt->instptr); ++ bt->flags &= ~(ulonglong)BT_SCHEDULE; ++ rsp += sizeof(ulong); ++ level++; ++ } ++ ++ /* ++ * Dump the IRQ exception frame from the process stack. ++ * If the CS register indicates a user exception frame, ++ * then set done to TRUE to avoid the process stack walk-through. ++ * Otherwise, bump up the rsp past the kernel-mode eframe. 
++ */ ++ if (irq_eframe) { ++ bt->flags |= BT_EXCEPTION_FRAME; ++ i = (irq_eframe - bt->stackbase)/sizeof(ulong); ++ x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); ++ bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; ++ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, ++ bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); ++ if (cs & 3) ++ done = TRUE; /* IRQ from user-mode */ ++ else { ++ if (x86_64_print_eframe_location(rsp, level, ofp)) ++ level++; ++ rsp += SIZE(pt_regs); ++ irq_eframe = 0; ++ } ++ level++; ++ } ++ ++ /* ++ * Walk the process stack. ++ */ ++ ++ bt->flags &= ~BT_FRAMESIZE_DISABLE; ++ ++ for (i = (rsp - bt->stackbase)/sizeof(ulong); ++ !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { ++ ++ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ ++ if (!is_kernel_text(*up)) ++ continue; ++ ++ if ((bt->flags & BT_CHECK_CALLER)) { ++ /* ++ * A non-zero offset value from the value_search() ++ * lets us know if it's a real text return address. ++ */ ++ spt = value_search(*up, &offset); ++ if (!offset && !(bt->flags & BT_FRAMESIZE_DISABLE)) ++ continue; ++ ++ /* ++ * sp gets the syment of the function that the text ++ * routine above called before leaving its return ++ * address on the stack -- if it can be determined. ++ */ ++ sp = x86_64_function_called_by((*up)-5); ++ ++ if (sp == NULL) { ++ /* ++ * We were unable to get the called function. ++ * If the text address had an offset, then ++ * it must have made an indirect call, and ++ * can't have called our target function. ++ */ ++ if (offset) { ++ if (CRASHDEBUG(1)) ++ fprintf(ofp, ++ "< ignoring %s() -- makes indirect call and NOT %s()>\n", ++ spt->name, ++ bt->call_target); ++ continue; ++ } ++ } else if ((machdep->flags & SCHED_TEXT) && ++ STREQ(bt->call_target, "schedule") && ++ STREQ(sp->name, "__sched_text_start")) { ++ ; /* bait and switch */ ++ } else if (!STREQ(sp->name, bt->call_target)) { ++ /* ++ * We got function called by the text routine, ++ * but it's not our target function. ++ */ ++ if (CRASHDEBUG(2)) ++ fprintf(ofp, ++ "< ignoring %s() -- calls %s() and NOT %s()>\n", ++ spt->name, sp->name, ++ bt->call_target); ++ continue; ++ } ++ } ++ ++ switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) ++ { ++ case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: ++ last_process_stack_eframe = rsp + 8; ++ if (x86_64_print_eframe_location(last_process_stack_eframe, level, ofp)) ++ level++; ++ rsp += SIZE(pt_regs); ++ i += SIZE(pt_regs)/sizeof(ulong); ++ case BACKTRACE_ENTRY_DISPLAYED: ++ level++; ++ if ((framesize = x86_64_get_framesize(bt, *up)) >= 0) { ++ rsp += framesize; ++ i += framesize/sizeof(ulong); ++ } ++ break; ++ case BACKTRACE_ENTRY_IGNORED: ++ break; ++ case BACKTRACE_COMPLETE: ++ done = TRUE; ++ break; ++ } ++ } ++ ++ if (!irq_eframe && !is_kernel_thread(bt->tc->task) && ++ (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { ++ user_mode_eframe = bt->stacktop - SIZE(pt_regs); ++ if (last_process_stack_eframe < user_mode_eframe) ++ x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + ++ (bt->stacktop - bt->stackbase) - SIZE(pt_regs), ++ bt, ofp); ++ } ++ ++ if (bt->flags & BT_TEXT_SYMBOLS) { ++ if (BT_REFERENCE_FOUND(bt)) { ++ print_task_header(fp, task_to_context(bt->task), 0); ++ BCOPY(bt_in, bt, sizeof(struct bt_info)); ++ bt->ref = NULL; ++ machdep->back_trace(bt); ++ fprintf(fp, "\n"); ++ } ++ } ++} ++ ++/* ++ * Use dwarf CFI encodings to correctly follow the call chain. 
++ */ ++static void ++x86_64_dwarf_back_trace_cmd(struct bt_info *bt_in) ++{ ++ int i, level, done; ++ ulong rsp, offset, stacktop; ++ ulong *up; ++ long cs; ++ struct syment *sp; ++ FILE *ofp; ++ ulong estack, irqstack; ++ ulong irq_eframe; ++ struct bt_info bt_local, *bt; ++ struct machine_specific *ms; ++ ulong last_process_stack_eframe; ++ ulong user_mode_eframe; ++ ++ /* ++ * User may have made a run-time switch. ++ */ ++ if (!(kt->flags & DWARF_UNWIND)) { ++ machdep->back_trace = x86_64_low_budget_back_trace_cmd; ++ x86_64_low_budget_back_trace_cmd(bt_in); ++ return; ++ } ++ ++ bt = &bt_local; ++ BCOPY(bt_in, bt, sizeof(struct bt_info)); ++ ++ if (bt->flags & BT_FRAMESIZE_DEBUG) { ++ dwarf_debug(bt); ++ return; ++ } ++ ++ level = 0; ++ done = FALSE; ++ irq_eframe = 0; ++ last_process_stack_eframe = 0; ++ bt->call_target = NULL; ++ bt->bptr = 0; ++ rsp = bt->stkptr; ++ if (!rsp) { ++ error(INFO, "cannot determine starting stack pointer\n"); ++ return; ++ } ++ ms = machdep->machspec; ++ if (BT_REFERENCE_CHECK(bt)) ++ ofp = pc->nullfp; ++ else ++ ofp = fp; ++ ++ if (bt->flags & BT_TEXT_SYMBOLS) { ++ if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) ++ fprintf(ofp, "%sSTART: %s%s at %lx\n", ++ space(VADDR_PRLEN > 8 ? 14 : 6), ++ closest_symbol(bt->instptr), ++ STREQ(closest_symbol(bt->instptr), "thread_return") ? ++ " (schedule)" : "", ++ bt->instptr); ++ } else if (bt->flags & BT_START) { ++ x86_64_print_stack_entry(bt, ofp, level, ++ 0, bt->instptr); ++ bt->flags &= ~BT_START; ++ level++; ++ } ++ ++ ++ if ((estack = x86_64_in_exception_stack(bt))) { ++in_exception_stack: ++ bt->flags |= BT_EXCEPTION_STACK; ++ /* ++ * The stack buffer will have been loaded with the process ++ * stack, so switch to the indicated exception stack. ++ */ ++ bt->stackbase = estack; ++ bt->stacktop = estack + ms->stkinfo.esize; ++ bt->stackbuf = ms->irqstack; ++ ++ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, ++ bt->stacktop - bt->stackbase, ++ bt->hp && (bt->hp->esp == bt->stkptr) ? ++ "irqstack contents via hook" : "irqstack contents", ++ RETURN_ON_ERROR)) ++ error(FATAL, "read of exception stack at %lx failed\n", ++ bt->stackbase); ++ ++ /* ++ * If irq_eframe is set, we've jumped back here from the ++ * IRQ stack dump below. Do basically the same thing as if ++ * had come from the processor stack, but presume that we ++ * must have been in kernel mode, i.e., took an exception ++ * while operating on an IRQ stack. 
(untested) ++ */ ++ if (irq_eframe) { ++ bt->flags |= BT_EXCEPTION_FRAME; ++ i = (irq_eframe - bt->stackbase)/sizeof(ulong); ++ x86_64_print_stack_entry(bt, ofp, level, i, ++ bt->instptr); ++ bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; ++ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, ++ bt->stackbuf + (irq_eframe - bt->stackbase), ++ bt, ofp); ++ rsp += SIZE(pt_regs); /* guaranteed kernel mode */ ++ level++; ++ irq_eframe = 0; ++ } ++ ++ stacktop = bt->stacktop - SIZE(pt_regs); ++ ++ if (!done) { ++ level = dwarf_backtrace(bt, level, stacktop); ++ done = TRUE; ++ } ++ ++ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, ++ bt->stackbuf + (bt->stacktop - bt->stackbase) - ++ SIZE(pt_regs), bt, ofp); ++ ++ if (!BT_REFERENCE_CHECK(bt)) ++ fprintf(fp, "--- ---\n"); ++ ++ /* ++ * stack = (unsigned long *) estack_end[-2]; ++ */ ++ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); ++ up -= 2; ++ rsp = bt->stkptr = *up; ++ up -= 3; ++ bt->instptr = *up; ++ if (cs & 3) ++ done = TRUE; /* user-mode exception */ ++ else ++ done = FALSE; /* kernel-mode exception */ ++ bt->frameptr = 0; ++ ++ /* ++ * Print the return values from the estack end. ++ */ ++ if (!done) { ++ bt->flags |= BT_START; ++ x86_64_print_stack_entry(bt, ofp, level, ++ 0, bt->instptr); ++ bt->flags &= ~BT_START; ++ level++; ++ } ++ } ++ ++ /* ++ * IRQ stack entry always comes in via the process stack, regardless ++ * whether it happened while running in user or kernel space. ++ */ ++ if (!done && (irqstack = x86_64_in_irqstack(bt))) { ++ bt->flags |= BT_IRQSTACK; ++ /* ++ * Until coded otherwise, the stackbase will be pointing to ++ * either the exception stack or, more likely, the process ++ * stack base. Switch it to the IRQ stack. ++ */ ++ bt->stackbase = irqstack; ++ bt->stacktop = irqstack + ms->stkinfo.isize; ++ bt->stackbuf = ms->irqstack; ++ ++ if (!readmem(bt->stackbase, KVADDR, ++ bt->stackbuf, bt->stacktop - bt->stackbase, ++ bt->hp && (bt->hp->esp == bt_in->stkptr) ? ++ "irqstack contents via hook" : "irqstack contents", ++ RETURN_ON_ERROR)) ++ error(FATAL, "read of IRQ stack at %lx failed\n", ++ bt->stackbase); ++ ++ stacktop = bt->stacktop - 64; /* from kernel code */ ++ ++ if (!done) { ++ level = dwarf_backtrace(bt, level, stacktop); ++ done = TRUE; ++ } ++ ++ if (!BT_REFERENCE_CHECK(bt)) ++ fprintf(fp, "--- ---\n"); ++ ++ /* ++ * stack = (unsigned long *) (irqstack_end[-1]); ++ * (where irqstack_end is 64 bytes below page end) ++ */ ++ up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); ++ up -= 1; ++ irq_eframe = rsp = bt->stkptr = (*up) - ms->irq_eframe_link; ++ up -= 1; ++ bt->instptr = *up; ++ /* ++ * No exception frame when coming from call_softirq. ++ */ ++ if ((sp = value_search(bt->instptr, &offset)) && ++ STREQ(sp->name, "call_softirq")) ++ irq_eframe = 0; ++ bt->frameptr = 0; ++ done = FALSE; ++ } else ++ irq_eframe = 0; ++ ++ if (!done && (estack = x86_64_in_exception_stack(bt))) ++ goto in_exception_stack; ++ ++ if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { ++ /* ++ * Verify that the rsp pointer taken from either the ++ * exception or IRQ stack points into the process stack. 
++ */ ++ bt->stackbase = GET_STACKBASE(bt->tc->task); ++ bt->stacktop = GET_STACKTOP(bt->tc->task); ++ ++ if (!INSTACK(rsp, bt)) { ++ switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) ++ { ++ case (BT_EXCEPTION_STACK|BT_IRQSTACK): ++ error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, ++ bt_in->stkptr, bt->stkptr, rsp, ++ bt->stackbase); ++ ++ case BT_EXCEPTION_STACK: ++ error(FATAL, STACK_TRANSITION_ERRMSG_E_P, ++ bt_in->stkptr, rsp, bt->stackbase); ++ ++ case BT_IRQSTACK: ++ error(FATAL, STACK_TRANSITION_ERRMSG_I_P, ++ bt_in->stkptr, rsp, bt->stackbase); ++ } ++ } ++ ++ /* ++ * Now fill the local stack buffer from the process stack. ++ */ ++ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, ++ bt->stacktop - bt->stackbase, ++ "irqstack contents", RETURN_ON_ERROR)) ++ error(FATAL, "read of process stack at %lx failed\n", ++ bt->stackbase); ++ } ++ ++ /* ++ * Dump the IRQ exception frame from the process stack. ++ * If the CS register indicates a user exception frame, ++ * then set done to TRUE to avoid the process stack walk-through. + * Otherwise, bump up the rsp past the kernel-mode eframe. + */ +- if (irq_eframe) { +- bt->flags |= BT_EXCEPTION_FRAME; +- i = (irq_eframe - bt->stackbase)/sizeof(ulong); +- x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); +- bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; +- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, +- bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); +- if (cs & 3) +- done = TRUE; /* IRQ from user-mode */ ++ if (irq_eframe) { ++ bt->flags |= BT_EXCEPTION_FRAME; ++ level = dwarf_print_stack_entry(bt, level); ++ bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; ++ cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, ++ bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); ++ if (cs & 3) ++ done = TRUE; /* IRQ from user-mode */ ++ else { ++ if (x86_64_print_eframe_location(rsp, level, ofp)) ++ level++; ++ rsp += SIZE(pt_regs); ++ irq_eframe = 0; ++ } ++ level++; ++ } ++ ++ /* ++ * Walk the process stack. ++ */ ++ if (!done) { ++ level = dwarf_backtrace(bt, level, bt->stacktop); ++ done = TRUE; ++ } ++ ++ if (!irq_eframe && !is_kernel_thread(bt->tc->task) && ++ (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { ++ user_mode_eframe = bt->stacktop - SIZE(pt_regs); ++ if (last_process_stack_eframe < user_mode_eframe) ++ x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + ++ (bt->stacktop - bt->stackbase) - SIZE(pt_regs), ++ bt, ofp); ++ } ++ ++ if (bt->flags & BT_TEXT_SYMBOLS) { ++ if (BT_REFERENCE_FOUND(bt)) { ++ print_task_header(fp, task_to_context(bt->task), 0); ++ BCOPY(bt_in, bt, sizeof(struct bt_info)); ++ bt->ref = NULL; ++ machdep->back_trace(bt); ++ fprintf(fp, "\n"); ++ } ++ } ++} ++ ++/* ++ * Functions that won't be called indirectly. ++ * Add more to this as they are discovered. 
++ */ ++static const char *direct_call_targets[] = { ++ "schedule", ++ "schedule_timeout", ++ NULL ++}; ++ ++static int ++is_direct_call_target(struct bt_info *bt) ++{ ++ int i; ++ ++ if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER)) ++ return FALSE; ++ ++ if (strstr(bt->call_target, "schedule") && ++ is_task_active(bt->task)) ++ return FALSE; ++ ++ for (i = 0; direct_call_targets[i]; i++) { ++ if (STREQ(direct_call_targets[i], bt->call_target)) ++ return TRUE; ++ } ++ ++ return FALSE; ++} ++ ++static struct syment * ++x86_64_function_called_by(ulong rip) ++{ ++ struct syment *sp; ++ char buf[BUFSIZE], *p1; ++ ulong value, offset; ++ unsigned char byte; ++ ++ value = 0; ++ sp = NULL; ++ ++ if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte", ++ RETURN_ON_ERROR)) ++ return sp; ++ ++ if (byte != 0xe8) ++ return sp; ++ ++ sprintf(buf, "x/i 0x%lx", rip); ++ ++ open_tmpfile2(); ++ if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { ++ rewind(pc->tmpfile2); ++ while (fgets(buf, BUFSIZE, pc->tmpfile2)) { ++ if ((p1 = strstr(buf, "callq")) && ++ whitespace(*(p1-1))) { ++ if (extract_hex(p1, &value, NULLCHAR, TRUE)) ++ break; ++ } ++ } ++ } ++ close_tmpfile2(); ++ ++ if (value) ++ sp = value_search(value, &offset); ++ ++ /* ++ * Functions that jmp to schedule() or schedule_timeout(). ++ */ ++ if (sp) { ++ if ((STREQ(sp->name, "schedule_timeout_interruptible") || ++ STREQ(sp->name, "schedule_timeout_uninterruptible"))) ++ sp = symbol_search("schedule_timeout"); ++ ++ if (STREQ(sp->name, "__cond_resched")) ++ sp = symbol_search("schedule"); ++ } ++ ++ return sp; ++} ++ ++/* ++ * Unroll the kernel stack using a minimal amount of gdb services. ++ */ ++static void ++x86_64_back_trace(struct gnu_request *req, struct bt_info *bt) ++{ ++ error(FATAL, "x86_64_back_trace: unused\n"); ++} ++ ++ ++/* ++ * Print exception frame information for x86_64. 
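++ * The register values are extracted from a pt_regs image using the
++ * member offsets cached in machdep->machspec->pto on the first call
++ * (PT_REGS_INIT), and the frame is sanity-checked by
++ * x86_64_eframe_verify() before it is printed.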
++ * ++ * Pid: 0, comm: swapper Not tainted 2.6.5-1.360phro.rootsmp ++ * RIP: 0010:[] {default_idle+36} ++ * RSP: 0018:ffffffff8048bfd8 EFLAGS: 00000246 ++ * RAX: 0000000000000000 RBX: ffffffff8010f510 RCX: 0000000000000018 ++ * RDX: 0000010001e37280 RSI: ffffffff803ac0a0 RDI: 000001007f43c400 ++ * RBP: 0000000000000000 R08: ffffffff8048a000 R09: 0000000000000000 ++ * R10: ffffffff80482188 R11: 0000000000000001 R12: 0000000000000000 ++ * R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 ++ * FS: 0000002a96e14fc0(0000) GS:ffffffff80481d80(0000) GS:0000000055578aa0 ++ * CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b ++ * CR2: 0000002a9556b000 CR3: 0000000000101000 CR4: 00000000000006e0 ++ * ++ */ ++ ++static long ++x86_64_exception_frame(ulong flags, ulong kvaddr, char *local, ++ struct bt_info *bt, FILE *ofp) ++{ ++ long rip, rsp, cs, ss, rflags, orig_rax, rbp; ++ long rax, rbx, rcx, rdx, rsi, rdi; ++ long r8, r9, r10, r11, r12, r13, r14, r15; ++ struct machine_specific *ms; ++ struct syment *sp; ++ ulong offset; ++ char *pt_regs_buf; ++ long verified; ++ int err; ++ ++ ms = machdep->machspec; ++ ++ if (!(machdep->flags & PT_REGS_INIT)) { ++ err = 0; ++ err |= ((ms->pto.r15 = MEMBER_OFFSET("pt_regs", "r15")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.r14 = MEMBER_OFFSET("pt_regs", "r14")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.r13 = MEMBER_OFFSET("pt_regs", "r13")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.r12 = MEMBER_OFFSET("pt_regs", "r12")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.r11 = MEMBER_OFFSET("pt_regs", "r11")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.r10 = MEMBER_OFFSET("pt_regs", "r10")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.r9 = MEMBER_OFFSET("pt_regs", "r9")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.orig_rax = ++ MEMBER_OFFSET("pt_regs", "orig_rax")) == ++ INVALID_OFFSET); ++ err |= ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == ++ INVALID_OFFSET); ++ ++ if (err) ++ error(WARNING, "pt_regs structure has changed\n"); ++ ++ machdep->flags |= PT_REGS_INIT; ++ } ++ ++ if (kvaddr) { ++ pt_regs_buf = GETBUF(SIZE(pt_regs)); ++ readmem(kvaddr, KVADDR, pt_regs_buf, ++ SIZE(pt_regs), "pt_regs", FAULT_ON_ERROR); ++ } else ++ pt_regs_buf = local; ++ ++ rip = ULONG(pt_regs_buf + ms->pto.rip); ++ rsp = ULONG(pt_regs_buf + ms->pto.rsp); ++ cs = ULONG(pt_regs_buf + ms->pto.cs); ++ ss = ULONG(pt_regs_buf + ms->pto.ss); ++ rflags = ULONG(pt_regs_buf + ms->pto.eflags); ++ orig_rax = ULONG(pt_regs_buf + ms->pto.orig_rax); ++ rbp = ULONG(pt_regs_buf + ms->pto.rbp); ++ rax = ULONG(pt_regs_buf 
+ ms->pto.rax); ++ rbx = ULONG(pt_regs_buf + ms->pto.rbx); ++ rcx = ULONG(pt_regs_buf + ms->pto.rcx); ++ rdx = ULONG(pt_regs_buf + ms->pto.rdx); ++ rsi = ULONG(pt_regs_buf + ms->pto.rsi); ++ rdi = ULONG(pt_regs_buf + ms->pto.rdi); ++ r8 = ULONG(pt_regs_buf + ms->pto.r8); ++ r9 = ULONG(pt_regs_buf + ms->pto.r9); ++ r10 = ULONG(pt_regs_buf + ms->pto.r10); ++ r11 = ULONG(pt_regs_buf + ms->pto.r11); ++ r12 = ULONG(pt_regs_buf + ms->pto.r12); ++ r13 = ULONG(pt_regs_buf + ms->pto.r13); ++ r14 = ULONG(pt_regs_buf + ms->pto.r14); ++ r15 = ULONG(pt_regs_buf + ms->pto.r15); ++ ++ verified = x86_64_eframe_verify(bt, ++ kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase, ++ cs, ss, rip, rsp, rflags); ++ ++ /* ++ * If it's print-if-verified request, don't print bogus eframes. ++ */ ++ if (!verified && ((flags & (EFRAME_VERIFY|EFRAME_PRINT)) == ++ (EFRAME_VERIFY|EFRAME_PRINT))) ++ flags &= ~EFRAME_PRINT; ++ ++ if (CRASHDEBUG(2)) ++ fprintf(ofp, "< exception frame at: %lx >\n", kvaddr ? kvaddr : ++ (local - bt->stackbuf) + bt->stackbase); ++ ++ if (flags & EFRAME_PRINT) { ++ if (flags & EFRAME_SEARCH) { ++ fprintf(ofp, "\n %s-MODE EXCEPTION FRAME AT: %lx\n", ++ cs & 3 ? "USER" : "KERNEL", ++ kvaddr ? kvaddr : ++ (local - bt->stackbuf) + bt->stackbase); ++ } else if (!(cs & 3)) { ++ fprintf(ofp, " [exception RIP: "); ++ if ((sp = value_search(rip, &offset))) { ++ fprintf(ofp, "%s", sp->name); ++ if (offset) ++ fprintf(ofp, (output_radix == 16) ? ++ "+0x%lx" : "+%ld", offset); ++ } else ++ fprintf(ofp, "unknown or invalid address"); ++ fprintf(ofp, "]\n"); ++ } ++ fprintf(ofp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n", ++ rip, rsp, rflags); ++ fprintf(ofp, " RAX: %016lx RBX: %016lx RCX: %016lx\n", ++ rax, rbx, rcx); ++ fprintf(ofp, " RDX: %016lx RSI: %016lx RDI: %016lx\n", ++ rdx, rsi, rdi); ++ fprintf(ofp, " RBP: %016lx R8: %016lx R9: %016lx\n", ++ rbp, r8, r9); ++ fprintf(ofp, " R10: %016lx R11: %016lx R12: %016lx\n", ++ r10, r11, r12); ++ fprintf(ofp, " R13: %016lx R14: %016lx R15: %016lx\n", ++ r13, r14, r15); ++ fprintf(ofp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n", ++ orig_rax, cs, ss); ++ ++ if (!verified && CRASHDEBUG((pc->flags & RUNTIME) ? 
0 : 1)) ++ error(WARNING, "possibly bogus exception frame\n"); ++ } ++ ++ if ((flags & EFRAME_PRINT) && BT_REFERENCE_CHECK(bt)) { ++ x86_64_do_bt_reference_check(bt, rip, NULL); ++ x86_64_do_bt_reference_check(bt, rsp, NULL); ++ x86_64_do_bt_reference_check(bt, cs, NULL); ++ x86_64_do_bt_reference_check(bt, ss, NULL); ++ x86_64_do_bt_reference_check(bt, rflags, NULL); ++ x86_64_do_bt_reference_check(bt, orig_rax, NULL); ++ x86_64_do_bt_reference_check(bt, rbp, NULL); ++ x86_64_do_bt_reference_check(bt, rax, NULL); ++ x86_64_do_bt_reference_check(bt, rbx, NULL); ++ x86_64_do_bt_reference_check(bt, rcx, NULL); ++ x86_64_do_bt_reference_check(bt, rdx, NULL); ++ x86_64_do_bt_reference_check(bt, rsi, NULL); ++ x86_64_do_bt_reference_check(bt, rdi, NULL); ++ x86_64_do_bt_reference_check(bt, r8, NULL); ++ x86_64_do_bt_reference_check(bt, r9, NULL); ++ x86_64_do_bt_reference_check(bt, r10, NULL); ++ x86_64_do_bt_reference_check(bt, r11, NULL); ++ x86_64_do_bt_reference_check(bt, r12, NULL); ++ x86_64_do_bt_reference_check(bt, r13, NULL); ++ x86_64_do_bt_reference_check(bt, r14, NULL); ++ x86_64_do_bt_reference_check(bt, r15, NULL); ++ } ++ ++ /* Remember the rip and rsp for unwinding the process stack */ ++ if (kt->flags & DWARF_UNWIND){ ++ bt->instptr = rip; ++ bt->stkptr = rsp; ++ bt->bptr = rbp; ++ } ++ ++ if (kvaddr) ++ FREEBUF(pt_regs_buf); ++ ++ if (flags & EFRAME_CS) ++ return cs; ++ else if (flags & EFRAME_VERIFY) ++ return verified; ++ ++ return 0; ++} ++ ++static int ++x86_64_print_eframe_location(ulong eframe, int level, FILE *ofp) ++{ ++ return FALSE; ++ ++#ifdef NOTDEF ++ ulong rip; ++ char *pt_regs_buf; ++ struct machine_specific *ms; ++ struct syment *sp; ++ ++ ms = machdep->machspec; ++ ++ pt_regs_buf = GETBUF(SIZE(pt_regs)); ++ if (!readmem(eframe, KVADDR, pt_regs_buf, SIZE(pt_regs), ++ "pt_regs", RETURN_ON_ERROR|QUIET)) { ++ FREEBUF(pt_regs_buf); ++ return FALSE; ++ } ++ ++ rip = ULONG(pt_regs_buf + ms->pto.rip); ++ FREEBUF(pt_regs_buf); ++ ++ if (!(sp = value_search(rip, NULL))) ++ return FALSE; ++ ++ fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level+1, ++ eframe, sp->name, rip); ++ ++ return TRUE; ++#endif ++} ++ ++/* ++ * Check that the verifiable registers contain reasonable data. 
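++ * RFLAGS must have all return-as-zero bits clear and bit 1 set.
++ * The CS:SS pair then selects the address checks, for example:
++ *
++ *   kernel mode:  cs == 0x10, ss == 0x18, is_kernel_text(rip),
++ *                 IS_KVADDR(rsp)
++ *   user mode:    cs == 0x33 (0x23 for 32-bit tasks), ss == 0x2b,
++ *                 IS_UVADDR(rip) and IS_UVADDR(rsp)
++ *
++ * Xen guests may also present the 0xe033/0xe02b and 0xe030 selectors.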
++ */ ++#define RAZ_MASK 0xffffffffffc08028 /* return-as-zero bits */ ++ ++static int ++x86_64_eframe_verify(struct bt_info *bt, long kvaddr, long cs, long ss, ++ long rip, long rsp, long rflags) ++{ ++ if ((rflags & RAZ_MASK) || !(rflags & 0x2)) ++ return FALSE; ++ ++ if ((cs == 0x10) && (ss == 0x18)) { ++ if (is_kernel_text(rip) && IS_KVADDR(rsp)) ++ return TRUE; ++ ++ if (x86_64_is_module_addr(rip) && ++ IS_KVADDR(rsp) && ++ (rsp == (kvaddr + SIZE(pt_regs)))) ++ return TRUE; ++ } ++ ++ if ((cs == 0x10) && kvaddr) { ++ if (is_kernel_text(rip) && IS_KVADDR(rsp) && ++ (rsp == (kvaddr + SIZE(pt_regs) + 8))) ++ return TRUE; ++ } ++ ++ if ((cs == 0x10) && kvaddr) { ++ if (is_kernel_text(rip) && IS_KVADDR(rsp) && ++ (rsp == (kvaddr + SIZE(pt_regs)))) ++ return TRUE; ++ } ++ ++ if ((cs == 0x10) && kvaddr) { ++ if (is_kernel_text(rip) && IS_KVADDR(rsp) && ++ x86_64_in_exception_stack(bt)) ++ return TRUE; ++ } ++ ++ if ((cs == 0x33) && (ss == 0x2b)) { ++ if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc)) ++ return TRUE; ++ } ++ ++ if (XEN() && ((cs == 0x33) || (cs == 0xe033)) && ++ ((ss == 0x2b) || (ss == 0xe02b))) { ++ if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc)) ++ return TRUE; ++ } ++ ++ if (XEN() && ((cs == 0x10000e030) || (cs == 0xe030)) && ++ (ss == 0xe02b)) { ++ if (is_kernel_text(rip) && IS_KVADDR(rsp)) ++ return TRUE; ++ } ++ ++ /* ++ * 32-bit segments ++ */ ++ if ((cs == 0x23) && (ss == 0x2b)) { ++ if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc)) ++ return TRUE; ++ } ++ ++ return FALSE; ++} ++ ++/* ++ * Get a stack frame combination of pc and ra from the most relevent spot. ++ */ ++static void ++x86_64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) ++{ ++ if (bt->flags & BT_DUMPFILE_SEARCH) ++ return x86_64_get_dumpfile_stack_frame(bt, pcp, spp); ++ ++ if (pcp) ++ *pcp = x86_64_get_pc(bt); ++ if (spp) ++ *spp = x86_64_get_sp(bt); ++} ++ ++/* ++ * Get the starting point for the active cpus in a diskdump/netdump. ++ */ ++static void ++x86_64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *rip, ulong *rsp) ++{ ++ int panic_task; ++ int i, estack, panic, stage; ++ char *sym; ++ struct syment *sp; ++ ulong *up; ++ struct bt_info bt_local, *bt; ++ struct machine_specific *ms; ++ char *user_regs; ++ ulong ur_rip, ur_rsp; ++ ulong halt_rip, halt_rsp; ++ ulong crash_kexec_rip, crash_kexec_rsp; ++ ++ bt = &bt_local; ++ BCOPY(bt_in, bt, sizeof(struct bt_info)); ++ ms = machdep->machspec; ++ ur_rip = ur_rsp = 0; ++ halt_rip = halt_rsp = 0; ++ crash_kexec_rip = crash_kexec_rsp = 0; ++ stage = 0; ++ estack = -1; ++ ++ panic_task = tt->panic_task == bt->task ? TRUE : FALSE; ++ ++ if (panic_task && bt->machdep) { ++ user_regs = bt->machdep; ++ ++ if (x86_64_eframe_verify(bt, ++ 0, ++ ULONG(user_regs + OFFSET(user_regs_struct_cs)), ++ ULONG(user_regs + OFFSET(user_regs_struct_ss)), ++ ULONG(user_regs + OFFSET(user_regs_struct_rip)), ++ ULONG(user_regs + OFFSET(user_regs_struct_rsp)), ++ ULONG(user_regs + OFFSET(user_regs_struct_eflags)))) { ++ bt->stkptr = ULONG(user_regs + ++ OFFSET(user_regs_struct_rsp)); ++ if (x86_64_in_irqstack(bt)) { ++ ur_rip = ULONG(user_regs + ++ OFFSET(user_regs_struct_rip)); ++ ur_rsp = ULONG(user_regs + ++ OFFSET(user_regs_struct_rsp)); ++ goto skip_stage; ++ } ++ } ++ } ++ ++ panic = FALSE; ++ ++ /* ++ * Check the process stack first. 
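++	 *  If nothing useful is found here, the skip_stage switch below
++	 *  moves on to the per-cpu IRQ stack and then to each of the 7
++	 *  exception stacks, before falling back to the ELF-header
++	 *  registers or a halt/idle location.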
++ */ ++next_stack: ++ for (i = 0, up = (ulong *)bt->stackbuf; ++ i < (bt->stacktop - bt->stackbase)/sizeof(ulong); i++, up++) { ++ sym = closest_symbol(*up); ++ if (XEN_CORE_DUMPFILE()) { ++ if (STREQ(sym, "crash_kexec")) { ++ sp = x86_64_function_called_by((*up)-5); ++ if (sp && STREQ(sp->name, "machine_kexec")) { ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ return; ++ } ++ } ++ if (STREQ(sym, "xen_machine_kexec")) { ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ return; ++ } ++ } else if (STREQ(sym, "netconsole_netdump") || ++ STREQ(sym, "netpoll_start_netdump") || ++ STREQ(sym, "start_disk_dump") || ++ STREQ(sym, "disk_dump") || ++ STREQ(sym, "crash_kexec") || ++ STREQ(sym, "machine_kexec") || ++ STREQ(sym, "try_crashdump")) { ++ if (STREQ(sym, "crash_kexec")) { ++ sp = x86_64_function_called_by((*up)-5); ++ if (sp && STREQ(sp->name, "machine_kexec")) { ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ return; ++ } ++ } ++ /* ++ * Use second instance of crash_kexec if it exists. ++ */ ++ if (!(bt->flags & BT_TEXT_SYMBOLS) && ++ STREQ(sym, "crash_kexec") && !crash_kexec_rip) { ++ crash_kexec_rip = *up; ++ crash_kexec_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ continue; ++ } ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ return; ++ } ++ ++ if ((estack >= 0) && ++ (STREQ(sym, "nmi_watchdog_tick") || ++ STREQ(sym, "default_do_nmi"))) { ++ sp = x86_64_function_called_by((*up)-5); ++ if (!sp || !STREQ(sp->name, "die_nmi")) ++ continue; ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ bt_in->flags |= BT_START; ++ *rip = symbol_value("die_nmi"); ++ *rsp = (*rsp) - (7*sizeof(ulong)); ++ return; ++ } ++ ++ if (STREQ(sym, "panic")) { ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ panic = TRUE; ++ continue; /* keep looking for die */ ++ } ++ ++ if (STREQ(sym, "die")) { ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ for (i++, up++; i < LONGS_PER_STACK; i++, up++) { ++ sym = closest_symbol(*up); ++ if (STREQ(sym, "sysrq_handle_crash")) ++ goto next_sysrq; ++ } ++ return; ++ } ++ ++ if (STREQ(sym, "sysrq_handle_crash")) { ++next_sysrq: ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ pc->flags |= SYSRQ; ++ for (i++, up++; i < LONGS_PER_STACK; i++, up++) { ++ sym = closest_symbol(*up); ++ if (STREQ(sym, "sysrq_handle_crash")) ++ goto next_sysrq; ++ } ++ return; ++ } ++ ++ if (!panic_task && (stage > 0) && ++ STREQ(sym, "smp_call_function_interrupt")) { ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ return; ++ } ++ ++ if (!panic_task && STREQ(sym, "crash_nmi_callback")) { ++ *rip = *up; ++ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ return; ++ } ++ ++ if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && ++ (stage == 0) && STREQ(sym, "safe_halt")) { ++ halt_rip = *up; ++ halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ } ++ ++ if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && ++ !halt_rip && (stage == 0) && STREQ(sym, "xen_idle")) { ++ halt_rip = *up; ++ halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ } ++ ++ if (!XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && ++ !halt_rip && (stage == 0) && STREQ(sym, "cpu_idle")) { ++ halt_rip = *up; ++ halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); ++ } ++ } ++ ++ if (panic) ++ return; ++ ++ if (crash_kexec_rip) { ++ *rip = 
crash_kexec_rip; ++ *rsp = crash_kexec_rsp; ++ return; ++ } ++ ++skip_stage: ++ switch (stage) ++ { ++ /* ++ * Now check the processor's interrupt stack. ++ */ ++ case 0: ++ bt->stackbase = ms->stkinfo.ibase[bt->tc->processor]; ++ bt->stacktop = ms->stkinfo.ibase[bt->tc->processor] + ++ ms->stkinfo.isize; ++ console("x86_64_get_dumpfile_stack_frame: searching IRQ stack at %lx\n", ++ bt->stackbase); ++ bt->stackbuf = ms->irqstack; ++ alter_stackbuf(bt); ++ stage = 1; ++ goto next_stack; ++ ++ /* ++ * Check the exception stacks. ++ */ ++ case 1: ++ if (++estack == 7) ++ break; ++ bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][estack]; ++ bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][estack] + ++ ms->stkinfo.esize; ++ console("x86_64_get_dumpfile_stack_frame: searching %s estack at %lx\n", ++ x86_64_exception_stacks[estack], bt->stackbase); ++ if (!(bt->stackbase)) ++ goto skip_stage; ++ bt->stackbuf = ms->irqstack; ++ alter_stackbuf(bt); ++ goto next_stack; ++ ++ } ++ ++ /* ++ * We didn't find what we were looking for, so just use what was ++ * passed in from the ELF header. ++ */ ++ if (ur_rip && ur_rsp) { ++ *rip = ur_rip; ++ *rsp = ur_rsp; ++ return; ++ } ++ ++ if (halt_rip && halt_rsp) { ++ *rip = halt_rip; ++ *rsp = halt_rsp; ++ return; ++ } ++ ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "x86_64_get_dumpfile_stack_frame: cannot find anything useful (task: %lx)\n", ++ bt->task); ++ ++ bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH; ++ ++ machdep->get_stack_frame(bt, rip, rsp); ++} ++ ++/* ++ * Get the saved RSP from the task's thread_struct. ++ */ ++static ulong ++x86_64_get_sp(struct bt_info *bt) ++{ ++ ulong offset, rsp; ++ ++ if (tt->flags & THREAD_INFO) { ++ readmem(bt->task + OFFSET(task_struct_thread) + ++ OFFSET(thread_struct_rsp), KVADDR, ++ &rsp, sizeof(void *), ++ "thread_struct rsp", FAULT_ON_ERROR); ++ return rsp; ++ } ++ ++ offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rsp); ++ ++ return GET_STACK_ULONG(offset); ++} ++ ++/* ++ * Get the saved PC from the task's thread_struct if it exists; ++ * otherwise just use the "thread_return" label value. ++ */ ++static ulong ++x86_64_get_pc(struct bt_info *bt) ++{ ++ ulong offset, rip; ++ ++ if (INVALID_MEMBER(thread_struct_rip)) ++ return symbol_value("thread_return"); ++ ++ if (tt->flags & THREAD_INFO) { ++ readmem(bt->task + OFFSET(task_struct_thread) + ++ OFFSET(thread_struct_rip), KVADDR, ++ &rip, sizeof(void *), ++ "thread_struct rip", FAULT_ON_ERROR); ++ return rip; ++ } ++ ++ offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rip); ++ ++ return GET_STACK_ULONG(offset); ++} ++ ++ ++/* ++ * Do the work for x86_64_get_sp() and x86_64_get_pc(). ++ */ ++static void ++get_x86_64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) ++{ ++ error(FATAL, "get_x86_64_frame: TBD\n"); ++} ++ ++/* ++ * Do the work for cmd_irq(). 
++ */ ++static void ++x86_64_dump_irq(int irq) ++{ ++ if (symbol_exists("irq_desc")) { ++ machdep->dump_irq = generic_dump_irq; ++ return(generic_dump_irq(irq)); ++ } ++ ++ error(FATAL, "x86_64_dump_irq: irq_desc[] does not exist?\n"); ++} ++ ++/* ++ * Do the work for irq -d ++ */ ++void ++x86_64_display_idt_table(void) ++{ ++ int i; ++ char *idt_table_buf; ++ char buf[BUFSIZE]; ++ ulong *ip; ++ ++ idt_table_buf = GETBUF(SIZE(gate_struct) * 256); ++ readmem(symbol_value("idt_table"), KVADDR, idt_table_buf, ++ SIZE(gate_struct) * 256, "idt_table", FAULT_ON_ERROR); ++ ip = (ulong *)idt_table_buf; ++ ++ for (i = 0; i < 256; i++, ip += 2) { ++ if (i < 10) ++ fprintf(fp, " "); ++ else if (i < 100) ++ fprintf(fp, " "); ++ fprintf(fp, "[%d] %s\n", ++ i, x86_64_extract_idt_function(ip, buf, NULL)); ++ } ++ ++ FREEBUF(idt_table_buf); ++} ++ ++/* ++ * Extract the function name out of the IDT entry. ++ */ ++static char * ++x86_64_extract_idt_function(ulong *ip, char *buf, ulong *retaddr) ++{ ++ ulong i1, i2, addr; ++ char locbuf[BUFSIZE]; ++ physaddr_t phys; ++ ++ if (buf) ++ BZERO(buf, BUFSIZE); ++ ++ i1 = *ip; ++ i2 = *(ip+1); ++ ++ i2 <<= 32; ++ addr = i2 & 0xffffffff00000000; ++ addr |= (i1 & 0xffff); ++ i1 >>= 32; ++ addr |= (i1 & 0xffff0000); ++ ++ if (retaddr) ++ *retaddr = addr; ++ ++ if (!buf) ++ return NULL; ++ ++ value_to_symstr(addr, locbuf, 0); ++ if (strlen(locbuf)) ++ sprintf(buf, locbuf); ++ else { ++ sprintf(buf, "%016lx", addr); ++ if (kvtop(NULL, addr, &phys, 0)) { ++ addr = machdep->kvbase + (ulong)phys; ++ if (value_to_symstr(addr, locbuf, 0)) { ++ strcat(buf, " <"); ++ strcat(buf, locbuf); ++ strcat(buf, ">"); ++ } ++ } ++ } ++ ++ return buf; ++} ++ ++/* ++ * Filter disassembly output if the output radix is not gdb's default 10 ++ */ ++static int ++x86_64_dis_filter(ulong vaddr, char *inbuf) ++{ ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char *colon, *p1; ++ int argc; ++ char *argv[MAXARGS]; ++ ulong value; ++ ++ if (!inbuf) ++ return TRUE; ++/* ++ * For some reason gdb can go off into the weeds translating text addresses, ++ * (on alpha -- not necessarily seen on x86_64) so this routine both fixes the ++ * references as well as imposing the current output radix on the translations. ++ */ ++ console("IN: %s", inbuf); ++ ++ colon = strstr(inbuf, ":"); ++ ++ if (colon) { ++ sprintf(buf1, "0x%lx <%s>", vaddr, ++ value_to_symstr(vaddr, buf2, pc->output_radix)); ++ sprintf(buf2, "%s%s", buf1, colon); ++ strcpy(inbuf, buf2); ++ } ++ ++ strcpy(buf1, inbuf); ++ argc = parse_line(buf1, argv); ++ ++ if ((FIRSTCHAR(argv[argc-1]) == '<') && ++ (LASTCHAR(argv[argc-1]) == '>')) { ++ p1 = rindex(inbuf, '<'); ++ while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) ++ p1--; ++ ++ if (!STRNEQ(p1, " 0x")) ++ return FALSE; ++ p1++; ++ ++ if (!extract_hex(p1, &value, NULLCHAR, TRUE)) ++ return FALSE; ++ ++ sprintf(buf1, "0x%lx <%s>\n", value, ++ value_to_symstr(value, buf2, pc->output_radix)); ++ ++ sprintf(p1, buf1); ++ ++ } else if (STREQ(argv[argc-2], "callq") && ++ hexadecimal(argv[argc-1], 0)) { ++ /* ++ * Update module code of the form: ++ * ++ * callq 0xffffffffa0017aa0 ++ * ++ * to show a bracketed direct call target. ++ */ ++ p1 = &LASTCHAR(inbuf); ++ ++ if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) { ++ sprintf(buf1, " <%s>\n", ++ value_to_symstr(value, buf2, ++ pc->output_radix)); ++ if (IS_MODULE_VADDR(value) && ++ !strstr(buf2, "+")) ++ sprintf(p1, buf1); ++ } ++ } ++ ++ console(" %s", inbuf); ++ ++ return TRUE; ++} ++ ++ ++/* ++ * Override smp_num_cpus if possible and necessary. 
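++ * The count is derived by walking the per-cpu x8664_pda entries
++ * (either the cpu_pda[] array or the _cpu_pda pointer array) and
++ * counting entries whose cpunumber matches the expected index,
++ * stopping at the first unreadable or inconsistent entry.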
++ */ ++int ++x86_64_get_smp_cpus(void) ++{ ++ int i, cpus, nr_pda, cpunumber, _cpu_pda; ++ char *cpu_pda_buf; ++ ulong level4_pgt, cpu_pda_addr; ++ ++ if (!VALID_STRUCT(x8664_pda)) ++ return 1; ++ ++ cpu_pda_buf = GETBUF(SIZE(x8664_pda)); ++ ++ if (LKCD_KERNTYPES()) { ++ if (symbol_exists("_cpu_pda")) ++ _cpu_pda = TRUE; + else +- rsp += SIZE(pt_regs); +- level++; ++ _cpu_pda = FALSE; ++ nr_pda = get_cpus_possible(); ++ } else { ++ if (symbol_exists("_cpu_pda")) { ++ if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0))) ++ nr_pda = NR_CPUS; ++ _cpu_pda = TRUE; ++ } else { ++ if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) ++ nr_pda = NR_CPUS; ++ _cpu_pda = FALSE; ++ } ++ } ++ for (i = cpus = 0; i < nr_pda; i++) { ++ if (_cpu_pda) { ++ if (!_CPU_PDA_READ(i, cpu_pda_buf)) ++ break; ++ } else { ++ if (!CPU_PDA_READ(i, cpu_pda_buf)) ++ break; ++ } ++ if (VALID_MEMBER(x8664_pda_level4_pgt)) { ++ level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); ++ if (!VALID_LEVEL4_PGT_ADDR(level4_pgt)) ++ break; ++ } ++ cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber)); ++ if (cpunumber != cpus) ++ break; ++ cpus++; ++ } ++ ++ FREEBUF(cpu_pda_buf); ++ ++ return cpus; ++} ++ ++/* ++ * Machine dependent command. ++ */ ++void ++x86_64_cmd_mach(void) ++{ ++ int c; ++ ++ while ((c = getopt(argcnt, args, "cm")) != EOF) { ++ switch(c) ++ { ++ case 'c': ++ x86_64_display_cpu_data(); ++ return; ++ ++ case 'm': ++ x86_64_display_memmap(); ++ return; ++ ++ default: ++ argerrs++; ++ break; ++ } ++ } ++ ++ if (argerrs) ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ ++ x86_64_display_machine_stats(); ++} ++ ++/* ++ * "mach" command output. ++ */ ++static void ++x86_64_display_machine_stats(void) ++{ ++ struct new_utsname *uts; ++ char buf[BUFSIZE]; ++ ulong mhz; ++ ++ uts = &kt->utsname; ++ ++ fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); ++ fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); ++ fprintf(fp, " CPUS: %d\n", kt->cpus); ++ fprintf(fp, " PROCESSOR SPEED: "); ++ if ((mhz = machdep->processor_speed())) ++ fprintf(fp, "%ld Mhz\n", mhz); ++ else ++ fprintf(fp, "(unknown)\n"); ++ fprintf(fp, " HZ: %d\n", machdep->hz); ++ fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); ++// fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); ++ fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); ++ fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); ++ if (machdep->flags & VMEMMAP) ++ fprintf(fp, "KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr); ++ fprintf(fp, " KERNEL START MAP: %lx\n", __START_KERNEL_map); ++ fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR); ++ fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); ++} ++ ++/* ++ * "mach -c" ++ */ ++static void ++x86_64_display_cpu_data(void) ++{ ++ int cpu, cpus, boot_cpu, _cpu_pda; ++ ulong cpu_data; ++ ulong cpu_pda, cpu_pda_addr; ++ ++ if (symbol_exists("cpu_data")) { ++ cpu_data = symbol_value("cpu_data"); ++ cpus = kt->cpus; ++ boot_cpu = FALSE; ++ } else if (symbol_exists("boot_cpu_data")) { ++ cpu_data = symbol_value("boot_cpu_data"); ++ boot_cpu = TRUE; ++ cpus = 1; ++ } ++ if (symbol_exists("_cpu_pda")) { ++ cpu_pda = symbol_value("_cpu_pda"); ++ _cpu_pda = TRUE; ++ } else if (symbol_exists("cpu_pda")) { ++ cpu_pda = symbol_value("cpu_pda"); ++ _cpu_pda = FALSE; ++ } ++ ++ for (cpu = 0; cpu < cpus; cpu++) { ++ if (boot_cpu) ++ fprintf(fp, "BOOT CPU:\n"); ++ else ++ fprintf(fp, "%sCPU %d:\n", cpu ? 
"\n" : "", cpu); ++ ++ dump_struct("cpuinfo_x86", cpu_data, 0); ++ fprintf(fp, "\n"); ++ ++ if (_cpu_pda) { ++ readmem(cpu_pda, KVADDR, &cpu_pda_addr, ++ sizeof(unsigned long), "_cpu_pda addr", FAULT_ON_ERROR); ++ dump_struct("x8664_pda", cpu_pda_addr, 0); ++ cpu_pda += sizeof(void *); ++ } else { ++ dump_struct("x8664_pda", cpu_pda, 0); ++ cpu_pda += SIZE(x8664_pda); ++ } ++ cpu_data += SIZE(cpuinfo_x86); ++ } ++} ++ ++/* ++ * "mach -m" ++ */ ++static char *e820type[] = { ++ "(invalid type)", ++ "E820_RAM", ++ "E820_RESERVED", ++ "E820_ACPI", ++ "E820_NVS", ++}; ++ ++static void ++x86_64_display_memmap(void) ++{ ++ ulong e820; ++ int nr_map, i; ++ char *buf, *e820entry_ptr; ++ ulonglong addr, size; ++ uint type; ++ ++ e820 = symbol_value("e820"); ++ if (CRASHDEBUG(1)) ++ dump_struct("e820map", e820, RADIX(16)); ++ buf = (char *)GETBUF(SIZE(e820map)); ++ ++ readmem(e820, KVADDR, &buf[0], SIZE(e820map), ++ "e820map", FAULT_ON_ERROR); ++ ++ nr_map = INT(buf + OFFSET(e820map_nr_map)); ++ ++ fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); ++ ++ for (i = 0; i < nr_map; i++) { ++ e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i); ++ addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr)); ++ size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size)); ++ type = UINT(e820entry_ptr + OFFSET(e820entry_type)); ++ fprintf(fp, "%016llx - %016llx %s\n", addr, addr+size, ++ e820type[type]); ++ } ++} ++ ++ ++static const char *hook_files[] = { ++ "arch/x86_64/kernel/entry.S", ++ "arch/x86_64/kernel/head.S", ++ "arch/x86_64/kernel/semaphore.c" ++}; ++ ++#define ENTRY_S ((char **)&hook_files[0]) ++#define HEAD_S ((char **)&hook_files[1]) ++#define SEMAPHORE_C ((char **)&hook_files[2]) ++ ++static struct line_number_hook x86_64_line_number_hooks[] = { ++ {"ret_from_fork", ENTRY_S}, ++ {"system_call", ENTRY_S}, ++ {"int_ret_from_sys_call", ENTRY_S}, ++ {"ptregscall_common", ENTRY_S}, ++ {"stub_execve", ENTRY_S}, ++ {"stub_rt_sigreturn", ENTRY_S}, ++ {"common_interrupt", ENTRY_S}, ++ {"ret_from_intr", ENTRY_S}, ++ {"load_gs_index", ENTRY_S}, ++ {"arch_kernel_thread", ENTRY_S}, ++ {"execve", ENTRY_S}, ++ {"page_fault", ENTRY_S}, ++ {"coprocessor_error", ENTRY_S}, ++ {"simd_coprocessor_error", ENTRY_S}, ++ {"device_not_available", ENTRY_S}, ++ {"debug", ENTRY_S}, ++ {"nmi", ENTRY_S}, ++ {"int3", ENTRY_S}, ++ {"overflow", ENTRY_S}, ++ {"bounds", ENTRY_S}, ++ {"invalid_op", ENTRY_S}, ++ {"coprocessor_segment_overrun", ENTRY_S}, ++ {"reserved", ENTRY_S}, ++ {"double_fault", ENTRY_S}, ++ {"invalid_TSS", ENTRY_S}, ++ {"segment_not_present", ENTRY_S}, ++ {"stack_segment", ENTRY_S}, ++ {"general_protection", ENTRY_S}, ++ {"alignment_check", ENTRY_S}, ++ {"divide_error", ENTRY_S}, ++ {"spurious_interrupt_bug", ENTRY_S}, ++ {"machine_check", ENTRY_S}, ++ {"call_debug", ENTRY_S}, ++ ++ {NULL, NULL} /* list must be NULL-terminated */ ++}; ++ ++static void ++x86_64_dump_line_number(ulong callpc) ++{ ++ error(FATAL, "x86_64_dump_line_number: TBD\n"); ++} ++ ++void ++x86_64_compiler_warning_stub(void) ++{ ++ struct line_number_hook *lhp; ++ char **p; ++ ++ lhp = &x86_64_line_number_hooks[0]; lhp++; ++ p = ENTRY_S; ++ x86_64_back_trace(NULL, NULL); ++ get_x86_64_frame(NULL, NULL, NULL); ++ x86_64_dump_line_number(0); ++} ++ ++/* ++ * Force the VM address-range selection via: ++ * ++ * --machdep vm=orig ++ * --machdep vm=2.6.11 ++ * ++ * Force the phys_base address via: ++ * ++ * --machdep phys_base=
++ * ++ * Force the IRQ stack back-link via: ++ * ++ * --machdep irq_eframe_link= ++ */ ++ ++void ++parse_cmdline_arg(void) ++{ ++ int i, c, errflag; ++ char *p; ++ char buf[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ int megabytes; ++ int lines = 0; ++ int vm_flag; ++ ulong value; ++ ++ if (!strstr(machdep->cmdline_arg, "=")) { ++ error(WARNING, "ignoring --machdep option: %s\n\n", ++ machdep->cmdline_arg); ++ return; + } + +- /* +- * Walk the process stack. +- */ +- for (i = (rsp - bt->stackbase)/sizeof(ulong); +- !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { ++ strcpy(buf, machdep->cmdline_arg); + +- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ for (p = buf; *p; p++) { ++ if (*p == ',') ++ *p = ' '; ++ } + +- if (!is_kernel_text(*up)) +- continue; ++ c = parse_line(buf, arglist); + +- if ((bt->flags & BT_CHECK_CALLER)) { +- /* +- * A non-zero offset value from the value_search() +- * lets us know if it's a real text return address. +- */ +- spt = value_search(*up, &offset); +- /* +- * sp gets the syment of the function that the text +- * routine above called before leaving its return +- * address on the stack -- if it can be determined. +- */ +- sp = x86_64_function_called_by((*up)-5); ++ for (i = vm_flag = 0; i < c; i++) { ++ errflag = 0; + +- if (sp == NULL) { +- /* +- * We were unable to get the called function. +- * If the text address had an offset, then +- * it must have made an indirect call, and +- * can't have called our target function. +- */ +- if (offset) { +- if (CRASHDEBUG(1)) +- fprintf(ofp, +- "< ignoring %s() -- makes indirect call and NOT %s()>\n", +- spt->name, +- bt->call_target); ++ if (STRNEQ(arglist[i], "vm=")) { ++ vm_flag++; ++ p = arglist[i] + strlen("vm="); ++ if (strlen(p)) { ++ if (STREQ(p, "orig")) { ++ machdep->flags |= VM_ORIG; ++ continue; ++ } else if (STREQ(p, "2.6.11")) { ++ machdep->flags |= VM_2_6_11; ++ continue; ++ } else if (STREQ(p, "xen")) { ++ machdep->flags |= VM_XEN; ++ continue; ++ } else if (STREQ(p, "xen-rhel4")) { ++ machdep->flags |= VM_XEN_RHEL4; ++ continue; ++ } ++ } ++ } else if (STRNEQ(arglist[i], "phys_base=")) { ++ megabytes = FALSE; ++ if ((LASTCHAR(arglist[i]) == 'm') || ++ (LASTCHAR(arglist[i]) == 'M')) { ++ LASTCHAR(arglist[i]) = NULLCHAR; ++ megabytes = TRUE; ++ } ++ p = arglist[i] + strlen("phys_base="); ++ if (strlen(p)) { ++ if (megabytes) { ++ value = dtol(p, RETURN_ON_ERROR|QUIET, ++ &errflag); ++ } else ++ value = htol(p, RETURN_ON_ERROR|QUIET, ++ &errflag); ++ if (!errflag) { ++ if (megabytes) ++ value = MEGABYTES(value); ++ machdep->machspec->phys_base = value; ++ error(NOTE, ++ "setting phys_base to: 0x%lx\n\n", ++ machdep->machspec->phys_base); ++ machdep->flags |= PHYS_BASE; ++ continue; ++ } ++ } ++ } else if (STRNEQ(arglist[i], "irq_eframe_link=")) { ++ p = arglist[i] + strlen("irq_eframe_link="); ++ if (strlen(p)) { ++ value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); ++ if (!errflag) { ++ machdep->machspec->irq_eframe_link = value; + continue; + } +- } else if (!STREQ(sp->name, bt->call_target)) { +- /* +- * We got function called by the text routine, +- * but it's not our target function. 
+- */ +- if (CRASHDEBUG(2)) +- fprintf(ofp, +- "< ignoring %s() -- calls %s() and NOT %s()>\n", +- spt->name, sp->name, +- bt->call_target); +- continue; + } + } + +- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) ++ error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); ++ lines++; ++ } ++ ++ if (vm_flag) { ++ switch (machdep->flags & VM_FLAGS) + { +- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: +- last_process_stack_eframe = rsp + 8; +- rsp += SIZE(pt_regs); +- i += SIZE(pt_regs)/sizeof(ulong); +- case BACKTRACE_ENTRY_DISPLAYED: +- level++; ++ case 0: + break; +- case BACKTRACE_ENTRY_IGNORED: ++ ++ case VM_ORIG: ++ error(NOTE, "using original x86_64 VM address ranges\n"); ++ lines++; + break; +- case BACKTRACE_COMPLETE: +- done = TRUE; ++ ++ case VM_2_6_11: ++ error(NOTE, "using 2.6.11 x86_64 VM address ranges\n"); ++ lines++; ++ break; ++ ++ case VM_XEN: ++ error(NOTE, "using xen x86_64 VM address ranges\n"); ++ lines++; + break; +- } +- } + +- if (!irq_eframe && !is_kernel_thread(bt->tc->task) && +- (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { +- user_mode_eframe = bt->stacktop - SIZE(pt_regs); +- if (last_process_stack_eframe < user_mode_eframe) +- x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + +- (bt->stacktop - bt->stackbase) - SIZE(pt_regs), +- bt, ofp); ++ case VM_XEN_RHEL4: ++ error(NOTE, "using RHEL4 xen x86_64 VM address ranges\n"); ++ lines++; ++ break; ++ ++ default: ++ error(WARNING, "cannot set multiple vm values\n"); ++ lines++; ++ machdep->flags &= ~VM_FLAGS; ++ break; ++ } + } + +- if (bt->flags & BT_TEXT_SYMBOLS) { +- if (BT_REFERENCE_FOUND(bt)) { +- print_task_header(fp, task_to_context(bt->task), 0); +- BCOPY(bt_in, bt, sizeof(struct bt_info)); +- bt->ref = NULL; +- machdep->back_trace(bt); +- fprintf(fp, "\n"); +- } ++ if (lines) ++ fprintf(fp, "\n"); ++} ++ ++void ++x86_64_clear_machdep_cache(void) ++{ ++ machdep->machspec->last_upml_read = 0; ++} ++ ++static void ++x86_64_irq_eframe_link_init(void) ++{ ++ int c; ++ struct syment *sp, *spn; ++ char buf[BUFSIZE]; ++ char link_register[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ ulong max_instructions; ++ ++ if (machdep->machspec->irq_eframe_link == UNINITIALIZED) ++ machdep->machspec->irq_eframe_link = 0; ++ else ++ return; ++ ++ if (THIS_KERNEL_VERSION < LINUX(2,6,9)) ++ return; ++ ++ if (!(sp = symbol_search("common_interrupt")) || ++ !(spn = next_symbol(NULL, sp))) { ++ return; ++ } ++ ++ max_instructions = spn->value - sp->value; ++ ++ open_tmpfile(); ++ ++ sprintf(buf, "x/%ldi 0x%lx", ++ max_instructions, sp->value); ++ ++ if (!gdb_pass_through(buf, pc->tmpfile, GNU_RETURN_ON_ERROR)) ++ return; ++ ++ link_register[0] = NULLCHAR; ++ ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (!strstr(buf, sp->name)) ++ break; ++ if ((c = parse_line(buf, arglist)) < 4) ++ continue; ++ if (strstr(arglist[2], "push")) ++ strcpy(link_register, arglist[3]); + } ++ close_tmpfile(); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "IRQ stack link register: %s\n", ++ strlen(link_register) ? ++ link_register : "undetermined"); ++ ++ if (STREQ(link_register, "%rbp")) ++ machdep->machspec->irq_eframe_link = 40; ++ + } + ++#include "netdump.h" ++ + /* +- * Functions that won't be called indirectly. +- * Add more to this as they are discovered. ++ * From the xen vmcore, create an index of mfns for each page that makes ++ * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. 
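++ * Two methods are used: if the dumpfile supplies p2m_mfn, the
++ * pfn_to_mfn_frame_list page it references is read directly and the
++ * non-zero mfn entries in each frame are counted; otherwise
++ * (KDUMP_CR3) the list is rebuilt by walking the guest page tables
++ * starting from cr3.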
+ */ +-static const char *direct_call_targets[] = { +- "schedule", +- "schedule_timeout", +- NULL +-}; ++ ++#define MAX_X86_64_FRAMES (512) ++#define MFNS_PER_FRAME (PAGESIZE()/sizeof(ulong)) + + static int +-is_direct_call_target(struct bt_info *bt) ++x86_64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) + { +- int i; ++ int i, j; ++ ulong kvaddr; ++ ulong *up; ++ ulong frames; ++ ulong frame_mfn[MAX_X86_64_FRAMES] = { 0 }; ++ int mfns[MAX_X86_64_FRAMES] = { 0 }; + +- if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER)) +- return FALSE; ++ /* ++ * Temporarily read physical (machine) addresses from vmcore by ++ * going directly to read_netdump() instead of via read_kdump(). ++ */ ++ pc->readmem = read_netdump; + +- for (i = 0; direct_call_targets[i]; i++) { +- if (STREQ(direct_call_targets[i], bt->call_target)) +- return TRUE; ++ if (xkd->flags & KDUMP_CR3) ++ goto use_cr3; ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "x86_64_xen_kdump_p2m_create: p2m_mfn: %lx\n", ++ xkd->p2m_mfn); ++ ++ if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(), ++ "xen kdump p2m mfn page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump p2m mfn page\n"); ++ ++ if (CRASHDEBUG(2)) ++ x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list"); ++ ++ for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_64_FRAMES; i++, up++) ++ frame_mfn[i] = *up; ++ ++ for (i = 0; i < MAX_X86_64_FRAMES; i++) { ++ if (!frame_mfn[i]) ++ break; ++ ++ if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page, ++ PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump p2m mfn list page\n"); ++ ++ for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++) ++ if (*up) ++ mfns[i]++; ++ ++ xkd->p2m_frames += mfns[i]; ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list page"); + } + +- return FALSE; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames); ++ ++ if ((xkd->p2m_mfn_frame_list = (ulong *) ++ malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) ++ error(FATAL, "cannot malloc p2m_frame_index_list"); ++ ++ for (i = 0, frames = xkd->p2m_frames; frames; i++) { ++ if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, ++ &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], ++ mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", ++ RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump p2m mfn list page\n"); ++ ++ frames -= mfns[i]; ++ } ++ ++ if (CRASHDEBUG(2)) { ++ for (i = 0; i < xkd->p2m_frames; i++) ++ fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); ++ fprintf(fp, "\n"); ++ } ++ ++ pc->readmem = read_kdump; ++ return TRUE; ++ ++use_cr3: ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "x86_64_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3); ++ ++ if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->machspec->pml4, ++ PAGESIZE(), "xen kdump cr3 page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump cr3 page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(fp, machdep->machspec->pml4, ++ "contents of PML4 page:"); ++ ++ kvaddr = symbol_value("end_pfn"); ++ if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page)) ++ return FALSE; ++ up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); ++ ++ xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ++ ((*up%(PAGESIZE()/sizeof(ulong))) ? 
1 : 0); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "end_pfn at %lx: %lx (%ld) -> %d p2m_frames\n", ++ kvaddr, *up, *up, xkd->p2m_frames); ++ ++ if ((xkd->p2m_mfn_frame_list = (ulong *) ++ malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) ++ error(FATAL, "cannot malloc p2m_frame_index_list"); ++ ++ kvaddr = symbol_value("phys_to_machine_mapping"); ++ if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page)) ++ return FALSE; ++ up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); ++ kvaddr = *up; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr); ++ ++ machdep->last_pgd_read = BADADDR; ++ machdep->last_pmd_read = BADADDR; ++ machdep->last_ptbl_read = BADADDR; ++ ++ for (i = 0; i < xkd->p2m_frames; i++) { ++ xkd->p2m_mfn_frame_list[i] = x86_64_xen_kdump_page_mfn(kvaddr); ++ kvaddr += PAGESIZE(); ++ } ++ ++ if (CRASHDEBUG(1)) { ++ for (i = 0; i < xkd->p2m_frames; i++) ++ fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); ++ fprintf(fp, "\n"); ++ } ++ ++ machdep->last_pgd_read = 0; ++ machdep->last_ptbl_read = 0; ++ machdep->last_pmd_read = 0; ++ pc->readmem = read_kdump; ++ ++ return TRUE; + } + +-static struct syment * +-x86_64_function_called_by(ulong rip) ++static char * ++x86_64_xen_kdump_load_page(ulong kvaddr, char *pgbuf) + { +- struct syment *sp; +- char buf[BUFSIZE], *p1; +- ulong value, offset; +- unsigned char byte; ++ ulong mfn; ++ ulong *pml4, *pgd, *pmd, *ptep; + +- value = 0; +- sp = NULL; ++ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); ++ mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); + +- if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte", +- RETURN_ON_ERROR)) +- return sp; ++ if (CRASHDEBUG(3)) ++ fprintf(fp, ++ "[%lx] pml4: %lx mfn: %lx pml4_index: %lx\n", ++ kvaddr, *pml4, mfn, pml4_index(kvaddr)); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(), ++ "xen kdump pud page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read/find pud page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(fp, machdep->pgd, ++ "contents of page upper directory page:"); + +- if (byte != 0xe8) +- return sp; ++ pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); ++ mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if (CRASHDEBUG(3)) ++ fprintf(fp, ++ "[%lx] pgd: %lx mfn: %lx pgd_index: %lx\n", ++ kvaddr, *pgd, mfn, pgd_index(kvaddr)); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), ++ "xen kdump pmd page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read/find pmd page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(fp, machdep->pmd, ++ "contents of page middle directory page:"); ++ ++ pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); ++ mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if (CRASHDEBUG(3)) ++ fprintf(fp, ++ "[%lx] pmd: %lx mfn: %lx pmd_index: %lx\n", ++ kvaddr, *pmd, mfn, pmd_index(kvaddr)); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), ++ "xen kdump page table page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read/find page table page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(fp, machdep->ptbl, ++ "contents of page table page:"); ++ ++ ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); ++ mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if (CRASHDEBUG(3)) ++ fprintf(fp, ++ "[%lx] ptep: %lx mfn: %lx pte_index: %lx\n", ++ kvaddr, *ptep, mfn, pte_index(kvaddr)); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), ++ "xen kdump page table page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read/find pte page\n"); ++ ++ if 
(CRASHDEBUG(7)) ++ x86_64_debug_dump_page(fp, pgbuf, ++ "contents of page:"); ++ ++ return pgbuf; ++} ++ ++static ulong ++x86_64_xen_kdump_page_mfn(ulong kvaddr) ++{ ++ ulong mfn; ++ ulong *pml4, *pgd, *pmd, *ptep; ++ ++ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); ++ mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if ((mfn != machdep->last_pgd_read) && ++ !readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(), ++ "xen kdump pud entry", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read/find pud page\n"); ++ machdep->last_pgd_read = mfn; ++ ++ pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); ++ mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if ((mfn != machdep->last_pmd_read) && ++ !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), ++ "xen kdump pmd entry", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read/find pmd page\n"); ++ machdep->last_pmd_read = mfn; ++ ++ pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); ++ mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if ((mfn != machdep->last_ptbl_read) && ++ !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), ++ "xen kdump page table page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read/find page table page\n"); ++ machdep->last_ptbl_read = mfn; ++ ++ ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); ++ mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ return mfn; ++} ++ ++#include "xendump.h" ++ ++/* ++ * Determine the physical address base for relocatable kernels. ++ */ ++static void ++x86_64_calc_phys_base(void) ++{ ++ int i; ++ FILE *iomem; ++ char buf[BUFSIZE]; ++ char *p1; ++ ulong phys_base, text_start, kernel_code_start; ++ int errflag; ++ struct vmcore_data *vd; ++ static struct xendump_data *xd; ++ Elf64_Phdr *phdr; ++ ++ if (machdep->flags & PHYS_BASE) /* --machdep override */ ++ return; ++ ++ machdep->machspec->phys_base = 0; /* default/traditional */ ++ ++ if (!kernel_symbol_exists("phys_base")) ++ return; ++ ++ if (!symbol_exists("_text")) ++ return; ++ else ++ text_start = symbol_value("_text"); ++ ++ if (ACTIVE()) { ++ if ((iomem = fopen("/proc/iomem", "r")) == NULL) ++ return; ++ ++ errflag = 1; ++ while (fgets(buf, BUFSIZE, iomem)) { ++ if (strstr(buf, ": Kernel code")) { ++ clean_line(buf); ++ errflag = 0; ++ break; ++ } ++ } ++ fclose(iomem); ++ ++ if (errflag) ++ return; ++ ++ if (!(p1 = strstr(buf, "-"))) ++ return; ++ else ++ *p1 = NULLCHAR; ++ ++ errflag = 0; ++ kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag); ++ if (errflag) ++ return; ++ ++ machdep->machspec->phys_base = kernel_code_start - ++ (text_start - __START_KERNEL_map); ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "_text: %lx ", text_start); ++ fprintf(fp, "Kernel code: %lx -> ", kernel_code_start); ++ fprintf(fp, "phys_base: %lx\n\n", ++ machdep->machspec->phys_base); ++ } ++ ++ return; ++ } + +- sprintf(buf, "x/i 0x%lx", rip); ++ /* ++ * Get relocation value from whatever dumpfile format is being used. 
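++	 * Compressed diskdumps carry phys_base in the dump header, kdump
++	 * ELF vmcores expose it via the first kernel-text PT_LOAD segment
++	 * (p_paddr - (p_vaddr & ~__START_KERNEL_map)), and xendumps of
++	 * relocatable kernels presume a default kernel-code start of 2MB.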
++ */ + +- open_tmpfile2(); +- if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { +- rewind(pc->tmpfile2); +- while (fgets(buf, BUFSIZE, pc->tmpfile2)) { +- if ((p1 = strstr(buf, "callq")) && +- whitespace(*(p1-1))) { +- if (extract_hex(p1, &value, NULLCHAR, TRUE)) +- break; ++ if (DISKDUMP_DUMPFILE()) { ++ if (diskdump_phys_base(&phys_base)) { ++ machdep->machspec->phys_base = phys_base; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "compressed kdump: phys_base: %lx\n", ++ phys_base); ++ } ++ return; ++ } ++ ++ if ((vd = get_kdump_vmcore_data())) { ++ for (i = 0; i < vd->num_pt_load_segments; i++) { ++ phdr = vd->load64 + i; ++ if ((phdr->p_vaddr >= __START_KERNEL_map) && ++ !(IS_VMALLOC_ADDR(phdr->p_vaddr))) { ++ ++ machdep->machspec->phys_base = phdr->p_paddr - ++ (phdr->p_vaddr & ~(__START_KERNEL_map)); ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "p_vaddr: %lx p_paddr: %lx -> ", ++ phdr->p_vaddr, phdr->p_paddr); ++ fprintf(fp, "phys_base: %lx\n\n", ++ machdep->machspec->phys_base); ++ } ++ break; + } + } ++ ++ return; + } +- close_tmpfile2(); + +- if (value) +- sp = value_search(value, &offset); ++ if ((xd = get_xendump_data())) { ++ if (text_start == __START_KERNEL_map) { ++ /* ++ * Xen kernels are not relocable (yet) and don't have ++ * the "phys_base" entry point, so this is most likely ++ * a xendump of a fully-virtualized relocatable kernel. ++ * No clues exist in the xendump header, so hardwire ++ * phys_base to 2MB and hope for the best. ++ */ ++ machdep->machspec->phys_base = 0x200000; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "default relocatable phys_base: %lx\n", ++ machdep->machspec->phys_base); + +- return sp; +-} ++ } else if (text_start > __START_KERNEL_map) { ++ switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) ++ { ++ /* ++ * If this is a new ELF-style xendump with no ++ * p2m information, then it also must be a ++ * fully-virtualized relocatable kernel. Again, ++ * the xendump header is useless, and we don't ++ * have /proc/iomem, so presume that the kernel ++ * code starts at 2MB. ++ */ ++ case (XC_CORE_ELF|XC_CORE_NO_P2M): ++ machdep->machspec->phys_base = 0x200000 - ++ (text_start - __START_KERNEL_map); ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "default relocatable " ++ "phys_base: %lx\n", ++ machdep->machspec->phys_base); ++ break; + +-/* +- * Unroll the kernel stack using a minimal amount of gdb services. +- */ +-static void +-x86_64_back_trace(struct gnu_request *req, struct bt_info *bt) +-{ +- error(FATAL, "x86_64_back_trace: unused\n"); ++ default: ++ break; ++ } ++ } ++ } + } + + + /* +- * Print exception frame information for x86_64. +- * +- * Pid: 0, comm: swapper Not tainted 2.6.5-1.360phro.rootsmp +- * RIP: 0010:[] {default_idle+36} +- * RSP: 0018:ffffffff8048bfd8 EFLAGS: 00000246 +- * RAX: 0000000000000000 RBX: ffffffff8010f510 RCX: 0000000000000018 +- * RDX: 0000010001e37280 RSI: ffffffff803ac0a0 RDI: 000001007f43c400 +- * RBP: 0000000000000000 R08: ffffffff8048a000 R09: 0000000000000000 +- * R10: ffffffff80482188 R11: 0000000000000001 R12: 0000000000000000 +- * R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 +- * FS: 0000002a96e14fc0(0000) GS:ffffffff80481d80(0000) GS:0000000055578aa0 +- * CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b +- * CR2: 0000002a9556b000 CR3: 0000000000101000 CR4: 00000000000006e0 +- * ++ * Create an index of mfns for each page that makes up the ++ * kernel's complete phys_to_machine_mapping[max_pfn] array. 
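++ * The starting point is cr3 from the vcpu_guest_context ctrlreg[]
++ * array in the xendump header; end_pfn determines how many frames
++ * are required, and each page of the p2m array is then translated
++ * into its dumpfile page index via the guest page tables.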
+ */ +- +-static long +-x86_64_exception_frame(ulong flags, ulong kvaddr, char *local, +- struct bt_info *bt, FILE *ofp) ++static int ++x86_64_xendump_p2m_create(struct xendump_data *xd) + { +- long rip, rsp, cs, ss, rflags, orig_rax, rbp; +- long rax, rbx, rcx, rdx, rsi, rdi; +- long r8, r9, r10, r11, r12, r13, r14, r15; +- struct machine_specific *ms; +- char *pt_regs_buf; +- long verified; +- int err; +- +- ms = machdep->machspec; +- +- if (!(machdep->flags & PT_REGS_INIT)) { +- err = 0; +- err |= ((ms->pto.r15 = MEMBER_OFFSET("pt_regs", "r15")) == +- INVALID_OFFSET); +- err |= ((ms->pto.r14 = MEMBER_OFFSET("pt_regs", "r14")) == +- INVALID_OFFSET); +- err |= ((ms->pto.r13 = MEMBER_OFFSET("pt_regs", "r13")) == +- INVALID_OFFSET); +- err |= ((ms->pto.r12 = MEMBER_OFFSET("pt_regs", "r12")) == +- INVALID_OFFSET); +- err |= ((ms->pto.r11 = MEMBER_OFFSET("pt_regs", "r11")) == +- INVALID_OFFSET); +- err |= ((ms->pto.r10 = MEMBER_OFFSET("pt_regs", "r10")) == +- INVALID_OFFSET); +- err |= ((ms->pto.r9 = MEMBER_OFFSET("pt_regs", "r9")) == +- INVALID_OFFSET); +- err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == +- INVALID_OFFSET); +- err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) == +- INVALID_OFFSET); +- err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) == +- INVALID_OFFSET); +- err |= ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) == +- INVALID_OFFSET); +- err |= ((ms->pto.orig_rax = +- MEMBER_OFFSET("pt_regs", "orig_rax")) == +- INVALID_OFFSET); +- err |= ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == +- INVALID_OFFSET); ++ int i, idx; ++ ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset; ++ ulong *up; ++ off_t offset; + +- if (err) +- error(WARNING, "pt_regs structure has changed\n"); ++ if (!symbol_exists("phys_to_machine_mapping")) { ++ xd->flags |= XC_CORE_NO_P2M; ++ return TRUE; ++ } + +- machdep->flags |= PT_REGS_INIT; +- } ++ if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) == ++ INVALID_OFFSET) ++ error(FATAL, ++ "cannot determine vcpu_guest_context.ctrlreg offset\n"); ++ else if (CRASHDEBUG(1)) ++ fprintf(xd->ofp, ++ "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n", ++ ctrlreg_offset); ++ ++ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + ++ (off_t)ctrlreg_offset; ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ error(FATAL, "cannot lseek to xch_ctxt_offset\n"); ++ ++ if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != ++ sizeof(ctrlreg)) ++ error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n"); ++ ++ for (i = 0; CRASHDEBUG(1) && (i < 8); i++) ++ fprintf(xd->ofp, "ctrlreg[%d]: %lx\n", i, ctrlreg[i]); ++ ++ mfn = ctrlreg[3] >> PAGESHIFT(); ++ ++ if (!xc_core_mfn_to_page(mfn, machdep->machspec->pml4)) ++ error(FATAL, "cannot read/find cr3 page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(xd->ofp, machdep->machspec->pml4, ++ "contents of PML4 page:"); + +- if (kvaddr) { +- 
pt_regs_buf = GETBUF(SIZE(pt_regs)); +- readmem(kvaddr, KVADDR, pt_regs_buf, +- SIZE(pt_regs), "pt_regs", FAULT_ON_ERROR); +- } else +- pt_regs_buf = local; ++ kvaddr = symbol_value("end_pfn"); ++ if (!x86_64_xendump_load_page(kvaddr, xd)) ++ return FALSE; + +- rip = ULONG(pt_regs_buf + ms->pto.rip); +- rsp = ULONG(pt_regs_buf + ms->pto.rsp); +- cs = ULONG(pt_regs_buf + ms->pto.cs); +- ss = ULONG(pt_regs_buf + ms->pto.ss); +- rflags = ULONG(pt_regs_buf + ms->pto.eflags); +- orig_rax = ULONG(pt_regs_buf + ms->pto.orig_rax); +- rbp = ULONG(pt_regs_buf + ms->pto.rbp); +- rax = ULONG(pt_regs_buf + ms->pto.rax); +- rbx = ULONG(pt_regs_buf + ms->pto.rbx); +- rcx = ULONG(pt_regs_buf + ms->pto.rcx); +- rdx = ULONG(pt_regs_buf + ms->pto.rdx); +- rsi = ULONG(pt_regs_buf + ms->pto.rsi); +- rdi = ULONG(pt_regs_buf + ms->pto.rdi); +- r8 = ULONG(pt_regs_buf + ms->pto.r8); +- r9 = ULONG(pt_regs_buf + ms->pto.r9); +- r10 = ULONG(pt_regs_buf + ms->pto.r10); +- r11 = ULONG(pt_regs_buf + ms->pto.r11); +- r12 = ULONG(pt_regs_buf + ms->pto.r12); +- r13 = ULONG(pt_regs_buf + ms->pto.r13); +- r14 = ULONG(pt_regs_buf + ms->pto.r14); +- r15 = ULONG(pt_regs_buf + ms->pto.r15); ++ up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); ++ if (CRASHDEBUG(1)) ++ fprintf(xd->ofp, "end_pfn: %lx\n", *up); + +- verified = x86_64_eframe_verify(bt, +- kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase, +- cs, ss, rip, rsp, rflags); ++ xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ++ ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0); + +- /* +- * If it's print-if-verified request, don't print bogus eframes. +- */ +- if (!verified && ((flags & (EFRAME_VERIFY|EFRAME_PRINT)) == +- (EFRAME_VERIFY|EFRAME_PRINT))) +- flags &= ~EFRAME_PRINT; ++ if ((xd->xc_core.p2m_frame_index_list = (ulong *) ++ malloc(xd->xc_core.p2m_frames * sizeof(ulong))) == NULL) ++ error(FATAL, "cannot malloc p2m_frame_list"); + +- if (CRASHDEBUG(2)) +- fprintf(ofp, "< exception frame at: %lx >\n", kvaddr ? kvaddr : +- (local - bt->stackbuf) + bt->stackbase); ++ kvaddr = symbol_value("phys_to_machine_mapping"); ++ if (!x86_64_xendump_load_page(kvaddr, xd)) ++ return FALSE; + +- if (flags & EFRAME_PRINT) { +- if (flags & EFRAME_SEARCH) { +- fprintf(ofp, "\n %s-MODE EXCEPTION FRAME AT: %lx\n", +- cs & 3 ? "USER" : "KERNEL", +- kvaddr ? 
kvaddr : +- (local - bt->stackbuf) + bt->stackbase); +- } ++ up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "phys_to_machine_mapping: %lx\n", *up); + +- fprintf(ofp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n", +- rip, rsp, rflags); +- fprintf(ofp, " RAX: %016lx RBX: %016lx RCX: %016lx\n", +- rax, rbx, rcx); +- fprintf(ofp, " RDX: %016lx RSI: %016lx RDI: %016lx\n", +- rdx, rsi, rdi); +- fprintf(ofp, " RBP: %016lx R8: %016lx R9: %016lx\n", +- rbp, r8, r9); +- fprintf(ofp, " R10: %016lx R11: %016lx R12: %016lx\n", +- r10, r11, r12); +- fprintf(ofp, " R13: %016lx R14: %016lx R15: %016lx\n", +- r13, r14, r15); +- fprintf(ofp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n", +- orig_rax, cs, ss); ++ kvaddr = *up; ++ machdep->last_ptbl_read = BADADDR; + +- if (!verified) +- error(WARNING, "possibly bogus exception frame\n"); ++ for (i = 0; i < xd->xc_core.p2m_frames; i++) { ++ if ((idx = x86_64_xendump_page_index(kvaddr, xd)) == MFN_NOT_FOUND) ++ return FALSE; ++ xd->xc_core.p2m_frame_index_list[i] = idx; ++ kvaddr += PAGESIZE(); + } + +- if ((flags & EFRAME_PRINT) && BT_REFERENCE_CHECK(bt)) { +- x86_64_do_bt_reference_check(bt, rip, NULL); +- x86_64_do_bt_reference_check(bt, rsp, NULL); +- x86_64_do_bt_reference_check(bt, cs, NULL); +- x86_64_do_bt_reference_check(bt, ss, NULL); +- x86_64_do_bt_reference_check(bt, rflags, NULL); +- x86_64_do_bt_reference_check(bt, orig_rax, NULL); +- x86_64_do_bt_reference_check(bt, rbp, NULL); +- x86_64_do_bt_reference_check(bt, rax, NULL); +- x86_64_do_bt_reference_check(bt, rbx, NULL); +- x86_64_do_bt_reference_check(bt, rcx, NULL); +- x86_64_do_bt_reference_check(bt, rdx, NULL); +- x86_64_do_bt_reference_check(bt, rsi, NULL); +- x86_64_do_bt_reference_check(bt, rdi, NULL); +- x86_64_do_bt_reference_check(bt, r8, NULL); +- x86_64_do_bt_reference_check(bt, r9, NULL); +- x86_64_do_bt_reference_check(bt, r10, NULL); +- x86_64_do_bt_reference_check(bt, r11, NULL); +- x86_64_do_bt_reference_check(bt, r12, NULL); +- x86_64_do_bt_reference_check(bt, r13, NULL); +- x86_64_do_bt_reference_check(bt, r14, NULL); +- x86_64_do_bt_reference_check(bt, r15, NULL); +- } ++ machdep->last_ptbl_read = 0; + +- if (kvaddr) +- FREEBUF(pt_regs_buf); ++ return TRUE; ++} + +- if (flags & EFRAME_CS) +- return cs; +- else if (flags & EFRAME_VERIFY) +- return verified; ++static void ++x86_64_debug_dump_page(FILE *ofp, char *page, char *name) ++{ ++ int i; ++ ulong *up; + +- return 0; ++ fprintf(ofp, "%s\n", name); ++ ++ up = (ulong *)page; ++ for (i = 0; i < 256; i++) { ++ fprintf(ofp, "%016lx: %016lx %016lx\n", ++ (ulong)((i * 2) * sizeof(ulong)), ++ *up, *(up+1)); ++ up += 2; ++ } + } + + /* +- * Check that the verifiable registers contain reasonable data. ++ * Find the page associate with the kvaddr, and read its contents ++ * into the passed-in buffer. 
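++ * The kvaddr is resolved with a four-level walk (pml4, pud, pmd,
++ * pte); each level's mfn is read with xc_core_mfn_to_page(), and
++ * the page referenced by the final mfn is copied into xd->page.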
+ */ +-#define RAZ_MASK 0xffffffffffc08028 /* return-as-zero bits */ +- +-static int +-x86_64_eframe_verify(struct bt_info *bt, long kvaddr, long cs, long ss, +- long rip, long rsp, long rflags) ++static char * ++x86_64_xendump_load_page(ulong kvaddr, struct xendump_data *xd) + { +- if ((rflags & RAZ_MASK) || !(rflags & 0x2)) +- return FALSE; ++ ulong mfn; ++ ulong *pml4, *pgd, *pmd, *ptep; + +- if ((cs == 0x10) && (ss == 0x18)) { +- if (is_kernel_text(rip) && IS_KVADDR(rsp)) +- return TRUE; +- } ++ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); ++ mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); + +- if ((cs == 0x10) && kvaddr) { +- if (is_kernel_text(rip) && IS_KVADDR(rsp) && +- (rsp == (kvaddr + SIZE(pt_regs) + 8))) +- return TRUE; +- } ++ if (CRASHDEBUG(3)) ++ fprintf(xd->ofp, ++ "[%lx] pml4: %lx mfn: %lx pml4_index: %lx\n", ++ kvaddr, *pml4, mfn, pml4_index(kvaddr)); + +- if ((cs == 0x10) && kvaddr) { +- if (is_kernel_text(rip) && IS_KVADDR(rsp) && +- (rsp == (kvaddr + SIZE(pt_regs)))) +- return TRUE; +- } ++ if (!xc_core_mfn_to_page(mfn, machdep->pgd)) ++ error(FATAL, "cannot read/find pud page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(xd->ofp, machdep->pgd, ++ "contents of page upper directory page:"); ++ ++ pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); ++ mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if (CRASHDEBUG(3)) ++ fprintf(xd->ofp, ++ "[%lx] pgd: %lx mfn: %lx pgd_index: %lx\n", ++ kvaddr, *pgd, mfn, pgd_index(kvaddr)); ++ ++ if (!xc_core_mfn_to_page(mfn, machdep->pmd)) ++ error(FATAL, "cannot read/find pmd page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(xd->ofp, machdep->pmd, ++ "contents of page middle directory page:"); ++ ++ pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); ++ mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if (CRASHDEBUG(3)) ++ fprintf(xd->ofp, ++ "[%lx] pmd: %lx mfn: %lx pmd_index: %lx\n", ++ kvaddr, *pmd, mfn, pmd_index(kvaddr)); ++ ++ if (!xc_core_mfn_to_page(mfn, machdep->ptbl)) ++ error(FATAL, "cannot read/find page table page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(xd->ofp, machdep->ptbl, ++ "contents of page table page:"); ++ ++ ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); ++ mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); + +- if ((cs == 0x33) && (ss == 0x2b)) { +- if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc)) +- return TRUE; +- } ++ if (CRASHDEBUG(3)) ++ fprintf(xd->ofp, ++ "[%lx] ptep: %lx mfn: %lx pte_index: %lx\n", ++ kvaddr, *ptep, mfn, pte_index(kvaddr)); + +- return FALSE; ++ if (!xc_core_mfn_to_page(mfn, xd->page)) ++ error(FATAL, "cannot read/find pte page\n"); ++ ++ if (CRASHDEBUG(7)) ++ x86_64_debug_dump_page(xd->ofp, xd->page, ++ "contents of page:"); ++ ++ return xd->page; + } + + /* +- * Get a stack frame combination of pc and ra from the most relevent spot. ++ * Find the dumpfile page index associated with the kvaddr. 
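++ * This is the cached counterpart of x86_64_xendump_load_page(): the
++ * last_pgd_read/last_pmd_read/last_ptbl_read mfns are remembered so
++ * repeated lookups do not re-read the same directory pages, and the
++ * final pte's mfn is translated with xc_core_mfn_to_page_index().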
+ */ +-static void +-x86_64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) ++static int ++x86_64_xendump_page_index(ulong kvaddr, struct xendump_data *xd) + { +- if (bt->flags & BT_DUMPFILE_SEARCH) +- return x86_64_get_dumpfile_stack_frame(bt, pcp, spp); ++ int idx; ++ ulong mfn; ++ ulong *pml4, *pgd, *pmd, *ptep; + +- if (pcp) +- *pcp = x86_64_get_pc(bt); +- if (spp) +- *spp = x86_64_get_sp(bt); ++ pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); ++ mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if ((mfn != machdep->last_pgd_read) && ++ !xc_core_mfn_to_page(mfn, machdep->pgd)) ++ error(FATAL, "cannot read/find pud page\n"); ++ machdep->last_pgd_read = mfn; ++ ++ pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); ++ mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if ((mfn != machdep->last_pmd_read) && ++ !xc_core_mfn_to_page(mfn, machdep->pmd)) ++ error(FATAL, "cannot read/find pmd page\n"); ++ ++ machdep->last_pmd_read = mfn; ++ ++ pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); ++ mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if ((mfn != machdep->last_ptbl_read) && ++ !xc_core_mfn_to_page(mfn, machdep->ptbl)) ++ error(FATAL, "cannot read/find page table page\n"); ++ machdep->last_ptbl_read = mfn; ++ ++ ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); ++ mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); ++ ++ if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) ++ error(INFO, "cannot determine page index for %lx\n", ++ kvaddr); ++ ++ return idx; + } + + /* +- * Get the starting point for the active cpus in a diskdump/netdump. ++ * Pull the rsp from the cpu_user_regs struct in the header ++ * turn it into a task, and match it with the active_set. ++ * Unfortunately, the registers in the vcpu_guest_context ++ * are not necessarily those of the panic task, so for now ++ * let get_active_set_panic_task() get the right task. + */ +-static void +-x86_64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *rip, ulong *rsp) ++static ulong ++x86_64_xendump_panic_task(struct xendump_data *xd) + { +- int panic_task; +- int i, panic, stage; +- char *sym; +- struct syment *sp; +- ulong *up; +- struct bt_info bt_local, *bt; +- struct machine_specific *ms; +- char *user_regs; +- ulong ur_rip; +- ulong ur_rsp; ++ int i; ++ ulong rsp; ++ off_t offset; ++ ulong task; + +- bt = &bt_local; +- BCOPY(bt_in, bt, sizeof(struct bt_info)); +- ms = machdep->machspec; +- ur_rip = ur_rsp = 0; +- stage = 0; ++ if (INVALID_MEMBER(vcpu_guest_context_user_regs) || ++ INVALID_MEMBER(cpu_user_regs_esp)) ++ return NO_TASK; ++ ++ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + ++ (off_t)OFFSET(vcpu_guest_context_user_regs) + ++ (off_t)OFFSET(cpu_user_regs_rsp); ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ return NO_TASK; ++ ++ if (read(xd->xfd, &rsp, sizeof(ulong)) != sizeof(ulong)) ++ return NO_TASK; ++ ++ if (IS_KVADDR(rsp) && (task = stkptr_to_task(rsp))) { ++ ++ for (i = 0; i < NR_CPUS; i++) { ++ if (task == tt->active_set[i]) { ++ if (CRASHDEBUG(0)) ++ error(INFO, ++ "x86_64_xendump_panic_task: rsp: %lx -> task: %lx\n", ++ rsp, task); ++ return task; ++ } ++ } + +- panic_task = tt->panic_task == bt->task ? 
TRUE : FALSE; ++ error(WARNING, ++ "x86_64_xendump_panic_task: rsp: %lx -> task: %lx (not active)\n", ++ rsp, task); ++ } + +- if (panic_task && bt->machdep) { +- user_regs = bt->machdep; ++ return NO_TASK; ++} + +- if (x86_64_eframe_verify(bt, +- 0, +- ULONG(user_regs + OFFSET(user_regs_struct_cs)), +- ULONG(user_regs + OFFSET(user_regs_struct_ss)), +- ULONG(user_regs + OFFSET(user_regs_struct_rip)), +- ULONG(user_regs + OFFSET(user_regs_struct_rsp)), +- ULONG(user_regs + OFFSET(user_regs_struct_eflags)))) { +- bt->stkptr = ULONG(user_regs + +- OFFSET(user_regs_struct_rsp)); +- if (x86_64_in_irqstack(bt)) { +- ur_rip = ULONG(user_regs + +- OFFSET(user_regs_struct_rip)); +- ur_rsp = ULONG(user_regs + +- OFFSET(user_regs_struct_rsp)); +- goto skip_stage; +- } +- } +- } ++/* ++ * Because of an off-by-one vcpu bug in early xc_domain_dumpcore() ++ * instantiations, the registers in the vcpu_guest_context are not ++ * necessarily those of the panic task. Furthermore, the rsp is ++ * seemingly unassociated with the task, presumably due to a hypervisor ++ * callback, so only accept the contents if they refer to the panic ++ * task's stack. ++ */ ++static void ++x86_64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp) ++{ ++ ulong task, xrip, xrsp; ++ off_t offset; ++ struct syment *sp; ++ int cpu; + +- panic = FALSE; ++ if (INVALID_MEMBER(vcpu_guest_context_user_regs) || ++ INVALID_MEMBER(cpu_user_regs_rip) || ++ INVALID_MEMBER(cpu_user_regs_rsp)) ++ goto generic; ++ ++ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + ++ (off_t)OFFSET(vcpu_guest_context_user_regs) + ++ (off_t)OFFSET(cpu_user_regs_rsp); ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ goto generic; ++ if (read(xd->xfd, &xrsp, sizeof(ulong)) != sizeof(ulong)) ++ goto generic; ++ ++ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + ++ (off_t)OFFSET(vcpu_guest_context_user_regs) + ++ (off_t)OFFSET(cpu_user_regs_rip); ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ goto generic; ++ if (read(xd->xfd, &xrip, sizeof(ulong)) != sizeof(ulong)) ++ goto generic; + + /* +- * Check the process stack first. ++ * This works -- comes from smp_send_stop call in panic. ++ * But xendump_panic_hook() will forestall this function ++ * from being called (for now).
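++ * The register pair read above is only trusted when stkptr_to_task()
++ * maps xrsp back onto bt->task; anything else falls through to the
++ * generic machdep->get_stack_frame() path below.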
+ */ +-next_stack: +- for (i = 0, up = (ulong *)bt->stackbuf; +- i < (bt->stacktop - bt->stackbase)/sizeof(ulong); i++, up++) { +- sym = closest_symbol(*up); +- +- if (STREQ(sym, "netconsole_netdump") || +- STREQ(sym, "netpoll_start_netdump") || +- STREQ(sym, "start_disk_dump") || +- STREQ(sym, "disk_dump") || +- STREQ(sym, "try_crashdump")) { +- *rip = *up; +- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); +- return; +- } ++ if (IS_KVADDR(xrsp) && (task = stkptr_to_task(xrsp)) && ++ (task == bt->task)) { ++ if (CRASHDEBUG(1)) ++ fprintf(xd->ofp, ++ "hooks from vcpu_guest_context: rip: %lx rsp: %lx\n", xrip, xrsp); ++ *rip = xrip; ++ *rsp = xrsp; ++ return; ++ } + +- if ((stage == 2) && +- (STREQ(sym, "nmi_watchdog_tick") || +- STREQ(sym, "default_do_nmi"))) { +- sp = x86_64_function_called_by((*up)-5); +- if (!sp || !STREQ(sp->name, "die_nmi")) +- continue; +- *rip = *up; +- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); +- bt_in->flags |= BT_START; +- *rip = symbol_value("die_nmi"); +- *rsp = (*rsp) - (7*sizeof(ulong)); +- return; +- } ++generic: + +- if (STREQ(sym, "panic")) { +- *rip = *up; +- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); +- panic = TRUE; +- continue; /* keep looking for die */ +- } ++ machdep->get_stack_frame(bt, rip, rsp); + +- if (STREQ(sym, "die")) { +- *rip = *up; +- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); +- for (i++, up++; i < LONGS_PER_STACK; i++, up++) { +- sym = closest_symbol(*up); +- if (STREQ(sym, "sysrq_handle_crash")) +- goto next_sysrq; ++ /* ++ * If this is an active task showing itself in schedule(), ++ * then the thread_struct rsp is stale. It has to be coming ++ * from a callback via the interrupt stack. ++ */ ++ if (is_task_active(bt->task) && (symbol_value("thread_return") == *rip)) { ++ cpu = bt->tc->processor; ++ xrsp = machdep->machspec->stkinfo.ibase[cpu] + ++ machdep->machspec->stkinfo.isize - sizeof(ulong); ++ ++ while (readmem(xrsp, KVADDR, &xrip, ++ sizeof(ulong), "xendump rsp", RETURN_ON_ERROR)) { ++ if ((sp = value_search(xrip, (ulong *)&offset)) && ++ STREQ(sp->name, "smp_really_stop_cpu") && offset) { ++ *rip = xrip; ++ *rsp = xrsp; ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "switch thread_return to smp_call_function_interrupt\n"); ++ break; + } +- return; ++ xrsp -= sizeof(ulong); ++ if (xrsp <= machdep->machspec->stkinfo.ibase[cpu]) ++ break; + } ++ } ++} + +- if (STREQ(sym, "sysrq_handle_crash")) { +-next_sysrq: +- *rip = *up; +- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); +- machdep->flags |= SYSRQ; +- for (i++, up++; i < LONGS_PER_STACK; i++, up++) { +- sym = closest_symbol(*up); +- if (STREQ(sym, "sysrq_handle_crash")) +- goto next_sysrq; +- } +- return; +- } ++/* for XEN Hypervisor analysis */ + +- if (!panic_task && (stage > 0) && +- STREQ(sym, "smp_call_function_interrupt")) { +- *rip = *up; +- *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf); +- return; +- } +- } ++static int ++x86_64_is_kvaddr_hyper(ulong addr) ++{ ++ return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); ++} + +- if (panic) +- return; ++static ulong ++x86_64_get_stackbase_hyper(ulong task) ++{ ++ struct xen_hyper_vcpu_context *vcc; ++ struct xen_hyper_pcpu_context *pcc; ++ ulong rsp0, base; ++ ++ /* task means vcpu here */ ++ vcc = xen_hyper_vcpu_to_vcpu_context(task); ++ if (!vcc) ++ error(FATAL, "invalid vcpu\n"); ++ ++ pcc = xen_hyper_id_to_pcpu_context(vcc->processor); ++ if (!pcc) ++ error(FATAL, "invalid pcpu number\n"); ++ ++ rsp0 = pcc->sp.rsp0; ++ base = rsp0 & (~(STACKSIZE() - 1)); ++ 
return base; ++} + +-skip_stage: +- switch (stage) +- { +- /* +- * Now check the processor's interrupt stack. +- */ +- case 0: +- bt->stackbase = ms->stkinfo.ibase[bt->tc->processor]; +- bt->stacktop = ms->stkinfo.ibase[bt->tc->processor] + +- ms->stkinfo.isize; +- bt->stackbuf = ms->irqstack; +- alter_stackbuf(bt); +- stage = 1; +- goto next_stack; ++static ulong ++x86_64_get_stacktop_hyper(ulong task) ++{ ++ return x86_64_get_stackbase_hyper(task) + STACKSIZE(); ++} + +- /* +- * Check the NMI exception stack. +- */ +- case 1: +- bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][NMI_STACK]; +- bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][NMI_STACK] + +- ms->stkinfo.esize; +- bt->stackbuf = ms->irqstack; +- alter_stackbuf(bt); +- stage = 2; +- goto next_stack; ++#define EXCEPTION_STACKSIZE_HYPER (1024UL) + +- case 2: +- break; +- } ++static ulong ++x86_64_in_exception_stack_hyper(ulong vcpu, ulong rsp) ++{ ++ struct xen_hyper_vcpu_context *vcc; ++ struct xen_hyper_pcpu_context *pcc; ++ int i; ++ ulong stackbase; + +- /* +- * We didn't find what we were looking for, so just use what was +- * passed in from the ELF header. +- */ +- if (ur_rip && ur_rsp) { +- *rip = ur_rip; +- *rsp = ur_rsp; ++ vcc = xen_hyper_vcpu_to_vcpu_context(vcpu); ++ if (!vcc) ++ error(FATAL, "invalid vcpu\n"); ++ ++ pcc = xen_hyper_id_to_pcpu_context(vcc->processor); ++ if (!pcc) ++ error(FATAL, "invalid pcpu number\n"); ++ ++ for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++) { ++ if (pcc->ist[i] == 0) { ++ continue; ++ } ++ stackbase = pcc->ist[i] - EXCEPTION_STACKSIZE_HYPER; ++ if ((rsp & ~(EXCEPTION_STACKSIZE_HYPER - 1)) == stackbase) { ++ return stackbase; ++ } + } + +- console("x86_64_get_dumpfile_stack_frame: cannot find anything useful\n"); ++ return 0; ++} + +- bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH; ++static void ++x86_64_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp) ++{ ++ struct xen_hyper_vcpu_context *vcc; ++ int pcpu; ++ ulong *regs; ++ ulong rsp, rip; ++ ++ /* task means vcpu here */ ++ vcc = xen_hyper_vcpu_to_vcpu_context(bt->task); ++ if (!vcc) ++ error(FATAL, "invalid vcpu\n"); ++ ++ pcpu = vcc->processor; ++ if (!xen_hyper_test_pcpu_id(pcpu)) { ++ error(FATAL, "invalid pcpu number\n"); ++ } ++ ++ if (bt->flags & BT_TEXT_SYMBOLS_ALL) { ++ if (spp) ++ *spp = x86_64_get_stackbase_hyper(bt->task); ++ if (pcp) ++ *pcp = 0; ++ bt->flags &= ~BT_TEXT_SYMBOLS_ALL; ++ return; ++ } + +- machdep->get_stack_frame(bt, rip, rsp); ++ regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr; ++ rsp = XEN_HYPER_X86_64_NOTE_RSP(regs); ++ rip = XEN_HYPER_X86_64_NOTE_RIP(regs); ++ ++ if (spp) { ++ if (x86_64_in_exception_stack_hyper(bt->task, rsp)) ++ *spp = rsp; ++ else if (rsp < x86_64_get_stackbase_hyper(bt->task) || ++ rsp >= x86_64_get_stacktop_hyper(bt->task)) ++ *spp = x86_64_get_stackbase_hyper(bt->task); ++ else ++ *spp = rsp; ++ } ++ if (pcp) { ++ if (is_kernel_text(rip)) ++ *pcp = rip; ++ else ++ *pcp = 0; ++ } + } + +-/* +- * Get the saved RSP from the task's thread_struct. 
+- */ +-static ulong +-x86_64_get_sp(struct bt_info *bt) ++static int ++x86_64_print_stack_entry_hyper(struct bt_info *bt, FILE *ofp, int level, ++ int stkindex, ulong text) + { +- ulong offset, rsp; ++ ulong rsp, offset; ++ struct syment *sp; ++ char *name; ++ int result; ++ char buf[BUFSIZE]; + +- if (tt->flags & THREAD_INFO) { +- readmem(bt->task + OFFSET(task_struct_thread) + +- OFFSET(thread_struct_rsp), KVADDR, +- &rsp, sizeof(void *), +- "thread_struct rsp", FAULT_ON_ERROR); +- return rsp; +- } ++ offset = 0; ++ sp = value_search(text, &offset); ++ if (!sp) ++ return BACKTRACE_ENTRY_IGNORED; + +- offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rsp); ++ name = sp->name; + +- return GET_STACK_ULONG(offset); +-} ++ if (STREQ(name, "syscall_enter")) ++ result = BACKTRACE_COMPLETE; ++ else ++ result = BACKTRACE_ENTRY_DISPLAYED; + +-/* +- * Get the saved PC from the task's thread_struct if it exists; +- * otherwise just use the "thread_return" label value. +- */ +-static ulong +-x86_64_get_pc(struct bt_info *bt) +-{ +- ulong offset, rip; ++ rsp = bt->stackbase + (stkindex * sizeof(long)); + +- if (INVALID_MEMBER(thread_struct_rip)) +- return symbol_value("thread_return"); ++ if ((bt->flags & BT_FULL)) { ++ if (bt->frameptr) ++ x86_64_display_full_frame(bt, rsp, ofp); ++ bt->frameptr = rsp + sizeof(ulong); ++ } + +- if (tt->flags & THREAD_INFO) { +- readmem(bt->task + OFFSET(task_struct_thread) + +- OFFSET(thread_struct_rip), KVADDR, +- &rip, sizeof(void *), +- "thread_struct rip", FAULT_ON_ERROR); +- return rip; +- } ++ fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level, ++ rsp, name, text); ++ ++ if (bt->flags & BT_LINE_NUMBERS) { ++ get_line_number(text, buf, FALSE); ++ if (strlen(buf)) ++ fprintf(ofp, " %s\n", buf); ++ } + +- offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rip); ++ if (BT_REFERENCE_CHECK(bt)) ++ x86_64_do_bt_reference_check(bt, text, name); + +- return GET_STACK_ULONG(offset); ++ return result; + } + +- +-/* +- * Do the work for x86_64_get_sp() and x86_64_get_pc(). +- */ + static void +-get_x86_64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) +-{ +- error(FATAL, "get_x86_64_frame: TBD\n"); +-} +- +-/* +- * Do the work for cmd_irq(). +- */ +-static void +-x86_64_dump_irq(int irq) ++x86_64_print_eframe_regs_hyper(struct bt_info *bt) + { +- if (symbol_exists("irq_desc")) { +- machdep->dump_irq = generic_dump_irq; +- return(generic_dump_irq(irq)); +- } ++ ulong *up; ++ ulong offset; ++ struct syment *sp; + +- error(FATAL, "ia64_dump_irq: irq_desc[] does not exist?\n"); +-} + +-/* +- * Do the work for irq -d +- */ +-void +-x86_64_display_idt_table(void) +-{ +- int i; +- char *idt_table_buf; +- char buf[BUFSIZE]; +- ulong *ip; ++ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); ++ up -= 21; + +- idt_table_buf = GETBUF(SIZE(gate_struct) * 256); +- readmem(symbol_value("idt_table"), KVADDR, idt_table_buf, +- SIZE(gate_struct) * 256, "idt_table", FAULT_ON_ERROR); +- ip = (ulong *)idt_table_buf; ++ fprintf(fp, " [exception RIP: "); ++ if ((sp = value_search(up[16], &offset))) { ++ fprintf(fp, "%s", sp->name); ++ if (offset) ++ fprintf(fp, (output_radix == 16) ? 
++ "+0x%lx" : "+%ld", offset); ++ } else ++ fprintf(fp, "unknown or invalid address"); ++ fprintf(fp, "]\n"); + +- for (i = 0; i < 256; i++, ip += 2) { +- if (i < 10) +- fprintf(fp, " "); +- else if (i < 100) +- fprintf(fp, " "); +- fprintf(fp, "[%d] %s\n", +- i, x86_64_extract_idt_function(ip, buf, NULL)); +- } ++ fprintf(fp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n", ++ up[16], up[19], up[18]); ++ fprintf(fp, " RAX: %016lx RBX: %016lx RCX: %016lx\n", ++ up[10], up[5], up[11]); ++ fprintf(fp, " RDX: %016lx RSI: %016lx RDI: %016lx\n", ++ up[11], up[13], up[14]); ++ fprintf(fp, " RBP: %016lx R8: %016lx R9: %016lx\n", ++ up[4], up[9], up[8]); ++ fprintf(fp, " R10: %016lx R11: %016lx R12: %016lx\n", ++ up[7], up[6], up[3]); ++ fprintf(fp, " R13: %016lx R14: %016lx R15: %016lx\n", ++ up[2], up[1], up[0]); ++ fprintf(fp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n", ++ up[15], up[17], up[20]); + +- FREEBUF(idt_table_buf); ++ fprintf(fp, "--- ---\n"); + } + + /* +- * Extract the function name out of the IDT entry. ++ * simple back tracer for xen hypervisor ++ * irq stack does not exist. so relative easy. + */ +-static char * +-x86_64_extract_idt_function(ulong *ip, char *buf, ulong *retaddr) ++static void ++x86_64_simple_back_trace_cmd_hyper(struct bt_info *bt_in) + { +- ulong i1, i2, addr; +- char locbuf[BUFSIZE]; +- physaddr_t phys; ++ int i, level, done; ++ ulong rsp, estack, stacktop; ++ ulong *up; ++ FILE *ofp; ++ struct bt_info bt_local, *bt; ++ char ebuf[EXCEPTION_STACKSIZE_HYPER]; + +- if (buf) +- BZERO(buf, BUFSIZE); ++ bt = &bt_local; ++ BCOPY(bt_in, bt, sizeof(struct bt_info)); + +- i1 = *ip; +- i2 = *(ip+1); ++ if (bt->flags & BT_FRAMESIZE_DEBUG) { ++ error(INFO, "-F not support\n"); ++ return; ++ } + +- i2 <<= 32; +- addr = i2 & 0xffffffff00000000; +- addr |= (i1 & 0xffff); +- i1 >>= 32; +- addr |= (i1 & 0xffff0000); ++ level = 0; ++ done = FALSE; ++ bt->call_target = NULL; ++ rsp = bt->stkptr; ++ if (!rsp) { ++ error(INFO, "cannot determine starting stack pointer\n"); ++ return; ++ } ++ if (BT_REFERENCE_CHECK(bt)) ++ ofp = pc->nullfp; ++ else ++ ofp = fp; + +- if (retaddr) +- *retaddr = addr; ++ while ((estack = x86_64_in_exception_stack_hyper(bt->task, rsp))) { ++ bt->flags |= BT_EXCEPTION_STACK; ++ bt->stackbase = estack; ++ bt->stacktop = estack + EXCEPTION_STACKSIZE_HYPER; ++ bt->stackbuf = ebuf; + +- if (!buf) +- return NULL; ++ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, ++ bt->stacktop - bt->stackbase, "exception stack contents", ++ RETURN_ON_ERROR)) ++ error(FATAL, "read of exception stack at %lx failed\n", ++ bt->stackbase); + +- value_to_symstr(addr, locbuf, 0); +- if (strlen(locbuf)) +- sprintf(buf, locbuf); +- else { +- sprintf(buf, "%016lx", addr); +- if (kvtop(NULL, addr, &phys, 0)) { +- addr = machdep->kvbase + (ulong)phys; +- if (value_to_symstr(addr, locbuf, 0)) { +- strcat(buf, " <"); +- strcat(buf, locbuf); +- strcat(buf, ">"); +- } +- } +- } ++ stacktop = bt->stacktop - 168; + +- return buf; +-} ++ for (i = (rsp - bt->stackbase)/sizeof(ulong); ++ !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { ++ ++ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); + +-/* +- * Filter disassembly output if the output radix is not gdb's default 10 +- */ +-static int +-x86_64_dis_filter(ulong vaddr, char *inbuf) +-{ +- char buf1[BUFSIZE]; +- char buf2[BUFSIZE]; +- char *colon, *p1; +- int argc; +- char *argv[MAXARGS]; +- ulong value; ++ if (!is_kernel_text(*up)) ++ continue; + +- if (!inbuf) +- return TRUE; +-/* +- * For some reason gdb can go off into the weeds 
translating text addresses, +- * (on alpha -- not necessarily seen on x86_64) so this routine both fixes the +- * references as well as imposing the current output radix on the translations. +- */ +- console("IN: %s", inbuf); ++ switch (x86_64_print_stack_entry_hyper(bt, ofp, level, i,*up)) ++ { ++ case BACKTRACE_ENTRY_DISPLAYED: ++ level++; ++ break; ++ case BACKTRACE_ENTRY_IGNORED: ++ break; ++ case BACKTRACE_COMPLETE: ++ done = TRUE; ++ break; ++ } ++ } + +- colon = strstr(inbuf, ":"); ++ if (!BT_REFERENCE_CHECK(bt)) ++ x86_64_print_eframe_regs_hyper(bt); + +- if (colon) { +- sprintf(buf1, "0x%lx <%s>", vaddr, +- value_to_symstr(vaddr, buf2, pc->output_radix)); +- sprintf(buf2, "%s%s", buf1, colon); +- strcpy(inbuf, buf2); ++ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); ++ up -= 2; ++ rsp = bt->stkptr = *up; ++ up -= 3; ++ bt->instptr = *up; ++ done = FALSE; ++ bt->frameptr = 0; + } + +- strcpy(buf1, inbuf); +- argc = parse_line(buf1, argv); ++ if (bt->flags & BT_EXCEPTION_STACK) { ++ bt->flags &= ~BT_EXCEPTION_STACK; ++ bt->stackbase = bt_in->stackbase; ++ bt->stacktop = bt_in->stacktop; ++ bt->stackbuf = bt_in->stackbuf; ++ } + +- if ((FIRSTCHAR(argv[argc-1]) == '<') && +- (LASTCHAR(argv[argc-1]) == '>')) { +- p1 = rindex(inbuf, '<'); +- while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) +- p1--; ++ for (i = (rsp - bt->stackbase)/sizeof(ulong); ++ !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { + +- if (!STRNEQ(p1, " 0x")) +- return FALSE; +- p1++; ++ up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); + +- if (!extract_hex(p1, &value, NULLCHAR, TRUE)) +- return FALSE; ++ if (!is_kernel_text(*up)) ++ continue; + +- sprintf(buf1, "0x%lx <%s>\n", value, +- value_to_symstr(value, buf2, pc->output_radix)); ++ switch (x86_64_print_stack_entry_hyper(bt, ofp, level, i,*up)) ++ { ++ case BACKTRACE_ENTRY_DISPLAYED: ++ level++; ++ break; ++ case BACKTRACE_ENTRY_IGNORED: ++ break; ++ case BACKTRACE_COMPLETE: ++ done = TRUE; ++ break; ++ } ++ } ++} + +- sprintf(p1, buf1); +- +- } else if (STREQ(argv[argc-2], "callq") && +- hexadecimal(argv[argc-1], 0)) { +- /* +- * Update module code of the form: +- * +- * callq 0xffffffffa0017aa0 +- * +- * to show a bracketed direct call target. 
+- */ +- p1 = &LASTCHAR(inbuf); ++static void ++x86_64_init_hyper(int when) ++{ ++ switch (when) ++ { ++ case PRE_SYMTAB: ++ machdep->verify_symbol = x86_64_verify_symbol; ++ machdep->machspec = &x86_64_machine_specific; ++ if (pc->flags & KERNEL_DEBUG_QUERY) ++ return; ++ machdep->pagesize = memory_page_size(); ++ machdep->pageshift = ffs(machdep->pagesize) - 1; ++ machdep->pageoffset = machdep->pagesize - 1; ++ machdep->pagemask = ~((ulonglong)machdep->pageoffset); ++ machdep->stacksize = machdep->pagesize * 2; ++ if ((machdep->machspec->upml = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc upml space."); ++ if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc pgd space."); ++ if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc pmd space."); ++ if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc ptbl space."); ++ if ((machdep->machspec->pml4 = ++ (char *)malloc(PAGESIZE()*2)) == NULL) ++ error(FATAL, "cannot malloc pml4 space."); ++ machdep->machspec->last_upml_read = 0; ++ machdep->machspec->last_pml4_read = 0; ++ machdep->last_pgd_read = 0; ++ machdep->last_pmd_read = 0; ++ machdep->last_ptbl_read = 0; ++ machdep->verify_paddr = generic_verify_paddr; ++ machdep->ptrs_per_pgd = PTRS_PER_PGD; ++ if (machdep->cmdline_arg) ++ parse_cmdline_arg(); ++ break; + +- if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) { +- sprintf(buf1, " <%s>\n", +- value_to_symstr(value, buf2, +- pc->output_radix)); +- if (IS_MODULE_VADDR(value) && +- !strstr(buf2, "+")) +- sprintf(p1, buf1); +- } +- } ++ case PRE_GDB: ++ machdep->machspec->page_offset = PAGE_OFFSET_XEN_HYPER; ++ machdep->kvbase = (ulong)HYPERVISOR_VIRT_START; ++ machdep->identity_map_base = (ulong)PAGE_OFFSET_XEN_HYPER; ++ machdep->is_kvaddr = x86_64_is_kvaddr_hyper; ++ machdep->is_uvaddr = x86_64_is_uvaddr; ++ machdep->eframe_search = x86_64_eframe_search; ++ machdep->back_trace = x86_64_simple_back_trace_cmd_hyper; ++ machdep->processor_speed = x86_64_processor_speed; ++ machdep->kvtop = x86_64_kvtop; ++ machdep->get_task_pgd = x86_64_get_task_pgd; ++ machdep->get_stack_frame = x86_64_get_stack_frame_hyper; ++ machdep->get_stackbase = x86_64_get_stackbase_hyper; ++ machdep->get_stacktop = x86_64_get_stacktop_hyper; ++ machdep->translate_pte = x86_64_translate_pte; ++ machdep->memory_size = xen_hyper_x86_memory_size; /* KAK add */ ++ machdep->is_task_addr = x86_64_is_task_addr; ++ machdep->dis_filter = x86_64_dis_filter; ++ machdep->cmd_mach = x86_64_cmd_mach; ++ machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus; /* KAK add */ ++ machdep->line_number_hooks = x86_64_line_number_hooks; ++ machdep->value_to_symbol = generic_machdep_value_to_symbol; ++ machdep->init_kernel_pgd = x86_64_init_kernel_pgd; ++ machdep->clear_machdep_cache = x86_64_clear_machdep_cache; + +- console(" %s", inbuf); ++ /* machdep table for Xen Hypervisor */ ++ xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init; ++ break; ++ ++ case POST_GDB: ++ XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); ++ XEN_HYPER_STRUCT_SIZE_INIT(tss_struct, "tss_struct"); ++ XEN_HYPER_ASSIGN_OFFSET(tss_struct_rsp0) = MEMBER_OFFSET("tss_struct", "__blh") + sizeof(short unsigned int); ++ XEN_HYPER_MEMBER_OFFSET_INIT(tss_struct_ist, "tss_struct", "ist"); ++ if (symbol_exists("cpu_data")) { ++ xht->cpu_data_address = symbol_value("cpu_data"); ++ } ++/* KAK Can this be calculated? 
*/ ++ if (!machdep->hz) { ++ machdep->hz = XEN_HYPER_HZ; ++ } ++ break; + +- return TRUE; ++ case POST_INIT: ++ break; ++ } + } + + +-/* +- * Override smp_num_cpus if possible and necessary. +- */ +-int +-x86_64_get_smp_cpus(void) +-{ +- int i, cpus, nr_pda, cpunumber; +- char *cpu_pda_buf; +- ulong level4_pgt; ++struct framesize_cache { ++ ulong textaddr; ++ int framesize; ++}; + +- if (!VALID_STRUCT(x8664_pda)) +- return 1; ++static struct framesize_cache *x86_64_framesize_cache = NULL; ++static int framesize_cache_entries = 0; + +- cpu_pda_buf = GETBUF(SIZE(x8664_pda)); ++#define FRAMESIZE_QUERY (1) ++#define FRAMESIZE_ENTER (2) ++#define FRAMESIZE_DUMP (3) + +- if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) +- nr_pda = NR_CPUS; ++#define FRAMESIZE_CACHE_INCR (50) + +- for (i = cpus = 0; i < nr_pda; i++) { +- if (!CPU_PDA_READ(i, cpu_pda_buf)) +- break; +- level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); +- cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber)); +- if (!VALID_LEVEL4_PGT_ADDR(level4_pgt) || (cpunumber != cpus)) +- break; +- cpus++; +- } ++static int ++x86_64_framesize_cache_resize(void) ++{ ++ int i; ++ struct framesize_cache *new_fc, *fc; + +- FREEBUF(cpu_pda_buf); ++ if ((new_fc = realloc(x86_64_framesize_cache, ++ (framesize_cache_entries+FRAMESIZE_CACHE_INCR) * ++ sizeof(struct framesize_cache))) == NULL) { ++ error(INFO, "cannot realloc x86_64_framesize_cache space!\n"); ++ return FALSE; ++ } + +- return cpus; ++ fc = new_fc + framesize_cache_entries; ++ for (i = framesize_cache_entries; ++ i < (framesize_cache_entries+FRAMESIZE_CACHE_INCR); ++ fc++, i++) { ++ fc->textaddr = 0; ++ fc->framesize = 0; ++ } ++ ++ x86_64_framesize_cache = new_fc; ++ framesize_cache_entries += FRAMESIZE_CACHE_INCR; ++ ++ return TRUE; + } + +-/* +- * Machine dependent command. +- */ +-void +-x86_64_cmd_mach(void) ++static int ++x86_64_framesize_cache_func(int cmd, ulong textaddr, int *framesize) + { +- int c; ++ int i; ++ struct framesize_cache *fc; ++ char buf[BUFSIZE]; + +- while ((c = getopt(argcnt, args, "cm")) != EOF) { +- switch(c) +- { +- case 'c': +- x86_64_display_cpu_data(); +- return; ++ if (!x86_64_framesize_cache) { ++ framesize_cache_entries = FRAMESIZE_CACHE_INCR; ++ if ((x86_64_framesize_cache = calloc(framesize_cache_entries, ++ sizeof(struct framesize_cache))) == NULL) ++ error(FATAL, ++ "cannot calloc x86_64_framesize_cache space!\n"); ++ } + +- case 'm': +- x86_64_display_memmap(); +- return; ++ switch (cmd) ++ { ++ case FRAMESIZE_QUERY: ++ fc = &x86_64_framesize_cache[0]; ++ for (i = 0; i < framesize_cache_entries; i++, fc++) { ++ if (fc->textaddr == textaddr) { ++ *framesize = fc->framesize; ++ return TRUE; ++ } ++ } ++ return FALSE; + +- default: +- argerrs++; +- break; +- } +- } ++ case FRAMESIZE_ENTER: ++retry: ++ fc = &x86_64_framesize_cache[0]; ++ for (i = 0; i < framesize_cache_entries; i++, fc++) { ++ if ((fc->textaddr == 0) || ++ (fc->textaddr == textaddr)) { ++ fc->textaddr = textaddr; ++ fc->framesize = *framesize; ++ return fc->framesize; ++ } ++ } + +- if (argerrs) +- cmd_usage(pc->curcmd, SYNOPSIS); ++ if (x86_64_framesize_cache_resize()) ++ goto retry; + +- x86_64_display_machine_stats(); +-} ++ return *framesize; + +-/* +- * "mach" command output. 
+- */ +-static void +-x86_64_display_machine_stats(void) +-{ +- struct new_utsname *uts; +- char buf[BUFSIZE]; +- ulong mhz; ++ case FRAMESIZE_DUMP: ++ fc = &x86_64_framesize_cache[0]; ++ for (i = 0; i < framesize_cache_entries; i++, fc++) { ++ if (fc->textaddr == 0) { ++ if (i < (framesize_cache_entries-1)) { ++ fprintf(fp, "[%d-%d]: (unused)\n", ++ i, framesize_cache_entries-1); ++ } ++ break; ++ } + +- uts = &kt->utsname; ++ fprintf(fp, "[%3d]: %lx %3d (%s)\n", i, ++ fc->textaddr, fc->framesize, ++ value_to_symstr(fc->textaddr, buf, 0)); ++ } ++ break; ++ } + +- fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); +- fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); +- fprintf(fp, " CPUS: %d\n", kt->cpus); +- fprintf(fp, " PROCESSOR SPEED: "); +- if ((mhz = machdep->processor_speed())) +- fprintf(fp, "%ld Mhz\n", mhz); +- else +- fprintf(fp, "(unknown)\n"); +- fprintf(fp, " HZ: %d\n", machdep->hz); +- fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); +- fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); +- fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); +- fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); +- fprintf(fp, " KERNEL START MAP: %lx\n", __START_KERNEL_map); +- fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR); +- fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); ++ return TRUE; + } + +-/* +- * "mach -c" +- */ +-static void +-x86_64_display_cpu_data(void) ++#define BT_FRAMESIZE_IGNORE_MASK \ ++ (BT_OLD_BACK_TRACE|BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_ALL|BT_FRAMESIZE_DISABLE) ++ ++static int ++x86_64_get_framesize(struct bt_info *bt, ulong textaddr) + { +- int cpu, cpus, boot_cpu; +- ulong cpu_data; +- ulong cpu_pda; +- +- if (symbol_exists("cpu_data")) { +- cpu_data = symbol_value("cpu_data"); +- cpus = kt->cpus; +- boot_cpu = FALSE; +- } else if (symbol_exists("boot_cpu_data")) { +- cpu_data = symbol_value("boot_cpu_data"); +- boot_cpu = TRUE; +- cpus = 1; ++ int c, framesize, instr, arg; ++ struct syment *sp; ++ long max_instructions; ++ ulong offset; ++ char buf[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ ulong locking_func, textaddr_save, current; ++ char *p1, *p2; ++ int reterror; ++ ++ if (!(bt->flags & BT_FRAMESIZE_DEBUG)) { ++ if ((bt->flags & BT_FRAMESIZE_IGNORE_MASK) || ++ (kt->flags & USE_OLD_BT)) ++ return 0; ++ } ++ ++ if (!(sp = value_search(textaddr, &offset))) { ++ if (!(bt->flags & BT_FRAMESIZE_DEBUG)) ++ bt->flags |= BT_FRAMESIZE_DISABLE; ++ return 0; ++ } ++ ++ if (!(bt->flags & BT_FRAMESIZE_DEBUG) && ++ x86_64_framesize_cache_func(FRAMESIZE_QUERY, textaddr, &framesize)) { ++ if (framesize == -1) ++ bt->flags |= BT_FRAMESIZE_DISABLE; ++ return framesize; + } +- cpu_pda = symbol_value("cpu_pda"); +- +- for (cpu = 0; cpu < cpus; cpu++) { +- if (boot_cpu) +- fprintf(fp, "BOOT CPU:\n"); +- else +- fprintf(fp, "%sCPU %d:\n", cpu ? "\n" : "", cpu); + +- dump_struct("cpuinfo_x86", cpu_data, 0); +- fprintf(fp, "\n"); +- dump_struct("x8664_pda", cpu_pda, 0); +- +- cpu_data += SIZE(cpuinfo_x86); +- cpu_pda += SIZE(x8664_pda); +- } +-} ++ /* ++ * Bait and switch an incoming .text.lock address ++ * with the containing function's address. 
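++ * text_lock_function() supplies the containing function, whose
++ * instructions are what actually get disassembled below; textaddr_save
++ * puts the original address back afterwards so the result is still
++ * cached under the .text.lock address itself.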
++ */ ++ if (STRNEQ(sp->name, ".text.lock.") && ++ (locking_func = text_lock_function(sp->name, bt, textaddr))) { ++ if (!(sp = value_search(locking_func, &offset))) { ++ bt->flags |= BT_FRAMESIZE_DISABLE; ++ return 0; ++ } ++ textaddr_save = textaddr; ++ textaddr = locking_func; ++ } else ++ textaddr_save = 0; + +-/* +- * "mach -m" +- */ +-static char *e820type[] = { +- "(invalid type)", +- "E820_RAM", +- "E820_RESERVED", +- "E820_ACPI", +- "E820_NVS", +-}; ++ framesize = 0; ++ max_instructions = textaddr - sp->value; ++ instr = arg = -1; + +-static void +-x86_64_display_memmap(void) +-{ +- ulong e820; +- int nr_map, i; +- char *buf, *e820entry_ptr; +- ulonglong addr, size; +- uint type; ++ open_tmpfile2(); + +- e820 = symbol_value("e820"); +- if (CRASHDEBUG(1)) +- dump_struct("e820map", e820, RADIX(16)); +- buf = (char *)GETBUF(SIZE(e820map)); ++ sprintf(buf, "x/%ldi 0x%lx", ++ max_instructions, sp->value); + +- readmem(e820, KVADDR, &buf[0], SIZE(e820map), +- "e820map", FAULT_ON_ERROR); ++ if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { ++ close_tmpfile2(); ++ bt->flags |= BT_FRAMESIZE_DISABLE; ++ return 0; ++ } + +- nr_map = INT(buf + OFFSET(e820map_nr_map)); ++ rewind(pc->tmpfile2); ++ while (fgets(buf, BUFSIZE, pc->tmpfile2)) { ++ strcpy(buf2, buf); + +- fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); ++ if (CRASHDEBUG(3)) ++ fprintf(pc->saved_fp, buf2); + +- for (i = 0; i < nr_map; i++) { +- e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i); +- addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr)); +- size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size)); +- type = UINT(e820entry_ptr + OFFSET(e820entry_type)); +- fprintf(fp, "%016llx - %016llx %s\n", addr, addr+size, +- e820type[type]); +- } +-} ++ c = parse_line(buf, arglist); + ++ if (instr == -1) { ++ /* ++ * Check whether <function+offset> are ++ * in the output string.
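++ * A symbolic line like "0xffffffff80109f40 <some_function+16>: push %rbp"
++ * carries the mnemonic in its third token, while a bare
++ * "0xffffffff80109f40: push %rbp" carries it in its second; the
++ * addresses shown here are illustrative only.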
++ */ ++ if (LASTCHAR(arglist[0]) == ':') { ++ instr = 1; ++ arg = 2; ++ } else { ++ instr = 2; ++ arg = 3; ++ } ++ } + +-static const char *hook_files[] = { +- "arch/x86_64/kernel/entry.S", +- "arch/x86_64/kernel/head.S", +- "arch/x86_64/kernel/semaphore.c" +-}; ++ if (c < (arg+1)) ++ continue; + +-#define ENTRY_S ((char **)&hook_files[0]) +-#define HEAD_S ((char **)&hook_files[1]) +-#define SEMAPHORE_C ((char **)&hook_files[2]) ++ reterror = 0; ++ current = htol(strip_ending_char(arglist[0], ':'), ++ RETURN_ON_ERROR, &reterror); ++ if (reterror) ++ continue; ++ if (current >= textaddr) ++ break; + +-static struct line_number_hook x86_64_line_number_hooks[] = { +- {"ret_from_fork", ENTRY_S}, +- {"system_call", ENTRY_S}, +- {"int_ret_from_sys_call", ENTRY_S}, +- {"ptregscall_common", ENTRY_S}, +- {"stub_execve", ENTRY_S}, +- {"stub_rt_sigreturn", ENTRY_S}, +- {"common_interrupt", ENTRY_S}, +- {"ret_from_intr", ENTRY_S}, +- {"load_gs_index", ENTRY_S}, +- {"arch_kernel_thread", ENTRY_S}, +- {"execve", ENTRY_S}, +- {"page_fault", ENTRY_S}, +- {"coprocessor_error", ENTRY_S}, +- {"simd_coprocessor_error", ENTRY_S}, +- {"device_not_available", ENTRY_S}, +- {"debug", ENTRY_S}, +- {"nmi", ENTRY_S}, +- {"int3", ENTRY_S}, +- {"overflow", ENTRY_S}, +- {"bounds", ENTRY_S}, +- {"invalid_op", ENTRY_S}, +- {"coprocessor_segment_overrun", ENTRY_S}, +- {"reserved", ENTRY_S}, +- {"double_fault", ENTRY_S}, +- {"invalid_TSS", ENTRY_S}, +- {"segment_not_present", ENTRY_S}, +- {"stack_segment", ENTRY_S}, +- {"general_protection", ENTRY_S}, +- {"alignment_check", ENTRY_S}, +- {"divide_error", ENTRY_S}, +- {"spurious_interrupt_bug", ENTRY_S}, +- {"machine_check", ENTRY_S}, +- {"call_debug", ENTRY_S}, ++ if (STRNEQ(arglist[instr], "push")) { ++ framesize += 8; ++ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) ++ fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", ++ strip_linefeeds(buf2), framesize); ++ } else if (STRNEQ(arglist[instr], "pop")) { ++ framesize -= 8; ++ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) ++ fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", ++ strip_linefeeds(buf2), framesize); ++ } else if (STRNEQ(arglist[instr], "add") && ++ (p1 = strstr(arglist[arg], ",%rsp"))) { ++ *p1 = NULLCHAR; ++ p2 = arglist[arg]; ++ reterror = 0; ++ offset = htol(p2+1, RETURN_ON_ERROR, &reterror); ++ if (reterror) ++ continue; ++ framesize -= offset; ++ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) ++ fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", ++ strip_linefeeds(buf2), framesize); ++ } else if (STRNEQ(arglist[instr], "sub") && ++ (p1 = strstr(arglist[arg], ",%rsp"))) { ++ *p1 = NULLCHAR; ++ p2 = arglist[arg]; ++ reterror = 0; ++ offset = htol(p2+1, RETURN_ON_ERROR, &reterror); ++ if (reterror) ++ continue; ++ framesize += offset; ++ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) ++ fprintf(pc->saved_fp, "%s\t[framesize: %d]\n", ++ strip_linefeeds(buf2), framesize); ++ } else if (STRNEQ(arglist[instr], "retq")) { ++ bt->flags |= BT_FRAMESIZE_DISABLE; ++ framesize = -1; ++ if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) ++ fprintf(pc->saved_fp, "%s\t[framesize: DISABLED]\n", ++ strip_linefeeds(buf2)); ++ break; ++ } ++ } ++ close_tmpfile2(); + +- {NULL, NULL} /* list must be NULL-terminated */ +-}; ++ if (textaddr_save) ++ textaddr = textaddr_save; + +-static void +-x86_64_dump_line_number(ulong callpc) +-{ +- error(FATAL, "x86_64_dump_line_number: TBD\n"); ++ return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize)); + } + +-void 
+-x86_64_compiler_warning_stub(void) ++static void ++x86_64_framesize_debug(struct bt_info *bt) + { +- struct line_number_hook *lhp; +- char **p; ++ int framesize; + +- lhp = &x86_64_line_number_hooks[0]; lhp++; +- p = ENTRY_S; +- x86_64_back_trace(NULL, NULL); +- get_x86_64_frame(NULL, NULL, NULL); +- x86_64_dump_line_number(0); +-} ++ switch (bt->hp->esp) ++ { ++ case 1: /* "dump" */ ++ if (bt->hp->eip) { ++ framesize = 1; ++ x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, ++ &framesize); ++ } else ++ x86_64_framesize_cache_func(FRAMESIZE_DUMP, 0, NULL); ++ break; ++ ++ case 0: ++ if (bt->hp->eip) { ++ framesize = 0; ++ x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, ++ &framesize); ++ } else /* "clear" */ ++ BZERO(&x86_64_framesize_cache[0], ++ sizeof(struct framesize_cache)*framesize_cache_entries); ++ break; ++ ++ case -1: ++ if (!bt->hp->eip) ++ error(INFO, "x86_64_framesize_debug: ignoring command\n"); ++ else ++ x86_64_get_framesize(bt, bt->hp->eip); ++ break; + ++ default: ++ if (bt->hp->esp > 1) { ++ framesize = bt->hp->esp; ++ if (bt->hp->eip) ++ x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, ++ &framesize); ++ } else ++ error(INFO, "x86_64_framesize_debug: ignoring command\n"); ++ break; ++ } ++} + #endif /* X86_64 */ +--- crash/symbols.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/symbols.c 2008-01-16 12:01:59.000000000 -0500 +@@ -1,8 +1,8 @@ + /* symbols.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -21,6 +21,8 @@ + + static void store_symbols(bfd *, int, void *, long, unsigned int); + static void store_sysmap_symbols(void); ++static ulong relocate(ulong, char *, int); ++static int relocate_force(ulong, char *); + static void strip_module_symbol_end(char *s); + static int compare_syms(const void *, const void *); + static int compare_mods(const void *, const void *); +@@ -36,7 +38,9 @@ + static int load_module_index(struct syment *); + static void section_header_info(bfd *, asection *, void *); + static void store_section_data(struct load_module *, bfd *, asection *); +-static void calculate_load_order(struct load_module *, bfd *); ++static void calculate_load_order_v1(struct load_module *, bfd *); ++static void calculate_load_order_v2(struct load_module *, bfd *, int, ++ void *, long, unsigned int); + static void check_insmod_builtin(struct load_module *, int, ulong *); + static int is_insmod_builtin(struct load_module *, struct syment *); + struct load_module; +@@ -61,12 +65,16 @@ + struct elf_common; + static void Elf32_Sym_to_common(Elf32_Sym *, struct elf_common *); + static void Elf64_Sym_to_common(Elf64_Sym *, struct elf_common *); ++static void cmd_datatype_common(ulong); ++static int display_per_cpu_info(struct syment *); + + + #define KERNEL_SECTIONS (void *)(1) + #define MODULE_SECTIONS (void *)(2) + #define VERIFY_SECTIONS (void *)(3) + ++#define EV_DWARFEXTRACT 101010101 ++ + #define PARSE_FOR_DATA (1) + #define PARSE_FOR_DECLARATION (2) + static void parse_for_member(struct datatype_member *, ulong); +@@ -96,6 +104,7 @@ + #define SHOW_OFFSET (0x10000) + #define IN_UNION (0x20000) + #define IN_STRUCT (0x40000) ++#define DATATYPE_QUERY (0x80000) + + #define INTEGER_TYPE (UINT8|INT8|UINT16|INT16|UINT32|INT32|UINT64|INT64) + +@@ -110,6 +119,7 @@ + static void dump_datatype_member(FILE *, struct datatype_member *); + static void dump_datatype_flags(ulong, FILE *); + static void dump_enumerator_list(char *); ++static long anon_member_offset(char *, char *); + static int gdb_whatis(char *); + static void do_datatype_declaration(struct datatype_member *, ulong); + +@@ -139,6 +149,12 @@ + if (!bfd_check_format_matches(st->bfd, bfd_object, &matching)) + error(FATAL, "cannot determine object file format: %s\n", + pc->namelist); ++ /* ++ * Check whether the namelist is a kerntypes file built by ++ * dwarfextract, which places a magic number in e_version. ++ */ ++ if (file_elf_version(pc->namelist) == EV_DWARFEXTRACT) ++ pc->flags |= KERNTYPES; + + if (pc->flags & SYSMAP) { + bfd_map_over_sections(st->bfd, section_header_info, +@@ -153,13 +169,16 @@ + } + store_sysmap_symbols(); + return; +- } ++ } else if (LKCD_KERNTYPES()) ++ error(FATAL, "%s: use of kerntypes requires a system map\n", ++ pc->namelist); + + /* + * Pull a bait-and-switch on st->bfd if we've got a separate +- * .gnu_debuglink file that matches the CRC. ++ * .gnu_debuglink file that matches the CRC. Not done for kerntypes. 
+ */ +- if (!(bfd_get_file_flags(st->bfd) & HAS_SYMS)) { ++ if (!(LKCD_KERNTYPES()) && ++ !(bfd_get_file_flags(st->bfd) & HAS_SYMS)) { + if (!check_gnu_debuglink(st->bfd)) + no_debugging_data(FATAL); + } +@@ -471,6 +490,11 @@ + kt->stext_init = (ulong)bfd_get_section_vma(st->bfd, section); + kt->etext_init = kt->stext_init + + (ulong)bfd_section_size(st->bfd, section); ++ ++ if (kt->relocate) { ++ kt->stext_init -= kt->relocate; ++ kt->etext_init -= kt->relocate; ++ } + } + + /* +@@ -486,6 +510,7 @@ + bfd_byte *from, *fromend; + symbol_info syminfo; + struct syment *sp; ++ int first; + + if ((store = bfd_make_empty_symbol(abfd)) == NULL) + error(FATAL, "bfd_make_empty_symbol() failed\n"); +@@ -505,6 +530,13 @@ + st->symcnt = 0; + sp = st->symtable; + ++ if (machine_type("X86")) { ++ if (!(kt->flags & RELOC_SET)) ++ kt->flags |= RELOC_FORCE; ++ } else ++ kt->flags &= ~RELOC_SET; ++ ++ first = 0; + from = (bfd_byte *) minisyms; + fromend = from + symcount * size; + for (; from < fromend; from += size) +@@ -516,7 +548,11 @@ + bfd_get_symbol_info(abfd, sym, &syminfo); + if (machdep->verify_symbol(syminfo.name, syminfo.value, + syminfo.type)) { +- sp->value = syminfo.value; ++ if (kt->flags & (RELOC_SET|RELOC_FORCE)) ++ sp->value = relocate(syminfo.value, ++ (char *)syminfo.name, !(first++)); ++ else ++ sp->value = syminfo.value; + sp->type = syminfo.type; + namespace_ctl(NAMESPACE_INSTALL, &st->namespace, + sp, (char *)syminfo.name); +@@ -540,7 +576,7 @@ + static void + store_sysmap_symbols(void) + { +- int c; ++ int c, first; + long symcount; + char buf[BUFSIZE]; + FILE *map; +@@ -564,6 +600,10 @@ + error(FATAL, "symbol table namespace malloc: %s\n", + strerror(errno)); + ++ if (!machine_type("X86")) ++ kt->flags &= ~RELOC_SET; ++ ++ first = 0; + st->syment_size = symcount * sizeof(struct syment); + st->symcnt = 0; + sp = st->symtable; +@@ -580,7 +620,11 @@ + + if (machdep->verify_symbol(syment.name, syment.value, + syment.type)) { +- sp->value = syment.value; ++ if (kt->flags & RELOC_SET) ++ sp->value = relocate(syment.value, ++ syment.name, !(first++)); ++ else ++ sp->value = syment.value; + sp->type = syment.type; + namespace_ctl(NAMESPACE_INSTALL, &st->namespace, + sp, syment.name); +@@ -603,6 +647,96 @@ + } + + /* ++ * Handle x86 kernels configured such that the vmlinux symbols ++ * are not as loaded into the kernel (not unity-mapped). ++ */ ++static ulong ++relocate(ulong symval, char *symname, int first_symbol) ++{ ++ switch (kt->flags & (RELOC_SET|RELOC_FORCE)) ++ { ++ case RELOC_SET: ++ break; ++ ++ case RELOC_FORCE: ++ if (first_symbol && !relocate_force(symval, symname)) ++ kt->flags &= ~RELOC_FORCE; ++ break; ++ } ++ ++ return (symval - kt->relocate); ++} ++ ++/* ++ * If no --reloc argument was passed, try to figure it out ++ * by comparing the first vmlinux kernel symbol with the ++ * first /proc/kallsyms symbol. (should be "_text") ++ * ++ * Live system only (at least for now). ++ */ ++static int ++relocate_force(ulong symval, char *symname) ++{ ++ FILE *kp; ++ char buf[BUFSIZE]; ++ char *kallsyms[MAXARGS]; ++ ulong first; ++ ++ if (!ACTIVE() || !file_exists("/proc/kallsyms", NULL)) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "cannot determine relocation value: %s\n", ++ !ACTIVE() ? 
"not a live system" : ++ "/proc/kallsyms does not exist"); ++ return FALSE; ++ } ++ ++ if ((kp = fopen("/proc/kallsyms", "r")) == NULL) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "cannot open /proc/kallsyms to determine relocation\n"); ++ return FALSE; ++ } ++ ++ if (!fgets(buf, BUFSIZE, kp) || ++ (parse_line(buf, kallsyms) != 3) || ++ !hexadecimal(kallsyms[0], 0)) { ++ fclose(kp); ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "malformed /proc/kallsyms: cannot determine relocation value\n"); ++ return FALSE; ++ } ++ fclose(kp); ++ ++ first = htol(kallsyms[0], RETURN_ON_ERROR, NULL); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "RELOCATE: %s @ %lx %s\n" ++ " %s @ %lx /proc/kallsyms\n", ++ symname, symval, pc->namelist, ++ kallsyms[2], first); ++ ++ /* ++ * If the symbols match and have different values, ++ * force the relocation. ++ */ ++ if (STREQ(symname, kallsyms[2])) { ++ if (symval > first) { ++ kt->relocate = symval - first; ++ return TRUE; ++ } ++ } ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "cannot determine relocation value from first symbol\n"); ++ ++ return FALSE; ++} ++ ++/* + * Install all static kernel symbol values into the symval_hash. + */ + static void +@@ -1159,7 +1293,7 @@ + mod_name); + strncpy(lm->mod_name, mod_name, MAX_MOD_NAME-1); + } +- if (CRASHDEBUG(1)) ++ if (CRASHDEBUG(3)) + fprintf(fp, + "%lx (%lx): %s syms: %d gplsyms: %d ksyms: %ld\n", + mod, lm->mod_base, lm->mod_name, nsyms, +@@ -2121,22 +2255,13 @@ + fprintf(fp, "%sFORCE_DEBUGINFO", others++ ? "|" : ""); + if (st->flags & CRC_MATCHES) + fprintf(fp, "%sCRC_MATCHES", others++ ? "|" : ""); ++ if (st->flags & ADD_SYMBOL_FILE) ++ fprintf(fp, "%sADD_SYMBOL_FILE", others++ ? "|" : ""); ++ if (st->flags & USE_OLD_ADD_SYM) ++ fprintf(fp, "%sUSE_OLD_ADD_SYM", others++ ? "|" : ""); + fprintf(fp, ")\n"); + + fprintf(fp, " bfd: %lx\n", (ulong)st->bfd); +- +- sec = (asection **)st->sections; +- fprintf(fp, " sections: %s\n", sec ? "" : "(not in use)"); +- for (i = 0; sec && (i < st->bfd->section_count); i++, sec++) { +- asection *section; +- +- section = *sec; +- fprintf(fp, "%25s vma: %.*lx size: %ld\n", +- section->name, VADDR_PRLEN, +- (ulong)bfd_get_section_vma(st->bfd, section), +- (ulong)bfd_section_size(st->bfd, section)); +- } +- + fprintf(fp, " symtable: %lx\n", (ulong)st->symtable); + fprintf(fp, " symend: %lx\n", (ulong)st->symend); + fprintf(fp, " symcnt: %ld\n", st->symcnt); +@@ -2320,6 +2445,24 @@ + } + } + } ++ ++ fprintf(fp, "\n"); ++ fprintf(fp, "dwarf_eh_frame_file_offset: %llx\n", ++ (unsigned long long)st->dwarf_eh_frame_file_offset); ++ fprintf(fp, " dwarf_eh_frame_size: %ld\n", st->dwarf_eh_frame_size); ++ fprintf(fp, "\n"); ++ ++ sec = (asection **)st->sections; ++ fprintf(fp, " sections: %s\n", sec ? "" : "(not in use)"); ++ for (i = 0; sec && (i < st->bfd->section_count); i++, sec++) { ++ asection *section; ++ ++ section = *sec; ++ fprintf(fp, "%25s vma: %.*lx size: %ld\n", ++ section->name, VADDR_PRLEN, ++ (ulong)bfd_get_section_vma(st->bfd, section), ++ (ulong)bfd_section_size(st->bfd, section)); ++ } + } + + +@@ -2354,6 +2497,96 @@ + } + + /* ++ * Verify a vmlinux file, issuing a warning for processor and endianness ++ * mismatches. 
++ */ ++int ++is_kernel(char *file) ++{ ++ int fd, swap; ++ char eheader[BUFSIZE]; ++ Elf32_Ehdr *elf32; ++ Elf64_Ehdr *elf64; ++ ++ if ((fd = open(file, O_RDONLY)) < 0) { ++ error(INFO, "%s: %s\n", file, strerror(errno)); ++ return FALSE; ++ } ++ if (read(fd, eheader, BUFSIZE) != BUFSIZE) { ++ /* error(INFO, "%s: %s\n", file, strerror(errno)); */ ++ close(fd); ++ return FALSE; ++ } ++ close(fd); ++ ++ if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT) ++ return FALSE; ++ ++ elf32 = (Elf32_Ehdr *)&eheader[0]; ++ elf64 = (Elf64_Ehdr *)&eheader[0]; ++ ++ swap = (((eheader[EI_DATA] == ELFDATA2LSB) && ++ (__BYTE_ORDER == __BIG_ENDIAN)) || ++ ((eheader[EI_DATA] == ELFDATA2MSB) && ++ (__BYTE_ORDER == __LITTLE_ENDIAN))); ++ ++ if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) && ++ (swap16(elf32->e_type, swap) == ET_EXEC) && ++ (swap32(elf32->e_version, swap) == EV_CURRENT)) { ++ switch (swap16(elf32->e_machine, swap)) ++ { ++ case EM_386: ++ if (machine_type_mismatch(file, "X86", NULL, 0)) ++ goto bailout; ++ break; ++ ++ default: ++ if (machine_type_mismatch(file, "(unknown)", NULL, 0)) ++ goto bailout; ++ } ++ ++ if (endian_mismatch(file, elf32->e_ident[EI_DATA], 0)) ++ goto bailout; ++ ++ } else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) && ++ (swap16(elf64->e_type, swap) == ET_EXEC) && ++ (swap32(elf64->e_version, swap) == EV_CURRENT)) { ++ switch (swap16(elf64->e_machine, swap)) ++ { ++ case EM_IA_64: ++ if (machine_type_mismatch(file, "IA64", NULL, 0)) ++ goto bailout; ++ break; ++ ++ case EM_PPC64: ++ if (machine_type_mismatch(file, "PPC64", NULL, 0)) ++ goto bailout; ++ break; ++ ++ case EM_X86_64: ++ if (machine_type_mismatch(file, "X86_64", NULL, 0)) ++ goto bailout; ++ break; ++ ++ case EM_386: ++ if (machine_type_mismatch(file, "X86", NULL, 0)) ++ goto bailout; ++ break; ++ ++ default: ++ if (machine_type_mismatch(file, "(unknown)", NULL, 0)) ++ goto bailout; ++ } ++ ++ if (endian_mismatch(file, elf64->e_ident[EI_DATA], 0)) ++ goto bailout; ++ } ++ ++bailout: ++ return(is_bfd_format(file)); ++} ++ ++/* + * Given a choice between two namelists, pick the one for gdb to use. + * For now, just check get their stats and check their sizes; the larger + * one presumably has debug data. +@@ -2427,7 +2660,7 @@ + goto not_system_map; + if (parse_line(buf, mapitems) != 3) + goto not_system_map; +- if ((strlen(mapitems[0]) != MAX_HEXADDR_STRLEN) || ++ if ((strlen(mapitems[0]) > MAX_HEXADDR_STRLEN) || + !hexadecimal(mapitems[0], 0) || (strlen(mapitems[1]) > 1)) + goto not_system_map; + } +@@ -3463,6 +3696,22 @@ + } + + /* ++ * Same as above, but allow for failure. ++ */ ++int ++try_get_symbol_data(char *symbol, long size, void *local) ++{ ++ struct syment *sp; ++ ++ if ((sp = symbol_search(symbol)) && ++ readmem(sp->value, KVADDR, local, ++ size, symbol, RETURN_ON_ERROR|QUIET)) ++ return TRUE; ++ ++ return FALSE; ++} ++ ++/* + * Return the value of a given symbol. + */ + ulong +@@ -3477,6 +3726,34 @@ + } + + /* ++ * Return the value of a symbol from a specific module. 
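++ * The installed module list is scanned for a mod_name match and then
++ * that module's own symbol table is searched; 0 is returned when either
++ * lookup fails, so callers should treat 0 as "not found".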
++ */ ++ulong ++symbol_value_module(char *symbol, char *module) ++{ ++ int i; ++ struct syment *sp, *sp_end; ++ struct load_module *lm; ++ ++ for (i = 0; i < st->mods_installed; i++) { ++ lm = &st->load_modules[i]; ++ ++ if (!STREQ(module, lm->mod_name)) ++ continue; ++ ++ sp = lm->mod_symtable; ++ sp_end = lm->mod_symend; ++ ++ for ( ; sp < sp_end; sp++) { ++ if (STREQ(symbol, sp->name)) ++ return(sp->value); ++ } ++ } ++ ++ return 0; ++} ++ ++/* + * Return the symbol name of a given value, with no allowance for offsets. + * Returns NULL on failure to allow for testing of a value. + */ +@@ -3608,6 +3885,8 @@ + * #define STRUCT_EXISTS(X) (datatype_info((X), NULL, NULL) >= 0) + * #define MEMBER_EXISTS(X,Y) (datatype_info((X), (Y), NULL) >= 0) + * #define MEMBER_SIZE(X,Y) datatype_info((X), (Y), MEMBER_SIZE_REQUEST) ++ * #define MEMBER_TYPE(X,Y) datatype_info((X), (Y), MEMBER_TYPE_REQUEST) ++ * #define ANON_MEMBER_OFFSET(X,Y) datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST) + * + * to determine structure or union sizes, or member offsets. + */ +@@ -3620,6 +3899,9 @@ + ulong type_found; + char buf[BUFSIZE]; + ++ if (dm == ANON_MEMBER_OFFSET_REQUEST) ++ return anon_member_offset(name, member); ++ + strcpy(buf, name); + + req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); +@@ -3743,11 +4025,12 @@ + + FREEBUF(req); + +- if (dm && (dm != MEMBER_SIZE_REQUEST)) { ++ if (dm && (dm != MEMBER_SIZE_REQUEST) && (dm != MEMBER_TYPE_REQUEST)) { + dm->type = type_found; + dm->size = size; + dm->member_size = member_size; + dm->member_typecode = member_typecode; ++ dm->member_offset = offset; + if (req->is_typedef) { + dm->flags |= TYPEDEF; + } +@@ -3762,13 +4045,42 @@ + + if (dm == MEMBER_SIZE_REQUEST) + return member_size; +- else if (member) ++ else if (dm == MEMBER_TYPE_REQUEST) ++ return member_typecode; ++ else if (member) + return offset; + else + return size; + } + + /* ++ * Determine the offset of a member in an anonymous union ++ * in a structure. ++ */ ++static long ++anon_member_offset(char *name, char *member) ++{ ++ int c; ++ char buf[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ ulong value; ++ ++ value = -1; ++ sprintf(buf, "print &((struct %s *)0x0)->%s", name, member); ++ ++ open_tmpfile(); ++ if (gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { ++ rewind(pc->tmpfile); ++ if (fgets(buf, BUFSIZE, pc->tmpfile) && ++ (c = parse_line(strip_linefeeds(buf), arglist))) ++ value = stol(arglist[c-1], RETURN_ON_ERROR|QUIET, NULL); ++ } ++ close_tmpfile(); ++ ++ return value; ++} ++ ++/* + * Get the basic type info for a symbol. Let the caller pass in the + * gnu_request structure to have access to the full response; in either + * case, return the type code. The member field can be used for structures +@@ -3928,25 +4240,59 @@ + void + cmd_struct(void) + { +- int c; ++ cmd_datatype_common(STRUCT_REQUEST); ++} ++/* ++ * This command displays either a union definition, or a formatted display ++ * of the contents of a union at a specified address. If no address is ++ * specified, the union size and the file in which the union is defined ++ * are also displayed. A union member may be appended to the union ++ * name (in a "union.member" format) in order to limit the scope of the data ++ * displayed to that particular member. Structure data is shown in hexadecimal ++ * format. The raw data in a union may be dumped with the -r flag. 
++ */ ++void ++cmd_union(void) ++{ ++ cmd_datatype_common(UNION_REQUEST); ++} ++ ++/* ++ * After determining what type of data type follows the *, this routine ++ * has the identical functionality as cmd_struct() or cmd_union(). ++ */ ++void ++cmd_pointer(void) ++{ ++ cmd_datatype_common(0); ++} ++ ++static void ++cmd_datatype_common(ulong flags) ++{ ++ int i, c; + ulong addr, aflag; + struct syment *sp; + int rawdata; + long len; +- ulong flags; + ulong list_head_offset; + int count; +- struct datatype_member struct_member, *sm; ++ int argc_members; ++ int optind_save; ++ struct datatype_member datatype_member, *dm; ++ char *separator; ++ char *structname, *members; ++ char *memberlist[MAXARGS]; + +- sm = &struct_member; +- count = 1; ++ dm = &datatype_member; ++ count = 0xdeadbeef; + rawdata = 0; + aflag = 0; +- list_head_offset = 0; +- flags = STRUCT_REQUEST; ++ list_head_offset = 0; ++ argc_members = 0; + +- while ((c = getopt(argcnt, args, "c:rvol:")) != EOF) { +- switch(c) ++ while ((c = getopt(argcnt, args, "fuc:rvol:")) != EOF) { ++ switch (c) + { + case 'c': + count = atoi(optarg); +@@ -3969,8 +4315,22 @@ + list_head_offset = stol(optarg, + FAULT_ON_ERROR, NULL); + else if (arg_to_datatype(optarg, +- sm, RETURN_ON_ERROR) > 1) +- list_head_offset = sm->member_offset; ++ dm, RETURN_ON_ERROR) > 1) ++ list_head_offset = dm->member_offset; ++ else ++ error(FATAL, "invalid -l option: %s\n", ++ optarg); ++ break; ++ ++ case 'f': ++ if (!pc->dumpfile) ++ error(FATAL, ++ "-f option requires a dumpfile\n"); ++ pc->curcmd_flags |= MEMTYPE_FILEADDR; ++ break; ++ ++ case 'u': ++ pc->curcmd_flags |= MEMTYPE_UVADDR; + break; + + default: +@@ -3982,35 +4342,42 @@ + if (argerrs || !args[optind]) + cmd_usage(pc->curcmd, SYNOPSIS); + +- if ((arg_to_datatype(args[optind++], sm, FAULT_ON_ERROR) > 1) && +- rawdata) +- error(FATAL, "member-specific output not allowed with -r\n"); +- +- if ((len = sm->size) < 0) { +- error(INFO, "structure not found: %s\n", sm->name); +- cmd_usage(pc->curcmd, SYNOPSIS); +- } +- +- if (!args[optind]) { +- do_datatype_declaration(sm, flags | (sm->flags & TYPEDEF)); +- return; +- } ++ if ((count_chars(args[optind], ',')+1) > MAXARGS) ++ error(FATAL, "too many members in comma-separated list!\n"); ++ ++ if ((count_chars(args[optind], '.') > 1) || ++ (LASTCHAR(args[optind]) == ',') || ++ (LASTCHAR(args[optind]) == '.')) ++ error(FATAL, "invalid format: %s\n", args[optind]); ++ ++ optind_save = optind; ++ ++ /* ++ * Take care of address and count (array). 
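The checks above admit a single "struct.member1,member2,..." argument, which cmd_datatype_common() then has to break into a structure name plus an ordinary member list before resolving each member in turn. A rough sketch of that splitting step using only libc (buffer sizes and names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define MAXMEMBERS 16

    /* Split "name.m1,m2,..." in place into a struct name and member names. */
    static int split_datatype_arg(char *arg, char **structname,
                                  char **members, int max)
    {
        char *dot, *tok;
        int count = 0;

        *structname = arg;
        if ((dot = strchr(arg, '.')) == NULL)
            return 0;                    /* no member part at all */

        *dot = '\0';                     /* terminate the struct name */
        for (tok = strtok(dot + 1, ","); tok && count < max;
             tok = strtok(NULL, ","))
            members[count++] = tok;

        return count;
    }

    int main(void)
    {
        char arg[] = "task_struct.pid,comm,mm";
        char *name, *members[MAXMEMBERS];
        int i, n;

        n = split_datatype_arg(arg, &name, members, MAXMEMBERS);
        printf("struct %s\n", name);
        for (i = 0; i < n; i++)
            printf("  member: %s\n", members[i]);
        return 0;
    }
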
++ */ ++ while (args[++optind]) { ++ if (aflag && (count != 0xdeadbeef)) ++ error(FATAL, "too many arguments!\n"); + +- while (args[optind]) { + if (clean_arg() && IS_A_NUMBER(args[optind])) { + if (aflag) + count = stol(args[optind], + FAULT_ON_ERROR, NULL); + else { +- if (!IS_KVADDR(addr = htol(args[optind], ++ if (pc->curcmd_flags & MEMTYPE_FILEADDR) ++ pc->curcmd_private = stoll(args[optind], ++ FAULT_ON_ERROR, NULL); ++ else if (pc->curcmd_flags & MEMTYPE_UVADDR) { ++ addr = htol(args[optind], FAULT_ON_ERROR, ++ NULL); ++ } else if (!IS_KVADDR(addr = htol(args[optind], + FAULT_ON_ERROR, NULL))) + error(FATAL, + "invalid kernel virtual address: %s\n", + args[optind]); + aflag++; + } +- } +- else if ((sp = symbol_search(args[optind]))) { ++ } else if ((sp = symbol_search(args[optind]))) { + addr = sp->value; + aflag++; + } else { +@@ -4018,301 +4385,137 @@ + fprintf(fp, "possible aternatives:\n"); + if (!symbol_query(args[optind], " ", NULL)) + fprintf(fp, " (none found)\n"); +- return; ++ goto freebuf; + } +- optind++; + } + +- if (!aflag) ++ optind = optind_save; ++ ++ if (count == 0xdeadbeef) ++ count = 1; ++ else if (!aflag) + error(FATAL, "no kernel virtual address argument entered\n"); + ++ if ((flags & SHOW_OFFSET) && aflag) { ++ error(INFO, "-o option not valid with an address argument\n"); ++ flags &= ~SHOW_OFFSET; ++ } ++ + if (list_head_offset) + addr -= list_head_offset; + ++ /* ++ * Handle struct.member[,member] argument format. ++ */ ++ if (strstr(args[optind], ".")) { ++ structname = GETBUF(strlen(args[optind])+1); ++ strcpy(structname, args[optind]); ++ separator = strstr(structname, "."); ++ ++ members = GETBUF(strlen(args[optind])+1); ++ strcpy(members, separator+1); ++ replace_string(members, ",", ' '); ++ argc_members = parse_line(members, memberlist); ++ } else ++ structname = args[optind]; ++ ++ if ((arg_to_datatype(structname, dm, DATATYPE_QUERY|RETURN_ON_ERROR) < 1)) ++ error(FATAL, "invalid data structure reference: %s\n", structname); ++ ++ if ((argc_members > 1) && !aflag) { ++ error(INFO, flags & SHOW_OFFSET ? ++ "-o option not valid with multiple member format\n" : ++ "multiple member format not supported in this syntax\n"); ++ *separator = NULLCHAR; ++ argc_members = 0; ++ flags |= SHOW_OFFSET; ++ } ++ ++ len = dm->size; ++ + if (count < 0) { + addr -= len * abs(count); + addr += len; + } + +- for (c = 0; c < abs(count); c++, addr += len) { +- if (rawdata) +- raw_data_dump(addr, len, flags & STRUCT_VERBOSE); +- else { +- if (sm->member) +- open_tmpfile(); +- +- print_struct(sm->name, addr); +- +- if (sm->member) { +- parse_for_member(sm, PARSE_FOR_DATA); +- close_tmpfile(); ++ if (pc->curcmd_flags & MEMTYPE_FILEADDR) ++ addr = 0; /* unused, but parsed by gdb */ ++ ++ for (c = 0; c < abs(count); c++, addr += len, pc->curcmd_private += len) { ++ if (c) ++ fprintf(fp,"\n"); ++ ++ i = 0; ++ do { ++ if (argc_members) { ++ *separator = '.'; ++ strcpy(separator+1, memberlist[i]); + } +- } ++ ++ switch (arg_to_datatype(structname, dm, RETURN_ON_ERROR)) ++ { ++ case 0: error(FATAL, "invalid data structure reference: %s\n", ++ structname); ++ break; ++ case 1: break; ++ case 2: if (rawdata) ++ error(FATAL, ++ "member-specific output not allowed with -r\n"); ++ break; ++ } ++ ++ if (!(dm->flags & TYPEDEF)) { ++ if (flags &(STRUCT_REQUEST|UNION_REQUEST) ) { ++ if ((flags & (STRUCT_REQUEST|UNION_REQUEST)) != dm->type) ++ goto freebuf; ++ } else ++ flags |= dm->type; ++ } ++ ++ /* ++ * No address was passed -- dump the structure/member declaration. 
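A negative count above means the supplied address is the last array element to display rather than the first, so the loop start is backed up by len * abs(count) and then advanced one element so the given address is still included. The address arithmetic in isolation (the address and structure size below are invented):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        unsigned long long addr = 0xffff810012340000ULL;  /* example address   */
        long len = 0x40;                                  /* example size      */
        long count = -3;                                  /* 3 entries, ending at addr */
        long c;

        if (count < 0) {
            addr -= len * labs(count);                    /* back up ...            */
            addr += len;                                  /* ... keeping addr itself */
        }

        for (c = 0; c < labs(count); c++, addr += len)
            printf("entry %ld at %llx\n", c, addr);

        return 0;
    }
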
++ */ ++ if (!aflag) { ++ do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF)); ++ goto freebuf; ++ } ++ ++ if (!(flags & (UNION_REQUEST|STRUCT_REQUEST))) ++ error(FATAL, "invalid argument"); ++ ++ /* ++ * Display data. ++ */ ++ if (rawdata) ++ raw_data_dump(addr, len, flags & STRUCT_VERBOSE); ++ else { ++ if (dm->member) ++ open_tmpfile(); ++ ++ if (flags & UNION_REQUEST) ++ print_union(dm->name, addr); ++ else if (flags & STRUCT_REQUEST) ++ print_struct(dm->name, addr); ++ ++ if (dm->member) { ++ parse_for_member(dm, PARSE_FOR_DATA); ++ close_tmpfile(); ++ } ++ } ++ } while (++i < argc_members); ++ } ++ ++freebuf: ++ if (argc_members) { ++ FREEBUF(structname); ++ FREEBUF(members); + } + } + ++ + /* +- * After determining what type of data type follows the *, this routine +- * has the identical functionality as cmd_struct() or cmd_union(). +- */ +-void +-cmd_pointer(void) +-{ +- int c; +- ulong addr, aflag; +- struct syment *sp; +- int rawdata; +- long len; +- ulong flags; +- int count; +- struct datatype_member datatype_member, *dm; +- +- dm = &datatype_member; +- rawdata = 0; +- flags = 0; +- aflag = 0; +- count = 1; +- +- while ((c = getopt(argcnt, args, "c:rvo")) != EOF) { +- switch(c) +- { +- case 'c': +- count = atoi(optarg); +- break; +- +- case 'r': +- rawdata = 1; +- break; +- +- case 'v': +- flags |= STRUCT_VERBOSE; +- break; +- +- case 'o': +- flags |= SHOW_OFFSET; +- break; +- +- default: +- argerrs++; +- break; +- } +- } +- +- if (argerrs || !args[optind]) +- cmd_usage(pc->curcmd, SYNOPSIS); +- +- if ((arg_to_datatype(args[optind++], dm, FAULT_ON_ERROR) > 1) && +- rawdata) +- error(FATAL, "member-specific output not allowed with -r\n"); +- +- if ((len = dm->size) < 0) { +- error(INFO, "structure or union not found: %s\n", dm->name); +- cmd_usage(pc->curcmd, SYNOPSIS); +- } +- +- flags |= dm->type; +- +- if (!args[optind]) { +- do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF)); +- return; +- } +- +- while (args[optind]) { +- if (clean_arg() && IS_A_NUMBER(args[optind])) { +- if (aflag) +- count = stol(args[optind], +- FAULT_ON_ERROR, NULL); +- else { +- if (!IS_KVADDR(addr = htol(args[optind], +- FAULT_ON_ERROR, NULL))) +- error(FATAL, +- "invalid kernel virtual address: %s\n", +- args[optind]); +- aflag++; +- } +- } +- else if ((sp = symbol_search(args[optind]))) { +- addr = sp->value; +- aflag++; +- } else { +- fprintf(fp, "symbol not found: %s\n", args[optind]); +- fprintf(fp, "possible aternatives:\n"); +- if (!symbol_query(args[optind], " ", NULL)) +- fprintf(fp, " (none found)\n"); +- return; +- } +- optind++; +- } +- +- if (!(flags & (UNION_REQUEST|STRUCT_REQUEST))) +- error(FATAL, "invalid argument!"); +- +- if (!aflag) +- error(FATAL, "no kernel virtual address argument entered\n"); +- +- if (count < 0) { +- addr -= len * abs(count); +- addr += len; +- } +- +- for (c = 0; c < abs(count); c++, addr += len) { +- if (rawdata) +- raw_data_dump(addr, len, flags & STRUCT_VERBOSE); +- else { +- if (dm->member) +- open_tmpfile(); +- +- if (flags & UNION_REQUEST) +- print_union(dm->name, addr); +- else if (flags & STRUCT_REQUEST) +- print_struct(dm->name, addr); +- +- if (dm->member) { +- parse_for_member(dm, PARSE_FOR_DATA); +- close_tmpfile(); +- } +- } +- } +-} +- +-/* +- * This command displays either a union definition, or a formatted display +- * of the contents of a union at a specified address. If no address is +- * specified, the union size and the file in which the union is defined +- * are also displayed. 
A union member may be appended to the union +- * name (in a "union.member" format) in order to limit the scope of the data +- * displayed to that particular member. Structure data is shown in hexadecimal +- * format. The raw data in a union may be dumped with the -r flag. +- */ +-void +-cmd_union(void) +-{ +- int c; +- ulong addr, aflag; +- struct syment *sp; +- int rawdata; +- long len; +- ulong flags; +- int count; +- struct datatype_member union_member, *um; +- ulong list_head_offset; +- +- um = &union_member; +- count = 1; +- rawdata = 0; +- aflag = 0; +- list_head_offset = 0; +- flags = UNION_REQUEST; +- +- while ((c = getopt(argcnt, args, "c:rvol:")) != EOF) { +- switch(c) +- { +- case 'c': +- count = atoi(optarg); +- break; +- +- case 'r': +- rawdata = 1; +- break; +- +- case 'v': +- flags |= STRUCT_VERBOSE; +- break; +- +- case 'o': +- flags |= SHOW_OFFSET; +- break; +- +- case 'l': +- if (IS_A_NUMBER(optarg)) +- list_head_offset = stol(optarg, +- FAULT_ON_ERROR, NULL); +- else if (arg_to_datatype(optarg, +- um, RETURN_ON_ERROR) > 1) +- list_head_offset = um->member_offset; +- break; +- +- default: +- argerrs++; +- break; +- } +- } +- +- if (argerrs || !args[optind]) +- cmd_usage(pc->curcmd, SYNOPSIS); +- +- if ((arg_to_datatype(args[optind++], um, FAULT_ON_ERROR) > 1) && +- rawdata) +- error(FATAL, "member-specific output not allowed with -r\n"); +- +- if ((len = um->size) < 0) { +- error(INFO, "union not found: %s\n", um->name); +- cmd_usage(pc->curcmd, SYNOPSIS); +- } +- +- if (!args[optind]) { +- do_datatype_declaration(um, flags | (um->flags & TYPEDEF)); +- return; +- } +- +- while (args[optind]) { +- if (clean_arg() && IS_A_NUMBER(args[optind])) { +- if (aflag) +- count = stol(args[optind], +- FAULT_ON_ERROR, NULL); +- else { +- if (!IS_KVADDR(addr = htol(args[optind], +- FAULT_ON_ERROR, NULL))) +- error(FATAL, +- "invalid kernel virtual address: %s\n", +- args[optind]); +- aflag++; +- } +- } +- else if ((sp = symbol_search(args[optind]))) { +- addr = sp->value; +- aflag++; +- } else { +- fprintf(fp, "symbol not found: %s\n", args[optind]); +- fprintf(fp, "possible aternatives:\n"); +- if (!symbol_query(args[optind], " ", NULL)) +- fprintf(fp, " (none found)\n"); +- return; +- } +- optind++; +- } +- +- if (!aflag) +- error(FATAL, "no kernel virtual address argument entered\n"); +- +- if (list_head_offset) +- addr -= list_head_offset; +- +- if (count < 0) { +- addr -= len * abs(count); +- addr += len; +- } +- +- for (c = 0; c < abs(count); c++, addr += len) { +- if (rawdata) +- raw_data_dump(addr, len, flags & STRUCT_VERBOSE); +- else { +- if (um->member) +- open_tmpfile(); +- +- print_union(um->name, addr); +- +- if (um->member) { +- parse_for_member(um, PARSE_FOR_DATA); +- close_tmpfile(); +- } +- } +- } +-} +- +-/* +- * Generic function for dumping data structure declarations, with a small +- * fixup for typedefs, sizes and member offsets. ++ * Generic function for dumping data structure declarations, with a small ++ * fixup for typedefs, sizes and member offsets. + */ + static void + do_datatype_declaration(struct datatype_member *dm, ulong flags) +@@ -4405,7 +4608,10 @@ + + if (!(p1 = strstr(s, "."))) + both = FALSE; +- else { ++ else if (flags & DATATYPE_QUERY) { ++ *p1 = NULLCHAR; ++ both = FALSE; ++ } else { + if ((p1 == s) || !strlen(p1+1)) + goto datatype_member_fatal; + *p1 = NULLCHAR; +@@ -4634,6 +4840,27 @@ + } + + /* ++ * Given the name of an enum, return its value. 
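With DATATYPE_QUERY set above, arg_to_datatype() only needs to know whether the type itself exists, so any ".member" suffix is cut off and ignored rather than resolved. The same truncation in a tiny sketch (the reference string is an example):

    #include <stdio.h>
    #include <string.h>

    /* For an existence-only query, drop any ".member" suffix in place. */
    static int strip_member_for_query(char *ref)
    {
        char *dot = strstr(ref, ".");

        if (dot == NULL)
            return 0;            /* plain type name, nothing to strip */
        *dot = '\0';
        return 1;                /* a member part was present and ignored */
    }

    int main(void)
    {
        char ref[] = "vm_area_struct.vm_flags";

        strip_member_for_query(ref);
        printf("query resolves only: %s\n", ref);
        return 0;
    }
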
++ */ ++int ++enumerator_value(char *e, long *value) ++{ ++ struct datatype_member datatype_member, *dm; ++ ++ dm = &datatype_member; ++ ++ if (arg_to_datatype(e, dm, RETURN_ON_ERROR)) { ++ if ((dm->size >= 0) && ++ (dm->type == ENUM) && dm->tagname) { ++ *value = dm->value; ++ return TRUE; ++ } ++ } ++ ++ return FALSE; ++} ++ ++/* + * Verify that a datatype exists, but return on error. + */ + int +@@ -4705,6 +4932,8 @@ + cmd_usage(pc->curcmd, SYNOPSIS); + + if ((sp = symbol_search(args[optind])) && !args[optind+1]) { ++ if (STRNEQ(sp->name, "per_cpu__") && display_per_cpu_info(sp)) ++ return; + sprintf(buf2, "%s = ", args[optind]); + leader = strlen(buf2); + if (module_symbol(sp->value, NULL, NULL, NULL, output_radix)) +@@ -4758,6 +4987,39 @@ + } + + /* ++ * Display the datatype of the per_cpu__xxx symbol and ++ * the addresses of each its per-cpu instances. ++ */ ++static int ++display_per_cpu_info(struct syment *sp) ++{ ++ int c; ++ ulong addr; ++ char buf[BUFSIZE]; ++ ++ if (((kt->flags & (SMP|PER_CPU_OFF)) != (SMP|PER_CPU_OFF)) || ++ (sp->value < symbol_value("__per_cpu_start")) || ++ (sp->value >= symbol_value("__per_cpu_end")) || ++ !((sp->type == 'd') || (sp->type == 'D'))) ++ return FALSE; ++ ++ fprintf(fp, "PER-CPU DATA TYPE:\n "); ++ sprintf(buf, "whatis %s", sp->name); ++ if (!gdb_pass_through(buf, pc->nullfp, GNU_RETURN_ON_ERROR)) ++ fprintf(fp, "[undetermined type] %s;\n", sp->name); ++ else ++ whatis_variable(sp); ++ ++ fprintf(fp, "PER-CPU ADDRESSES:\n"); ++ for (c = 0; c < kt->cpus; c++) { ++ addr = sp->value + kt->__per_cpu_offset[c]; ++ fprintf(fp, " [%d]: %lx\n", c, addr); ++ } ++ ++ return TRUE; ++} ++ ++/* + * As a latch ditch effort before a command is thrown away by exec_command(), + * args[0] is checked to see whether it's the name of a variable, structure, + * union, or typedef. If so, args[0] is changed to the appropriate command, +@@ -4793,9 +5055,9 @@ + command = "whatis"; + else if (!datatype_exists(args[0])) + return FALSE; +- else if (!arg_to_datatype(buf, dm, RETURN_ON_ERROR)) { ++ else if (!arg_to_datatype(buf, dm, RETURN_ON_ERROR|DATATYPE_QUERY)) + return FALSE; +- } else { ++ else { + if (is_gdb_command(FALSE, RETURN_ON_ERROR)) { + pc->curcmd = pc->program_name; + error(FATAL, +@@ -5056,6 +5318,8 @@ + fprintf(ofp, "%sSTRUCT_VERBOSE", others++ ? "|" : ""); + if (flags & SHOW_OFFSET) + fprintf(ofp, "%sSHOW_OFFSET", others++ ? "|" : ""); ++ if (flags & DATATYPE_QUERY) ++ fprintf(ofp, "%sDATATYPE_QUERY", others++ ? 
"|" : ""); + fprintf(ofp, ")\n"); + } + +@@ -5079,7 +5343,8 @@ + + s = dm->member; + indent = 0; +- on = array = FALSE; ++ array = FALSE; ++ on = 0; + rewind(pc->tmpfile); + + switch (flag) +@@ -5090,7 +5355,7 @@ + next_item: + while (fgets(buf, BUFSIZE, pc->tmpfile)) { + if (STRNEQ(buf, lookfor1) || STRNEQ(buf, lookfor2)) { +- on = TRUE; ++ on++; + if (strstr(buf, "= {")) + indent = count_leading_spaces(buf); + if (strstr(buf, "[")) +@@ -5098,16 +5363,22 @@ + } + + if (on) { ++ if ((indent && (on > 1) && (count_leading_spaces(buf) == indent) && ++ !strstr(buf, "}")) || (buf[0] == '}')) { ++ break; ++ } + fprintf(pc->saved_fp, buf); + if (!indent) + break; + if (strstr(buf, "}") && + (count_leading_spaces(buf) == indent)) + break; ++ on++; + } + } + if (array) { + on = array = FALSE; ++ on = 0; + goto next_item; + } + break; +@@ -5174,7 +5445,7 @@ + { + int i, c, len; + long offset; +- char *target; ++ char *t1, *target; + char *arglist[MAXARGS]; + char buf1[BUFSIZE]; + char fmt[BUFSIZE]; +@@ -5186,6 +5457,9 @@ + return FALSE; + } + ++ if (STRNEQ(inbuf, " ")) ++ goto do_empty_offset; ++ + if (STRNEQ(inbuf, " union {")) + dm->flags |= IN_UNION; + if (STRNEQ(inbuf, " struct {")) +@@ -5215,9 +5489,20 @@ + } + } + } else if (c) { +- target = arglist[c-1]; +- if (!strstr(target, ";")) +- target = NULL; ++ for (i = 0; i < c; i++) { ++ if (STRNEQ(arglist[i], "(*")) { ++ target = arglist[i]+2; ++ if (!(t1 = strstr(target, ")"))) ++ continue; ++ *t1 = NULLCHAR; ++ break; ++ } ++ } ++ if (i == c) { ++ target = arglist[c-1]; ++ if (!strstr(target, ";")) ++ target = NULL; ++ } + } + + if (!target) +@@ -5307,7 +5592,8 @@ + if ((retval = builtin_array_length(s, 0, two_dim))) + return retval; + +- if (symbol_search(s)) { ++ /* symbol_search cannot be done with just kernel type information */ ++ if (!(LKCD_KERNTYPES()) && symbol_search(s)) { + if (!two_dim) { + req = &gnu_request; + if ((get_symbol_type(copy, NULL, req) == +@@ -5417,6 +5703,23 @@ + } + + /* ++ * Get and store the size of a "known" array. ++ * A wrapper for get_array_length(), for cases in which ++ * the name of the result to be stored is different from the ++ * structure.member to be evaluated. ++ */ ++int ++get_array_length_alt(char *name, char *s, int *two_dim, long entry_size) ++{ ++ int retval; ++ ++ retval = get_array_length(s, two_dim, entry_size); ++ if (retval) ++ retval = builtin_array_length(name, retval, two_dim); ++ return retval; ++} ++ ++/* + * Designed for use by non-debug kernels, but used by all. 
+ */ + int +@@ -5433,6 +5736,8 @@ + lenptr = &array_table.kmem_cache_s_c_name; + else if (STREQ(s, "kmem_cache_s.array")) + lenptr = &array_table.kmem_cache_s_array; ++ else if (STREQ(s, "kmem_cache.array")) ++ lenptr = &array_table.kmem_cache_s_array; + else if (STREQ(s, "kmem_cache_s.cpudata")) + lenptr = &array_table.kmem_cache_s_cpudata; + else if (STREQ(s, "log_buf")) +@@ -5469,11 +5774,16 @@ + lenptr = &array_table.prio_array_queue; + else if (STREQ(s, "height_to_maxindex")) + lenptr = &array_table.height_to_maxindex; ++ else if (STREQ(s, "pid_hash")) ++ lenptr = &array_table.pid_hash; + else if (STREQ(s, "free_area")) { + lenptr = &array_table.free_area; + if (two_dim) + dimptr = &array_table.free_area_DIMENSION; +- } ++ } else if (STREQ(s, "kmem_cache.node")) ++ lenptr = &array_table.kmem_cache_node; ++ else if (STREQ(s, "kmem_cache.cpu_slab")) ++ lenptr = &array_table.kmem_cache_cpu_slab; + + if (!lenptr) /* not stored */ + return(len); +@@ -5608,6 +5918,10 @@ + OFFSET(task_struct_timestamp)); + fprintf(fp, " task_struct_thread_info: %ld\n", + OFFSET(task_struct_thread_info)); ++ fprintf(fp, " task_struct_nsproxy: %ld\n", ++ OFFSET(task_struct_nsproxy)); ++ fprintf(fp, " task_struct_rlim: %ld\n", ++ OFFSET(task_struct_rlim)); + + fprintf(fp, " thread_info_task: %ld\n", + OFFSET(thread_info_task)); +@@ -5618,11 +5932,31 @@ + fprintf(fp, " thread_info_previous_esp: %ld\n", + OFFSET(thread_info_previous_esp)); + ++ fprintf(fp, " nsproxy_mnt_ns: %ld\n", ++ OFFSET(nsproxy_mnt_ns)); ++ fprintf(fp, " mnt_namespace_root: %ld\n", ++ OFFSET(mnt_namespace_root)); ++ fprintf(fp, " mnt_namespace_list: %ld\n", ++ OFFSET(mnt_namespace_list)); ++ + fprintf(fp, " pid_link_pid: %ld\n", + OFFSET(pid_link_pid)); + fprintf(fp, " pid_hash_chain: %ld\n", + OFFSET(pid_hash_chain)); + ++ fprintf(fp, " pid_numbers: %ld\n", ++ OFFSET(pid_numbers)); ++ ++ fprintf(fp, " upid_nr: %ld\n", ++ OFFSET(upid_nr)); ++ fprintf(fp, " upid_ns: %ld\n", ++ OFFSET(upid_ns)); ++ fprintf(fp, " upid_pid_chain: %ld\n", ++ OFFSET(upid_pid_chain)); ++ ++ fprintf(fp, " pid_tasks: %ld\n", ++ OFFSET(pid_tasks)); ++ + fprintf(fp, " hlist_node_next: %ld\n", + OFFSET(hlist_node_next)); + fprintf(fp, " hlist_node_pprev: %ld\n", +@@ -5647,6 +5981,11 @@ + OFFSET(signal_struct_count)); + fprintf(fp, " signal_struct_action: %ld\n", + OFFSET(signal_struct_action)); ++ fprintf(fp, " signal_struct_shared_pending: %ld\n", ++ OFFSET(signal_struct_shared_pending)); ++ fprintf(fp, " signal_struct_rlim: %ld\n", ++ OFFSET(signal_struct_rlim)); ++ + fprintf(fp, " task_struct_start_time: %ld\n", + OFFSET(task_struct_start_time)); + fprintf(fp, " task_struct_times: %ld\n", +@@ -5766,10 +6105,22 @@ + OFFSET(mm_struct_pgd)); + fprintf(fp, " mm_struct_rss: %ld\n", + OFFSET(mm_struct_rss)); ++ fprintf(fp, " mm_struct_anon_rss: %ld\n", ++ OFFSET(mm_struct_anon_rss)); ++ fprintf(fp, " mm_struct_file_rss: %ld\n", ++ OFFSET(mm_struct_file_rss)); + fprintf(fp, " mm_struct_total_vm: %ld\n", + OFFSET(mm_struct_total_vm)); + fprintf(fp, " mm_struct_start_code: %ld\n", + OFFSET(mm_struct_start_code)); ++ fprintf(fp, " mm_struct_arg_start: %ld\n", ++ OFFSET(mm_struct_arg_start)); ++ fprintf(fp, " mm_struct_arg_end: %ld\n", ++ OFFSET(mm_struct_arg_end)); ++ fprintf(fp, " mm_struct_env_start: %ld\n", ++ OFFSET(mm_struct_env_start)); ++ fprintf(fp, " mm_struct_env_end: %ld\n", ++ OFFSET(mm_struct_env_end)); + + fprintf(fp, " vm_area_struct_vm_mm: %ld\n", + OFFSET(vm_area_struct_vm_mm)); +@@ -5885,6 +6236,15 @@ + fprintf(fp, " page_pte: %ld\n", + OFFSET(page_pte)); + 
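builtin_array_length() above maps a fixed set of well-known array names ("pid_hash", "kmem_cache.node", and so on) onto slots in the array_table so each length is computed once and then reused. The same store-or-fetch pattern reduced to a table-driven sketch (the table and names here are illustrative, not the real array_table):

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for the array_table cache. */
    static struct cached_len {
        const char *name;
        int len;                 /* 0 means "not computed yet" */
    } cache[] = {
        { "irq_desc",  0 },
        { "free_area", 0 },
        { "pid_hash",  0 },
    };

    /* Return the cached length for "name", storing "store" first if non-zero. */
    static int builtin_len(const char *name, int store)
    {
        size_t i;

        for (i = 0; i < sizeof(cache) / sizeof(cache[0]); i++) {
            if (strcmp(cache[i].name, name))
                continue;
            if (store)
                cache[i].len = store;
            return cache[i].len;
        }
        return 0;                /* not one of the known arrays */
    }

    int main(void)
    {
        builtin_len("pid_hash", 4096);      /* first (and only) computation */
        printf("pid_hash: %d\n", builtin_len("pid_hash", 0));
        printf("unknown : %d\n", builtin_len("vm_area", 0));
        return 0;
    }
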
++ fprintf(fp, " page_inuse: %ld\n", ++ OFFSET(page_inuse)); ++ fprintf(fp, " page_slab: %ld\n", ++ OFFSET(page_slab)); ++ fprintf(fp, " page_first_page: %ld\n", ++ OFFSET(page_first_page)); ++ fprintf(fp, " page_freelist: %ld\n", ++ OFFSET(page_freelist)); ++ + fprintf(fp, " swap_info_struct_swap_file: %ld\n", + OFFSET(swap_info_struct_swap_file)); + fprintf(fp, " swap_info_struct_swap_vfsmnt: %ld\n", +@@ -5922,6 +6282,8 @@ + OFFSET(irq_desc_t_status)); + fprintf(fp, " irq_desc_t_handler: %ld\n", + OFFSET(irq_desc_t_handler)); ++ fprintf(fp, " irq_desc_t_chip: %ld\n", ++ OFFSET(irq_desc_t_chip)); + fprintf(fp, " irq_desc_t_action: %ld\n", + OFFSET(irq_desc_t_action)); + fprintf(fp, " irq_desc_t_depth: %ld\n", +@@ -5967,11 +6329,52 @@ + fprintf(fp, "hw_interrupt_type_set_affinity: %ld\n", + OFFSET(hw_interrupt_type_set_affinity)); + ++ fprintf(fp, " irq_chip_typename: %ld\n", ++ OFFSET(irq_chip_typename)); ++ fprintf(fp, " irq_chip_startup: %ld\n", ++ OFFSET(irq_chip_startup)); ++ fprintf(fp, " irq_chip_shutdown: %ld\n", ++ OFFSET(irq_chip_shutdown)); ++ fprintf(fp, " irq_chip_enable: %ld\n", ++ OFFSET(irq_chip_enable)); ++ fprintf(fp, " irq_chip_disable: %ld\n", ++ OFFSET(irq_chip_disable)); ++ fprintf(fp, " irq_chip_ack: %ld\n", ++ OFFSET(irq_chip_ack)); ++ fprintf(fp, " irq_chip_mask: %ld\n", ++ OFFSET(irq_chip_mask)); ++ fprintf(fp, " irq_chip_mask_ack: %ld\n", ++ OFFSET(irq_chip_mask_ack)); ++ fprintf(fp, " irq_chip_unmask: %ld\n", ++ OFFSET(irq_chip_unmask)); ++ fprintf(fp, " irq_chip_eoi: %ld\n", ++ OFFSET(irq_chip_eoi)); ++ fprintf(fp, " irq_chip_end: %ld\n", ++ OFFSET(irq_chip_end)); ++ fprintf(fp, " irq_chip_set_affinity: %ld\n", ++ OFFSET(irq_chip_set_affinity)); ++ fprintf(fp, " irq_chip_retrigger: %ld\n", ++ OFFSET(irq_chip_retrigger)); ++ fprintf(fp, " irq_chip_set_type: %ld\n", ++ OFFSET(irq_chip_set_type)); ++ fprintf(fp, " irq_chip_set_wake: %ld\n", ++ OFFSET(irq_chip_set_wake)); ++ + fprintf(fp, "irq_cpustat_t___softirq_active: %ld\n", + OFFSET(irq_cpustat_t___softirq_active)); + fprintf(fp, " irq_cpustat_t___softirq_mask: %ld\n", + OFFSET(irq_cpustat_t___softirq_mask)); + ++ fprintf(fp, " files_struct_fdt: %ld\n", ++ OFFSET(files_struct_fdt)); ++ fprintf(fp, " fdtable_max_fds: %ld\n", ++ OFFSET(fdtable_max_fds)); ++ fprintf(fp, " fdtable_max_fdset: %ld\n", ++ OFFSET(fdtable_max_fdset)); ++ fprintf(fp, " fdtable_open_fds: %ld\n", ++ OFFSET(fdtable_open_fds)); ++ fprintf(fp, " fdtable_fd: %ld\n", ++ OFFSET(fdtable_fd)); + fprintf(fp, " files_struct_max_fds: %ld\n", + OFFSET(files_struct_max_fds)); + fprintf(fp, " files_struct_max_fdset: %ld\n", +@@ -5988,6 +6391,12 @@ + OFFSET(file_f_vfsmnt)); + fprintf(fp, " file_f_count: %ld\n", + OFFSET(file_f_count)); ++ fprintf(fp, " file_f_path: %ld\n", ++ OFFSET(file_f_path)); ++ fprintf(fp, " path_mnt: %ld\n", ++ OFFSET(path_mnt)); ++ fprintf(fp, " path_dentry: %ld\n", ++ OFFSET(path_dentry)); + fprintf(fp, " fs_struct_root: %ld\n", + OFFSET(fs_struct_root)); + fprintf(fp, " fs_struct_pwd: %ld\n", +@@ -6165,6 +6574,47 @@ + fprintf(fp, " slab_free: %ld\n", + OFFSET(slab_free)); + ++ fprintf(fp, " kmem_cache_size: %ld\n", ++ OFFSET(kmem_cache_size)); ++ fprintf(fp, " kmem_cache_objsize: %ld\n", ++ OFFSET(kmem_cache_objsize)); ++ fprintf(fp, " kmem_cache_offset: %ld\n", ++ OFFSET(kmem_cache_offset)); ++ fprintf(fp, " kmem_cache_order: %ld\n", ++ OFFSET(kmem_cache_order)); ++ fprintf(fp, " kmem_cache_local_node: %ld\n", ++ OFFSET(kmem_cache_local_node)); ++ fprintf(fp, " kmem_cache_objects: %ld\n", ++ OFFSET(kmem_cache_objects)); ++ 
fprintf(fp, " kmem_cache_inuse: %ld\n", ++ OFFSET(kmem_cache_inuse)); ++ fprintf(fp, " kmem_cache_align: %ld\n", ++ OFFSET(kmem_cache_align)); ++ fprintf(fp, " kmem_cache_name: %ld\n", ++ OFFSET(kmem_cache_name)); ++ fprintf(fp, " kmem_cache_list: %ld\n", ++ OFFSET(kmem_cache_list)); ++ fprintf(fp, " kmem_cache_node: %ld\n", ++ OFFSET(kmem_cache_node)); ++ fprintf(fp, " kmem_cache_cpu_slab: %ld\n", ++ OFFSET(kmem_cache_cpu_slab)); ++ ++ fprintf(fp, " kmem_cache_node_nr_partial: %ld\n", ++ OFFSET(kmem_cache_node_nr_partial)); ++ fprintf(fp, " kmem_cache_node_nr_slabs: %ld\n", ++ OFFSET(kmem_cache_node_nr_slabs)); ++ fprintf(fp, " kmem_cache_node_partial: %ld\n", ++ OFFSET(kmem_cache_node_partial)); ++ fprintf(fp, " kmem_cache_node_full: %ld\n", ++ OFFSET(kmem_cache_node_full)); ++ ++ fprintf(fp, " kmem_cache_cpu_freelist: %ld\n", ++ OFFSET(kmem_cache_cpu_freelist)); ++ fprintf(fp, " kmem_cache_cpu_page: %ld\n", ++ OFFSET(kmem_cache_cpu_page)); ++ fprintf(fp, " kmem_cache_cpu_node: %ld\n", ++ OFFSET(kmem_cache_cpu_node)); ++ + fprintf(fp, " net_device_next: %ld\n", + OFFSET(net_device_next)); + fprintf(fp, " net_device_name: %ld\n", +@@ -6217,6 +6667,11 @@ + fprintf(fp, " inet_opt_num: %ld\n", + OFFSET(inet_opt_num)); + ++ fprintf(fp, " ipv6_pinfo_rcv_saddr: %ld\n", ++ OFFSET(ipv6_pinfo_rcv_saddr)); ++ fprintf(fp, " ipv6_pinfo_daddr: %ld\n", ++ OFFSET(ipv6_pinfo_daddr)); ++ + fprintf(fp, " timer_list_list: %ld\n", + OFFSET(timer_list_list)); + fprintf(fp, " timer_list_next: %ld\n", +@@ -6291,6 +6746,8 @@ + OFFSET(zone_struct_size)); + fprintf(fp, " zone_struct_memsize: %ld\n", + OFFSET(zone_struct_memsize)); ++ fprintf(fp, " zone_struct_zone_start_pfn: %ld\n", ++ OFFSET(zone_struct_zone_start_pfn)); + fprintf(fp, " zone_struct_zone_start_paddr: %ld\n", + OFFSET(zone_struct_zone_start_paddr)); + fprintf(fp, " zone_struct_zone_start_mapnr: %ld\n", +@@ -6324,6 +6781,8 @@ + OFFSET(zone_name)); + fprintf(fp, " zone_spanned_pages: %ld\n", + OFFSET(zone_spanned_pages)); ++ fprintf(fp, " zone_present_pages: %ld\n", ++ OFFSET(zone_present_pages)); + fprintf(fp, " zone_zone_start_pfn: %ld\n", + OFFSET(zone_zone_start_pfn)); + fprintf(fp, " zone_pages_min: %ld\n", +@@ -6332,6 +6791,18 @@ + OFFSET(zone_pages_low)); + fprintf(fp, " zone_pages_high: %ld\n", + OFFSET(zone_pages_high)); ++ fprintf(fp, " zone_vm_stat: %ld\n", ++ OFFSET(zone_vm_stat)); ++ fprintf(fp, " zone_nr_active: %ld\n", ++ OFFSET(zone_nr_active)); ++ fprintf(fp, " zone_nr_inactive: %ld\n", ++ OFFSET(zone_nr_inactive)); ++ fprintf(fp, " zone_all_unreclaimable: %ld\n", ++ OFFSET(zone_all_unreclaimable)); ++ fprintf(fp, " zone_flags: %ld\n", ++ OFFSET(zone_flags)); ++ fprintf(fp, " zone_pages_scanned: %ld\n", ++ OFFSET(zone_pages_scanned)); + + fprintf(fp, " neighbour_next: %ld\n", + OFFSET(neighbour_next)); +@@ -6471,10 +6942,61 @@ + OFFSET(x8664_pda_irqstackptr)); + fprintf(fp, " x8664_pda_level4_pgt: %ld\n", + OFFSET(x8664_pda_level4_pgt)); ++ fprintf(fp, " x8664_pda_me: %ld\n", ++ OFFSET(x8664_pda_me)); + + fprintf(fp, " tss_struct_ist: %ld\n", + OFFSET(tss_struct_ist)); ++ fprintf(fp, " mem_section_section_mem_map: %ld\n", ++ OFFSET(mem_section_section_mem_map)); + ++ fprintf(fp, " vcpu_guest_context_user_regs: %ld\n", ++ OFFSET(vcpu_guest_context_user_regs)); ++ fprintf(fp, " cpu_user_regs_eip: %ld\n", ++ OFFSET(cpu_user_regs_eip)); ++ fprintf(fp, " cpu_user_regs_esp: %ld\n", ++ OFFSET(cpu_user_regs_esp)); ++ fprintf(fp, " cpu_user_regs_rip: %ld\n", ++ OFFSET(cpu_user_regs_rip)); ++ fprintf(fp, " cpu_user_regs_rsp: %ld\n", ++ 
OFFSET(cpu_user_regs_rsp)); ++ fprintf(fp, " unwind_table_core: %ld\n", ++ OFFSET(unwind_table_core)); ++ fprintf(fp, " unwind_table_init: %ld\n", ++ OFFSET(unwind_table_init)); ++ fprintf(fp, " unwind_table_address: %ld\n", ++ OFFSET(unwind_table_address)); ++ fprintf(fp, " unwind_table_size: %ld\n", ++ OFFSET(unwind_table_size)); ++ fprintf(fp, " unwind_table_link: %ld\n", ++ OFFSET(unwind_table_link)); ++ fprintf(fp, " unwind_table_name: %ld\n", ++ OFFSET(unwind_table_name)); ++ ++ fprintf(fp, " rq_cfs: %ld\n", ++ OFFSET(rq_cfs)); ++ fprintf(fp, " rq_rt: %ld\n", ++ OFFSET(rq_rt)); ++ fprintf(fp, " rq_nr_running: %ld\n", ++ OFFSET(rq_nr_running)); ++ fprintf(fp, " task_struct_se: %ld\n", ++ OFFSET(task_struct_se)); ++ fprintf(fp, " sched_entity_run_node: %ld\n", ++ OFFSET(sched_entity_run_node)); ++ fprintf(fp, " cfs_rq_nr_running: %ld\n", ++ OFFSET(cfs_rq_nr_running)); ++ fprintf(fp, " cfs_rq_rb_leftmost: %ld\n", ++ OFFSET(cfs_rq_rb_leftmost)); ++ fprintf(fp, " cfs_rq_tasks_timeline: %ld\n", ++ OFFSET(cfs_rq_tasks_timeline)); ++ fprintf(fp, " rt_rq_active: %ld\n", ++ OFFSET(rt_rq_active)); ++ fprintf(fp, " pcpu_info_vcpu: %ld\n", ++ OFFSET(pcpu_info_vcpu)); ++ fprintf(fp, " pcpu_info_idle: %ld\n", ++ OFFSET(pcpu_info_idle)); ++ fprintf(fp, " vcpu_struct_rq: %ld\n", ++ OFFSET(vcpu_struct_rq)); + + fprintf(fp, "\n size_table:\n"); + fprintf(fp, " page: %ld\n", SIZE(page)); +@@ -6493,6 +7015,10 @@ + fprintf(fp, " array_cache: %ld\n", SIZE(array_cache)); + fprintf(fp, " kmem_bufctl_t: %ld\n", + SIZE(kmem_bufctl_t)); ++ fprintf(fp, " kmem_cache: %ld\n", SIZE(kmem_cache)); ++ fprintf(fp, " kmem_cache_node: %ld\n", SIZE(kmem_cache_node)); ++ fprintf(fp, " kmem_cache_cpu: %ld\n", SIZE(kmem_cache_cpu)); ++ + fprintf(fp, " swap_info_struct: %ld\n", + SIZE(swap_info_struct)); + fprintf(fp, " vm_area_struct: %ld\n", +@@ -6512,6 +7038,7 @@ + fprintf(fp, " fs_struct: %ld\n", SIZE(fs_struct)); + fprintf(fp, " files_struct: %ld\n", + SIZE(files_struct)); ++ fprintf(fp, " fdtable: %ld\n", SIZE(fdtable)); + fprintf(fp, " file: %ld\n", SIZE(file)); + fprintf(fp, " inode: %ld\n", SIZE(inode)); + fprintf(fp, " vfsmount: %ld\n", SIZE(vfsmount)); +@@ -6546,8 +7073,11 @@ + fprintf(fp, " sock: %ld\n", SIZE(sock)); + fprintf(fp, " inet_sock: %ld\n", SIZE(inet_sock)); + fprintf(fp, " socket: %ld\n", SIZE(socket)); ++ fprintf(fp, " in6_addr: %ld\n", SIZE(in6_addr)); + fprintf(fp, " signal_struct: %ld\n", + SIZE(signal_struct)); ++ fprintf(fp, " sigpending_signal: %ld\n", ++ SIZE(sigpending_signal)); + fprintf(fp, " signal_queue: %ld\n", + SIZE(signal_queue)); + fprintf(fp, " sigqueue: %ld\n", SIZE(sigqueue)); +@@ -6601,6 +7131,8 @@ + + fprintf(fp, " x8664_pda: %ld\n", + SIZE(x8664_pda)); ++ fprintf(fp, " ppc64_paca: %ld\n", ++ SIZE(ppc64_paca)); + fprintf(fp, " gate_struct: %ld\n", + SIZE(gate_struct)); + fprintf(fp, " tss_struct: %ld\n", +@@ -6609,7 +7141,22 @@ + SIZE(task_struct_start_time)); + fprintf(fp, " cputime_t: %ld\n", + SIZE(cputime_t)); +- ++ fprintf(fp, " mem_section: %ld\n", ++ SIZE(mem_section)); ++ fprintf(fp, " pid_link: %ld\n", ++ SIZE(pid_link)); ++ fprintf(fp, " upid: %ld\n", ++ SIZE(upid)); ++ fprintf(fp, " unwind_table: %ld\n", ++ SIZE(unwind_table)); ++ fprintf(fp, " rlimit: %ld\n", ++ SIZE(rlimit)); ++ fprintf(fp, " cfs_rq: %ld\n", ++ SIZE(cfs_rq)); ++ fprintf(fp, " pcpu_info: %ld\n", ++ SIZE(pcpu_info)); ++ fprintf(fp, " vcpu_struct: %ld\n", ++ SIZE(vcpu_struct)); + + fprintf(fp, "\n array_table:\n"); + /* +@@ -6663,6 +7210,12 @@ + get_array_length("prio_array.queue", NULL, 
SIZE(list_head))); + fprintf(fp, " height_to_maxindex: %d\n", + ARRAY_LENGTH(height_to_maxindex)); ++ fprintf(fp, " pid_hash: %d\n", ++ ARRAY_LENGTH(pid_hash)); ++ fprintf(fp, " kmem_cache_node: %d\n", ++ ARRAY_LENGTH(kmem_cache_node)); ++ fprintf(fp, " kmem_cache_cpu_slab: %d\n", ++ ARRAY_LENGTH(kmem_cache_cpu_slab)); + + if (spec) { + int in_size_table, in_array_table, arrays, offsets, sizes; +@@ -6890,6 +7443,10 @@ + SEC_HAS_CONTENTS)) + st->flags |= NO_SEC_CONTENTS; + } ++ if (STREQ(bfd_get_section_name(bfd, section), ".eh_frame")) { ++ st->dwarf_eh_frame_file_offset = (off_t)section->filepos; ++ st->dwarf_eh_frame_size = (ulong)bfd_section_size(bfd, section); ++ } + break; + + case (uint)MODULE_SECTIONS: +@@ -6906,6 +7463,10 @@ + SEC_HAS_CONTENTS)) + st->flags |= NO_SEC_CONTENTS; + } ++ if (STREQ(bfd_get_section_name(bfd, section), ".eh_frame")) { ++ st->dwarf_eh_frame_file_offset = (off_t)section->filepos; ++ st->dwarf_eh_frame_size = (ulong)bfd_section_size(bfd, section); ++ } + break; + + default: +@@ -6960,8 +7521,9 @@ + i = lm->mod_sections; + lm->mod_section_data[i].section = section; + lm->mod_section_data[i].priority = prio; +- lm->mod_section_data[i].flags = section->flags; ++ lm->mod_section_data[i].flags = section->flags & ~SEC_FOUND; + lm->mod_section_data[i].size = bfd_section_size(bfd, section); ++ lm->mod_section_data[i].offset = 0; + if (strlen(name) < MAX_MOD_SEC_NAME) + strcpy(lm->mod_section_data[i].name, name); + else +@@ -7013,7 +7575,7 @@ + */ + + static void +-calculate_load_order(struct load_module *lm, bfd *bfd) ++calculate_load_order_v1(struct load_module *lm, bfd *bfd) + { + int i; + asection *section; +@@ -7073,6 +7635,134 @@ + } + + /* ++ * Later versions of kmod no longer get the help from insmod, ++ * and while the heuristics might work, it's relatively ++ * straightforward to just try to match the sections in the object file ++ * with exported symbols. ++ * ++ * This works well if kallsyms is set, but may not work so well in other ++ * instances. ++ */ ++static void ++calculate_load_order_v2(struct load_module *lm, bfd *bfd, int dynamic, ++ void *minisyms, long symcount, unsigned int size) ++{ ++ struct syment *s1, *s2; ++ ulong sec_start, sec_end; ++ bfd_byte *from, *fromend; ++ asymbol *store; ++ asymbol *sym; ++ symbol_info syminfo; ++ char *secname; ++ int i; ++ ++ if ((store = bfd_make_empty_symbol(bfd)) == NULL) ++ error(FATAL, "bfd_make_empty_symbol() failed\n"); ++ ++ s1 = lm->mod_symtable; ++ s2 = lm->mod_symend; ++ while (s1 < s2) { ++ ulong sym_offset = s1->value - lm->mod_base; ++ if (MODULE_PSEUDO_SYMBOL(s1)) { ++ s1++; ++ continue; ++ } ++ ++ /* Skip over symbols whose sections have been identified. */ ++ for (i = 0; i < lm->mod_sections; i++) { ++ if ((lm->mod_section_data[i].flags & SEC_FOUND) == 0) ++ continue; ++ if (sym_offset >= lm->mod_section_data[i].offset ++ && sym_offset < lm->mod_section_data[i].offset ++ + lm->mod_section_data[i].size) { ++ break; ++ } ++ } ++ ++ /* Matched one of the sections. Skip symbol. */ ++ if (i < lm->mod_sections) { ++ if (CRASHDEBUG(2)) { ++ fprintf(fp, "skip %lx %s %s\n", s1->value, s1->name, ++ lm->mod_section_data[i].name); ++ } ++ s1++; ++ continue; ++ } ++ ++ /* Find the symbol in the object file. 
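The matching strategy described above needs only one resolved symbol per section: subtracting the symbol's value in the object file from its address in the kernel gives the section's load address, and subtracting the module base turns that into the per-section offset. The arithmetic by itself, without the bfd plumbing (all numbers are invented):

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical values as they might come from a dump and a .ko file. */
        unsigned long long mod_base    = 0xffffffff88100000ULL;
        unsigned long long kernel_sym  = 0xffffffff88105a40ULL; /* in the dump */
        unsigned long long objfile_sym = 0x0a40ULL;             /* same symbol */

        /* section start = symbol address in the kernel - value in the .ko */
        unsigned long long sec_start  = kernel_sym - objfile_sym;
        unsigned long long sec_offset = sec_start - mod_base;

        printf("section loads at %llx (offset %llx into the module)\n",
            sec_start, sec_offset);
        return 0;
    }
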
*/ ++ from = (bfd_byte *) minisyms; ++ fromend = from + symcount * size; ++ secname = NULL; ++ for (; from < fromend; from += size) { ++ if ((sym = bfd_minisymbol_to_symbol(bfd, dynamic, from, ++ store)) == NULL) ++ error(FATAL, ++ "bfd_minisymbol_to_symbol() failed\n"); ++ ++ bfd_get_symbol_info(bfd, sym, &syminfo); ++ if (CRASHDEBUG(3)) { ++ fprintf(fp,"matching sym %s %lx against bfd %s %lx\n", ++ s1->name, (long) s1->value, syminfo.name, ++ (long) syminfo.value); ++ } ++ if (strcmp(syminfo.name, s1->name) == 0) { ++ secname = (char *)bfd_get_section_name(bfd, sym->section); ++ break; ++ } ++ ++ } ++ if (secname == NULL) { ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "symbol %s not found in module\n", s1->name); ++ } ++ s1++; ++ continue; ++ } ++ ++ /* Match the section it came in. */ ++ for (i = 0; i < lm->mod_sections; i++) { ++ if (STREQ(lm->mod_section_data[i].name, secname)) { ++ break; ++ } ++ } ++ ++ if (i == lm->mod_sections) { ++ fprintf(fp, "?? Section %s not found for symbol %s\n", ++ secname, s1->name); ++ s1++; ++ continue; ++ } ++ ++ /* Update the offset information for the section */ ++ sec_start = s1->value - syminfo.value; ++ sec_end = sec_start + lm->mod_section_data[i].size; ++ lm->mod_section_data[i].offset = sec_start - lm->mod_base; ++ lm->mod_section_data[i].flags |= SEC_FOUND; ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "update sec offset sym %s @ %lx val %lx section %s\n", ++ s1->name, s1->value, syminfo.value, secname); ++ } ++ ++ if (strcmp(secname, ".text") == 0) ++ lm->mod_text_start = sec_start; ++ ++ if (strcmp(secname, ".bss") == 0) ++ lm->mod_bss_start = sec_start; ++ ++ if (strcmp(secname, ".data") == 0) ++ lm->mod_data_start = sec_start; ++ ++ if (strcmp(secname, ".data") == 0) ++ lm->mod_data_start = sec_start; ++ ++ if (strcmp(secname, ".rodata") == 0) ++ lm->mod_rodata_start = sec_start; ++ s1++; ++ } ++} ++ ++/* + * Later versons of insmod store basic address information of each + * module in a format that looks like the following example of the + * nfsd module: +@@ -7185,8 +7875,8 @@ + } + + if (CRASHDEBUG(1)) +- fprintf(fp, "load_module_symbols: %s %s %lx\n", +- modref, namelist, base_addr); ++ fprintf(fp, "load_module_symbols: %s %s %lx %lx\n", ++ modref, namelist, base_addr, kt->flags); + + switch (kt->flags & (KMOD_V1|KMOD_V2)) + { +@@ -7199,7 +7889,8 @@ + strcpy(lm->mod_namelist, namelist); + else + strncpy(lm->mod_namelist, namelist, MAX_MOD_NAMELIST-1); +- goto add_symbols; ++ if (st->flags & USE_OLD_ADD_SYM) ++ goto add_symbols; + } + + if ((mbfd = bfd_openr(namelist, NULL)) == NULL) +@@ -7219,6 +7910,10 @@ + else if (symcount == 0) + error(FATAL, "no symbols in object file: %s\n", namelist); + ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "%ld symbols found in obj file %s\n", symcount, ++ namelist); ++ } + sort_x = bfd_make_empty_symbol(mbfd); + sort_y = bfd_make_empty_symbol(mbfd); + if (sort_x == NULL || sort_y == NULL) +@@ -7251,17 +7946,33 @@ + add_symbol_file(struct load_module *lm) + { + struct gnu_request request, *req; +- char buf[BUFSIZE]; ++ char buf[BUFSIZE]; ++ int i, len; ++ char *secname; ++ ++ for (i = len = 0; i < lm->mod_sections; i++) ++ { ++ secname = lm->mod_section_data[i].name; ++ if ((lm->mod_section_data[i].flags & SEC_FOUND) && ++ !STREQ(secname, ".text")) { ++ sprintf(buf, " -s %s 0x%lx", secname, ++ lm->mod_section_data[i].offset + lm->mod_base); ++ len += strlen(buf); ++ } ++ } + + req = &request; + BZERO(req, sizeof(struct gnu_request)); + req->command = GNU_ADD_SYMBOL_FILE; + req->addr = (ulong)lm; +- req->buf = buf; ++ 
req->buf = GETBUF(len+BUFSIZE); + if (!CRASHDEBUG(1)) + req->fp = pc->nullfp; + +- gdb_interface(req); ++ st->flags |= ADD_SYMBOL_FILE; ++ gdb_interface(req); ++ st->flags &= ~ADD_SYMBOL_FILE; ++ FREEBUF(req->buf); + + sprintf(buf, "set complaints 0"); + gdb_pass_through(buf, NULL, 0); +@@ -7382,7 +8093,12 @@ + + bfd_map_over_sections(bfd, section_header_info, MODULE_SECTIONS); + +- calculate_load_order(lm, bfd); ++ if (kt->flags & KMOD_V1) ++ calculate_load_order_v1(lm, bfd); ++ else ++ calculate_load_order_v2(lm, bfd, dynamic, minisyms, ++ symcount, size); ++ + + from = (bfd_byte *) minisyms; + fromend = from + symcount * size; +@@ -7395,104 +8111,112 @@ + bfd_get_symbol_info(bfd, sym, &syminfo); + + secname = (char *)bfd_get_section_name(bfd, sym->section); ++ found = 0; + +- switch (syminfo.type) +- { +- case 'b': +- case 'B': +- if (CRASHDEBUG(2)) +- fprintf(fp, "%08lx (%c) [%s] %s\n", +- (ulong)syminfo.value, +- syminfo.type, secname, syminfo.name); ++ if (kt->flags & KMOD_V1) { ++ switch (syminfo.type) ++ { ++ case 'b': ++ case 'B': ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%08lx (%c) [%s] %s\n", ++ (ulong)syminfo.value, ++ syminfo.type, secname, syminfo.name); + +- syminfo.value += lm->mod_bss_start; +- strcpy(name, syminfo.name); +- strip_module_symbol_end(name); ++ if (!lm->mod_bss_start) ++ break; + +- if (machdep->verify_symbol(name, syminfo.value, +- syminfo.type)) { +- sp->value = syminfo.value; +- sp->type = syminfo.type; +- +- namespace_ctl(NAMESPACE_INSTALL, +- &lm->mod_load_namespace, sp, name); ++ syminfo.value += lm->mod_bss_start; ++ found = 1; ++ break; + +- if (CRASHDEBUG(1)) +- fprintf(fp, "%08lx %s\n", sp->value, +- name); ++ case 'd': ++ case 'D': ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%08lx (%c) [%s] %s\n", ++ (ulong)syminfo.value, ++ syminfo.type, secname, syminfo.name); ++ ++ if (STREQ(secname, ".rodata")) { ++ if (!lm->mod_rodata_start) ++ break; ++ syminfo.value += lm->mod_rodata_start; ++ } else { ++ if (!lm->mod_data_start) ++ break; ++ syminfo.value += lm->mod_data_start; ++ } ++ found = 1; ++ break; + +- sp++; +- lm->mod_load_symcnt++; +- } +- break; ++ case 't': ++ case 'T': ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%08lx (%c) [%s] %s\n", ++ (ulong)syminfo.value, ++ syminfo.type, secname, syminfo.name); + +- case 'd': +- case 'D': +- if (CRASHDEBUG(2)) +- fprintf(fp, "%08lx (%c) [%s] %s\n", +- (ulong)syminfo.value, +- syminfo.type, secname, syminfo.name); ++ if (! lm->mod_text_start) { ++ break; ++ } + +- if (STREQ(secname, ".rodata")) +- syminfo.value += lm->mod_rodata_start; +- else +- syminfo.value += lm->mod_data_start; ++ if ((st->flags & INSMOD_BUILTIN) && ++ (STREQ(name, "init_module") || ++ STREQ(name, "cleanup_module"))) ++ break; ++ ++ syminfo.value += lm->mod_text_start; ++ found = 1; ++ break; + ++ default: ++ break; ++ } ++ ++ } else { ++ /* Match the section it came in. 
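add_symbol_file() above now hands gdb one "-s <section> <address>" pair per located non-text section, so add-symbol-file places .data, .bss and friends at their real load addresses instead of guessing. A short sketch of assembling that command string (module name and section list are made up):

    #include <stdio.h>

    struct demo_sec {
        const char *name;
        unsigned long long addr;
        int found;
    };

    int main(void)
    {
        struct demo_sec secs[] = {
            { ".text", 0xffffffff88102000ULL, 1 },
            { ".data", 0xffffffff88105000ULL, 1 },
            { ".bss",  0xffffffff88107000ULL, 1 },
            { ".note", 0x0,                   0 },
        };
        char cmd[256];
        size_t i, n;

        /* .text supplies the primary address; every other located section
         * is appended with its own -s argument. */
        n = snprintf(cmd, sizeof(cmd), "add-symbol-file demo.ko 0x%llx",
                secs[0].addr);
        for (i = 1; i < sizeof(secs) / sizeof(secs[0]); i++) {
            if (!secs[i].found)
                continue;
            n += snprintf(cmd + n, sizeof(cmd) - n, " -s %s 0x%llx",
                    secs[i].name, secs[i].addr);
        }

        printf("%s\n", cmd);
        return 0;
    }
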
*/ ++ for (i = 0; i < lm->mod_sections; i++) { ++ if (STREQ(lm->mod_section_data[i].name, secname) ++ && (lm->mod_section_data[i].flags & SEC_FOUND)) { ++ break; ++ } ++ } ++ if (i < lm->mod_sections) { ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%08lx (%c) [%s] %s\n", ++ (ulong)syminfo.value, ++ syminfo.type, secname, syminfo.name); ++ ++ if ((st->flags & INSMOD_BUILTIN) && ++ (STREQ(name, "init_module") || ++ STREQ(name, "cleanup_module"))) { ++ found = 0; ++ } else { ++ syminfo.value += lm->mod_section_data[i].offset + lm->mod_base; ++ found = 1; ++ } ++ } ++ } ++ ++ if (found) { + strcpy(name, syminfo.name); + strip_module_symbol_end(name); + +- if (machdep->verify_symbol(name, syminfo.value, +- syminfo.type)) { ++ if (machdep->verify_symbol(name, syminfo.value, ++ syminfo.type)) { + sp->value = syminfo.value; +- sp->type = syminfo.type; ++ sp->type = syminfo.type; + namespace_ctl(NAMESPACE_INSTALL, +- &lm->mod_load_namespace, sp, name); ++ &lm->mod_load_namespace, sp, name); + + if (CRASHDEBUG(1)) +- fprintf(fp, "%08lx %s\n", sp->value, ++ fprintf(fp, "installing %c %08lx %s\n", syminfo.type, sp->value, + name); + + sp++; + lm->mod_load_symcnt++; + } +- break; +- +- case 't': +- case 'T': +- if (CRASHDEBUG(2)) +- fprintf(fp, "%08lx (%c) [%s] %s\n", +- (ulong)syminfo.value, +- syminfo.type, secname, syminfo.name); +- +- syminfo.value += lm->mod_text_start; +- strcpy(name, syminfo.name); +- strip_module_symbol_end(name); +- +- if ((st->flags & INSMOD_BUILTIN) && +- (STREQ(name, "init_module") || +- STREQ(name, "cleanup_module"))) +- break; +- +- if (machdep->verify_symbol(name, syminfo.value, +- syminfo.type)) { +- sp->value = syminfo.value; +- sp->type = syminfo.type; +- namespace_ctl(NAMESPACE_INSTALL, +- &lm->mod_load_namespace, sp, name); +- +- if (CRASHDEBUG(1)) +- fprintf(fp, "%08lx %s\n", sp->value, +- name); +- +- sp++; +- lm->mod_load_symcnt++; +- } +- +- break; +- +- default: +- break; +- } ++ } + } + + lm->mod_load_symend = &lm->mod_load_symtable[lm->mod_load_symcnt]; +@@ -7713,7 +8437,7 @@ + ulong start, end; + char *modbuf; + ulong maxchunk, alloc; +- long offset; ++ long offset = 0; + + start = roundup(lm->mod_size_of_struct, sizeof(long)) + lm->mod_base; + end = lm->mod_base + lm->mod_size; +@@ -8089,6 +8813,10 @@ + struct syment *sp_array[200], *sp; + + if (req->name == PATCH_KERNEL_SYMBOLS_START) { ++ if (kt->flags & RELOC_FORCE) ++ error(WARNING, ++ "\nkernel relocated [%ldMB]: patching %ld gdb minimal_symbol values\n", ++ kt->relocate >> 20, st->symcnt); + fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : + "\nplease wait... (patching %ld gdb minimal_symbol values) ", + st->symcnt); +--- crash/defs.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/defs.h 2008-01-16 11:45:00.000000000 -0500 +@@ -1,8 +1,8 @@ + /* defs.h - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2002 Silicon Graphics, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify +@@ -54,12 +54,16 @@ + + #define TRUE (1) + #define FALSE (0) ++#define STR(x) #x ++#ifndef offsetof ++# define offsetof(TYPE, MEMBER) ((ulong)&((TYPE *)0)->MEMBER) ++#endif + + #ifdef X86 +-#define NR_CPUS (32) ++#define NR_CPUS (256) + #endif + #ifdef X86_64 +-#define NR_CPUS (32) ++#define NR_CPUS (256) + #endif + #ifdef ALPHA + #define NR_CPUS (64) +@@ -68,7 +72,7 @@ + #define NR_CPUS (32) + #endif + #ifdef IA64 +-#define NR_CPUS (512) ++#define NR_CPUS (4096) + #endif + #ifdef PPC64 + #define NR_CPUS (128) +@@ -98,6 +102,7 @@ + #define LASTCHAR(s) (s[strlen(s)-1]) + #define FIRSTCHAR(s) (s[0]) + #define QUOTED_STRING(s) ((FIRSTCHAR(s) == '"') && (LASTCHAR(s) == '"')) ++#define PATHEQ(A, B) ((A) && (B) && (pathcmp((char *)(A), (char *)(B)) == 0)) + + #ifdef roundup + #undef roundup +@@ -106,6 +111,8 @@ + + typedef uint64_t physaddr_t; + ++#define PADDR_NOT_AVAILABLE (0x1ULL) ++ + typedef unsigned long long int ulonglong; + struct number_option { + ulong num; +@@ -155,8 +162,8 @@ + #define UNLINK_MODULES (0x1000000000ULL) + #define S390D (0x2000000000ULL) + #define REM_S390D (0x4000000000ULL) +-#define PC_UNUSED_1 (0x8000000000ULL) +-#define PC_UNUSED_2 (0x10000000000ULL) ++#define SYSRQ (0x8000000000ULL) ++#define KDUMP (0x10000000000ULL) + #define NETDUMP (0x20000000000ULL) + #define REM_NETDUMP (0x40000000000ULL) + #define SYSMAP (0x80000000000ULL) +@@ -169,11 +176,18 @@ + #define VERSION_QUERY (0x4000000000000ULL) + #define READNOW (0x8000000000000ULL) + #define NOCRASHRC (0x10000000000000ULL) ++#define INIT_IFILE (0x20000000000000ULL) ++#define XENDUMP (0x40000000000000ULL) ++#define XEN_HYPER (0x80000000000000ULL) ++#define XEN_CORE (0x100000000000000ULL) ++#define PLEASE_WAIT (0x200000000000000ULL) ++#define IFILE_ERROR (0x400000000000000ULL) ++#define KERNTYPES (0x800000000000000ULL) + + #define ACTIVE() (pc->flags & LIVE_SYSTEM) + #define DUMPFILE() (!(pc->flags & LIVE_SYSTEM)) +-#define MEMORY_SOURCES (NETDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP) +-#define DUMPFILE_TYPES (DISKDUMP|NETDUMP|MCLXCD|LKCD|S390D) ++#define MEMORY_SOURCES (NETDUMP|KDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP|XENDUMP) ++#define DUMPFILE_TYPES (DISKDUMP|NETDUMP|KDUMP|MCLXCD|LKCD|S390D|XENDUMP) + #define REMOTE() (pc->flags & REMOTE_DAEMON) + #define REMOTE_ACTIVE() (pc->flags & REM_LIVE_SYSTEM) + #define REMOTE_DUMPFILE() \ +@@ -182,16 +196,35 @@ + #define LKCD_DUMPFILE() (pc->flags & (LKCD|REM_LKCD)) + #define NETDUMP_DUMPFILE() (pc->flags & (NETDUMP|REM_NETDUMP)) + #define DISKDUMP_DUMPFILE() (pc->flags & DISKDUMP) ++#define KDUMP_DUMPFILE() (pc->flags & KDUMP) ++#define XENDUMP_DUMPFILE() (pc->flags & XENDUMP) ++#define XEN_HYPER_MODE() (pc->flags & XEN_HYPER) ++#define SYSRQ_TASK(X) ((pc->flags & SYSRQ) && is_task_active(X)) ++#define XEN_CORE_DUMPFILE() (pc->flags & XEN_CORE) ++#define LKCD_KERNTYPES() (pc->flags & KERNTYPES) + + #define NETDUMP_LOCAL (0x1) /* netdump_data flags */ + #define NETDUMP_REMOTE (0x2) +-#define NETDUMP_VALID() (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE)) ++#define VMCORE_VALID() (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE|KDUMP_LOCAL)) + #define NETDUMP_ELF32 (0x4) + #define NETDUMP_ELF64 (0x8) + #define PARTIAL_DUMP (0x10) /* netdump or diskdump */ ++#define KDUMP_ELF32 (0x20) ++#define KDUMP_ELF64 (0x40) ++#define KDUMP_LOCAL (0x80) ++ ++#define DUMPFILE_FORMAT(flags) ((flags) & \ ++ (NETDUMP_ELF32|NETDUMP_ELF64|KDUMP_ELF32|KDUMP_ELF64)) ++ ++#define DISKDUMP_LOCAL (0x1) ++#define 
KDUMP_CMPRS_LOCAL (0x2) ++#define ERROR_EXCLUDED (0x4) ++#define ZERO_EXCLUDED (0x8) ++#define DISKDUMP_VALID() (dd->flags & DISKDUMP_LOCAL) ++#define KDUMP_CMPRS_VALID() (dd->flags & KDUMP_CMPRS_LOCAL) + +-#define DISKDUMP_LOCAL (0x1) +-#define DISKDUMP_VALID() (dd->flags & DISKDUMP_LOCAL) ++#define XENDUMP_LOCAL (0x1) ++#define XENDUMP_VALID() (xd->flags & XENDUMP_LOCAL) + + #define CRASHDEBUG(x) (pc->debug >= (x)) + +@@ -210,6 +243,7 @@ + #define SEEK_ERROR (-1) + #define READ_ERROR (-2) + #define WRITE_ERROR (-3) ++#define PAGE_EXCLUDED (-4) + + #define RESTART() (longjmp(pc->main_loop_env, 1)) + #define RESUME_FOREACH() (longjmp(pc->foreach_loop_env, 1)) +@@ -319,15 +353,28 @@ + #define SCROLL_NONE 0 + #define SCROLL_LESS 1 + #define SCROLL_MORE 2 ++#define SCROLL_CRASHPAGER 3 + ulong redirect; /* per-cmd origin and output flags */ + pid_t stdpipe_pid; /* per-cmd standard output pipe's pid */ + pid_t pipe_pid; /* per-cmd output pipe's pid */ + pid_t pipe_shell_pid; /* per-cmd output pipe's shell pid */ + char pipe_command[BUFSIZE]; /* pipe command line */ ++ struct command_table_entry *cmd_table; /* linux/xen command table */ + char *curcmd; /* currently-executing command */ + char *lastcmd; /* previously-executed command */ + ulong cmdgencur; /* current command generation number */ +- ulong cmdgenspec; /* specified command generation num */ ++ ulong curcmd_flags; /* general purpose per-command flag */ ++#define XEN_MACHINE_ADDR (0x1) ++#define REPEAT (0x2) ++#define IDLE_TASK_SHOWN (0x4) ++#define TASK_SPECIFIED (0x8) ++#define MEMTYPE_UVADDR (0x10) ++#define MEMTYPE_FILEADDR (0x20) ++#define HEADER_PRINTED (0x40) ++#define BAD_INSTRUCTION (0x80) ++#define UD2A_INSTRUCTION (0x100) ++#define IRQ_IN_USE (0x200) ++ ulonglong curcmd_private; /* general purpose per-command info */ + int cur_gdb_cmd; /* current gdb command */ + int last_gdb_cmd; /* previously-executed gdb command */ + int sigint_cnt; /* number of ignored SIGINTs */ +@@ -347,11 +394,11 @@ + struct extension_table *curext; /* extension being loaded */ + int (*readmem)(int, void *, int, ulong, physaddr_t); /* memory access */ + int (*writemem)(int, void *, int, ulong, physaddr_t);/* memory access */ ++ ulong ifile_in_progress; /* original xxx_IFILE flags */ ++ off_t ifile_offset; /* current offset into input file */ ++ char *runtime_ifile_cmd; /* runtime command using input file */ + }; + +-#define UNIQUE_COMMAND(s) \ +- (STREQ(pc->curcmd, s) && (pc->cmdgencur == pc->cmdgenspec)) +- + #define READMEM pc->readmem + + typedef void (*cmd_func_t)(void); +@@ -365,6 +412,7 @@ + + #define REFRESH_TASK_TABLE (0x1) /* command_table_entry flags */ + #define HIDDEN_COMMAND (0x2) ++#define CLEANUP (0x4) /* for extensions only */ + + /* + * A linked list of extension table structures keeps track of the current +@@ -407,9 +455,34 @@ + #define KALLSYMS_V2 (0x2000) + #define TVEC_BASES_V2 (0x4000) + #define GCC_3_3_3 (0x8000) ++#define USE_OLD_BT (0x10000) ++#define ARCH_XEN (0x20000) ++#define NO_IKCONFIG (0x40000) ++#define DWARF_UNWIND (0x80000) ++#define NO_DWARF_UNWIND (0x100000) ++#define DWARF_UNWIND_MEMORY (0x200000) ++#define DWARF_UNWIND_EH_FRAME (0x400000) ++#define DWARF_UNWIND_CAPABLE (DWARF_UNWIND_MEMORY|DWARF_UNWIND_EH_FRAME) ++#define DWARF_UNWIND_MODULES (0x800000) ++#define BUGVERBOSE_OFF (0x1000000) ++#define RELOC_SET (0x2000000) ++#define RELOC_FORCE (0x4000000) ++#define ARCH_OPENVZ (0x8000000) + + #define GCC_VERSION_DEPRECATED (GCC_3_2|GCC_3_2_3|GCC_2_96|GCC_3_3_2|GCC_3_3_3) + ++#define XEN() (kt->flags & ARCH_XEN) 
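All of the new bits above follow one convention: a single bit per capability in a wide flags word, tested through small convenience macros rather than open-coded masks. The pattern in miniature (these names are placeholders, not the real defs.h bits):

    #include <stdio.h>

    typedef unsigned long long flagword_t;

    /* Placeholder bits in the style of the defs.h flag definitions. */
    #define DEMO_LIVE     (0x1ULL)
    #define DEMO_KDUMP    (0x2ULL)
    #define DEMO_XENDUMP  (0x4ULL)

    #define DEMO_DUMPFILE(f)   (!((f) & DEMO_LIVE))
    #define DEMO_FORMAT(f)     ((f) & (DEMO_KDUMP|DEMO_XENDUMP))

    int main(void)
    {
        flagword_t flags = DEMO_KDUMP;

        if (DEMO_DUMPFILE(flags))
            printf("dumpfile session, format bits: 0x%llx\n", DEMO_FORMAT(flags));
        return 0;
    }
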
++#define OPENVZ() (kt->flags & ARCH_OPENVZ) ++ ++#define XEN_MACHINE_TO_MFN(m) ((ulonglong)(m) >> PAGESHIFT()) ++#define XEN_PFN_TO_PSEUDO(p) ((ulonglong)(p) << PAGESHIFT()) ++ ++#define XEN_MFN_NOT_FOUND (~0UL) ++#define XEN_PFNS_PER_PAGE (PAGESIZE()/sizeof(ulong)) ++#define XEN_FOREIGN_FRAME (1UL << (BITS()-1)) ++ ++#define XEN_MACHADDR_NOT_FOUND (~0ULL) ++ + struct kernel_table { /* kernel data */ + ulong flags; + ulong stext; +@@ -420,6 +493,7 @@ + ulong init_end; + ulong end; + int cpus; ++ char *cpus_override; + void (*display_bh)(void); + ulong module_list; + ulong kernel_module; +@@ -430,11 +504,36 @@ + uint kernel_version[3]; + uint gcc_version[3]; + int runq_siblings; ++ int kernel_NR_CPUS; + long __rq_idx[NR_CPUS]; + long __cpu_idx[NR_CPUS]; + long __per_cpu_offset[NR_CPUS]; +- long cpu_flags[NR_CPUS]; ++ ulong cpu_flags[NR_CPUS]; ++ int BUG_bytes; + #define NMI 0x1 ++ ulong xen_flags; ++#define WRITABLE_PAGE_TABLES (0x1) ++#define SHADOW_PAGE_TABLES (0x2) ++#define CANONICAL_PAGE_TABLES (0x4) ++#define XEN_SUSPEND (0x8) ++ char *m2p_page; ++ ulong phys_to_machine_mapping; ++ ulong p2m_table_size; ++#define P2M_MAPPING_CACHE (512) ++ struct p2m_mapping_cache { ++ ulong mapping; ++ ulong start; ++ ulong end; ++ } p2m_mapping_cache[P2M_MAPPING_CACHE]; ++#define P2M_MAPPING_TO_PAGE_INDEX(c) \ ++ (((kt->p2m_mapping_cache[c].mapping - kt->phys_to_machine_mapping)/PAGESIZE()) \ ++ * XEN_PFNS_PER_PAGE) ++ ulong last_mapping_read; ++ ulong p2m_cache_index; ++ ulong p2m_pages_searched; ++ ulong p2m_mfn_cache_hits; ++ ulong p2m_page_cache_hits; ++ ulong relocate; + }; + + /* +@@ -511,6 +610,7 @@ + char *task_struct; + char *thread_info; + char *mm_struct; ++ ulong init_pid_ns; + }; + + #define TASK_INIT_DONE (0x1) +@@ -527,6 +627,7 @@ + #define IRQSTACKS (0x800) + #define TIMESPEC (0x1000) + #define NO_TIMESPEC (0x2000) ++#define ACTIVE_ONLY (0x4000) + + #define TASK_SLUSH (20) + +@@ -578,6 +679,7 @@ + ulonglong flags; + ulong instptr; + ulong stkptr; ++ ulong bptr; + ulong stackbase; + ulong stacktop; + char *stackbuf; +@@ -602,6 +704,8 @@ + (void *)(&bt->stackbuf[(ulong)STACK_OFFSET_TYPE(OFF)]), (size_t)(SZ)) + + struct machine_specific; /* uniquely defined below each machine's area */ ++struct xendump_data; ++struct xen_kdump_data; + + struct machdep_table { + ulong flags; +@@ -645,14 +749,24 @@ + char **file; + } *line_number_hooks; + ulong last_pgd_read; ++ ulong last_pud_read; + ulong last_pmd_read; + ulong last_ptbl_read; + char *pgd; ++ char *pud; + char *pmd; + char *ptbl; + int ptrs_per_pgd; + char *cmdline_arg; + struct machine_specific *machspec; ++ ulong section_size_bits; ++ ulong max_physmem_bits; ++ ulong sections_per_root; ++ int (*xendump_p2m_create)(struct xendump_data *); ++ ulong (*xendump_panic_task)(struct xendump_data *); ++ void (*get_xendump_regs)(struct xendump_data *, struct bt_info *, ulong *, ulong *); ++ void (*clear_machdep_cache)(void); ++ int (*xen_kdump_p2m_create)(struct xen_kdump_data *); + }; + + /* +@@ -660,19 +774,25 @@ + * as defined in their processor-specific files below. (see KSYMS_START defs). 
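The p2m_mapping_cache[] added above remembers the pfn range covered by each recently read phys_to_machine page, so a translation first scans those cached ranges before rereading the table from the dumpfile. The lookup reduced to a sketch (slot count and values are illustrative):

    #include <stdio.h>

    #define CACHE_SLOTS 4

    struct p2m_cache_entry {
        unsigned long long mapping;  /* address of the cached p2m page */
        unsigned long start;         /* first pfn covered              */
        unsigned long end;           /* last pfn covered               */
    };

    /* Return the cached mapping page covering pfn, or 0 on a cache miss. */
    static unsigned long long p2m_cache_lookup(struct p2m_cache_entry *cache,
                                               int slots, unsigned long pfn)
    {
        int i;

        for (i = 0; i < slots; i++)
            if (cache[i].mapping &&
                pfn >= cache[i].start && pfn <= cache[i].end)
                return cache[i].mapping;
        return 0;
    }

    int main(void)
    {
        struct p2m_cache_entry cache[CACHE_SLOTS] = {
            { 0xffff830000100000ULL, 0x000, 0x1ff },
            { 0xffff830000101000ULL, 0x200, 0x3ff },
        };

        printf("pfn 0x210 -> %llx\n",
            p2m_cache_lookup(cache, CACHE_SLOTS, 0x210));
        printf("pfn 0x800 -> %llx (miss)\n",
            p2m_cache_lookup(cache, CACHE_SLOTS, 0x800));
        return 0;
    }
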
+ */ + #define HWRESET (0x80000000) +-#define SYSRQ (0x40000000) +-#define OMIT_FRAME_PTR (0x20000000) +-#define FRAMESIZE_DEBUG (0x10000000) +-#define MACHDEP_BT_TEXT (0x8000000) +-#define DEVMEMRD (0x4000000) +-#define INIT (0x2000000) +-#define SYSRQ_TASK(X) ((machdep->flags & SYSRQ) && is_task_active(X)) ++#define OMIT_FRAME_PTR (0x40000000) ++#define FRAMESIZE_DEBUG (0x20000000) ++#define MACHDEP_BT_TEXT (0x10000000) ++#define DEVMEMRD (0x8000000) ++#define INIT (0x4000000) ++#define VM_4_LEVEL (0x2000000) ++#define MCA (0x1000000) ++#define PAE (0x800000) + + extern struct machdep_table *machdep; + ++#ifndef HZ ++#define HZ sysconf(_SC_CLK_TCK) ++#endif ++ + #define IS_LAST_PGD_READ(pgd) ((ulong)(pgd) == machdep->last_pgd_read) + #define IS_LAST_PMD_READ(pmd) ((ulong)(pmd) == machdep->last_pmd_read) + #define IS_LAST_PTBL_READ(ptbl) ((ulong)(ptbl) == machdep->last_ptbl_read) ++#define IS_LAST_PUD_READ(pud) ((ulong)(pud) == machdep->last_pud_read) + + #define FILL_PGD(PGD, TYPE, SIZE) \ + if (!IS_LAST_PGD_READ(PGD)) { \ +@@ -681,6 +801,13 @@ + machdep->last_pgd_read = (ulong)(PGD); \ + } + ++#define FILL_PUD(PUD, TYPE, SIZE) \ ++ if (!IS_LAST_PUD_READ(PUD)) { \ ++ readmem((ulonglong)((ulong)(PUD)), TYPE, machdep->pud, \ ++ SIZE, "pud page", FAULT_ON_ERROR); \ ++ machdep->last_pud_read = (ulong)(PUD); \ ++ } ++ + #define FILL_PMD(PMD, TYPE, SIZE) \ + if (!IS_LAST_PMD_READ(PMD)) { \ + readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ +@@ -695,10 +822,12 @@ + machdep->last_ptbl_read = (ulong)(PTBL); \ + } + ++#define SETUP_ENV (0) + #define PRE_SYMTAB (1) + #define PRE_GDB (2) + #define POST_GDB (3) + #define POST_INIT (4) ++#define POST_VM (5) + + #define FOREACH_BT (1) + #define FOREACH_VM (2) +@@ -737,6 +866,7 @@ + #define FOREACH_c_FLAG (0x40000) + #define FOREACH_f_FLAG (0x80000) + #define FOREACH_o_FLAG (0x100000) ++#define FOREACH_T_FLAG (0x200000) + + struct foreach_data { + ulong flags; +@@ -810,10 +940,15 @@ + long task_struct_last_run; + long task_struct_timestamp; + long task_struct_thread_info; ++ long task_struct_nsproxy; ++ long task_struct_rlim; + long thread_info_task; + long thread_info_cpu; + long thread_info_previous_esp; + long thread_info_flags; ++ long nsproxy_mnt_ns; ++ long mnt_namespace_root; ++ long mnt_namespace_list; + long pid_link_pid; + long pid_hash_chain; + long hlist_node_next; +@@ -830,6 +965,8 @@ + long tms_tms_stime; + long signal_struct_count; + long signal_struct_action; ++ long signal_struct_shared_pending; ++ long signal_struct_rlim; + long k_sigaction_sa; + long sigaction_sa_handler; + long sigaction_sa_flags; +@@ -875,8 +1012,14 @@ + long mm_struct_mmap; + long mm_struct_pgd; + long mm_struct_rss; ++ long mm_struct_anon_rss; ++ long mm_struct_file_rss; + long mm_struct_total_vm; + long mm_struct_start_code; ++ long mm_struct_arg_start; ++ long mm_struct_arg_end; ++ long mm_struct_env_start; ++ long mm_struct_env_end; + long vm_area_struct_vm_mm; + long vm_area_struct_vm_next; + long vm_area_struct_vm_end; +@@ -948,6 +1091,7 @@ + long block_device_bd_disk; + long irq_desc_t_status; + long irq_desc_t_handler; ++ long irq_desc_t_chip; + long irq_desc_t_action; + long irq_desc_t_depth; + long irqdesc_action; +@@ -968,8 +1112,28 @@ + long hw_interrupt_type_ack; + long hw_interrupt_type_end; + long hw_interrupt_type_set_affinity; ++ long irq_chip_typename; ++ long irq_chip_startup; ++ long irq_chip_shutdown; ++ long irq_chip_enable; ++ long irq_chip_disable; ++ long irq_chip_ack; ++ long irq_chip_end; ++ long irq_chip_set_affinity; ++ long 
irq_chip_mask; ++ long irq_chip_mask_ack; ++ long irq_chip_unmask; ++ long irq_chip_eoi; ++ long irq_chip_retrigger; ++ long irq_chip_set_type; ++ long irq_chip_set_wake; + long irq_cpustat_t___softirq_active; + long irq_cpustat_t___softirq_mask; ++ long fdtable_max_fds; ++ long fdtable_max_fdset; ++ long fdtable_open_fds; ++ long fdtable_fd; ++ long files_struct_fdt; + long files_struct_max_fds; + long files_struct_max_fdset; + long files_struct_open_fds; +@@ -978,6 +1142,9 @@ + long file_f_dentry; + long file_f_vfsmnt; + long file_f_count; ++ long file_f_path; ++ long path_mnt; ++ long path_dentry; + long fs_struct_root; + long fs_struct_pwd; + long fs_struct_rootmnt; +@@ -1088,6 +1255,8 @@ + long inet_opt_dport; + long inet_opt_sport; + long inet_opt_num; ++ long ipv6_pinfo_rcv_saddr; ++ long ipv6_pinfo_daddr; + long timer_list_list; + long timer_list_next; + long timer_list_entry; +@@ -1123,6 +1292,7 @@ + long zone_struct_name; + long zone_struct_size; + long zone_struct_memsize; ++ long zone_struct_zone_start_pfn; + long zone_struct_zone_start_paddr; + long zone_struct_zone_start_mapnr; + long zone_struct_zone_mem_map; +@@ -1143,6 +1313,7 @@ + long zone_pages_min; + long zone_pages_low; + long zone_pages_high; ++ long zone_vm_stat; + long neighbour_next; + long neighbour_primary_key; + long neighbour_ha; +@@ -1210,7 +1381,67 @@ + long x8664_pda_irqstackptr; + long x8664_pda_level4_pgt; + long x8664_pda_cpunumber; ++ long x8664_pda_me; + long tss_struct_ist; ++ long mem_section_section_mem_map; ++ long vcpu_guest_context_user_regs; ++ long cpu_user_regs_eip; ++ long cpu_user_regs_esp; ++ long cpu_user_regs_rip; ++ long cpu_user_regs_rsp; ++ long unwind_table_core; ++ long unwind_table_init; ++ long unwind_table_address; ++ long unwind_table_size; ++ long unwind_table_link; ++ long unwind_table_name; ++ long rq_cfs; ++ long rq_rt; ++ long rq_nr_running; ++ long cfs_rq_rb_leftmost; ++ long cfs_rq_nr_running; ++ long cfs_rq_tasks_timeline; ++ long task_struct_se; ++ long sched_entity_run_node; ++ long rt_rq_active; ++ long kmem_cache_size; ++ long kmem_cache_objsize; ++ long kmem_cache_offset; ++ long kmem_cache_order; ++ long kmem_cache_local_node; ++ long kmem_cache_objects; ++ long kmem_cache_inuse; ++ long kmem_cache_align; ++ long kmem_cache_name; ++ long kmem_cache_list; ++ long kmem_cache_node; ++ long kmem_cache_cpu_slab; ++ long page_inuse; ++/* long page_offset; use "old" page->offset */ ++ long page_slab; ++ long page_first_page; ++ long page_freelist; ++ long kmem_cache_node_nr_partial; ++ long kmem_cache_node_nr_slabs; ++ long kmem_cache_node_partial; ++ long kmem_cache_node_full; ++ long pid_numbers; ++ long upid_nr; ++ long upid_ns; ++ long upid_pid_chain; ++ long pid_tasks; ++ long kmem_cache_cpu_freelist; ++ long kmem_cache_cpu_page; ++ long kmem_cache_cpu_node; ++ long zone_nr_active; ++ long zone_nr_inactive; ++ long zone_all_unreclaimable; ++ long zone_present_pages; ++ long zone_flags; ++ long zone_pages_scanned; ++ long pcpu_info_vcpu; ++ long pcpu_info_idle; ++ long vcpu_struct_rq; + }; + + struct size_table { /* stash of commonly-used sizes */ +@@ -1239,6 +1470,7 @@ + long umode_t; + long dentry; + long files_struct; ++ long fdtable; + long fs_struct; + long file; + long inode; +@@ -1264,6 +1496,7 @@ + long net_device; + long sock; + long signal_struct; ++ long sigpending_signal; + long signal_queue; + long sighand_struct; + long sigqueue; +@@ -1292,15 +1525,28 @@ + long address_space; + long char_device_struct; + long inet_sock; ++ long in6_addr; + long socket; + 
long spinlock_t; + long radix_tree_root; + long radix_tree_node; + long x8664_pda; ++ long ppc64_paca; + long gate_struct; + long tss_struct; + long task_struct_start_time; + long cputime_t; ++ long mem_section; ++ long pid_link; ++ long unwind_table; ++ long rlimit; ++ long kmem_cache; ++ long kmem_cache_node; ++ long upid; ++ long kmem_cache_cpu; ++ long cfs_rq; ++ long pcpu_info; ++ long vcpu_struct; + }; + + struct array_table { +@@ -1327,6 +1573,9 @@ + int free_area_DIMENSION; + int prio_array_queue; + int height_to_maxindex; ++ int pid_hash; ++ int kmem_cache_node; ++ int kmem_cache_cpu_slab; + }; + + /* +@@ -1342,7 +1591,12 @@ + #define MEMBER_OFFSET(X,Y) datatype_info((X), (Y), NULL) + #define MEMBER_EXISTS(X,Y) (datatype_info((X), (Y), NULL) >= 0) + #define MEMBER_SIZE_REQUEST ((struct datatype_member *)(-1)) ++#define MEMBER_TYPE_REQUEST ((struct datatype_member *)(-3)) + #define MEMBER_SIZE(X,Y) datatype_info((X), (Y), MEMBER_SIZE_REQUEST) ++#define MEMBER_TYPE(X,Y) datatype_info((X), (Y), MEMBER_TYPE_REQUEST) ++ ++#define ANON_MEMBER_OFFSET_REQUEST ((struct datatype_member *)(-2)) ++#define ANON_MEMBER_OFFSET(X,Y) datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST) + + /* + * The following set of macros can only be used with pre-intialized fields +@@ -1365,7 +1619,9 @@ + #define MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) + #define STRUCT_SIZE_INIT(X, Y) (ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) + #define ARRAY_LENGTH_INIT(A, B, C, D, E) ((A) = get_array_length(C, D, E)) ++#define ARRAY_LENGTH_INIT_ALT(A, B, C, D, E) ((A) = get_array_length_alt(B, C, D, E)) + #define MEMBER_SIZE_INIT(X, Y, Z) (ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) ++#define ANON_MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = ANON_MEMBER_OFFSET(Y, Z)) + + /* + * For use with non-debug kernels. 
+@@ -1389,6 +1645,7 @@ + #define ULONGLONG(ADDR) *((ulonglong *)((char *)(ADDR))) + #define ULONG_PTR(ADDR) *((ulong **)((char *)(ADDR))) + #define USHORT(ADDR) *((ushort *)((char *)(ADDR))) ++#define SHORT(ADDR) *((short *)((char *)(ADDR))) + #define VOID_PTR(ADDR) *((void **)((char *)(ADDR))) + + struct node_table { +@@ -1396,6 +1653,7 @@ + ulong pgdat; + ulong mem_map; + ulong size; ++ ulong present; + ulonglong start_paddr; + ulong start_mapnr; + }; +@@ -1420,8 +1678,10 @@ + ulong kmem_max_limit; + ulong kmem_max_cpus; + ulong kmem_cache_count; ++ ulong kmem_cache_len_nodes; + ulong PG_reserved; + ulong PG_slab; ++ ulong PG_head_tail_mask; + int kmem_cache_namelen; + ulong page_hash_table; + int page_hash_table_len; +@@ -1441,17 +1701,42 @@ + ulong cached_vma_hits[VMA_CACHE]; + int vma_cache_index; + ulong vma_cache_fills; +-}; +- +-#define NODES (0x1) +-#define ZONES (0x2) +-#define PERCPU_KMALLOC_V1 (0x4) +-#define COMMON_VADDR (0x8) +-#define KMEM_CACHE_INIT (0x10) +-#define V_MEM_MAP (0x20) +-#define PERCPU_KMALLOC_V2 (0x40) +-#define KMEM_CACHE_UNAVAIL (0x80) +-#define DISCONTIGMEM (0x100) ++ void *mem_sec; ++ char *mem_section; ++ int ZONE_HIGHMEM; ++ ulong *node_online_map; ++ int node_online_map_len; ++ int nr_vm_stat_items; ++ char **vm_stat_items; ++ int cpu_slab_type; ++ int nr_vm_event_items; ++ char **vm_event_items; ++}; ++ ++#define NODES (0x1) ++#define ZONES (0x2) ++#define PERCPU_KMALLOC_V1 (0x4) ++#define COMMON_VADDR (0x8) ++#define KMEM_CACHE_INIT (0x10) ++#define V_MEM_MAP (0x20) ++#define PERCPU_KMALLOC_V2 (0x40) ++#define KMEM_CACHE_UNAVAIL (0x80) ++#define FLATMEM (0x100) ++#define DISCONTIGMEM (0x200) ++#define SPARSEMEM (0x400) ++#define SPARSEMEM_EX (0x800) ++#define PERCPU_KMALLOC_V2_NODES (0x1000) ++#define KMEM_CACHE_DELAY (0x2000) ++#define NODES_ONLINE (0x4000) ++#define VM_STAT (0x8000) ++#define KMALLOC_SLUB (0x10000) ++#define CONFIG_NUMA (0x20000) ++#define VM_EVENT (0x40000) ++ ++#define IS_FLATMEM() (vt->flags & FLATMEM) ++#define IS_DISCONTIGMEM() (vt->flags & DISCONTIGMEM) ++#define IS_SPARSEMEM() (vt->flags & SPARSEMEM) ++#define IS_SPARSEMEM_EX() (vt->flags & SPARSEMEM_EX) + + #define COMMON_VADDR_SPACE() (vt->flags & COMMON_VADDR) + #define PADDR_PRLEN (vt->paddr_prlen) +@@ -1478,7 +1763,8 @@ + long list_head_offset; + ulong end; + ulong searchfor; +- char *structname; ++ char **structname; ++ int structname_args; + char *header; + }; + #define LIST_OFFSET_ENTERED (VERBOSE << 1) +@@ -1584,8 +1870,11 @@ + int mods_installed; + struct load_module *current; + struct load_module *load_modules; ++ off_t dwarf_eh_frame_file_offset; ++ ulong dwarf_eh_frame_size; + }; + ++/* flags for st */ + #define KERNEL_SYMS (0x1) + #define MODULE_SYMS (0x2) + #define LOAD_MODULE_SYMS (0x4) +@@ -1596,6 +1885,8 @@ + #define NO_SEC_CONTENTS (0x40) + #define FORCE_DEBUGINFO (0x80) + #define CRC_MATCHES (0x100) ++#define ADD_SYMBOL_FILE (0x200) ++#define USE_OLD_ADD_SYM (0x400) + + #endif /* !GDB_COMMON */ + +@@ -1611,6 +1902,8 @@ + #define MOD_KALLSYMS (0x8) + #define MOD_INITRD (0x10) + ++#define SEC_FOUND (0x10000) ++ + struct mod_section_data { + #if defined(GDB_6_1) + struct bfd_section *section; +@@ -1659,6 +1952,8 @@ + #define KVADDR (0x1) + #define UVADDR (0x2) + #define PHYSADDR (0x4) ++#define XENMACHADDR (0x8) ++#define FILEADDR (0x10) + #define AMBIGUOUS (~0) + + #define USE_USER_PGD (UVADDR << 2) +@@ -1680,6 +1975,33 @@ + #define VIRTPAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) + #define PHYSPAGEBASE(X) (((physaddr_t)(X)) & 
(physaddr_t)machdep->pagemask) + ++/* ++ * Sparse memory stuff ++ * These must follow the definitions in the kernel mmzone.h ++ */ ++#define SECTION_SIZE_BITS() (machdep->section_size_bits) ++#define MAX_PHYSMEM_BITS() (machdep->max_physmem_bits) ++#define SECTIONS_SHIFT() (MAX_PHYSMEM_BITS() - SECTION_SIZE_BITS()) ++#define PA_SECTION_SHIFT() (SECTION_SIZE_BITS()) ++#define PFN_SECTION_SHIFT() (SECTION_SIZE_BITS() - PAGESHIFT()) ++#define NR_MEM_SECTIONS() (1UL << SECTIONS_SHIFT()) ++#define PAGES_PER_SECTION() (1UL << PFN_SECTION_SHIFT()) ++#define PAGE_SECTION_MASK() (~(PAGES_PER_SECTION()-1)) ++ ++#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT()) ++#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT()) ++ ++#define SECTIONS_PER_ROOT() (machdep->sections_per_root) ++ ++/* CONFIG_SPARSEMEM_EXTREME */ ++#define _SECTIONS_PER_ROOT_EXTREME() (PAGESIZE() / SIZE(mem_section)) ++/* !CONFIG_SPARSEMEM_EXTREME */ ++#define _SECTIONS_PER_ROOT() (1) ++ ++#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT()) ++#define NR_SECTION_ROOTS() (NR_MEM_SECTIONS() / SECTIONS_PER_ROOT()) ++#define SECTION_ROOT_MASK() (SECTIONS_PER_ROOT() - 1) ++ + /* + * Machine specific stuff + */ +@@ -1689,8 +2011,8 @@ + #define MACHINE_TYPE "X86" + #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) + #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) +-#define KVBASE_MASK (0x1fffff) ++#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) ++#define KVBASE_MASK (0x1ffffff) + + #define PGDIR_SHIFT_2LEVEL (22) + #define PTRS_PER_PTE_2LEVEL (1024) +@@ -1721,25 +2043,91 @@ + + #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) + #define SWP_OFFSET(entry) ((entry) >> 8) ++#define __swp_type_PAE(entry) (((entry) >> 32) & 0x1f) ++#define __swp_type_nonPAE(entry) (((entry) >> 1) & 0x1f) ++#define __swp_offset_PAE(entry) (((entry) >> 32) >> 5) ++#define __swp_offset_nonPAE(entry) ((entry) >> 8) ++#define __swp_type(entry) (machdep->flags & PAE ? \ ++ __swp_type_PAE(entry) : __swp_type_nonPAE(entry)) ++#define __swp_offset(entry) (machdep->flags & PAE ? 
\ ++ __swp_offset_PAE(entry) : __swp_offset_nonPAE(entry)) + + #define TIF_SIGPENDING (2) + ++// CONFIG_X86_PAE ++#define _SECTION_SIZE_BITS_PAE 30 ++#define _MAX_PHYSMEM_BITS_PAE 36 ++ ++// !CONFIG_X86_PAE ++#define _SECTION_SIZE_BITS 26 ++#define _MAX_PHYSMEM_BITS 32 ++ ++#define IS_LAST_PMD_READ_PAE(pmd) ((ulong)(pmd) == machdep->machspec->last_pmd_read_PAE) ++#define IS_LAST_PTBL_READ_PAE(ptbl) ((ulong)(ptbl) == machdep->machspec->last_ptbl_read_PAE) ++ ++#define FILL_PMD_PAE(PMD, TYPE, SIZE) \ ++ if (!IS_LAST_PMD_READ_PAE(PMD)) { \ ++ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ ++ SIZE, "pmd page", FAULT_ON_ERROR); \ ++ machdep->machspec->last_pmd_read_PAE = (ulonglong)(PMD); \ ++ } ++ ++#define FILL_PTBL_PAE(PTBL, TYPE, SIZE) \ ++ if (!IS_LAST_PTBL_READ_PAE(PTBL)) { \ ++ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ ++ SIZE, "page table", FAULT_ON_ERROR); \ ++ machdep->machspec->last_ptbl_read_PAE = (ulonglong)(PTBL); \ ++ } ++ + #endif /* X86 */ + + #ifdef X86_64 + #define _64BIT_ + #define MACHINE_TYPE "X86_64" + +-#define USERSPACE_TOP 0x0000008000000000 +-#define __START_KERNEL_map 0xffffffff80000000 +-#define PAGE_OFFSET 0x0000010000000000 +- +-#define VMALLOC_START 0xffffff0000000000 +-#define VMALLOC_END 0xffffff7fffffffff +-#define MODULES_VADDR 0xffffffffa0000000 +-#define MODULES_END 0xffffffffafffffff ++#define USERSPACE_TOP (machdep->machspec->userspace_top) ++#define PAGE_OFFSET (machdep->machspec->page_offset) ++#define VMALLOC_START (machdep->machspec->vmalloc_start_addr) ++#define VMALLOC_END (machdep->machspec->vmalloc_end) ++#define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) ++#define VMEMMAP_END (machdep->machspec->vmemmap_end) ++#define MODULES_VADDR (machdep->machspec->modules_vaddr) ++#define MODULES_END (machdep->machspec->modules_end) ++ ++#define __START_KERNEL_map 0xffffffff80000000UL + #define MODULES_LEN (MODULES_END - MODULES_VADDR) + ++#define USERSPACE_TOP_ORIG 0x0000008000000000 ++#define PAGE_OFFSET_ORIG 0x0000010000000000 ++#define VMALLOC_START_ADDR_ORIG 0xffffff0000000000 ++#define VMALLOC_END_ORIG 0xffffff7fffffffff ++#define MODULES_VADDR_ORIG 0xffffffffa0000000 ++#define MODULES_END_ORIG 0xffffffffafffffff ++ ++#define USERSPACE_TOP_2_6_11 0x0000800000000000 ++#define PAGE_OFFSET_2_6_11 0xffff810000000000 ++#define VMALLOC_START_ADDR_2_6_11 0xffffc20000000000 ++#define VMALLOC_END_2_6_11 0xffffe1ffffffffff ++#define MODULES_VADDR_2_6_11 0xffffffff88000000 ++#define MODULES_END_2_6_11 0xfffffffffff00000 ++ ++#define VMEMMAP_VADDR_2_6_24 0xffffe20000000000 ++#define VMEMMAP_END_2_6_24 0xffffe2ffffffffff ++ ++#define USERSPACE_TOP_XEN 0x0000800000000000 ++#define PAGE_OFFSET_XEN 0xffff880000000000 ++#define VMALLOC_START_ADDR_XEN 0xffffc20000000000 ++#define VMALLOC_END_XEN 0xffffe1ffffffffff ++#define MODULES_VADDR_XEN 0xffffffff88000000 ++#define MODULES_END_XEN 0xfffffffffff00000 ++ ++#define USERSPACE_TOP_XEN_RHEL4 0x0000008000000000 ++#define PAGE_OFFSET_XEN_RHEL4 0xffffff8000000000 ++#define VMALLOC_START_ADDR_XEN_RHEL4 0xffffff0000000000 ++#define VMALLOC_END_XEN_RHEL4 0xffffff7fffffffff ++#define MODULES_VADDR_XEN_RHEL4 0xffffffffa0000000 ++#define MODULES_END_XEN_RHEL4 0xffffffffafffffff ++ + #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) + #define VTOP(X) x86_64_VTOP((ulong)(X)) + #define IS_VMALLOC_ADDR(X) x86_64_IS_VMALLOC_ADDR((ulong)(X)) +@@ -1757,12 +2145,34 @@ + #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 
+ ++#define IS_LAST_PML4_READ(pml4) ((ulong)(pml4) == machdep->machspec->last_pml4_read) ++ + #define FILL_PML4() { \ + if (!(pc->flags & RUNTIME) || ACTIVE()) \ +- readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, \ ++ if (!IS_LAST_PML4_READ(vt->kernel_pgd[0])) \ ++ readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, \ + PAGESIZE(), "init_level4_pgt", FAULT_ON_ERROR); \ ++ machdep->machspec->last_pml4_read = (ulong)(vt->kernel_pgd[0]); \ + } + ++#define FILL_PML4_HYPER() { \ ++ if (!machdep->machspec->last_pml4_read) { \ ++ readmem(symbol_value("idle_pg_table_4"), KVADDR, \ ++ machdep->machspec->pml4, PAGESIZE(), "idle_pg_table_4", \ ++ FAULT_ON_ERROR); \ ++ machdep->machspec->last_pml4_read = symbol_value("idle_pg_table_4"); \ ++ }\ ++} ++ ++#define IS_LAST_UPML_READ(pml) ((ulong)(pml) == machdep->machspec->last_upml_read) ++ ++#define FILL_UPML(PML, TYPE, SIZE) \ ++ if (!IS_LAST_UPML_READ(PML)) { \ ++ readmem((ulonglong)((ulong)(PML)), TYPE, machdep->machspec->upml, \ ++ SIZE, "pml page", FAULT_ON_ERROR); \ ++ machdep->machspec->last_upml_read = (ulong)(PML); \ ++ } ++ + /* + * PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so + * for safety, use the 2.6 values to generate it. +@@ -1791,11 +2201,22 @@ + + #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) + #define SWP_OFFSET(entry) ((entry) >> 8) ++#define __swp_type(entry) SWP_TYPE(entry) ++#define __swp_offset(entry) SWP_OFFSET(entry) + + #define TIF_SIGPENDING (2) + + #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) + ++#define _CPU_PDA_READ(CPU, BUFFER) \ ++ ((STRNEQ("_cpu_pda", closest_symbol((symbol_value("_cpu_pda") + \ ++ ((CPU) * sizeof(unsigned long)))))) && \ ++ (readmem(symbol_value("_cpu_pda") + ((CPU) * sizeof(void *)), \ ++ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ ++ "_cpu_pda addr", FAULT_ON_ERROR)) && \ ++ (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ ++ "cpu_pda entry", FAULT_ON_ERROR))) ++ + #define CPU_PDA_READ(CPU, BUFFER) \ + (STRNEQ("cpu_pda", closest_symbol((symbol_value("cpu_pda") + \ + ((CPU) * SIZE(x8664_pda))))) && \ +@@ -1806,6 +2227,9 @@ + #define VALID_LEVEL4_PGT_ADDR(X) \ + (((X) == VIRTPAGEBASE(X)) && IS_KVADDR(X) && !IS_VMALLOC_ADDR(X)) + ++#define _SECTION_SIZE_BITS 27 ++#define _MAX_PHYSMEM_BITS 40 ++ + #endif /* X86_64 */ + + #ifdef ALPHA +@@ -1816,7 +2240,7 @@ + + #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) + #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) ++#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) + #define KSEG_BASE_48_BIT (0xffff800000000000) + #define KSEG_BASE (0xfffffc0000000000) + #define _PFN_MASK (0xFFFFFFFF00000000) +@@ -1848,6 +2272,8 @@ + + #define SWP_TYPE(entry) (((entry) >> 32) & 0xff) + #define SWP_OFFSET(entry) ((entry) >> 40) ++#define __swp_type(entry) SWP_TYPE(entry) ++#define __swp_offset(entry) SWP_OFFSET(entry) + + #define TIF_SIGPENDING (2) + +@@ -1861,7 +2287,7 @@ + + #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) + #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) ++#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) + + #define PGDIR_SHIFT (22) + #define PTRS_PER_PTE (1024) +@@ -1881,9 +2307,14 @@ + + #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) + #define SWP_OFFSET(entry) ((entry) >> 8) ++#define __swp_type(entry) SWP_TYPE(entry) ++#define __swp_offset(entry) SWP_OFFSET(entry) + + 
#define TIF_SIGPENDING (2) + ++#define _SECTION_SIZE_BITS 24 ++#define _MAX_PHYSMEM_BITS 44 ++ + #endif /* PPC */ + + #ifdef IA64 +@@ -1908,6 +2339,9 @@ + #define KERNEL_UNCACHED_BASE ((ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT) + #define KERNEL_CACHED_BASE ((ulong)KERNEL_CACHED_REGION << REGION_SHIFT) + ++#define _SECTION_SIZE_BITS 30 ++#define _MAX_PHYSMEM_BITS 50 ++ + /* + * As of 2.6, these are no longer straight forward. + */ +@@ -1917,16 +2351,57 @@ + + #define SWITCH_STACK_ADDR(X) (ia64_get_switch_stack((ulong)(X))) + +-#define PGDIR_SHIFT (PAGESHIFT() + 2*(PAGESHIFT()-3)) +-#define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT()-3)) +-#define PTRS_PER_PGD (((ulong)(1)) << (PAGESHIFT()-3)) +-#define PTRS_PER_PMD (((ulong)(1)) << (PAGESHIFT()-3)) +-#define PTRS_PER_PTE (((ulong)(1)) << (PAGESHIFT()-3)) +-#define PTRS_PER_PAGE (((ulong)(1)) << (PAGESHIFT()-3)) + #define __IA64_UL(x) ((unsigned long)(x)) + #define IA64_MAX_PHYS_BITS (50) /* max # of phys address bits (architected) */ + + /* ++ * How many pointers will a page table level hold expressed in shift ++ */ ++#define PTRS_PER_PTD_SHIFT (PAGESHIFT()-3) ++ ++/* ++ * Definitions for fourth level: ++ */ ++#define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT)) ++ ++/* ++ * Definitions for third level: ++ * ++ * PMD_SHIFT determines the size of the area a third-level page table ++ * can map. ++ */ ++#define PMD_SHIFT (PAGESHIFT() + (PTRS_PER_PTD_SHIFT)) ++#define PMD_SIZE (1UL << PMD_SHIFT) ++#define PMD_MASK (~(PMD_SIZE-1)) ++#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT)) ++ ++/* ++ * PUD_SHIFT determines the size of the area a second-level page table ++ * can map ++ */ ++#define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) ++#define PUD_SIZE (1UL << PUD_SHIFT) ++#define PUD_MASK (~(PUD_SIZE-1)) ++#define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT)) ++ ++/* ++ * Definitions for first level: ++ * ++ * PGDIR_SHIFT determines what a first-level page table entry can map. ++ */ ++ ++#define PGDIR_SHIFT_4L (PUD_SHIFT + (PTRS_PER_PTD_SHIFT)) ++#define PGDIR_SHIFT_3L (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) ++/* Turns out 4L & 3L PGDIR_SHIFT are the same (for now) */ ++#define PGDIR_SHIFT PGDIR_SHIFT_4L ++#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT) ++#define PGDIR_MASK (~(PGDIR_SIZE-1)) ++#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT ++#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT) ++#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */ ++#define FIRST_USER_ADDRESS 0 ++ ++/* + * First, define the various bits in a PTE. Note that the PTE format + * matches the VHPT short format, the firt doubleword of the VHPD long + * format, and the first doubleword of the TLB insertion format. +@@ -1978,6 +2453,7 @@ + #define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED + + #define EFI_PAGE_SHIFT (12) ++ + /* + * NOTE: #include'ing creates too many compiler problems, so + * this stuff is hardwired here; it's probably etched in stone somewhere. 
+@@ -2020,6 +2496,8 @@ + + #define SWP_TYPE(entry) (((entry) >> 1) & 0xff) + #define SWP_OFFSET(entry) ((entry) >> 9) ++#define __swp_type(entry) ((entry >> 2) & 0x7f) ++#define __swp_offset(entry) ((entry << 1) >> 10) + + #define TIF_SIGPENDING (1) + +@@ -2038,11 +2516,14 @@ + #define _64BIT_ + #define MACHINE_TYPE "PPC64" + ++#define PPC64_64K_PAGE_SIZE 65536 ++#define PPC64_STACK_SIZE 16384 ++ + #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) + + #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) + #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) ++#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) + #define KERNELBASE machdep->pageoffset + + #define PGDIR_SHIFT (machdep->pageshift + (machdep->pageshift -3) + (machdep->pageshift - 2)) +@@ -2067,6 +2548,33 @@ + #define PGD_OFFSET(vaddr) ((vaddr >> PGDIR_SHIFT) & 0x7ff) + #define PMD_OFFSET(vaddr) ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) + ++/* 4-level page table support */ ++ ++/* 4K pagesize */ ++#define PTE_INDEX_SIZE_L4_4K 9 ++#define PMD_INDEX_SIZE_L4_4K 7 ++#define PUD_INDEX_SIZE_L4_4K 7 ++#define PGD_INDEX_SIZE_L4_4K 9 ++#define PTE_SHIFT_L4_4K 17 ++#define PMD_MASKED_BITS_4K 0 ++ ++/* 64K pagesize */ ++#define PTE_INDEX_SIZE_L4_64K 12 ++#define PMD_INDEX_SIZE_L4_64K 12 ++#define PUD_INDEX_SIZE_L4_64K 0 ++#define PGD_INDEX_SIZE_L4_64K 4 ++#define PTE_SHIFT_L4_64K_V1 32 ++#define PTE_SHIFT_L4_64K_V2 30 ++#define PMD_MASKED_BITS_64K 0x1ff ++ ++#define L4_OFFSET(vaddr) ((vaddr >> (machdep->machspec->l4_shift)) & 0x1ff) ++ ++#define PGD_OFFSET_L4(vaddr) \ ++ ((vaddr >> (machdep->machspec->l3_shift)) & (machdep->machspec->ptrs_per_l3 - 1)) ++ ++#define PMD_OFFSET_L4(vaddr) \ ++ ((vaddr >> (machdep->machspec->l2_shift)) & (machdep->machspec->ptrs_per_l2 - 1)) ++ + #define _PAGE_PRESENT 0x001UL /* software: pte contains a translation */ + #define _PAGE_USER 0x002UL /* matches one of the PP bits */ + #define _PAGE_RW 0x004UL /* software: user write access allowed */ +@@ -2080,6 +2588,8 @@ + + #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) + #define SWP_OFFSET(entry) ((entry) >> 8) ++#define __swp_type(entry) SWP_TYPE(entry) ++#define __swp_offset(entry) SWP_OFFSET(entry) + + #define MSR_PR_LG 14 /* Problem State / Privilege Level */ + /* Used to find the user or kernel-mode frame*/ +@@ -2087,6 +2597,9 @@ + #define STACK_FRAME_OVERHEAD 112 + #define EXCP_FRAME_MARKER 0x7265677368657265 + ++#define _SECTION_SIZE_BITS 24 ++#define _MAX_PHYSMEM_BITS 44 ++ + #endif /* PPC64 */ + + #ifdef S390 +@@ -2095,7 +2608,7 @@ + + #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) + #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +-#define IS_VMALLOC_ADDR(X) s390_IS_VMALLOC_ADDR(X) ++#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) + + #define PTRS_PER_PTE 1024 + #define PTRS_PER_PMD 1 +@@ -2105,6 +2618,8 @@ + #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) + #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffe) | \ + (((entry) >> 7) & 0x1)) ++#define __swp_type(entry) SWP_TYPE(entry) ++#define __swp_offset(entry) SWP_OFFSET(entry) + + #define TIF_SIGPENDING (2) + +@@ -2116,7 +2631,7 @@ + + #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) + #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +-#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) ++#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) + #define PTRS_PER_PTE 512 + #define 
PTRS_PER_PMD 1024 + #define PTRS_PER_PGD 2048 +@@ -2125,6 +2640,8 @@ + #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) + #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffffffffffe) | \ + (((entry) >> 7) & 0x1)) ++#define __swp_type(entry) SWP_TYPE(entry) ++#define __swp_offset(entry) SWP_OFFSET(entry) + + #define TIF_SIGPENDING (2) + +@@ -2134,6 +2651,8 @@ + + #define SWP_TYPE(entry) (error("PLATFORM_SWP_TYPE: TBD\n")) + #define SWP_OFFSET(entry) (error("PLATFORM_SWP_OFFSET: TBD\n")) ++#define __swp_type(entry) SWP_TYPE(entry) ++#define __swp_offset(entry) SWP_OFFSET(entry) + + #endif /* PLATFORM */ + +@@ -2185,7 +2704,10 @@ + #define BADVAL ((ulong)(-1)) + #define UNUSED (-1) + ++#define UNINITIALIZED (BADVAL) ++ + #define BITS_PER_BYTE (8) ++#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) + + /* + * precision lengths for fprintf +@@ -2199,9 +2721,10 @@ + + #define MINSPACE (-100) + +-#define SYNOPSIS (0x1) +-#define COMPLETE_HELP (0x2) +-#define PIPE_TO_LESS (0x4) ++#define SYNOPSIS (0x1) ++#define COMPLETE_HELP (0x2) ++#define PIPE_TO_SCROLL (0x4) ++#define MUST_HELP (0x8) + + #define LEFT_JUSTIFY (1) + #define RIGHT_JUSTIFY (2) +@@ -2419,17 +2942,22 @@ + /* + * ps command options. + */ +-#define PS_BY_PID (0x1) +-#define PS_BY_TASK (0x2) +-#define PS_BY_CMD (0x4) +-#define PS_SHOW_ALL (0x8) +-#define PS_PPID_LIST (0x10) +-#define PS_CHILD_LIST (0x20) +-#define PS_KERNEL (0x40) +-#define PS_USER (0x80) +-#define PS_TIMES (0x100) +-#define PS_KSTACKP (0x200) +-#define PS_LAST_RUN (0x400) ++#define PS_BY_PID (0x1) ++#define PS_BY_TASK (0x2) ++#define PS_BY_CMD (0x4) ++#define PS_SHOW_ALL (0x8) ++#define PS_PPID_LIST (0x10) ++#define PS_CHILD_LIST (0x20) ++#define PS_KERNEL (0x40) ++#define PS_USER (0x80) ++#define PS_TIMES (0x100) ++#define PS_KSTACKP (0x200) ++#define PS_LAST_RUN (0x400) ++#define PS_ARGV_ENVP (0x800) ++#define PS_TGID_LIST (0x1000) ++#define PS_RLIMIT (0x2000) ++ ++#define PS_EXCLUSIVE (PS_TGID_LIST|PS_ARGV_ENVP|PS_TIMES|PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN|PS_RLIMIT) + + #define MAX_PS_ARGS (100) /* maximum command-line specific requests */ + +@@ -2461,7 +2989,7 @@ + extern struct program_context program_context, *pc; + extern struct task_table task_table, *tt; + extern struct kernel_table kernel_table, *kt; +-extern struct command_table_entry base_command_table[]; ++extern struct command_table_entry linux_command_table[]; + extern char *args[MAXARGS]; + extern int argcnt; + extern int argerrs; +@@ -2534,6 +3062,9 @@ + void cmd_gdb(void); /* gdb_interface.c */ + void cmd_net(void); /* net.c */ + void cmd_extend(void); /* extensions.c */ ++#if defined(S390) || defined(S390X) ++void cmd_s390dbf(void); ++#endif + + /* + * main.c +@@ -2591,6 +3122,8 @@ + int interruptible(void); + int received_SIGINT(void); + void debug_redirect(char *); ++int CRASHPAGER_valid(void); ++char *setup_scroll_command(void); + + /* + * tools.c +@@ -2658,6 +3191,7 @@ + int hq_open(void); + int hq_close(void); + int hq_enter(ulong); ++int hq_entry_exists(ulong); + long get_embedded(void); + void dump_embedded(char *); + char *ordinal(ulong, char *); +@@ -2683,9 +3217,16 @@ + int clean_arg(void); + int empty_list(ulong); + int machine_type(char *); ++int machine_type_mismatch(char *, char *, char *, ulong); + void command_not_supported(void); + void option_not_supported(int); +- ++void please_wait(char *); ++void please_wait_done(void); ++int pathcmp(char *, char *); ++int calculate(char *, ulong *, ulonglong *, ulong); ++int endian_mismatch(char *, char, ulong); ++uint16_t 
swap16(uint16_t, int); ++uint32_t swap32(uint32_t, int); + + /* + * symbols.c +@@ -2721,9 +3262,11 @@ + struct syment *next_symbol(char *, struct syment *); + struct syment *prev_symbol(char *, struct syment *); + void get_symbol_data(char *, long, void *); ++int try_get_symbol_data(char *, long, void *); + char *value_to_symstr(ulong, char *, ulong); + char *value_symbol(ulong); + ulong symbol_value(char *); ++ulong symbol_value_module(char *, char *); + int symbol_exists(char *s); + int kernel_symbol_exists(char *s); + int get_syment_array(char *, struct syment **, int); +@@ -2738,9 +3281,12 @@ + void dump_struct_table(ulong); + void dump_offset_table(char *, ulong); + int is_elf_file(char *); ++int is_kernel(char *); ++int file_elf_version(char *); + int is_system_map(char *); + int select_namelist(char *); + int get_array_length(char *, int *, long); ++int get_array_length_alt(char *, char *, int *, long); + int builtin_array_length(char *, int, int *); + char *get_line_number(ulong, char *, int); + char *get_build_directory(char *); +@@ -2768,6 +3314,7 @@ + long OFFSET_option(long, long, char *, char *, int, char *, char *); + long SIZE_option(long, long, char *, char *, int, char *, char *); + void dump_trace(ulong *); ++int enumerator_value(char *, long *); + + /* + * memory.c +@@ -2807,6 +3354,7 @@ + char *swap_location(ulonglong, char *); + void clear_swap_info_cache(void); + uint memory_page_size(void); ++void force_page_size(char *); + ulong first_vmalloc_address(void); + int l1_cache_size(void); + int dumpfile_memory(int); +@@ -2838,6 +3386,7 @@ + void open_files_dump(ulong, int, struct reference *); + void get_pathname(ulong, char *, int, int, ulong); + ulong file_to_dentry(ulong); ++ulong file_to_vfsmnt(ulong); + void nlm_files_dump(void); + int get_proc_version(void); + int file_checksum(char *, long *); +@@ -2874,6 +3423,7 @@ + void help_init(void); + void cmd_usage(char *, int); + void display_version(void); ++void display_help_screen(char *); + #ifdef X86 + #define dump_machdep_table(X) x86_dump_machdep_table(X) + #endif +@@ -2945,6 +3495,9 @@ + extern char *help_waitq[]; + extern char *help_whatis[]; + extern char *help_wr[]; ++#if defined(S390) || defined(S390X) ++extern char *help_s390dbf[]; ++#endif + + /* + * task.c +@@ -2962,10 +3515,13 @@ + ulong task_flags(ulong); + ulong task_state(ulong); + ulong task_mm(ulong, int); ++ulong task_tgid(ulong); + ulonglong task_last_run(ulong); ++ulong vaddr_in_task_struct(ulong); + int comm_exists(char *); + struct task_context *task_to_context(ulong); + struct task_context *pid_to_context(ulong); ++struct task_context *tgid_to_context(ulong); + ulong stkptr_to_task(ulong); + ulong task_to_thread_info(ulong); + ulong task_to_stackbase(ulong); +@@ -3005,11 +3561,17 @@ + */ + void register_extension(struct command_table_entry *); + void dump_extension_table(int); ++void load_extension(char *); ++void unload_extension(char *); ++/* Hooks for sial */ ++unsigned long get_curtask(void); ++char *crash_global_cmd(void); ++struct command_table_entry *crash_cmd_table(void); + + /* + * kernel.c + */ +-void kernel_init(int); ++void kernel_init(void); + void module_init(void); + void verify_version(void); + void verify_spinlock(void); +@@ -3019,14 +3581,18 @@ + int is_system_call(char *, ulong); + void generic_dump_irq(int); + int generic_dis_filter(ulong, char *); ++int kernel_BUG_encoding_bytes(void); + void display_sys_stats(void); +-void dump_kernel_table(void); ++char *get_uptime(char *, ulonglong *); ++void clone_bt_info(struct bt_info 
*, struct bt_info *, struct task_context *); ++void dump_kernel_table(int); + void dump_bt_info(struct bt_info *); + void dump_log(int); + void set_cpu(int); + void clear_machdep_cache(void); + struct stack_hook *gather_text_list(struct bt_info *); + int get_cpus_online(void); ++int get_cpus_possible(void); + void print_stack_text_syms(struct bt_info *, ulong, ulong); + void back_trace(struct bt_info *); + #define BT_RAW (0x1ULL) +@@ -3039,11 +3605,13 @@ + #define BT_EXCEPTION_FRAME (0x80ULL) + #define BT_LINE_NUMBERS (0x100ULL) + #define BT_USER_EFRAME (0x200ULL) ++#define BT_INCOMPLETE_USER_EFRAME (BT_USER_EFRAME) + #define BT_SAVE_LASTSP (0x400ULL) + #define BT_FROM_EXCEPTION (0x800ULL) + #define BT_FROM_CALLFRAME (0x1000ULL) + #define BT_EFRAME_SEARCH (0x2000ULL) + #define BT_SPECULATE (0x4000ULL) ++#define BT_FRAMESIZE_DISABLE (BT_SPECULATE) + #define BT_RESCHEDULE (0x8000ULL) + #define BT_SCHEDULE (BT_RESCHEDULE) + #define BT_RET_FROM_SMP_FORK (0x10000ULL) +@@ -3069,6 +3637,8 @@ + #define BT_DUMPFILE_SEARCH (0x800000000ULL) + #define BT_EFRAME_SEARCH2 (0x1000000000ULL) + #define BT_START (0x2000000000ULL) ++#define BT_TEXT_SYMBOLS_ALL (0x4000000000ULL) ++#define BT_XEN_STOP_THIS_CPU (0x8000000000ULL) + + #define BT_REF_HEXVAL (0x1) + #define BT_REF_SYMBOL (0x2) +@@ -3101,6 +3671,17 @@ + #define TYPE_S390D (REMOTE_VERBOSE << 6) + #define TYPE_NETDUMP (REMOTE_VERBOSE << 7) + ++ulonglong xen_m2p(ulonglong); ++ ++void read_in_kernel_config(int); ++ ++#define IKCFG_INIT (0) ++#define IKCFG_READ (1) ++ ++#define MAGIC_START "IKCFG_ST" ++#define MAGIC_END "IKCFG_ED" ++#define MAGIC_SIZE (sizeof(MAGIC_START) - 1) ++ + /* + * dev.c + */ +@@ -3129,7 +3710,6 @@ + void x86_display_idt_table(void); + #define display_idt_table() x86_display_idt_table() + #define KSYMS_START (0x1) +-#define PAE (0x2) + void x86_dump_eframe_common(struct bt_info *bt, ulong *, int); + char *x86_function_called_by(ulong); + struct syment *x86_jmp_error_code(ulong); +@@ -3140,6 +3720,8 @@ + ulong entry_tramp_start; + ulong entry_tramp_end; + physaddr_t entry_tramp_start_phys; ++ ulonglong last_pmd_read_PAE; ++ ulonglong last_ptbl_read_PAE; + }; + + struct syment *x86_is_entry_tramp_address(ulong, ulong *); +@@ -3194,19 +3776,54 @@ + #define NMI_STACK 2 /* ebase[] offset to NMI exception stack */ + + struct machine_specific { ++ ulong userspace_top; ++ ulong page_offset; ++ ulong vmalloc_start_addr; ++ ulong vmalloc_end; ++ ulong vmemmap_vaddr; ++ ulong vmemmap_end; ++ ulong modules_vaddr; ++ ulong modules_end; ++ ulong phys_base; + char *pml4; ++ char *upml; ++ ulong last_upml_read; ++ ulong last_pml4_read; + char *irqstack; ++ ulong irq_eframe_link; + struct x86_64_pt_regs_offsets pto; + struct x86_64_stkinfo stkinfo; + }; + + #define KSYMS_START (0x1) + #define PT_REGS_INIT (0x2) ++#define VM_ORIG (0x4) ++#define VM_2_6_11 (0x8) ++#define VM_XEN (0x10) ++#define NO_TSS (0x20) ++#define SCHED_TEXT (0x40) ++#define PHYS_BASE (0x80) ++#define VM_XEN_RHEL4 (0x100) ++#define VMEMMAP (0x200) ++ ++#define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4) + + #define _2MB_PAGE_MASK (~((MEGABYTES(2))-1)) ++ ++#endif ++ ++#if defined(X86) || defined(X86_64) ++ ++/* ++ * unwind_x86_32_64.c ++ */ ++void init_unwind_table(void); ++int dwarf_backtrace(struct bt_info *, int, ulong); ++void dwarf_debug(struct bt_info *); ++int dwarf_print_stack_entry(struct bt_info *, int); ++ + #endif + +-void x86_64_backtrace_notice(ulong); + + /* + * ppc64.c +@@ -3240,13 +3857,42 @@ + ulong hwintrstack[NR_CPUS]; + char *hwstackbuf; + uint 
hwstacksize; +-}; ++ char *level4; ++ ulong last_level4_read; ++ ++ uint l4_index_size; ++ uint l3_index_size; ++ uint l2_index_size; ++ uint l1_index_size; ++ ++ uint ptrs_per_l3; ++ uint ptrs_per_l2; ++ uint ptrs_per_l1; ++ ++ uint l4_shift; ++ uint l3_shift; ++ uint l2_shift; ++ uint l1_shift; ++ ++ uint pte_shift; ++ uint l2_masked_bits; ++}; ++ ++#define IS_LAST_L4_READ(l4) ((ulong)(l4) == machdep->machspec->last_level4_read) ++ ++#define FILL_L4(L4, TYPE, SIZE) \ ++ if (!IS_LAST_L4_READ(L4)) { \ ++ readmem((ulonglong)((ulong)(L4)), TYPE, machdep->machspec->level4, \ ++ SIZE, "level4 page", FAULT_ON_ERROR); \ ++ machdep->machspec->last_level4_read = (ulong)(L4); \ ++ } + + void ppc64_init(int); + void ppc64_dump_machdep_table(ulong); + #define display_idt_table() \ + error(FATAL, "-d option is not applicable to PowerPC architecture\n") + #define KSYMS_START (0x1) ++#define VM_ORIG (0x2) + #endif + + /* +@@ -3258,15 +3904,27 @@ + #define display_idt_table() \ + error(FATAL, "-d option is not applicable to PowerPC architecture\n") + #define KSYMS_START (0x1) ++/* This should match PPC_FEATURE_BOOKE from include/asm-powerpc/cputable.h */ ++#define CPU_BOOKE (0x00008000) + #endif + + /* + * lkcd_fix_mem.c + */ + ++struct _dump_header_asm_s; ++struct _dump_header_s; + ulong get_lkcd_switch_stack(ulong); +-int fix_addr_v8(int); ++int fix_addr_v8(struct _dump_header_asm_s *); ++int lkcd_dump_init_v8_arch(struct _dump_header_s *dh); + int fix_addr_v7(int); ++int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp); ++int lkcd_get_kernel_start_v8(ulong *addr); ++ ++/* ++ * lkcd_v8.c ++ */ ++int get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp); + + /* + * ia64.c +@@ -3283,6 +3941,8 @@ + #define display_idt_table() \ + error(FATAL, "-d option TBD on ia64 architecture\n"); + int ia64_in_init_stack(ulong addr); ++int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt); ++physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo); + + #define OLD_UNWIND (0x1) /* CONFIG_IA64_NEW_UNWIND not turned on */ + #define NEW_UNWIND (0x2) /* CONFIG_IA64_NEW_UNWIND turned on */ +@@ -3396,10 +4056,26 @@ + int netdump_init(char *, FILE *); + ulong get_netdump_panic_task(void); + ulong get_netdump_switch_stack(ulong); +-int netdump_memory_dump(FILE *); + FILE *set_netdump_fp(FILE *); ++int netdump_memory_dump(FILE *); + void get_netdump_regs(struct bt_info *, ulong *, ulong *); + int is_partial_netdump(void); ++void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); ++void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); ++struct vmcore_data; ++struct vmcore_data *get_kdump_vmcore_data(void); ++int read_kdump(int, void *, int, ulong, physaddr_t); ++int write_kdump(int, void *, int, ulong, physaddr_t); ++int is_kdump(char *, ulong); ++int kdump_init(char *, FILE *); ++ulong get_kdump_panic_task(void); ++uint kdump_page_size(void); ++int kdump_free_memory(void); ++int kdump_memory_used(void); ++int kdump_memory_dump(FILE *); ++void get_kdump_regs(struct bt_info *, ulong *, ulong *); ++void xen_kdump_p2m_mfn(char *); ++int is_sadump_xen(void); + + /* + * diskdump.c +@@ -3416,6 +4092,28 @@ + int diskdump_memory_dump(FILE *); + FILE *set_diskdump_fp(FILE *); + void get_diskdump_regs(struct bt_info *, ulong *, ulong *); ++int diskdump_phys_base(unsigned long *); ++ulong *diskdump_flags; ++int is_partial_diskdump(void); ++ ++/* ++ * xendump.c ++ */ ++int is_xendump(char *); ++int read_xendump(int, void *, int, ulong, physaddr_t); ++int 
write_xendump(int, void *, int, ulong, physaddr_t); ++uint xendump_page_size(void); ++int xendump_free_memory(void); ++int xendump_memory_used(void); ++int xendump_init(char *, FILE *); ++int xendump_memory_dump(FILE *); ++ulong get_xendump_panic_task(void); ++void get_xendump_regs(struct bt_info *, ulong *, ulong *); ++char *xc_core_mfn_to_page(ulong, char *); ++int xc_core_mfn_to_page_index(ulong); ++void xendump_panic_hook(char *); ++int read_xendump_hyper(int, void *, int, ulong, physaddr_t); ++struct xendump_data *get_xendump_data(void); + + /* + * net.c +@@ -3493,6 +4191,8 @@ + void lkcd_dumpfile_complaint(uint32_t, uint32_t, int); + int set_mb_benchmark(ulong); + ulonglong fix_lkcd_address(ulonglong); ++int lkcd_get_kernel_start(ulong *addr); ++int get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp); + + /* + * lkcd_v1.c +@@ -3560,6 +4260,7 @@ + #define LKCD_DUMP_V7 (0x7) /* DUMP_VERSION_NUMBER */ + #define LKCD_DUMP_V8 (0x8) /* DUMP_VERSION_NUMBER */ + #define LKCD_DUMP_V9 (0x9) /* DUMP_VERSION_NUMBER */ ++#define LKCD_DUMP_V10 (0xa) /* DUMP_VERSION_NUMBER */ + + #define LKCD_DUMP_VERSION_NUMBER_MASK (0xf) + #define LKCD_DUMP_RAW (0x1) /* DUMP_[DH_]RAW */ +@@ -3764,7 +4465,6 @@ + extern int prettyprint_structs; + extern int prettyprint_arrays; + extern int repeat_count_threshold; +-extern int repeat_count_threshold; + extern unsigned int print_max; + + /* +@@ -3814,4 +4514,8 @@ + extern int have_partial_symbols(void); + extern int have_full_symbols(void); + ++#if defined(X86) || defined(X86_64) || defined(IA64) ++#define XEN_HYPERVISOR_ARCH ++#endif ++ + #endif /* !GDB_COMMON */ +--- crash/alpha.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/alpha.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* alpha.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -186,7 +186,8 @@ + "irq_desc", NULL, 0); + else + machdep->nr_irqs = 0; +- machdep->hz = HZ; ++ if (!machdep->hz) ++ machdep->hz = HZ; + break; + + case POST_INIT: +@@ -1858,8 +1859,6 @@ + fprintf(fp, " flags: %lx (", machdep->flags); + if (machdep->flags & HWRESET) + fprintf(fp, "%sHWRESET", others++ ? "|" : ""); +- if (machdep->flags & SYSRQ) +- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); + fprintf(fp, ")\n"); + fprintf(fp, " kvbase: %lx\n", machdep->kvbase); + fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); +--- crash/va_server.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/va_server.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* va_server.c - kernel crash dump file translation library + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -57,13 +57,15 @@ + + extern int monitor_memory(long *, long *, long *, long *); + +-int Page_Size = PAGE_SIZE; /* temporary setting until disk header is read */ ++int Page_Size; + ulong vas_debug = 0; + + extern void *malloc(size_t); + + int va_server_init(char *crash_file, u_long *start, u_long *end, u_long *stride) + { ++ Page_Size = getpagesize(); /* temporary setting until disk header is read */ ++ + if(read_map(crash_file)) { + if(va_server_init_v1(crash_file, start, end, stride)) + return -1; +--- crash/kernel.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/kernel.c 2008-01-16 16:32:14.000000000 -0500 +@@ -1,8 +1,8 @@ + /* kernel.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -16,11 +16,12 @@ + */ + + #include "defs.h" ++#include "xen_hyper_defs.h" + #include + + static void do_module_cmd(ulong, char *, ulong, char *, char *); + static char *find_module_objfile(char *, char *, char *); +-static char *get_uptime(char *); ++static char *module_objfile_search(char *, char *, char *); + static char *get_loadavg(char *); + static void get_lkcd_regs(struct bt_info *, ulong *, ulong *); + static void dump_sys_call_table(char *, int); +@@ -42,328 +43,452 @@ + static void verify_namelist(void); + static char *debug_kernel_version(char *); + static int restore_stack(struct bt_info *); ++static ulong __xen_m2p(ulonglong, ulong); ++static int search_mapping_page(ulong, ulong *, ulong *, ulong *); ++static void read_in_kernel_config_err(int, char *); ++static void BUG_bytes_init(void); ++static int BUG_x86(void); ++static int BUG_x86_64(void); + + + /* + * Gather a few kernel basics. 
+ */ + void +-kernel_init(int when) ++kernel_init() + { +- int i; +- char *p1, *p2, buf[BUFSIZE];; ++ int i, c; ++ char *p1, *p2, buf[BUFSIZE]; + struct syment *sp1, *sp2; ++ char *rqstruct; ++ char *irq_desc_type_name; + + if (pc->flags & KERNEL_DEBUG_QUERY) + return; + +- switch (when) +- { +- case PRE_GDB: +- kt->stext = symbol_value("_stext"); +- kt->etext = symbol_value("_etext"); +- get_text_init_space(); +- if (symbol_exists("__init_begin")) { +- kt->init_begin = symbol_value("__init_begin"); +- kt->init_end = symbol_value("__init_end"); +- } +- kt->end = symbol_value("_end"); ++ kt->stext = symbol_value("_stext"); ++ kt->etext = symbol_value("_etext"); ++ get_text_init_space(); ++ if (symbol_exists("__init_begin")) { ++ kt->init_begin = symbol_value("__init_begin"); ++ kt->init_end = symbol_value("__init_end"); ++ } ++ kt->end = symbol_value("_end"); + +- if (symbol_exists("smp_num_cpus")) { +- kt->flags |= SMP; +- get_symbol_data("smp_num_cpus", sizeof(int), &kt->cpus); +- if (kt->cpus < 1 || kt->cpus > NR_CPUS) +- error(WARNING, +- "invalid value: smp_num_cpus: %d\n", +- kt->cpus); +- } else if (symbol_exists("__per_cpu_offset")) { +- kt->flags |= SMP; +- kt->cpus = 1; +- } else +- kt->cpus = 1; +- +- if ((sp1 = symbol_search("__per_cpu_start")) && +- (sp2 = symbol_search("__per_cpu_end")) && +- (sp1->type == 'A') && (sp2->type == 'A') && +- (sp2->value > sp1->value)) +- kt->flags |= SMP|PER_CPU_OFF; ++ /* ++ * For the Xen architecture, default to writable page tables unless: ++ * ++ * (1) it's an "xm save" CANONICAL_PAGE_TABLES dumpfile, or ++ * (2) the --shadow_page_tables option was explicitly entered. ++ * ++ * But if the "phys_to_maching_mapping" array does not exist, and ++ * it's not an "xm save" canonical dumpfile, then we have no choice ++ * but to presume shadow page tables. 
++ */ ++ if (symbol_exists("xen_start_info")) { ++ kt->flags |= ARCH_XEN; ++ if (!(kt->xen_flags & (SHADOW_PAGE_TABLES|CANONICAL_PAGE_TABLES))) ++ kt->xen_flags |= WRITABLE_PAGE_TABLES; ++ if (symbol_exists("phys_to_machine_mapping")) ++ get_symbol_data("phys_to_machine_mapping", sizeof(ulong), ++ &kt->phys_to_machine_mapping); ++ else if (!(kt->xen_flags & CANONICAL_PAGE_TABLES)) { ++ kt->xen_flags &= ~WRITABLE_PAGE_TABLES; ++ kt->xen_flags |= SHADOW_PAGE_TABLES; ++ } ++ if (machine_type("X86")) ++ get_symbol_data("max_pfn", sizeof(ulong), &kt->p2m_table_size); ++ if (machine_type("X86_64")) ++ get_symbol_data("end_pfn", sizeof(ulong), &kt->p2m_table_size); ++ if ((kt->m2p_page = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc m2p page."); ++ } ++ ++ if (symbol_exists("smp_num_cpus")) { ++ kt->flags |= SMP; ++ get_symbol_data("smp_num_cpus", sizeof(int), &kt->cpus); ++ if (kt->cpus < 1 || kt->cpus > NR_CPUS) ++ error(WARNING, ++ "invalid value: smp_num_cpus: %d\n", ++ kt->cpus); ++ } else if (symbol_exists("__per_cpu_offset")) { ++ kt->flags |= SMP; ++ kt->cpus = 1; ++ } else ++ kt->cpus = 1; ++ ++ if ((sp1 = symbol_search("__per_cpu_start")) && ++ (sp2 = symbol_search("__per_cpu_end")) && ++ (sp1->type == 'A' || sp1->type == 'D') && ++ (sp2->type == 'A' || sp2->type == 'D') && ++ (sp2->value > sp1->value)) ++ kt->flags |= SMP|PER_CPU_OFF; + +- get_symbol_data("xtime", sizeof(struct timespec), &kt->date); ++ get_symbol_data("xtime", sizeof(struct timespec), &kt->date); + +- if (pc->flags & GET_TIMESTAMP) { +- fprintf(fp, "%s\n\n", +- strip_linefeeds(ctime(&kt->date.tv_sec))); +- clean_exit(0); +- } ++ if (pc->flags & GET_TIMESTAMP) { ++ fprintf(fp, "%s\n\n", ++ strip_linefeeds(ctime(&kt->date.tv_sec))); ++ clean_exit(0); ++ } + +- readmem(symbol_value("system_utsname"), KVADDR, &kt->utsname, +- sizeof(struct new_utsname), "system_utsname", +- FAULT_ON_ERROR); +- strncpy(buf, kt->utsname.release, MIN(strlen(kt->utsname.release), 65)); +- if (ascii_string(kt->utsname.release)) { +- p1 = p2 = buf; +- while (*p2 != '.') +- p2++; +- *p2 = NULLCHAR; +- kt->kernel_version[0] = atoi(p1); +- p1 = ++p2; +- while (*p2 != '.') +- p2++; +- *p2 = NULLCHAR; +- kt->kernel_version[1] = atoi(p1); +- p1 = ++p2; +- while ((*p2 >= '0') && (*p2 <= '9')) +- p2++; +- *p2 = NULLCHAR; +- kt->kernel_version[2] = atoi(p1); +- } +- break; ++ if (symbol_exists("system_utsname")) ++ readmem(symbol_value("system_utsname"), KVADDR, &kt->utsname, ++ sizeof(struct new_utsname), "system_utsname", ++ RETURN_ON_ERROR); ++ else if (symbol_exists("init_uts_ns")) ++ readmem(symbol_value("init_uts_ns") + sizeof(int), ++ KVADDR, &kt->utsname, sizeof(struct new_utsname), ++ "init_uts_ns", RETURN_ON_ERROR); ++ else ++ error(INFO, "cannot access utsname information\n\n"); + +- case POST_GDB: +- if (symbol_exists("__per_cpu_offset")) { ++ strncpy(buf, kt->utsname.release, MIN(strlen(kt->utsname.release), 65)); ++ if (ascii_string(kt->utsname.release)) { ++ p1 = p2 = buf; ++ while (*p2 != '.') ++ p2++; ++ *p2 = NULLCHAR; ++ kt->kernel_version[0] = atoi(p1); ++ p1 = ++p2; ++ while (*p2 != '.') ++ p2++; ++ *p2 = NULLCHAR; ++ kt->kernel_version[1] = atoi(p1); ++ p1 = ++p2; ++ while ((*p2 >= '0') && (*p2 <= '9')) ++ p2++; ++ *p2 = NULLCHAR; ++ kt->kernel_version[2] = atoi(p1); ++ } ++ ++ verify_version(); ++ ++ if (symbol_exists("__per_cpu_offset")) { ++ if (LKCD_KERNTYPES()) ++ i = get_cpus_possible(); ++ else + i = get_array_length("__per_cpu_offset", NULL, 0); +- get_symbol_data("__per_cpu_offset", +- sizeof(long)*(i 
<= NR_CPUS ? i : NR_CPUS), +- &kt->__per_cpu_offset[0]); +- kt->flags |= PER_CPU_OFF; +- } +- MEMBER_OFFSET_INIT(runqueue_cpu, "runqueue", "cpu"); +- if (VALID_MEMBER(runqueue_cpu)) { +- MEMBER_OFFSET_INIT(cpu_s_curr, "cpu_s", "curr"); +- MEMBER_OFFSET_INIT(cpu_s_idle, "cpu_s", "idle"); +- STRUCT_SIZE_INIT(cpu_s, "cpu_s"); +- kt->runq_siblings = get_array_length("runqueue.cpu", +- NULL, 0); +- if (symbol_exists("__cpu_idx") && +- symbol_exists("__rq_idx")) { +- if (!readmem(symbol_value("__cpu_idx"), KVADDR, +- &kt->__cpu_idx[0], sizeof(long) * NR_CPUS, +- "__cpu_idx[NR_CPUS]", RETURN_ON_ERROR)) +- error(INFO, +- "cannot read __cpu_idx[NR_CPUS] array\n"); +- if (!readmem(symbol_value("__rq_idx"), KVADDR, +- &kt->__rq_idx[0], sizeof(long) * NR_CPUS, +- "__rq_idx[NR_CPUS]", RETURN_ON_ERROR)) +- error(INFO, +- "cannot read __rq_idx[NR_CPUS] array\n"); +- } else if (kt->runq_siblings > 1) +- error(INFO, +- "runq_siblings: %d: __cpu_idx and __rq_idx arrays don't exist?\n", +- kt->runq_siblings); +- } else { +- MEMBER_OFFSET_INIT(runqueue_idle, "runqueue", "idle"); +- MEMBER_OFFSET_INIT(runqueue_curr, "runqueue", "curr"); +- } +- MEMBER_OFFSET_INIT(runqueue_active, "runqueue", "active"); +- MEMBER_OFFSET_INIT(runqueue_expired, "runqueue", "expired"); +- MEMBER_OFFSET_INIT(runqueue_arrays, "runqueue", "arrays"); +- MEMBER_OFFSET_INIT(prio_array_queue, "prio_array", "queue"); +- MEMBER_OFFSET_INIT(prio_array_nr_active, "prio_array", +- "nr_active"); +- STRUCT_SIZE_INIT(runqueue, "runqueue"); +- STRUCT_SIZE_INIT(prio_array, "prio_array"); +- +- /* +- * In 2.4, smp_send_stop() sets smp_num_cpus back to 1 +- * in some, but not all, architectures. So if a count +- * of 1 is found, be suspicious, and check the +- * init_tasks[NR_CPUS] array (also intro'd in 2.4), +- * for idle thread addresses. For 2.2, prepare for the +- * eventuality by verifying the cpu count with the machine +- * dependent count. +- */ +- if ((kt->flags & SMP) && DUMPFILE() && (kt->cpus == 1)) { +- if (symbol_exists("init_tasks")) { +- ulong init_tasks[NR_CPUS]; +- int nr_cpus; +- +- BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS); +- +- nr_cpus = get_array_length("init_tasks", +- NULL, 0); +- if ((nr_cpus < 1) || (nr_cpus > NR_CPUS)) +- nr_cpus = NR_CPUS; +- +- get_idle_threads(&init_tasks[0], nr_cpus); +- +- for (i = kt->cpus = 0; i < nr_cpus; i++) +- if (init_tasks[i]) +- kt->cpus++; +- } else +- kt->cpus = machdep->get_smp_cpus(); +- } ++ get_symbol_data("__per_cpu_offset", ++ sizeof(long)*((i && (i <= NR_CPUS)) ? i : NR_CPUS), ++ &kt->__per_cpu_offset[0]); ++ kt->flags |= PER_CPU_OFF; ++ } ++ if (STRUCT_EXISTS("runqueue")) ++ rqstruct = "runqueue"; ++ else if (STRUCT_EXISTS("rq")) ++ rqstruct = "rq"; + +- if ((kt->flags & SMP) && ACTIVE() && (kt->cpus == 1) && +- (kt->flags & PER_CPU_OFF)) ++ MEMBER_OFFSET_INIT(runqueue_cpu, rqstruct, "cpu"); ++ /* ++ * 'cpu' does not exist in 'struct rq'. 
++ */ ++ if (VALID_MEMBER(runqueue_cpu) && ++ (get_array_length("runqueue.cpu", NULL, 0) > 0)) { ++ MEMBER_OFFSET_INIT(cpu_s_curr, "cpu_s", "curr"); ++ MEMBER_OFFSET_INIT(cpu_s_idle, "cpu_s", "idle"); ++ STRUCT_SIZE_INIT(cpu_s, "cpu_s"); ++ kt->runq_siblings = get_array_length("runqueue.cpu", ++ NULL, 0); ++ if (symbol_exists("__cpu_idx") && ++ symbol_exists("__rq_idx")) { ++ if (!readmem(symbol_value("__cpu_idx"), KVADDR, ++ &kt->__cpu_idx[0], sizeof(long) * NR_CPUS, ++ "__cpu_idx[NR_CPUS]", RETURN_ON_ERROR)) ++ error(INFO, ++ "cannot read __cpu_idx[NR_CPUS] array\n"); ++ if (!readmem(symbol_value("__rq_idx"), KVADDR, ++ &kt->__rq_idx[0], sizeof(long) * NR_CPUS, ++ "__rq_idx[NR_CPUS]", RETURN_ON_ERROR)) ++ error(INFO, ++ "cannot read __rq_idx[NR_CPUS] array\n"); ++ } else if (kt->runq_siblings > 1) ++ error(INFO, ++ "runq_siblings: %d: __cpu_idx and __rq_idx arrays don't exist?\n", ++ kt->runq_siblings); ++ } else { ++ MEMBER_OFFSET_INIT(runqueue_idle, rqstruct, "idle"); ++ MEMBER_OFFSET_INIT(runqueue_curr, rqstruct, "curr"); ++ ASSIGN_OFFSET(runqueue_cpu) = INVALID_OFFSET; ++ } ++ MEMBER_OFFSET_INIT(runqueue_active, rqstruct, "active"); ++ MEMBER_OFFSET_INIT(runqueue_expired, rqstruct, "expired"); ++ MEMBER_OFFSET_INIT(runqueue_arrays, rqstruct, "arrays"); ++ MEMBER_OFFSET_INIT(prio_array_queue, "prio_array", "queue"); ++ MEMBER_OFFSET_INIT(prio_array_nr_active, "prio_array", "nr_active"); ++ STRUCT_SIZE_INIT(runqueue, rqstruct); ++ STRUCT_SIZE_INIT(prio_array, "prio_array"); ++ ++ MEMBER_OFFSET_INIT(rq_cfs, "rq", "cfs"); ++ ++ /* ++ * In 2.4, smp_send_stop() sets smp_num_cpus back to 1 ++ * in some, but not all, architectures. So if a count ++ * of 1 is found, be suspicious, and check the ++ * init_tasks[NR_CPUS] array (also intro'd in 2.4), ++ * for idle thread addresses. For 2.2, prepare for the ++ * eventuality by verifying the cpu count with the machine ++ * dependent count. 
++ */ ++ if ((kt->flags & SMP) && DUMPFILE() && (kt->cpus == 1)) { ++ if (symbol_exists("init_tasks")) { ++ ulong init_tasks[NR_CPUS]; ++ int nr_cpus; ++ ++ BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS); ++ ++ nr_cpus = get_array_length("init_tasks", NULL, 0); ++ if ((nr_cpus < 1) || (nr_cpus > NR_CPUS)) ++ nr_cpus = NR_CPUS; ++ ++ get_idle_threads(&init_tasks[0], nr_cpus); ++ ++ for (i = kt->cpus = 0; i < nr_cpus; i++) ++ if (init_tasks[i]) ++ kt->cpus++; ++ } else + kt->cpus = machdep->get_smp_cpus(); ++ } + +- if (kt->cpus > NR_CPUS) { +- error(WARNING, +- "calculated number of cpus (%d) greater than compiled-in NR_CPUS (%d)\n", +- kt->cpus, NR_CPUS); +- error(FATAL, "recompile crash with larger NR_CPUS\n"); +- } +- +- STRUCT_SIZE_INIT(spinlock_t, "spinlock_t"); +- verify_spinlock(); +- +- STRUCT_SIZE_INIT(list_head, "list_head"); +- MEMBER_OFFSET_INIT(list_head_next, "list_head", "next"); +- MEMBER_OFFSET_INIT(list_head_prev, "list_head", "prev"); +- if (OFFSET(list_head_next) != 0) +- error(WARNING, +- "list_head.next offset: %ld: list command may fail\n", +- OFFSET(list_head_next)); +- +- MEMBER_OFFSET_INIT(hlist_node_next, "hlist_node", "next"); +- MEMBER_OFFSET_INIT(hlist_node_pprev, "hlist_node", "pprev"); +- STRUCT_SIZE_INIT(hlist_head, "hlist_head"); +- STRUCT_SIZE_INIT(hlist_node, "hlist_node"); +- +- MEMBER_OFFSET_INIT(irq_desc_t_status, "irq_desc_t", "status"); +- MEMBER_OFFSET_INIT(irq_desc_t_handler, "irq_desc_t", "handler"); +- MEMBER_OFFSET_INIT(irq_desc_t_action, "irq_desc_t", "action"); +- MEMBER_OFFSET_INIT(irq_desc_t_depth, "irq_desc_t", "depth"); +- MEMBER_OFFSET_INIT(hw_interrupt_type_typename, ++ if ((kt->flags & SMP) && ACTIVE() && (kt->cpus == 1) && ++ (kt->flags & PER_CPU_OFF)) ++ kt->cpus = machdep->get_smp_cpus(); ++ ++ if (kt->cpus_override && (c = atoi(kt->cpus_override))) { ++ error(WARNING, "forcing cpu count to: %d\n\n", c); ++ kt->cpus = c; ++ } ++ ++ if (kt->cpus > NR_CPUS) { ++ error(WARNING, ++ "%s number of cpus (%d) greater than compiled-in NR_CPUS (%d)\n", ++ kt->cpus_override && atoi(kt->cpus_override) ? 
++ "configured" : "calculated", kt->cpus, NR_CPUS); ++ error(FATAL, "recompile crash with larger NR_CPUS\n"); ++ } ++ ++ STRUCT_SIZE_INIT(spinlock_t, "spinlock_t"); ++ verify_spinlock(); ++ ++ STRUCT_SIZE_INIT(list_head, "list_head"); ++ MEMBER_OFFSET_INIT(list_head_next, "list_head", "next"); ++ MEMBER_OFFSET_INIT(list_head_prev, "list_head", "prev"); ++ if (OFFSET(list_head_next) != 0) ++ error(WARNING, ++ "list_head.next offset: %ld: list command may fail\n", ++ OFFSET(list_head_next)); ++ ++ MEMBER_OFFSET_INIT(hlist_node_next, "hlist_node", "next"); ++ MEMBER_OFFSET_INIT(hlist_node_pprev, "hlist_node", "pprev"); ++ STRUCT_SIZE_INIT(hlist_head, "hlist_head"); ++ STRUCT_SIZE_INIT(hlist_node, "hlist_node"); ++ ++ if (STRUCT_EXISTS("irq_desc_t")) ++ irq_desc_type_name = "irq_desc_t"; ++ else ++ irq_desc_type_name = "irq_desc"; ++ ++ STRUCT_SIZE_INIT(irq_desc_t, irq_desc_type_name); ++ MEMBER_OFFSET_INIT(irq_desc_t_status, irq_desc_type_name, "status"); ++ if (MEMBER_EXISTS(irq_desc_type_name, "handler")) ++ MEMBER_OFFSET_INIT(irq_desc_t_handler, irq_desc_type_name, "handler"); ++ else ++ MEMBER_OFFSET_INIT(irq_desc_t_chip, irq_desc_type_name, "chip"); ++ MEMBER_OFFSET_INIT(irq_desc_t_action, irq_desc_type_name, "action"); ++ MEMBER_OFFSET_INIT(irq_desc_t_depth, irq_desc_type_name, "depth"); ++ if (STRUCT_EXISTS("hw_interrupt_type")) { ++ MEMBER_OFFSET_INIT(hw_interrupt_type_typename, + "hw_interrupt_type", "typename"); + MEMBER_OFFSET_INIT(hw_interrupt_type_startup, + "hw_interrupt_type", "startup"); + MEMBER_OFFSET_INIT(hw_interrupt_type_shutdown, + "hw_interrupt_type", "shutdown"); +- MEMBER_OFFSET_INIT(hw_interrupt_type_handle, +- "hw_interrupt_type", "handle"); ++ MEMBER_OFFSET_INIT(hw_interrupt_type_handle, ++ "hw_interrupt_type", "handle"); + MEMBER_OFFSET_INIT(hw_interrupt_type_enable, + "hw_interrupt_type", "enable"); + MEMBER_OFFSET_INIT(hw_interrupt_type_disable, + "hw_interrupt_type", "disable"); +- MEMBER_OFFSET_INIT(hw_interrupt_type_ack, ++ MEMBER_OFFSET_INIT(hw_interrupt_type_ack, + "hw_interrupt_type", "ack"); +- MEMBER_OFFSET_INIT(hw_interrupt_type_end, ++ MEMBER_OFFSET_INIT(hw_interrupt_type_end, + "hw_interrupt_type", "end"); + MEMBER_OFFSET_INIT(hw_interrupt_type_set_affinity, + "hw_interrupt_type", "set_affinity"); +- MEMBER_OFFSET_INIT(irqaction_handler, "irqaction", "handler"); +- MEMBER_OFFSET_INIT(irqaction_flags, "irqaction", "flags"); +- MEMBER_OFFSET_INIT(irqaction_mask, "irqaction", "mask"); +- MEMBER_OFFSET_INIT(irqaction_name, "irqaction", "name"); +- MEMBER_OFFSET_INIT(irqaction_dev_id, "irqaction", "dev_id"); +- MEMBER_OFFSET_INIT(irqaction_next, "irqaction", "next"); +- +- STRUCT_SIZE_INIT(irq_desc_t, "irq_desc_t"); +- +- STRUCT_SIZE_INIT(irq_cpustat_t, "irq_cpustat_t"); +- MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_active, +- "irq_cpustat_t", "__softirq_active"); +- MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_mask, +- "irq_cpustat_t", "__softirq_mask"); +- +- STRUCT_SIZE_INIT(timer_list, "timer_list"); +- MEMBER_OFFSET_INIT(timer_list_list, "timer_list", "list"); +- MEMBER_OFFSET_INIT(timer_list_next, "timer_list", "next"); +- MEMBER_OFFSET_INIT(timer_list_entry, "timer_list", "entry"); +- MEMBER_OFFSET_INIT(timer_list_expires, "timer_list", "expires"); +- MEMBER_OFFSET_INIT(timer_list_function, +- "timer_list", "function"); +- STRUCT_SIZE_INIT(timer_vec_root, "timer_vec_root"); +- if (VALID_STRUCT(timer_vec_root)) +- MEMBER_OFFSET_INIT(timer_vec_root_vec, +- "timer_vec_root", "vec"); +- STRUCT_SIZE_INIT(timer_vec, "timer_vec"); +- if 
(VALID_STRUCT(timer_vec)) +- MEMBER_OFFSET_INIT(timer_vec_vec, "timer_vec", "vec"); +- +- STRUCT_SIZE_INIT(tvec_root_s, "tvec_root_s"); +- if (VALID_STRUCT(tvec_root_s)) { +- STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_t_base_s"); +- MEMBER_OFFSET_INIT(tvec_t_base_s_tv1, +- "tvec_t_base_s", "tv1"); +- MEMBER_OFFSET_INIT(tvec_root_s_vec, +- "tvec_root_s", "vec"); +- STRUCT_SIZE_INIT(tvec_s, "tvec_s"); +- MEMBER_OFFSET_INIT(tvec_s_vec, "tvec_s", "vec"); +- } +- +- STRUCT_SIZE_INIT(__wait_queue, "__wait_queue"); +- if (VALID_STRUCT(__wait_queue)) { +- MEMBER_OFFSET_INIT(__wait_queue_task, +- "__wait_queue", "task"); +- MEMBER_OFFSET_INIT(__wait_queue_head_task_list, +- "__wait_queue_head", "task_list"); +- MEMBER_OFFSET_INIT(__wait_queue_task_list, +- "__wait_queue", "task_list"); +- } else { +- STRUCT_SIZE_INIT(wait_queue, "wait_queue"); +- if (VALID_STRUCT(wait_queue)) { +- MEMBER_OFFSET_INIT(wait_queue_task, +- "wait_queue", "task"); +- MEMBER_OFFSET_INIT(wait_queue_next, +- "wait_queue", "next"); +- } ++ } else { /* ++ * On later kernels where hw_interrupt_type was replaced ++ * by irq_chip ++ */ ++ MEMBER_OFFSET_INIT(irq_chip_typename, ++ "irq_chip", "name"); ++ MEMBER_OFFSET_INIT(irq_chip_startup, ++ "irq_chip", "startup"); ++ MEMBER_OFFSET_INIT(irq_chip_shutdown, ++ "irq_chip", "shutdown"); ++ MEMBER_OFFSET_INIT(irq_chip_enable, ++ "irq_chip", "enable"); ++ MEMBER_OFFSET_INIT(irq_chip_disable, ++ "irq_chip", "disable"); ++ MEMBER_OFFSET_INIT(irq_chip_ack, ++ "irq_chip", "ack"); ++ MEMBER_OFFSET_INIT(irq_chip_mask, ++ "irq_chip", "mask"); ++ MEMBER_OFFSET_INIT(irq_chip_mask_ack, ++ "irq_chip", "mask_ack"); ++ MEMBER_OFFSET_INIT(irq_chip_unmask, ++ "irq_chip", "unmask"); ++ MEMBER_OFFSET_INIT(irq_chip_eoi, ++ "irq_chip", "eoi"); ++ MEMBER_OFFSET_INIT(irq_chip_end, ++ "irq_chip", "end"); ++ MEMBER_OFFSET_INIT(irq_chip_set_affinity, ++ "irq_chip", "set_affinity"); ++ MEMBER_OFFSET_INIT(irq_chip_retrigger, ++ "irq_chip", "retrigger"); ++ MEMBER_OFFSET_INIT(irq_chip_set_type, ++ "irq_chip", "set_type"); ++ MEMBER_OFFSET_INIT(irq_chip_set_wake, ++ "irq_chip", "set_wake"); ++ } ++ MEMBER_OFFSET_INIT(irqaction_handler, "irqaction", "handler"); ++ MEMBER_OFFSET_INIT(irqaction_flags, "irqaction", "flags"); ++ MEMBER_OFFSET_INIT(irqaction_mask, "irqaction", "mask"); ++ MEMBER_OFFSET_INIT(irqaction_name, "irqaction", "name"); ++ MEMBER_OFFSET_INIT(irqaction_dev_id, "irqaction", "dev_id"); ++ MEMBER_OFFSET_INIT(irqaction_next, "irqaction", "next"); ++ ++ STRUCT_SIZE_INIT(irq_cpustat_t, "irq_cpustat_t"); ++ MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_active, ++ "irq_cpustat_t", "__softirq_active"); ++ MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_mask, ++ "irq_cpustat_t", "__softirq_mask"); ++ ++ STRUCT_SIZE_INIT(timer_list, "timer_list"); ++ MEMBER_OFFSET_INIT(timer_list_list, "timer_list", "list"); ++ MEMBER_OFFSET_INIT(timer_list_next, "timer_list", "next"); ++ MEMBER_OFFSET_INIT(timer_list_entry, "timer_list", "entry"); ++ MEMBER_OFFSET_INIT(timer_list_expires, "timer_list", "expires"); ++ MEMBER_OFFSET_INIT(timer_list_function, "timer_list", "function"); ++ STRUCT_SIZE_INIT(timer_vec_root, "timer_vec_root"); ++ if (VALID_STRUCT(timer_vec_root)) ++ MEMBER_OFFSET_INIT(timer_vec_root_vec, ++ "timer_vec_root", "vec"); ++ STRUCT_SIZE_INIT(timer_vec, "timer_vec"); ++ if (VALID_STRUCT(timer_vec)) ++ MEMBER_OFFSET_INIT(timer_vec_vec, "timer_vec", "vec"); ++ ++ STRUCT_SIZE_INIT(tvec_root_s, "tvec_root_s"); ++ if (VALID_STRUCT(tvec_root_s)) { ++ STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_t_base_s"); ++ 
MEMBER_OFFSET_INIT(tvec_t_base_s_tv1, ++ "tvec_t_base_s", "tv1"); ++ MEMBER_OFFSET_INIT(tvec_root_s_vec, ++ "tvec_root_s", "vec"); ++ STRUCT_SIZE_INIT(tvec_s, "tvec_s"); ++ MEMBER_OFFSET_INIT(tvec_s_vec, "tvec_s", "vec"); ++ } ++ ++ STRUCT_SIZE_INIT(__wait_queue, "__wait_queue"); ++ if (VALID_STRUCT(__wait_queue)) { ++ if (MEMBER_EXISTS("__wait_queue", "task")) ++ MEMBER_OFFSET_INIT(__wait_queue_task, ++ "__wait_queue", "task"); ++ else ++ MEMBER_OFFSET_INIT(__wait_queue_task, ++ "__wait_queue", "private"); ++ MEMBER_OFFSET_INIT(__wait_queue_head_task_list, ++ "__wait_queue_head", "task_list"); ++ MEMBER_OFFSET_INIT(__wait_queue_task_list, ++ "__wait_queue", "task_list"); ++ } else { ++ STRUCT_SIZE_INIT(wait_queue, "wait_queue"); ++ if (VALID_STRUCT(wait_queue)) { ++ MEMBER_OFFSET_INIT(wait_queue_task, ++ "wait_queue", "task"); ++ MEMBER_OFFSET_INIT(wait_queue_next, ++ "wait_queue", "next"); + } ++ } + +- STRUCT_SIZE_INIT(pt_regs, "pt_regs"); +- STRUCT_SIZE_INIT(softirq_state, "softirq_state"); +- STRUCT_SIZE_INIT(desc_struct, "desc_struct"); +- +- STRUCT_SIZE_INIT(char_device_struct, "char_device_struct"); +- if (VALID_STRUCT(char_device_struct)) { +- MEMBER_OFFSET_INIT(char_device_struct_next, +- "char_device_struct", "next"); +- MEMBER_OFFSET_INIT(char_device_struct_name, +- "char_device_struct", "name"); +- MEMBER_OFFSET_INIT(char_device_struct_fops, +- "char_device_struct", "fops"); +- MEMBER_OFFSET_INIT(char_device_struct_major, +- "char_device_struct", "major"); +- } +- +- MEMBER_OFFSET_INIT(module_kallsyms_start, "module", +- "kallsyms_start"); +- +- STRUCT_SIZE_INIT(kallsyms_header, "kallsyms_header"); +- +- if (VALID_MEMBER(module_kallsyms_start) && +- VALID_SIZE(kallsyms_header)) { +- MEMBER_OFFSET_INIT(kallsyms_header_sections, +- "kallsyms_header", "sections"); +- MEMBER_OFFSET_INIT(kallsyms_header_section_off, +- "kallsyms_header", "section_off"); +- MEMBER_OFFSET_INIT(kallsyms_header_symbols, +- "kallsyms_header", "symbols"); +- MEMBER_OFFSET_INIT(kallsyms_header_symbol_off, +- "kallsyms_header", "symbol_off"); +- MEMBER_OFFSET_INIT(kallsyms_header_string_off, +- "kallsyms_header", "string_off"); +- MEMBER_OFFSET_INIT(kallsyms_symbol_section_off, +- "kallsyms_symbol", "section_off"); +- MEMBER_OFFSET_INIT(kallsyms_symbol_symbol_addr, +- "kallsyms_symbol", "symbol_addr"); +- MEMBER_OFFSET_INIT(kallsyms_symbol_name_off, +- "kallsyms_symbol", "name_off"); +- MEMBER_OFFSET_INIT(kallsyms_section_start, +- "kallsyms_section", "start"); +- MEMBER_OFFSET_INIT(kallsyms_section_size, +- "kallsyms_section", "size"); +- MEMBER_OFFSET_INIT(kallsyms_section_name_off, +- "kallsyms_section", "name_off"); +- STRUCT_SIZE_INIT(kallsyms_symbol, "kallsyms_symbol"); +- STRUCT_SIZE_INIT(kallsyms_section, "kallsyms_section"); ++ STRUCT_SIZE_INIT(pt_regs, "pt_regs"); ++ STRUCT_SIZE_INIT(softirq_state, "softirq_state"); ++ STRUCT_SIZE_INIT(desc_struct, "desc_struct"); ++ ++ STRUCT_SIZE_INIT(char_device_struct, "char_device_struct"); ++ if (VALID_STRUCT(char_device_struct)) { ++ MEMBER_OFFSET_INIT(char_device_struct_next, ++ "char_device_struct", "next"); ++ MEMBER_OFFSET_INIT(char_device_struct_name, ++ "char_device_struct", "name"); ++ MEMBER_OFFSET_INIT(char_device_struct_fops, ++ "char_device_struct", "fops"); ++ MEMBER_OFFSET_INIT(char_device_struct_major, ++ "char_device_struct", "major"); ++ } ++ ++ MEMBER_OFFSET_INIT(module_kallsyms_start, "module", ++ "kallsyms_start"); ++ ++ STRUCT_SIZE_INIT(kallsyms_header, "kallsyms_header"); ++ ++ if (VALID_MEMBER(module_kallsyms_start) && ++ 
VALID_SIZE(kallsyms_header)) { ++ MEMBER_OFFSET_INIT(kallsyms_header_sections, ++ "kallsyms_header", "sections"); ++ MEMBER_OFFSET_INIT(kallsyms_header_section_off, ++ "kallsyms_header", "section_off"); ++ MEMBER_OFFSET_INIT(kallsyms_header_symbols, ++ "kallsyms_header", "symbols"); ++ MEMBER_OFFSET_INIT(kallsyms_header_symbol_off, ++ "kallsyms_header", "symbol_off"); ++ MEMBER_OFFSET_INIT(kallsyms_header_string_off, ++ "kallsyms_header", "string_off"); ++ MEMBER_OFFSET_INIT(kallsyms_symbol_section_off, ++ "kallsyms_symbol", "section_off"); ++ MEMBER_OFFSET_INIT(kallsyms_symbol_symbol_addr, ++ "kallsyms_symbol", "symbol_addr"); ++ MEMBER_OFFSET_INIT(kallsyms_symbol_name_off, ++ "kallsyms_symbol", "name_off"); ++ MEMBER_OFFSET_INIT(kallsyms_section_start, ++ "kallsyms_section", "start"); ++ MEMBER_OFFSET_INIT(kallsyms_section_size, ++ "kallsyms_section", "size"); ++ MEMBER_OFFSET_INIT(kallsyms_section_name_off, ++ "kallsyms_section", "name_off"); ++ STRUCT_SIZE_INIT(kallsyms_symbol, "kallsyms_symbol"); ++ STRUCT_SIZE_INIT(kallsyms_section, "kallsyms_section"); + +- if (!(kt->flags & NO_KALLSYMS)) +- kt->flags |= KALLSYMS_V1; +- } ++ if (!(kt->flags & NO_KALLSYMS)) ++ kt->flags |= KALLSYMS_V1; ++ } + +- MEMBER_OFFSET_INIT(module_num_symtab, "module", "num_symtab"); ++ MEMBER_OFFSET_INIT(module_num_symtab, "module", "num_symtab"); + +- if (VALID_MEMBER(module_num_symtab)) { +- MEMBER_OFFSET_INIT(module_symtab, "module", "symtab"); +- MEMBER_OFFSET_INIT(module_strtab, "module", "strtab"); ++ if (VALID_MEMBER(module_num_symtab)) { ++ MEMBER_OFFSET_INIT(module_symtab, "module", "symtab"); ++ MEMBER_OFFSET_INIT(module_strtab, "module", "strtab"); + +- if (!(kt->flags & NO_KALLSYMS)) +- kt->flags |= KALLSYMS_V2; +- } +- break; ++ if (!(kt->flags & NO_KALLSYMS)) ++ kt->flags |= KALLSYMS_V2; ++ } ++ ++ if (!(kt->flags & DWARF_UNWIND)) ++ kt->flags |= NO_DWARF_UNWIND; ++ ++ /* ++ * OpenVZ ++ */ ++ if (kernel_symbol_exists("pcpu_info") && ++ STRUCT_EXISTS("pcpu_info") && STRUCT_EXISTS("vcpu_struct")) { ++ MEMBER_OFFSET_INIT(pcpu_info_vcpu, "pcpu_info", "vcpu"); ++ MEMBER_OFFSET_INIT(pcpu_info_idle, "pcpu_info", "idle"); ++ MEMBER_OFFSET_INIT(vcpu_struct_rq, "vcpu_struct", "rq"); ++ STRUCT_SIZE_INIT(pcpu_info, "pcpu_info"); ++ STRUCT_SIZE_INIT(vcpu_struct, "vcpu_struct"); ++ kt->flags |= ARCH_OPENVZ; + } ++ ++ BUG_bytes_init(); + } + + /* +@@ -377,7 +502,7 @@ + { + char buf[BUFSIZE]; + ulong linux_banner; +- int argc; ++ int argc, len; + char *arglist[MAXARGS]; + char *p1, *p2; + struct syment *sp; +@@ -389,7 +514,7 @@ + + if (!(sp = symbol_search("linux_banner"))) + error(FATAL, "linux_banner symbol does not exist?\n"); +- else if (sp->type == 'R') ++ else if ((sp->type == 'R') || (sp->type == 'r')) + linux_banner = symbol_value("linux_banner"); + else + get_symbol_data("linux_banner", sizeof(ulong), &linux_banner); +@@ -405,7 +530,8 @@ + error(WARNING, "cannot read linux_banner string\n"); + + if (ACTIVE()) { +- if (strlen(kt->proc_version) && !STREQ(buf, kt->proc_version)) { ++ len = strlen(kt->proc_version) - 1; ++ if ((len > 0) && (strncmp(buf, kt->proc_version, len) != 0)) { + if (CRASHDEBUG(1)) { + fprintf(fp, "/proc/version:\n%s", + kt->proc_version); +@@ -471,6 +597,9 @@ + } + } + ++ if (CRASHDEBUG(1)) ++ gdb_readnow_warning(); ++ + return; + + bad_match: +@@ -614,6 +743,10 @@ + if (pc->flags & KERNEL_DEBUG_QUERY) + return; + ++ /* the kerntypes may not match in terms of gcc version or SMP */ ++ if (LKCD_KERNTYPES()) ++ return; ++ + if (!strlen(kt->utsname.version)) + return; + +@@ -740,7 
+873,7 @@ + { + int c; + int do_load_module_filter, do_machdep_filter, reverse; +- int unfiltered, user_mode, count_entered; ++ int unfiltered, user_mode, count_entered, bug_bytes_entered; + ulong curaddr; + ulong revtarget; + ulong count; +@@ -754,7 +887,16 @@ + char buf4[BUFSIZE]; + char buf5[BUFSIZE]; + +- reverse = count_entered = FALSE; ++ if ((argcnt == 2) && STREQ(args[1], "-b")) { ++ fprintf(fp, "encoded bytes being skipped after ud2a: "); ++ if (kt->BUG_bytes < 0) ++ fprintf(fp, "undetermined\n"); ++ else ++ fprintf(fp, "%d\n", kt->BUG_bytes); ++ return; ++ } ++ ++ reverse = count_entered = bug_bytes_entered = FALSE; + sp = NULL; + unfiltered = user_mode = do_machdep_filter = do_load_module_filter = 0; + +@@ -763,7 +905,7 @@ + req->flags |= GNU_FROM_TTY_OFF|GNU_RETURN_ON_ERROR; + req->count = 1; + +- while ((c = getopt(argcnt, args, "ulrx")) != EOF) { ++ while ((c = getopt(argcnt, args, "ulrxb:B:")) != EOF) { + switch(c) + { + case 'x': +@@ -786,6 +928,12 @@ + BZERO(buf4, BUFSIZE); + break; + ++ case 'B': ++ case 'b': ++ kt->BUG_bytes = atoi(optarg); ++ bug_bytes_entered = TRUE; ++ break; ++ + default: + argerrs++; + break; +@@ -846,7 +994,7 @@ + if (user_mode) { + sprintf(buf1, "x/%ldi 0x%lx", + req->count ? req->count : 1, req->addr); +- pc->cmdgenspec = pc->cmdgencur; ++ pc->curcmd_flags |= MEMTYPE_UVADDR; + gdb_pass_through(buf1, NULL, 0); + return; + } +@@ -962,7 +1110,9 @@ + close_tmpfile(); + } + } +- else cmd_usage(pc->curcmd, SYNOPSIS); ++ else if (bug_bytes_entered) ++ return; ++ else cmd_usage(pc->curcmd, SYNOPSIS); + + if (!reverse) { + FREEBUF(req->buf); +@@ -1053,6 +1203,185 @@ + FREEBUF(req); + } + ++/* ++ * x86 and x86_64 kernels may have file/line-number encoding ++ * asm()'d in just after the "ud2a" instruction, which confuses ++ * the disassembler and the x86 backtracer. Determine the ++ * number of bytes to skip. ++ */ ++static void ++BUG_bytes_init(void) ++{ ++ if (machine_type("X86")) ++ kt->BUG_bytes = BUG_x86(); ++ else if (machine_type("X86_64")) ++ kt->BUG_bytes = BUG_x86_64(); ++} ++ ++static int ++BUG_x86(void) ++{ ++ struct syment *sp, *spn; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ ulong vaddr, fileptr; ++ int found; ++ ++ /* ++ * Prior to 2.4.19, a call to do_BUG() preceded ++ * the standalone ud2a instruction. ++ */ ++ if (THIS_KERNEL_VERSION < LINUX(2,4,19)) ++ return 0; ++ ++ /* ++ * 2.6.20 introduced __bug_table support for i386, ++ * but even if CONFIG_DEBUG_BUGVERBOSE is not configured, ++ * the ud2a stands alone. ++ */ ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,20)) ++ return 0; ++ ++ /* ++ * For previous kernel versions, it may depend upon ++ * whether CONFIG_DEBUG_BUGVERBOSE was configured: ++ * ++ * #ifdef CONFIG_DEBUG_BUGVERBOSE ++ * #define BUG() \ ++ * __asm__ __volatile__( "ud2\n" \ ++ * "\t.word %c0\n" \ ++ * "\t.long %c1\n" \ ++ * : : "i" (__LINE__), "i" (__FILE__)) ++ * #else ++ * #define BUG() __asm__ __volatile__("ud2\n") ++ * #endif ++ * ++ * But that's not necessarily true, since there are ++ * pre-2.6.11 versions that force it like so: ++ * ++ * #if 1 /- Set to zero for a slightly smaller kernel -/ ++ * #define BUG() \ ++ * __asm__ __volatile__( "ud2\n" \ ++ * "\t.word %c0\n" \ ++ * "\t.long %c1\n" \ ++ * : : "i" (__LINE__), "i" (__FILE__)) ++ * #else ++ * #define BUG() __asm__ __volatile__("ud2\n") ++ * #endif ++ */ ++ ++ /* ++ * This works if in-kernel config data is available. 
++ */ ++ if ((THIS_KERNEL_VERSION >= LINUX(2,6,11)) && ++ (kt->flags & BUGVERBOSE_OFF)) ++ return 0; ++ ++ /* ++ * At this point, it's a pretty safe bet that it's configured, ++ * but to be sure, disassemble a known BUG() caller and ++ * verify that the encoding is there. ++ */ ++ ++#define X86_BUG_BYTES (6) /* sizeof(short) + sizeof(pointer) */ ++ ++ if (!(sp = symbol_search("do_exit")) || ++ !(spn = next_symbol(NULL, sp))) ++ return X86_BUG_BYTES; ++ ++ sprintf(buf1, "x/%ldi 0x%lx", spn->value - sp->value, sp->value); ++ ++ found = FALSE; ++ open_tmpfile(); ++ gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR); ++ rewind(pc->tmpfile); ++ while (fgets(buf2, BUFSIZE, pc->tmpfile)) { ++ if (parse_line(buf2, arglist) < 3) ++ continue; ++ ++ if ((vaddr = htol(arglist[0], RETURN_ON_ERROR, NULL)) >= spn->value) ++ continue; ++ ++ if (STREQ(arglist[2], "ud2a")) { ++ found = TRUE; ++ break; ++ } ++ } ++ close_tmpfile(); ++ ++ if (!found || !readmem(vaddr+4, KVADDR, &fileptr, sizeof(ulong), ++ "BUG filename pointer", RETURN_ON_ERROR|QUIET)) ++ return X86_BUG_BYTES; ++ ++ if (!IS_KVADDR(fileptr)) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "no filename pointer: kt->BUG_bytes: 0\n"); ++ return 0; ++ } ++ ++ if (!read_string(fileptr, buf1, BUFSIZE-1)) ++ error(WARNING, ++ "cannot read BUG (ud2a) encoded filename address: %lx\n", ++ fileptr); ++ else if (CRASHDEBUG(1)) ++ fprintf(fp, "BUG bytes filename encoding: [%s]\n", buf1); ++ ++ return X86_BUG_BYTES; ++} ++ ++static int ++BUG_x86_64(void) ++{ ++ /* ++ * 2.6.20 introduced __bug_table support for x86_64, ++ * but even if CONFIG_DEBUG_BUGVERBOSE is not configured, ++ * the ud2a stands alone. ++ */ ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,20)) ++ return 0; ++ ++ /* ++ * The original bug_frame structure looks like this, which ++ * causes the disassembler to go off into the weeds: ++ * ++ * struct bug_frame { ++ * unsigned char ud2[2]; ++ * char *filename; ++ * unsigned short line; ++ * } ++ * ++ * In 2.6.13, fake push and ret instructions were encoded ++ * into the frame so that the disassembly would at least ++ * "work", although the two fake instructions show nonsensical ++ * arguments: ++ * ++ * struct bug_frame { ++ * unsigned char ud2[2]; ++ * unsigned char push; ++ * signed int filename; ++ * unsigned char ret; ++ * unsigned short line; ++ * } ++ */ ++ ++ if (STRUCT_EXISTS("bug_frame")) ++ return (int)(STRUCT_SIZE("bug_frame") - 2); ++ ++ return 0; ++} ++ ++ ++/* ++ * Callback from gdb disassembly code. 
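
A minimal, standalone sketch of the byte counts computed above, assuming the packed 2.6.13-era x86_64 bug_frame layout quoted in the comment (the struct below is reproduced for illustration only; crash itself takes the size from the kernel's debuginfo, and the 32-bit x86 file pointer is modeled here as an unsigned int so the example prints the same values on any host):

    #include <stdio.h>

    /* x86_64 layout described in the comment above (assumed packed) */
    struct bug_frame {
            unsigned char ud2[2];
            unsigned char push;
            signed int filename;
            unsigned char ret;
            unsigned short line;
    } __attribute__((packed));

    int main(void)
    {
            /* x86: a .word line number and a .long (4-byte) file pointer follow ud2a */
            int x86_bug_bytes = (int)(sizeof(unsigned short) + sizeof(unsigned int));

            /* x86_64: everything in bug_frame after the two ud2 opcode bytes */
            int x86_64_bug_bytes = (int)(sizeof(struct bug_frame) - 2);

            printf("x86 BUG bytes to skip:    %d\n", x86_bug_bytes);     /* 6 */
            printf("x86_64 BUG bytes to skip: %d\n", x86_64_bug_bytes);  /* 8 */
            return 0;
    }
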
++ */ ++int ++kernel_BUG_encoding_bytes(void) ++{ ++ return kt->BUG_bytes; ++} ++ + #ifdef NOT_USED + /* + * To avoid premature stoppage/extension of a dis that includes +@@ -1094,7 +1423,8 @@ + } + + #define FRAMESIZE_DEBUG_MESSAGE \ +-"usage: bt -F [size|clear|dump|seek|noseek|validate|novalidate] [-I eip]\n If eip: set its associated framesize to size.\n \"validate/novalidate\" will turn on/off V bit for this eip entry.\n If !eip: \"clear\" will clear the framesize cache and RA seek/noseek flags.\n \"dump\" will dump the current framesize cache entries.\n \"seek/noseek\" turns on/off RA seeking.\n \"validate/novalidate\" turns on/off V bit for all current entries.\n" ++"\nx86 usage: bt -F [size|clear|dump|seek|noseek|validate|novalidate] [-I eip]\n If eip: set its associated framesize to size.\n \"validate/novalidate\" will turn on/off V bit for this eip entry.\n If !eip: \"clear\" will clear the framesize cache and RA seek/noseek flags.\n \"dump\" will dump the current framesize cache entries.\n \"seek/noseek\" turns on/off RA seeking.\n \"validate/novalidate\" turns on/off V bit for all current entries.\n\nx86_64 usage: bt -F [clear|dump|validate] [-I rip]\n If rip: \"validate\" will verbosely recalculate the framesize.\n If !rip: \"clear\" will clear the framesize cache.\n \"dump\" will dump the current framesize cache entries.\n" ++ + + /* + * Display a kernel stack backtrace. Arguments may be any number pid or task +@@ -1108,18 +1438,25 @@ + * -s displays arguments symbolically. + */ + ++void ++clone_bt_info(struct bt_info *orig, struct bt_info *new, ++ struct task_context *tc) ++{ ++ BCOPY(orig, new, sizeof(*new)); ++ new->stackbuf = NULL; ++ new->tc = tc; ++ new->task = tc->task; ++ new->stackbase = GET_STACKBASE(tc->task); ++ new->stacktop = GET_STACKTOP(tc->task); ++} ++ + #define BT_SETUP(TC) \ +- BCOPY(&bt_setup, bt, sizeof(struct bt_info)); \ ++ clone_bt_info(&bt_setup, bt, (TC)); \ + if (refptr) { \ + BZERO(&reference, sizeof(struct reference)); \ + bt->ref = &reference; \ + bt->ref->str = refptr; \ +- } \ +- bt->tc = (TC); \ +- bt->task = ((TC)->task); \ +- bt->stackbase = GET_STACKBASE((TC)->task); \ +- bt->stacktop = GET_STACKTOP((TC)->task); \ +- bt->stackbuf = NULL; ++ } + + void + cmd_bt(void) +@@ -1140,8 +1477,11 @@ + bt = &bt_info; + BZERO(bt, sizeof(struct bt_info)); + +- while ((c = getopt(argcnt, args, "fF:I:S:aloreEgstd:R:")) != EOF) { +- switch(c) ++ if (kt->flags & USE_OLD_BT) ++ bt->flags |= BT_OLD_BACK_TRACE; ++ ++ while ((c = getopt(argcnt, args, "fF:I:S:aloreEgstTd:R:O")) != EOF) { ++ switch (c) + { + case 'f': + bt->flags |= BT_FULL; +@@ -1151,6 +1491,28 @@ + bt->flags |= BT_OLD_BACK_TRACE; + break; + ++ case 'O': ++ if (!(machine_type("X86") || machine_type("X86_64"))) ++ option_not_supported(c); ++ else if (kt->flags & USE_OLD_BT) { ++ /* ++ * Make this setting idempotent across the use of ++ * $HOME/.crashrc, ./.crashrc, and "-i input" files. ++ * If we've been here before during initialization, ++ * leave it alone. 
++ */ ++ if (pc->flags & INIT_IFILE) { ++ error(INFO, "use old bt method by default (already set)\n"); ++ return; ++ } ++ kt->flags &= ~USE_OLD_BT; ++ error(INFO, "use new bt method by default\n"); ++ } else { ++ kt->flags |= USE_OLD_BT; ++ error(INFO, "use old bt method by default\n"); ++ } ++ return; ++ + case 'R': + if (refptr) + error(INFO, "only one -R option allowed\n"); +@@ -1217,6 +1579,9 @@ + } else if (*optarg == '-') { + hook.esp = dtol(optarg+1, FAULT_ON_ERROR, NULL); + hook.esp = (ulong)(0 - (long)hook.esp); ++ } else if (STREQ(optarg, "dwarf") || STREQ(optarg, "cfi")) { ++ if (!(kt->flags & DWARF_UNWIND_CAPABLE)) ++ return; + } else + hook.esp = dtol(optarg, FAULT_ON_ERROR, NULL); + break; +@@ -1241,6 +1606,8 @@ + bt->flags |= BT_SYMBOLIC_ARGS; + break; + ++ case 'T': ++ bt->flags |= BT_TEXT_SYMBOLS_ALL; + case 't': + bt->flags |= BT_TEXT_SYMBOLS; + break; +@@ -1255,6 +1622,11 @@ + } + } + ++ if (XEN_HYPER_MODE()) { ++ if (bt->flags & BT_EFRAME_SEARCH) ++ argerrs++; ++ } ++ + if (argerrs) + cmd_usage(pc->curcmd, SYNOPSIS); + +@@ -1286,6 +1658,35 @@ + return; + } + ++ if (XEN_HYPER_MODE()) { ++#ifdef XEN_HYPERVISOR_ARCH ++ /* "task" means vcpu for xen hypervisor */ ++ if (active) { ++ for (c = 0; c < XEN_HYPER_MAX_CPUS(); c++) { ++ if (!xen_hyper_test_pcpu_id(c)) ++ continue; ++ fake_tc.task = xen_hyper_pcpu_to_active_vcpu(c); ++ BT_SETUP(&fake_tc); ++ xen_hyper_print_bt_header(fp, fake_tc.task, subsequent++); ++ back_trace(bt); ++ } ++ } else { ++ if (args[optind]) { ++ fake_tc.task = xen_hyper_pcpu_to_active_vcpu( ++ convert(args[optind], 0, NULL, NUM_DEC | NUM_HEX)); ++ } else { ++ fake_tc.task = XEN_HYPER_VCPU_LAST_CONTEXT()->vcpu; ++ } ++ BT_SETUP(&fake_tc); ++ xen_hyper_print_bt_header(fp, fake_tc.task, 0); ++ back_trace(bt); ++ } ++ return; ++#else ++ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); ++#endif ++ } ++ + if (active) { + if (ACTIVE()) + error(FATAL, +@@ -1350,9 +1751,10 @@ + char buf[BUFSIZE]; + + if (bt->flags & BT_TEXT_SYMBOLS) { +- fprintf(fp, "%sSTART: %s at %lx\n", +- space(VADDR_PRLEN > 8 ? 14 : 6), +- closest_symbol(eip), eip); ++ if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) ++ fprintf(fp, "%sSTART: %s at %lx\n", ++ space(VADDR_PRLEN > 8 ? 14 : 6), ++ closest_symbol(eip), eip); + } + + if (bt->hp) +@@ -1435,6 +1837,9 @@ + i < LONGS_PER_STACK; i++, up++) { + if (is_kernel_text(*up)) + fprintf(fp, "%lx: %s\n", ++ tt->flags & THREAD_INFO ? ++ bt->tc->thread_info + ++ (i * sizeof(long)) : + bt->task + (i * sizeof(long)), + value_to_symstr(*up, buf, 0)); + } +@@ -1461,20 +1866,26 @@ + if (bt->hp) { + if (bt->hp->esp && !INSTACK(bt->hp->esp, bt)) + error(INFO, +- "invalid stack address for this task: %lx\n", +- bt->hp->esp); ++ "invalid stack address for this task: %lx\n (valid range: %lx - %lx)\n", ++ bt->hp->esp, bt->stackbase, bt->stacktop); + eip = bt->hp->eip; + esp = bt->hp->esp; + + machdep->get_stack_frame(bt, eip ? NULL : &eip, + esp ? 
NULL : &esp); + +- } else if (NETDUMP_DUMPFILE()) ++ } else if (XEN_HYPER_MODE()) ++ machdep->get_stack_frame(bt, &eip, &esp); ++ else if (NETDUMP_DUMPFILE()) + get_netdump_regs(bt, &eip, &esp); ++ else if (KDUMP_DUMPFILE()) ++ get_kdump_regs(bt, &eip, &esp); + else if (DISKDUMP_DUMPFILE()) + get_diskdump_regs(bt, &eip, &esp); + else if (LKCD_DUMPFILE()) + get_lkcd_regs(bt, &eip, &esp); ++ else if (XENDUMP_DUMPFILE()) ++ get_xendump_regs(bt, &eip, &esp); + else + machdep->get_stack_frame(bt, &eip, &esp); + +@@ -1486,6 +1897,13 @@ + if (bt->flags & + (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) { + ++ if (bt->flags & BT_TEXT_SYMBOLS_ALL) { ++ esp = bt->stackbase + ++ ((tt->flags & THREAD_INFO) ? ++ SIZE(thread_info) : SIZE(task_struct)); ++ eip = 0; ++ } ++ + if (machdep->flags & MACHDEP_BT_TEXT) { + bt->instptr = eip; + bt->stkptr = esp; +@@ -1666,6 +2084,7 @@ + fprintf(fp, " flags: %llx\n", bt->flags); + fprintf(fp, " instptr: %lx\n", bt->instptr); + fprintf(fp, " stkptr: %lx\n", bt->stkptr); ++ fprintf(fp, " bptr: %lx\n", bt->bptr); + fprintf(fp, " stackbase: %lx\n", bt->stackbase); + fprintf(fp, " stacktop: %lx\n", bt->stacktop); + fprintf(fp, " tc: %lx ", (ulong)bt->tc); +@@ -1700,6 +2119,11 @@ + return; + } + ++ /* try to get it from the header */ ++ if (get_lkcd_regs_for_cpu(bt, eip, esp) == 0) ++ return; ++ ++ /* if that fails: do guessing */ + sysrq_eip = sysrq_esp = 0; + + for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){ +@@ -1721,6 +2145,25 @@ + *esp = *(up-1); + return; + } ++ /* Egenera */ ++ if (STREQ(sym, "netdump_ipi")) { ++ *eip = *up; ++ *esp = bt->task + ++ ((char *)(up-1) - bt->stackbuf); ++ return; ++ } ++ if (STREQ(sym, "dump_execute")) { ++ *eip = *up; ++ *esp = bt->stackbase + ++ ((char *)(up) - bt->stackbuf); ++ return; ++ } ++ if (STREQ(sym, "vmdump_nmi_callback")) { ++ *eip = *up; ++ *esp = bt->stackbase + ++ ((char *)(up) - bt->stackbuf); ++ return; ++ } + if (STREQ(sym, "smp_stop_cpu_interrupt")) { + *eip = *up; + *esp = bt->task + +@@ -1837,8 +2280,8 @@ + return; + } + +- if (IS_VMALLOC_ADDR(list.next) && +- IS_VMALLOC_ADDR(list.prev)) { ++ if (IS_VMALLOC_ADDR((ulong)list.next) && ++ IS_VMALLOC_ADDR((ulong)list.prev)) { + kt->kernel_module = sp->value; + kt->module_list = (ulong)list.next; + modules_found = TRUE; +@@ -1873,14 +2316,17 @@ + kallsymsbuf = kt->flags & KALLSYMS_V1 ? + GETBUF(SIZE(kallsyms_header)) : NULL; + ++ please_wait("gathering module symbol data"); ++ + for (mod = kt->module_list; mod != kt->kernel_module; mod = mod_next) { +- if (CRASHDEBUG(7)) ++ if (CRASHDEBUG(3)) + fprintf(fp, "module: %lx\n", mod); + + if (!readmem(mod, KVADDR, modbuf, SIZE(module), + "module struct", RETURN_ON_ERROR|QUIET)) { + error(WARNING, +- "cannot access vmalloc'd module memory\n\n"); ++ "%scannot access vmalloc'd module memory\n\n", ++ DUMPFILE() ? "\n" : ""); + kt->mods_installed = 0; + kt->flags |= NO_MODULE_ACCESS; + FREEBUF(modbuf); +@@ -1914,7 +2360,8 @@ + kallsymsbuf, SIZE(kallsyms_header), + "kallsyms_header", RETURN_ON_ERROR|QUIET)) { + error(WARNING, +- "cannot access module kallsyms_header\n"); ++ "%scannot access module kallsyms_header\n", ++ DUMPFILE() ? 
"\n" : ""); + } else { + nsyms = UINT(kallsymsbuf + + OFFSET(kallsyms_header_symbols)); +@@ -1947,6 +2394,8 @@ + store_module_symbols_v2(total, kt->mods_installed); + break; + } ++ ++ please_wait_done(); + } + + +@@ -2112,7 +2561,7 @@ + address = 0; + flag = LIST_MODULE_HDR; + +- while ((c = getopt(argcnt, args, "rd:Ds:St:")) != EOF) { ++ while ((c = getopt(argcnt, args, "rd:Ds:St:o")) != EOF) { + switch(c) + { + case 'r': +@@ -2145,6 +2594,19 @@ + cmd_usage(pc->curcmd, SYNOPSIS); + break; + ++ /* ++ * Revert to using old-style add-symbol-file command ++ * for KMOD_V2 kernels. ++ */ ++ case 'o': ++ if (flag) ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ if (kt->flags & KMOD_V1) ++ error(INFO, ++ "-o option is not applicable to this kernel version\n"); ++ st->flags |= USE_OLD_ADD_SYM; ++ return; ++ + case 't': + if (is_directory(optarg)) + tree = optarg; +@@ -2459,7 +2921,7 @@ + + + static char * +-find_module_objfile(char *modref, char *filename, char *tree) ++module_objfile_search(char *modref, char *filename, char *tree) + { + char buf[BUFSIZE]; + char file[BUFSIZE]; +@@ -2477,16 +2939,20 @@ + strcpy(file, filename); + #ifdef MODULES_IN_CWD + else { +- sprintf(file, "%s.o", modref); +- if (access(file, R_OK) == 0) { +- retbuf = GETBUF(strlen(file)+1); +- strcpy(retbuf, file); +- if (CRASHDEBUG(1)) +- fprintf(fp, +- "find_module_objfile: [%s] file in cwd\n", +- retbuf); +- return retbuf; +- } ++ char *fileext[] = { "ko", "o"}; ++ int i; ++ for (i = 0; i < 2; i++) { ++ sprintf(file, "%s.%s", modref, fileext[i]); ++ if (access(file, R_OK) == 0) { ++ retbuf = GETBUF(strlen(file)+1); ++ strcpy(retbuf, file); ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "find_module_objfile: [%s] file in cwd\n", ++ retbuf); ++ return retbuf; ++ } ++ } + } + #else + else +@@ -2505,6 +2971,8 @@ + if ((st->flags & INSMOD_BUILTIN) && !filename) { + sprintf(buf, "__insmod_%s_O/", modref); + if (symbol_query(buf, NULL, &sp) == 1) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "search: INSMOD_BUILTIN %s\n", sp->name); + BZERO(buf, BUFSIZE); + p1 = strstr(sp->name, "/"); + if ((p2 = strstr(sp->name, file))) +@@ -2578,6 +3046,18 @@ + retbuf = search_directory_tree(dir, file); + + if (!retbuf) { ++ sprintf(dir, "/lib/modules/%s/updates", kt->utsname.release); ++ if (!(retbuf = search_directory_tree(dir, file))) { ++ switch (kt->flags & (KMOD_V1|KMOD_V2)) ++ { ++ case KMOD_V2: ++ sprintf(file, "%s.ko", modref); ++ retbuf = search_directory_tree(dir, file); ++ } ++ } ++ } ++ ++ if (!retbuf) { + sprintf(dir, "/lib/modules/%s", kt->utsname.release); + if (!(retbuf = search_directory_tree(dir, file))) { + switch (kt->flags & (KMOD_V1|KMOD_V2)) +@@ -2592,6 +3072,32 @@ + return retbuf; + } + ++/* ++ * First look for a module based upon its reference name. ++ * If that fails, try replacing any underscores in the ++ * reference name with a dash. ++ * ++ * Example: module name "dm_mod" comes from "dm-mod.ko" objfile ++ */ ++static char * ++find_module_objfile(char *modref, char *filename, char *tree) ++{ ++ char * retbuf; ++ char tmpref[BUFSIZE]; ++ int c; ++ ++ retbuf = module_objfile_search(modref, filename, tree); ++ ++ if (!retbuf) { ++ strncpy(tmpref, modref, BUFSIZE); ++ for (c = 0; c < BUFSIZE && tmpref[c]; c++) ++ if (tmpref[c] == '_') ++ tmpref[c] = '-'; ++ retbuf = module_objfile_search(tmpref, filename, tree); ++ } ++ ++ return retbuf; ++} + + /* + * Unlink any temporary remote module object files. 
+@@ -2651,7 +3157,7 @@ + dump_log(int msg_level) + { + int i; +- ulong log_buf, log_start, logged_chars; ++ ulong log_buf, logged_chars; + char *buf; + char last; + ulong index; +@@ -2678,13 +3184,16 @@ + + buf = GETBUF(log_buf_len); + log_wrap = FALSE; +- get_symbol_data("log_start", sizeof(ulong), &log_start); + get_symbol_data("logged_chars", sizeof(ulong), &logged_chars); + readmem(log_buf, KVADDR, buf, + log_buf_len, "log_buf contents", FAULT_ON_ERROR); + +- log_start &= log_buf_len-1; +- index = (logged_chars < log_buf_len) ? 0 : log_start; ++ if (logged_chars < log_buf_len) { ++ index = 0; ++ } else { ++ get_symbol_data("log_end", sizeof(ulong), &index); ++ index &= log_buf_len-1; ++ } + + if ((logged_chars < log_buf_len) && (index == 0) && (buf[index] == '<')) + loglevel = TRUE; +@@ -2787,6 +3296,8 @@ + do { + if (sflag) + dump_sys_call_table(args[optind], cnt++); ++ else if (STREQ(args[optind], "config")) ++ read_in_kernel_config(IKCFG_READ); + else + cmd_usage(args[optind], COMPLETE_HELP); + optind++; +@@ -2867,6 +3378,9 @@ + if (NETDUMP_DUMPFILE() && is_partial_netdump()) + fprintf(fp, " [PARTIAL DUMP]"); + ++ if (DISKDUMP_DUMPFILE() && is_partial_diskdump()) ++ fprintf(fp, " [PARTIAL DUMP]"); ++ + fprintf(fp, "\n"); + } + +@@ -2876,7 +3390,7 @@ + get_symbol_data("xtime", sizeof(struct timespec), &kt->date); + fprintf(fp, " DATE: %s\n", + strip_linefeeds(ctime(&kt->date.tv_sec))); +- fprintf(fp, " UPTIME: %s\n", get_uptime(buf)); ++ fprintf(fp, " UPTIME: %s\n", get_uptime(buf, NULL)); + fprintf(fp, "LOAD AVERAGE: %s\n", get_loadavg(buf)); + fprintf(fp, " TASKS: %ld\n", RUNNING_TASKS()); + fprintf(fp, " NODENAME: %s\n", uts->nodename); +@@ -2891,10 +3405,17 @@ + #ifdef WHO_CARES + fprintf(fp, " DOMAINNAME: %s\n", uts->domainname); + #endif ++ if (XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND)) ++ return; ++ + if (DUMPFILE()) { + fprintf(fp, " PANIC: "); + if (machdep->flags & HWRESET) +- fprintf(fp, "HARDWARE RESET\n"); ++ fprintf(fp, "(HARDWARE RESET)\n"); ++ else if (machdep->flags & INIT) ++ fprintf(fp, "(INIT)\n"); ++ else if (machdep->flags & MCA) ++ fprintf(fp, "(MCA)\n"); + else { + strip_linefeeds(get_panicmsg(buf)); + fprintf(fp, "\"%s\"%s\n", buf, +@@ -2952,28 +3473,42 @@ + /* + * Calculate and return the uptime. 
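
The dump_log() change above derives the starting read position from "log_end" rather than "log_start": if fewer characters than log_buf_len have ever been logged the buffer has not wrapped and reading starts at offset 0; otherwise the oldest byte sits at log_end masked by the power-of-two buffer size. A small arithmetic sketch with made-up sample values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long log_buf_len = 16384;       /* power of two */
            unsigned long logged_chars = 250000;     /* total characters ever logged */
            unsigned long log_end = 250000;          /* kernel's running end index */
            unsigned long index;

            if (logged_chars < log_buf_len)
                    index = 0;                          /* buffer never wrapped */
            else
                    index = log_end & (log_buf_len - 1); /* oldest byte after wrap */

            printf("start reading log_buf at offset %lu\n", index);
            return 0;
    }
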
+ */ +- +-static char * +-get_uptime(char *buf) ++char * ++get_uptime(char *buf, ulonglong *j64p) + { +- ulong jiffies; +- +- get_symbol_data("jiffies", sizeof(long), &jiffies); ++ ulong jiffies, tmp1, tmp2; ++ ulonglong jiffies_64, wrapped; + +- if ((machine_type("S390") || machine_type("S390X")) && +- (THIS_KERNEL_VERSION >= LINUX(2,6,0))) +- jiffies -= ((unsigned long)(unsigned int)(-300*machdep->hz)); +- else if (symbol_exists("jiffies_64") && BITS64() && +- (((ulonglong)jiffies & 0xffffffff00000000ULL) == +- 0x100000000ULL)) +- jiffies &= 0xffffffff; +- +- convert_time((ulonglong)jiffies, buf); ++ if (symbol_exists("jiffies_64")) { ++ get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64); ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { ++ wrapped = (jiffies_64 & 0xffffffff00000000ULL); ++ if (wrapped) { ++ wrapped -= 0x100000000ULL; ++ jiffies_64 &= 0x00000000ffffffffULL; ++ jiffies_64 |= wrapped; ++ jiffies_64 += (ulonglong)(300*machdep->hz); ++ } else { ++ tmp1 = (ulong)(uint)(-300*machdep->hz); ++ tmp2 = (ulong)jiffies_64; ++ jiffies_64 = (ulonglong)(tmp2 - tmp1); ++ } ++ } ++ if (buf) ++ convert_time(jiffies_64, buf); ++ if (j64p) ++ *j64p = jiffies_64; ++ } else { ++ get_symbol_data("jiffies", sizeof(long), &jiffies); ++ if (buf) ++ convert_time((ulonglong)jiffies, buf); ++ if (j64p) ++ *j64p = (ulonglong)jiffies; ++ } + + return buf; + } + +- + #define FSHIFT 11 /* nr of bits of precision */ + #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ + #define LOAD_INT(x) ((x) >> FSHIFT) +@@ -3048,9 +3583,9 @@ + struct syment *sp, *spn; + long size; + #ifdef S390X +- unsigned int *sct, *sys_call_table, addr; ++ unsigned int *sct, *sys_call_table, sys_ni_syscall, addr; + #else +- ulong *sys_call_table, *sct, addr; ++ ulong *sys_call_table, *sct, sys_ni_syscall, addr; + #endif + if (GDB_PATCHED()) + error(INFO, "line numbers are not available\n"); +@@ -3068,6 +3603,8 @@ + readmem(symbol_value("sys_call_table"), KVADDR, sys_call_table, + size, "sys_call_table", FAULT_ON_ERROR); + ++ sys_ni_syscall = symbol_value("sys_ni_syscall"); ++ + if (spec) + open_tmpfile(); + +@@ -3080,13 +3617,17 @@ + "%3x " : "%3d ", i); + fprintf(fp, + "invalid sys_call_table entry: %lx (%s)\n", +- *sct, value_to_symstr(*sct, buf1, 0)); ++ (unsigned long)*sct, ++ value_to_symstr(*sct, buf1, 0)); + } + continue; + } + + fprintf(fp, (output_radix == 16) ? "%3x " : "%3d ", i); +- fprintf(fp, "%-26s ", scp); ++ if (sys_ni_syscall && *sct == sys_ni_syscall) ++ fprintf(fp, "%-26s ", "sys_ni_syscall"); ++ else ++ fprintf(fp, "%-26s ", scp); + + /* + * For system call symbols whose first instruction is +@@ -3181,16 +3722,16 @@ + * "help -k" output + */ + void +-dump_kernel_table(void) ++dump_kernel_table(int verbose) + { +- int i; ++ int i, nr_cpus; + struct new_utsname *uts; + int others; + + others = 0; + uts = &kt->utsname; + +- fprintf(fp, " flags: %lx (", kt->flags); ++ fprintf(fp, " flags: %lx\n (", kt->flags); + if (kt->flags & NO_MODULE_ACCESS) + fprintf(fp, "%sNO_MODULE_ACCESS", others++ ? "|" : ""); + if (kt->flags & TVEC_BASES_V1) +@@ -3225,6 +3766,30 @@ + fprintf(fp, "%sKMOD_V2", others++ ? "|" : ""); + if (kt->flags & KALLSYMS_V2) + fprintf(fp, "%sKALLSYMS_V2", others++ ? "|" : ""); ++ if (kt->flags & USE_OLD_BT) ++ fprintf(fp, "%sUSE_OLD_BT", others++ ? "|" : ""); ++ if (kt->flags & ARCH_XEN) ++ fprintf(fp, "%sARCH_XEN", others++ ? "|" : ""); ++ if (kt->flags & ARCH_OPENVZ) ++ fprintf(fp, "%sARCH_OPENVZ", others++ ? "|" : ""); ++ if (kt->flags & NO_IKCONFIG) ++ fprintf(fp, "%sNO_IKCONFIG", others++ ? 
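
The rewritten get_uptime() above works from jiffies_64 and undoes the kernel's initial jiffies bias: 2.6 kernels start jiffies roughly five minutes before the 32-bit wrap point so that wrap bugs surface early. A simplified sketch of the underlying arithmetic, assuming HZ=1000 and a made-up jiffies_64 value; crash's version additionally distinguishes the pre- and post-wrap cases as shown in the hunk:

    #include <stdio.h>

    int main(void)
    {
            int hz = 1000;
            /* the kernel starts jiffies ~5 minutes before the 32-bit wrap point */
            unsigned long long initial_jiffies = (unsigned int)(-300 * hz);
            unsigned long long jiffies_64 = initial_jiffies + 86400ULL * hz; /* one day up */

            unsigned long long ticks = jiffies_64 - initial_jiffies;
            unsigned long long secs = ticks / hz;

            printf("uptime: %llu days %02llu:%02llu:%02llu\n",
                secs / 86400, (secs % 86400) / 3600, (secs % 3600) / 60, secs % 60);
            return 0;
    }
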
"|" : ""); ++ if (kt->flags & DWARF_UNWIND) ++ fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : ""); ++ if (kt->flags & NO_DWARF_UNWIND) ++ fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : ""); ++ if (kt->flags & DWARF_UNWIND_MEMORY) ++ fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : ""); ++ if (kt->flags & DWARF_UNWIND_EH_FRAME) ++ fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : ""); ++ if (kt->flags & DWARF_UNWIND_MODULES) ++ fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? "|" : ""); ++ if (kt->flags & BUGVERBOSE_OFF) ++ fprintf(fp, "%sBUGVERBOSE_OFF", others++ ? "|" : ""); ++ if (kt->flags & RELOC_SET) ++ fprintf(fp, "%sRELOC_SET", others++ ? "|" : ""); ++ if (kt->flags & RELOC_FORCE) ++ fprintf(fp, "%sRELOC_FORCE", others++ ? "|" : ""); + fprintf(fp, ")\n"); + fprintf(fp, " stext: %lx\n", kt->stext); + fprintf(fp, " etext: %lx\n", kt->etext); +@@ -3234,8 +3799,10 @@ + fprintf(fp, " init_end: %lx\n", kt->init_end); + fprintf(fp, " end: %lx\n", kt->end); + fprintf(fp, " cpus: %d\n", kt->cpus); ++ fprintf(fp, " cpus_override: %s\n", kt->cpus_override); + fprintf(fp, " NR_CPUS: %d (compiled-in to this version of %s)\n", + NR_CPUS, pc->program_name); ++ fprintf(fp, "kernel_NR_CPUS: %d\n", kt->kernel_NR_CPUS); + if (kt->display_bh == display_bh_1) + fprintf(fp, " display_bh: display_bh_1()\n"); + else if (kt->display_bh == display_bh_2) +@@ -3263,21 +3830,61 @@ + kt->kernel_version[1], kt->kernel_version[2]); + fprintf(fp, " gcc_version: %d.%d.%d\n", kt->gcc_version[0], + kt->gcc_version[1], kt->gcc_version[2]); ++ fprintf(fp, " BUG_bytes: %d\n", kt->BUG_bytes); ++ fprintf(fp, " relocate: %lx\n", kt->relocate); + fprintf(fp, " runq_siblings: %d\n", kt->runq_siblings); + fprintf(fp, " __rq_idx[NR_CPUS]: "); +- for (i = 0; i < NR_CPUS; i++) ++ nr_cpus = kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS; ++ for (i = 0; i < nr_cpus; i++) + fprintf(fp, "%ld ", kt->__rq_idx[i]); + fprintf(fp, "\n __cpu_idx[NR_CPUS]: "); +- for (i = 0; i < NR_CPUS; i++) ++ for (i = 0; i < nr_cpus; i++) + fprintf(fp, "%ld ", kt->__cpu_idx[i]); + fprintf(fp, "\n __per_cpu_offset[NR_CPUS]:"); +- for (i = 0; i < NR_CPUS; i++) ++ for (i = 0; i < nr_cpus; i++) + fprintf(fp, "%s%.*lx ", (i % 4) == 0 ? "\n " : "", + LONG_PRLEN, kt->__per_cpu_offset[i]); + fprintf(fp, "\n cpu_flags[NR_CPUS]:"); +- for (i = 0; i < NR_CPUS; i++) ++ for (i = 0; i < nr_cpus; i++) + fprintf(fp, "%lx ", kt->cpu_flags[i]); +- fprintf(fp, "\n"); ++ others = 0; ++ fprintf(fp, "\n xen_flags: %lx (", kt->xen_flags); ++ if (kt->xen_flags & WRITABLE_PAGE_TABLES) ++ fprintf(fp, "%sWRITABLE_PAGE_TABLES", others++ ? "|" : ""); ++ if (kt->xen_flags & SHADOW_PAGE_TABLES) ++ fprintf(fp, "%sSHADOW_PAGE_TABLES", others++ ? "|" : ""); ++ if (kt->xen_flags & CANONICAL_PAGE_TABLES) ++ fprintf(fp, "%sCANONICAL_PAGE_TABLES", others++ ? "|" : ""); ++ if (kt->xen_flags & XEN_SUSPEND) ++ fprintf(fp, "%sXEN_SUSPEND", others++ ? "|" : ""); ++ fprintf(fp, ")\n"); ++ fprintf(fp, " m2p_page: %lx\n", (ulong)kt->m2p_page); ++ fprintf(fp, "phys_to_machine_mapping: %lx\n", kt->phys_to_machine_mapping); ++ fprintf(fp, " p2m_table_size: %ld\n", kt->p2m_table_size); ++ fprintf(fp, " p2m_mapping_cache[%d]: %s\n", P2M_MAPPING_CACHE, ++ verbose ? 
"" : "(use \"help -K\" to view cache contents)"); ++ for (i = 0; verbose && (i < P2M_MAPPING_CACHE); i++) { ++ if (!kt->p2m_mapping_cache[i].mapping) ++ continue; ++ fprintf(fp, " [%d] mapping: %lx start: %lx end: %lx (%ld mfns)\n", ++ i, kt->p2m_mapping_cache[i].mapping, ++ kt->p2m_mapping_cache[i].start, ++ kt->p2m_mapping_cache[i].end, ++ kt->p2m_mapping_cache[i].end - kt->p2m_mapping_cache[i].start + 1); ++ } ++ fprintf(fp, " last_mapping_read: %lx\n", kt->last_mapping_read); ++ fprintf(fp, " p2m_cache_index: %ld\n", kt->p2m_cache_index); ++ fprintf(fp, " p2m_pages_searched: %ld\n", kt->p2m_pages_searched); ++ fprintf(fp, " p2m_mfn_cache_hits: %ld ", kt->p2m_mfn_cache_hits); ++ if (kt->p2m_pages_searched) ++ fprintf(fp, "(%ld%%)\n", kt->p2m_mfn_cache_hits * 100 / kt->p2m_pages_searched); ++ else ++ fprintf(fp, "\n"); ++ fprintf(fp, " p2m_page_cache_hits: %ld ", kt->p2m_page_cache_hits); ++ if (kt->p2m_pages_searched) ++ fprintf(fp, "(%ld%%)\n", kt->p2m_page_cache_hits * 100 / kt->p2m_pages_searched); ++ else ++ fprintf(fp, "\n"); + } + + /* +@@ -3314,7 +3921,7 @@ + if (machine_type("S390") || machine_type("S390X")) + command_not_supported(); + +- while ((c = getopt(argcnt, args, "db")) != EOF) { ++ while ((c = getopt(argcnt, args, "dbu")) != EOF) { + switch(c) + { + case 'd': +@@ -3344,6 +3951,17 @@ + kt->display_bh(); + return; + ++ case 'u': ++ pc->curcmd_flags |= IRQ_IN_USE; ++ if (kernel_symbol_exists("no_irq_chip")) ++ pc->curcmd_private = (ulonglong)symbol_value("no_irq_chip"); ++ else if (kernel_symbol_exists("no_irq_type")) ++ pc->curcmd_private = (ulonglong)symbol_value("no_irq_type"); ++ else ++ error(WARNING, ++ "irq: -u option ignored: \"no_irq_chip\" or \"no_irq_type\" symbols do not exist\n"); ++ break; ++ + default: + argerrs++; + break; +@@ -3362,6 +3980,8 @@ + return; + } + ++ pc->curcmd_flags &= ~IRQ_IN_USE; ++ + while (args[optind]) { + i = dtoi(args[optind], FAULT_ON_ERROR, NULL); + if (i >= nr_irqs) +@@ -3402,13 +4022,22 @@ + + readmem(irq_desc_addr + OFFSET(irq_desc_t_status), KVADDR, &status, + sizeof(int), "irq_desc entry", FAULT_ON_ERROR); +- readmem(irq_desc_addr + OFFSET(irq_desc_t_handler), KVADDR, &handler, +- sizeof(long), "irq_desc entry", FAULT_ON_ERROR); ++ if (VALID_MEMBER(irq_desc_t_handler)) ++ readmem(irq_desc_addr + OFFSET(irq_desc_t_handler), KVADDR, ++ &handler, sizeof(long), "irq_desc entry", ++ FAULT_ON_ERROR); ++ else if (VALID_MEMBER(irq_desc_t_chip)) ++ readmem(irq_desc_addr + OFFSET(irq_desc_t_chip), KVADDR, ++ &handler, sizeof(long), "irq_desc entry", ++ FAULT_ON_ERROR); + readmem(irq_desc_addr + OFFSET(irq_desc_t_action), KVADDR, &action, + sizeof(long), "irq_desc entry", FAULT_ON_ERROR); + readmem(irq_desc_addr + OFFSET(irq_desc_t_depth), KVADDR, &depth, + sizeof(int), "irq_desc entry", FAULT_ON_ERROR); + ++ if (!action && (handler == (ulong)pc->curcmd_private)) ++ return; ++ + fprintf(fp, " IRQ: %d\n", irq); + fprintf(fp, " STATUS: %x %s", status, status ? 
"(" : ""); + others = 0; +@@ -3441,19 +4070,30 @@ + } else + fprintf(fp, "%lx\n", handler); + +- if (handler) { +- readmem(handler+OFFSET(hw_interrupt_type_typename), KVADDR, +- &tmp1, sizeof(void *), +- "hw_interrupt_type typename", FAULT_ON_ERROR); ++ if (handler) { ++ if (VALID_MEMBER(hw_interrupt_type_typename)) ++ readmem(handler+OFFSET(hw_interrupt_type_typename), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type typename", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(irq_chip_typename)) ++ readmem(handler+OFFSET(irq_chip_typename), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type typename", FAULT_ON_ERROR); ++ + fprintf(fp, " typename: %lx ", tmp1); + BZERO(buf, BUFSIZE); + if (read_string(tmp1, buf, BUFSIZE-1)) + fprintf(fp, "\"%s\"", buf); + fprintf(fp, "\n"); + +- readmem(handler+OFFSET(hw_interrupt_type_startup), KVADDR, +- &tmp1, sizeof(void *), +- "hw_interrupt_type startup", FAULT_ON_ERROR); ++ if (VALID_MEMBER(hw_interrupt_type_startup)) ++ readmem(handler+OFFSET(hw_interrupt_type_startup), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type startup", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(irq_chip_startup)) ++ readmem(handler+OFFSET(irq_chip_startup), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type startup", FAULT_ON_ERROR); + fprintf(fp, " startup: %lx ", tmp1); + if (is_kernel_text(tmp1)) + fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); +@@ -3464,9 +4104,15 @@ + value_to_symstr(tmp2, buf, 0)); + fprintf(fp, "\n"); + +- readmem(handler+OFFSET(hw_interrupt_type_shutdown), KVADDR, +- &tmp1, sizeof(void *), +- "hw_interrupt_type shutdown", FAULT_ON_ERROR); ++ if (VALID_MEMBER(hw_interrupt_type_shutdown)) ++ readmem(handler+OFFSET(hw_interrupt_type_shutdown), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type shutdown", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(irq_chip_shutdown)) ++ readmem(handler+OFFSET(irq_chip_shutdown), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type shutdown", FAULT_ON_ERROR); ++ + fprintf(fp, " shutdown: %lx ", tmp1); + if (is_kernel_text(tmp1)) + fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); +@@ -3494,9 +4140,14 @@ + fprintf(fp, "\n"); + } + +- readmem(handler+OFFSET(hw_interrupt_type_enable), KVADDR, +- &tmp1, sizeof(void *), +- "hw_interrupt_type enable", FAULT_ON_ERROR); ++ if (VALID_MEMBER(hw_interrupt_type_enable)) ++ readmem(handler+OFFSET(hw_interrupt_type_enable), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type enable", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(irq_chip_enable)) ++ readmem(handler+OFFSET(irq_chip_enable), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type enable", FAULT_ON_ERROR); + fprintf(fp, " enable: %lx ", tmp1); + if (is_kernel_text(tmp1)) + fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); +@@ -3507,9 +4158,14 @@ + value_to_symstr(tmp2, buf, 0)); + fprintf(fp, "\n"); + +- readmem(handler+OFFSET(hw_interrupt_type_disable), KVADDR, +- &tmp1, sizeof(void *), +- "hw_interrupt_type disable", FAULT_ON_ERROR); ++ if (VALID_MEMBER(hw_interrupt_type_disable)) ++ readmem(handler+OFFSET(hw_interrupt_type_disable), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type disable", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(irq_chip_disable)) ++ readmem(handler+OFFSET(irq_chip_disable), ++ KVADDR, &tmp1, sizeof(void *), ++ "hw_interrupt_type disable", FAULT_ON_ERROR); + fprintf(fp, " disable: %lx ", tmp1); + if (is_kernel_text(tmp1)) + fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); +@@ -3534,6 +4190,84 @@ + fprintf(fp, "<%s>", + value_to_symstr(tmp2, buf, 0)); + 
fprintf(fp, "\n"); ++ } else if (VALID_MEMBER(irq_chip_ack)) { ++ readmem(handler+OFFSET(irq_chip_ack), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip ack", FAULT_ON_ERROR); ++ fprintf(fp, " ack: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "ack indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); ++ } ++ ++ if (VALID_MEMBER(irq_chip_mask)) { ++ readmem(handler+OFFSET(irq_chip_mask), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip mask", FAULT_ON_ERROR); ++ fprintf(fp, " mask: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "mask indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); ++ } ++ ++ if (VALID_MEMBER(irq_chip_mask_ack)) { ++ readmem(handler+OFFSET(irq_chip_mask_ack), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip mask_ack", FAULT_ON_ERROR); ++ fprintf(fp, " mask_ack: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "mask_ack indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); ++ } ++ ++ if (VALID_MEMBER(irq_chip_unmask)) { ++ readmem(handler+OFFSET(irq_chip_unmask), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip unmask", FAULT_ON_ERROR); ++ fprintf(fp, " unmask: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "unmask indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); ++ } ++ ++ if (VALID_MEMBER(irq_chip_eoi)) { ++ readmem(handler+OFFSET(irq_chip_eoi), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip eoi", FAULT_ON_ERROR); ++ fprintf(fp, " eoi: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "eoi indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); + } + + if (VALID_MEMBER(hw_interrupt_type_end)) { +@@ -3550,6 +4284,20 @@ + fprintf(fp, "<%s>", + value_to_symstr(tmp2, buf, 0)); + fprintf(fp, "\n"); ++ } else if (VALID_MEMBER(irq_chip_end)) { ++ readmem(handler+OFFSET(irq_chip_end), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip end", FAULT_ON_ERROR); ++ fprintf(fp, " end: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "end indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); + } + + if (VALID_MEMBER(hw_interrupt_type_set_affinity)) { +@@ -3567,6 +4315,66 @@ + fprintf(fp, "<%s>", + value_to_symstr(tmp2, buf, 0)); + fprintf(fp, "\n"); ++ } else if (VALID_MEMBER(irq_chip_set_affinity)) { ++ readmem(handler+OFFSET(irq_chip_set_affinity), ++ KVADDR, &tmp1, sizeof(void *), ++ "irq_chip set_affinity", ++ FAULT_ON_ERROR); ++ fprintf(fp, " set_affinity: %lx ", tmp1); ++ if 
(is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "set_affinity indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); ++ } ++ if (VALID_MEMBER(irq_chip_retrigger)) { ++ readmem(handler+OFFSET(irq_chip_retrigger), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip retrigger", FAULT_ON_ERROR); ++ fprintf(fp, " retrigger: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "retrigger indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); ++ } ++ if (VALID_MEMBER(irq_chip_set_type)) { ++ readmem(handler+OFFSET(irq_chip_set_type), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip set_type", FAULT_ON_ERROR); ++ fprintf(fp, " set_type: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "set_type indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); ++ } ++ if (VALID_MEMBER(irq_chip_set_wake)) { ++ readmem(handler+OFFSET(irq_chip_set_wake), KVADDR, ++ &tmp1, sizeof(void *), ++ "irq_chip set wake", FAULT_ON_ERROR); ++ fprintf(fp, " set_wake: %lx ", tmp1); ++ if (is_kernel_text(tmp1)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp1, buf, 0)); ++ else if (readmem(tmp1, KVADDR, &tmp2, ++ sizeof(ulong), "set_wake indirection", ++ RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) ++ fprintf(fp, "<%s>", ++ value_to_symstr(tmp2, buf, 0)); ++ fprintf(fp, "\n"); + } + } + +@@ -4146,7 +4954,7 @@ + } + + /* +- * 2.6 per-cpu timers, using "per_cpu__tvec_bases". XXX ++ * 2.6 per-cpu timers, using "per_cpu__tvec_bases". + */ + + static void +@@ -4220,8 +5028,12 @@ + else + tvec_bases = symbol_value("per_cpu__tvec_bases"); + +- fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu, +- tvec_bases + SIZE(tvec_t_base_s)); ++ if (symbol_exists("boot_tvec_bases")) { ++ readmem(tvec_bases, KVADDR, &tvec_bases, sizeof(void *), ++ "per-cpu tvec_bases", FAULT_ON_ERROR); ++ } ++ ++ fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu, tvec_bases); + + sprintf(buf1, "%ld", highest); + flen = MAX(strlen(buf1), strlen("JIFFIES")); +@@ -4320,6 +5132,11 @@ + else + tvec_bases = symbol_value("per_cpu__tvec_bases"); + ++ if (symbol_exists("boot_tvec_bases")) { ++ readmem(tvec_bases, KVADDR, &tvec_bases, sizeof(void *), ++ "per-cpu tvec_bases", FAULT_ON_ERROR); ++ } ++ + tv[1].base = tvec_bases + + OFFSET(tvec_t_base_s_tv1); + tv[1].end = tv[1].base + SIZE(tvec_root_s); +@@ -4475,9 +5292,16 @@ + ld->start = vec[i]; + ld->list_head_offset = offset; + ld->end = vec_kvaddr; ++ ld->flags = RETURN_ON_LIST_ERROR; + + hq_open(); +- timer_cnt = do_list(ld); ++ if ((timer_cnt = do_list(ld)) == -1) { ++ /* Ignore chains with errors */ ++ error(INFO, ++ "ignoring faulty timer list at index %d of timer array\n", ++ i/2); ++ continue; ++ } + if (!timer_cnt) + continue; + timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong)); +@@ -4708,21 +5532,569 @@ + machdep->last_pgd_read = 0; + machdep->last_pmd_read = 0; + machdep->last_ptbl_read = 0; ++ if (machdep->clear_machdep_cache) ++ machdep->clear_machdep_cache(); + } + } + + /* +- * For kernels containing cpu_online_map, count the bits. 
++ * For kernels containing at least the cpu_online_map, use it ++ * to determine the cpu count. + */ + int + get_cpus_online() + { +- ulong cpu_online_map; ++ int i, len, online; ++ struct gnu_request req; ++ char *buf; ++ ulong *maskptr; + + if (!symbol_exists("cpu_online_map")) + return 0; + +- get_symbol_data("cpu_online_map", sizeof(ulong), &cpu_online_map); ++ if (LKCD_KERNTYPES()) { ++ if ((len = STRUCT_SIZE("cpumask_t")) < 0) ++ error(FATAL, "cannot determine type cpumask_t\n"); ++ } else ++ len = get_symbol_type("cpu_online_map", NULL, &req) == ++ TYPE_CODE_UNDEF ? sizeof(ulong) : req.length; ++ buf = GETBUF(len); ++ ++ online = 0; ++ ++ if (readmem(symbol_value("cpu_online_map"), KVADDR, buf, len, ++ "cpu_online_map", RETURN_ON_ERROR)) { ++ ++ maskptr = (ulong *)buf; ++ for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) ++ online += count_bits_long(*maskptr); ++ ++ FREEBUF(buf); ++ if (CRASHDEBUG(1)) ++ error(INFO, "get_cpus_online: online: %d\n", online); ++ } ++ ++ return online; ++} ++ ++/* ++ * For kernels containing at least the cpu_possible_map, used ++ * to determine the cpu count (of online and offline cpus). ++ */ ++int ++get_cpus_possible() ++{ ++ int i, len, possible; ++ struct gnu_request req; ++ char *buf; ++ ulong *maskptr; ++ ++ if (!symbol_exists("cpu_possible_map")) ++ return 0; ++ ++ if (LKCD_KERNTYPES()) { ++ if ((len = STRUCT_SIZE("cpumask_t")) < 0) ++ error(FATAL, "cannot determine type cpumask_t\n"); ++ } else ++ len = get_symbol_type("cpu_possible_map", NULL, &req) == ++ TYPE_CODE_UNDEF ? sizeof(ulong) : req.length; ++ buf = GETBUF(len); ++ ++ possible = 0; ++ ++ if (readmem(symbol_value("cpu_possible_map"), KVADDR, buf, len, ++ "cpu_possible_map", RETURN_ON_ERROR)) { ++ ++ maskptr = (ulong *)buf; ++ for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) ++ possible += count_bits_long(*maskptr); ++ ++ FREEBUF(buf); ++ if (CRASHDEBUG(1)) ++ error(INFO, "get_cpus_possible: possible: %d\n", ++ possible); ++ } ++ ++ return possible; ++} ++ ++/* ++ * Xen machine-address to pseudo-physical-page translator. ++ */ ++ulonglong ++xen_m2p(ulonglong machine) ++{ ++ ulong mfn, pfn; ++ ++ mfn = XEN_MACHINE_TO_MFN(machine); ++ pfn = __xen_m2p(machine, mfn); ++ ++ if (pfn == XEN_MFN_NOT_FOUND) { ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "xen_machine_to_pseudo_PAE: machine address %lx not found\n", ++ machine); ++ return XEN_MACHADDR_NOT_FOUND; ++ } ++ ++ return XEN_PFN_TO_PSEUDO(pfn); ++} ++ ++static ulong ++__xen_m2p(ulonglong machine, ulong mfn) ++{ ++ ulong mapping, kmfn, pfn, p, i, c; ++ ulong start, end; ++ ulong *mp; ++ ++ mp = (ulong *)kt->m2p_page; ++ mapping = kt->phys_to_machine_mapping; ++ ++ /* ++ * Check the FIFO cache first. 
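
get_cpus_online() and get_cpus_possible() above size the read from the debuginfo type of the map (a plain long on older kernels, a multi-word cpumask_t on newer ones) and then count the set bits one word at a time. A small standalone sketch of that counting step, using a made-up two-word mask and a local count_bits_long() stand-in for crash's helper:

    #include <stdio.h>

    static int count_bits_long(unsigned long w)   /* stand-in for crash's helper */
    {
            int n = 0;

            while (w) {
                    n += (int)(w & 1UL);
                    w >>= 1;
            }
            return n;
    }

    int main(void)
    {
            unsigned long cpu_online_map[2] = { 0xfUL, 0x1UL };  /* sample mask words */
            int i, online = 0;

            for (i = 0; i < 2; i++)
                    online += count_bits_long(cpu_online_map[i]);

            printf("online cpus: %d\n", online);   /* 5 */
            return 0;
    }
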
++ */ ++ for (c = 0; c < P2M_MAPPING_CACHE; c++) { ++ if (kt->p2m_mapping_cache[c].mapping && ++ ((mfn >= kt->p2m_mapping_cache[c].start) && ++ (mfn <= kt->p2m_mapping_cache[c].end))) { ++ ++ if (kt->p2m_mapping_cache[c].mapping != kt->last_mapping_read) { ++ if (!readmem(kt->p2m_mapping_cache[c].mapping, KVADDR, ++ mp, PAGESIZE(), "phys_to_machine_mapping page (cached)", ++ RETURN_ON_ERROR)) ++ error(FATAL, "cannot access " ++ "phys_to_machine_mapping page\n"); ++ else ++ kt->last_mapping_read = kt->p2m_mapping_cache[c].mapping; ++ } else ++ kt->p2m_page_cache_hits++; ++ ++ for (i = 0; i < XEN_PFNS_PER_PAGE; i++) { ++ kmfn = (*(mp+i)) & ~XEN_FOREIGN_FRAME; ++ if (kmfn == mfn) { ++ p = P2M_MAPPING_TO_PAGE_INDEX(c); ++ pfn = p + i; ++ ++ if (CRASHDEBUG(1)) ++ console("(cached) mfn: %lx (%llx) p: %ld" ++ " i: %ld pfn: %lx (%llx)\n", ++ mfn, machine, p, ++ i, pfn, XEN_PFN_TO_PSEUDO(pfn)); ++ kt->p2m_mfn_cache_hits++; ++ ++ return pfn; ++ } ++ } ++ /* ++ * Stale entry -- clear it out. ++ */ ++ kt->p2m_mapping_cache[c].mapping = 0; ++ } ++ } ++ ++ /* ++ * The machine address was not cached, so search from the ++ * beginning of the phys_to_machine_mapping array, caching ++ * only the found machine address. ++ */ ++ for (p = 0; p < kt->p2m_table_size; p += XEN_PFNS_PER_PAGE) ++ { ++ if (mapping != kt->last_mapping_read) { ++ if (!readmem(mapping, KVADDR, mp, PAGESIZE(), ++ "phys_to_machine_mapping page", RETURN_ON_ERROR)) ++ error(FATAL, ++ "cannot access phys_to_machine_mapping page\n"); ++ else ++ kt->last_mapping_read = mapping; ++ } ++ ++ kt->p2m_pages_searched++; ++ ++ if (search_mapping_page(mfn, &i, &start, &end)) { ++ pfn = p + i; ++ if (CRASHDEBUG(1)) ++ console("pages: %d mfn: %lx (%llx) p: %ld" ++ " i: %ld pfn: %lx (%llx)\n", ++ (p/XEN_PFNS_PER_PAGE)+1, mfn, machine, ++ p, i, pfn, XEN_PFN_TO_PSEUDO(pfn)); ++ ++ c = kt->p2m_cache_index; ++ kt->p2m_mapping_cache[c].start = start; ++ kt->p2m_mapping_cache[c].end = end; ++ kt->p2m_mapping_cache[c].mapping = mapping; ++ kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE; ++ ++ return pfn; ++ } ++ ++ mapping += PAGESIZE(); ++ } ++ ++ if (CRASHDEBUG(1)) ++ console("machine address %llx not found\n", machine); ++ ++ return (XEN_MFN_NOT_FOUND); ++} ++ ++/* ++ * Search for an mfn in the current mapping page, and if found, ++ * determine the range of contiguous mfns that it's contained ++ * within (if any). ++ */ ++#define PREV_UP 0x1 ++#define NEXT_UP 0x2 ++#define PREV_DOWN 0x4 ++#define NEXT_DOWN 0x8 ++ ++static int ++search_mapping_page(ulong mfn, ulong *index, ulong *startptr, ulong *endptr) ++{ ++ int n, found; ++ ulong i, kmfn; ++ ulong flags, start, end, next, prev, curr; ++ ulong *mp; ++ ++ mp = (ulong *)kt->m2p_page; ++ ++ for (i = 0, found = FALSE; i < XEN_PFNS_PER_PAGE; i++) { ++ kmfn = (*(mp+i)) & ~XEN_FOREIGN_FRAME; ++ ++ if (kmfn == mfn) { ++ found = TRUE; ++ *index = i; ++ break; ++ } ++ } ++ ++ if (found) { ++ flags = 0; ++ next = prev = XEN_MFN_NOT_FOUND; ++ start = end = kmfn; ++ ++ if (i) ++ prev = (*(mp+(i-1))) & ~XEN_FOREIGN_FRAME; ++ if ((i+1) != XEN_PFNS_PER_PAGE) ++ next = (*(mp+(i+1))) & ~XEN_FOREIGN_FRAME; ++ ++ if (prev == (kmfn-1)) ++ flags |= PREV_UP; ++ else if (prev == (kmfn+1)) ++ flags |= PREV_DOWN; ++ ++ if (next == (kmfn+1)) ++ flags |= NEXT_UP; ++ else if (next == (kmfn-1)) ++ flags |= NEXT_DOWN; ++ ++ /* Should be impossible, but just in case... 
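
The cache consulted above keeps a handful of recently used phys_to_machine_mapping pages, each remembered together with the mfn range it was seen to cover, and replaces entries round-robin (FIFO). A minimal sketch of that bookkeeping, with made-up addresses and a fixed slot count; the real code also rereads a page only when it differs from the one last read:

    #include <stdio.h>

    #define CACHE_SLOTS 4

    struct m2p_cache_entry {
            unsigned long mapping;   /* address of the mapping page (0 = empty slot) */
            unsigned long start;     /* lowest contiguous mfn covered */
            unsigned long end;       /* highest contiguous mfn covered */
    };

    static struct m2p_cache_entry cache[CACHE_SLOTS];
    static int next_slot;            /* round-robin (FIFO) replacement index */

    static unsigned long cache_lookup(unsigned long mfn)
    {
            int i;

            for (i = 0; i < CACHE_SLOTS; i++)
                    if (cache[i].mapping &&
                        mfn >= cache[i].start && mfn <= cache[i].end)
                            return cache[i].mapping;
            return 0;                /* miss: fall back to the full search */
    }

    static void cache_insert(unsigned long mapping, unsigned long start,
                             unsigned long end)
    {
            cache[next_slot].mapping = mapping;
            cache[next_slot].start = start;
            cache[next_slot].end = end;
            next_slot = (next_slot + 1) % CACHE_SLOTS;
    }

    int main(void)
    {
            cache_insert(0xc0100000UL, 0x1000UL, 0x1fffUL);
            printf("mfn 0x1234 cached in mapping page: %lx\n", cache_lookup(0x1234UL));
            printf("mfn 0x9999 cached in mapping page: %lx\n", cache_lookup(0x9999UL));
            return 0;
    }
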
*/ ++ if ((flags & PREV_UP) && (flags & NEXT_DOWN)) ++ flags &= ~NEXT_DOWN; ++ else if ((flags & PREV_DOWN) && (flags & NEXT_UP)) ++ flags &= ~NEXT_UP; ++ ++ if (flags & (PREV_UP|PREV_DOWN)) { ++ start = prev; ++ ++ for (n = (i-2); n >= 0; n--) { ++ curr = (*(mp+n)) & ~XEN_FOREIGN_FRAME; ++ if (flags & PREV_UP) { ++ if (curr == (start-1)) ++ start = curr; ++ } else { ++ if (curr == (start+1)) ++ start = curr; ++ } ++ } ++ ++ } ++ ++ if (flags & (NEXT_UP|NEXT_DOWN)) { ++ end = next; ++ ++ for (n = (i+2); n < XEN_PFNS_PER_PAGE; n++) { ++ curr = (*(mp+n)) & ~XEN_FOREIGN_FRAME; ++ if (flags & NEXT_UP) { ++ if (curr == (end+1)) ++ end = curr; ++ } else { ++ if (curr == (end-1)) ++ end = curr; ++ } ++ } ++ ++ ++ } ++ ++ if (start > end) { ++ curr = start; ++ start = end; ++ end = curr; ++ } ++ ++ *startptr = start; ++ *endptr = end; ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "mfn: %lx -> start: %lx end: %lx (%ld mfns)\n", ++ mfn, start, end, end - start); ++ } ++ ++ return found; ++} ++ + +- return count_bits_long(cpu_online_map); ++ ++/* ++ * Read the relevant IKCONFIG (In Kernel Config) data if available. ++ */ ++ ++static char *ikconfig[] = { ++ "CONFIG_NR_CPUS", ++ "CONFIG_PGTABLE_4", ++ "CONFIG_HZ", ++ "CONFIG_DEBUG_BUGVERBOSE", ++ NULL, ++}; ++ ++void ++read_in_kernel_config(int command) ++{ ++ struct syment *sp; ++ int ii, jj, ret, end, found=0; ++ unsigned long size, bufsz; ++ char *pos, *ln, *buf, *head, *tail, *val, *uncomp; ++ char line[512]; ++ z_stream stream; ++ ++ if ((kt->flags & NO_IKCONFIG) && !(pc->flags & RUNTIME)) ++ return; ++ ++ if ((sp = symbol_search("kernel_config_data")) == NULL) { ++ if (command == IKCFG_READ) ++ error(FATAL, ++ "kernel_config_data does not exist in this kernel\n"); ++ return; ++ } ++ ++ /* We don't know how large IKCONFIG is, so we start with ++ * 32k, if we can't find MAGIC_END assume we didn't read ++ * enough, double it and try again. 
++ */ ++ ii = 32; ++ ++again: ++ size = ii * 1024; ++ ++ if ((buf = (char *)malloc(size)) == NULL) { ++ error(WARNING, "cannot malloc IKCONFIG input buffer\n"); ++ return; ++ } ++ ++ if (!readmem(sp->value, KVADDR, buf, size, ++ "kernel_config_data", RETURN_ON_ERROR)) { ++ error(WARNING, "cannot read kernel_config_data\n"); ++ goto out2; ++ } ++ ++ /* Find the start */ ++ if (strstr(buf, MAGIC_START)) ++ head = buf + MAGIC_SIZE + 10; /* skip past MAGIC_START and gzip header */ ++ else { ++ error(WARNING, "could not find MAGIC_START!\n"); ++ goto out2; ++ } ++ ++ tail = head; ++ ++ end = strlen(MAGIC_END); ++ ++ /* Find the end*/ ++ while (tail < (buf + (size - 1))) { ++ ++ if (strncmp(tail, MAGIC_END, end)==0) { ++ found = 1; ++ break; ++ } ++ tail++; ++ } ++ ++ if (found) { ++ bufsz = tail - head; ++ size = 10 * bufsz; ++ if ((uncomp = (char *)malloc(size)) == NULL) { ++ error(WARNING, "cannot malloc IKCONFIG output buffer\n"); ++ goto out2; ++ } ++ } else { ++ if (ii > 512) { ++ error(WARNING, "could not find MAGIC_END!\n"); ++ goto out2; ++ } else { ++ free(buf); ++ ii *= 2; ++ goto again; ++ } ++ } ++ ++ ++ /* initialize zlib */ ++ stream.next_in = (Bytef *)head; ++ stream.avail_in = (uInt)bufsz; ++ ++ stream.next_out = (Bytef *)uncomp; ++ stream.avail_out = (uInt)size; ++ ++ stream.zalloc = NULL; ++ stream.zfree = NULL; ++ stream.opaque = NULL; ++ ++ ret = inflateInit2(&stream, -MAX_WBITS); ++ if (ret != Z_OK) { ++ read_in_kernel_config_err(ret, "initialize"); ++ goto out1; ++ } ++ ++ ret = inflate(&stream, Z_FINISH); ++ ++ if (ret != Z_STREAM_END) { ++ inflateEnd(&stream); ++ if (ret == Z_NEED_DICT || ++ (ret == Z_BUF_ERROR && stream.avail_in == 0)) { ++ read_in_kernel_config_err(Z_DATA_ERROR, "uncompress"); ++ goto out1; ++ } ++ read_in_kernel_config_err(ret, "uncompress"); ++ goto out1; ++ } ++ size = stream.total_out; ++ ++ ret = inflateEnd(&stream); ++ ++ pos = uncomp; ++ ++ do { ++ ret = sscanf(pos, "%511[^\n]\n%n", line, &ii); ++ if (ret > 0) { ++ if ((command == IKCFG_READ) || CRASHDEBUG(8)) ++ fprintf(fp, "%s\n", line); ++ ++ pos += ii; ++ ++ ln = line; ++ ++ /* skip leading whitespace */ ++ while (whitespace(*ln)) ++ ln++; ++ ++ /* skip comments -- except when looking for "not set" */ ++ if (*ln == '#') { ++ if (strstr(ln, "CONFIG_DEBUG_BUGVERBOSE") && ++ strstr(ln, "not set")) ++ kt->flags |= BUGVERBOSE_OFF; ++ continue; ++ } ++ ++ /* Find '=' */ ++ if ((head = strchr(ln, '=')) != NULL) { ++ *head = '\0'; ++ val = head + 1; ++ ++ head--; ++ ++ /* skip trailing whitespace */ ++ while (whitespace(*head)) { ++ *head = '\0'; ++ head--; ++ } ++ ++ /* skip whitespace */ ++ while (whitespace(*val)) ++ val++; ++ ++ } else /* Bad line, skip it */ ++ continue; ++ ++ if (command != IKCFG_INIT) ++ continue; ++ ++ for (jj = 0; ikconfig[jj]; jj++) { ++ if (STREQ(ln, ikconfig[jj])) { ++ ++ if (STREQ(ln, "CONFIG_NR_CPUS")) { ++ kt->kernel_NR_CPUS = atoi(val); ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "CONFIG_NR_CPUS: %d\n", ++ kt->kernel_NR_CPUS); ++ ++ } else if (STREQ(ln, "CONFIG_PGTABLE_4")) { ++ machdep->flags |= VM_4_LEVEL; ++ if (CRASHDEBUG(1)) ++ error(INFO, "CONFIG_PGTABLE_4\n"); ++ ++ } else if (STREQ(ln, "CONFIG_HZ")) { ++ machdep->hz = atoi(val); ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "CONFIG_HZ: %d\n", ++ machdep->hz); ++ } ++ } ++ } ++ } ++ } while (ret > 0); ++ ++out1: ++ free(uncomp); ++out2: ++ free(buf); ++ ++ return; ++} ++ ++static void ++read_in_kernel_config_err(int e, char *msg) ++{ ++ error(WARNING, "zlib could not %s\n", msg); ++ switch (e) { ++ case Z_OK: ++ 
fprintf(fp, "Z_OK\n"); ++ break; ++ ++ case Z_STREAM_END: ++ fprintf(fp, "Z_STREAM_END\n"); ++ break; ++ ++ case Z_NEED_DICT: ++ fprintf(fp, "Z_NEED_DICT\n"); ++ break; ++ ++ case Z_ERRNO: ++ fprintf(fp, "Z_ERNO\n"); ++ break; ++ ++ case Z_STREAM_ERROR: ++ fprintf(fp, "Z_STREAM\n"); ++ break; ++ ++ case Z_DATA_ERROR: ++ fprintf(fp, "Z_DATA_ERROR\n"); ++ break; ++ ++ case Z_MEM_ERROR: /* out of memory */ ++ fprintf(fp, "Z_MEM_ERROR\n"); ++ break; ++ ++ case Z_BUF_ERROR: /* not enough room in output buf */ ++ fprintf(fp, "Z_BUF_ERROR\n"); ++ break; ++ ++ case Z_VERSION_ERROR: ++ fprintf(fp, "Z_VERSION_ERROR\n"); ++ break; ++ ++ default: ++ fprintf(fp, "UNKNOWN ERROR: %d\n", e); ++ break; ++ } + } +--- crash/lkcd_vmdump_v1.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_vmdump_v1.h 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* lkcd_vmdump_v1.h - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -114,8 +114,12 @@ + + /* the dump registers */ + #ifndef IA64 ++#ifndef S390 ++#ifndef S390X + struct pt_regs dh_regs; + #endif ++#endif ++#endif + + /* the address of the current task */ + struct task_struct *dh_current_task; +--- crash/xen_hyper_dump_tables.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/xen_hyper_dump_tables.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,948 @@ ++/* ++ * xen_hyper_dump_tables.c ++ * ++ * Portions Copyright (C) 2006-2007 Fujitsu Limited ++ * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. ++ * ++ * Authors: Itsuro Oda ++ * Fumihiko Kakuma ++ * ++ * This file is part of Xencrash. ++ * ++ * Xencrash is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation (version 2 of the License). ++ * ++ * Xencrash is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with Xencrash; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ */ ++ ++#include "defs.h" ++ ++#ifdef XEN_HYPERVISOR_ARCH ++#include "xen_hyper_defs.h" ++ ++static void xen_hyper_dump_xen_hyper_table(int verbose); ++static void xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose); ++static void xen_hyper_dump_xen_hyper_domain_table(int verbose); ++static void xen_hyper_dump_xen_hyper_vcpu_table(int verbose); ++static void xen_hyper_dump_xen_hyper_pcpu_table(int verbose); ++static void xen_hyper_dump_xen_hyper_sched_table(int verbose); ++static void xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct); ++static void xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct); ++ ++static void xen_hyper_dump_mem(void *mem, ulong len, int dsz); ++ ++/* ++ * Get help for a command, to dump an internal table, or the GNU public ++ * license copying/warranty information. ++ */ ++void ++xen_hyper_cmd_help(void) ++{ ++ int c; ++ int oflag; ++ ++ oflag = 0; ++ ++ while ((c = getopt(argcnt, args, ++ "aBbcDgHhM:mnOopszX:")) != EOF) { ++ switch(c) ++ { ++ case 'a': ++ dump_alias_data(); ++ return; ++ case 'b': ++ dump_shared_bufs(); ++ return; ++ case 'B': ++ dump_build_data(); ++ return; ++ case 'c': ++ dump_numargs_cache(); ++ return; ++ case 'n': ++ case 'D': ++ dumpfile_memory(DUMPFILE_MEM_DUMP); ++ return; ++ case 'g': ++ dump_gdb_data(); ++ return; ++ case 'H': ++ dump_hash_table(VERBOSE); ++ return; ++ case 'h': ++ dump_hash_table(!VERBOSE); ++ return; ++ case 'M': ++ dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL)); ++ return; ++ case 'm': ++ dump_machdep_table(0); ++ return; ++ case 'O': ++ dump_offset_table(NULL, TRUE); ++ return; ++ case 'o': ++ oflag = TRUE; ++ break; ++ case 'p': ++ dump_program_context(); ++ return; ++ case 's': ++ dump_symbol_table(); ++ return; ++ case 'X': ++ if (strlen(optarg) != 3) { ++ argerrs++; ++ break; ++ } ++ if (!strncmp("Xen", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_table(VERBOSE); ++ else if (!strncmp("xen", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_table(!VERBOSE); ++ else if (!strncmp("Dmp", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_dumpinfo_table(VERBOSE); ++ else if (!strncmp("dmp", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_dumpinfo_table(!VERBOSE); ++ else if (!strncmp("Dom", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_domain_table(VERBOSE); ++ else if (!strncmp("dom", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_domain_table(!VERBOSE); ++ else if (!strncmp("Vcp", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_vcpu_table(VERBOSE); ++ else if (!strncmp("vcp", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_vcpu_table(!VERBOSE); ++ else if (!strncmp("Pcp", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_pcpu_table(VERBOSE); ++ else if (!strncmp("pcp", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_pcpu_table(!VERBOSE); ++ else if (!strncmp("Sch", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_sched_table(VERBOSE); ++ else if (!strncmp("sch", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_sched_table(!VERBOSE); ++ else if (!strncmp("siz", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_size_table(NULL, TRUE); ++ else if (!strncmp("ofs", optarg, strlen(optarg))) ++ xen_hyper_dump_xen_hyper_offset_table(NULL, TRUE); ++ else { ++ argerrs++; ++ break; ++ } ++ return; ++ case 'z': ++ fprintf(fp, "help options:\n"); ++ fprintf(fp, " -a - alias data\n"); ++ fprintf(fp, " -b - shared buffer data\n"); ++ fprintf(fp, " -B - build data\n"); ++ fprintf(fp, " -c - numargs cache\n"); ++ fprintf(fp, " -M 
machine specific\n"); ++ fprintf(fp, " -m - machdep_table\n"); ++ fprintf(fp, " -s - symbol table data\n"); ++ fprintf(fp, " -o - offset_table and size_table\n"); ++ fprintf(fp, " -p - program_context\n"); ++ fprintf(fp, " -h - hash_table data\n"); ++ fprintf(fp, " -H - hash_table data (verbose)\n"); ++ fprintf(fp, " -X Xen - xen table data (verbose)\n"); ++ fprintf(fp, " -X xen - xen table data\n"); ++ fprintf(fp, " -X Dmp - dumpinfo table data (verbose)\n"); ++ fprintf(fp, " -X dmp - dumpinfo table data\n"); ++ fprintf(fp, " -X Dom - domain table data (verbose)\n"); ++ fprintf(fp, " -X dom - domain table data\n"); ++ fprintf(fp, " -X Vcp - vcpu table data (verbose)\n"); ++ fprintf(fp, " -X vcp - vcpu table data\n"); ++ fprintf(fp, " -X Pcp - pcpu table data (verbose)\n"); ++ fprintf(fp, " -X pcp - pcpu table data\n"); ++ fprintf(fp, " -X Sch - schedule table data (verbose)\n"); ++ fprintf(fp, " -X sch - schedule table data\n"); ++ fprintf(fp, " -X siz - size table data\n"); ++ fprintf(fp, " -X ofs - offset table data\n"); ++ return; ++ default: ++ argerrs++; ++ break; ++ } ++ } ++ ++ if (argerrs) ++ cmd_usage(pc->curcmd, COMPLETE_HELP); ++ ++ if (!args[optind]) { ++ if (oflag) ++ dump_offset_table(NULL, FALSE); ++ else ++ display_help_screen(""); ++ return; ++ } ++ ++ do { ++ if (oflag) ++ dump_offset_table(args[optind], FALSE); ++ else ++ cmd_usage(args[optind], COMPLETE_HELP); ++ optind++; ++ } while (args[optind]); ++} ++ ++/* ++ * "help -x xen" output ++ */ ++static void ++xen_hyper_dump_xen_hyper_table(int verbose) ++{ ++ char buf[XEN_HYPER_CMD_BUFSIZE]; ++ uint cpuid; ++ int len, flag, i; ++ ++ len = 14; ++ flag = XEN_HYPER_PRI_R; ++ ++ XEN_HYPER_PRI(fp, len, "cpu_data_address: ", buf, flag, ++ (buf, "%lu\n", xht->cpu_data_address)); ++ XEN_HYPER_PRI(fp, len, "cpu_curr: ", buf, flag, ++ (buf, "%u\n", xht->cpu_curr)); ++ XEN_HYPER_PRI(fp, len, "max_cpus: ", buf, flag, ++ (buf, "%u\n", xht->max_cpus)); ++ XEN_HYPER_PRI(fp, len, "cores: ", buf, flag, ++ (buf, "%d\n", xht->cores)); ++ XEN_HYPER_PRI(fp, len, "pcpus: ", buf, flag, ++ (buf, "%d\n", xht->pcpus)); ++ XEN_HYPER_PRI(fp, len, "vcpus: ", buf, flag, ++ (buf, "%d\n", xht->vcpus)); ++ XEN_HYPER_PRI(fp, len, "domains: ", buf, flag, ++ (buf, "%d\n", xht->domains)); ++ XEN_HYPER_PRI(fp, len, "sys_pages: ", buf, flag, ++ (buf, "%lu\n", xht->sys_pages)); ++ XEN_HYPER_PRI(fp, len, "crashing_cpu: ", buf, flag, ++ (buf, "%d\n", xht->crashing_cpu)); ++ XEN_HYPER_PRI(fp, len, "crashing_vcc: ", buf, flag, ++ (buf, "%p\n", xht->crashing_vcc)); ++ XEN_HYPER_PRI(fp, len, "max_page: ", buf, flag, ++ (buf, "%lu\n", xht->max_page)); ++ XEN_HYPER_PRI(fp, len, "total_pages: ", buf, flag, ++ (buf, "%lu\n", xht->total_pages)); ++ XEN_HYPER_PRI(fp, len, "cpumask: ", buf, flag, ++ (buf, "%p\n", xht->cpumask)); ++ if (verbose && xht->cpumask) { ++ xen_hyper_dump_mem(xht->cpumask, ++ XEN_HYPER_SIZE(cpumask_t), sizeof(long)); ++ } ++ XEN_HYPER_PRI(fp, len, "cpu_idxs: ", buf, flag, ++ (buf, "%p\n", xht->cpu_idxs)); ++ if (verbose) { ++ for_cpu_indexes(i, cpuid) ++ fprintf(fp, "%03d : %d\n", i, cpuid); ++ } ++} ++ ++/* ++ * "help -x dmp" output ++ */ ++static void ++xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose) ++{ ++ char buf[XEN_HYPER_CMD_BUFSIZE]; ++ int len, flag; ++ ++ len = 25; ++ flag = XEN_HYPER_PRI_R; ++ ++ XEN_HYPER_PRI(fp, len, "note_ver: ", buf, flag, ++ (buf, "%u\n", xhdit->note_ver)); ++ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, ++ (buf, "%p\n", xhdit->context_array)); ++ if (verbose && xhdit->context_array) { ++ 
xen_hyper_dump_mem((long *)xhdit->context_array, ++ sizeof(struct xen_hyper_dumpinfo_context) * ++ XEN_HYPER_MAX_CPUS(), sizeof(long)); ++ } ++ XEN_HYPER_PRI(fp, len, "context_xen_core_array: ", buf, flag, ++ (buf, "%p\n", xhdit->context_xen_core_array)); ++ if (verbose && xhdit->context_xen_core_array) { ++ xen_hyper_dump_mem((long *)xhdit->context_xen_core_array, ++ sizeof(struct xen_hyper_dumpinfo_context_xen_core) * ++ XEN_HYPER_MAX_CPUS(), sizeof(long)); ++ } ++ XEN_HYPER_PRI_CONST(fp, len, "context_xen_info: ", flag|XEN_HYPER_PRI_LF); ++ XEN_HYPER_PRI(fp, len, "note: ", buf, flag, ++ (buf, "%lx\n", xhdit->context_xen_info.note)); ++ XEN_HYPER_PRI(fp, len, "pcpu_id: ", buf, flag, ++ (buf, "%u\n", xhdit->context_xen_info.pcpu_id)); ++ XEN_HYPER_PRI(fp, len, "crash_xen_info_ptr: ", buf, flag, ++ (buf, "%p\n", xhdit->context_xen_info.crash_xen_info_ptr)); ++ XEN_HYPER_PRI(fp, len, "crash_note_core_array: ", buf, flag, ++ (buf, "%p\n", xhdit->crash_note_core_array)); ++ if (verbose && xhdit->crash_note_core_array) { ++ xen_hyper_dump_mem((long *)xhdit->crash_note_core_array, ++ xhdit->core_size * XEN_HYPER_NR_PCPUS(), ++ sizeof(long)); ++ } ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_core_array: ", buf, flag, ++ (buf, "%p\n", xhdit->crash_note_xen_core_array)); ++ if (verbose && xhdit->crash_note_xen_core_array) { ++ xen_hyper_dump_mem( ++ xhdit->crash_note_xen_core_array, ++ xhdit->xen_core_size * XEN_HYPER_NR_PCPUS(), ++ sizeof(long)); ++ } ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_info_ptr: ", buf, flag, ++ (buf, "%p\n", xhdit->crash_note_xen_info_ptr)); ++ if (verbose && xhdit->crash_note_xen_info_ptr) { ++ xen_hyper_dump_mem( ++ xhdit->crash_note_xen_info_ptr, ++ xhdit->xen_info_size, sizeof(long)); ++ } ++ XEN_HYPER_PRI(fp, len, "xen_info_cpu: ", buf, flag, ++ (buf, "%u\n", xhdit->xen_info_cpu)); ++ XEN_HYPER_PRI(fp, len, "note_size: ", buf, flag, ++ (buf, "%u\n", xhdit->note_size)); ++ XEN_HYPER_PRI(fp, len, "core_offset: ", buf, flag, ++ (buf, "%u\n", xhdit->core_offset)); ++ XEN_HYPER_PRI(fp, len, "core_size: ", buf, flag, ++ (buf, "%u\n", xhdit->core_size)); ++ XEN_HYPER_PRI(fp, len, "xen_core_offset: ", buf, flag, ++ (buf, "%u\n", xhdit->xen_core_offset)); ++ XEN_HYPER_PRI(fp, len, "xen_core_size: ", buf, flag, ++ (buf, "%u\n", xhdit->xen_core_size)); ++ XEN_HYPER_PRI(fp, len, "xen_info_offset: ", buf, flag, ++ (buf, "%u\n", xhdit->xen_info_offset)); ++ XEN_HYPER_PRI(fp, len, "xen_info_size: ", buf, flag, ++ (buf, "%u\n", xhdit->xen_info_size)); ++} ++ ++/* ++ * "help -x dom" output ++ */ ++static void ++xen_hyper_dump_xen_hyper_domain_table(int verbose) ++{ ++ char buf[XEN_HYPER_CMD_BUFSIZE]; ++ struct xen_hyper_domain_context *dcca; ++ int len, flag, i; ++ ++ len = 22; ++ flag = XEN_HYPER_PRI_R; ++ ++ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, ++ (buf, "%p\n", xhdt->context_array)); ++ if (verbose) { ++ char buf1[XEN_HYPER_CMD_BUFSIZE]; ++ int j; ++ for (i = 0, dcca = xhdt->context_array; ++ i < xhdt->context_array_cnt; i++, dcca++) { ++ snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", i); ++ XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); ++ XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, ++ (buf, "%lx\n", dcca->domain)); ++ XEN_HYPER_PRI(fp, len, "domain_id: ", buf, flag, ++ (buf, "%d\n", dcca->domain_id)); ++ XEN_HYPER_PRI(fp, len, "tot_pages: ", buf, flag, ++ (buf, "%x\n", dcca->tot_pages)); ++ XEN_HYPER_PRI(fp, len, "max_pages: ", buf, flag, ++ (buf, "%x\n", dcca->max_pages)); ++ XEN_HYPER_PRI(fp, len, "xenheap_pages: ", buf, flag, ++ (buf, 
"%x\n", dcca->xenheap_pages)); ++ XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, ++ (buf, "%lx\n", dcca->shared_info)); ++ XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, ++ (buf, "%lx\n", dcca->sched_priv)); ++ XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, ++ (buf, "%lx\n", dcca->next_in_list)); ++ XEN_HYPER_PRI(fp, len, "domain_flags: ", buf, flag, ++ (buf, "%lx\n", dcca->domain_flags)); ++ XEN_HYPER_PRI(fp, len, "evtchn: ", buf, flag, ++ (buf, "%lx\n", dcca->evtchn)); ++ XEN_HYPER_PRI(fp, len, "vcpu_cnt: ", buf, flag, ++ (buf, "%d\n", dcca->vcpu_cnt)); ++ for (j = 0; j < XEN_HYPER_MAX_VIRT_CPUS; j++) { ++ snprintf(buf1, XEN_HYPER_CMD_BUFSIZE, "vcpu[%d]: ", j); ++ XEN_HYPER_PRI(fp, len, buf1, buf, flag, ++ (buf, "%lx\n", dcca->vcpu[j])); ++ } ++ XEN_HYPER_PRI(fp, len, "vcpu_context_array: ", buf, flag, ++ (buf, "%p\n", dcca->vcpu_context_array)); ++ } ++ } ++ XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, ++ (buf, "%d\n", xhdt->context_array_cnt)); ++ XEN_HYPER_PRI(fp, len, "running_domains: ", buf, flag, ++ (buf, "%lu\n", xhdt->running_domains)); ++ XEN_HYPER_PRI(fp, len, "dom_io: ", buf, flag, ++ (buf, "%p\n", xhdt->dom_io)); ++ XEN_HYPER_PRI(fp, len, "dom_xen: ", buf, flag, ++ (buf, "%p\n", xhdt->dom_xen)); ++ XEN_HYPER_PRI(fp, len, "dom0: ", buf, flag, ++ (buf, "%p\n", xhdt->dom0)); ++ XEN_HYPER_PRI(fp, len, "idle_domain: ", buf, flag, ++ (buf, "%p\n", xhdt->idle_domain)); ++ XEN_HYPER_PRI(fp, len, "curr_domain: ", buf, flag, ++ (buf, "%p\n", xhdt->curr_domain)); ++ XEN_HYPER_PRI(fp, len, "last: ", buf, flag, ++ (buf, "%p\n", xhdt->last)); ++ XEN_HYPER_PRI(fp, len, "domain_struct: ", buf, flag, ++ (buf, "%p\n", xhdt->domain_struct)); ++ XEN_HYPER_PRI(fp, len, "domain_struct_verify: ", buf, flag, ++ (buf, "%p\n", xhdt->domain_struct_verify)); ++} ++ ++/* ++ * "help -x vcp" output ++ */ ++static void ++xen_hyper_dump_xen_hyper_vcpu_table(int verbose) ++{ ++ char buf[XEN_HYPER_CMD_BUFSIZE]; ++ int len, flag; ++ ++ len = 25; ++ flag = XEN_HYPER_PRI_R; ++ ++ XEN_HYPER_PRI(fp, len, "vcpu_context_arrays: ", buf, flag, ++ (buf, "%p\n", xhvct->vcpu_context_arrays)); ++ XEN_HYPER_PRI(fp, len, "vcpu_context_arrays_cnt: ", buf, flag, ++ (buf, "%d\n", xhvct->vcpu_context_arrays_cnt)); ++ if (verbose) { ++ struct xen_hyper_vcpu_context_array *vcca; ++ struct xen_hyper_vcpu_context *vca; ++ int i, j; ++ ++ for (i = 0, vcca = xhvct->vcpu_context_arrays; ++ i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) { ++ snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "vcpu_context_arrays[%d]: ", i); ++ XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); ++ if (vcca->context_array) { ++ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, ++ (buf, "%p\n", vcca->context_array)); ++ } else { ++ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, ++ (buf, "NULL\n")); ++ } ++ XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, ++ (buf, "%d\n", vcca->context_array_cnt)); ++ XEN_HYPER_PRI(fp, len, "context_array_valid: ", buf, flag, ++ (buf, "%d\n", vcca->context_array_valid)); ++ for (j = 0, vca = vcca->context_array; ++ j < vcca->context_array_cnt; j++, vca++) { ++ snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", j); ++ XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); ++ XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, ++ (buf, "%lx\n", vca->vcpu)); ++ XEN_HYPER_PRI(fp, len, "vcpu_id: ", buf, flag, ++ (buf, "%d\n", vca->vcpu_id)); ++ XEN_HYPER_PRI(fp, len, "processor: ", buf, flag, ++ (buf, "%d\n", vca->processor)); ++ XEN_HYPER_PRI(fp, len, "vcpu_info: ", buf, flag, ++ (buf, 
"%lx\n", vca->vcpu_info)); ++ XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, ++ (buf, "%lx\n", vca->domain)); ++ XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, ++ (buf, "%lx\n", vca->next_in_list)); ++ XEN_HYPER_PRI(fp, len, "sleep_tick: ", buf, flag, ++ (buf, "%lx\n", vca->sleep_tick)); ++ XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, ++ (buf, "%lx\n", vca->sched_priv)); ++ XEN_HYPER_PRI(fp, len, "state: ", buf, flag, ++ (buf, "%d\n", vca->state)); ++ XEN_HYPER_PRI(fp, len, "state_entry_time: ", buf, flag, ++ (buf, "%llux\n", (unsigned long long)(vca->state_entry_time))); ++ XEN_HYPER_PRI(fp, len, "runstate_guest: ", buf, flag, ++ (buf, "%lx\n", vca->runstate_guest)); ++ XEN_HYPER_PRI(fp, len, "vcpu_flags: ", buf, flag, ++ (buf, "%lx\n", vca->vcpu_flags)); ++ } ++ } ++ } ++ XEN_HYPER_PRI(fp, len, "idle_vcpu: ", buf, flag, ++ (buf, "%lx\n", xhvct->idle_vcpu)); ++ XEN_HYPER_PRI(fp, len, "idle_vcpu_context_array: ", buf, flag, ++ (buf, "%p\n", xhvct->idle_vcpu_context_array)); ++ XEN_HYPER_PRI(fp, len, "last: ", buf, flag, ++ (buf, "%p\n", xhvct->last)); ++ XEN_HYPER_PRI(fp, len, "vcpu_struct: ", buf, flag, ++ (buf, "%p\n", xhvct->vcpu_struct)); ++ XEN_HYPER_PRI(fp, len, "vcpu_struct_verify: ", buf, flag, ++ (buf, "%p\n", xhvct->vcpu_struct_verify)); ++} ++ ++/* ++ * "help -x pcp" output ++ */ ++static void ++xen_hyper_dump_xen_hyper_pcpu_table(int verbose) ++{ ++ char buf[XEN_HYPER_CMD_BUFSIZE]; ++ struct xen_hyper_pcpu_context *pcca; ++ int len, flag, i; ++#ifdef X86_64 ++ uint64_t *ist_p; ++ int j; ++#endif ++ ++ len = 21; ++ flag = XEN_HYPER_PRI_R; ++ ++ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, ++ (buf, "%p\n", xhpct->context_array)); ++ if (verbose) { ++ for (i = 0, pcca = xhpct->context_array; ++ i < XEN_HYPER_MAX_CPUS(); i++, pcca++) { ++ snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array %d: ", i); ++ XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); ++ XEN_HYPER_PRI(fp, len, "pcpu: ", buf, flag, ++ (buf, "%lx\n", pcca->pcpu)); ++ XEN_HYPER_PRI(fp, len, "processor_id: ", buf, flag, ++ (buf, "%u\n", pcca->processor_id)); ++ XEN_HYPER_PRI(fp, len, "guest_cpu_user_regs: ", buf, flag, ++ (buf, "%lx\n", pcca->guest_cpu_user_regs)); ++ XEN_HYPER_PRI(fp, len, "current_vcpu: ", buf, flag, ++ (buf, "%lx\n", pcca->current_vcpu)); ++ XEN_HYPER_PRI(fp, len, "init_tss: ", buf, flag, ++ (buf, "%lx\n", pcca->init_tss)); ++#ifdef X86 ++ XEN_HYPER_PRI(fp, len, "sp.esp0: ", buf, flag, ++ (buf, "%x\n", pcca->sp.esp0)); ++#endif ++#ifdef X86_64 ++ XEN_HYPER_PRI(fp, len, "sp.rsp0: ", buf, flag, ++ (buf, "%lx\n", pcca->sp.rsp0)); ++ for (j = 0, ist_p = pcca->ist; ++ j < XEN_HYPER_TSS_IST_MAX; j++, ist_p++) { ++ XEN_HYPER_PRI(fp, len, "ist: ", buf, flag, ++ (buf, "%lx\n", *ist_p)); ++ } ++#endif ++ } ++ } ++ XEN_HYPER_PRI(fp, len, "last: ", buf, flag, ++ (buf, "%p\n", xhpct->last)); ++ XEN_HYPER_PRI(fp, len, "pcpu_struct: ", buf, flag, ++ (buf, "%p\n", xhpct->pcpu_struct)); ++} ++ ++/* ++ * "help -x sch" output ++ */ ++static void ++xen_hyper_dump_xen_hyper_sched_table(int verbose) ++{ ++ struct xen_hyper_sched_context *schc; ++ char buf[XEN_HYPER_CMD_BUFSIZE]; ++ int len, flag, i; ++ ++ len = 21; ++ flag = XEN_HYPER_PRI_R; ++ ++ XEN_HYPER_PRI(fp, len, "name: ", buf, flag, ++ (buf, "%s\n", xhscht->name)); ++ XEN_HYPER_PRI(fp, len, "opt_sched: ", buf, flag, ++ (buf, "%s\n", xhscht->opt_sched)); ++ XEN_HYPER_PRI(fp, len, "sched_id: ", buf, flag, ++ (buf, "%d\n", xhscht->sched_id)); ++ XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag, ++ (buf, "%lx\n", xhscht->scheduler)); ++ 
XEN_HYPER_PRI(fp, len, "scheduler_struct: ", buf, flag, ++ (buf, "%p\n", xhscht->scheduler_struct)); ++ XEN_HYPER_PRI(fp, len, "sched_context_array: ", buf, flag, ++ (buf, "%p\n", xhscht->sched_context_array)); ++ if (verbose) { ++ for (i = 0, schc = xhscht->sched_context_array; ++ i < xht->pcpus; i++, schc++) { ++ XEN_HYPER_PRI(fp, len, "sched_context_array[", buf, ++ flag, (buf, "%d]\n", i)); ++ XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag, ++ (buf, "%lx\n", schc->schedule_data)); ++ XEN_HYPER_PRI(fp, len, "curr: ", buf, flag, ++ (buf, "%lx\n", schc->curr)); ++ XEN_HYPER_PRI(fp, len, "idle: ", buf, flag, ++ (buf, "%lx\n", schc->idle)); ++ XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, ++ (buf, "%lx\n", schc->sched_priv)); ++ XEN_HYPER_PRI(fp, len, "tick: ", buf, flag, ++ (buf, "%lx\n", schc->tick)); ++ } ++ } ++} ++ ++/* ++ * "help -x siz" output ++ */ ++static void ++xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct) ++{ ++ char buf[XEN_HYPER_CMD_BUFSIZE]; ++ int len, flag; ++ ++ len = 23; ++ flag = XEN_HYPER_PRI_R; ++ ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.ELF_Prstatus)); ++ XEN_HYPER_PRI(fp, len, "ELF_Signifo: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.ELF_Signifo)); ++ XEN_HYPER_PRI(fp, len, "ELF_Gregset: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.ELF_Gregset)); ++ XEN_HYPER_PRI(fp, len, "ELF_Timeval: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.ELF_Timeval)); ++ XEN_HYPER_PRI(fp, len, "arch_domain: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.arch_domain)); ++ XEN_HYPER_PRI(fp, len, "arch_shared_info: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.arch_shared_info)); ++ XEN_HYPER_PRI(fp, len, "cpu_info: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.cpu_info)); ++ XEN_HYPER_PRI(fp, len, "cpu_time: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.cpu_time)); ++ XEN_HYPER_PRI(fp, len, "cpu_user_regs: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.cpu_user_regs)); ++ XEN_HYPER_PRI(fp, len, "cpumask_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.cpumask_t)); ++ XEN_HYPER_PRI(fp, len, "cpuinfo_ia64: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.cpuinfo_ia64)); ++ XEN_HYPER_PRI(fp, len, "cpuinfo_x86: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.cpuinfo_x86)); ++ XEN_HYPER_PRI(fp, len, "crash_note_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.crash_note_t)); ++ XEN_HYPER_PRI(fp, len, "crash_note_core_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.crash_note_core_t)); ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_t)); ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_core_t)); ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_info_t)); ++ XEN_HYPER_PRI(fp, len, "crash_xen_core_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.crash_xen_core_t)); ++ XEN_HYPER_PRI(fp, len, "crash_xen_info_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.crash_xen_info_t)); ++ XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.domain)); ++#ifdef IA64 ++ XEN_HYPER_PRI(fp, len, "mm_struct: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.mm_struct)); ++#endif ++ XEN_HYPER_PRI(fp, len, "note_buf_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.note_buf_t)); ++ XEN_HYPER_PRI(fp, 
len, "schedule_data: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.schedule_data)); ++ XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.scheduler)); ++ XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.shared_info)); ++ XEN_HYPER_PRI(fp, len, "timer: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.timer)); ++ XEN_HYPER_PRI(fp, len, "tss_struct: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.tss_struct)); ++ XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.vcpu)); ++ XEN_HYPER_PRI(fp, len, "vcpu_runstate_info: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.vcpu_runstate_info)); ++ XEN_HYPER_PRI(fp, len, "xen_crash_xen_regs_t: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_size_table.xen_crash_xen_regs_t)); ++} ++ ++/* ++ * "help -x ofs" output ++ */ ++static void ++xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct) ++{ ++ char buf[XEN_HYPER_CMD_BUFSIZE]; ++ int len, flag; ++ ++ len = 45; ++ flag = XEN_HYPER_PRI_R; ++ ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_info: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_info)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cursig: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cursig)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sigpend: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sigpend)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sighold: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sighold)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pid: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pid)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_ppid: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_ppid)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pgrp: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pgrp)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sid: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sid)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_stime: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_stime)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cutime: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cutime)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cstime: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cstime)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_reg: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_reg)); ++ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_fpvalid: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_fpvalid)); ++ XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_sec: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_sec)); ++ XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_usec: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_usec)); ++ ++#ifdef IA64 ++ XEN_HYPER_PRI(fp, len, "arch_domain_mm: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.arch_domain_mm)); ++#endif ++ ++ XEN_HYPER_PRI(fp, len, "arch_shared_info_max_pfn: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_max_pfn)); ++ XEN_HYPER_PRI(fp, len, "arch_shared_info_pfn_to_mfn_frame_list_list: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_pfn_to_mfn_frame_list_list)); ++ XEN_HYPER_PRI(fp, len, "arch_shared_info_nmi_reason: ", buf, flag, ++ (buf, 
"%ld\n", xen_hyper_offset_table.arch_shared_info_nmi_reason)); ++ ++ XEN_HYPER_PRI(fp, len, "cpu_info_guest_cpu_user_regs: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.cpu_info_guest_cpu_user_regs)); ++ XEN_HYPER_PRI(fp, len, "cpu_info_processor_id: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.cpu_info_processor_id)); ++ XEN_HYPER_PRI(fp, len, "cpu_info_current_vcpu: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.cpu_info_current_vcpu)); ++ ++ XEN_HYPER_PRI(fp, len, "cpu_time_local_tsc_stamp: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_local_tsc_stamp)); ++ XEN_HYPER_PRI(fp, len, "cpu_time_stime_local_stamp: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_local_stamp)); ++ XEN_HYPER_PRI(fp, len, "cpu_time_stime_master_stamp: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_master_stamp)); ++ XEN_HYPER_PRI(fp, len, "cpu_time_tsc_scale: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_tsc_scale)); ++ XEN_HYPER_PRI(fp, len, "cpu_time_calibration_timer: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_calibration_timer)); ++ ++ XEN_HYPER_PRI(fp, len, "crash_note_t_core: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_core)); ++ XEN_HYPER_PRI(fp, len, "crash_note_t_xen: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen)); ++ XEN_HYPER_PRI(fp, len, "crash_note_t_xen_regs: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_regs)); ++ XEN_HYPER_PRI(fp, len, "crash_note_t_xen_info: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_info)); ++ ++ XEN_HYPER_PRI(fp, len, "crash_note_core_t_note: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_note)); ++ XEN_HYPER_PRI(fp, len, "crash_note_core_t_desc: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_desc)); ++ ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_t_note: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_note)); ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_t_desc: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_desc)); ++ ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_note: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_note)); ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_desc: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_desc)); ++ ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_note: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_note)); ++ XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_desc: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_desc)); ++ ++ XEN_HYPER_PRI(fp, len, "domain_page_list: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_page_list)); ++ XEN_HYPER_PRI(fp, len, "domain_xenpage_list: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_xenpage_list)); ++ XEN_HYPER_PRI(fp, len, "domain_domain_id: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_domain_id)); ++ XEN_HYPER_PRI(fp, len, "domain_tot_pages: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_tot_pages)); ++ XEN_HYPER_PRI(fp, len, "domain_max_pages: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_max_pages)); ++ XEN_HYPER_PRI(fp, len, "domain_xenheap_pages: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_xenheap_pages)); ++ XEN_HYPER_PRI(fp, len, "domain_shared_info: ", buf, flag, 
++ (buf, "%ld\n", xen_hyper_offset_table.domain_shared_info)); ++ XEN_HYPER_PRI(fp, len, "domain_sched_priv: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_sched_priv)); ++ XEN_HYPER_PRI(fp, len, "domain_next_in_list: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_next_in_list)); ++ XEN_HYPER_PRI(fp, len, "domain_domain_flags: ", buf, flag, ++ (buf, "%lx\n", xen_hyper_offset_table.domain_domain_flags)); ++ XEN_HYPER_PRI(fp, len, "domain_evtchn: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_evtchn)); ++ XEN_HYPER_PRI(fp, len, "domain_is_hvm: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_is_hvm)); ++ XEN_HYPER_PRI(fp, len, "domain_is_privileged: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_is_privileged)); ++ XEN_HYPER_PRI(fp, len, "domain_debugger_attached: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_debugger_attached)); ++ XEN_HYPER_PRI(fp, len, "domain_is_polling: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_is_polling)); ++ XEN_HYPER_PRI(fp, len, "domain_is_dying: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_is_dying)); ++ XEN_HYPER_PRI(fp, len, "domain_is_paused_by_controller: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_is_paused_by_controller)); ++ XEN_HYPER_PRI(fp, len, "domain_is_shutting_down: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_is_shutting_down)); ++ XEN_HYPER_PRI(fp, len, "domain_is_shut_down: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_is_shut_down)); ++ XEN_HYPER_PRI(fp, len, "domain_vcpu: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_vcpu)); ++ XEN_HYPER_PRI(fp, len, "domain_arch: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.domain_arch)); ++ ++#ifdef IA64 ++ XEN_HYPER_PRI(fp, len, "mm_struct_pgd: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.mm_struct_pgd)); ++#endif ++ ++ XEN_HYPER_PRI(fp, len, "schedule_data_schedule_lock: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_schedule_lock)); ++ XEN_HYPER_PRI(fp, len, "schedule_data_curr: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_curr)); ++ XEN_HYPER_PRI(fp, len, "schedule_data_idle: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_idle)); ++ XEN_HYPER_PRI(fp, len, "schedule_data_sched_priv: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_sched_priv)); ++ XEN_HYPER_PRI(fp, len, "schedule_data_s_timer: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_s_timer)); ++ XEN_HYPER_PRI(fp, len, "schedule_data_tick: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_tick)); ++ ++ XEN_HYPER_PRI(fp, len, "scheduler_name: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_name)); ++ XEN_HYPER_PRI(fp, len, "scheduler_opt_name: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_opt_name)); ++ XEN_HYPER_PRI(fp, len, "scheduler_sched_id: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_sched_id)); ++ XEN_HYPER_PRI(fp, len, "scheduler_init: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_init)); ++ XEN_HYPER_PRI(fp, len, "scheduler_tick: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_tick)); ++ XEN_HYPER_PRI(fp, len, "scheduler_init_vcpu: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_init_vcpu)); ++ XEN_HYPER_PRI(fp, len, "scheduler_destroy_domain: ", buf, flag, ++ (buf, "%ld\n", 
xen_hyper_offset_table.scheduler_destroy_domain)); ++ XEN_HYPER_PRI(fp, len, "scheduler_sleep: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_sleep)); ++ XEN_HYPER_PRI(fp, len, "scheduler_wake: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_wake)); ++ XEN_HYPER_PRI(fp, len, "scheduler_set_affinity: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_set_affinity)); ++ XEN_HYPER_PRI(fp, len, "scheduler_do_schedule: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_do_schedule)); ++ XEN_HYPER_PRI(fp, len, "scheduler_adjust: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_adjust)); ++ XEN_HYPER_PRI(fp, len, "scheduler_dump_settings: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_settings)); ++ XEN_HYPER_PRI(fp, len, "scheduler_dump_cpu_state: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_cpu_state)); ++ ++ XEN_HYPER_PRI(fp, len, "shared_info_vcpu_info: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.shared_info_vcpu_info)); ++ XEN_HYPER_PRI(fp, len, "shared_info_evtchn_pending: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_pending)); ++ XEN_HYPER_PRI(fp, len, "shared_info_evtchn_mask: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_mask)); ++ XEN_HYPER_PRI(fp, len, "shared_info_arch: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.shared_info_arch)); ++ ++ XEN_HYPER_PRI(fp, len, "timer_expires: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.timer_expires)); ++ XEN_HYPER_PRI(fp, len, "timer_cpu: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.timer_cpu)); ++ XEN_HYPER_PRI(fp, len, "timer_function: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.timer_function)); ++ XEN_HYPER_PRI(fp, len, "timer_data: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.timer_data)); ++ XEN_HYPER_PRI(fp, len, "timer_heap_offset: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.timer_heap_offset)); ++ XEN_HYPER_PRI(fp, len, "timer_killed: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.timer_killed)); ++ ++ XEN_HYPER_PRI(fp, len, "tss_struct_rsp0: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.tss_struct_rsp0)); ++ XEN_HYPER_PRI(fp, len, "tss_struct_esp0: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.tss_struct_esp0)); ++ ++ XEN_HYPER_PRI(fp, len, "vcpu_vcpu_id: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_id)); ++ XEN_HYPER_PRI(fp, len, "vcpu_processor: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_processor)); ++ XEN_HYPER_PRI(fp, len, "vcpu_vcpu_info: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_info)); ++ XEN_HYPER_PRI(fp, len, "vcpu_domain: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_domain)); ++ XEN_HYPER_PRI(fp, len, "vcpu_next_in_list: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_next_in_list)); ++ XEN_HYPER_PRI(fp, len, "vcpu_timer: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_timer)); ++ XEN_HYPER_PRI(fp, len, "vcpu_sleep_tick: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_sleep_tick)); ++ XEN_HYPER_PRI(fp, len, "vcpu_poll_timer: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_poll_timer)); ++ XEN_HYPER_PRI(fp, len, "vcpu_sched_priv: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_sched_priv)); ++ XEN_HYPER_PRI(fp, len, "vcpu_runstate: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate)); ++ 
XEN_HYPER_PRI(fp, len, "vcpu_runstate_guest: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_guest)); ++ XEN_HYPER_PRI(fp, len, "vcpu_vcpu_flags: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_flags)); ++ XEN_HYPER_PRI(fp, len, "vcpu_pause_count: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_pause_count)); ++ XEN_HYPER_PRI(fp, len, "vcpu_virq_to_evtchn: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_virq_to_evtchn)); ++ XEN_HYPER_PRI(fp, len, "vcpu_cpu_affinity: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_cpu_affinity)); ++ XEN_HYPER_PRI(fp, len, "vcpu_nmi_addr: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_nmi_addr)); ++ XEN_HYPER_PRI(fp, len, "vcpu_vcpu_dirty_cpumask: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_dirty_cpumask)); ++ XEN_HYPER_PRI(fp, len, "vcpu_arch: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_arch)); ++ XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state)); ++ XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state_entry_time: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state_entry_time)); ++ XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_time: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_time)); ++#ifdef IA64 ++ XEN_HYPER_PRI(fp, len, "vcpu_thread_ksp: ", buf, flag, ++ (buf, "%ld\n", xen_hyper_offset_table.vcpu_thread_ksp)); ++#endif ++} ++ ++/* ++ * dump specified memory with specified size. ++ */ ++#define DSP_BYTE_SIZE 16 ++ ++static void ++xen_hyper_dump_mem(void *mem, ulong len, int dsz) ++{ ++ long i, max; ++ void *mem_w = mem; ++ ++ if (!len || ++ (dsz != SIZEOF_8BIT && dsz != SIZEOF_16BIT && ++ dsz != SIZEOF_32BIT && dsz != SIZEOF_64BIT)) ++ return; ++ max = len / dsz + (len % dsz ? 1 : 0); ++ for (i = 0; i < max; i++) { ++ if (i != 0 && !(i % (DSP_BYTE_SIZE / dsz))) ++ fprintf(fp, "\n"); ++ if (i == 0 || !(i % (DSP_BYTE_SIZE / dsz))) ++ fprintf(fp, "%p : ", mem_w); ++ if (dsz == SIZEOF_8BIT) ++ fprintf(fp, "%02x ", *(uint8_t *)mem_w); ++ else if (dsz == SIZEOF_16BIT) ++ fprintf(fp, "%04x ", *(uint16_t *)mem_w); ++ else if (dsz == SIZEOF_32BIT) ++ fprintf(fp, "%08x ", *(uint32_t *)mem_w); ++ else if (dsz == SIZEOF_64BIT) ++ fprintf(fp, "%016llx ", *(unsigned long long *)mem_w); ++ mem_w = (char *)mem_w + dsz; ++ } ++ fprintf(fp, "\n"); ++} ++#endif +--- crash/xendump.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/xendump.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,2848 @@ ++/* ++ * xendump.c ++ * ++ * Copyright (C) 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2006, 2007, 2008 Red Hat, Inc. All rights reserved. ++ * ++ * This software may be freely redistributed under the terms of the ++ * GNU General Public License. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */ ++ ++#include "defs.h" ++#include "xendump.h" ++ ++static struct xendump_data xendump_data = { 0 }; ++static struct xendump_data *xd = &xendump_data; ++ ++static int xc_save_verify(char *); ++static int xc_core_verify(char *, char *); ++static int xc_save_read(void *, int, ulong, physaddr_t); ++static int xc_core_read(void *, int, ulong, physaddr_t); ++static int xc_core_mfns(ulong, FILE *); ++ ++static void poc_store(ulong, off_t); ++static off_t poc_get(ulong, int *); ++ ++static void xen_dump_vmconfig(FILE *); ++ ++static void xc_core_create_pfn_tables(void); ++static ulong xc_core_pfn_to_page_index(ulong); ++static int xc_core_pfn_valid(ulong); ++ ++static void xendump_print(char *fmt, ...); ++ ++static int xc_core_elf_verify(char *, char *); ++static void xc_core_elf_dump(void); ++static char *xc_core_elf_mfn_to_page(ulong, char *); ++static int xc_core_elf_mfn_to_page_index(ulong); ++static ulong xc_core_elf_pfn_valid(ulong); ++static ulong xc_core_elf_pfn_to_page_index(ulong); ++static void xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *); ++static void xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *); ++static void xc_core_dump_Elf32_Shdr(Elf32_Off offset, int); ++static void xc_core_dump_Elf64_Shdr(Elf64_Off offset, int); ++static char *xc_core_strtab(uint32_t, char *); ++static void xc_core_dump_elfnote(off_t, size_t, int); ++static void xc_core_elf_pfn_init(void); ++ ++#define ELFSTORE 1 ++#define ELFREAD 0 ++ ++/* ++ * Determine whether a file is a xendump creation, and if TRUE, ++ * initialize the xendump_data structure. ++ */ ++int ++is_xendump(char *file) ++{ ++ int verified; ++ char buf[BUFSIZE]; ++ ++ if ((xd->xfd = open(file, O_RDWR)) < 0) { ++ if ((xd->xfd = open(file, O_RDONLY)) < 0) { ++ sprintf(buf, "%s: open", file); ++ perror(buf); ++ return FALSE; ++ } ++ } ++ ++ if (read(xd->xfd, buf, BUFSIZE) != BUFSIZE) ++ return FALSE; ++ ++ if (machine_type("X86") || machine_type("X86_64")) ++ xd->page_size = 4096; ++ else if (machine_type("IA64") && !machdep->pagesize) ++ xd->page_size = 16384; ++ else ++ xd->page_size = machdep->pagesize; ++ ++ verified = xc_save_verify(buf) || xc_core_verify(file, buf); ++ ++ if (!verified) ++ close(xd->xfd); ++ ++ return (verified); ++} ++ ++/* ++ * Verify whether the dump was created by the xc_domain_dumpcore() ++ * library function in libxc/xc_core.c. 
++ */ ++static int ++xc_core_verify(char *file, char *buf) ++{ ++ struct xc_core_header *xcp; ++ ++ xcp = (struct xc_core_header *)buf; ++ ++ if (xc_core_elf_verify(file, buf)) ++ return TRUE; ++ ++ if ((xcp->xch_magic != XC_CORE_MAGIC) && ++ (xcp->xch_magic != XC_CORE_MAGIC_HVM)) ++ return FALSE; ++ ++ if (!xcp->xch_nr_vcpus) { ++ error(INFO, ++ "faulty xc_core dump file header: xch_nr_vcpus is 0\n\n"); ++ ++ fprintf(stderr, " xch_magic: %x\n", xcp->xch_magic); ++ fprintf(stderr, " xch_nr_vcpus: %d\n", xcp->xch_nr_vcpus); ++ fprintf(stderr, " xch_nr_pages: %d\n", xcp->xch_nr_pages); ++ fprintf(stderr, " xch_ctxt_offset: %d\n", xcp->xch_ctxt_offset); ++ fprintf(stderr, " xch_index_offset: %d\n", xcp->xch_index_offset); ++ fprintf(stderr, " xch_pages_offset: %d\n\n", xcp->xch_pages_offset); ++ ++ clean_exit(1); ++ } ++ ++ BCOPY(xcp, &xd->xc_core.header, ++ sizeof(struct xc_core_header)); ++ ++ xd->flags |= (XENDUMP_LOCAL | XC_CORE_ORIG | XC_CORE_P2M_CREATE); ++ ++ if (xc_core_mfns(XC_CORE_64BIT_HOST, stderr)) ++ xd->flags |= XC_CORE_64BIT_HOST; ++ ++ if (!xd->page_size) ++ error(FATAL, ++ "unknown page size: use -p command line option\n"); ++ ++ if (!(xd->page = (char *)malloc(xd->page_size))) ++ error(FATAL, "cannot malloc page space."); ++ ++ if (!(xd->poc = (struct pfn_offset_cache *)calloc ++ (PFN_TO_OFFSET_CACHE_ENTRIES, ++ sizeof(struct pfn_offset_cache)))) ++ error(FATAL, "cannot malloc pfn_offset_cache\n"); ++ xd->last_pfn = ~(0UL); ++ ++ if (CRASHDEBUG(1)) ++ xendump_memory_dump(stderr); ++ ++ return TRUE; ++} ++ ++/* ++ * Do the work for read_xendump() for the XC_CORE dumpfile format. ++ */ ++static int ++xc_core_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr) ++{ ++ ulong pfn, page_index; ++ off_t offset; ++ int redundant; ++ ++ if (xd->flags & (XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE)) ++ xc_core_create_pfn_tables(); ++ ++ pfn = (ulong)BTOP(paddr); ++ ++ if ((offset = poc_get(pfn, &redundant))) { ++ if (!redundant) { ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ return SEEK_ERROR; ++ if (read(xd->xfd, xd->page, xd->page_size) != ++ xd->page_size) ++ return READ_ERROR; ++ xd->last_pfn = pfn; ++ } ++ ++ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); ++ return cnt; ++ } ++ ++ if ((page_index = xc_core_pfn_to_page_index(pfn)) == ++ PFN_NOT_FOUND) ++ return READ_ERROR; ++ ++ offset = (off_t)xd->xc_core.header.xch_pages_offset + ++ ((off_t)(page_index) * (off_t)xd->page_size); ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ return SEEK_ERROR; ++ ++ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) ++ return READ_ERROR; ++ ++ poc_store(pfn, offset); ++ ++ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); ++ ++ return cnt; ++} ++ ++/* ++ * Verify whether the dumpfile was created by the "xm save" facility. ++ * This gets started by the "save" function in XendCheckpoint.py, and ++ * then by xc_save.c, with the work done in the xc_linux_save() library ++ * function in libxc/xc_linux_save.c. ++ */ ++ ++#define MAX_BATCH_SIZE 1024 ++/* ++ * Number of P2M entries in a page. ++ */ ++#define ULPP (xd->page_size/sizeof(unsigned long)) ++/* ++ * Number of P2M entries in the pfn_to_mfn_frame_list. ++ */ ++#define P2M_FL_ENTRIES (((xd->xc_save.nr_pfns)+ULPP-1)/ULPP) ++/* ++ * Size in bytes of the pfn_to_mfn_frame_list. 
++ */ ++#define P2M_FL_SIZE ((P2M_FL_ENTRIES)*sizeof(unsigned long)) ++ ++#define XTAB (0xf<<28) /* invalid page */ ++#define LTAB_MASK XTAB ++ ++static int ++xc_save_verify(char *buf) ++{ ++ int i, batch_count, done_batch, *intptr; ++ ulong flags, *ulongptr; ++ ulong batch_index, total_pages_read; ++ ulong N; ++ ++ if (!STRNEQ(buf, XC_SAVE_SIGNATURE)) ++ return FALSE; ++ ++ if (lseek(xd->xfd, strlen(XC_SAVE_SIGNATURE), SEEK_SET) == -1) ++ return FALSE; ++ ++ flags = XC_SAVE; ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(stderr, "\"%s\"\n", buf); ++ fprintf(stderr, "endian: %d %s\n", __BYTE_ORDER, ++ __BYTE_ORDER == __BIG_ENDIAN ? "__BIG_ENDIAN" : ++ (__BYTE_ORDER == __LITTLE_ENDIAN ? ++ "__LITTLE_ENDIAN" : "???")); ++ } ++ ++ /* ++ * size of vmconfig data structure (big-endian) ++ */ ++ if (read(xd->xfd, buf, sizeof(int)) != sizeof(int)) ++ return FALSE; ++ ++ intptr = (int *)buf; ++ ++ if (CRASHDEBUG(1) && BYTE_SWAP_REQUIRED(__BIG_ENDIAN)) { ++ fprintf(stderr, "byte-swap required for this:\n"); ++ for (i = 0; i < sizeof(int); i++) ++ fprintf(stderr, "[%x]", buf[i] & 0xff); ++ fprintf(stderr, ": %x -> ", *intptr); ++ } ++ ++ xd->xc_save.vmconfig_size = swab32(*intptr); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, "%x\n", xd->xc_save.vmconfig_size); ++ ++ if (!(xd->xc_save.vmconfig_buf = (char *)malloc ++ (xd->xc_save.vmconfig_size))) ++ error(FATAL, "cannot malloc xc_save vmconfig space."); ++ ++ if (!xd->page_size) ++ error(FATAL, ++ "unknown page size: use -p command line option\n"); ++ ++ if (!(xd->page = (char *)malloc(xd->page_size))) ++ error(FATAL, "cannot malloc page space."); ++ ++ if (!(xd->poc = (struct pfn_offset_cache *)calloc ++ (PFN_TO_OFFSET_CACHE_ENTRIES, ++ sizeof(struct pfn_offset_cache)))) ++ error(FATAL, "cannot malloc pfn_offset_cache\n"); ++ xd->last_pfn = ~(0UL); ++ ++ if (!(xd->xc_save.region_pfn_type = (ulong *)calloc ++ (MAX_BATCH_SIZE, sizeof(ulong)))) ++ error(FATAL, "cannot malloc region_pfn_type\n"); ++ ++ if (read(xd->xfd, xd->xc_save.vmconfig_buf, ++ xd->xc_save.vmconfig_size) != xd->xc_save.vmconfig_size) ++ goto xc_save_bailout; ++ ++ /* ++ * nr_pfns (native byte order) ++ */ ++ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) ++ goto xc_save_bailout; ++ ++ ulongptr = (ulong *)buf; ++ ++ if (CRASHDEBUG(1)) { ++ for (i = 0; i < sizeof(ulong); i++) ++ fprintf(stderr, "[%x]", buf[i] & 0xff); ++ fprintf(stderr, ": %lx (nr_pfns)\n", *ulongptr); ++ } ++ ++ xd->xc_save.nr_pfns = *ulongptr; ++ ++ if (machine_type("IA64")) ++ goto xc_save_ia64; ++ ++ /* ++ * Get a local copy of the live_P2M_frame_list ++ */ ++ if (!(xd->xc_save.p2m_frame_list = (unsigned long *)malloc(P2M_FL_SIZE))) ++ error(FATAL, "cannot allocate p2m_frame_list array"); ++ ++ if (!(xd->xc_save.batch_offsets = (off_t *)calloc((size_t)P2M_FL_ENTRIES, ++ sizeof(off_t)))) ++ error(FATAL, "cannot allocate batch_offsets array"); ++ ++ xd->xc_save.batch_count = P2M_FL_ENTRIES; ++ ++ if (read(xd->xfd, xd->xc_save.p2m_frame_list, P2M_FL_SIZE) != ++ P2M_FL_SIZE) ++ goto xc_save_bailout; ++ ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, "pre-batch file pointer: %lld\n", ++ (ulonglong)lseek(xd->xfd, 0L, SEEK_CUR)); ++ ++ /* ++ * ... ++ * int batch_count ++ * ulong region pfn_type[batch_count] ++ * page 0 ++ * page 1 ++ * ... 
++ * page batch_count-1 ++ * (repeat) ++ */ ++ ++ total_pages_read = 0; ++ batch_index = 0; ++ done_batch = FALSE; ++ ++ while (!done_batch) { ++ ++ xd->xc_save.batch_offsets[batch_index] = (off_t) ++ lseek(xd->xfd, 0L, SEEK_CUR); ++ ++ if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int)) ++ goto xc_save_bailout; ++ ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, "batch[%ld]: %d ", ++ batch_index, batch_count); ++ ++ batch_index++; ++ ++ if (batch_index >= P2M_FL_ENTRIES) { ++ fprintf(stderr, "more than %ld batches encountered?\n", ++ P2M_FL_ENTRIES); ++ goto xc_save_bailout; ++ } ++ ++ switch (batch_count) ++ { ++ case 0: ++ if (CRASHDEBUG(1)) { ++ fprintf(stderr, ++ ": Batch work is done: %ld pages read (P2M_FL_ENTRIES: %ld)\n", ++ total_pages_read, P2M_FL_ENTRIES); ++ } ++ done_batch = TRUE; ++ continue; ++ ++ case -1: ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, ": Entering page verify mode\n"); ++ continue; ++ ++ default: ++ if (batch_count > MAX_BATCH_SIZE) { ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, ++ ": Max batch size exceeded. Giving up.\n"); ++ done_batch = TRUE; ++ continue; ++ } ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, "\n"); ++ break; ++ } ++ ++ if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != ++ batch_count * sizeof(ulong)) ++ goto xc_save_bailout; ++ ++ for (i = 0; i < batch_count; i++) { ++ unsigned long pagetype; ++ unsigned long pfn; ++ ++ pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK; ++ pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK; ++ ++ if (pagetype == XTAB) ++ /* a bogus/unmapped page: skip it */ ++ continue; ++ ++ if (pfn > xd->xc_save.nr_pfns) { ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, ++ "batch_count: %d pfn %ld out of range", ++ batch_count, pfn); ++ } ++ ++ if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) ++ goto xc_save_bailout; ++ ++ total_pages_read++; ++ } ++ } ++ ++ /* ++ * Get the list of PFNs that are not in the psuedo-phys map ++ */ ++ if (read(xd->xfd, &xd->xc_save.pfns_not, ++ sizeof(xd->xc_save.pfns_not)) != sizeof(xd->xc_save.pfns_not)) ++ goto xc_save_bailout; ++ ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, "PFNs not in pseudo-phys map: %d\n", ++ xd->xc_save.pfns_not); ++ ++ if ((total_pages_read + xd->xc_save.pfns_not) != ++ xd->xc_save.nr_pfns) ++ error(WARNING, ++ "nr_pfns: %ld != (total pages: %ld + pages not saved: %d)\n", ++ xd->xc_save.nr_pfns, total_pages_read, ++ xd->xc_save.pfns_not); ++ ++ xd->xc_save.pfns_not_offset = lseek(xd->xfd, 0L, SEEK_CUR); ++ ++ if (lseek(xd->xfd, sizeof(ulong) * xd->xc_save.pfns_not, SEEK_CUR) == -1) ++ goto xc_save_bailout; ++ ++ xd->xc_save.vcpu_ctxt_offset = lseek(xd->xfd, 0L, SEEK_CUR); ++ ++ lseek(xd->xfd, 0, SEEK_END); ++ lseek(xd->xfd, -((off_t)(xd->page_size)), SEEK_CUR); ++ ++ xd->xc_save.shared_info_page_offset = lseek(xd->xfd, 0L, SEEK_CUR); ++ ++ xd->flags |= (XENDUMP_LOCAL | flags); ++ kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); ++ ++ if (CRASHDEBUG(1)) ++ xendump_memory_dump(stderr); ++ ++ return TRUE; ++ ++xc_save_ia64: ++ ++ /* ++ * Completely different format for ia64: ++ * ++ * ... ++ * pfn # ++ * page data ++ * pfn # ++ * page data ++ * ... 
++ */ ++ free(xd->poc); ++ xd->poc = NULL; ++ free(xd->xc_save.region_pfn_type); ++ xd->xc_save.region_pfn_type = NULL; ++ ++ if (!(xd->xc_save.ia64_page_offsets = ++ (ulong *)calloc(xd->xc_save.nr_pfns, sizeof(off_t)))) ++ error(FATAL, "cannot allocate ia64_page_offsets array"); ++ ++ /* ++ * version ++ */ ++ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) ++ goto xc_save_bailout; ++ ++ xd->xc_save.ia64_version = *((ulong *)buf); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, "ia64 version: %lx\n", ++ xd->xc_save.ia64_version); ++ ++ /* ++ * xen_domctl_arch_setup structure ++ */ ++ if (read(xd->xfd, buf, sizeof(xen_domctl_arch_setup_t)) != ++ sizeof(xen_domctl_arch_setup_t)) ++ goto xc_save_bailout; ++ ++ if (CRASHDEBUG(1)) { ++ xen_domctl_arch_setup_t *setup = ++ (xen_domctl_arch_setup_t *)buf; ++ ++ fprintf(stderr, "xen_domctl_arch_setup:\n"); ++ fprintf(stderr, " flags: %lx\n", (ulong)setup->flags); ++ fprintf(stderr, " bp: %lx\n", (ulong)setup->bp); ++ fprintf(stderr, " maxmem: %lx\n", (ulong)setup->maxmem); ++ fprintf(stderr, " xsi_va: %lx\n", (ulong)setup->xsi_va); ++ fprintf(stderr, "hypercall_imm: %x\n", setup->hypercall_imm); ++ } ++ ++ for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { ++ if (read(xd->xfd, &N, sizeof(N)) != sizeof(N)) ++ goto xc_save_bailout; ++ ++ if (N < xd->xc_save.nr_pfns) ++ xd->xc_save.ia64_page_offsets[N] = ++ lseek(xd->xfd, 0, SEEK_CUR); ++ else ++ error(WARNING, ++ "[%d]: pfn of %lx (0x%lx) in ia64 canonical page list exceeds %ld\n", ++ i, N, N, xd->xc_save.nr_pfns); ++ ++ if (CRASHDEBUG(1)) { ++ if ((i < 10) || (N >= (xd->xc_save.nr_pfns-10))) ++ fprintf(stderr, "[%d]: %ld\n%s", i, N, ++ i == 9 ? "...\n" : ""); ++ } ++ ++ if ((N+1) >= xd->xc_save.nr_pfns) ++ break; ++ ++ if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) ++ goto xc_save_bailout; ++ } ++ ++ if (CRASHDEBUG(1)) { ++ for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { ++ if (!xd->xc_save.ia64_page_offsets[i]) ++ N++; ++ } ++ fprintf(stderr, "%ld out of %ld pfns not dumped\n", ++ N, xd->xc_save.nr_pfns); ++ } ++ ++ xd->flags |= (XENDUMP_LOCAL | flags | XC_SAVE_IA64); ++ kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); ++ ++ if (CRASHDEBUG(1)) ++ xendump_memory_dump(stderr); ++ ++ return TRUE; ++ ++xc_save_bailout: ++ ++ error(INFO, ++ "xc_save_verify: \"LinuxGuestRecord\" file handling/format error\n"); ++ ++ if (xd->xc_save.p2m_frame_list) { ++ free(xd->xc_save.p2m_frame_list); ++ xd->xc_save.p2m_frame_list = NULL; ++ } ++ if (xd->xc_save.batch_offsets) { ++ free(xd->xc_save.batch_offsets); ++ xd->xc_save.batch_offsets = NULL; ++ } ++ if (xd->xc_save.vmconfig_buf) { ++ free(xd->xc_save.vmconfig_buf); ++ xd->xc_save.vmconfig_buf = NULL; ++ } ++ if (xd->page) { ++ free(xd->page); ++ xd->page = NULL; ++ } ++ ++ return FALSE; ++} ++ ++/* ++ * Do the work for read_xendump() for the XC_SAVE dumpfile format. 
++ */ ++static int ++xc_save_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr) ++{ ++ int b, i, redundant; ++ ulong reqpfn; ++ int batch_count; ++ off_t file_offset; ++ ++ reqpfn = (ulong)BTOP(paddr); ++ ++ if (CRASHDEBUG(8)) ++ fprintf(xd->ofp, ++ "xc_save_read(bufptr: %lx cnt: %d addr: %lx paddr: %llx (%ld, 0x%lx)\n", ++ (ulong)bufptr, cnt, addr, (ulonglong)paddr, reqpfn, reqpfn); ++ ++ if (xd->flags & XC_SAVE_IA64) { ++ if (reqpfn >= xd->xc_save.nr_pfns) { ++ if (CRASHDEBUG(1)) ++ fprintf(xd->ofp, ++ "xc_save_read: pfn %lx too large: nr_pfns: %lx\n", ++ reqpfn, xd->xc_save.nr_pfns); ++ return SEEK_ERROR; ++ } ++ ++ file_offset = xd->xc_save.ia64_page_offsets[reqpfn]; ++ if (!file_offset) { ++ if (CRASHDEBUG(1)) ++ fprintf(xd->ofp, ++ "xc_save_read: pfn %lx not stored in xendump\n", ++ reqpfn); ++ return SEEK_ERROR; ++ } ++ ++ if (reqpfn != xd->last_pfn) { ++ if (lseek(xd->xfd, file_offset, SEEK_SET) == -1) ++ return SEEK_ERROR; ++ ++ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) ++ return READ_ERROR; ++ } else { ++ xd->redundant++; ++ xd->cache_hits++; ++ } ++ ++ xd->accesses++; ++ xd->last_pfn = reqpfn; ++ ++ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); ++ return cnt; ++ } ++ ++ if ((file_offset = poc_get(reqpfn, &redundant))) { ++ if (!redundant) { ++ if (lseek(xd->xfd, file_offset, SEEK_SET) == -1) ++ return SEEK_ERROR; ++ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) ++ return READ_ERROR; ++ xd->last_pfn = reqpfn; ++ } else if (CRASHDEBUG(1)) ++ console("READ %ld (0x%lx) skipped!\n", reqpfn, reqpfn); ++ ++ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); ++ return cnt; ++ } ++ ++ /* ++ * ... ++ * int batch_count ++ * ulong region pfn_type[batch_count] ++ * page 0 ++ * page 1 ++ * ... ++ * page batch_count-1 ++ * (repeat) ++ */ ++ for (b = 0; b < xd->xc_save.batch_count; b++) { ++ ++ if (lseek(xd->xfd, xd->xc_save.batch_offsets[b], SEEK_SET) == -1) ++ return SEEK_ERROR; ++ ++ if (CRASHDEBUG(8)) ++ fprintf(xd->ofp, "check batch[%d]: offset: %llx\n", ++ b, (ulonglong)xd->xc_save.batch_offsets[b]); ++ ++ if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int)) ++ return READ_ERROR; ++ ++ switch (batch_count) ++ { ++ case 0: ++ if (CRASHDEBUG(1)) { ++ fprintf(xd->ofp, ++ "batch[%d]: has count of zero -- bailing out on pfn %ld\n", ++ b, reqpfn); ++ } ++ return READ_ERROR; ++ ++ case -1: ++ return READ_ERROR; ++ ++ default: ++ if (CRASHDEBUG(8)) ++ fprintf(xd->ofp, ++ "batch[%d]: offset: %llx batch count: %d\n", ++ b, (ulonglong)xd->xc_save.batch_offsets[b], ++ batch_count); ++ break; ++ } ++ ++ if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != ++ batch_count * sizeof(ulong)) ++ return READ_ERROR; ++ ++ for (i = 0; i < batch_count; i++) { ++ unsigned long pagetype; ++ unsigned long pfn; ++ ++ pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK; ++ pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK; ++ ++ if (pagetype == XTAB) ++ /* a bogus/unmapped page: skip it */ ++ continue; ++ ++ if (pfn > xd->xc_save.nr_pfns) { ++ if (CRASHDEBUG(1)) ++ fprintf(stderr, ++ "batch_count: %d pfn %ld out of range", ++ batch_count, pfn); ++ } ++ ++ if (pfn == reqpfn) { ++ file_offset = lseek(xd->xfd, 0, SEEK_CUR); ++ poc_store(pfn, file_offset); ++ ++ if (read(xd->xfd, xd->page, xd->page_size) != ++ xd->page_size) ++ return READ_ERROR; ++ ++ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); ++ return cnt; ++ } ++ ++ if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) ++ return SEEK_ERROR; ++ } ++ } ++ ++ return READ_ERROR; ++} ++ 
++/* ++ * Stash a pfn's offset. If they're all in use, put it in the ++ * least-used slot that's closest to the beginning of the array. ++ */ ++static void ++poc_store(ulong pfn, off_t file_offset) ++{ ++ int i; ++ struct pfn_offset_cache *poc, *plow; ++ ulong curlow; ++ ++ curlow = ~(0UL); ++ plow = NULL; ++ poc = xd->poc; ++ ++ for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { ++ if (poc->cnt == 0) { ++ poc->cnt = 1; ++ poc->pfn = pfn; ++ poc->file_offset = file_offset; ++ xd->last_pfn = pfn; ++ return; ++ } ++ ++ if (poc->cnt < curlow) { ++ curlow = poc->cnt; ++ plow = poc; ++ } ++ } ++ ++ plow->cnt = 1; ++ plow->pfn = pfn; ++ plow->file_offset = file_offset; ++ xd->last_pfn = pfn; ++} ++ ++/* ++ * Check whether a pfn's offset has been cached. ++ */ ++static off_t ++poc_get(ulong pfn, int *redundant) ++{ ++ int i; ++ struct pfn_offset_cache *poc; ++ ++ xd->accesses++; ++ ++ if (pfn == xd->last_pfn) { ++ xd->redundant++; ++ *redundant = TRUE; ++ return 1; ++ } else ++ *redundant = FALSE; ++ ++ poc = xd->poc; ++ ++ for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { ++ if (poc->cnt && (poc->pfn == pfn)) { ++ poc->cnt++; ++ xd->cache_hits++; ++ return poc->file_offset; ++ } ++ } ++ ++ return 0; ++} ++ ++ ++/* ++ * Perform any post-dumpfile determination stuff here. ++ */ ++int ++xendump_init(char *unused, FILE *fptr) ++{ ++ if (!XENDUMP_VALID()) ++ return FALSE; ++ ++ xd->ofp = fptr; ++ return TRUE; ++} ++ ++int ++read_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) ++{ ++ if (pc->curcmd_flags & XEN_MACHINE_ADDR) ++ return READ_ERROR; ++ ++ switch (xd->flags & (XC_SAVE|XC_CORE_ORIG|XC_CORE_ELF)) ++ { ++ case XC_SAVE: ++ return xc_save_read(bufptr, cnt, addr, paddr); ++ ++ case XC_CORE_ORIG: ++ case XC_CORE_ELF: ++ return xc_core_read(bufptr, cnt, addr, paddr); ++ ++ default: ++ return READ_ERROR; ++ } ++} ++ ++int ++read_xendump_hyper(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) ++{ ++ ulong pfn, page_index; ++ off_t offset; ++ ++ pfn = (ulong)BTOP(paddr); ++ ++ /* ODA: pfn == mfn !!! */ ++ if ((page_index = xc_core_mfn_to_page_index(pfn)) == PFN_NOT_FOUND) ++ return READ_ERROR; ++ ++ offset = (off_t)xd->xc_core.header.xch_pages_offset + ++ ((off_t)(page_index) * (off_t)xd->page_size); ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ return SEEK_ERROR; ++ ++ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) ++ return READ_ERROR; ++ ++ BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); ++ ++ return cnt; ++} ++ ++int ++write_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) ++{ ++ return WRITE_ERROR; ++} ++ ++uint ++xendump_page_size(void) ++{ ++ if (!XENDUMP_VALID()) ++ return 0; ++ ++ return xd->page_size; ++} ++ ++/* ++ * xendump_free_memory(), and xendump_memory_used() ++ * are debug only, and typically unnecessary to implement. ++ */ ++int ++xendump_free_memory(void) ++{ ++ return 0; ++} ++ ++int ++xendump_memory_used(void) ++{ ++ return 0; ++} ++ ++/* ++ * This function is dump-type independent, used here to ++ * to dump the xendump_data structure contents. ++ */ ++int ++xendump_memory_dump(FILE *fp) ++{ ++ int i, linefeed, used, others; ++ ulong *ulongptr; ++ Elf32_Off offset32; ++ Elf64_Off offset64; ++ FILE *fpsave; ++ ++ fprintf(fp, " flags: %lx (", xd->flags); ++ others = 0; ++ if (xd->flags & XENDUMP_LOCAL) ++ fprintf(fp, "%sXENDUMP_LOCAL", others++ ? "|" : ""); ++ if (xd->flags & XC_SAVE) ++ fprintf(fp, "%sXC_SAVE", others++ ? 
"|" : ""); ++ if (xd->flags & XC_CORE_ORIG) ++ fprintf(fp, "%sXC_CORE_ORIG", others++ ? "|" : ""); ++ if (xd->flags & XC_CORE_ELF) ++ fprintf(fp, "%sXC_CORE_ELF", others++ ? "|" : ""); ++ if (xd->flags & XC_CORE_P2M_CREATE) ++ fprintf(fp, "%sXC_CORE_P2M_CREATE", others++ ? "|" : ""); ++ if (xd->flags & XC_CORE_PFN_CREATE) ++ fprintf(fp, "%sXC_CORE_PFN_CREATE", others++ ? "|" : ""); ++ if (xd->flags & XC_CORE_NO_P2M) ++ fprintf(fp, "%sXC_CORE_NO_P2M", others++ ? "|" : ""); ++ if (xd->flags & XC_SAVE_IA64) ++ fprintf(fp, "%sXC_SAVE_IA64", others++ ? "|" : ""); ++ if (xd->flags & XC_CORE_64BIT_HOST) ++ fprintf(fp, "%sXC_CORE_64BIT_HOST", others++ ? "|" : ""); ++ fprintf(fp, ")\n"); ++ fprintf(fp, " xfd: %d\n", xd->xfd); ++ fprintf(fp, " page_size: %d\n", xd->page_size); ++ fprintf(fp, " ofp: %lx\n", (ulong)xd->ofp); ++ fprintf(fp, " page: %lx\n", (ulong)xd->page); ++ fprintf(fp, " panic_pc: %lx\n", xd->panic_pc); ++ fprintf(fp, " panic_sp: %lx\n", xd->panic_sp); ++ fprintf(fp, " accesses: %ld\n", (ulong)xd->accesses); ++ fprintf(fp, " cache_hits: %ld ", (ulong)xd->cache_hits); ++ if (xd->accesses) ++ fprintf(fp, "(%ld%%)\n", xd->cache_hits * 100 / xd->accesses); ++ else ++ fprintf(fp, "\n"); ++ fprintf(fp, " last_pfn: %ld\n", xd->last_pfn); ++ fprintf(fp, " redundant: %ld ", (ulong)xd->redundant); ++ if (xd->accesses) ++ fprintf(fp, "(%ld%%)\n", xd->redundant * 100 / xd->accesses); ++ else ++ fprintf(fp, "\n"); ++ for (i = used = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) ++ if (xd->poc && xd->poc[i].cnt) ++ used++; ++ if (xd->poc) ++ fprintf(fp, " poc[%d]: %lx %s", PFN_TO_OFFSET_CACHE_ENTRIES, ++ (ulong)xd->poc, xd->poc ? "" : "(none)"); ++ else ++ fprintf(fp, " poc[0]: (unused)\n"); ++ for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) { ++ if (!xd->poc) ++ break; ++ if (!xd->poc[i].cnt) { ++ if (!i) ++ fprintf(fp, "(none used)\n"); ++ break; ++ } else if (!i) ++ fprintf(fp, "(%d used)\n", used); ++ if (CRASHDEBUG(2)) ++ fprintf(fp, ++ " [%d]: pfn: %ld (0x%lx) count: %ld file_offset: %llx\n", ++ i, ++ xd->poc[i].pfn, ++ xd->poc[i].pfn, ++ xd->poc[i].cnt, ++ (ulonglong)xd->poc[i].file_offset); ++ } ++ if (!xd->poc) ++ fprintf(fp, "\n"); ++ ++ fprintf(fp, "\n xc_save:\n"); ++ fprintf(fp, " nr_pfns: %ld (0x%lx)\n", ++ xd->xc_save.nr_pfns, xd->xc_save.nr_pfns); ++ fprintf(fp, " vmconfig_size: %d (0x%x)\n", xd->xc_save.vmconfig_size, ++ xd->xc_save.vmconfig_size); ++ fprintf(fp, " vmconfig_buf: %lx\n", (ulong)xd->xc_save.vmconfig_buf); ++ if (xd->flags & XC_SAVE) ++ xen_dump_vmconfig(fp); ++ fprintf(fp, " p2m_frame_list: %lx ", (ulong)xd->xc_save.p2m_frame_list); ++ if ((xd->flags & XC_SAVE) && xd->xc_save.p2m_frame_list) { ++ fprintf(fp, "\n"); ++ ulongptr = xd->xc_save.p2m_frame_list; ++ for (i = 0; i < P2M_FL_ENTRIES; i++, ulongptr++) ++ fprintf(fp, "%ld ", *ulongptr); ++ fprintf(fp, "\n"); ++ } else ++ fprintf(fp, "(none)\n"); ++ fprintf(fp, " pfns_not: %d\n", xd->xc_save.pfns_not); ++ fprintf(fp, " pfns_not_offset: %lld\n", ++ (ulonglong)xd->xc_save.pfns_not_offset); ++ fprintf(fp, " vcpu_ctxt_offset: %lld\n", ++ (ulonglong)xd->xc_save.vcpu_ctxt_offset); ++ fprintf(fp, " shared_info_page_offset: %lld\n", ++ (ulonglong)xd->xc_save.shared_info_page_offset); ++ fprintf(fp, " region_pfn_type: %lx\n", (ulong)xd->xc_save.region_pfn_type); ++ fprintf(fp, " batch_count: %ld\n", (ulong)xd->xc_save.batch_count); ++ fprintf(fp, " batch_offsets: %lx %s\n", ++ (ulong)xd->xc_save.batch_offsets, ++ xd->xc_save.batch_offsets ? 
"" : "(none)"); ++ for (i = linefeed = 0; i < xd->xc_save.batch_count; i++) { ++ fprintf(fp, "[%d]: %llx ", i, ++ (ulonglong)xd->xc_save.batch_offsets[i]); ++ if (((i+1)%4) == 0) { ++ fprintf(fp, "\n"); ++ linefeed = FALSE; ++ } else ++ linefeed = TRUE; ++ } ++ if (linefeed) ++ fprintf(fp, "\n"); ++ fprintf(fp, " ia64_version: %ld\n", (ulong)xd->xc_save.ia64_version); ++ fprintf(fp, " ia64_page_offsets: %lx ", (ulong)xd->xc_save.ia64_page_offsets); ++ if (xd->xc_save.ia64_page_offsets) ++ fprintf(fp, "(%ld entries)\n\n", xd->xc_save.nr_pfns); ++ else ++ fprintf(fp, "(none)\n\n"); ++ ++ fprintf(fp, " xc_core:\n"); ++ fprintf(fp, " header:\n"); ++ fprintf(fp, " xch_magic: %x ", ++ xd->xc_core.header.xch_magic); ++ if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC) ++ fprintf(fp, "(XC_CORE_MAGIC)\n"); ++ else if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC_HVM) ++ fprintf(fp, "(XC_CORE_MAGIC_HVM)\n"); ++ else ++ fprintf(fp, "(unknown)\n"); ++ fprintf(fp, " xch_nr_vcpus: %d\n", ++ xd->xc_core.header.xch_nr_vcpus); ++ fprintf(fp, " xch_nr_pages: %d (0x%x)\n", ++ xd->xc_core.header.xch_nr_pages, ++ xd->xc_core.header.xch_nr_pages); ++ fprintf(fp, " xch_ctxt_offset: %d (0x%x)\n", ++ xd->xc_core.header.xch_ctxt_offset, ++ xd->xc_core.header.xch_ctxt_offset); ++ fprintf(fp, " xch_index_offset: %d (0x%x)\n", ++ xd->xc_core.header.xch_index_offset, ++ xd->xc_core.header.xch_index_offset); ++ fprintf(fp, " xch_pages_offset: %d (0x%x)\n", ++ xd->xc_core.header.xch_pages_offset, ++ xd->xc_core.header.xch_pages_offset); ++ ++ fprintf(fp, " elf_class: %s\n", xd->xc_core.elf_class == ELFCLASS64 ? "ELFCLASS64" : ++ xd->xc_core.elf_class == ELFCLASS32 ? "ELFCLASS32" : "n/a"); ++ fprintf(fp, " elf_strtab_offset: %lld (0x%llx)\n", ++ (ulonglong)xd->xc_core.elf_strtab_offset, ++ (ulonglong)xd->xc_core.elf_strtab_offset); ++ fprintf(fp, " format_version: %016llx\n", ++ (ulonglong)xd->xc_core.format_version); ++ fprintf(fp, " shared_info_offset: %lld (0x%llx)\n", ++ (ulonglong)xd->xc_core.shared_info_offset, ++ (ulonglong)xd->xc_core.shared_info_offset); ++ if (machine_type("IA64")) ++ fprintf(fp, " ia64_mapped_regs_offset: %lld (0x%llx)\n", ++ (ulonglong)xd->xc_core.ia64_mapped_regs_offset, ++ (ulonglong)xd->xc_core.ia64_mapped_regs_offset); ++ fprintf(fp, " elf_index_pfn[%d]: %s", INDEX_PFN_COUNT, ++ xd->xc_core.elf_class ? "\n" : "(none used)\n"); ++ if (xd->xc_core.elf_class) { ++ for (i = 0; i < INDEX_PFN_COUNT; i++) { ++ fprintf(fp, "%ld:%ld ", ++ xd->xc_core.elf_index_pfn[i].index, ++ xd->xc_core.elf_index_pfn[i].pfn); ++ } ++ fprintf(fp, "\n"); ++ } ++ fprintf(fp, " last_batch:\n"); ++ fprintf(fp, " index: %ld (%ld - %ld)\n", ++ xd->xc_core.last_batch.index, ++ xd->xc_core.last_batch.start, xd->xc_core.last_batch.end); ++ fprintf(fp, " accesses: %ld\n", ++ xd->xc_core.last_batch.accesses); ++ fprintf(fp, " duplicates: %ld ", ++ xd->xc_core.last_batch.duplicates); ++ if (xd->xc_core.last_batch.accesses) ++ fprintf(fp, "(%ld%%)\n", ++ xd->xc_core.last_batch.duplicates * 100 / ++ xd->xc_core.last_batch.accesses); ++ else ++ fprintf(fp, "\n"); ++ ++ fprintf(fp, " elf32: %lx\n", (ulong)xd->xc_core.elf32); ++ fprintf(fp, " elf64: %lx\n", (ulong)xd->xc_core.elf64); ++ ++ fprintf(fp, " p2m_frames: %d\n", ++ xd->xc_core.p2m_frames); ++ fprintf(fp, " p2m_frame_index_list: %s\n", ++ (xd->flags & (XC_CORE_NO_P2M|XC_SAVE)) ? "(not used)" : ""); ++ for (i = 0; i < xd->xc_core.p2m_frames; i++) { ++ fprintf(fp, "%ld ", ++ xd->xc_core.p2m_frame_index_list[i]); ++ } ++ fprintf(fp, xd->xc_core.p2m_frames ? 
"\n" : ""); ++ ++ if ((xd->flags & XC_CORE_ORIG) && CRASHDEBUG(8)) ++ xc_core_mfns(XENDUMP_LOCAL, fp); ++ ++ switch (xd->xc_core.elf_class) ++ { ++ case ELFCLASS32: ++ fpsave = xd->ofp; ++ xd->ofp = fp; ++ xc_core_elf_dump(); ++ offset32 = xd->xc_core.elf32->e_shoff; ++ for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { ++ xc_core_dump_Elf32_Shdr(offset32, ELFREAD); ++ offset32 += xd->xc_core.elf32->e_shentsize; ++ } ++ xendump_print("\n"); ++ xd->ofp = fpsave; ++ break; ++ ++ case ELFCLASS64: ++ fpsave = xd->ofp; ++ xd->ofp = fp; ++ xc_core_elf_dump(); ++ offset64 = xd->xc_core.elf64->e_shoff; ++ for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { ++ xc_core_dump_Elf64_Shdr(offset64, ELFREAD); ++ offset64 += xd->xc_core.elf64->e_shentsize; ++ } ++ xendump_print("\n"); ++ xd->ofp = fpsave; ++ break; ++ } ++ ++ return 0; ++} ++ ++static void ++xen_dump_vmconfig(FILE *fp) ++{ ++ int i, opens, closes; ++ char *p; ++ ++ opens = closes = 0; ++ p = xd->xc_save.vmconfig_buf; ++ for (i = 0; i < xd->xc_save.vmconfig_size; i++, p++) { ++ if (ascii(*p)) ++ fprintf(fp, "%c", *p); ++ else ++ fprintf(fp, "<%x>", *p); ++ ++ if (*p == '(') ++ opens++; ++ else if (*p == ')') ++ closes++; ++ } ++ fprintf(fp, "\n"); ++ ++ if (opens != closes) ++ error(WARNING, "invalid vmconfig contents?\n"); ++} ++ ++/* ++ * Looking at the active set, try to determine who panicked, ++ * or who was the "suspend" kernel thread. ++ */ ++ulong get_xendump_panic_task(void) ++{ ++ int i; ++ ulong task; ++ struct task_context *tc; ++ ++ switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) ++ { ++ case XC_CORE_ORIG: ++ case XC_CORE_ELF: ++ if (machdep->xendump_panic_task) ++ return (machdep->xendump_panic_task((void *)xd)); ++ break; ++ ++ case XC_SAVE: ++ for (i = 0; i < NR_CPUS; i++) { ++ if (!(task = tt->active_set[i])) ++ continue; ++ tc = task_to_context(task); ++ if (is_kernel_thread(task) && ++ STREQ(tc->comm, "suspend")) ++ return tc->task; ++ } ++ break; ++ } ++ ++ return NO_TASK; ++} ++ ++/* ++ * Figure out the back trace hooks. ++ */ ++void get_xendump_regs(struct bt_info *bt, ulong *pc, ulong *sp) ++{ ++ int i; ++ ulong *up; ++ ++ if ((tt->panic_task == bt->task) && ++ (xd->panic_pc && xd->panic_sp)) { ++ *pc = xd->panic_pc; ++ *sp = xd->panic_sp; ++ return; ++ } ++ ++ switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) ++ { ++ case XC_CORE_ORIG: ++ case XC_CORE_ELF: ++ if (machdep->get_xendump_regs) ++ return (machdep->get_xendump_regs(xd, bt, pc, sp)); ++ break; ++ ++ case XC_SAVE: ++ if (tt->panic_task != bt->task) ++ break; ++ ++ for (i = 0, up = (ulong *)bt->stackbuf; ++ i < LONGS_PER_STACK; i++, up++) { ++ if (is_kernel_text(*up) && ++ (STREQ(closest_symbol(*up), ++ "__do_suspend"))) { ++ *pc = *up; ++ *sp = tt->flags & THREAD_INFO ? ++ bt->tc->thread_info + ++ (i * sizeof(long)) : ++ bt->task + ++ (i * sizeof(long)); ++ xd->panic_pc = *pc; ++ xd->panic_sp = *sp; ++ return; ++ } ++ } ++ } ++ ++ machdep->get_stack_frame(bt, pc, sp); ++} ++ ++/* ++ * Farm out most of the work to the proper architecture to create ++ * the p2m table. For ELF core dumps, create the index;pfn table. 
++ */ ++static void ++xc_core_create_pfn_tables(void) ++{ ++ if (xd->flags & XC_CORE_P2M_CREATE) { ++ if (!machdep->xendump_p2m_create) ++ error(FATAL, ++ "xen xc_core dumpfiles not supported on this architecture"); ++ ++ if (!machdep->xendump_p2m_create((void *)xd)) ++ error(FATAL, ++ "cannot create xen pfn-to-mfn mapping\n"); ++ } ++ ++ if (xd->flags & XC_CORE_ELF) ++ xc_core_elf_pfn_init(); ++ ++ xd->flags &= ~(XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE); ++ ++ if (CRASHDEBUG(1)) ++ xendump_memory_dump(xd->ofp); ++} ++ ++/* ++ * Find the page index containing the mfn, and read the ++ * machine page into the buffer. ++ */ ++char * ++xc_core_mfn_to_page(ulong mfn, char *pgbuf) ++{ ++ int i, b, idx, done; ++ ulong tmp[MAX_BATCH_SIZE]; ++ off_t offset; ++ size_t size; ++ uint nr_pages; ++ ++ if (xd->flags & XC_CORE_ELF) ++ return xc_core_elf_mfn_to_page(mfn, pgbuf); ++ ++ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset, ++ SEEK_SET) == -1) { ++ error(INFO, "cannot lseek to page index\n"); ++ return NULL; ++ } ++ ++ nr_pages = xd->xc_core.header.xch_nr_pages; ++ if (xd->flags & XC_CORE_64BIT_HOST) ++ nr_pages *= 2; ++ ++ for (b = 0, idx = -1, done = FALSE; ++ !done && (b < nr_pages); b += MAX_BATCH_SIZE) { ++ size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); ++ if (read(xd->xfd, tmp, size) != size) { ++ error(INFO, "cannot read index page %d\n", b); ++ return NULL; ++ } ++ ++ for (i = 0; i < MAX_BATCH_SIZE; i++) { ++ if ((b+i) >= nr_pages) { ++ done = TRUE; ++ break; ++ } ++ if (tmp[i] == mfn) { ++ idx = i+b; ++ if (CRASHDEBUG(4)) ++ fprintf(xd->ofp, ++ "page: found mfn 0x%lx (%ld) at index %d\n", ++ mfn, mfn, idx); ++ done = TRUE; ++ } ++ } ++ } ++ ++ if (idx == -1) { ++ error(INFO, "cannot find mfn %ld (0x%lx) in page index\n", ++ mfn, mfn); ++ return NULL; ++ } ++ ++ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset, ++ SEEK_SET) == -1) { ++ error(INFO, "cannot lseek to xch_pages_offset\n"); ++ return NULL; ++ } ++ ++ offset = (off_t)(idx) * (off_t)xd->page_size; ++ ++ if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { ++ error(INFO, "cannot lseek to mfn-specified page\n"); ++ return NULL; ++ } ++ ++ if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) { ++ error(INFO, "cannot read mfn-specified page\n"); ++ return NULL; ++ } ++ ++ return pgbuf; ++} ++ ++/* ++ * Find the page index containing the mfn, and read the ++ * machine page into the buffer. 
++ */ ++static char * ++xc_core_elf_mfn_to_page(ulong mfn, char *pgbuf) ++{ ++ int i, b, idx, done; ++ off_t offset; ++ size_t size; ++ uint nr_pages; ++ ulong tmp; ++ struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; ++ ++ offset = xd->xc_core.header.xch_index_offset; ++ nr_pages = xd->xc_core.header.xch_nr_pages; ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ error(FATAL, "cannot lseek to page index\n"); ++ ++ for (b = 0, idx = -1, done = FALSE; ++ !done && (b < nr_pages); b += MAX_BATCH_SIZE) { ++ size = sizeof(struct xen_dumpcore_p2m) * ++ MIN(MAX_BATCH_SIZE, nr_pages - b); ++ if (read(xd->xfd, &p2m_batch[0], size) != size) { ++ error(INFO, "cannot read index page %d\n", b); ++ return NULL; ++ } ++ ++ for (i = 0; i < MAX_BATCH_SIZE; i++) { ++ if ((b+i) >= nr_pages) { ++ done = TRUE; ++ break; ++ } ++ ++ tmp = (ulong)p2m_batch[i].gmfn; ++ ++ if (tmp == mfn) { ++ idx = i+b; ++ if (CRASHDEBUG(4)) ++ fprintf(xd->ofp, ++ "page: found mfn 0x%lx (%ld) at index %d\n", ++ mfn, mfn, idx); ++ done = TRUE; ++ } ++ } ++ } ++ ++ if (idx == -1) { ++ error(INFO, "cannot find mfn %ld (0x%lx) in page index\n", ++ mfn, mfn); ++ return NULL; ++ } ++ ++ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset, ++ SEEK_SET) == -1) ++ error(FATAL, "cannot lseek to xch_pages_offset\n"); ++ ++ offset = (off_t)(idx) * (off_t)xd->page_size; ++ ++ if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { ++ error(INFO, "cannot lseek to mfn-specified page\n"); ++ return NULL; ++ } ++ ++ if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) { ++ error(INFO, "cannot read mfn-specified page\n"); ++ return NULL; ++ } ++ ++ return pgbuf; ++} ++ ++ ++/* ++ * Find and return the page index containing the mfn. ++ */ ++int ++xc_core_mfn_to_page_index(ulong mfn) ++{ ++ int i, b; ++ ulong tmp[MAX_BATCH_SIZE]; ++ uint nr_pages; ++ size_t size; ++ ++ if (xd->flags & XC_CORE_ELF) ++ return xc_core_elf_mfn_to_page_index(mfn); ++ ++ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset, ++ SEEK_SET) == -1) { ++ error(INFO, "cannot lseek to page index\n"); ++ return MFN_NOT_FOUND; ++ } ++ ++ nr_pages = xd->xc_core.header.xch_nr_pages; ++ if (xd->flags & XC_CORE_64BIT_HOST) ++ nr_pages *= 2; ++ ++ for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { ++ size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); ++ if (read(xd->xfd, tmp, size) != size) { ++ error(INFO, "cannot read index page %d\n", b); ++ return MFN_NOT_FOUND; ++ } ++ ++ for (i = 0; i < MAX_BATCH_SIZE; i++) { ++ if ((b+i) >= nr_pages) ++ break; ++ ++ if (tmp[i] == mfn) { ++ if (CRASHDEBUG(4)) ++ fprintf(xd->ofp, ++ "index: batch: %d found mfn %ld (0x%lx) at index %d\n", ++ b/MAX_BATCH_SIZE, mfn, mfn, i+b); ++ return (i+b); ++ } ++ } ++ } ++ ++ return MFN_NOT_FOUND; ++} ++ ++/* ++ * Find and return the page index containing the mfn. 
++ */ ++static int ++xc_core_elf_mfn_to_page_index(ulong mfn) ++{ ++ int i, b; ++ off_t offset; ++ size_t size; ++ uint nr_pages; ++ ulong tmp; ++ struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; ++ ++ offset = xd->xc_core.header.xch_index_offset; ++ nr_pages = xd->xc_core.header.xch_nr_pages; ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ error(FATAL, "cannot lseek to page index\n"); ++ ++ for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { ++ size = sizeof(struct xen_dumpcore_p2m) * ++ MIN(MAX_BATCH_SIZE, nr_pages - b); ++ if (read(xd->xfd, &p2m_batch[0], size) != size) { ++ error(INFO, "cannot read index page %d\n", b); ++ return MFN_NOT_FOUND; ++ } ++ ++ for (i = 0; i < MAX_BATCH_SIZE; i++) { ++ if ((b+i) >= nr_pages) ++ break; ++ ++ tmp = (ulong)p2m_batch[i].gmfn; ++ ++ if (tmp == mfn) { ++ if (CRASHDEBUG(4)) ++ fprintf(xd->ofp, ++ "index: batch: %d found mfn %ld (0x%lx) at index %d\n", ++ b/MAX_BATCH_SIZE, mfn, mfn, i+b); ++ return (i+b); ++ } ++ } ++ } ++ ++ return MFN_NOT_FOUND; ++} ++ ++ ++/* ++ * XC_CORE mfn-related utility function. ++ */ ++static int ++xc_core_mfns(ulong arg, FILE *ofp) ++{ ++ int i, b; ++ uint nr_pages; ++ ulong tmp[MAX_BATCH_SIZE]; ++ ulonglong tmp64[MAX_BATCH_SIZE]; ++ size_t size; ++ ++ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset, ++ SEEK_SET) == -1) { ++ error(INFO, "cannot lseek to page index\n"); ++ return FALSE; ++ } ++ ++ switch (arg) ++ { ++ case XC_CORE_64BIT_HOST: ++ /* ++ * Determine whether this is a 32-bit guest xendump that ++ * was taken on a 64-bit xen host. ++ */ ++ if (machine_type("X86_64") || machine_type("IA64")) ++ return FALSE; ++check_next_4: ++ if (read(xd->xfd, tmp, sizeof(ulong) * 4) != (4 * sizeof(ulong))) { ++ error(INFO, "cannot read index pages\n"); ++ return FALSE; ++ } ++ ++ if ((tmp[0] == 0xffffffff) || (tmp[1] == 0xffffffff) || ++ (tmp[2] == 0xffffffff) || (tmp[3] == 0xffffffff) || ++ (!tmp[0] && !tmp[1]) || (!tmp[2] && !tmp[3])) ++ goto check_next_4; ++ ++ if (CRASHDEBUG(2)) ++ fprintf(ofp, "mfns: %08lx %08lx %08lx %08lx\n", ++ tmp[0], tmp[1], tmp[2], tmp[3]); ++ ++ if (tmp[0] && !tmp[1] && tmp[2] && !tmp[3]) ++ return TRUE; ++ else ++ return FALSE; ++ ++ case XENDUMP_LOCAL: ++ if (BITS64() || (xd->flags & XC_CORE_64BIT_HOST)) ++ goto show_64bit_mfns; ++ ++ fprintf(ofp, "xch_index_offset mfn list:\n"); ++ ++ nr_pages = xd->xc_core.header.xch_nr_pages; ++ ++ for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { ++ size = sizeof(ulong) * ++ MIN(MAX_BATCH_SIZE, nr_pages - b); ++ if (read(xd->xfd, tmp, size) != size) { ++ error(INFO, "cannot read index page %d\n", b); ++ return FALSE; ++ } ++ ++ if (b) fprintf(ofp, "\n"); ++ ++ for (i = 0; i < MAX_BATCH_SIZE; i++) { ++ if ((b+i) >= nr_pages) ++ break; ++ if ((i%8) == 0) ++ fprintf(ofp, "%s[%d]:", ++ i ? "\n" : "", b+i); ++ if (tmp[i] == 0xffffffff) ++ fprintf(ofp, " INVALID"); ++ else ++ fprintf(ofp, " %lx", tmp[i]); ++ } ++ } ++ ++ fprintf(ofp, "\nxch_nr_pages: %d\n", ++ xd->xc_core.header.xch_nr_pages); ++ return TRUE; ++ ++show_64bit_mfns: ++ fprintf(ofp, "xch_index_offset mfn list: %s\n", ++ BITS32() ? 
"(64-bit mfns)" : ""); ++ ++ nr_pages = xd->xc_core.header.xch_nr_pages; ++ ++ for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { ++ size = sizeof(ulonglong) * ++ MIN(MAX_BATCH_SIZE, nr_pages - b); ++ if (read(xd->xfd, tmp64, size) != size) { ++ error(INFO, "cannot read index page %d\n", b); ++ return FALSE; ++ } ++ ++ if (b) fprintf(ofp, "\n"); ++ ++ for (i = 0; i < MAX_BATCH_SIZE; i++) { ++ if ((b+i) >= nr_pages) ++ break; ++ if ((i%8) == 0) ++ fprintf(ofp, "%s[%d]:", ++ i ? "\n" : "", b+i); ++ if (tmp64[i] == 0xffffffffffffffffULL) ++ fprintf(ofp, " INVALID"); ++ else ++ fprintf(ofp, " %llx", tmp64[i]); ++ } ++ } ++ ++ fprintf(ofp, "\nxch_nr_pages: %d\n", nr_pages); ++ return TRUE; ++ ++ default: ++ return FALSE; ++ } ++} ++ ++/* ++ * Given a normal kernel pfn, determine the page index in the dumpfile. ++ * ++ * - First determine which of the pages making up the ++ * phys_to_machine_mapping[] array would contain the pfn. ++ * - From the phys_to_machine_mapping page, determine the mfn. ++ * - Find the mfn in the dumpfile page index. ++ */ ++#define PFNS_PER_PAGE (xd->page_size/sizeof(unsigned long)) ++ ++static ulong ++xc_core_pfn_to_page_index(ulong pfn) ++{ ++ ulong idx, p2m_idx, mfn_idx; ++ ulong *up, mfn; ++ off_t offset; ++ ++ /* ++ * This function does not apply when there's no p2m ++ * mapping and/or if this is an ELF format dumpfile. ++ */ ++ switch (xd->flags & (XC_CORE_NO_P2M|XC_CORE_ELF)) ++ { ++ case (XC_CORE_NO_P2M|XC_CORE_ELF): ++ return xc_core_elf_pfn_valid(pfn); ++ ++ case XC_CORE_NO_P2M: ++ return(xc_core_pfn_valid(pfn) ? pfn : PFN_NOT_FOUND); ++ ++ case XC_CORE_ELF: ++ return xc_core_elf_pfn_to_page_index(pfn); ++ } ++ ++ idx = pfn/PFNS_PER_PAGE; ++ ++ if (idx >= xd->xc_core.p2m_frames) { ++ error(INFO, "pfn: %lx is too large for dumpfile\n", ++ pfn); ++ return PFN_NOT_FOUND; ++ } ++ ++ p2m_idx = xd->xc_core.p2m_frame_index_list[idx]; ++ ++ if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset, ++ SEEK_SET) == -1) { ++ error(INFO, "cannot lseek to xch_pages_offset\n"); ++ return PFN_NOT_FOUND; ++ } ++ ++ offset = (off_t)(p2m_idx) * (off_t)xd->page_size; ++ ++ if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { ++ error(INFO, "cannot lseek to pfn-specified page\n"); ++ return PFN_NOT_FOUND; ++ } ++ ++ if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) { ++ error(INFO, "cannot read pfn-specified page\n"); ++ return PFN_NOT_FOUND; ++ } ++ ++ up = (ulong *)xd->page; ++ up += (pfn%PFNS_PER_PAGE); ++ ++ mfn = *up; ++ ++ if ((mfn_idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) { ++ error(INFO, "cannot find mfn in page index\n"); ++ return PFN_NOT_FOUND; ++ } ++ ++ return mfn_idx; ++} ++ ++ ++/* ++ * Search the .xen_p2m array for the target pfn, starting at a ++ * higher batch if appropriate. This presumes that the pfns ++ * are laid out in ascending order. ++ */ ++static ulong ++xc_core_elf_pfn_to_page_index(ulong pfn) ++{ ++ int i, b, start_index; ++ off_t offset; ++ size_t size; ++ uint nr_pages; ++ ulong tmp; ++ struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; ++ ++ offset = xd->xc_core.header.xch_index_offset; ++ nr_pages = xd->xc_core.header.xch_nr_pages; ++ ++ /* ++ * Initialize the start_index. 
++ */ ++ xd->xc_core.last_batch.accesses++; ++ ++ if ((pfn >= xd->xc_core.last_batch.start) && ++ (pfn <= xd->xc_core.last_batch.end)) { ++ xd->xc_core.last_batch.duplicates++; ++ start_index = xd->xc_core.last_batch.index; ++ } else { ++ for (i = 0; i <= INDEX_PFN_COUNT; i++) { ++ if ((i == INDEX_PFN_COUNT) || ++ (pfn < xd->xc_core.elf_index_pfn[i].pfn)) { ++ if (--i < 0) ++ i = 0; ++ start_index = xd->xc_core.elf_index_pfn[i].index; ++ break; ++ } ++ } ++ } ++ ++ offset += (start_index * sizeof(struct xen_dumpcore_p2m)); ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ error(FATAL, "cannot lseek to page index\n"); ++ ++ for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) { ++ size = sizeof(struct xen_dumpcore_p2m) * ++ MIN(MAX_BATCH_SIZE, nr_pages - b); ++ if (read(xd->xfd, &p2m_batch[0], size) != size) { ++ error(INFO, "cannot read index page %d\n", b); ++ return PFN_NOT_FOUND; ++ } ++ ++ for (i = 0; i < MAX_BATCH_SIZE; i++) { ++ if ((b+i) >= nr_pages) ++ break; ++ ++ tmp = (ulong)p2m_batch[i].pfn; ++ ++ if (tmp == pfn) { ++ if (CRASHDEBUG(4)) ++ fprintf(xd->ofp, ++ "index: batch: %d found pfn %ld (0x%lx) at index %d\n", ++ b/MAX_BATCH_SIZE, pfn, pfn, i+b); ++ ++ if ((b+MAX_BATCH_SIZE) < nr_pages) { ++ xd->xc_core.last_batch.index = b; ++ xd->xc_core.last_batch.start = p2m_batch[0].pfn; ++ xd->xc_core.last_batch.end = p2m_batch[MAX_BATCH_SIZE-1].pfn; ++ } ++ ++ return (i+b); ++ } ++ } ++ } ++ ++ return PFN_NOT_FOUND; ++} ++ ++/* ++ * In xendumps containing INVALID_MFN markers in the page index, ++ * return the validity of the pfn. ++ */ ++static int ++xc_core_pfn_valid(ulong pfn) ++{ ++ ulong mfn; ++ off_t offset; ++ ++ if (pfn >= (ulong)xd->xc_core.header.xch_nr_pages) ++ return FALSE; ++ ++ offset = (off_t)xd->xc_core.header.xch_index_offset; ++ ++ if (xd->flags & XC_CORE_64BIT_HOST) ++ offset += (off_t)(pfn * sizeof(ulonglong)); ++ else ++ offset += (off_t)(pfn * sizeof(ulong)); ++ ++ /* ++ * The lseek and read should never fail, so report ++ * any errors unconditionally. ++ */ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) { ++ error(INFO, ++ "xendump: cannot lseek to page index for pfn %lx\n", ++ pfn); ++ return FALSE; ++ } ++ ++ if (read(xd->xfd, &mfn, sizeof(ulong)) != sizeof(ulong)) { ++ error(INFO, ++ "xendump: cannot read index page for pfn %lx\n", ++ pfn); ++ return FALSE; ++ } ++ ++ /* ++ * If it's an invalid mfn, let the caller decide whether ++ * to display an error message (unless debugging). ++ */ ++ if (mfn == INVALID_MFN) { ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "xendump: pfn %lx contains INVALID_MFN\n", ++ pfn); ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++/* ++ * Return the index into the .xen_pfn array containing the pfn. ++ * If not found, return PFN_NOT_FOUND. ++ */ ++static ulong ++xc_core_elf_pfn_valid(ulong pfn) ++{ ++ int i, b, start_index; ++ off_t offset; ++ size_t size; ++ uint nr_pages; ++ ulong tmp; ++ uint64_t pfn_batch[MAX_BATCH_SIZE]; ++ ++ offset = xd->xc_core.header.xch_index_offset; ++ nr_pages = xd->xc_core.header.xch_nr_pages; ++ ++ /* ++ * Initialize the start_index. 
++ */ ++ xd->xc_core.last_batch.accesses++; ++ ++ if ((pfn >= xd->xc_core.last_batch.start) && ++ (pfn <= xd->xc_core.last_batch.end)) { ++ xd->xc_core.last_batch.duplicates++; ++ start_index = xd->xc_core.last_batch.index; ++ } else { ++ for (i = 0; i <= INDEX_PFN_COUNT; i++) { ++ if ((i == INDEX_PFN_COUNT) || ++ (pfn < xd->xc_core.elf_index_pfn[i].pfn)) { ++ if (--i < 0) ++ i = 0; ++ start_index = xd->xc_core.elf_index_pfn[i].index; ++ break; ++ } ++ } ++ } ++ ++ offset += (start_index * sizeof(uint64_t)); ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ error(FATAL, "cannot lseek to page index\n"); ++ ++ for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) { ++ size = sizeof(uint64_t) * MIN(MAX_BATCH_SIZE, nr_pages - b); ++ if (read(xd->xfd, &pfn_batch[0], size) != size) { ++ error(INFO, "cannot read index page %d\n", b); ++ return PFN_NOT_FOUND; ++ } ++ ++ for (i = 0; i < MAX_BATCH_SIZE; i++) { ++ if ((b+i) >= nr_pages) ++ break; ++ ++ tmp = (ulong)pfn_batch[i]; ++ ++ if (tmp == pfn) { ++ if (CRASHDEBUG(4)) ++ fprintf(xd->ofp, ++ "index: batch: %d found pfn %ld (0x%lx) at index %d\n", ++ b/MAX_BATCH_SIZE, pfn, pfn, i+b); ++ ++ if ((b+MAX_BATCH_SIZE) < nr_pages) { ++ xd->xc_core.last_batch.index = b; ++ xd->xc_core.last_batch.start = (ulong)pfn_batch[0]; ++ xd->xc_core.last_batch.end = (ulong)pfn_batch[MAX_BATCH_SIZE-1]; ++ } ++ ++ return (i+b); ++ } ++ } ++ } ++ ++ return PFN_NOT_FOUND; ++} ++ ++/* ++ * Store the panic task's stack hooks from where it was found ++ * in get_active_set_panic_task(). ++ */ ++void ++xendump_panic_hook(char *stack) ++{ ++ int i, err, argc; ++ char *arglist[MAXARGS]; ++ char buf[BUFSIZE]; ++ ulong value, *sp; ++ ++ if (machine_type("IA64")) /* needs switch_stack address */ ++ return; ++ ++ strcpy(buf, stack); ++ ++ argc = parse_line(buf, arglist); ++ ++ if ((value = htol(strip_ending_char(arglist[0], ':'), ++ RETURN_ON_ERROR, &err)) == BADADDR) ++ return; ++ for (sp = (ulong *)value, i = 1; i < argc; i++, sp++) { ++ if (strstr(arglist[i], "xen_panic_event")) { ++ if (!readmem((ulong)sp, KVADDR, &value, ++ sizeof(ulong), "xen_panic_event address", ++ RETURN_ON_ERROR)) ++ return; ++ ++ xd->panic_sp = (ulong)sp; ++ xd->panic_pc = value; ++ } else if (strstr(arglist[i], "panic") && !xd->panic_sp) { ++ if (!readmem((ulong)sp, KVADDR, &value, ++ sizeof(ulong), "xen_panic_event address", ++ RETURN_ON_ERROR)) ++ return; ++ ++ xd->panic_sp = (ulong)sp; ++ xd->panic_pc = value; ++ } ++ } ++} ++ ++static void ++xendump_print(char *fmt, ...) ++{ ++ char buf[BUFSIZE]; ++ va_list ap; ++ ++ if (!fmt || !strlen(fmt)) ++ return; ++ ++ va_start(ap, fmt); ++ (void)vsnprintf(buf, BUFSIZE, fmt, ap); ++ va_end(ap); ++ ++ if (xd->ofp) ++ fprintf(xd->ofp, buf); ++ else if (!XENDUMP_VALID() && CRASHDEBUG(7)) ++ fprintf(stderr, buf); ++ ++} ++ ++/* ++ * Support for xc_core ELF dumpfile format. 
++ */ ++static int ++xc_core_elf_verify(char *file, char *buf) ++{ ++ int i; ++ Elf32_Ehdr *elf32; ++ Elf64_Ehdr *elf64; ++ Elf32_Off offset32; ++ Elf64_Off offset64; ++ char *eheader; ++ int swap; ++ ++ eheader = buf; ++ ++ if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT) ++ goto bailout; ++ ++ swap = (((eheader[EI_DATA] == ELFDATA2LSB) && ++ (__BYTE_ORDER == __BIG_ENDIAN)) || ++ ((eheader[EI_DATA] == ELFDATA2MSB) && ++ (__BYTE_ORDER == __LITTLE_ENDIAN))); ++ ++ elf32 = (Elf32_Ehdr *)buf; ++ elf64 = (Elf64_Ehdr *)buf; ++ ++ if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) && ++ (swap16(elf32->e_type, swap) == ET_CORE) && ++ (swap32(elf32->e_version, swap) == EV_CURRENT) && ++ (swap16(elf32->e_shnum, swap) > 0)) { ++ switch (swap16(elf32->e_machine, swap)) ++ { ++ case EM_386: ++ if (machine_type_mismatch(file, "X86", NULL, 0)) ++ goto bailout; ++ break; ++ ++ default: ++ if (machine_type_mismatch(file, "(unknown)", NULL, 0)) ++ goto bailout; ++ break; ++ } ++ ++ if (endian_mismatch(file, elf32->e_ident[EI_DATA], 0)) ++ goto bailout; ++ ++ xd->xc_core.elf_class = ELFCLASS32; ++ if ((xd->xc_core.elf32 = (Elf32_Ehdr *)malloc(sizeof(Elf32_Ehdr))) == NULL) { ++ fprintf(stderr, "cannot malloc ELF header buffer\n"); ++ clean_exit(1); ++ } ++ BCOPY(buf, xd->xc_core.elf32, sizeof(Elf32_Ehdr)); ++ ++ } else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) && ++ (swap16(elf64->e_type, swap) == ET_CORE) && ++ (swap32(elf64->e_version, swap) == EV_CURRENT) && ++ (swap16(elf64->e_shnum, swap) > 0)) { ++ switch (swap16(elf64->e_machine, swap)) ++ { ++ case EM_IA_64: ++ if (machine_type_mismatch(file, "IA64", NULL, 0)) ++ goto bailout; ++ break; ++ ++ case EM_X86_64: ++ if (machine_type_mismatch(file, "X86_64", "X86", 0)) ++ goto bailout; ++ break; ++ ++ case EM_386: ++ if (machine_type_mismatch(file, "X86", NULL, 0)) ++ goto bailout; ++ break; ++ ++ default: ++ if (machine_type_mismatch(file, "(unknown)", NULL, 0)) ++ goto bailout; ++ } ++ ++ if (endian_mismatch(file, elf64->e_ident[EI_DATA], 0)) ++ goto bailout; ++ ++ xd->xc_core.elf_class = ELFCLASS64; ++ if ((xd->xc_core.elf64 = (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr))) == NULL) { ++ fprintf(stderr, "cannot malloc ELF header buffer\n"); ++ clean_exit(1); ++ } ++ BCOPY(buf, xd->xc_core.elf64, sizeof(Elf64_Ehdr)); ++ ++ } else { ++ if (CRASHDEBUG(1)) ++ error(INFO, "%s: not a xen ELF core file\n", file); ++ goto bailout; ++ } ++ ++ xc_core_elf_dump(); ++ ++ switch (xd->xc_core.elf_class) ++ { ++ case ELFCLASS32: ++ offset32 = xd->xc_core.elf32->e_shoff; ++ for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { ++ xc_core_dump_Elf32_Shdr(offset32, ELFSTORE); ++ offset32 += xd->xc_core.elf32->e_shentsize; ++ } ++ xendump_print("\n"); ++ break; ++ ++ case ELFCLASS64: ++ offset64 = xd->xc_core.elf64->e_shoff; ++ for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { ++ xc_core_dump_Elf64_Shdr(offset64, ELFSTORE); ++ offset64 += xd->xc_core.elf64->e_shentsize; ++ } ++ xendump_print("\n"); ++ break; ++ } ++ ++ xd->flags |= (XENDUMP_LOCAL | XC_CORE_ELF); ++ ++ if (!xd->page_size) ++ error(FATAL, ++ "unknown page size: use -p command line option\n"); ++ ++ if (!(xd->page = (char *)malloc(xd->page_size))) ++ error(FATAL, "cannot malloc page space."); ++ ++ if (!(xd->poc = (struct pfn_offset_cache *)calloc ++ (PFN_TO_OFFSET_CACHE_ENTRIES, ++ sizeof(struct pfn_offset_cache)))) ++ error(FATAL, "cannot malloc pfn_offset_cache\n"); ++ xd->last_pfn = ~(0UL); ++ ++ for (i = 0; i < INDEX_PFN_COUNT; i++) ++ xd->xc_core.elf_index_pfn[i].pfn = ~0UL; ++ ++ if 
(CRASHDEBUG(1)) ++ xendump_memory_dump(fp); ++ ++ return TRUE; ++ ++bailout: ++ return FALSE; ++} ++ ++/* ++ * Dump the relevant ELF header. ++ */ ++static void ++xc_core_elf_dump(void) ++{ ++ switch (xd->xc_core.elf_class) ++ { ++ case ELFCLASS32: ++ xc_core_dump_Elf32_Ehdr(xd->xc_core.elf32); ++ break; ++ case ELFCLASS64: ++ xc_core_dump_Elf64_Ehdr(xd->xc_core.elf64); ++ break; ++ } ++} ++ ++ ++/* ++ * Dump the 32-bit ELF header, and grab a pointer to the strtab section. ++ */ ++static void ++xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *elf) ++{ ++ char buf[BUFSIZE]; ++ Elf32_Off offset32; ++ Elf32_Shdr shdr; ++ ++ BZERO(buf, BUFSIZE); ++ BCOPY(elf->e_ident, buf, SELFMAG); ++ xendump_print("\nElf32_Ehdr:\n"); ++ xendump_print(" e_ident: \\%o%s\n", buf[0], ++ &buf[1]); ++ xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); ++ switch (elf->e_ident[EI_CLASS]) ++ { ++ case ELFCLASSNONE: ++ xendump_print("(ELFCLASSNONE)"); ++ break; ++ case ELFCLASS32: ++ xendump_print("(ELFCLASS32)\n"); ++ break; ++ case ELFCLASS64: ++ xendump_print("(ELFCLASS64)\n"); ++ break; ++ case ELFCLASSNUM: ++ xendump_print("(ELFCLASSNUM)\n"); ++ break; ++ default: ++ xendump_print("(?)\n"); ++ break; ++ } ++ xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); ++ switch (elf->e_ident[EI_DATA]) ++ { ++ case ELFDATANONE: ++ xendump_print("(ELFDATANONE)\n"); ++ break; ++ case ELFDATA2LSB: ++ xendump_print("(ELFDATA2LSB)\n"); ++ break; ++ case ELFDATA2MSB: ++ xendump_print("(ELFDATA2MSB)\n"); ++ break; ++ case ELFDATANUM: ++ xendump_print("(ELFDATANUM)\n"); ++ break; ++ default: ++ xendump_print("(?)\n"); ++ } ++ xendump_print(" e_ident[EI_VERSION]: %d ", ++ elf->e_ident[EI_VERSION]); ++ if (elf->e_ident[EI_VERSION] == EV_CURRENT) ++ xendump_print("(EV_CURRENT)\n"); ++ else ++ xendump_print("(?)\n"); ++ xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); ++ switch (elf->e_ident[EI_OSABI]) ++ { ++ case ELFOSABI_SYSV: ++ xendump_print("(ELFOSABI_SYSV)\n"); ++ break; ++ case ELFOSABI_HPUX: ++ xendump_print("(ELFOSABI_HPUX)\n"); ++ break; ++ case ELFOSABI_ARM: ++ xendump_print("(ELFOSABI_ARM)\n"); ++ break; ++ case ELFOSABI_STANDALONE: ++ xendump_print("(ELFOSABI_STANDALONE)\n"); ++ break; ++ default: ++ xendump_print("(?)\n"); ++ } ++ xendump_print(" e_ident[EI_ABIVERSION]: %d\n", ++ elf->e_ident[EI_ABIVERSION]); ++ ++ xendump_print(" e_type: %d ", elf->e_type); ++ switch (elf->e_type) ++ { ++ case ET_NONE: ++ xendump_print("(ET_NONE)\n"); ++ break; ++ case ET_REL: ++ xendump_print("(ET_REL)\n"); ++ break; ++ case ET_EXEC: ++ xendump_print("(ET_EXEC)\n"); ++ break; ++ case ET_DYN: ++ xendump_print("(ET_DYN)\n"); ++ break; ++ case ET_CORE: ++ xendump_print("(ET_CORE)\n"); ++ break; ++ case ET_NUM: ++ xendump_print("(ET_NUM)\n"); ++ break; ++ case ET_LOOS: ++ xendump_print("(ET_LOOS)\n"); ++ break; ++ case ET_HIOS: ++ xendump_print("(ET_HIOS)\n"); ++ break; ++ case ET_LOPROC: ++ xendump_print("(ET_LOPROC)\n"); ++ break; ++ case ET_HIPROC: ++ xendump_print("(ET_HIPROC)\n"); ++ break; ++ default: ++ xendump_print("(?)\n"); ++ } ++ ++ xendump_print(" e_machine: %d ", elf->e_machine); ++ switch (elf->e_machine) ++ { ++ case EM_386: ++ xendump_print("(EM_386)\n"); ++ break; ++ default: ++ xendump_print("(unsupported)\n"); ++ break; ++ } ++ ++ xendump_print(" e_version: %ld ", (ulong)elf->e_version); ++ xendump_print("%s\n", elf->e_version == EV_CURRENT ? 
++ "(EV_CURRENT)" : ""); ++ ++ xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry); ++ xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff); ++ xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff); ++ xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags); ++ xendump_print(" e_ehsize: %x\n", elf->e_ehsize); ++ xendump_print(" e_phentsize: %x\n", elf->e_phentsize); ++ xendump_print(" e_phnum: %x\n", elf->e_phnum); ++ xendump_print(" e_shentsize: %x\n", elf->e_shentsize); ++ xendump_print(" e_shnum: %x\n", elf->e_shnum); ++ xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx); ++ ++ /* Determine the strtab location. */ ++ ++ offset32 = elf->e_shoff + ++ (elf->e_shstrndx * elf->e_shentsize); ++ ++ if (lseek(xd->xfd, offset32, SEEK_SET) != offset32) ++ error(FATAL, ++ "xc_core_dump_Elf32_Ehdr: cannot seek to strtab Elf32_Shdr\n"); ++ if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) ++ error(FATAL, ++ "xc_core_dump_Elf32_Ehdr: cannot read strtab Elf32_Shdr\n"); ++ ++ xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset; ++} ++ ++/* ++ * Dump the 64-bit ELF header, and grab a pointer to the strtab section. ++ */ ++static void ++xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *elf) ++{ ++ char buf[BUFSIZE]; ++ Elf64_Off offset64; ++ Elf64_Shdr shdr; ++ ++ BZERO(buf, BUFSIZE); ++ BCOPY(elf->e_ident, buf, SELFMAG); ++ xendump_print("\nElf64_Ehdr:\n"); ++ xendump_print(" e_ident: \\%o%s\n", buf[0], ++ &buf[1]); ++ xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); ++ switch (elf->e_ident[EI_CLASS]) ++ { ++ case ELFCLASSNONE: ++ xendump_print("(ELFCLASSNONE)"); ++ break; ++ case ELFCLASS32: ++ xendump_print("(ELFCLASS32)\n"); ++ break; ++ case ELFCLASS64: ++ xendump_print("(ELFCLASS64)\n"); ++ break; ++ case ELFCLASSNUM: ++ xendump_print("(ELFCLASSNUM)\n"); ++ break; ++ default: ++ xendump_print("(?)\n"); ++ break; ++ } ++ xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); ++ switch (elf->e_ident[EI_DATA]) ++ { ++ case ELFDATANONE: ++ xendump_print("(ELFDATANONE)\n"); ++ break; ++ case ELFDATA2LSB: ++ xendump_print("(ELFDATA2LSB)\n"); ++ break; ++ case ELFDATA2MSB: ++ xendump_print("(ELFDATA2MSB)\n"); ++ break; ++ case ELFDATANUM: ++ xendump_print("(ELFDATANUM)\n"); ++ break; ++ default: ++ xendump_print("(?)\n"); ++ } ++ xendump_print(" e_ident[EI_VERSION]: %d ", ++ elf->e_ident[EI_VERSION]); ++ if (elf->e_ident[EI_VERSION] == EV_CURRENT) ++ xendump_print("(EV_CURRENT)\n"); ++ else ++ xendump_print("(?)\n"); ++ xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); ++ switch (elf->e_ident[EI_OSABI]) ++ { ++ case ELFOSABI_SYSV: ++ xendump_print("(ELFOSABI_SYSV)\n"); ++ break; ++ case ELFOSABI_HPUX: ++ xendump_print("(ELFOSABI_HPUX)\n"); ++ break; ++ case ELFOSABI_ARM: ++ xendump_print("(ELFOSABI_ARM)\n"); ++ break; ++ case ELFOSABI_STANDALONE: ++ xendump_print("(ELFOSABI_STANDALONE)\n"); ++ break; ++ default: ++ xendump_print("(?)\n"); ++ } ++ xendump_print(" e_ident[EI_ABIVERSION]: %d\n", ++ elf->e_ident[EI_ABIVERSION]); ++ ++ xendump_print(" e_type: %d ", elf->e_type); ++ switch (elf->e_type) ++ { ++ case ET_NONE: ++ xendump_print("(ET_NONE)\n"); ++ break; ++ case ET_REL: ++ xendump_print("(ET_REL)\n"); ++ break; ++ case ET_EXEC: ++ xendump_print("(ET_EXEC)\n"); ++ break; ++ case ET_DYN: ++ xendump_print("(ET_DYN)\n"); ++ break; ++ case ET_CORE: ++ xendump_print("(ET_CORE)\n"); ++ break; ++ case ET_NUM: ++ xendump_print("(ET_NUM)\n"); ++ break; ++ case ET_LOOS: ++ xendump_print("(ET_LOOS)\n"); ++ break; ++ case ET_HIOS: ++ 
xendump_print("(ET_HIOS)\n"); ++ break; ++ case ET_LOPROC: ++ xendump_print("(ET_LOPROC)\n"); ++ break; ++ case ET_HIPROC: ++ xendump_print("(ET_HIPROC)\n"); ++ break; ++ default: ++ xendump_print("(?)\n"); ++ } ++ ++ xendump_print(" e_machine: %d ", elf->e_machine); ++ switch (elf->e_machine) ++ { ++ case EM_386: ++ xendump_print("(EM_386)\n"); ++ break; ++ case EM_IA_64: ++ xendump_print("(EM_IA_64)\n"); ++ break; ++ case EM_PPC64: ++ xendump_print("(EM_PPC64)\n"); ++ break; ++ case EM_X86_64: ++ xendump_print("(EM_X86_64)\n"); ++ break; ++ default: ++ xendump_print("(unsupported)\n"); ++ break; ++ } ++ ++ xendump_print(" e_version: %ld ", (ulong)elf->e_version); ++ xendump_print("%s\n", elf->e_version == EV_CURRENT ? ++ "(EV_CURRENT)" : ""); ++ ++ xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry); ++ xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff); ++ xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff); ++ xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags); ++ xendump_print(" e_ehsize: %x\n", elf->e_ehsize); ++ xendump_print(" e_phentsize: %x\n", elf->e_phentsize); ++ xendump_print(" e_phnum: %x\n", elf->e_phnum); ++ xendump_print(" e_shentsize: %x\n", elf->e_shentsize); ++ xendump_print(" e_shnum: %x\n", elf->e_shnum); ++ xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx); ++ ++ /* Determine the strtab location. */ ++ ++ offset64 = elf->e_shoff + ++ (elf->e_shstrndx * elf->e_shentsize); ++ ++ if (lseek(xd->xfd, offset64, SEEK_SET) != offset64) ++ error(FATAL, ++ "xc_core_dump_Elf64_Ehdr: cannot seek to strtab Elf32_Shdr\n"); ++ if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) ++ error(FATAL, ++ "xc_core_dump_Elf64_Ehdr: cannot read strtab Elf32_Shdr\n"); ++ ++ xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset; ++} ++ ++/* ++ * Dump each 32-bit section header and the data that they reference. 
++ */ ++static void ++xc_core_dump_Elf32_Shdr(Elf32_Off offset, int store) ++{ ++ Elf32_Shdr shdr; ++ char name[BUFSIZE]; ++ int i; ++ char c; ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) != offset) ++ error(FATAL, ++ "xc_core_dump_Elf32_Shdr: cannot seek to Elf32_Shdr\n"); ++ if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) ++ error(FATAL, ++ "xc_core_dump_Elf32_Shdr: cannot read Elf32_Shdr\n"); ++ ++ xendump_print("\nElf32_Shdr:\n"); ++ xendump_print(" sh_name: %lx ", shdr.sh_name); ++ xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name)); ++ xendump_print(" sh_type: %lx ", shdr.sh_type); ++ switch (shdr.sh_type) ++ { ++ case SHT_NULL: ++ xendump_print("(SHT_NULL)\n"); ++ break; ++ case SHT_PROGBITS: ++ xendump_print("(SHT_PROGBITS)\n"); ++ break; ++ case SHT_STRTAB: ++ xendump_print("(SHT_STRTAB)\n"); ++ break; ++ case SHT_NOTE: ++ xendump_print("(SHT_NOTE)\n"); ++ break; ++ default: ++ xendump_print("\n"); ++ break; ++ } ++ xendump_print(" sh_flags: %lx\n", shdr.sh_flags); ++ xendump_print(" sh_addr: %lx\n", shdr.sh_addr); ++ xendump_print(" sh_offset: %lx\n", shdr.sh_offset); ++ xendump_print(" sh_size: %lx\n", shdr.sh_size); ++ xendump_print(" sh_link: %lx\n", shdr.sh_link); ++ xendump_print(" sh_info: %lx\n", shdr.sh_info); ++ xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign); ++ xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize); ++ ++ if (STREQ(name, ".shstrtab")) { ++ if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset, SEEK_SET) != ++ xd->xc_core.elf_strtab_offset) ++ error(FATAL, ++ "xc_core_dump_Elf32_Shdr: cannot seek to strtab data\n"); ++ ++ xendump_print(" "); ++ for (i = 0; i < shdr.sh_size; i++) { ++ if (read(xd->xfd, &c, sizeof(char)) != sizeof(char)) ++ error(FATAL, ++ "xc_core_dump_Elf32_Shdr: cannot read strtab data\n"); ++ if (i && !c) ++ xendump_print("\n "); ++ else ++ xendump_print("%c", c); ++ } ++ } ++ ++ if (STREQ(name, ".note.Xen")) ++ xc_core_dump_elfnote((off_t)shdr.sh_offset, ++ (size_t)shdr.sh_size, store); ++ ++ if (!store) ++ return; ++ ++ if (STREQ(name, ".xen_prstatus")) ++ xd->xc_core.header.xch_ctxt_offset = ++ (unsigned int)shdr.sh_offset; ++ ++ if (STREQ(name, ".xen_shared_info")) ++ xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset; ++ ++ if (STREQ(name, ".xen_pfn")) { ++ xd->xc_core.header.xch_index_offset = shdr.sh_offset; ++ xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE); ++ } ++ ++ if (STREQ(name, ".xen_p2m")) { ++ xd->xc_core.header.xch_index_offset = shdr.sh_offset; ++ xd->flags |= XC_CORE_P2M_CREATE; ++ } ++ ++ if (STREQ(name, ".xen_pages")) ++ xd->xc_core.header.xch_pages_offset = ++ (unsigned int)shdr.sh_offset; ++ ++ if (STREQ(name, ".xen_ia64_mapped_regs")) ++ xd->xc_core.ia64_mapped_regs_offset = ++ (off_t)shdr.sh_offset; ++} ++ ++/* ++ * Dump each 64-bit section header and the data that they reference. 
++ */ ++static void ++xc_core_dump_Elf64_Shdr(Elf64_Off offset, int store) ++{ ++ Elf64_Shdr shdr; ++ char name[BUFSIZE]; ++ int i; ++ char c; ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) != offset) ++ error(FATAL, ++ "xc_core_dump_Elf64_Shdr: cannot seek to Elf64_Shdr\n"); ++ if (read(xd->xfd, &shdr, sizeof(Elf64_Shdr)) != sizeof(Elf64_Shdr)) ++ error(FATAL, ++ "xc_core_dump_Elf64_Shdr: cannot read Elf64_Shdr\n"); ++ ++ xendump_print("\nElf64_Shdr:\n"); ++ xendump_print(" sh_name: %x ", shdr.sh_name); ++ xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name)); ++ xendump_print(" sh_type: %x ", shdr.sh_type); ++ switch (shdr.sh_type) ++ { ++ case SHT_NULL: ++ xendump_print("(SHT_NULL)\n"); ++ break; ++ case SHT_PROGBITS: ++ xendump_print("(SHT_PROGBITS)\n"); ++ break; ++ case SHT_STRTAB: ++ xendump_print("(SHT_STRTAB)\n"); ++ break; ++ case SHT_NOTE: ++ xendump_print("(SHT_NOTE)\n"); ++ break; ++ default: ++ xendump_print("\n"); ++ break; ++ } ++ xendump_print(" sh_flags: %lx\n", shdr.sh_flags); ++ xendump_print(" sh_addr: %lx\n", shdr.sh_addr); ++ xendump_print(" sh_offset: %lx\n", shdr.sh_offset); ++ xendump_print(" sh_size: %lx\n", shdr.sh_size); ++ xendump_print(" sh_link: %x\n", shdr.sh_link); ++ xendump_print(" sh_info: %x\n", shdr.sh_info); ++ xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign); ++ xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize); ++ ++ if (STREQ(name, ".shstrtab")) { ++ if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset, SEEK_SET) != ++ xd->xc_core.elf_strtab_offset) ++ error(FATAL, ++ "xc_core_dump_Elf64_Shdr: cannot seek to strtab data\n"); ++ ++ xendump_print(" "); ++ for (i = 0; i < shdr.sh_size; i++) { ++ if (read(xd->xfd, &c, sizeof(char)) != sizeof(char)) ++ error(FATAL, ++ "xc_core_dump_Elf64_Shdr: cannot read strtab data\n"); ++ if (i && !c) ++ xendump_print("\n "); ++ else ++ xendump_print("%c", c); ++ } ++ } ++ ++ if (STREQ(name, ".note.Xen")) ++ xc_core_dump_elfnote((off_t)shdr.sh_offset, ++ (size_t)shdr.sh_size, store); ++ ++ if (!store) ++ return; ++ ++ if (STREQ(name, ".xen_prstatus")) ++ xd->xc_core.header.xch_ctxt_offset = ++ (unsigned int)shdr.sh_offset; ++ ++ if (STREQ(name, ".xen_shared_info")) ++ xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset; ++ ++ if (STREQ(name, ".xen_pfn")) { ++ xd->xc_core.header.xch_index_offset = shdr.sh_offset; ++ xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE); ++ } ++ ++ if (STREQ(name, ".xen_p2m")) { ++ xd->xc_core.header.xch_index_offset = shdr.sh_offset; ++ xd->flags |= XC_CORE_P2M_CREATE; ++ } ++ ++ if (STREQ(name, ".xen_pages")) ++ xd->xc_core.header.xch_pages_offset = ++ (unsigned int)shdr.sh_offset; ++ ++ if (STREQ(name, ".xen_ia64_mapped_regs")) ++ xd->xc_core.ia64_mapped_regs_offset = ++ (off_t)shdr.sh_offset; ++} ++ ++/* ++ * Return the string found at the specified index into ++ * the dumpfile's strtab. ++ */ ++static char * ++xc_core_strtab(uint32_t index, char *buf) ++{ ++ off_t offset; ++ int i; ++ ++ offset = xd->xc_core.elf_strtab_offset + index; ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) != offset) ++ error(FATAL, ++ "xc_core_strtab: cannot seek to Elf64_Shdr\n"); ++ ++ BZERO(buf, BUFSIZE); ++ i = 0; ++ ++ while (read(xd->xfd, &buf[i], sizeof(char)) == sizeof(char)) { ++ if (buf[i] == NULLCHAR) ++ break; ++ i++; ++ } ++ ++ return buf; ++} ++ ++ ++/* ++ * Dump the array of elfnote structures, storing relevant info ++ * when requested during initialization. This function is ++ * common to both 32-bit and 64-bit ELF files. 
++ */ ++static void ++xc_core_dump_elfnote(off_t sh_offset, size_t sh_size, int store) ++{ ++ int i, lf, index; ++ char *notes_buffer; ++ struct elfnote *elfnote; ++ ulonglong *data; ++ struct xen_dumpcore_elfnote_header_desc *elfnote_header; ++ struct xen_dumpcore_elfnote_format_version_desc *format_version; ++ ++ elfnote_header = NULL; ++ ++ if (!(notes_buffer = (char *)malloc(sh_size))) ++ error(FATAL, "cannot malloc notes space."); ++ ++ if (lseek(xd->xfd, sh_offset, SEEK_SET) != sh_offset) ++ error(FATAL, ++ "xc_core_dump_elfnote: cannot seek to sh_offset\n"); ++ ++ if (read(xd->xfd, notes_buffer, sh_size) != sh_size) ++ error(FATAL, ++ "xc_core_dump_elfnote: cannot read elfnote data\n"); ++ ++ for (index = 0; index < sh_size; ) { ++ elfnote = (struct elfnote *)¬es_buffer[index]; ++ xendump_print(" namesz: %d\n", elfnote->namesz); ++ xendump_print(" descz: %d\n", elfnote->descsz); ++ xendump_print(" type: %x ", elfnote->type); ++ switch (elfnote->type) ++ { ++ case XEN_ELFNOTE_DUMPCORE_NONE: ++ xendump_print("(XEN_ELFNOTE_DUMPCORE_NONE)\n"); ++ break; ++ case XEN_ELFNOTE_DUMPCORE_HEADER: ++ xendump_print("(XEN_ELFNOTE_DUMPCORE_HEADER)\n"); ++ elfnote_header = (struct xen_dumpcore_elfnote_header_desc *) ++ (elfnote+1); ++ break; ++ case XEN_ELFNOTE_DUMPCORE_XEN_VERSION: ++ xendump_print("(XEN_ELFNOTE_DUMPCORE_XEN_VERSION)\n"); ++ break; ++ case XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION: ++ xendump_print("(XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION)\n"); ++ format_version = (struct xen_dumpcore_elfnote_format_version_desc *) ++ (elfnote+1); ++ break; ++ default: ++ xendump_print("(unknown)\n"); ++ break; ++ } ++ xendump_print(" name: %s\n", elfnote->name); ++ ++ data = (ulonglong *)(elfnote+1); ++ for (i = lf = 0; i < elfnote->descsz/sizeof(ulonglong); i++) { ++ if (((i%2)==0)) { ++ xendump_print("%s ", ++ i ? "\n" : ""); ++ lf++; ++ } else ++ lf = 0; ++ xendump_print("%016llx ", *data++); ++ } ++ if (!elfnote->descsz) ++ xendump_print(" (empty)"); ++ xendump_print("\n"); ++ ++ index += sizeof(struct elfnote) + elfnote->descsz; ++ } ++ ++ if (!store) ++ return; ++ ++ if (elfnote_header) { ++ xd->xc_core.header.xch_magic = elfnote_header->xch_magic; ++ xd->xc_core.header.xch_nr_vcpus = elfnote_header->xch_nr_vcpus; ++ xd->xc_core.header.xch_nr_pages = elfnote_header->xch_nr_pages; ++ xd->page_size = elfnote_header->xch_page_size; ++ } ++ ++ if (format_version) { ++ switch (format_version->version) ++ { ++ case FORMAT_VERSION_0000000000000001: ++ break; ++ default: ++ error(WARNING, ++ "unsupported xen dump-core format version: %016llx\n", ++ format_version->version); ++ } ++ xd->xc_core.format_version = format_version->version; ++ } ++ ++} ++ ++/* ++ * Initialize the batching list for the .xen_p2m or .xen_pfn ++ * arrays. 
++ */ ++static void ++xc_core_elf_pfn_init(void) ++{ ++ int i, c, chunk; ++ off_t offset; ++ struct xen_dumpcore_p2m p2m; ++ uint64_t pfn; ++ ++ switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) ++ { ++ case (XC_CORE_ELF|XC_CORE_NO_P2M): ++ chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; ++ ++ for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { ++ offset = (off_t)xd->xc_core.header.xch_index_offset + ++ (off_t)(c * sizeof(uint64_t)); ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ error(FATAL, ++ "cannot lseek to page index %d\n", c); ++ if (read(xd->xfd, &pfn, sizeof(uint64_t)) != ++ sizeof(uint64_t)) ++ error(FATAL, ++ "cannot read page index %d\n", c); ++ ++ xd->xc_core.elf_index_pfn[i].index = c; ++ xd->xc_core.elf_index_pfn[i].pfn = (ulong)pfn; ++ } ++ break; ++ ++ case XC_CORE_ELF: ++ chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; ++ ++ for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { ++ offset = (off_t)xd->xc_core.header.xch_index_offset + ++ (off_t)(c * sizeof(struct xen_dumpcore_p2m)); ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ error(FATAL, ++ "cannot lseek to page index %d\n", c); ++ if (read(xd->xfd, &p2m, sizeof(struct xen_dumpcore_p2m)) != ++ sizeof(struct xen_dumpcore_p2m)) ++ error(FATAL, ++ "cannot read page index %d\n", c); ++ ++ xd->xc_core.elf_index_pfn[i].index = c; ++ xd->xc_core.elf_index_pfn[i].pfn = (ulong)p2m.pfn; ++ } ++ break; ++ } ++} ++ ++struct xendump_data * ++get_xendump_data(void) ++{ ++ return (XENDUMP_VALID() ? xd : NULL); ++} +--- crash/lkcd_v8.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_v8.c 2008-01-04 09:42:08.000000000 -0500 +@@ -23,9 +23,185 @@ + #include "lkcd_dump_v8.h" /* REMIND */ + + static dump_header_t dump_header_v8 = { 0 }; +-// static dump_header_asm_t dump_header_asm_v8 = { 0 }; ++#ifndef HAVE_NO_DUMP_HEADER_ASM ++static dump_header_asm_t dump_header_asm_v8 = { 0 }; ++#endif + static dump_page_t dump_page = { 0 }; + static void mclx_cache_page_headers_v8(void); ++static off_t lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE; ++ ++#if defined(X86_64) ++ ++int ++get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) ++{ ++ if (eip) ++ *eip = dump_header_asm_v8.dha_smp_regs[cpu].rip; ++ if (esp) ++ *esp = dump_header_asm_v8.dha_smp_regs[cpu].rsp; ++ ++ return 0; ++} ++ ++#elif defined(X86) ++ ++int ++get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) ++{ ++ if (eip) ++ *eip = dump_header_asm_v8.dha_smp_regs[cpu].eip; ++ if (esp) ++ *esp = dump_header_asm_v8.dha_smp_regs[cpu].esp; ++ ++ return 0; ++} ++ ++#else ++ ++int ++get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) ++{ ++ return -1; ++} ++ ++#endif ++ ++ ++ ++int ++get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp) ++{ ++ int cpu = bt->tc->processor; ++ ++ if (!bt || !bt->tc) { ++ fprintf(stderr, "get_lkcd_regs_for_cpu_v8: invalid tc " ++ "(CPU=%d)\n", cpu); ++ return -EINVAL; ++ } ++ ++ if (cpu >= NR_CPUS) { ++ fprintf(stderr, "get_lkcd_regs_for_cpu_v8, cpu (%d) too high\n", cpu); ++ return -EINVAL; ++ } ++ ++ return get_lkcd_regs_for_cpu_arch(cpu, eip, esp); ++} ++ ++ ++#ifndef HAVE_NO_DUMP_HEADER_ASM ++int ++lkcd_dump_init_v8_arch(dump_header_t *dh) ++{ ++ off_t ret_of; ++ ssize_t ret_sz; ++ uint32_t hdr_size, offset, nr_cpus; ++ dump_header_asm_t arch_hdr; ++ char *hdr_buf = NULL; ++ ++ ret_of = lseek(lkcd->fd, dh->dh_header_size + ++ offsetof(dump_header_asm_t, dha_header_size), ++ SEEK_SET); ++ if (ret_of < 0) { ++ perror("lseek failed in " __FILE__ ":" STR(__LINE__)); ++ goto 
err; ++ } ++ ++ ret_sz = read(lkcd->fd, (char *)&hdr_size, sizeof(hdr_size)); ++ if (ret_sz != sizeof(hdr_size)) { ++ perror("Reading hdr_size failed in " __FILE__ ":" STR(__LINE__)); ++ goto err; ++ } ++ ++ ret_of = lseek(lkcd->fd, dh->dh_header_size, SEEK_SET); ++ if (ret_of < 0) { ++ perror("lseek failed in " __FILE__ ":" STR(__LINE__)); ++ goto err; ++ } ++ ++ hdr_buf = (char *)malloc(hdr_size); ++ if (!hdr_buf) { ++ perror("Could not allocate memory for dump header\n"); ++ goto err; ++ } ++ ++ ret_sz = read(lkcd->fd, (char *)hdr_buf, hdr_size); ++ if (ret_sz != hdr_size) { ++ perror("Could not read header " __FILE__ ":" STR(__LINE__)); ++ goto err; ++ } ++ ++ ++ /* ++ * Though we have KL_NR_CPUS is 128, the header size is different ++ * CONFIG_NR_CPUS might be different in the kernel. Hence, need ++ * to find out how many CPUs are configured. ++ */ ++ offset = offsetof(dump_header_asm_t, dha_smp_regs[0]); ++ nr_cpus = (hdr_size - offset) / sizeof(dump_CPU_info_t); ++ ++ /* check for CPU overflow */ ++ if (nr_cpus > NR_CPUS) { ++ fprintf(stderr, "CPU number too high %d (%s:%d)\n", ++ nr_cpus, __FILE__, __LINE__); ++ goto err; ++ } ++ ++ /* parts that don't depend on the number of CPUs */ ++ memcpy(&arch_hdr, (void *)hdr_buf, offset); ++ ++ /* registers */ ++ memcpy(&arch_hdr.dha_smp_regs, (void *)&hdr_buf[offset], ++ nr_cpus * sizeof(struct pt_regs)); ++ offset += nr_cpus * sizeof(struct pt_regs); ++ ++ /* current task */ ++ memcpy(&arch_hdr.dha_smp_current_task, (void *)&hdr_buf[offset], ++ nr_cpus * sizeof(&arch_hdr.dha_smp_current_task[0])); ++ offset += nr_cpus * sizeof(&arch_hdr.dha_smp_current_task[0]); ++ ++ /* stack */ ++ memcpy(&arch_hdr.dha_stack, (void *)&hdr_buf[offset], ++ nr_cpus * sizeof(&arch_hdr.dha_stack[0])); ++ offset += nr_cpus * sizeof(&arch_hdr.dha_stack[0]); ++ ++ /* stack_ptr */ ++ memcpy(&arch_hdr.dha_stack_ptr, (void *)&hdr_buf[offset], ++ nr_cpus * sizeof(&arch_hdr.dha_stack_ptr[0])); ++ offset += nr_cpus * sizeof(&arch_hdr.dha_stack_ptr[0]); ++ ++ if (arch_hdr.dha_magic_number != DUMP_ASM_MAGIC_NUMBER) { ++ fprintf(stderr, "Invalid magic number for x86_64\n"); ++ goto err; ++ } ++ ++ /* ++ * read the kernel load address on IA64 -- other architectures have ++ * no relocatable kernel at the lifetime of LKCD ++ */ ++#ifdef IA64 ++ memcpy(&arch_hdr.dha_kernel_addr, (void *)&hdr_buf[offset], sizeof(uint64_t)); ++#endif ++ ++ memcpy(&dump_header_asm_v8, &arch_hdr, sizeof(dump_header_asm_t)); ++ ++ return 0; ++ ++err: ++ free(hdr_buf); ++ return -1; ++} ++ ++#else /* architecture that has no lkcd_dump_init_v8 */ ++ ++int ++lkcd_dump_init_v8_arch(dump_header_t *dh) ++{ ++ return 0; ++} ++ ++#endif ++ ++ + + /* + * Verify and initialize the LKCD environment, storing the common data +@@ -56,17 +232,26 @@ + if (read(lkcd->fd, dh, sizeof(dump_header_t)) != + sizeof(dump_header_t)) + return FALSE; +- if ((dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9) ++ if ((dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9){ + if (read(lkcd->fd, &dh_dump_buffer_size, sizeof(dh_dump_buffer_size)) != + sizeof(dh_dump_buffer_size)) + return FALSE; ++ lkcd_offset_to_first_page = dh_dump_buffer_size; ++ } else ++ lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE; + + lkcd->dump_page = dp; + lkcd->dump_header = dh; + if (lkcd->debug) + dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY); ++ ++ if (lkcd_dump_init_v8_arch(dh) != 0) { ++ fprintf(stderr, "Warning: Failed to initialise " ++ "arch specific dump code\n"); ++ } ++ + #ifdef IA64 +- if ( (fix_addr_v8(fd) 
== -1) ) ++ if ( (fix_addr_v8(&dump_header_asm_v8) == -1) ) + return FALSE; + #endif + +@@ -146,7 +331,7 @@ + lkcd->compression = dh->dh_dump_compress; + lkcd->page_header_size = sizeof(dump_page_t); + +- lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET); ++ lseek(lkcd->fd, lkcd_offset_to_first_page, SEEK_SET); + + /* + * Read all of the pages and save the page offsets for lkcd_lseek(). +@@ -483,7 +668,7 @@ + /* + * Determine the granularity between offsets. + */ +- if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, ++ if (lseek(lkcd->fd, page_headers[0] + lkcd_offset_to_first_page, + SEEK_SET) == -1) + return; + if (read(lkcd->fd, dp, lkcd->page_header_size) != +@@ -491,7 +676,7 @@ + return; + physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; + +- if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE, ++ if (lseek(lkcd->fd, page_headers[1] + lkcd_offset_to_first_page, + SEEK_SET) == -1) + return; + if (read(lkcd->fd, dp, lkcd->page_header_size) +@@ -508,7 +693,7 @@ + for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { + if (!page_headers[i]) + break; +- lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE; ++ lkcd->curhdroffs = page_headers[i] + lkcd_offset_to_first_page; + set_mb_benchmark((granularity * (i+1))/lkcd->page_size); + } + } +--- crash/xen_hyper_defs.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/xen_hyper_defs.h 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,970 @@ ++/* ++ * xen_hyper_defs.h ++ * ++ * Portions Copyright (C) 2006-2007 Fujitsu Limited ++ * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. ++ * ++ * Authors: Itsuro Oda ++ * Fumihiko Kakuma ++ * ++ * This file is part of Xencrash. ++ * ++ * Xencrash is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation (version 2 of the License). ++ * ++ * Xencrash is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with Xencrash; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
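
One detail in the lkcd_v8.c hunk above is worth flagging: get_lkcd_regs_for_cpu_v8() computes cpu from bt->tc->processor before the !bt || !bt->tc guard runs, so that guard can never actually catch a NULL pointer. A reordered sketch, keeping the same crash-internal types and helpers the hunk already uses (defs.h assumed), with only the dereference moved:

    int
    get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp)
    {
            int cpu;

            if (!bt || !bt->tc) {
                    fprintf(stderr,
                        "get_lkcd_regs_for_cpu_v8: invalid bt_info/tc\n");
                    return -EINVAL;
            }

            cpu = bt->tc->processor;   /* dereference only after the check */

            if (cpu >= NR_CPUS) {
                    fprintf(stderr,
                        "get_lkcd_regs_for_cpu_v8, cpu (%d) too high\n", cpu);
                    return -EINVAL;
            }

            return get_lkcd_regs_for_cpu_arch(cpu, eip, esp);
    }
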
++ */ ++ ++#ifdef XEN_HYPERVISOR_ARCH ++ ++#include ++#include ++ ++#ifdef X86 ++/* Xen Hypervisor address space layout */ ++#define IOREMAP_VIRT_END (0UL) ++#define IOREMAP_VIRT_START (0xFFC00000UL) ++#define DIRECTMAP_VIRT_END IOREMAP_VIRT_START ++#define DIRECTMAP_VIRT_START (0xFF000000UL) ++#define MAPCACHE_VIRT_END DIRECTMAP_VIRT_START ++#define MAPCACHE_VIRT_START (0xFFC00000UL) ++#define PERDOMAIN_VIRT_END DIRECTMAP_VIRT_START ++#define PERDOMAIN_VIRT_START (0xFE800000UL) ++#define SH_LINEAR_PT_VIRT_END PERDOMAIN_VIRT_START ++#define SH_LINEAR_PT_VIRT_START (0xFE400000UL) ++#define SH_LINEAR_PT_VIRT_START_PAE (0xFE000000UL) ++#define LINEAR_PT_VIRT_END SH_LINEAR_PT_VIRT_START ++#define LINEAR_PT_VIRT_START (0xFE000000UL) ++#define LINEAR_PT_VIRT_START_PAE (0xFD800000UL) ++#define RDWR_MPT_VIRT_END LINEAR_PT_VIRT_START ++#define RDWR_MPT_VIRT_START (0xFDC00000UL) ++#define RDWR_MPT_VIRT_START_PAE (0xFC800000UL) ++#define FRAMETABLE_VIRT_END RDWR_MPT_VIRT_START ++#define FRAMETABLE_VIRT_START (0xFC400000UL) ++#define FRAMETABLE_VIRT_START_PAE (0xF6800000UL) ++#define RO_MPT_VIRT_END FRAMETABLE_VIRT_START ++#define RO_MPT_VIRT_START (0xFC000000UL) ++#define RO_MPT_VIRT_START_PAE (0xF5800000UL) ++ ++#define HYPERVISOR_VIRT_START RO_MPT_VIRT_START ++#define HYPERVISOR_VIRT_START_PAE RO_MPT_VIRT_START_PAE ++#endif ++ ++#ifdef X86_64 ++#define HYPERVISOR_VIRT_START (0xffff800000000000) ++#define HYPERVISOR_VIRT_END (0xffff880000000000) ++#define DIRECTMAP_VIRT_START (0xffff830000000000) ++#define DIRECTMAP_VIRT_END (0xffff840000000000) ++#define PAGE_OFFSET_XEN_HYPER DIRECTMAP_VIRT_START ++#endif ++ ++#ifdef IA64 ++#define HYPERVISOR_VIRT_START (0xe800000000000000) ++#define HYPERVISOR_VIRT_END (0xf800000000000000) ++#define DEFAULT_SHAREDINFO_ADDR (0xf100000000000000) ++#define PERCPU_PAGE_SIZE 65536 ++#define PERCPU_ADDR (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE) ++#define DIRECTMAP_VIRT_START (0xf000000000000000) ++#define DIRECTMAP_VIRT_END PERCPU_ADDR ++#define VIRT_FRAME_TABLE_SIZE (0x0100000000000000) ++ ++#define PERCPU_VIRT_ADDR(vaddr) \ ++ (((vaddr) >= PERCPU_ADDR) && ((vaddr) < PERCPU_ADDR + PERCPU_PAGE_SIZE)) ++ ++#define FRAME_TABLE_VIRT_ADDR(vaddr) \ ++ ((vaddr) >= xhmachdep->frame_table && (vaddr) < xhmachdep->frame_table + VIRT_FRAME_TABLE_SIZE) ++ ++#undef IA64_RBS_OFFSET ++#define IA64_RBS_OFFSET ((XEN_HYPER_SIZE(vcpu) + 15) & ~15) ++ ++#endif /* IA64 */ ++ ++#define DIRECTMAP_VIRT_ADDR(vaddr) \ ++ (((vaddr) >= DIRECTMAP_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_END)) ++ ++typedef uint16_t domid_t; ++typedef uint32_t Elf_Word; ++ ++/* ++ * NOTE kakuma: The following defines are temporary version for ++ * elf note format which is used only in crash. 
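
As a quick illustration of the x86_64 layout constants above: a hypervisor directmap address is recognized with a simple range check and, on the assumption implied by PAGE_OFFSET_XEN_HYPER being defined as DIRECTMAP_VIRT_START, translates to a machine address by subtracting that base. A self-contained sketch with the same values (the translation is an assumption drawn from the macro, not something this header states):

    #include <stdint.h>

    #define XH_DIRECTMAP_VIRT_START 0xffff830000000000ULL
    #define XH_DIRECTMAP_VIRT_END   0xffff840000000000ULL

    /* Range check, mirroring DIRECTMAP_VIRT_ADDR() above. */
    static inline int
    xh_directmap_virt_addr(uint64_t vaddr)
    {
            return vaddr >= XH_DIRECTMAP_VIRT_START &&
                   vaddr <  XH_DIRECTMAP_VIRT_END;
    }

    /* Assumed translation for directmap addresses: fixed offset from the base. */
    static inline uint64_t
    xh_directmap_to_machine(uint64_t vaddr)
    {
            return vaddr - XH_DIRECTMAP_VIRT_START;
    }
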
++ */ ++#define XEN_HYPER_ELF_NOTE_V1 1 ++#define XEN_HYPER_ELF_NOTE_V2 2 ++#define XEN_HYPER_ELF_NOTE_V3 3 ++#define XEN_HYPER_ELF_NOTE_V4 4 ++ ++#ifdef X86 ++#define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE 0x100 ++#endif ++#if defined(X86_64) || defined(IA64) ++#define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE 0x200 ++#endif ++ ++/* ++ * Xen Hyper ++ */ ++#define XEN_HYPER_SMP (0x400) ++ ++#ifdef X86 ++#define XEN_HYPER_MAX_VIRT_CPUS (32) ++#define XEN_HYPER_HZ 100 ++#endif ++#ifdef X86_64 ++#define XEN_HYPER_MAX_VIRT_CPUS (32) ++#define XEN_HYPER_HZ 100 ++#endif ++#ifdef IA64 ++#define XEN_HYPER_MAX_VIRT_CPUS (64) ++#define XEN_HYPER_HZ 100 ++#endif ++#ifndef XEN_HYPER_MAX_VIRT_CPUS ++#define XEN_HYPER_MAX_VIRT_CPUS (1) ++#endif ++ ++#if defined(X86) || defined(X86_64) ++#define XEN_HYPER_PERCPU_SHIFT 12 ++#define xen_hyper_per_cpu(var, cpu) \ ++ ((ulong)(var) + (((ulong)(cpu))<flags & XEN_HYPER_SMP) ? \ ++ (ulong)(var) + (xht->__per_cpu_offset[cpu]) : \ ++ (ulong)(var)) ++#endif ++ ++#if defined(X86) || defined(X86_64) ++#define XEN_HYPER_STACK_ORDER 2 ++#if 0 ++#define XEN_HYPER_STACK_SIZE (machdep->pagesize << XEN_HYPER_STACK_ORDER) ++#endif ++#define XEN_HYPER_GET_CPU_INFO(sp) \ ++ ((sp & ~(STACKSIZE()-1)) | \ ++ (STACKSIZE() - XEN_HYPER_SIZE(cpu_info))) ++#endif ++ ++#define XEN_HYPER_CONRING_SIZE 16384 ++ ++/* system time */ ++#define XEN_HYPER_NANO_TO_SEC(ns) ((ulonglong)((ns) / 1000000000ULL)) ++#define XEN_HYPER_MICR_TO_SEC(us) ((ulonglong)((us) / 1000000ULL)) ++#define XEN_HYPER_MILI_TO_SEC(ms) ((ulonglong)((ms) / 1000ULL)) ++ ++/* ++ * Domain ++ */ ++/* Prepared domain ID. */ ++#define XEN_HYPER_DOMID_IO (0x7FF1U) ++#define XEN_HYPER_DOMID_XEN (0x7FF2U) ++ ++/* Domain flags (domain_flags). */ ++ /* Is this domain privileged? */ ++#define XEN_HYPER__DOMF_privileged 0 ++#define XEN_HYPER_DOMF_privileged (1UL<= 0) ++#define XEN_HYPER_VALID_STRUCT(X) (xen_hyper_size_table.X >= 0) ++#define XEN_HYPER_VALID_MEMBER(X) (xen_hyper_offset_table.X >= 0) ++ ++#define XEN_HYPER_ASSIGN_SIZE(X) (xen_hyper_size_table.X) ++#define XEN_HYPER_ASSIGN_OFFSET(X) (xen_hyper_offset_table.X) ++ ++#define XEN_HYPER_STRUCT_SIZE_INIT(X, Y) (XEN_HYPER_ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) ++#define XEN_HYPER_MEMBER_SIZE_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) ++#define XEN_HYPER_MEMBER_OFFSET_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) ++ ++/* ++ * System ++ */ ++#define XEN_HYPER_MAX_CPUS() (xht->max_cpus) ++#define XEN_HYPER_CRASHING_CPU() (xht->crashing_cpu) ++ ++/* ++ * Dump information ++ */ ++#define XEN_HYPER_X86_NOTE_EIP(regs) (regs[12]) ++#define XEN_HYPER_X86_NOTE_ESP(regs) (regs[15]) ++#define XEN_HYPER_X86_64_NOTE_RIP(regs) (regs[16]) ++#define XEN_HYPER_X86_64_NOTE_RSP(regs) (regs[19]) ++ ++/* ++ * Domain ++ */ ++#define XEN_HYPER_DOMAIN_F_INIT 0x1 ++ ++#define XEN_HYPER_NR_DOMAINS() (xht->domains) ++#define XEN_HYPER_RUNNING_DOMAINS() (xhdt->running_domains) ++ ++/* ++ * Phisycal CPU ++ */ ++#define XEN_HYPER_NR_PCPUS() (xht->pcpus) ++#define for_cpu_indexes(i, cpuid) \ ++ for (i = 0, cpuid = xht->cpu_idxs[i]; \ ++ i < XEN_HYPER_NR_PCPUS(); \ ++ cpuid = xht->cpu_idxs[++i]) ++#define XEN_HYPER_CURR_VCPU(pcpuid) \ ++ (xen_hyper_get_active_vcpu_from_pcpuid(pcpuid)) ++ ++/* ++ * VCPU ++ */ ++#define XEN_HYPER_VCPU_F_INIT 0x1 ++ ++#define XEN_HYPER_NR_VCPUS_IN_DOM(domain_context) (domain_context->vcpu_cnt) ++#define XEN_HYPER_VCPU_LAST_CONTEXT() (xhvct->last) ++ ++/* ++ * tools ++ */ ++#define XEN_HYPER_PRI(fp, len, str, buf, flag, args) \ ++ sprintf args; \ ++ 
xen_hyper_fpr_indent(fp, len, str, buf, flag); ++#define XEN_HYPER_PRI_CONST(fp, len, str, flag) \ ++ xen_hyper_fpr_indent(fp, len, str, NULL, flag); ++ ++#define XEN_HYPER_PRI_L (0x0) ++#define XEN_HYPER_PRI_R (0x1) ++#define XEN_HYPER_PRI_LF (0x2) ++ ++/* ++ * Global data ++ */ ++extern struct xen_hyper_machdep_table *xhmachdep; ++extern struct xen_hyper_table *xht; ++extern struct xen_hyper_dumpinfo_table *xhdit; ++extern struct xen_hyper_domain_table *xhdt; ++extern struct xen_hyper_vcpu_table *xhvct; ++extern struct xen_hyper_pcpu_table *xhpct; ++extern struct xen_hyper_sched_table *xhscht; ++extern struct xen_hyper_symbol_table_data *xhsymt; ++ ++extern struct xen_hyper_offset_table xen_hyper_offset_table; ++extern struct xen_hyper_size_table xen_hyper_size_table; ++ ++extern struct command_table_entry xen_hyper_command_table[]; ++extern struct task_context fake_tc; ++ ++/* ++ * Xen Hyper command help ++ */ ++extern char *xen_hyper_help_domain[]; ++extern char *xen_hyper_help_doms[]; ++extern char *xen_hyper_help_dumpinfo[]; ++extern char *xen_hyper_help_log[]; ++extern char *xen_hyper_help_pcpus[]; ++extern char *xen_hyper_help_sched[]; ++extern char *xen_hyper_help_sys[]; ++extern char *xen_hyper_help_vcpu[]; ++extern char *xen_hyper_help_vcpus[]; ++ ++/* ++ * Prototype ++ */ ++ulonglong xen_hyper_get_uptime_hyper(void); ++ ++/* ++ * x86 ++ */ ++int xen_hyper_x86_get_smp_cpus(void); ++uint64_t xen_hyper_x86_memory_size(void); ++ ++/* ++ * IA64 ++ */ ++int xen_hyper_ia64_get_smp_cpus(void); ++uint64_t xen_hyper_ia64_memory_size(void); ++ulong xen_hyper_ia64_processor_speed(void); ++ ++/* ++ * Xen Hyper ++ */ ++void xen_hyper_init(void); ++void xen_hyper_domain_init(void); ++void xen_hyper_vcpu_init(void); ++void xen_hyper_dumpinfo_init(void); ++void xen_hyper_misc_init(void); ++void xen_hyper_post_init(void); ++struct xen_hyper_dumpinfo_context *xen_hyper_id_to_dumpinfo_context(uint id); ++struct xen_hyper_dumpinfo_context *xen_hyper_note_to_dumpinfo_context(ulong note); ++char *xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type); ++ ++/* domain */ ++void xen_hyper_refresh_domain_context_space(void); ++int xen_hyper_get_domains(void); ++char *xen_hyper_get_domain_next(int mod, ulong *next); ++domid_t xen_hyper_domain_to_id(ulong domain); ++char *xen_hyper_id_to_domain_struct(domid_t id); ++struct xen_hyper_domain_context * ++xen_hyper_domain_to_domain_context(ulong domain); ++struct xen_hyper_domain_context * ++xen_hyper_id_to_domain_context(domid_t id); ++struct xen_hyper_domain_context * ++xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc, ++ ulong domain, char *dp); ++char *xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc); ++char *xen_hyper_read_domain(ulong domain); ++char *xen_hyper_read_domain_verify(ulong domain); ++char *xen_hyper_fill_domain_struct(ulong domain, char *domain_struct); ++void xen_hyper_alloc_domain_context_space(int domains); ++ulong xen_hyper_domain_state(struct xen_hyper_domain_context *dc); ++ ++/* vcpu */ ++void xen_hyper_refresh_vcpu_context_space(void); ++struct xen_hyper_vcpu_context * ++xen_hyper_vcpu_to_vcpu_context(ulong vcpu); ++struct xen_hyper_vcpu_context * ++xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid); ++struct xen_hyper_vcpu_context_array * ++xen_hyper_domain_to_vcpu_context_array(ulong domain); ++struct xen_hyper_vcpu_context_array * ++xen_hyper_domid_to_vcpu_context_array(domid_t id); ++struct xen_hyper_vcpu_context * ++xen_hyper_store_vcpu_context(struct 
xen_hyper_vcpu_context *vcc, ++ ulong vcpu, char *vcp); ++char * ++xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc); ++char *xen_hyper_read_vcpu(ulong vcpu); ++char *xen_hyper_read_vcpu_verify(ulong vcpu); ++char *xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct); ++void xen_hyper_alloc_vcpu_context_arrays_space(int domains); ++void xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus); ++int xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc); ++ ++/* pcpu */ ++#if defined(X86) || defined(X86_64) ++void xen_hyper_x86_pcpu_init(void); ++#elif defined(IA64) ++void xen_hyper_ia64_pcpu_init(void); ++#endif ++struct xen_hyper_pcpu_context *xen_hyper_id_to_pcpu_context(uint id); ++struct xen_hyper_pcpu_context *xen_hyper_pcpu_to_pcpu_context(ulong pcpu); ++struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc, ++ ulong pcpu, char *pcp); ++struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc, ++ ulong init_tss, char *tss); ++char *xen_hyper_read_pcpu(ulong pcpu); ++char *xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct); ++void xen_hyper_alloc_pcpu_context_space(int pcpus); ++ ++/* others */ ++char *xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86); ++char *xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64); ++int xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc); ++void xen_hyper_print_bt_header(FILE *out, ulong pcpu, int newline); ++ulong xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpu); ++ulong xen_hyper_pcpu_to_active_vcpu(ulong pcpu); ++void xen_hyper_get_cpu_info(void); ++int xen_hyper_test_pcpu_id(uint pcpu_id); ++ ++/* ++ * Xen Hyper command ++ */ ++void xen_hyper_cmd_help(void); ++void xen_hyper_cmd_domain(void); ++void xen_hyper_cmd_doms(void); ++void xen_hyper_cmd_dumpinfo(void); ++void xen_hyper_cmd_log(void); ++void xen_hyper_dump_log(void); ++void xen_hyper_cmd_pcpus(void); ++void xen_hyper_cmd_sched(void); ++void xen_hyper_cmd_sys(void); ++void xen_hyper_cmd_vcpu(void); ++void xen_hyper_cmd_vcpus(void); ++void xen_hyper_display_sys_stats(void); ++ ++void xen_hyper_show_vcpu_context(struct xen_hyper_vcpu_context *vcc); ++char *xen_hyper_domain_state_string(struct xen_hyper_domain_context *dc, ++ char *buf, int verbose); ++char *xen_hyper_vcpu_state_string(struct xen_hyper_vcpu_context *vcc, ++ char *buf, int verbose); ++ ++/* tools */ ++void xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag); ++ ++#else ++ ++#define XEN_HYPERVISOR_NOT_SUPPORTED \ ++ "Xen hypervisor mode not supported on this architecture\n" ++ ++#endif +--- crash/lkcd_v7.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_v7.c 2008-01-04 09:42:08.000000000 -0500 +@@ -89,7 +89,11 @@ + ifd = 0; + + #ifdef LKCD_INDEX_FILE +- lkcd->memory_pages = (dh->dh_memory_size * (getpagesize()/lkcd->page_size)) * 2; ++ if (dh->dh_memory_end < 0x1000000000LL) { ++ lkcd->memory_pages = dh->dh_memory_end / lkcd->page_size + 1; ++ } else { ++ lkcd->memory_pages = (dh->dh_memory_size * (getpagesize()/lkcd->page_size)) * 2; ++ } + dump_index_size = (lkcd->memory_pages * sizeof(off_t)); + lkcd->page_offsets = 0; + strcpy(dumpfile_index_name, dumpfile); +--- crash/filesys.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/filesys.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* filesys.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. 
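
For scale, the lkcd_v7.c index-sizing change above works out as follows: when dh_memory_end is below 0x1000000000 (64 GB), the dump index is sized directly from the end address, so a 4 GB dump with 4 KB dump pages needs 0x100000000 / 0x1000 + 1 = 1,048,577 slots, roughly 8 MB of off_t entries; larger dumps keep the old dh_memory_size-based estimate. A small worked sketch of the new branch (values are illustrative, and sizeof(uint64_t) stands in for sizeof(off_t)):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values: a 4 GB dump with 4 KB dump pages. */
            uint64_t dh_memory_end = 0x100000000ULL;
            uint64_t page_size     = 4096;

            /* New small-dump path from the hunk above. */
            uint64_t memory_pages  = dh_memory_end / page_size + 1;

            printf("index slots: %llu, index size: %llu bytes\n",
                   (unsigned long long)memory_pages,
                   (unsigned long long)(memory_pages * sizeof(uint64_t)));
            return 0;
    }
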
+- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -18,7 +18,7 @@ + #include "defs.h" + #include + +-static void show_mounts(ulong, int); ++static void show_mounts(ulong, int, struct task_context *); + static int find_booted_kernel(void); + static int find_booted_system_map(void); + static int verify_utsname(char *); +@@ -33,7 +33,7 @@ + static int open_file_reference(struct reference *); + static void memory_source_init(void); + static int get_pathname_component(ulong, ulong, int, char *, char *); +-static ulong *get_mount_list(int *); ++static ulong *get_mount_list(int *, struct task_context *); + char *inode_type(char *, char *); + static void match_proc_version(void); + static void get_live_memory_source(void); +@@ -43,6 +43,7 @@ + static int memory_driver_init(void); + static int create_memory_device(dev_t); + static void *radix_tree_lookup(ulong, ulong, int); ++static int match_file_string(char *, char *, char *); + + #define DENTRY_CACHE (20) + #define INODE_CACHE (20) +@@ -99,6 +100,10 @@ + } + + if (pc->namelist) { ++ if (XEN_HYPER_MODE() && !pc->dumpfile) ++ error(FATAL, ++ "Xen hypervisor mode requires a dumpfile\n"); ++ + if (!pc->dumpfile && !get_proc_version()) + error(INFO, "/proc/version: %s\n", + strerror(errno)); +@@ -190,7 +195,15 @@ + if (!netdump_init(pc->dumpfile, fp)) + error(FATAL, "%s: initialization failed\n", + pc->dumpfile); +- } else if (pc->flags & NETDUMP) { ++ } else if (pc->flags & KDUMP) { ++ if (!kdump_init(pc->dumpfile, fp)) ++ error(FATAL, "%s: initialization failed\n", ++ pc->dumpfile); ++ } else if (pc->flags & XENDUMP) { ++ if (!xendump_init(pc->dumpfile, fp)) ++ error(FATAL, "%s: initialization failed\n", ++ pc->dumpfile); ++ } else if (pc->flags & DISKDUMP) { + if (!diskdump_init(pc->dumpfile, fp)) + error(FATAL, "%s: initialization failed\n", + pc->dumpfile); +@@ -217,10 +230,7 @@ + static void + match_proc_version(void) + { +- char command[BUFSIZE]; +- char buffer[BUFSIZE]; +- FILE *pipe; +- int found; ++ char buffer[BUFSIZE], *p1, *p2; + + if (pc->flags & KERNEL_DEBUG_QUERY) + return; +@@ -228,24 +238,7 @@ + if (!strlen(kt->proc_version)) + return; + +- sprintf(command, "/usr/bin/strings %s", pc->namelist); +- if ((pipe = popen(command, "r")) == NULL) { +- error(INFO, "%s: %s\n", pc->namelist, strerror(errno)); +- return; +- } +- +- found = FALSE; +- while (fgets(buffer, BUFSIZE-1, pipe)) { +- if (!strstr(buffer, "Linux version 2.")) +- continue; +- +- if (STREQ(buffer, kt->proc_version)) +- found = TRUE; +- break; +- } +- pclose(pipe); +- +- if (found) { ++ if (match_file_string(pc->namelist, kt->proc_version, buffer)) { + if (CRASHDEBUG(1)) { + fprintf(fp, "/proc/version:\n%s", kt->proc_version); + fprintf(fp, "%s:\n%s", pc->namelist, buffer); +@@ -253,7 +246,29 @@ + return; + } + +- if (find_booted_system_map()) ++ error(WARNING, "%s%sand /proc/version do not match!\n\n", ++ pc->namelist, ++ strlen(pc->namelist) > 39 ? "\n " : " "); ++ ++ /* ++ * find_booted_system_map() requires VTOP(), which used to be a ++ * hardwired masking of the kernel address. 
But some architectures ++ * may not know what their physical base address is at this point, ++ * and others may have different machdep->kvbase values, so for all ++ * but the 0-based kernel virtual address architectures, bail out ++ * here with a relevant error message. ++ */ ++ if (!machine_type("S390") && !machine_type("S390X")) { ++ p1 = &kt->proc_version[strlen("Linux version ")]; ++ p2 = strstr(p1, " "); ++ *p2 = NULLCHAR; ++ error(WARNING, "/proc/version indicates kernel version: %s\n", p1); ++ error(FATAL, "please use the vmlinux file for that kernel version, or try using\n" ++ " the System.map for that kernel version as an additional argument.\n", p1); ++ clean_exit(1); ++ } ++ ++ if (find_booted_system_map()) + pc->flags |= SYSMAP; + } + +@@ -303,14 +318,12 @@ + for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) + cnt++; + +- if ((searchdirs = (char **)malloc(cnt * sizeof(char *))) +- == NULL) { ++ if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) { + error(INFO, "/usr/src/ directory list malloc: %s\n", + strerror(errno)); + closedir(dirp); + return default_searchdirs; + } +- BZERO(searchdirs, cnt * sizeof(char *)); + + for (i = 0; i < DEFAULT_SEARCHDIRS; i++) + searchdirs[i] = default_searchdirs[i]; +@@ -345,6 +358,16 @@ + closedir(dirp); + + searchdirs[cnt] = NULL; ++ } else { ++ if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) { ++ error(INFO, "search directory list malloc: %s\n", ++ strerror(errno)); ++ closedir(dirp); ++ return default_searchdirs; ++ } ++ for (i = 0; i < DEFAULT_SEARCHDIRS; i++) ++ searchdirs[i] = default_searchdirs[i]; ++ cnt = DEFAULT_SEARCHDIRS; + } + + if (redhat_kernel_directory_v1(dirbuf)) { +@@ -483,13 +506,11 @@ + find_booted_kernel(void) + { + char kernel[BUFSIZE]; +- char command[BUFSIZE]; + char buffer[BUFSIZE]; + char **searchdirs; + int i, preferred, wrapped; + DIR *dirp; + struct dirent *dp; +- FILE *pipe; + int found; + + pc->flags |= FINDKERNEL; +@@ -538,24 +559,11 @@ + !is_elf_file(kernel)) + continue; + +- sprintf(command, "/usr/bin/strings %s", kernel); +- if ((pipe = popen(command, "r")) == NULL) { +- error(INFO, "%s: %s\n", +- kernel, strerror(errno)); +- continue; +- } +- + if (CRASHDEBUG(1)) + fprintf(fp, "find_booted_kernel: check: %s\n", + kernel); + +- while (fgets(buffer, BUFSIZE-1, pipe)) { +- if (STREQ(buffer, kt->proc_version)) { +- found = TRUE; +- break; +- } +- } +- pclose(pipe); ++ found = match_file_string(kernel, kt->proc_version, buffer); + + if (found) + break; +@@ -797,30 +805,14 @@ + static int + verify_utsname(char *system_map) + { +- char command[BUFSIZE]; + char buffer[BUFSIZE]; +- FILE *pipe; +- int found; + ulong value; + struct new_utsname new_utsname; + +- sprintf(command, "/usr/bin/strings %s", system_map); +- if ((pipe = popen(command, "r")) == NULL) +- return FALSE; +- + if (CRASHDEBUG(1)) + fprintf(fp, "verify_utsname: check: %s\n", system_map); + +- found = FALSE; +- while (fgets(buffer, BUFSIZE-1, pipe)) { +- if (strstr(buffer, "D system_utsname")) { +- found = TRUE; +- break; +- } +- } +- pclose(pipe); +- +- if (!found) ++ if (!match_file_string(system_map, "D system_utsname", buffer)) + return FALSE; + + if (extract_hex(buffer, &value, NULLCHAR, TRUE) && +@@ -1125,6 +1117,8 @@ + { + int i; + int c, found; ++ struct task_context *tc, *namespace_context; ++ ulong value; + char *spec_string; + char buf1[BUFSIZE]; + char buf2[BUFSIZE]; +@@ -1133,7 +1127,9 @@ + int flags = 0; + int save_next; + +- while ((c = getopt(argcnt, args, "if")) != EOF) { ++ namespace_context = pid_to_context(1); ++ 
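
The namespace_context plumbing added here, together with the "n:" option handled just below, lets the mount list be resolved through an arbitrary task instead of always through PID 1. A hypothetical session, assuming some task with PID 1234 exists in the dump:

    crash> mount -n 1234
    crash> mount -f -n 1234

The output is the same as plain "mount" / "mount -f", only walked from that task's mount namespace; the PID and the resulting list obviously depend on the dump.
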
++ while ((c = getopt(argcnt, args, "ifn:")) != EOF) { + switch(c) + { + case 'i': +@@ -1144,6 +1140,19 @@ + flags |= MOUNT_PRINT_FILES; + break; + ++ case 'n': ++ switch (str_to_context(optarg, &value, &tc)) { ++ case STR_PID: ++ case STR_TASK: ++ namespace_context = tc; ++ break; ++ case STR_INVALID: ++ error(FATAL, "invalid task or pid value: %s\n", ++ optarg); ++ break; ++ } ++ break; ++ + default: + argerrs++; + break; +@@ -1162,7 +1171,7 @@ + shift_string_left(spec_string, 2); + + open_tmpfile(); +- show_mounts(0, MOUNT_PRINT_ALL); ++ show_mounts(0, MOUNT_PRINT_ALL, namespace_context); + + found = FALSE; + rewind(pc->tmpfile); +@@ -1181,16 +1190,20 @@ + continue; + + for (i = 0; i < c; i++) { +- if (STREQ(arglist[i], spec_string)) ++ if (PATHEQ(arglist[i], spec_string)) + found = TRUE; + } + if (found) { + fp = pc->saved_fp; + if (flags) { + sscanf(buf2,"%lx",&vfsmount); +- show_mounts(vfsmount, flags); ++ show_mounts(vfsmount, flags, ++ namespace_context); + } else { +- fprintf(fp, mount_hdr); ++ if (!(pc->curcmd_flags & HEADER_PRINTED)) { ++ fprintf(fp, mount_hdr); ++ pc->curcmd_flags |= HEADER_PRINTED; ++ } + fprintf(fp, buf2); + } + found = FALSE; +@@ -1200,7 +1213,7 @@ + close_tmpfile(); + } while (args[++optind]); + } else +- show_mounts(0, flags); ++ show_mounts(0, flags, namespace_context); + } + + /* +@@ -1208,7 +1221,7 @@ + */ + + static void +-show_mounts(ulong one_vfsmount, int flags) ++show_mounts(ulong one_vfsmount, int flags, struct task_context *namespace_context) + { + ulong one_vfsmount_list; + long sb_s_files; +@@ -1246,7 +1259,7 @@ + mount_cnt = 1; + mntlist = &one_vfsmount_list; + } else +- mntlist = get_mount_list(&mount_cnt); ++ mntlist = get_mount_list(&mount_cnt, namespace_context); + + if (!strlen(mount_hdr)) { + devlen = strlen("DEVNAME"); +@@ -1408,11 +1421,11 @@ + * Allocate and fill a list of the currently-mounted vfsmount pointers. 
+ */ + static ulong * +-get_mount_list(int *cntptr) ++get_mount_list(int *cntptr, struct task_context *namespace_context) + { + struct list_data list_data, *ld; + int mount_cnt; +- ulong *mntlist, namespace, root; ++ ulong *mntlist, namespace, root, nsproxy, mnt_ns; + struct task_context *tc; + + ld = &list_data; +@@ -1421,9 +1434,26 @@ + if (symbol_exists("vfsmntlist")) { + get_symbol_data("vfsmntlist", sizeof(void *), &ld->start); + ld->end = symbol_value("vfsmntlist"); ++ } else if (VALID_MEMBER(task_struct_nsproxy)) { ++ tc = namespace_context; ++ ++ readmem(tc->task + OFFSET(task_struct_nsproxy), KVADDR, ++ &nsproxy, sizeof(void *), "task nsproxy", ++ FAULT_ON_ERROR); ++ if (!readmem(nsproxy + OFFSET(nsproxy_mnt_ns), KVADDR, ++ &mnt_ns, sizeof(void *), "nsproxy mnt_ns", ++ RETURN_ON_ERROR|QUIET)) ++ error(FATAL, "cannot determine mount list location!\n"); ++ if (!readmem(mnt_ns + OFFSET(mnt_namespace_root), KVADDR, ++ &root, sizeof(void *), "mnt_namespace root", ++ RETURN_ON_ERROR|QUIET)) ++ error(FATAL, "cannot determine mount list location!\n"); ++ ++ ld->start = root + OFFSET(vfsmount_mnt_list); ++ ld->end = mnt_ns + OFFSET(mnt_namespace_list); ++ + } else if (VALID_MEMBER(namespace_root)) { +- if (!(tc = pid_to_context(1))) +- tc = CURRENT_CONTEXT(); ++ tc = namespace_context; + + readmem(tc->task + OFFSET(task_struct_namespace), KVADDR, + &namespace, sizeof(void *), "task namespace", +@@ -1497,7 +1527,7 @@ + goto nopath; + + if (VALID_MEMBER(file_f_vfsmnt)) { +- mntlist = get_mount_list(&mount_cnt); ++ mntlist = get_mount_list(&mount_cnt, pid_to_context(1)); + vfsmount_buf = GETBUF(SIZE(vfsmount)); + + for (m = found = 0, vfsmnt = mntlist; +@@ -1706,15 +1736,30 @@ + MEMBER_OFFSET_INIT(fs_struct_pwd, "fs_struct", "pwd"); + MEMBER_OFFSET_INIT(fs_struct_rootmnt, "fs_struct", "rootmnt"); + MEMBER_OFFSET_INIT(fs_struct_pwdmnt, "fs_struct", "pwdmnt"); +- MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds"); +- MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset"); +- MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds"); + MEMBER_OFFSET_INIT(files_struct_open_fds_init, + "files_struct", "open_fds_init"); +- MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd"); ++ MEMBER_OFFSET_INIT(files_struct_fdt, "files_struct", "fdt"); ++ if (VALID_MEMBER(files_struct_fdt)) { ++ MEMBER_OFFSET_INIT(fdtable_max_fds, "fdtable", "max_fds"); ++ MEMBER_OFFSET_INIT(fdtable_max_fdset, "fdtable", "max_fdset"); ++ MEMBER_OFFSET_INIT(fdtable_open_fds, "fdtable", "open_fds"); ++ MEMBER_OFFSET_INIT(fdtable_fd, "fdtable", "fd"); ++ } else { ++ MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds"); ++ MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset"); ++ MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds"); ++ MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd"); ++ } + MEMBER_OFFSET_INIT(file_f_dentry, "file", "f_dentry"); + MEMBER_OFFSET_INIT(file_f_vfsmnt, "file", "f_vfsmnt"); + MEMBER_OFFSET_INIT(file_f_count, "file", "f_count"); ++ if (INVALID_MEMBER(file_f_dentry)) { ++ MEMBER_OFFSET_INIT(file_f_path, "file", "f_path"); ++ MEMBER_OFFSET_INIT(path_mnt, "path", "mnt"); ++ MEMBER_OFFSET_INIT(path_dentry, "path", "dentry"); ++ ASSIGN_OFFSET(file_f_dentry) = OFFSET(file_f_path) + OFFSET(path_dentry); ++ ASSIGN_OFFSET(file_f_vfsmnt) = OFFSET(file_f_path) + OFFSET(path_mnt); ++ } + MEMBER_OFFSET_INIT(dentry_d_inode, "dentry", "d_inode"); + MEMBER_OFFSET_INIT(dentry_d_parent, "dentry", "d_parent"); + 
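
The f_path fallback a few lines up deserves a note: on kernels where struct file dropped f_dentry/f_vfsmnt in favour of an embedded struct path, the legacy offsets are synthesized by adding the path member offsets, so every existing OFFSET(file_f_dentry) and OFFSET(file_f_vfsmnt) user keeps working unchanged. A self-contained sketch of why that composition holds (the layout below is illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct path      { void *mnt; void *dentry; };
    struct file_like { long f_count; struct path f_path; };

    int main(void)
    {
            /* offsetof(file, f_path) + offsetof(path, dentry) lands on the
             * same byte as addressing f_path.dentry directly. */
            size_t composed = offsetof(struct file_like, f_path) +
                              offsetof(struct path, dentry);

            struct file_like f;
            size_t direct = (size_t)((char *)&f.f_path.dentry - (char *)&f);

            printf("composed=%zu direct=%zu\n", composed, direct);
            return 0;
    }
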
MEMBER_OFFSET_INIT(dentry_d_covers, "dentry", "d_covers"); +@@ -1736,10 +1781,15 @@ + MEMBER_OFFSET_INIT(vfsmount_mnt_mountpoint, + "vfsmount", "mnt_mountpoint"); + MEMBER_OFFSET_INIT(namespace_root, "namespace", "root"); ++ MEMBER_OFFSET_INIT(task_struct_nsproxy, "task_struct", "nsproxy"); + if (VALID_MEMBER(namespace_root)) { + MEMBER_OFFSET_INIT(namespace_list, "namespace", "list"); + MEMBER_OFFSET_INIT(task_struct_namespace, + "task_struct", "namespace"); ++ } else if (VALID_MEMBER(task_struct_nsproxy)) { ++ MEMBER_OFFSET_INIT(nsproxy_mnt_ns, "nsproxy", "mnt_ns"); ++ MEMBER_OFFSET_INIT(mnt_namespace_root, "mnt_namespace", "root"); ++ MEMBER_OFFSET_INIT(mnt_namespace_list, "mnt_namespace", "list"); + } else if (THIS_KERNEL_VERSION >= LINUX(2,4,20)) { + if (CRASHDEBUG(2)) + fprintf(fp, "hardwiring namespace stuff\n"); +@@ -1762,6 +1812,8 @@ + STRUCT_SIZE_INIT(umode_t, "umode_t"); + STRUCT_SIZE_INIT(dentry, "dentry"); + STRUCT_SIZE_INIT(files_struct, "files_struct"); ++ if (VALID_MEMBER(files_struct_fdt)) ++ STRUCT_SIZE_INIT(fdtable, "fdtable"); + STRUCT_SIZE_INIT(file, "file"); + STRUCT_SIZE_INIT(inode, "inode"); + STRUCT_SIZE_INIT(vfsmount, "vfsmount"); +@@ -1777,8 +1829,12 @@ + + if (symbol_exists("height_to_maxindex")) { + int tmp; +- ARRAY_LENGTH_INIT(tmp, height_to_maxindex, +- "height_to_maxindex", NULL, 0); ++ if (LKCD_KERNTYPES()) ++ ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxindex", ++ "radix_tree_preload.nodes", NULL, 0); ++ else ++ ARRAY_LENGTH_INIT(tmp, height_to_maxindex, ++ "height_to_maxindex", NULL, 0); + STRUCT_SIZE_INIT(radix_tree_root, "radix_tree_root"); + STRUCT_SIZE_INIT(radix_tree_node, "radix_tree_node"); + MEMBER_OFFSET_INIT(radix_tree_root_height, +@@ -1998,8 +2054,9 @@ + open_files_dump(ulong task, int flags, struct reference *ref) + { + struct task_context *tc; +- ulong files_struct_addr; +- char *files_struct_buf; ++ ulong files_struct_addr; ++ ulong fdtable_addr = 0; ++ char *files_struct_buf, *fdtable_buf = NULL; + ulong fs_struct_addr; + char *dentry_buf, *fs_struct_buf; + ulong root_dentry, pwd_dentry; +@@ -2027,6 +2084,8 @@ + BZERO(root_pathname, BUFSIZE); + BZERO(pwd_pathname, BUFSIZE); + files_struct_buf = GETBUF(SIZE(files_struct)); ++ if (VALID_STRUCT(fdtable)) ++ fdtable_buf = GETBUF(SIZE(fdtable)); + fill_task_struct(task); + + sprintf(files_header, " FD%s%s%s%s%s%s%sTYPE%sPATH\n", +@@ -2107,24 +2166,45 @@ + + files_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_files)); + +- if (files_struct_addr) { +- readmem(files_struct_addr, KVADDR, files_struct_buf, +- SIZE(files_struct), "files_struct buffer", +- FAULT_ON_ERROR); +- +- max_fdset = INT(files_struct_buf + ++ if (files_struct_addr) { ++ readmem(files_struct_addr, KVADDR, files_struct_buf, ++ SIZE(files_struct), "files_struct buffer", ++ FAULT_ON_ERROR); ++ ++ if (VALID_MEMBER(files_struct_max_fdset)) { ++ max_fdset = INT(files_struct_buf + + OFFSET(files_struct_max_fdset)); + +- max_fds = INT(files_struct_buf + +- OFFSET(files_struct_max_fds)); +- } ++ max_fds = INT(files_struct_buf + ++ OFFSET(files_struct_max_fds)); ++ } ++ } + +- if (!files_struct_addr || max_fdset == 0 || max_fds == 0) { ++ if (VALID_MEMBER(files_struct_fdt)) { ++ fdtable_addr = ULONG(files_struct_buf + OFFSET(files_struct_fdt)); ++ ++ if (fdtable_addr) { ++ readmem(fdtable_addr, KVADDR, fdtable_buf, ++ SIZE(fdtable), "fdtable buffer", FAULT_ON_ERROR); ++ if (VALID_MEMBER(fdtable_max_fdset)) ++ max_fdset = INT(fdtable_buf + ++ OFFSET(fdtable_max_fdset)); ++ else ++ max_fdset = -1; ++ max_fds = INT(fdtable_buf + ++ 
OFFSET(fdtable_max_fds)); ++ } ++ } ++ ++ if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) || ++ !files_struct_addr || max_fdset == 0 || max_fds == 0) { + if (ref) { + if (ref->cmdflags & FILES_REF_FOUND) + fprintf(fp, "\n"); + } else + fprintf(fp, "No open files\n"); ++ if (fdtable_buf) ++ FREEBUF(fdtable_buf); + FREEBUF(files_struct_buf); + return; + } +@@ -2146,8 +2226,12 @@ + } + } + +- open_fds_addr = ULONG(files_struct_buf + +- OFFSET(files_struct_open_fds)); ++ if (VALID_MEMBER(fdtable_open_fds)) ++ open_fds_addr = ULONG(fdtable_buf + ++ OFFSET(fdtable_open_fds)); ++ else ++ open_fds_addr = ULONG(files_struct_buf + ++ OFFSET(files_struct_open_fds)); + + if (open_fds_addr) { + if (VALID_MEMBER(files_struct_open_fds_init) && +@@ -2157,16 +2241,21 @@ + OFFSET(files_struct_open_fds_init), + &open_fds, sizeof(fd_set)); + else +- readmem(open_fds_addr, KVADDR, &open_fds, +- sizeof(fd_set), "files_struct open_fds", ++ readmem(open_fds_addr, KVADDR, &open_fds, ++ sizeof(fd_set), "fdtable open_fds", + FAULT_ON_ERROR); + } + +- fd = ULONG(files_struct_buf + OFFSET(files_struct_fd)); ++ if (VALID_MEMBER(fdtable_fd)) ++ fd = ULONG(fdtable_buf + OFFSET(fdtable_fd)); ++ else ++ fd = ULONG(files_struct_buf + OFFSET(files_struct_fd)); + + if (!open_fds_addr || !fd) { + if (ref && (ref->cmdflags & FILES_REF_FOUND)) + fprintf(fp, "\n"); ++ if (fdtable_buf) ++ FREEBUF(fdtable_buf); + FREEBUF(files_struct_buf); + return; + } +@@ -2175,7 +2264,8 @@ + for (;;) { + unsigned long set; + i = j * __NFDBITS; +- if (i >= max_fdset || i >= max_fds) ++ if (((max_fdset >= 0) && (i >= max_fdset)) || ++ (i >= max_fds)) + break; + set = open_fds.__fds_bits[j++]; + while (set) { +@@ -2220,6 +2310,8 @@ + if (ref && (ref->cmdflags & FILES_REF_FOUND)) + fprintf(fp, "\n"); + ++ if (fdtable_buf) ++ FREEBUF(fdtable_buf); + FREEBUF(files_struct_buf); + } + +@@ -2494,6 +2586,20 @@ + } + + /* ++ * Get the vfsmnt associated with a file. ++ */ ++ulong ++file_to_vfsmnt(ulong file) ++{ ++ char *file_buf; ++ ulong vfsmnt; ++ ++ file_buf = fill_file_cache(file); ++ vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); ++ return vfsmnt; ++} ++ ++/* + * get_pathname() fills in a pathname string for an ending dentry + * See __d_path() in the kernel for help fixing problems. + */ +@@ -3575,3 +3681,29 @@ + + return TRUE; + } ++ ++static int ++match_file_string(char *filename, char *string, char *buffer) ++{ ++ int found; ++ char command[BUFSIZE]; ++ FILE *pipe; ++ ++ ++ sprintf(command, "/usr/bin/strings %s", filename); ++ if ((pipe = popen(command, "r")) == NULL) { ++ error(INFO, "%s: %s\n", filename, strerror(errno)); ++ return FALSE; ++ } ++ ++ found = FALSE; ++ while (fgets(buffer, BUFSIZE-1, pipe)) { ++ if (strstr(buffer, string)) { ++ found = TRUE; ++ break; ++ } ++ } ++ pclose(pipe); ++ ++ return found; ++} +--- crash/task.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/task.c 2008-01-17 14:44:07.000000000 -0500 +@@ -1,8 +1,8 @@ + /* task.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. 
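
With match_file_string() (defined at the end of the filesys.c hunk above), the three separate popen("/usr/bin/strings ...") loops collapse into one helper. Note that it matches with strstr(), so the comparison is a substring test rather than the exact STREQ line match some of the replaced loops performed. The call pattern, mirroring its use in match_proc_version() earlier in this hunk (crash-internal names, not standalone code):

    char buffer[BUFSIZE];

    /* TRUE when any line of "strings <namelist>" contains proc_version;
     * the matching line is left in buffer. */
    if (match_file_string(pc->namelist, kt->proc_version, buffer))
            fprintf(fp, "%s:\n%s", pc->namelist, buffer);
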
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -27,11 +27,17 @@ + static void refresh_pidhash_task_table(void); + static void refresh_pid_hash_task_table(void); + static void refresh_hlist_task_table(void); ++static void refresh_hlist_task_table_v2(void); ++static void refresh_hlist_task_table_v3(void); ++static void refresh_active_task_table(void); + static struct task_context *store_context(struct task_context *, ulong, char *); + static void refresh_context(ulong, ulong); + static void parent_list(ulong); + static void child_list(ulong); + static void show_task_times(struct task_context *, ulong); ++static void show_task_args(struct task_context *); ++static void show_task_rlimit(struct task_context *); ++static void show_tgid_list(ulong); + static int compare_start_time(const void *, const void *); + static int start_time_timespec(void); + static ulonglong convert_start_time(ulonglong, ulonglong); +@@ -46,11 +52,26 @@ + static void dump_runq(void); + static void dump_runqueues(void); + static void dump_prio_array(int, ulong, char *); ++struct rb_root; ++static struct rb_node *rb_first(struct rb_root *); ++struct rb_node; ++static struct rb_node *rb_next(struct rb_node *); ++static struct rb_node *rb_parent(struct rb_node *, struct rb_node *); ++static struct rb_node *rb_right(struct rb_node *, struct rb_node *); ++static struct rb_node *rb_left(struct rb_node *, struct rb_node *); ++static void dump_CFS_runqueues(void); ++static void dump_RT_prio_array(int, ulong, char *); + static void task_struct_member(struct task_context *,ulong,struct reference *); + static void signal_reference(struct task_context *, ulong, struct reference *); +-static void dump_signal_data(struct task_context *); ++static void do_sig_thread_group(ulong); ++static void dump_signal_data(struct task_context *, ulong); ++#define TASK_LEVEL (0x1) ++#define THREAD_GROUP_LEVEL (0x2) ++#define TASK_INDENT (0x4) ++static int sigrt_minmax(int *, int *); + static void signame_list(void); +-static ulonglong task_signal(ulong); ++static void sigqueue_list(ulong); ++static ulonglong task_signal(ulong, ulong*); + static ulonglong task_blocked(ulong); + static void translate_sigset(ulonglong); + static ulonglong sigaction_mask(ulong); +@@ -151,8 +172,15 @@ + get_idle_threads(&tt->idle_threads[0], kt->cpus); + } + +- MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", +- "thread_info"); ++ if (MEMBER_EXISTS("task_struct", "thread_info")) ++ MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", ++ "thread_info"); ++ else if (MEMBER_EXISTS("task_struct", "stack")) ++ MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", ++ "stack"); ++ else ++ ASSIGN_OFFSET(task_struct_thread_info) = INVALID_OFFSET; ++ + if (VALID_MEMBER(task_struct_thread_info)) { + MEMBER_OFFSET_INIT(thread_info_task, "thread_info", "task"); + MEMBER_OFFSET_INIT(thread_info_cpu, "thread_info", "cpu"); +@@ -193,6 +221,17 @@ + MEMBER_OFFSET_INIT(pid_link_pid, "pid_link", "pid"); + MEMBER_OFFSET_INIT(pid_hash_chain, "pid", "hash_chain"); + ++ STRUCT_SIZE_INIT(pid_link, "pid_link"); ++ STRUCT_SIZE_INIT(upid, "upid"); ++ if (VALID_STRUCT(upid)) { ++ MEMBER_OFFSET_INIT(upid_nr, "upid", "nr"); ++ MEMBER_OFFSET_INIT(upid_ns, "upid", "ns"); ++ MEMBER_OFFSET_INIT(upid_pid_chain, "upid", "pid_chain"); ++ MEMBER_OFFSET_INIT(pid_numbers, "pid", "numbers"); ++ MEMBER_OFFSET_INIT(pid_tasks, "pid", "tasks"); ++ tt->init_pid_ns = 
symbol_value("init_pid_ns"); ++ } ++ + MEMBER_OFFSET_INIT(pid_pid_chain, "pid", "pid_chain"); + + STRUCT_SIZE_INIT(task_struct, "task_struct"); +@@ -207,6 +246,8 @@ + + MEMBER_OFFSET_INIT(signal_struct_count, "signal_struct", "count"); + MEMBER_OFFSET_INIT(signal_struct_action, "signal_struct", "action"); ++ MEMBER_OFFSET_INIT(signal_struct_shared_pending, "signal_struct", ++ "shared_pending"); + + MEMBER_OFFSET_INIT(k_sigaction_sa, "k_sigaction", "sa"); + +@@ -217,17 +258,10 @@ + if (INVALID_MEMBER(sigpending_head)) + MEMBER_OFFSET_INIT(sigpending_list, "sigpending", "list"); + MEMBER_OFFSET_INIT(sigpending_signal, "sigpending", "signal"); ++ MEMBER_SIZE_INIT(sigpending_signal, "sigpending", "signal"); + + STRUCT_SIZE_INIT(sigqueue, "sigqueue"); +- if (VALID_STRUCT(sigqueue)) { +- MEMBER_OFFSET_INIT(sigqueue_next, "sigqueue", "next"); +- MEMBER_OFFSET_INIT(sigqueue_list, "sigqueue", "list"); +- MEMBER_OFFSET_INIT(sigqueue_info, "sigqueue", "info"); +- } else { +- STRUCT_SIZE_INIT(signal_queue, "signal_queue"); +- MEMBER_OFFSET_INIT(signal_queue_next, "signal_queue", "next"); +- MEMBER_OFFSET_INIT(signal_queue_info, "signal_queue", "info"); +- } ++ STRUCT_SIZE_INIT(signal_queue, "signal_queue"); + + STRUCT_SIZE_INIT(sighand_struct, "sighand_struct"); + if (VALID_STRUCT(sighand_struct)) +@@ -249,6 +283,19 @@ + + STRUCT_SIZE_INIT(cputime_t, "cputime_t"); + ++ if (symbol_exists("cfq_slice_async")) { ++ uint cfq_slice_async; ++ ++ get_symbol_data("cfq_slice_async", sizeof(int), ++ &cfq_slice_async); ++ machdep->hz = cfq_slice_async * 25; ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, ++ "cfq_slice_async exitsts: setting hz to %d\n", ++ machdep->hz); ++ } ++ + if (VALID_MEMBER(runqueue_arrays)) + MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct", + "run_list"); +@@ -279,12 +326,6 @@ + error(FATAL, + "pidhash and pid_hash both exist -- cannot distinquish between them\n"); + +- /* +- * NOTE: We rely on PIDTYPE_PID staying at enum value of 0, because +- * evan at the lowest level in gdb, I can't seem to find where +- * the actual value is stored via the struct type. (?) +- * Should be safe, though... 
+- */ + if (symbol_exists("pid_hash") && symbol_exists("pidhash_shift")) { + int pidhash_shift; + +@@ -302,7 +343,33 @@ + tt->refresh_task_table = refresh_pid_hash_task_table; + } else { + tt->pidhash_addr = symbol_value("pid_hash"); +- tt->refresh_task_table = refresh_hlist_task_table; ++ if (LKCD_KERNTYPES()) { ++ if (VALID_STRUCT(pid_link)) { ++ if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers)) ++ tt->refresh_task_table = ++ refresh_hlist_task_table_v3; ++ else ++ tt->refresh_task_table = ++ refresh_hlist_task_table_v2; ++ } else ++ tt->refresh_task_table = ++ refresh_hlist_task_table; ++ builtin_array_length("pid_hash", ++ tt->pidhash_len, NULL); ++ } else { ++ if (!get_array_length("pid_hash", NULL, ++ sizeof(void *)) && VALID_STRUCT(pid_link)) { ++ if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers)) ++ tt->refresh_task_table = ++ refresh_hlist_task_table_v3; ++ else ++ tt->refresh_task_table = ++ refresh_hlist_task_table_v2; ++ } ++ else ++ tt->refresh_task_table = ++ refresh_hlist_task_table; ++ } + } + + tt->flags |= PID_HASH; +@@ -343,6 +410,10 @@ + irqstacks_init(); + + get_active_set(); ++ ++ if (tt->flags & ACTIVE_ONLY) ++ tt->refresh_task_table = refresh_active_task_table; ++ + tt->refresh_task_table(); + + if (tt->flags & TASK_REFRESH_OFF) +@@ -353,8 +424,11 @@ + set_context(NO_TASK, active_pid); + tt->this_task = pid_to_task(active_pid); + } +- else ++ else { ++ please_wait("determining panic task"); + set_context(get_panic_context(), NO_PID); ++ please_wait_done(); ++ } + + sort_context_array(); + +@@ -987,9 +1061,7 @@ + return; + + if (DUMPFILE()) { /* impossible */ +- fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? +- "" : "\rplease wait... (gathering task table data)"); +- fflush(fp); ++ please_wait("gathering task table data"); + if (!symbol_exists("panic_threads")) + tt->flags |= POPULATE_PANIC; + } +@@ -1152,11 +1224,7 @@ + + FREEBUF(pid_hash); + +- if (DUMPFILE()) { +- fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : +- "\r \r"); +- fflush(fp); +- } ++ please_wait_done(); + + if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) + refresh_context(curtask, curpid); +@@ -1176,12 +1244,14 @@ + { + int i; + ulong *pid_hash; ++ struct syment *sp; + ulong pidhash_array; + ulong kpp; + char *tp; + ulong next, pnext, pprev; + char *nodebuf; + int plen, len, cnt; ++ long value; + struct task_context *tc; + ulong curtask; + ulong curpid; +@@ -1192,9 +1262,7 @@ + return; + + if (DUMPFILE()) { /* impossible */ +- fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? +- "" : "\rplease wait... (gathering task table data)"); +- fflush(fp); ++ please_wait("gathering task table data"); + if (!symbol_exists("panic_threads")) + tt->flags |= POPULATE_PANIC; + } +@@ -1211,8 +1279,21 @@ + curpid = CURRENT_PID(); + } + +- if (!(plen = get_array_length("pid_hash", NULL, sizeof(void *)))) +- error(FATAL, "cannot determine pid_hash array dimensions\n"); ++ if (!(plen = get_array_length("pid_hash", NULL, sizeof(void *)))) { ++ /* ++ * Workaround for gcc omitting debuginfo data for pid_hash. 
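
The refresh-function selection above amounts to a small decision table: pid_hash entries that resolve through struct upid (the 2.6.24 layout, with pid.numbers present) get the _v3 walker, plain struct pid_link entries get _v2, and older kernels keep the original hlist walker. A condensed restatement, ignoring the kerntypes/array-length special cases and using the crash-internal predicates exactly as the hunk does (illustrative only):

    /* Same decision the hunk above makes, written as one helper. */
    static void (*pick_refresh_func(void))(void)
    {
            if (VALID_STRUCT(pid_link)) {
                    if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers))
                            return refresh_hlist_task_table_v3;  /* 2.6.24+ upid chains */
                    return refresh_hlist_task_table_v2;          /* 2.6.17+ single pid_hash */
            }
            return refresh_hlist_task_table;                     /* older kernels */
    }
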
++ */ ++ if (enumerator_value("PIDTYPE_MAX", &value)) { ++ if ((sp = next_symbol("pid_hash", NULL)) && ++ (((sp->value - tt->pidhash_addr) / sizeof(void *)) < value)) ++ error(WARNING, "possible pid_hash array mis-handling\n"); ++ plen = (int)value; ++ } else { ++ error(WARNING, ++ "cannot determine pid_hash array dimensions\n"); ++ plen = 1; ++ } ++ } + + pid_hash = (ulong *)GETBUF(plen * sizeof(void *)); + +@@ -1228,6 +1309,16 @@ + * The zero'th (PIDTYPE_PID) entry is the hlist_head array + * that we want. + */ ++ if (CRASHDEBUG(1)) { ++ if (!enumerator_value("PIDTYPE_PID", &value)) ++ error(WARNING, ++ "possible pid_hash array mis-handling: PIDTYPE_PID: (unknown)\n"); ++ else if (value != 0) ++ error(WARNING, ++ "possible pid_hash array mis-handling: PIDTYPE_PID: %d \n", ++ value); ++ } ++ + pidhash_array = pid_hash[0]; + FREEBUF(pid_hash); + +@@ -1345,6 +1436,15 @@ + } + } + ++ if (cnt > tt->max_tasks) { ++ tt->max_tasks = cnt + TASK_SLUSH; ++ allocate_task_space(tt->max_tasks); ++ hq_close(); ++ if (!DUMPFILE()) ++ retries++; ++ goto retry_pid_hash; ++ } ++ + BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); + cnt = retrieve_list((ulong *)tt->task_local, cnt); + +@@ -1394,11 +1494,7 @@ + FREEBUF(pid_hash); + FREEBUF(nodebuf); + +- if (DUMPFILE()) { +- fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : +- "\r \r"); +- fflush(fp); +- } ++ please_wait_done(); + + if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) + refresh_context(curtask, curpid); +@@ -1406,151 +1502,751 @@ + tt->retries = MAX(tt->retries, retries); + } + +- + /* +- * Fill a task_context structure with the data from a task. If a NULL +- * task_context pointer is passed in, use the next available one. ++ * 2.6.17 replaced: ++ * static struct hlist_head *pid_hash[PIDTYPE_MAX]; ++ * with ++ * static struct hlist_head *pid_hash; + */ +-static struct task_context * +-store_context(struct task_context *tc, ulong task, char *tp) ++static void ++refresh_hlist_task_table_v2(void) + { +- pid_t *pid_addr; +- char *comm_addr; +- int *processor_addr; +- ulong *parent_addr; +- ulong *mm_addr; +- int has_cpu; +- int do_verify; +- +- if (tt->refresh_task_table == refresh_fixed_task_table) +- do_verify = 1; +- else if (tt->refresh_task_table == refresh_pid_hash_task_table) +- do_verify = 2; +- else +- do_verify = 0; ++ int i; ++ ulong *pid_hash; ++ ulong pidhash_array; ++ ulong kpp; ++ char *tp; ++ ulong next, pnext, pprev; ++ char *nodebuf; ++ int len, cnt; ++ struct task_context *tc; ++ ulong curtask; ++ ulong curpid; ++ ulong retries; ++ ulong *tlp; + +- if (!tc) +- tc = tt->context_array + tt->running_tasks; ++ if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ ++ return; + +- pid_addr = (pid_t *)(tp + OFFSET(task_struct_pid)); +- comm_addr = (char *)(tp + OFFSET(task_struct_comm)); +- if (tt->flags & THREAD_INFO) { +- tc->thread_info = ULONG(tp + OFFSET(task_struct_thread_info)); +- fill_thread_info(tc->thread_info); +- processor_addr = (int *) (tt->thread_info + +- OFFSET(thread_info_cpu)); +- } else if (VALID_MEMBER(task_struct_processor)) +- processor_addr = (int *) (tp + OFFSET(task_struct_processor)); +- else if (VALID_MEMBER(task_struct_cpu)) +- processor_addr = (int *) (tp + OFFSET(task_struct_cpu)); +- if (VALID_MEMBER(task_struct_p_pptr)) +- parent_addr = (ulong *)(tp + OFFSET(task_struct_p_pptr)); +- else +- parent_addr = (ulong *)(tp + OFFSET(task_struct_parent)); +- mm_addr = (ulong *)(tp + OFFSET(task_struct_mm)); +- has_cpu = task_has_cpu(task, tp); ++ if (DUMPFILE()) { /* impossible */ ++ 
please_wait("gathering task table data"); ++ if (!symbol_exists("panic_threads")) ++ tt->flags |= POPULATE_PANIC; ++ } + +- tc->pid = (ulong)(*pid_addr); +- BCOPY(comm_addr, &tc->comm[0], 16); +- tc->comm[16] = NULLCHAR; +- tc->processor = *processor_addr; +- tc->ptask = *parent_addr; +- tc->mm_struct = *mm_addr; +- tc->task = task; +- tc->tc_next = NULL; ++ if (ACTIVE() && !(tt->flags & TASK_REFRESH)) ++ return; + +- if (do_verify && !verify_task(tc, do_verify)) { +- error(INFO, "invalid task address: %lx\n", tc->task); +- BZERO(tc, sizeof(struct task_context)); +- return NULL; ++ /* ++ * The current task's task_context entry may change, ++ * or the task may not even exist anymore. ++ */ ++ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { ++ curtask = CURRENT_TASK(); ++ curpid = CURRENT_PID(); + } + +- if (has_cpu && (tt->flags & POPULATE_PANIC)) +- tt->panic_threads[tc->processor] = tc->task; +- +- return tc; +-} ++ get_symbol_data("pid_hash", sizeof(void *), &pidhash_array); + +-/* +- * The current context may have moved to a new spot in the task table +- * or have exited since the last command. If it still exists, reset its +- * new position. If it doesn't exist, set the context back to the initial +- * crash context. If necessary, complain and show the restored context. +- */ +-static void +-refresh_context(ulong curtask, ulong curpid) +-{ +- ulong value, complain; +- struct task_context *tc; ++ len = tt->pidhash_len; ++ pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); ++ nodebuf = GETBUF(SIZE(pid_link)); ++ retries = 0; + +- if (task_exists(curtask) && pid_exists(curpid)) { +- set_context(curtask, NO_PID); +- } else { +- set_context(tt->this_task, NO_PID); ++retry_pid_hash: ++ if (retries && DUMPFILE()) ++ error(FATAL, ++ "\ncannot gather a stable task list via pid_hash\n"); + +- complain = TRUE; +- if (STREQ(args[0], "set") && (argcnt == 2) && +- IS_A_NUMBER(args[1])) { ++ if ((retries == MAX_UNLIMITED_TASK_RETRIES) && ++ !(tt->flags & TASK_INIT_DONE)) ++ error(FATAL, ++ "\ncannot gather a stable task list via pid_hash (%d retries)\n", ++ retries); + +- switch (str_to_context(args[optind], &value, &tc)) +- { +- case STR_PID: +- case STR_TASK: +- complain = FALSE; +- break; +- case STR_INVALID: +- complain = TRUE; +- break; +- } +- } ++ if (!readmem(pidhash_array, KVADDR, pid_hash, ++ len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) ++ error(FATAL, "\ncannot read pid_hash array\n"); + +- if (complain) { +- error(INFO, "current context no longer exists -- " +- "restoring \"%s\" context:\n\n", +- pc->program_name); +- show_context(CURRENT_CONTEXT()); +- fprintf(fp, "\n"); +- } ++ if (!hq_open()) { ++ error(INFO, "cannot hash task_struct entries\n"); ++ if (!(tt->flags & TASK_INIT_DONE)) ++ clean_exit(1); ++ error(INFO, "using stale task_structs\n"); ++ FREEBUF(pid_hash); ++ return; + } +-} +- +-/* +- * Sort the task_context array by PID number; for PID 0, sort by processor. +- */ +-void +-sort_context_array(void) +-{ +- ulong curtask; +- +- curtask = CURRENT_TASK(); +- qsort((void *)tt->context_array, (size_t)tt->running_tasks, +- sizeof(struct task_context), sort_by_pid); +- set_context(curtask, NO_PID); +-} + +-static int +-sort_by_pid(const void *arg1, const void *arg2) +-{ +- struct task_context *t1, *t2; ++ /* ++ * Get the idle threads first. ++ */ ++ cnt = 0; ++ for (i = 0; i < kt->cpus; i++) { ++ if (hq_enter(tt->idle_threads[i])) ++ cnt++; ++ else ++ error(WARNING, "%sduplicate idle tasks?\n", ++ DUMPFILE() ? 
"\n" : ""); ++ } + +- t1 = (struct task_context *)arg1; +- t2 = (struct task_context *)arg2; ++ for (i = 0; i < len; i++) { ++ if (!pid_hash[i]) ++ continue; + +- if ((t1->pid == 0) && (t2->pid == 0)) +- return (t1->processor < t2->processor ? -1 : +- t1->processor == t2->processor ? 0 : 1); +- else +- return (t1->pid < t2->pid ? -1 : +- t1->pid == t2->pid ? 0 : 1); +-} ++ if (!readmem(pid_hash[i], KVADDR, nodebuf, ++ SIZE(pid_link), "pid_hash node pid_link", RETURN_ON_ERROR|QUIET)) { ++ error(INFO, "\ncannot read pid_hash node pid_link\n"); ++ if (DUMPFILE()) ++ continue; ++ hq_close(); ++ retries++; ++ goto retry_pid_hash; ++ } + ++ kpp = pid_hash[i]; ++ next = ULONG(nodebuf + OFFSET(pid_link_pid)); ++ if (next) ++ next -= OFFSET(task_struct_pids); ++ pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); ++ pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); + +-static int +-sort_by_last_run(const void *arg1, const void *arg2) +-{ +- ulong task_last_run_stamp(ulong); +- struct task_context *t1, *t2; +- ulonglong lr1, lr2; ++ if (CRASHDEBUG(1)) ++ console("pid_hash[%d]: %lx task: %lx (node: %lx) next: %lx pprev: %lx\n", ++ i, pid_hash[i], next, kpp, pnext, pprev); ++ ++ while (next) { ++ if (!IS_TASK_ADDR(next)) { ++ error(INFO, ++ "%sinvalid task address in pid_hash: %lx\n", ++ DUMPFILE() ? "\n" : "", next); ++ if (DUMPFILE()) ++ break; ++ hq_close(); ++ retries++; ++ goto retry_pid_hash; ++ ++ } ++ ++ if (!is_idle_thread(next) && !hq_enter(next)) { ++ error(INFO, ++ "%sduplicate task in pid_hash: %lx\n", ++ DUMPFILE() ? "\n" : "", next); ++ if (DUMPFILE()) ++ break; ++ hq_close(); ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ cnt++; ++ ++ if (!pnext) ++ break; ++ ++ if (!readmem((ulonglong)pnext, KVADDR, nodebuf, ++ SIZE(pid_link), "task hlist_node pid_link", RETURN_ON_ERROR|QUIET)) { ++ error(INFO, "\ncannot read hlist_node pid_link from node next\n"); ++ if (DUMPFILE()) ++ break; ++ hq_close(); ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ kpp = (ulong)pnext; ++ next = ULONG(nodebuf + OFFSET(pid_link_pid)); ++ if (next) ++ next -= OFFSET(task_struct_pids); ++ pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); ++ pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); ++ ++ if (CRASHDEBUG(1)) ++ console(" chained task: %lx (node: %lx) next: %lx pprev: %lx\n", ++ next, kpp, pnext, pprev); ++ } ++ } ++ ++ if (cnt > tt->max_tasks) { ++ tt->max_tasks = cnt + TASK_SLUSH; ++ allocate_task_space(tt->max_tasks); ++ hq_close(); ++ if (!DUMPFILE()) ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); ++ cnt = retrieve_list((ulong *)tt->task_local, cnt); ++ ++ hq_close(); ++ ++ clear_task_cache(); ++ ++ for (i = 0, tlp = (ulong *)tt->task_local, ++ tt->running_tasks = 0, tc = tt->context_array; ++ i < tt->max_tasks; i++, tlp++) { ++ if (!(*tlp)) ++ continue; ++ ++ if (!IS_TASK_ADDR(*tlp)) { ++ error(WARNING, ++ "%sinvalid task address found in task list: %lx\n", ++ DUMPFILE() ? "\n" : "", *tlp); ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ if (task_exists(*tlp)) { ++ error(WARNING, ++ "%sduplicate task address found in task list: %lx\n", ++ DUMPFILE() ? 
"\n" : "", *tlp); ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ if (!(tp = fill_task_struct(*tlp))) { ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ if (store_context(tc, *tlp, tp)) { ++ tc++; ++ tt->running_tasks++; ++ } ++ } ++ ++ FREEBUF(pid_hash); ++ FREEBUF(nodebuf); ++ ++ please_wait_done(); ++ ++ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) ++ refresh_context(curtask, curpid); ++ ++ tt->retries = MAX(tt->retries, retries); ++} ++ ++ ++/* ++ * 2.6.24: The pid_hash[] hlist_head entries were changed to point ++ * to the hlist_node structure embedded in a upid structure. ++ */ ++static void ++refresh_hlist_task_table_v3(void) ++{ ++ int i; ++ ulong *pid_hash; ++ ulong pidhash_array; ++ ulong kpp; ++ char *tp; ++ ulong next, pnext, pprev; ++ ulong upid; ++ char *nodebuf; ++ int len, cnt; ++ struct task_context *tc; ++ ulong curtask; ++ ulong curpid; ++ ulong retries; ++ ulong *tlp; ++ uint upid_nr; ++ ulong upid_ns; ++ int chained; ++ ulong pid; ++ ulong pid_tasks_0; ++ ++ if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ ++ return; ++ ++ if (DUMPFILE()) { /* impossible */ ++ please_wait("gathering task table data"); ++ if (!symbol_exists("panic_threads")) ++ tt->flags |= POPULATE_PANIC; ++ } ++ ++ if (ACTIVE() && !(tt->flags & TASK_REFRESH)) ++ return; ++ ++ /* ++ * The current task's task_context entry may change, ++ * or the task may not even exist anymore. ++ */ ++ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { ++ curtask = CURRENT_TASK(); ++ curpid = CURRENT_PID(); ++ } ++ ++ get_symbol_data("pid_hash", sizeof(void *), &pidhash_array); ++ ++ len = tt->pidhash_len; ++ pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); ++ nodebuf = GETBUF(SIZE(upid)); ++ retries = 0; ++ ++retry_pid_hash: ++ if (retries && DUMPFILE()) ++ error(FATAL, ++ "\ncannot gather a stable task list via pid_hash\n"); ++ ++ if ((retries == MAX_UNLIMITED_TASK_RETRIES) && ++ !(tt->flags & TASK_INIT_DONE)) ++ error(FATAL, ++ "\ncannot gather a stable task list via pid_hash (%d retries)\n", ++ retries); ++ ++ if (!readmem(pidhash_array, KVADDR, pid_hash, ++ len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) ++ error(FATAL, "\ncannot read pid_hash array\n"); ++ ++ if (!hq_open()) { ++ error(INFO, "cannot hash task_struct entries\n"); ++ if (!(tt->flags & TASK_INIT_DONE)) ++ clean_exit(1); ++ error(INFO, "using stale task_structs\n"); ++ FREEBUF(pid_hash); ++ return; ++ } ++ ++ /* ++ * Get the idle threads first. ++ */ ++ cnt = 0; ++ for (i = 0; i < kt->cpus; i++) { ++ if (hq_enter(tt->idle_threads[i])) ++ cnt++; ++ else ++ error(WARNING, "%sduplicate idle tasks?\n", ++ DUMPFILE() ? "\n" : ""); ++ } ++ ++ for (i = 0; i < len; i++) { ++ if (!pid_hash[i]) ++ continue; ++ ++ kpp = pid_hash[i]; ++ upid = pid_hash[i] - OFFSET(upid_pid_chain); ++ chained = 0; ++do_chained: ++ if (!readmem(upid, KVADDR, nodebuf, SIZE(upid), ++ "pid_hash upid", RETURN_ON_ERROR|QUIET)) { ++ error(INFO, "\ncannot read pid_hash upid\n"); ++ if (DUMPFILE()) ++ continue; ++ hq_close(); ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ pnext = ULONG(nodebuf + OFFSET(upid_pid_chain) + OFFSET(hlist_node_next)); ++ pprev = ULONG(nodebuf + OFFSET(upid_pid_chain) + OFFSET(hlist_node_pprev)); ++ upid_nr = UINT(nodebuf + OFFSET(upid_nr)); ++ upid_ns = ULONG(nodebuf + OFFSET(upid_ns)); ++ /* ++ * Use init_pid_ns level 0 (PIDTYPE_PID). 
++ */ ++ if (upid_ns != tt->init_pid_ns) ++ continue; ++ ++ pid = upid - OFFSET(pid_numbers); ++ ++ if (!readmem(pid + OFFSET(pid_tasks), KVADDR, &pid_tasks_0, ++ sizeof(void *), "pid tasks", RETURN_ON_ERROR|QUIET)) { ++ error(INFO, "\ncannot read pid.tasks[0]\n"); ++ if (DUMPFILE()) ++ continue; ++ hq_close(); ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ if (pid_tasks_0 == 0) ++ continue; ++ ++ next = pid_tasks_0 - OFFSET(task_struct_pids); ++ ++ if (CRASHDEBUG(1)) { ++ if (chained) ++ console(" %lx upid: %lx nr: %d pid: %lx\n" ++ " pnext/pprev: %.*lx/%lx task: %lx\n", ++ kpp, upid, upid_nr, pid, VADDR_PRLEN, pnext, pprev, next); ++ else ++ console("pid_hash[%4d]: %lx upid: %lx nr: %d pid: %lx\n" ++ " pnext/pprev: %.*lx/%lx task: %lx\n", ++ i, kpp, upid, upid_nr, pid, VADDR_PRLEN, pnext, pprev, next); ++ } ++ ++ if (!IS_TASK_ADDR(next)) { ++ error(INFO, "%sinvalid task address in pid_hash: %lx\n", ++ DUMPFILE() ? "\n" : "", next); ++ if (DUMPFILE()) ++ break; ++ hq_close(); ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ if (!is_idle_thread(next) && !hq_enter(next)) { ++ error(INFO, "%sduplicate task in pid_hash: %lx\n", ++ DUMPFILE() ? "\n" : "", next); ++ if (DUMPFILE()) ++ break; ++ hq_close(); ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ cnt++; ++ ++ if (pnext) { ++ kpp = pnext; ++ upid = pnext - OFFSET(upid_pid_chain); ++ chained++; ++ goto do_chained; ++ } ++ } ++ ++ if (cnt > tt->max_tasks) { ++ tt->max_tasks = cnt + TASK_SLUSH; ++ allocate_task_space(tt->max_tasks); ++ hq_close(); ++ if (!DUMPFILE()) ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); ++ cnt = retrieve_list((ulong *)tt->task_local, cnt); ++ ++ hq_close(); ++ ++ clear_task_cache(); ++ ++ for (i = 0, tlp = (ulong *)tt->task_local, ++ tt->running_tasks = 0, tc = tt->context_array; ++ i < tt->max_tasks; i++, tlp++) { ++ if (!(*tlp)) ++ continue; ++ ++ if (!IS_TASK_ADDR(*tlp)) { ++ error(WARNING, ++ "%sinvalid task address found in task list: %lx\n", ++ DUMPFILE() ? "\n" : "", *tlp); ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ if (task_exists(*tlp)) { ++ error(WARNING, ++ "%sduplicate task address found in task list: %lx\n", ++ DUMPFILE() ? "\n" : "", *tlp); ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ if (!(tp = fill_task_struct(*tlp))) { ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_pid_hash; ++ } ++ ++ if (store_context(tc, *tlp, tp)) { ++ tc++; ++ tt->running_tasks++; ++ } ++ } ++ ++ FREEBUF(pid_hash); ++ FREEBUF(nodebuf); ++ ++ please_wait_done(); ++ ++ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) ++ refresh_context(curtask, curpid); ++ ++ tt->retries = MAX(tt->retries, retries); ++} ++ ++static void ++refresh_active_task_table(void) ++{ ++ int i; ++ char *tp; ++ int cnt; ++ struct task_context *tc; ++ ulong curtask; ++ ulong curpid; ++ ulong retries; ++ ulong *tlp; ++ ++ if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ ++ return; ++ ++ if (DUMPFILE()) { ++ please_wait("gathering task table data"); ++ if (!symbol_exists("panic_threads")) ++ tt->flags |= POPULATE_PANIC; ++ } ++ ++ if (ACTIVE() && !(tt->flags & TASK_REFRESH)) ++ return; ++ ++ get_active_set(); ++ /* ++ * The current task's task_context entry may change, ++ * or the task may not even exist anymore. 
++ */ ++ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { ++ curtask = CURRENT_TASK(); ++ curpid = CURRENT_PID(); ++ } ++ ++retry_active: ++ ++ if (!hq_open()) { ++ error(INFO, "cannot hash task_struct entries\n"); ++ if (!(tt->flags & TASK_INIT_DONE)) ++ clean_exit(1); ++ error(INFO, "using stale task_structs\n"); ++ return; ++ } ++ ++ /* ++ * Get the active tasks. ++ */ ++ cnt = 0; ++ for (i = 0; i < kt->cpus; i++) { ++ if (hq_enter(tt->active_set[i])) ++ cnt++; ++ else ++ error(WARNING, "%sduplicate active tasks?\n", ++ DUMPFILE() ? "\n" : ""); ++ } ++ ++ BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); ++ cnt = retrieve_list((ulong *)tt->task_local, cnt); ++ ++ hq_close(); ++ ++ clear_task_cache(); ++ ++ for (i = 0, tlp = (ulong *)tt->task_local, ++ tt->running_tasks = 0, tc = tt->context_array; ++ i < tt->max_tasks; i++, tlp++) { ++ if (!(*tlp)) ++ continue; ++ ++ if (!IS_TASK_ADDR(*tlp)) { ++ error(WARNING, ++ "%sinvalid task address found in task list: %lx\n", ++ DUMPFILE() ? "\n" : "", *tlp); ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_active; ++ } ++ ++ if (task_exists(*tlp)) { ++ error(WARNING, ++ "%sduplicate task address found in task list: %lx\n", ++ DUMPFILE() ? "\n" : "", *tlp); ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_active; ++ } ++ ++ if (!(tp = fill_task_struct(*tlp))) { ++ if (DUMPFILE()) ++ continue; ++ retries++; ++ goto retry_active; ++ } ++ ++ if (store_context(tc, *tlp, tp)) { ++ tc++; ++ tt->running_tasks++; ++ } else if (DUMPFILE()) ++ error(WARNING, "corrupt/invalid active task: %lx\n", ++ *tlp); ++ } ++ ++ if (!tt->running_tasks) { ++ if (DUMPFILE()) ++ error(FATAL, "cannot determine any active tasks!\n"); ++ retries++; ++ goto retry_active; ++ } ++ ++ please_wait_done(); ++ ++ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) ++ refresh_context(curtask, curpid); ++ ++ tt->retries = MAX(tt->retries, retries); ++} ++ ++/* ++ * Fill a task_context structure with the data from a task. If a NULL ++ * task_context pointer is passed in, use the next available one. 
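As a reader's aid, the per-task record filled in here is crash's struct task_context; judging from the assignments in the function, it carries roughly the fields sketched below (illustrative only, the authoritative declaration lives in crash's defs.h):

    struct task_context {                    /* sketch, not the real declaration */
            ulong task;                      /* task_struct address              */
            ulong thread_info;               /* set when tt->flags & THREAD_INFO */
            ulong pid;
            char comm[16 + 1];               /* NUL-terminated copy of comm[]    */
            int processor;
            ulong ptask;                     /* parent's task_struct             */
            ulong mm_struct;
            struct task_context *tc_next;
    };

All of the refresh_*_task_table() variants above funnel their harvested task addresses through this routine, which is why do_verify below is keyed off whichever refresh function is in use.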
++ */ ++static struct task_context * ++store_context(struct task_context *tc, ulong task, char *tp) ++{ ++ pid_t *pid_addr; ++ char *comm_addr; ++ int *processor_addr; ++ ulong *parent_addr; ++ ulong *mm_addr; ++ int has_cpu; ++ int do_verify; ++ ++ if (tt->refresh_task_table == refresh_fixed_task_table) ++ do_verify = 1; ++ else if (tt->refresh_task_table == refresh_pid_hash_task_table) ++ do_verify = 2; ++ else if (tt->refresh_task_table == refresh_hlist_task_table) ++ do_verify = 2; ++ else if (tt->refresh_task_table == refresh_hlist_task_table_v2) ++ do_verify = 2; ++ else if (tt->refresh_task_table == refresh_hlist_task_table_v3) ++ do_verify = 2; ++ else if (tt->refresh_task_table == refresh_active_task_table) ++ do_verify = 2; ++ else ++ do_verify = 0; ++ ++ if (!tc) ++ tc = tt->context_array + tt->running_tasks; ++ ++ pid_addr = (pid_t *)(tp + OFFSET(task_struct_pid)); ++ comm_addr = (char *)(tp + OFFSET(task_struct_comm)); ++ if (tt->flags & THREAD_INFO) { ++ tc->thread_info = ULONG(tp + OFFSET(task_struct_thread_info)); ++ fill_thread_info(tc->thread_info); ++ processor_addr = (int *) (tt->thread_info + ++ OFFSET(thread_info_cpu)); ++ } else if (VALID_MEMBER(task_struct_processor)) ++ processor_addr = (int *) (tp + OFFSET(task_struct_processor)); ++ else if (VALID_MEMBER(task_struct_cpu)) ++ processor_addr = (int *) (tp + OFFSET(task_struct_cpu)); ++ if (VALID_MEMBER(task_struct_p_pptr)) ++ parent_addr = (ulong *)(tp + OFFSET(task_struct_p_pptr)); ++ else ++ parent_addr = (ulong *)(tp + OFFSET(task_struct_parent)); ++ mm_addr = (ulong *)(tp + OFFSET(task_struct_mm)); ++ has_cpu = task_has_cpu(task, tp); ++ ++ tc->pid = (ulong)(*pid_addr); ++ BCOPY(comm_addr, &tc->comm[0], 16); ++ tc->comm[16] = NULLCHAR; ++ tc->processor = *processor_addr; ++ tc->ptask = *parent_addr; ++ tc->mm_struct = *mm_addr; ++ tc->task = task; ++ tc->tc_next = NULL; ++ ++ if (do_verify && !verify_task(tc, do_verify)) { ++ error(INFO, "invalid task address: %lx\n", tc->task); ++ BZERO(tc, sizeof(struct task_context)); ++ return NULL; ++ } ++ ++ if (has_cpu && (tt->flags & POPULATE_PANIC)) ++ tt->panic_threads[tc->processor] = tc->task; ++ ++ return tc; ++} ++ ++/* ++ * The current context may have moved to a new spot in the task table ++ * or have exited since the last command. If it still exists, reset its ++ * new position. If it doesn't exist, set the context back to the initial ++ * crash context. If necessary, complain and show the restored context. ++ */ ++static void ++refresh_context(ulong curtask, ulong curpid) ++{ ++ ulong value, complain; ++ struct task_context *tc; ++ ++ if (task_exists(curtask) && pid_exists(curpid)) { ++ set_context(curtask, NO_PID); ++ } else { ++ set_context(tt->this_task, NO_PID); ++ ++ complain = TRUE; ++ if (STREQ(args[0], "set") && (argcnt == 2) && ++ IS_A_NUMBER(args[1])) { ++ ++ switch (str_to_context(args[optind], &value, &tc)) ++ { ++ case STR_PID: ++ case STR_TASK: ++ complain = FALSE; ++ break; ++ case STR_INVALID: ++ complain = TRUE; ++ break; ++ } ++ } ++ ++ if (complain) { ++ error(INFO, "current context no longer exists -- " ++ "restoring \"%s\" context:\n\n", ++ pc->program_name); ++ show_context(CURRENT_CONTEXT()); ++ fprintf(fp, "\n"); ++ } ++ } ++} ++ ++/* ++ * Sort the task_context array by PID number; for PID 0, sort by processor. 
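A small worked example of the ordering this produces, for a hypothetical two-cpu system (names shown for illustration only):

    /* after sort_by_pid(), the context array reads:
     *   PID 0   CPU 0   "swapper"
     *   PID 0   CPU 1   "swapper"
     *   PID 1           "init"
     *   PID 2           "kthreadd"
     *   ... remaining tasks in ascending PID order ...
     */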
++ */ ++void ++sort_context_array(void) ++{ ++ ulong curtask; ++ ++ curtask = CURRENT_TASK(); ++ qsort((void *)tt->context_array, (size_t)tt->running_tasks, ++ sizeof(struct task_context), sort_by_pid); ++ set_context(curtask, NO_PID); ++} ++ ++static int ++sort_by_pid(const void *arg1, const void *arg2) ++{ ++ struct task_context *t1, *t2; ++ ++ t1 = (struct task_context *)arg1; ++ t2 = (struct task_context *)arg2; ++ ++ if ((t1->pid == 0) && (t2->pid == 0)) ++ return (t1->processor < t2->processor ? -1 : ++ t1->processor == t2->processor ? 0 : 1); ++ else ++ return (t1->pid < t2->pid ? -1 : ++ t1->pid == t2->pid ? 0 : 1); ++} ++ ++ ++static int ++sort_by_last_run(const void *arg1, const void *arg2) ++{ ++ ulong task_last_run_stamp(ulong); ++ struct task_context *t1, *t2; ++ ulonglong lr1, lr2; + + t1 = (struct task_context *)arg1; + t2 = (struct task_context *)arg2; +@@ -1581,6 +2277,9 @@ + char * + fill_task_struct(ulong task) + { ++ if (XEN_HYPER_MODE()) ++ return NULL; ++ + if (!IS_LAST_TASK_READ(task)) { + if (!readmem(task, KVADDR, tt->task_struct, + SIZE(task_struct), "fill_task_struct", +@@ -1632,6 +2331,9 @@ + bt->stackbase); + } + ++ if (XEN_HYPER_MODE()) ++ return; ++ + if (!IS_LAST_TASK_READ(bt->task)) { + if (bt->stackbase == bt->task) { + BCOPY(bt->stackbuf, tt->task_struct, SIZE(task_struct)); +@@ -1893,7 +2595,7 @@ + BZERO(&psinfo, sizeof(struct psinfo)); + flag = 0; + +- while ((c = getopt(argcnt, args, "stcpkul")) != EOF) { ++ while ((c = getopt(argcnt, args, "gstcpkular")) != EOF) { + switch(c) + { + case 'k': +@@ -1907,21 +2609,31 @@ + break; + + /* +- * The remaining flags are all mutually-exclusive. ++ * The a, t, c, p, g and l flags are all mutually-exclusive. + */ ++ case 'g': ++ flag &= ~(PS_EXCLUSIVE); ++ flag |= PS_TGID_LIST; ++ break; ++ ++ case 'a': ++ flag &= ~(PS_EXCLUSIVE); ++ flag |= PS_ARGV_ENVP; ++ break; ++ + case 't': ++ flag &= ~(PS_EXCLUSIVE); + flag |= PS_TIMES; +- flag &= ~(PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN); + break; + + case 'c': ++ flag &= ~(PS_EXCLUSIVE); + flag |= PS_CHILD_LIST; +- flag &= ~(PS_PPID_LIST|PS_TIMES|PS_LAST_RUN); + break; + + case 'p': ++ flag &= ~(PS_EXCLUSIVE); + flag |= PS_PPID_LIST; +- flag &= ~(PS_CHILD_LIST|PS_TIMES|PS_LAST_RUN); + break; + + case 'l': +@@ -1932,14 +2644,19 @@ + argerrs++; + break; + } ++ flag &= ~(PS_EXCLUSIVE); + flag |= PS_LAST_RUN; +- flag &= ~(PS_CHILD_LIST|PS_TIMES|PS_PPID_LIST); + break; + + case 's': + flag |= PS_KSTACKP; + break; + ++ case 'r': ++ flag &= ~(PS_EXCLUSIVE); ++ flag |= PS_RLIMIT; ++ break; ++ + default: + argerrs++; + break; +@@ -2020,6 +2737,18 @@ + show_last_run(tc); \ + continue; \ + } \ ++ if (flag & PS_ARGV_ENVP) { \ ++ show_task_args(tc); \ ++ continue; \ ++ } \ ++ if (flag & PS_RLIMIT) { \ ++ show_task_rlimit(tc); \ ++ continue; \ ++ } \ ++ if (flag & PS_TGID_LIST) { \ ++ show_tgid_list(tc->task); \ ++ continue; \ ++ } \ + get_task_mem_usage(tc->task, tm); \ + fprintf(fp, "%s", is_task_active(tc->task) ? "> " : " "); \ + fprintf(fp, "%5ld %5ld %2s %s %3s", \ +@@ -2050,7 +2779,7 @@ + char buf2[BUFSIZE]; + char buf3[BUFSIZE]; + +- if (!(flag & (PS_PPID_LIST|PS_CHILD_LIST|PS_TIMES|PS_LAST_RUN))) ++ if (!(flag & PS_EXCLUSIVE)) + fprintf(fp, + " PID PPID CPU %s ST %%MEM VSZ RSS COMM\n", + flag & PS_KSTACKP ? 
+@@ -2076,6 +2805,8 @@ + return; + } + ++ pc->curcmd_flags |= TASK_SPECIFIED; ++ + for (ac = 0; ac < psi->argc; ac++) { + tm = &task_mem_usage; + tc = FIRST_CONTEXT(); +@@ -2096,8 +2827,15 @@ + break; + + case PS_BY_CMD: +- if (STREQ(tc->comm, psi->comm[ac])) +- print = TRUE; ++ if (STREQ(tc->comm, psi->comm[ac])) { ++ if (flag & PS_TGID_LIST) { ++ if (tc->pid == task_tgid(tc->task)) ++ print = TRUE; ++ else ++ print = FALSE; ++ } else ++ print = TRUE; ++ } + break; + } + +@@ -2145,6 +2883,229 @@ + } + + /* ++ * Show the argv and envp strings pointed to by mm_struct->arg_start ++ * and mm_struct->env_start. The user addresses need to broken up ++ * into physical on a page-per-page basis because we typically are ++ * not going to be working in the context of the target task. ++ */ ++static void ++show_task_args(struct task_context *tc) ++{ ++ ulong arg_start, arg_end, env_start, env_end; ++ char *buf, *bufptr, *p1; ++ char *as, *ae, *es, *ee; ++ physaddr_t paddr; ++ ulong uvaddr, size, cnt; ++ int c, d; ++ ++ print_task_header(fp, tc, 0); ++ ++ if (!tc || !tc->mm_struct) { /* probably a kernel thread */ ++ error(INFO, "no user stack\n\n"); ++ return; ++ } ++ ++ if (!task_mm(tc->task, TRUE)) ++ return; ++ ++ if (INVALID_MEMBER(mm_struct_arg_start)) { ++ MEMBER_OFFSET_INIT(mm_struct_arg_start, "mm_struct", "arg_start"); ++ MEMBER_OFFSET_INIT(mm_struct_arg_end, "mm_struct", "arg_end"); ++ MEMBER_OFFSET_INIT(mm_struct_env_start, "mm_struct", "env_start"); ++ MEMBER_OFFSET_INIT(mm_struct_env_end, "mm_struct", "env_end"); ++ } ++ ++ arg_start = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_start)); ++ arg_end = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_end)); ++ env_start = ULONG(tt->mm_struct + OFFSET(mm_struct_env_start)); ++ env_end = ULONG(tt->mm_struct + OFFSET(mm_struct_env_end)); ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "arg_start: %lx arg_end: %lx (%ld)\n", ++ arg_start, arg_end, arg_end - arg_start); ++ fprintf(fp, "env_start: %lx env_end: %lx (%ld)\n", ++ env_start, env_end, env_end - env_start); ++ } ++ ++ buf = GETBUF(env_end - arg_start + 1); ++ ++ uvaddr = arg_start; ++ size = env_end - arg_start; ++ bufptr = buf; ++ ++ while (size > 0) { ++ if (!uvtop(tc, uvaddr, &paddr, 0)) { ++ error(INFO, "cannot access user stack address: %lx\n\n", ++ uvaddr); ++ goto bailout; ++ } ++ ++ cnt = PAGESIZE() - PAGEOFFSET(uvaddr); ++ ++ if (cnt > size) ++ cnt = size; ++ ++ if (!readmem(paddr, PHYSADDR, bufptr, cnt, ++ "user stack contents", RETURN_ON_ERROR|QUIET)) { ++ error(INFO, "cannot access user stack address: %lx\n\n", ++ uvaddr); ++ goto bailout; ++ } ++ ++ uvaddr += cnt; ++ bufptr += cnt; ++ size -= cnt; ++ } ++ ++ as = buf; ++ ae = &buf[arg_end - arg_start]; ++ es = &buf[env_start - arg_start]; ++ ee = &buf[env_end - arg_start]; ++ ++ fprintf(fp, "ARG: "); ++ for (p1 = as, c = 0; p1 < ae; p1++) { ++ if (*p1 == NULLCHAR) { ++ if (c) ++ fprintf(fp, " "); ++ c = 0; ++ } else { ++ fprintf(fp, "%c", *p1); ++ c++; ++ } ++ } ++ ++ fprintf(fp, "\nENV: "); ++ for (p1 = es, c = d = 0; p1 < ee; p1++) { ++ if (*p1 == NULLCHAR) { ++ if (c) ++ fprintf(fp, "\n"); ++ c = 0; ++ } else { ++ fprintf(fp, "%s%c", !c && (p1 != es) ? " " : "", *p1); ++ c++, d++; ++ } ++ } ++ fprintf(fp, "\n%s", d ? 
"" : "\n"); ++ ++bailout: ++ FREEBUF(buf); ++} ++ ++char *rlim_names[] = { ++ /* 0 */ "CPU", ++ /* 1 */ "FSIZE", ++ /* 2 */ "DATA", ++ /* 3 */ "STACK", ++ /* 4 */ "CORE", ++ /* 5 */ "RSS", ++ /* 6 */ "NPROC", ++ /* 7 */ "NOFILE", ++ /* 8 */ "MEMLOCK", ++ /* 9 */ "AS", ++ /* 10 */ "LOCKS", ++ /* 11 */ "SIGPENDING", ++ /* 12 */ "MSGQUEUE", ++ /* 13 */ "NICE", ++ /* 14 */ "RTPRIO", ++ NULL, ++}; ++ ++#ifndef RLIM_INFINITY ++#define RLIM_INFINITY (~0UL) ++#endif ++ ++/* ++ * Show the current and maximum rlimit values. ++ */ ++static void ++show_task_rlimit(struct task_context *tc) ++{ ++ int i, j, len1, len2, rlimit_index; ++ int in_task_struct, in_signal_struct; ++ char *rlimit_buffer; ++ ulong *p1, rlim_addr; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char buf3[BUFSIZE]; ++ ++ if (!VALID_MEMBER(task_struct_rlim) && !VALID_MEMBER(signal_struct_rlim)) { ++ MEMBER_OFFSET_INIT(task_struct_rlim, "task_struct", "rlim"); ++ MEMBER_OFFSET_INIT(signal_struct_rlim, "signal_struct", "rlim"); ++ STRUCT_SIZE_INIT(rlimit, "rlimit"); ++ if (!VALID_MEMBER(task_struct_rlim) && ++ !VALID_MEMBER(signal_struct_rlim)) ++ error(FATAL, "cannot determine rlimit array location\n"); ++ } else if (!VALID_STRUCT(rlimit)) ++ error(FATAL, "cannot determine rlimit structure definition\n"); ++ ++ in_task_struct = in_signal_struct = FALSE; ++ ++ if (VALID_MEMBER(task_struct_rlim)) { ++ rlimit_index = get_array_length("task_struct.rlim", NULL, 0); ++ in_task_struct = TRUE; ++ } else if (VALID_MEMBER(signal_struct_rlim)) { ++ if (!VALID_MEMBER(task_struct_signal)) ++ error(FATAL, "cannot determine rlimit array location\n"); ++ rlimit_index = get_array_length("signal_struct.rlim", NULL, 0); ++ in_signal_struct = TRUE; ++ } ++ ++ if (!rlimit_index) ++ error(FATAL, "cannot determine rlimit array size\n"); ++ ++ for (i = len1 = 0; i < rlimit_index; i++) { ++ if ((j = strlen(rlim_names[i])) > len1) ++ len1 = j; ++ } ++ len2 = strlen("(unlimited)"); ++ ++ rlimit_buffer = GETBUF(rlimit_index * SIZE(rlimit)); ++ ++ print_task_header(fp, tc, 0); ++ ++ fill_task_struct(tc->task); ++ ++ if (in_task_struct) { ++ BCOPY(tt->task_struct + OFFSET(task_struct_rlim), ++ rlimit_buffer, rlimit_index * SIZE(rlimit)); ++ } else if (in_signal_struct) { ++ rlim_addr = ULONG(tt->task_struct + OFFSET(task_struct_signal)); ++ if (!readmem(rlim_addr + OFFSET(signal_struct_rlim), ++ KVADDR, rlimit_buffer, rlimit_index * SIZE(rlimit), ++ "signal_struct rlimit array", RETURN_ON_ERROR)) { ++ FREEBUF(rlimit_buffer); ++ return; ++ } ++ } ++ ++ fprintf(fp, " %s %s %s\n", ++ mkstring(buf1, len1, RJUST, "RLIMIT"), ++ mkstring(buf2, len2, CENTER|RJUST, "CURRENT"), ++ mkstring(buf3, len2, CENTER|RJUST, "MAXIMUM")); ++ ++ for (p1 = (ulong *)rlimit_buffer, i = 0; i < rlimit_index; i++) { ++ fprintf(fp, " %s ", mkstring(buf1, len1, RJUST, ++ rlim_names[i] ? rlim_names[i] : "(unknown)")); ++ if (*p1 == (ulong)RLIM_INFINITY) ++ fprintf(fp, "(unlimited) "); ++ else ++ fprintf(fp, "%s ", mkstring(buf1, len2, ++ CENTER|LJUST|LONG_DEC, MKSTR(*p1))); ++ p1++; ++ if (*p1 == (ulong)RLIM_INFINITY) ++ fprintf(fp, "(unlimited)\n"); ++ else ++ fprintf(fp, "%s\n", mkstring(buf1, len2, ++ CENTER|LJUST|LONG_DEC, MKSTR(*p1))); ++ p1++; ++ } ++ ++ fprintf(fp, "\n"); ++ ++ FREEBUF(rlimit_buffer); ++} ++ ++/* + * Put either the task_struct address or kernel stack pointer into a string. 
+ * If the kernel stack pointer is requested, piggy-back on top of the + * back trace code to avoid having to deal with machine dependencies, +@@ -2229,11 +3190,8 @@ + + use_kernel_timeval = STRUCT_EXISTS("kernel_timeval"); + get_symbol_data("jiffies", sizeof(long), &jiffies); +- if (symbol_exists("jiffies_64")) { +- get_symbol_data("jiffies_64", sizeof(long long), &jiffies_64); +- if ((jiffies_64 & 0xffffffff00000000ULL) == 0x100000000ULL) +- jiffies_64 &= 0xffffffffULL; +- } ++ if (symbol_exists("jiffies_64")) ++ get_uptime(NULL, &jiffies_64); + tsp = task_start_times; + tc = tcp ? tcp : FIRST_CONTEXT(); + +@@ -2330,8 +3288,7 @@ + for (i = 0, tsp = task_start_times; i < tasks; i++, tsp++) { + print_task_header(fp, tsp->tc, 0); + fprintf(fp, " RUN TIME: %s\n", symbol_exists("jiffies_64") ? +- convert_time(jiffies_64 - +- convert_start_time(tsp->start_time, jiffies_64), buf1) : ++ convert_time(convert_start_time(tsp->start_time, jiffies_64), buf1) : + convert_time(jiffies - tsp->start_time, buf1)); + fprintf(fp, " START TIME: %llu\n", tsp->start_time); + if (VALID_MEMBER(task_struct_times)) { +@@ -2397,15 +3354,33 @@ + static ulonglong + convert_start_time(ulonglong start_time, ulonglong current) + { ++ ulong tmp1, tmp2; ++ ulonglong wrapped; ++ + switch(tt->flags & (TIMESPEC | NO_TIMESPEC)) + { + case TIMESPEC: +- if ((start_time * (ulonglong)machdep->hz) > current) +- return current; ++ if ((start_time * (ulonglong)machdep->hz) > current) ++ return 0; + else +- return start_time * (ulonglong)machdep->hz; ++ return current - (start_time * (ulonglong)machdep->hz); + + case NO_TIMESPEC: ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { ++ wrapped = (start_time & 0xffffffff00000000ULL); ++ if (wrapped) { ++ wrapped -= 0x100000000ULL; ++ start_time &= 0x00000000ffffffffULL; ++ start_time |= wrapped; ++ start_time += (ulonglong)(300*machdep->hz); ++ } else { ++ tmp1 = (ulong)(uint)(-300*machdep->hz); ++ tmp2 = (ulong)start_time; ++ start_time = (ulonglong)(tmp2 - tmp1); ++ } ++ } ++ break; ++ + default: + break; + } +@@ -2511,6 +3486,54 @@ + } + + /* ++ * Dump the children of a task. ++ */ ++static void ++show_tgid_list(ulong task) ++{ ++ int i; ++ int cnt; ++ struct task_context *tc; ++ ulong tgid; ++ ++ tc = task_to_context(task); ++ tgid = task_tgid(task); ++ ++ if (tc->pid != tgid) { ++ if (pc->curcmd_flags & TASK_SPECIFIED) { ++ if (!(tc = tgid_to_context(tgid))) ++ return; ++ task = tc->task; ++ } else ++ return; ++ } ++ ++ if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN)) ++ return; ++ ++ print_task_header(fp, tc, 0); ++ ++ tc = FIRST_CONTEXT(); ++ for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) { ++ if (tc->task == task) ++ continue; ++ ++ if (task_tgid(tc->task) == tgid) { ++ INDENT(2); ++ print_task_header(fp, tc, 0); ++ cnt++; ++ if (tc->pid == 0) ++ pc->curcmd_flags |= IDLE_TASK_SHOWN; ++ } ++ } ++ ++ if (!cnt) ++ fprintf(fp, " (no threads)\n"); ++ ++ fprintf(fp, "\n"); ++} ++ ++/* + * Return the first task found that belongs to a pid. + */ + ulong +@@ -2580,6 +3603,26 @@ + return NULL; + } + ++/* ++ * Return a tgid's parent task_context structure. 
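A note on the NO_TIMESPEC arithmetic added to convert_start_time() above: 2.6 kernels start jiffies at INITIAL_JIFFIES, i.e. (unsigned long)(unsigned int)(-300*HZ), so the counter wraps roughly five minutes after boot. Both new branches normalize a raw start_time into ticks since boot; a worked example, assuming HZ is 1000:

    /* jiffies at boot  = (ulong)(uint)(-300 * 1000)  = 4294667296 */
    /* task started 10s after boot: start_time        = 4294677296 */
    /* tmp1 = (ulong)(uint)(-300 * machdep->hz)       = 4294667296 */
    /* tmp2 - tmp1 = 10000 ticks, i.e. 10 seconds since boot       */

Start times recorded after the wrap carry set upper bits in the 64-bit value, which the "wrapped" branch strips before adding the 300*HZ bias back in.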
++ */ ++struct task_context * ++tgid_to_context(ulong parent_tgid) ++{ ++ int i; ++ struct task_context *tc; ++ ulong tgid; ++ ++ tc = FIRST_CONTEXT(); ++ for (i = 0; i < RUNNING_TASKS(); i++, tc++) { ++ tgid = task_tgid(tc->task); ++ if ((tgid == parent_tgid) && (tgid == tc->pid)) ++ return tc; ++ } ++ ++ return NULL; ++} ++ + + /* + * Return the task_context structure of the first task found with a pid, +@@ -2816,20 +3859,39 @@ + + + /* ++ * Return the task if the vaddr is part of a task's task_struct. ++ */ ++ulong ++vaddr_in_task_struct(ulong vaddr) ++{ ++ int i; ++ struct task_context *tc; ++ ++ tc = FIRST_CONTEXT(); ++ for (i = 0; i < RUNNING_TASKS(); i++, tc++) { ++ if ((vaddr >= tc->task) && ++ (vaddr < (tc->task + SIZE(task_struct)))) ++ return tc->task; ++ } ++ ++ return NO_TASK; ++} ++ ++/* + * Verify whether any task is running a command. + */ + int + comm_exists(char *s) + { +- int i; ++ int i, cnt; + struct task_context *tc; + + tc = FIRST_CONTEXT(); +- for (i = 0; i < RUNNING_TASKS(); i++, tc++) ++ for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) + if (STREQ(tc->comm, s)) +- return TRUE; ++ cnt++; + +- return FALSE; ++ return cnt; + } + + /* +@@ -2925,7 +3987,11 @@ + fprintf(fp, "COMMAND: \"%s\"\n", tc->comm); + INDENT(indent); + fprintf(fp, " TASK: %lx ", tc->task); +- if ((cnt = TASKS_PER_PID(tc->pid)) > 1) ++ if ((machdep->flags & (INIT|MCA)) && (tc->pid == 0)) ++ cnt = comm_exists(tc->comm); ++ else ++ cnt = TASKS_PER_PID(tc->pid); ++ if (cnt > 1) + fprintf(fp, "(1 of %d) ", cnt); + if (tt->flags & THREAD_INFO) + fprintf(fp, "[THREAD_INFO: %lx]", tc->thread_info); +@@ -2938,19 +4004,27 @@ + if (is_task_active(tc->task)) { + if (machdep->flags & HWRESET) + fprintf(fp, "(HARDWARE RESET)"); +- else if (machdep->flags & SYSRQ) ++ else if ((pc->flags & SYSRQ) && (tc->task == tt->panic_task)) + fprintf(fp, "(SYSRQ)"); + else if (machdep->flags & INIT) + fprintf(fp, "(INIT)"); +- else if (kt->cpu_flags[tc->processor] & NMI) ++ else if ((machdep->flags & MCA) && (tc->task == tt->panic_task)) ++ fprintf(fp, "(MCA)"); ++ else if ((tc->processor >= 0) && ++ (tc->processor < NR_CPUS) && ++ (kt->cpu_flags[tc->processor] & NMI)) + fprintf(fp, "(NMI)"); ++ else if ((tc->task == tt->panic_task) && ++ XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND)) ++ fprintf(fp, "(SUSPEND)"); + else if (tc->task == tt->panic_task) + fprintf(fp, "(PANIC)"); + else + fprintf(fp, "(ACTIVE)"); + } + +- if (!(pc->flags & RUNTIME) && (tt->flags & PANIC_TASK_NOT_FOUND) && ++ if (!(pc->flags & RUNTIME) && !ACTIVE() && ++ (tt->flags & PANIC_TASK_NOT_FOUND) && + !SYSRQ_TASK(tc->task)) { + fprintf(fp, "\n"); INDENT(indent); + if (machine_type("S390") || machine_type("S390X")) +@@ -3006,6 +4080,10 @@ + cnt++ ? "" : "\n", tc->comm); + break; + } ++ ++ if (!(pc->flags & RUNTIME) && (tt->flags & ACTIVE_ONLY)) ++ error(WARNING, ++ "\nonly the active tasks on each cpu are being tracked\n"); + } + + +@@ -3182,6 +4260,22 @@ + return flags; + } + ++/* ++ * Return a task's tgid. ++ */ ++ulong ++task_tgid(ulong task) ++{ ++ uint tgid; ++ ++ fill_task_struct(task); ++ ++ tgid = tt->last_task_read ? 
++ UINT(tt->task_struct + OFFSET(task_struct_tgid)) : 0; ++ ++ return (ulong)tgid; ++} ++ + ulonglong + task_last_run(ulong task) + { +@@ -3368,6 +4462,12 @@ + task = NO_TASK; + tc = FIRST_CONTEXT(); + ++ /* ++ * --no_panic command line option ++ */ ++ if (tt->flags & PANIC_TASK_NOT_FOUND) ++ goto use_task_0; ++ + if (symbol_exists("panic_threads") && + symbol_exists("panicmsg") && + symbol_exists("panic_processor")) { +@@ -3411,6 +4511,9 @@ + + use_task_0: + ++ if (CRASHDEBUG(1)) ++ error(INFO, "get_panic_context: panic task not found\n"); ++ + tt->flags |= PANIC_TASK_NOT_FOUND; + tc = FIRST_CONTEXT(); + return(tc->task); +@@ -3448,50 +4551,74 @@ + int msg_found; + + BZERO(buf, BUFSIZE); ++ msg_found = FALSE; + +- if (tt->panicmsg) ++ if (tt->panicmsg) { + read_string(tt->panicmsg, buf, BUFSIZE-1); +- else if (LKCD_DUMPFILE()) ++ msg_found = TRUE; ++ } else if (LKCD_DUMPFILE()) { + get_lkcd_panicmsg(buf); +- else { +- msg_found = FALSE; +- +- open_tmpfile(); +- dump_log(FALSE); +- +- rewind(pc->tmpfile); +- while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (strstr(buf, "Kernel panic: ")) +- msg_found = TRUE; +- } +- rewind(pc->tmpfile); +- while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (strstr(buf, "Oops: ") || +- strstr(buf, "kernel BUG at")) +- msg_found = TRUE; +- } +- rewind(pc->tmpfile); +- while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (strstr(buf, "SysRq : Netdump") || +- strstr(buf, "SysRq : Crash")) { +- machdep->flags |= SYSRQ; +- msg_found = TRUE; +- } +- } +- rewind(pc->tmpfile); +- while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (strstr(buf, "sysrq") && +- symbol_exists("sysrq_pressed")) +- get_symbol_data("sysrq_pressed", sizeof(int), +- &msg_found); +- } ++ msg_found = TRUE; ++ } ++ ++ if (msg_found == TRUE) ++ return(buf); ++ ++ open_tmpfile(); ++ dump_log(FALSE); + +- close_tmpfile(); ++ /* ++ * First check for a SYSRQ-generated crash, and set the ++ * active-task flag appropriately. The message may or ++ * may not be used as the panic message. 
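In short, the restructured logic below makes one dedicated pass over the saved log for the crashdump trigger markers ("SysRq : Crash" / "SysRq : Trigger a crashdump"), which only sets the SYSRQ flag, and then falls through the candidate panic messages in priority order: "Kernel panic: ", then "Oops: " / "kernel BUG at", then the SysRq netdump/crash lines, then the sysrq_pressed symbol, and finally the newer "Kernel panic - " prefix; the first line that matches is returned as the panic message.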
++ */ ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "SysRq : Crash") || ++ strstr(buf, "SysRq : Trigger a crashdump")) { ++ pc->flags |= SYSRQ; ++ break; ++ } ++ } + +- if (!msg_found) +- BZERO(buf, BUFSIZE); ++ rewind(pc->tmpfile); ++ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "Kernel panic: ")) ++ msg_found = TRUE; ++ } ++ rewind(pc->tmpfile); ++ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "Oops: ") || ++ strstr(buf, "kernel BUG at")) ++ msg_found = TRUE; ++ } ++ rewind(pc->tmpfile); ++ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "SysRq : Netdump") || ++ strstr(buf, "SysRq : Trigger a crashdump") || ++ strstr(buf, "SysRq : Crash")) { ++ pc->flags |= SYSRQ; ++ msg_found = TRUE; ++ } ++ } ++ rewind(pc->tmpfile); ++ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "sysrq") && ++ symbol_exists("sysrq_pressed")) ++ get_symbol_data("sysrq_pressed", sizeof(int), ++ &msg_found); ++ } ++ rewind(pc->tmpfile); ++ while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "Kernel panic - ")) ++ msg_found = TRUE; + } + ++ close_tmpfile(); ++ ++ if (!msg_found) ++ BZERO(buf, BUFSIZE); ++ + return(buf); + } + +@@ -3517,7 +4644,7 @@ + BZERO(&foreach_data, sizeof(struct foreach_data)); + fd = &foreach_data; + +- while ((c = getopt(argcnt, args, "R:vomlgersStpukcf")) != EOF) { ++ while ((c = getopt(argcnt, args, "R:vomlgersStTpukcf")) != EOF) { + switch(c) + { + case 'R': +@@ -3560,6 +4687,10 @@ + fd->flags |= FOREACH_r_FLAG; + break; + ++ case 'T': ++ fd->flags |= FOREACH_T_FLAG; ++ break; ++ + case 't': + fd->flags |= FOREACH_t_FLAG; + break; +@@ -3754,12 +4885,14 @@ + foreach(struct foreach_data *fd) + { + int i, j, k, a; +- struct task_context *tc; ++ struct task_context *tc, *tgc; + int specified; + int doit; + int subsequent; + ulong cmdflags; ++ ulong tgid; + struct reference reference, *ref; ++ int print_header; + struct bt_info bt_info, *bt; + + /* +@@ -3797,6 +4930,8 @@ + fd->reference ? fd->reference : ""); + } + ++ print_header = TRUE; ++ + for (k = 0; k < fd->keys; k++) { + switch(fd->keyword_array[k]) + { +@@ -3881,6 +5016,14 @@ + error(FATAL, + "sig: -l and -s options are not applicable\n"); + } ++ if (fd->flags & FOREACH_g_FLAG) { ++ if (!hq_open()) { ++ error(INFO, ++ "cannot hash thread group tasks\n"); ++ fd->flags &= ~FOREACH_g_FLAG; ++ } else ++ print_header = FALSE; ++ } + break; + + case FOREACH_TEST: +@@ -3941,7 +5084,7 @@ + if (fd->reference) { + BZERO(ref, sizeof(struct reference)); + ref->str = fd->reference; +- } else ++ } else if (print_header) + print_task_header(fp, tc, subsequent++); + + for (k = 0; k < fd->keys; k++) { +@@ -3962,7 +5105,12 @@ + bt->flags |= BT_SYMBOLIC_ARGS; + if (fd->flags & FOREACH_t_FLAG) + bt->flags |= BT_TEXT_SYMBOLS; +- if (fd->flags & FOREACH_o_FLAG) ++ if (fd->flags & FOREACH_T_FLAG) { ++ bt->flags |= BT_TEXT_SYMBOLS; ++ bt->flags |= BT_TEXT_SYMBOLS_ALL; ++ } ++ if ((fd->flags & FOREACH_o_FLAG) || ++ (kt->flags & USE_OLD_BT)) + bt->flags |= BT_OLD_BACK_TRACE; + if (fd->flags & FOREACH_e_FLAG) + bt->flags |= BT_EFRAME_SEARCH; +@@ -4010,8 +5158,14 @@ + + case FOREACH_SIG: + pc->curcmd = "sig"; +- do_sig(tc->task, FOREACH_SIG, +- fd->reference ? 
ref : NULL); ++ if (fd->flags & FOREACH_g_FLAG) { ++ tgid = task_tgid(tc->task); ++ tgc = tgid_to_context(tgid); ++ if (hq_enter(tgc->task)) ++ do_sig_thread_group(tgc->task); ++ } else ++ do_sig(tc->task, FOREACH_SIG, ++ fd->reference ? ref : NULL); + break; + + case FOREACH_SET: +@@ -4075,6 +5229,11 @@ + nlm_files_dump(); + } + break; ++ ++ case FOREACH_SIG: ++ if (fd->flags & FOREACH_g_FLAG) ++ hq_close(); ++ break; + } + } + +@@ -4161,7 +5320,7 @@ + fd = &foreach_data; + fd->keys = 1; + fd->keyword_array[0] = FOREACH_BT; +- fd->flags |= FOREACH_t_FLAG; ++ fd->flags |= (FOREACH_t_FLAG|FOREACH_o_FLAG); + + dietask = lasttask = NO_TASK; + +@@ -4188,6 +5347,12 @@ + break; + } + ++ if (strstr(buf, " crash_kexec at ") || ++ strstr(buf, " .crash_kexec at ")) { ++ found = TRUE; ++ break; ++ } ++ + if (strstr(buf, " die at ")) { + switch (dietask) + { +@@ -4211,6 +5376,10 @@ + if (dietask == (NO_TASK+1)) + error(WARNING, "multiple active tasks have called die\n\n"); + ++ if (CRASHDEBUG(1) && found) ++ error(INFO, "panic_search: %lx (via foreach bt)\n", ++ lasttask); ++ + found_panic_task: + populate_panic_threads(); + +@@ -4229,6 +5398,9 @@ + } + } + ++ if (CRASHDEBUG(1)) ++ error(INFO, "panic_search: failed (via foreach bt)\n"); ++ + return NULL; + } + +@@ -4240,25 +5412,28 @@ + { + ulong task; + +- if (LKCD_DUMPFILE()) +- return(get_lkcd_panic_task()); +- + if (NETDUMP_DUMPFILE()) { + task = pc->flags & REM_NETDUMP ? + tt->panic_task : get_netdump_panic_task(); + if (task) + return task; +- if (get_active_set()) +- return(get_active_set_panic_task()); +- } +- +- if (DISKDUMP_DUMPFILE()) { ++ } else if (KDUMP_DUMPFILE()) { ++ task = get_kdump_panic_task(); ++ if (task) ++ return task; ++ } else if (DISKDUMP_DUMPFILE()) { + task = get_diskdump_panic_task(); + if (task) + return task; +- if (get_active_set()) +- return(get_active_set_panic_task()); +- } ++ } else if (XENDUMP_DUMPFILE()) { ++ task = get_xendump_panic_task(); ++ if (task) ++ return task; ++ } else if (LKCD_DUMPFILE()) ++ return(get_lkcd_panic_task()); ++ ++ if (get_active_set()) ++ return(get_active_set_panic_task()); + + return NO_TASK; + } +@@ -4298,14 +5473,17 @@ + + tc = FIRST_CONTEXT(); + for (i = 0; i < RUNNING_TASKS(); i++, tc++) { +- if (task_has_cpu(tc->task, NULL)) { ++ if (task_has_cpu(tc->task, NULL) && ++ (tc->processor >= 0) && ++ (tc->processor < NR_CPUS)) { + tt->panic_threads[tc->processor] = tc->task; + found++; + } + } + + if (!found && !(kt->flags & SMP) && +- (LKCD_DUMPFILE() || NETDUMP_DUMPFILE() || DISKDUMP_DUMPFILE())) ++ (LKCD_DUMPFILE() || NETDUMP_DUMPFILE() || ++ KDUMP_DUMPFILE() || DISKDUMP_DUMPFILE())) + tt->panic_threads[0] = get_dumpfile_panic_task(); + } + +@@ -4331,7 +5509,7 @@ + void + dump_task_table(int verbose) + { +- int i; ++ int i, nr_cpus; + struct task_context *tc; + char buf[BUFSIZE]; + int others, wrap, flen; +@@ -4363,6 +5541,12 @@ + fprintf(fp, "refresh_pid_hash_task_table()\n"); + else if (tt->refresh_task_table == refresh_hlist_task_table) + fprintf(fp, "refresh_hlist_task_table()\n"); ++ else if (tt->refresh_task_table == refresh_hlist_task_table_v2) ++ fprintf(fp, "refresh_hlist_task_table_v2()\n"); ++ else if (tt->refresh_task_table == refresh_hlist_task_table_v3) ++ fprintf(fp, "refresh_hlist_task_table_v3()\n"); ++ else if (tt->refresh_task_table == refresh_active_task_table) ++ fprintf(fp, "refresh_active_task_table()\n"); + else + fprintf(fp, "%lx\n", (ulong)tt->refresh_task_table); + +@@ -4411,6 +5595,9 @@ + if (tt->flags & NO_TIMESPEC) + sprintf(&buf[strlen(buf)], + 
"%sNO_TIMESPEC", others++ ? "|" : ""); ++ if (tt->flags & ACTIVE_ONLY) ++ sprintf(&buf[strlen(buf)], ++ "%sACTIVE_ONLY", others++ ? "|" : ""); + sprintf(&buf[strlen(buf)], ")"); + + if (strlen(buf) > 54) +@@ -4436,14 +5623,16 @@ + fprintf(fp, " last_mm_read: %lx\n", tt->last_mm_read); + fprintf(fp, " task_struct: %lx\n", (ulong)tt->task_struct); + fprintf(fp, " mm_struct: %lx\n", (ulong)tt->mm_struct); +- ++ fprintf(fp, " init_pid_ns: %lx\n", tt->init_pid_ns); + + fprintf(fp, " panic_threads:"); + + wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; + flen = sizeof(void *) == SIZEOF_32BIT ? 8 : 16; + +- for (i = 0; i < NR_CPUS; i++) { ++ nr_cpus = kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS; ++ ++ for (i = 0; i < nr_cpus; i++) { + if ((i % wrap) == 0) + fprintf(fp, "\n "); + fprintf(fp, "%.*lx ", flen, tt->panic_threads[i]); +@@ -4451,7 +5640,7 @@ + fprintf(fp, "\n"); + + fprintf(fp, " panic_ksp:"); +- for (i = 0; i < NR_CPUS; i++) { ++ for (i = 0; i < nr_cpus; i++) { + if ((i % wrap) == 0) + fprintf(fp, "\n "); + fprintf(fp, "%.*lx ", flen, tt->panic_ksp[i]); +@@ -4459,7 +5648,7 @@ + fprintf(fp, "\n"); + + fprintf(fp, " hardirq_ctx:"); +- for (i = 0; i < NR_CPUS; i++) { ++ for (i = 0; i < nr_cpus; i++) { + if ((i % wrap) == 0) + fprintf(fp, "\n "); + fprintf(fp, "%.*lx ", flen, tt->hardirq_ctx[i]); +@@ -4467,7 +5656,7 @@ + fprintf(fp, "\n"); + + fprintf(fp, " hardirq_tasks:"); +- for (i = 0; i < NR_CPUS; i++) { ++ for (i = 0; i < nr_cpus; i++) { + if ((i % wrap) == 0) + fprintf(fp, "\n "); + fprintf(fp, "%.*lx ", flen, tt->hardirq_tasks[i]); +@@ -4475,7 +5664,7 @@ + fprintf(fp, "\n"); + + fprintf(fp, " softirq_ctx:"); +- for (i = 0; i < NR_CPUS; i++) { ++ for (i = 0; i < nr_cpus; i++) { + if ((i % wrap) == 0) + fprintf(fp, "\n "); + fprintf(fp, "%.*lx ", flen, tt->softirq_ctx[i]); +@@ -4483,7 +5672,7 @@ + fprintf(fp, "\n"); + + fprintf(fp, " softirq_tasks:"); +- for (i = 0; i < NR_CPUS; i++) { ++ for (i = 0; i < nr_cpus; i++) { + if ((i % wrap) == 0) + fprintf(fp, "\n "); + fprintf(fp, "%.*lx ", flen, tt->softirq_tasks[i]); +@@ -4491,7 +5680,7 @@ + fprintf(fp, "\n"); + + fprintf(fp, " idle_threads:"); +- for (i = 0; i < NR_CPUS; i++) { ++ for (i = 0; i < nr_cpus; i++) { + if ((i % wrap) == 0) + fprintf(fp, "\n "); + fprintf(fp, "%.*lx ", flen, tt->idle_threads[i]); +@@ -4499,7 +5688,7 @@ + fprintf(fp, "\n"); + + fprintf(fp, " active_set:"); +- for (i = 0; i < NR_CPUS; i++) { ++ for (i = 0; i < nr_cpus; i++) { + if ((i % wrap) == 0) + fprintf(fp, "\n "); + fprintf(fp, "%.*lx ", flen, tt->active_set[i]); +@@ -4546,6 +5735,9 @@ + if ((tc->pid == 0) && !STREQ(tc->comm, pc->program_name)) + return TRUE; + ++ if (_ZOMBIE_ == TASK_STATE_UNINITIALIZED) ++ initialize_task_state(); ++ + if (IS_ZOMBIE(task) || IS_EXITING(task)) + return FALSE; + +@@ -4641,6 +5833,16 @@ + cnt++; + else + BZERO(tasklist, sizeof(ulong) * NR_CPUS); ++ } else if (OPENVZ()) { ++ runq = symbol_value("pcpu_info"); ++ runqbuf = GETBUF(SIZE(pcpu_info)); ++ for (i = 0; i < nr_cpus; i++, runq += SIZE(pcpu_info)) { ++ readmem(runq, KVADDR, runqbuf, SIZE(pcpu_info), ++ "pcpu info", FAULT_ON_ERROR); ++ tasklist[i] = ULONG(runqbuf + OFFSET(pcpu_info_idle)); ++ if (IS_KVADDR(tasklist[i])) ++ cnt++; ++ } + } + + if (runqbuf) +@@ -4734,14 +5936,38 @@ + } else if (symbol_exists("per_cpu__runqueues")) { + runq = symbol_value("per_cpu__runqueues"); + per_cpu = TRUE; +- } else ++ } else if (OPENVZ()) ++ runq = symbol_value("pcpu_info"); ++ else + return FALSE; + + BZERO(tt->active_set, sizeof(ulong) * NR_CPUS); + runqbuf = 
GETBUF(SIZE(runqueue)); + cnt = 0; + +- if (VALID_MEMBER(runqueue_curr) && per_cpu) { ++ if (OPENVZ()) { ++ ulong vcpu_struct; ++ char *pcpu_info_buf, *vcpu_struct_buf; ++ ++ pcpu_info_buf = GETBUF(SIZE(pcpu_info)); ++ vcpu_struct_buf = GETBUF(SIZE(vcpu_struct)); ++ ++ for (i = 0; i < kt->cpus; i++, runq += SIZE(pcpu_info)) { ++ readmem(runq, KVADDR, pcpu_info_buf, ++ SIZE(pcpu_info), "pcpu_info", FAULT_ON_ERROR); ++ vcpu_struct= ULONG(pcpu_info_buf + ++ OFFSET(pcpu_info_vcpu)); ++ readmem(vcpu_struct, KVADDR, vcpu_struct_buf, ++ SIZE(vcpu_struct), "pcpu_info->vcpu", ++ FAULT_ON_ERROR); ++ tt->active_set[i] = ULONG(vcpu_struct_buf + ++ OFFSET(vcpu_struct_rq) + OFFSET(runqueue_curr)); ++ if (IS_KVADDR(tt->active_set[i])) ++ cnt++; ++ } ++ FREEBUF(pcpu_info_buf); ++ FREEBUF(vcpu_struct_buf); ++ } else if (VALID_MEMBER(runqueue_curr) && per_cpu) { + for (i = 0; i < kt->cpus; i++) { + if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { + runq = symbol_value("per_cpu__runqueues") + +@@ -4799,23 +6025,55 @@ + tt->flags &= ~ACTIVE_SET; + } + +-#define RESOLVE_PANIC_AND_DIE_CALLERS() \ +- if ((panic_task > (NO_TASK+1)) && !die_task) \ +- return panic_task; \ +- \ +- if (panic_task && die_task) { \ +- error(WARNING, \ +- "multiple active tasks have called die and/or panic\n\n"); \ +- return NO_TASK; \ +- } \ +- \ +- if (die_task > (NO_TASK+1)) \ +- return die_task; \ +- else if (die_task == (NO_TASK+1)) \ +- error(WARNING, \ ++#define RESOLVE_PANIC_AND_DIE_CALLERS() \ ++ if (xen_panic_task) { \ ++ if (CRASHDEBUG(1)) \ ++ error(INFO, \ ++ "get_active_set_panic_task: %lx (xen_panic_event)\n", \ ++ xen_panic_task); \ ++ return xen_panic_task; \ ++ } \ ++ if (crash_kexec_task) { \ ++ if (CRASHDEBUG(1)) \ ++ error(INFO, \ ++ "get_active_set_panic_task: %lx (crash_kexec)\n", \ ++ crash_kexec_task); \ ++ return crash_kexec_task; \ ++ } \ ++ if ((panic_task > (NO_TASK+1)) && !die_task) { \ ++ if (CRASHDEBUG(1)) \ ++ fprintf(fp, \ ++ "get_active_set_panic_task: %lx (panic)\n", \ ++ panic_task); \ ++ return panic_task; \ ++ } \ ++ \ ++ if (panic_task && die_task) { \ ++ if ((panic_task > (NO_TASK+1)) && \ ++ (panic_task == die_task)) { \ ++ if (CRASHDEBUG(1)) \ ++ fprintf(fp, \ ++ "get_active_set_panic_task: %lx (panic)\n", \ ++ panic_task); \ ++ return panic_task; \ ++ } \ ++ error(WARNING, \ ++ "multiple active tasks have called die and/or panic\n\n"); \ ++ goto no_panic_task_found; \ ++ } \ ++ \ ++ if (die_task > (NO_TASK+1)) { \ ++ if (CRASHDEBUG(1)) \ ++ fprintf(fp, \ ++ "get_active_set_panic_task: %lx (die)\n", \ ++ die_task); \ ++ return die_task; \ ++ } \ ++ else if (die_task == (NO_TASK+1)) \ ++ error(WARNING, \ + "multiple active tasks have called die\n\n"); + +-#define SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS() \ ++#define SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS() \ + while (fgets(buf, BUFSIZE, pc->tmpfile)) { \ + if (strstr(buf, " die+")) { \ + switch (die_task) \ +@@ -4833,12 +6091,30 @@ + { \ + case NO_TASK: \ + panic_task = task; \ ++ if (XENDUMP_DUMPFILE()) \ ++ xendump_panic_hook(buf); \ + break; \ + default: \ + panic_task = NO_TASK+1; \ + break; \ + } \ + } \ ++ if (strstr(buf, " crash_kexec+") || \ ++ strstr(buf, " .crash_kexec+")) { \ ++ crash_kexec_task = task; \ ++ } \ ++ if (strstr(buf, " machine_kexec+") || \ ++ strstr(buf, " .machine_kexec+")) { \ ++ crash_kexec_task = task; \ ++ } \ ++ if (strstr(buf, " xen_panic_event+") || \ ++ strstr(buf, " .xen_panic_event+")){ \ ++ xen_panic_task = task; \ ++ xendump_panic_hook(buf); \ ++ } \ ++ if (machine_type("IA64") && 
XENDUMP_DUMPFILE() && !xen_panic_task && \ ++ strstr(buf, " sysrq_handle_crashdump+")) \ ++ xen_sysrq_task = task; \ + } + + /* +@@ -4850,11 +6126,14 @@ + int i, j, found; + ulong task; + char buf[BUFSIZE]; +- ulong panic_task, die_task; ++ ulong panic_task, die_task, crash_kexec_task; ++ ulong xen_panic_task; ++ ulong xen_sysrq_task; + char *tp; + struct task_context *tc; + +- panic_task = die_task = NO_TASK; ++ panic_task = die_task = crash_kexec_task = xen_panic_task = NO_TASK; ++ xen_sysrq_task = NO_TASK; + + for (i = 0; i < NR_CPUS; i++) { + if (!(task = tt->active_set[i])) +@@ -4867,15 +6146,16 @@ + if ((tp = fill_task_struct(task))) { + if ((tc = store_context(NULL, task, tp))) + tt->running_tasks++; ++ else ++ continue; + } +- continue; + } + + open_tmpfile(); + raw_stack_dump(GET_STACKBASE(task), STACKSIZE()); + rewind(pc->tmpfile); + +- SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS(); ++ SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); + + close_tmpfile(); + } +@@ -4903,7 +6183,7 @@ + raw_stack_dump(tt->hardirq_ctx[i], SIZE(thread_union)); + rewind(pc->tmpfile); + +- SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS(); ++ SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); + + close_tmpfile(); + } +@@ -4930,7 +6210,7 @@ + raw_stack_dump(tt->softirq_ctx[i], SIZE(thread_union)); + rewind(pc->tmpfile); + +- SEARCH_STACK_FOR_PANIC_AND_DIE_CALLERS(); ++ SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); + + close_tmpfile(); + } +@@ -4938,6 +6218,28 @@ + RESOLVE_PANIC_AND_DIE_CALLERS(); + } + ++ if (crash_kexec_task) { ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "get_active_set_panic_task: %lx (crash_kexec)\n", ++ crash_kexec_task); ++ return crash_kexec_task; ++ } ++ ++ if (xen_sysrq_task) { ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "get_active_set_panic_task: %lx (sysrq_handle_crashdump)\n", ++ xen_sysrq_task); ++ return xen_sysrq_task; ++ } ++ ++no_panic_task_found: ++ ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "get_active_set_panic_task: failed\n"); ++ + return NO_TASK; + } + +@@ -4997,6 +6299,11 @@ + ulong *tlist; + struct task_context *tc; + ++ if (VALID_MEMBER(rq_cfs)) { ++ dump_CFS_runqueues(); ++ return; ++ } ++ + if (VALID_MEMBER(runqueue_arrays)) { + dump_runqueues(); + return; +@@ -5017,120 +6324,370 @@ + error(FATAL, + "cannot determine run queue structures being used\n"); + +- cnt = 0; +- do { +- if (cnt == qlen) { +- FREEBUF(tlist); +- qlen += 1000; +- goto start_again; +- } ++ cnt = 0; ++ do { ++ if (cnt == qlen) { ++ FREEBUF(tlist); ++ qlen += 1000; ++ goto start_again; ++ } ++ ++ tlist[cnt++] = next; ++ ++ readmem(next+offs, KVADDR, &next, sizeof(void *), ++ "run queue entry", FAULT_ON_ERROR); ++ ++ if (next == runqueue_head) ++ break; ++ } while (next); ++ ++ for (i = 0; i < cnt; i++) { ++ if (tlist[i] == runqueue_head) ++ continue; ++ ++ if (!(tc = task_to_context(VIRTPAGEBASE(tlist[i])))) { ++ fprintf(fp, ++ "PID: ? TASK: %lx CPU: ? 
COMMAND: ?\n", ++ tlist[i]); ++ continue; ++ } ++ ++ if (!is_idle_thread(tc->task)) ++ print_task_header(fp, tc, 0); ++ } ++} ++ ++#define RUNQ_ACTIVE (1) ++#define RUNQ_EXPIRED (2) ++ ++static void ++dump_runqueues(void) ++{ ++ int cpu; ++ ulong runq, offset; ++ char *runqbuf; ++ ulong active, expired, arrays; ++ int per_cpu; ++ ++ ++ if (symbol_exists("runqueues")) { ++ runq = symbol_value("runqueues"); ++ per_cpu = FALSE; ++ } else if (symbol_exists("per_cpu__runqueues")) { ++ runq = symbol_value("per_cpu__runqueues"); ++ per_cpu = TRUE; ++ } ++ ++ runqbuf = GETBUF(SIZE(runqueue)); ++ ++ for (cpu = 0; cpu < kt->cpus; cpu++, runq += SIZE(runqueue)) { ++ if (per_cpu) { ++ if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { ++ runq = symbol_value("per_cpu__runqueues") + ++ kt->__per_cpu_offset[cpu]; ++ } else ++ runq = symbol_value("per_cpu__runqueues"); ++ } ++ ++ fprintf(fp, "RUNQUEUES[%d]: %lx\n", cpu, runq); ++ ++ readmem(runq, KVADDR, runqbuf, SIZE(runqueue), ++ "runqueues array entry", FAULT_ON_ERROR); ++ active = ULONG(runqbuf + OFFSET(runqueue_active)); ++ expired = ULONG(runqbuf + OFFSET(runqueue_expired)); ++ arrays = runq + OFFSET(runqueue_arrays); ++ ++ console("active: %lx\n", active); ++ console("expired: %lx\n", expired); ++ console("arrays: %lx\n", arrays); ++ ++ offset = active == arrays ? OFFSET(runqueue_arrays) : ++ OFFSET(runqueue_arrays) + SIZE(prio_array); ++ offset = active - runq; ++ dump_prio_array(RUNQ_ACTIVE, active, &runqbuf[offset]); ++ ++ offset = expired == arrays ? OFFSET(runqueue_arrays) : ++ OFFSET(runqueue_arrays) + SIZE(prio_array); ++ offset = expired - runq; ++ dump_prio_array(RUNQ_EXPIRED, expired, &runqbuf[offset]); ++ } ++} ++ ++static void ++dump_prio_array(int which, ulong k_prio_array, char *u_prio_array) ++{ ++ int i, c, cnt, qheads, nr_active; ++ ulong offset, kvaddr, uvaddr; ++ ulong list_head[2]; ++ struct list_data list_data, *ld; ++ struct task_context *tc; ++ ulong *tlist; ++ ++ qheads = (i = ARRAY_LENGTH(prio_array_queue)) ? ++ i : get_array_length("prio_array.queue", NULL, SIZE(list_head)); ++ ++ console("dump_prio_array[%d]: %lx %lx\n", ++ which, k_prio_array, (ulong)u_prio_array); ++ ++ nr_active = INT(u_prio_array + OFFSET(prio_array_nr_active)); ++ console("nr_active: %d\n", nr_active); ++ ++ fprintf(fp, " %s PRIO_ARRAY: %lx\n", ++ which == RUNQ_ACTIVE ? "ACTIVE" : "EXPIRED", k_prio_array); ++ ++ ld = &list_data; ++ ++ for (i = 0; i < 140; i++) { ++ offset = OFFSET(prio_array_queue) + (i * SIZE(list_head)); ++ kvaddr = k_prio_array + offset; ++ uvaddr = (ulong)u_prio_array + offset; ++ BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "prio_array[%d] @ %lx => %lx/%lx\n", ++ i, kvaddr, list_head[0], list_head[1]); ++ ++ if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr)) ++ continue; ++ ++ console("[%d] %lx => %lx-%lx ", i, kvaddr, list_head[0], ++ list_head[1]); ++ ++ fprintf(fp, " [%3d] ", i); ++ ++ BZERO(ld, sizeof(struct list_data)); ++ ld->start = list_head[0]; ++ ld->list_head_offset = OFFSET(task_struct_run_list); ++ ld->end = kvaddr; ++ hq_open(); ++ cnt = do_list(ld); ++ hq_close(); ++ console("%d entries\n", cnt); ++ tlist = (ulong *)GETBUF((cnt) * sizeof(ulong)); ++ cnt = retrieve_list(tlist, cnt); ++ for (c = 0; c < cnt; c++) { ++ if (!(tc = task_to_context(tlist[c]))) ++ continue; ++ if (c) ++ INDENT(8); ++ print_task_header(fp, tc, FALSE); ++ } ++ FREEBUF(tlist); ++ } ++} ++ ++/* ++ * CFS scheduler uses Red-Black trees to maintain run queue. 
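As an aside (illustrative only, not part of the patch): each runnable task is linked into the per-cpu cfs_rq tree through task_struct.se.run_node and keyed by its virtual runtime, so the in-order walk done with rb_first()/rb_next() below visits tasks leftmost first, i.e. in the order CFS would schedule them. Recovering a task from a tree node is the usual container_of() step-back, which dump_CFS_runqueues() performs with runtime offsets:

    /* node points at task_struct.se.run_node embedded in some task_struct */
    task = (ulong)node - OFFSET(task_struct_se) - OFFSET(sched_entity_run_node);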
++ */ ++struct rb_node ++{ ++ unsigned long rb_parent_color; ++#define RB_RED 0 ++#define RB_BLACK 1 ++ struct rb_node *rb_right; ++ struct rb_node *rb_left; ++}; ++ ++struct rb_root ++{ ++ struct rb_node *rb_node; ++}; ++ ++static struct rb_node * ++rb_first(struct rb_root *root) ++{ ++ struct rb_root rloc; ++ struct rb_node *n; ++ struct rb_node nloc; ++ ++ readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), ++ "rb_root", FAULT_ON_ERROR); ++ ++ n = rloc.rb_node; ++ if (!n) ++ return NULL; ++ while (rb_left(n, &nloc)) ++ n = nloc.rb_left; ++ ++ return n; ++} ++ ++static struct rb_node * ++rb_parent(struct rb_node *node, struct rb_node *nloc) ++{ ++ readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), ++ "rb_node", FAULT_ON_ERROR); ++ ++ return (struct rb_node *)(nloc->rb_parent_color & ~3); ++} ++ ++static struct rb_node * ++rb_right(struct rb_node *node, struct rb_node *nloc) ++{ ++ readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), ++ "rb_node", FAULT_ON_ERROR); ++ ++ return nloc->rb_right; ++} + +- tlist[cnt++] = next; ++static struct rb_node * ++rb_left(struct rb_node *node, struct rb_node *nloc) ++{ ++ readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), ++ "rb_node", FAULT_ON_ERROR); + +- readmem(next+offs, KVADDR, &next, sizeof(void *), +- "run queue entry", FAULT_ON_ERROR); ++ return nloc->rb_left; ++} + +- if (next == runqueue_head) +- break; +- } while (next); ++static struct rb_node * ++rb_next(struct rb_node *node) ++{ ++ struct rb_node nloc; ++ struct rb_node *parent; + +- for (i = 0; i < cnt; i++) { +- if (tlist[i] == runqueue_head) +- continue; ++ parent = rb_parent(node, &nloc); + +- if (!(tc = task_to_context(VIRTPAGEBASE(tlist[i])))) { +- fprintf(fp, +- "PID: ? TASK: %lx CPU: ? COMMAND: ?\n", +- tlist[i]); +- continue; +- } ++ if (parent == node) ++ return NULL; + +- if (!is_idle_thread(tc->task)) +- print_task_header(fp, tc, 0); ++ if (nloc.rb_right) { ++ node = nloc.rb_right; ++ while (rb_left(node, &nloc)) ++ node = nloc.rb_left; ++ return node; + } +-} + +-#define RUNQ_ACTIVE (1) +-#define RUNQ_EXPIRED (2) ++ while ((parent = rb_parent(node, &nloc)) && (node == rb_right(parent, &nloc))) ++ node = parent; ++ ++ return parent; ++} + + static void +-dump_runqueues(void) ++dump_CFS_runqueues(void) + { + int cpu; +- ulong runq, offset; +- char *runqbuf; +- ulong active, expired, arrays; +- int per_cpu; ++ ulong runq, cfs_rq; ++ char *runqbuf, *cfs_rq_buf; ++ ulong leftmost, tasks_timeline; ++ struct task_context *tc; ++ long nr_running, cfs_rq_nr_running; ++ struct rb_root *root; ++ struct rb_node *node; ++ ++ if (!VALID_STRUCT(cfs_rq)) { ++ STRUCT_SIZE_INIT(cfs_rq, "cfs_rq"); ++ MEMBER_OFFSET_INIT(rq_rt, "rq", "rt"); ++ MEMBER_OFFSET_INIT(rq_nr_running, "rq", "nr_running"); ++ MEMBER_OFFSET_INIT(task_struct_se, "task_struct", "se"); ++ MEMBER_OFFSET_INIT(sched_entity_run_node, "sched_entity", ++ "run_node"); ++ MEMBER_OFFSET_INIT(cfs_rq_rb_leftmost, "cfs_rq", "rb_leftmost"); ++ MEMBER_OFFSET_INIT(cfs_rq_nr_running, "cfs_rq", "nr_running"); ++ MEMBER_OFFSET_INIT(cfs_rq_tasks_timeline, "cfs_rq", ++ "tasks_timeline"); ++ MEMBER_OFFSET_INIT(rt_rq_active, "rt_rq", "active"); ++ MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct", ++ "run_list"); ++ } + ++ if (!symbol_exists("per_cpu__runqueues")) ++ error(FATAL, "per_cpu__runqueues does not exist\n"); + +- if (symbol_exists("runqueues")) { +- runq = symbol_value("runqueues"); +- per_cpu = FALSE; +- } else if (symbol_exists("per_cpu__runqueues")) { +- runq = symbol_value("per_cpu__runqueues"); +- 
per_cpu = TRUE; +- } ++ runq = symbol_value("per_cpu__runqueues"); + + runqbuf = GETBUF(SIZE(runqueue)); ++ cfs_rq_buf = symbol_exists("per_cpu__init_cfs_rq") ? ++ GETBUF(SIZE(cfs_rq)) : NULL; + +- for (cpu = 0; cpu < kt->cpus; cpu++, runq += SIZE(runqueue)) { +- if (per_cpu) { +- if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { +- runq = symbol_value("per_cpu__runqueues") + +- kt->__per_cpu_offset[cpu]; +- } else +- runq = symbol_value("per_cpu__runqueues"); +- } ++ for (cpu = 0; cpu < kt->cpus; cpu++) { ++ if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { ++ runq = symbol_value("per_cpu__runqueues") + ++ kt->__per_cpu_offset[cpu]; ++ } else ++ runq = symbol_value("per_cpu__runqueues"); + +- fprintf(fp, "RUNQUEUES[%d]: %lx\n", cpu, runq); ++ fprintf(fp, "RUNQUEUES[%d]: %lx\n", cpu, runq); ++ readmem(runq, KVADDR, runqbuf, SIZE(runqueue), ++ "per-cpu rq", FAULT_ON_ERROR); ++ ++ if (cfs_rq_buf) { ++ /* ++ * Use default task group's cfs_rq on each cpu. ++ */ ++ if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { ++ cfs_rq = symbol_value("per_cpu__init_cfs_rq") + ++ kt->__per_cpu_offset[cpu]; ++ } else ++ cfs_rq = symbol_value("per_cpu__init_cfs_rq"); + +- readmem(runq, KVADDR, runqbuf, SIZE(runqueue), +- "runqueues array entry", FAULT_ON_ERROR); +- active = ULONG(runqbuf + OFFSET(runqueue_active)); +- expired = ULONG(runqbuf + OFFSET(runqueue_expired)); +- arrays = runq + OFFSET(runqueue_arrays); ++ readmem(cfs_rq, KVADDR, cfs_rq_buf, SIZE(cfs_rq), ++ "per-cpu cfs_rq", FAULT_ON_ERROR); ++ leftmost = ULONG(cfs_rq_buf + OFFSET(cfs_rq_rb_leftmost)); ++ tasks_timeline = ULONG(cfs_rq_buf + ++ OFFSET(cfs_rq_tasks_timeline)); ++ nr_running = LONG(cfs_rq_buf + OFFSET(rq_nr_running)); ++ cfs_rq_nr_running = ULONG(cfs_rq_buf + ++ OFFSET(cfs_rq_nr_running)); ++ root = (struct rb_root *)(cfs_rq + ++ OFFSET(cfs_rq_tasks_timeline)); ++ } else { ++ leftmost = ULONG(runqbuf + OFFSET(rq_cfs) + ++ OFFSET(cfs_rq_rb_leftmost)); ++ tasks_timeline = ULONG(runqbuf + OFFSET(rq_cfs) + ++ OFFSET(cfs_rq_tasks_timeline)); ++ nr_running = LONG(runqbuf + OFFSET(rq_nr_running)); ++ cfs_rq_nr_running = ULONG(runqbuf + OFFSET(rq_cfs) + ++ OFFSET(cfs_rq_nr_running)); ++ root = (struct rb_root *)(runq + OFFSET(rq_cfs) + ++ OFFSET(cfs_rq_tasks_timeline)); ++ } ++ ++ dump_RT_prio_array(nr_running != cfs_rq_nr_running, ++ runq + OFFSET(rq_rt) + OFFSET(rt_rq_active), ++ &runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)]); + +- console("active: %lx\n", active); +- console("expired: %lx\n", expired); +- console("arrays: %lx\n", arrays); ++ fprintf(fp, " CFS RB_ROOT: %lx\n", (ulong)root); + +- offset = active == arrays ? OFFSET(runqueue_arrays) : +- OFFSET(runqueue_arrays) + SIZE(prio_array); +- offset = active - runq; +- dump_prio_array(RUNQ_ACTIVE, active, &runqbuf[offset]); ++ if (!leftmost) ++ continue; + +- offset = expired == arrays ? 
OFFSET(runqueue_arrays) : +- OFFSET(runqueue_arrays) + SIZE(prio_array); +- offset = expired - runq; +- dump_prio_array(RUNQ_EXPIRED, expired, &runqbuf[offset]); ++ for (node = rb_first(root); node; node = rb_next(node)) { ++ tc = task_to_context((ulong)node - OFFSET(task_struct_se) - ++ OFFSET(sched_entity_run_node)); ++ if (!tc) ++ continue; ++ INDENT(2); ++ print_task_header(fp, tc, FALSE); ++ } + } ++ ++ FREEBUF(runqbuf); ++ if (cfs_rq_buf) ++ FREEBUF(cfs_rq_buf); + } + + static void +-dump_prio_array(int which, ulong k_prio_array, char *u_prio_array) ++dump_RT_prio_array(int active, ulong k_prio_array, char *u_prio_array) + { +- int i, c, cnt, qheads, nr_active; ++ int i, c, cnt, qheads; + ulong offset, kvaddr, uvaddr; + ulong list_head[2]; + struct list_data list_data, *ld; + struct task_context *tc; + ulong *tlist; + +- qheads = (i = ARRAY_LENGTH(prio_array_queue)) ? +- i : get_array_length("prio_array.queue", NULL, SIZE(list_head)); +- +- console("dump_prio_array[%d]: %lx %lx\n", +- which, k_prio_array, (ulong)u_prio_array); ++ fprintf(fp, " RT PRIO_ARRAY: %lx\n", k_prio_array); + +- nr_active = INT(u_prio_array + OFFSET(prio_array_nr_active)); +- console("nr_active: %d\n", nr_active); ++ if (!active) ++ return; + +- fprintf(fp, " %s PRIO_ARRAY: %lx\n", +- which == RUNQ_ACTIVE ? "ACTIVE" : "EXPIRED", k_prio_array); ++ qheads = (i = ARRAY_LENGTH(prio_array_queue)) ? ++ i : get_array_length("prio_array.queue", NULL, SIZE(list_head)); + + ld = &list_data; + +- for (i = 0; i < 140; i++) { ++ for (i = 0; i < qheads; i++) { + offset = OFFSET(prio_array_queue) + (i * SIZE(list_head)); + kvaddr = k_prio_array + offset; + uvaddr = (ulong)u_prio_array + offset; +@@ -5143,9 +6700,6 @@ + if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr)) + continue; + +- console("[%d] %lx => %lx-%lx ", i, kvaddr, list_head[0], +- list_head[1]); +- + fprintf(fp, " [%3d] ", i); + + BZERO(ld, sizeof(struct list_data)); +@@ -5155,8 +6709,7 @@ + hq_open(); + cnt = do_list(ld); + hq_close(); +- console("%d entries\n", cnt); +- tlist = (ulong *)GETBUF((cnt) * sizeof(ulong)); ++ tlist = (ulong *)GETBUF((cnt) * sizeof(ulong)); + cnt = retrieve_list(tlist, cnt); + for (c = 0; c < cnt; c++) { + if (!(tc = task_to_context(tlist[c]))) +@@ -5174,6 +6727,9 @@ + #define _NSIG_BPW machdep->bits + #define _NSIG_WORDS (_NSIG / _NSIG_BPW) + ++#undef SIGRTMIN ++#define SIGRTMIN 32 ++ + static struct signame { + char *name; + char *altname; +@@ -5209,23 +6765,56 @@ + /* 28 */ {"SIGWINCH", NULL}, + /* 29 */ {"SIGIO", "SIGPOLL"}, + /* 30 */ {"SIGPWR", NULL}, +- /* 31 */ {"SIGSYS", NULL}, ++ /* 31 */ {"SIGSYS", "SIGUNUSED"}, + {NULL, NULL}, /* Real time signals start here. */ + }; + ++static int ++sigrt_minmax(int *min, int *max) ++{ ++ int sigrtmax, j; ++ ++ sigrtmax = THIS_KERNEL_VERSION < LINUX(2,5,0) ? ++ _NSIG - 1 : _NSIG; ++ ++ if (min && max) { ++ j = sigrtmax-SIGRTMIN-1; ++ *max = j / 2; ++ *min = j - *max; ++ } ++ ++ return sigrtmax; ++} ++ + static void + signame_list(void) + { +- int i; ++ int i, sigrtmax, j, min, max; + +- for (i = 0; i < _NSIG; i++) { +- if (!signame[i].name) +- continue; ++ sigrtmax = sigrt_minmax(&min, &max); ++ j = 1; ++ ++ for (i = 1; i <= sigrtmax; i++) { ++ if ((i == SIGRTMIN) || (i == sigrtmax)) { ++ fprintf(fp, "[%d] %s", i, ++ (i== SIGRTMIN) ? 
"SIGRTMIN" : "SIGRTMAX"); ++ } else if (i > SIGRTMIN) { ++ if (j <= min){ ++ fprintf(fp, "[%d] %s%d", i , "SIGRTMIN+", j); ++ j++; ++ } else if (max >= 1) { ++ fprintf(fp, "[%d] %s%d", i , "SIGRTMAX-",max); ++ max--; ++ } ++ } else { ++ if (!signame[i].name) ++ continue; + +- fprintf(fp, "%s[%d] %s", i < 10 ? " " : "", +- i, signame[i].name); +- if (signame[i].altname) +- fprintf(fp, "/%s", signame[i].altname); ++ fprintf(fp, "%s[%d] %s", i < 10 ? " " : "", ++ i, signame[i].name); ++ if (signame[i].altname) ++ fprintf(fp, "/%s", signame[i].altname); ++ } + fprintf(fp, "\n"); + } + } +@@ -5236,8 +6825,7 @@ + static void + translate_sigset(ulonglong sigset) + { +- int i, c, bit, len; +- ulonglong mask, sig; ++ int sigrtmax, min, max, i, j, c, len; + char buf[BUFSIZE]; + + if (!sigset) { +@@ -5246,21 +6834,42 @@ + } + + len = 0; ++ sigrtmax= sigrt_minmax(&min, &max); ++ j = 1; ++ ++ for (i = 1, c = 0; i <= sigrtmax; i++) { ++ if (sigset & (ulonglong)1) { ++ if (i == SIGRTMIN || i == sigrtmax) ++ sprintf(buf, "%s%s", c++ ? " " : "", ++ (i==SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX"); ++ else if (i > SIGRTMIN) { ++ if (j <= min) ++ sprintf(buf, "%s%s%d", ++ c++ ? " " : "", "SIGRTMIN+", j); ++ else if (max >= 1) ++ sprintf(buf, "%s%s%d", ++ c++ ? " " : "", "SIGRTMAX-", max); ++ } else ++ sprintf(buf, "%s%s", c++ ? " " : "", ++ signame[i].name); + +- for (i = c = 0; i < (_NSIG/2); i++) { +- mask = (ulong)(1) << i; +- if ((sig = (sigset & mask))) { +- bit = ffs((int)sig); +- sprintf(buf, "%s%s", c++ ? " " : "", +- signame[bit].name); + if ((len + strlen(buf)) > 80) { + shift_string_left(buf, 1); + fprintf(fp, "\n"); + len = 0; + } ++ + len += strlen(buf); + fprintf(fp, buf); + } ++ ++ sigset >>= 1; ++ if (i > SIGRTMIN) { ++ if (j <= min) ++ j++; ++ else if (max >= 1) ++ max--; ++ } + } + fprintf(fp, "\n"); + } +@@ -5290,13 +6899,14 @@ + struct task_context *tc; + ulong *tasklist; + char *siglist; ++ int thread_group = FALSE; + + tasklist = (ulong *)GETBUF((MAXARGS+NR_CPUS)*sizeof(ulong)); + ref = (struct reference *)GETBUF(sizeof(struct reference)); + siglist = GETBUF(BUFSIZE); + ref->str = siglist; + +- while ((c = getopt(argcnt, args, "lR:s:")) != EOF) { ++ while ((c = getopt(argcnt, args, "lR:s:g")) != EOF) { + switch(c) + { + case 's': +@@ -5314,6 +6924,10 @@ + signame_list(); + return; + ++ case 'g': ++ pc->curcmd_flags |= TASK_SPECIFIED; ++ thread_group = TRUE; ++ break; + default: + argerrs++; + break; +@@ -5360,10 +6974,65 @@ + tasklist[tcnt++] = CURRENT_TASK(); + + for (c = 0; c < tcnt; c++) { +- do_sig(tasklist[c], 0, strlen(ref->str) ? ref : NULL); +- fprintf(fp, "\n"); ++ if (thread_group) ++ do_sig_thread_group(tasklist[c]); ++ else { ++ do_sig(tasklist[c], 0, strlen(ref->str) ? ref : NULL); ++ fprintf(fp, "\n"); ++ } ++ } ++ ++} ++ ++ ++/* ++ * Do the work for the "sig -g" command option, coming from sig or foreach. 
++ */ ++static void ++do_sig_thread_group(ulong task) ++{ ++ int i; ++ int cnt; ++ struct task_context *tc; ++ ulong tgid; ++ ++ tc = task_to_context(task); ++ tgid = task_tgid(task); ++ ++ if (tc->pid != tgid) { ++ if (pc->curcmd_flags & TASK_SPECIFIED) { ++ if (!(tc = tgid_to_context(tgid))) ++ return; ++ task = tc->task; ++ } else ++ return; + } + ++ if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN)) ++ return; ++ ++ print_task_header(fp, tc, 0); ++ dump_signal_data(tc, THREAD_GROUP_LEVEL); ++ fprintf(fp, "\n "); ++ print_task_header(fp, tc, 0); ++ dump_signal_data(tc, TASK_LEVEL|TASK_INDENT); ++ ++ tc = FIRST_CONTEXT(); ++ for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) { ++ if (tc->task == task) ++ continue; ++ ++ if (task_tgid(tc->task) == tgid) { ++ fprintf(fp, "\n "); ++ print_task_header(fp, tc, 0); ++ dump_signal_data(tc, TASK_LEVEL|TASK_INDENT); ++ cnt++; ++ if (tc->pid == 0) ++ pc->curcmd_flags |= IDLE_TASK_SHOWN; ++ } ++ } ++ ++ fprintf(fp, "\n"); + } + + /* +@@ -5381,7 +7050,7 @@ + else { + if (!(flags & FOREACH_TASK)) + print_task_header(fp, tc, 0); +- dump_signal_data(tc); ++ dump_signal_data(tc, TASK_LEVEL|THREAD_GROUP_LEVEL); + } + } + +@@ -5401,40 +7070,34 @@ + * Dump all signal-handling data for a task. + */ + static void +-dump_signal_data(struct task_context *tc) ++dump_signal_data(struct task_context *tc, ulong flags) + { +- int i, others, use_sighand; +- int translate, sig, sigpending; ++ int i, sigrtmax, others, use_sighand; ++ int translate, sigpending; + uint ti_flags; + ulonglong sigset, blocked, mask; +- ulong signal_struct, kaddr, handler, flags, sigqueue, next; ++ ulong signal_struct, kaddr, handler, sa_flags, sigqueue; + ulong sighand_struct; + long size; + char *signal_buf, *uaddr; ++ ulong shared_pending, signal; + char buf1[BUFSIZE]; + char buf2[BUFSIZE]; + char buf3[BUFSIZE]; + char buf4[BUFSIZE]; + +- sigset = task_signal(tc->task); ++ if (VALID_STRUCT(sigqueue) && !VALID_MEMBER(sigqueue_next)) { ++ MEMBER_OFFSET_INIT(sigqueue_next, "sigqueue", "next"); ++ MEMBER_OFFSET_INIT(sigqueue_list, "sigqueue", "list"); ++ MEMBER_OFFSET_INIT(sigqueue_info, "sigqueue", "info"); ++ } else if (!VALID_MEMBER(signal_queue_next)) { ++ MEMBER_OFFSET_INIT(signal_queue_next, "signal_queue", "next"); ++ MEMBER_OFFSET_INIT(signal_queue_info, "signal_queue", "info"); ++ } ++ ++ sigset = task_signal(tc->task, 0); + if (!tt->last_task_read) + return; +- blocked = task_blocked(tc->task); +- +- if (VALID_MEMBER(task_struct_sigpending)) +- sigpending = INT(tt->task_struct + +- OFFSET(task_struct_sigpending)); +- else if (VALID_MEMBER(thread_info_flags)) { +- fill_thread_info(tc->thread_info); +- ti_flags = UINT(tt->thread_info + OFFSET(thread_info_flags)); +- sigpending = ti_flags & (1<task_struct + +@@ -5443,143 +7106,259 @@ + signal_struct = ULONG(tt->task_struct + + OFFSET(task_struct_signal)); + +- fprintf(fp, "SIGNAL_STRUCT: %lx ", signal_struct); +- + size = MAX(SIZE(signal_struct), VALID_SIZE(signal_queue) ? + SIZE(signal_queue) : SIZE(sigqueue)); + if (VALID_SIZE(sighand_struct)) + size = MAX(size, SIZE(sighand_struct)); + signal_buf = GETBUF(size); + +- readmem(signal_struct, KVADDR, signal_buf, +- SIZE(signal_struct), "signal_struct buffer", +- FAULT_ON_ERROR); +- fprintf(fp, "COUNT: %d\n", +- INT(signal_buf + OFFSET(signal_struct_count))); +- +- fprintf(fp, " SIG %s %s %s %s\n", +- mkstring(buf1, VADDR_PRLEN == 8 ? 
9 : VADDR_PRLEN, +- CENTER, "SIGACTION"), ++ if (signal_struct) ++ readmem(signal_struct, KVADDR, signal_buf, ++ SIZE(signal_struct), "signal_struct buffer", ++ FAULT_ON_ERROR); ++ ++ /* ++ * Signal dispositions (thread group level). ++ */ ++ if (flags & THREAD_GROUP_LEVEL) { ++ if (flags & TASK_INDENT) ++ INDENT(2); ++ fprintf(fp, "SIGNAL_STRUCT: %lx ", signal_struct); ++ if (!signal_struct) { ++ fprintf(fp, "\n"); ++ return; ++ } ++ fprintf(fp, "COUNT: %d\n", ++ INT(signal_buf + OFFSET(signal_struct_count))); ++ ++ if (flags & TASK_INDENT) ++ INDENT(2); ++ fprintf(fp, " SIG %s %s %s %s\n", ++ mkstring(buf1, VADDR_PRLEN == 8 ? 9 : VADDR_PRLEN, ++ CENTER, "SIGACTION"), + mkstring(buf2, UVADDR_PRLEN, RJUST, "HANDLER"), + mkstring(buf3, 16, CENTER, "MASK"), + mkstring(buf4, VADDR_PRLEN, LJUST, "FLAGS")); + +- if (VALID_MEMBER(task_struct_sighand)) { +- sighand_struct = ULONG(tt->task_struct + +- OFFSET(task_struct_sighand)); +- readmem(sighand_struct, KVADDR, signal_buf, +- SIZE(sighand_struct), "sighand_struct buffer", +- FAULT_ON_ERROR); +- use_sighand = TRUE; +- } else +- use_sighand = FALSE; +- +- for (i = 1; i < _NSIG; i++) { +- fprintf(fp, "%s[%d] ", i < 10 ? " " : "", i); +- +- if (use_sighand) { +- kaddr = sighand_struct + OFFSET(sighand_struct_action) + +- ((i-1) * SIZE(k_sigaction)); +- uaddr = signal_buf + OFFSET(sighand_struct_action) + +- ((i-1) * SIZE(k_sigaction)); +- } else { +- kaddr = signal_struct + OFFSET(signal_struct_action) + +- ((i-1) * SIZE(k_sigaction)); +- uaddr = signal_buf + OFFSET(signal_struct_action) + +- ((i-1) * SIZE(k_sigaction)); +- } ++ if (VALID_MEMBER(task_struct_sighand)) { ++ sighand_struct = ULONG(tt->task_struct + ++ OFFSET(task_struct_sighand)); ++ readmem(sighand_struct, KVADDR, signal_buf, ++ SIZE(sighand_struct), "sighand_struct buffer", ++ FAULT_ON_ERROR); ++ use_sighand = TRUE; ++ } else ++ use_sighand = FALSE; + +- handler = ULONG(uaddr + OFFSET(sigaction_sa_handler)); +- switch ((long)handler) +- { +- case -1: +- mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_ERR"); +- break; +- case 0: +- mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_DFL"); +- break; +- case 1: +- mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_IGN"); +- break; +- default: +- mkstring(buf1, UVADDR_PRLEN, RJUST|LONG_HEX, +- MKSTR(handler)); +- break; +- } ++ sigrtmax = sigrt_minmax(NULL, NULL); + +- mask = sigaction_mask((ulong)uaddr); +- flags = ULONG(uaddr + OFFSET(sigaction_sa_flags)); ++ for (i = 1; i <= sigrtmax; i++) { ++ if (flags & TASK_INDENT) ++ INDENT(2); + +- fprintf(fp, "%s%s %s %016llx %lx ", +- space(MINSPACE-1), +- mkstring(buf2,UVADDR_PRLEN,LJUST|LONG_HEX,MKSTR(kaddr)), +- buf1, +- mask, +- flags); +- +- if (flags) { +- others = 0; translate = 1; +- if (flags & SA_NOCLDSTOP) +- fprintf(fp, "%s%sSA_NOCLDSTOP", +- translate-- > 0 ? "(" : "", +- others++ ? "|" : ""); ++ fprintf(fp, "%s[%d] ", i < 10 ? 
" " : "", i); ++ ++ if (use_sighand) { ++ kaddr = sighand_struct + ++ OFFSET(sighand_struct_action) + ++ ((i-1) * SIZE(k_sigaction)); ++ uaddr = signal_buf + ++ OFFSET(sighand_struct_action) + ++ ((i-1) * SIZE(k_sigaction)); ++ } else { ++ kaddr = signal_struct + ++ OFFSET(signal_struct_action) + ++ ((i-1) * SIZE(k_sigaction)); ++ uaddr = signal_buf + ++ OFFSET(signal_struct_action) + ++ ((i-1) * SIZE(k_sigaction)); ++ } ++ ++ handler = ULONG(uaddr + OFFSET(sigaction_sa_handler)); ++ switch ((long)handler) ++ { ++ case -1: ++ mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_ERR"); ++ break; ++ case 0: ++ mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_DFL"); ++ break; ++ case 1: ++ mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_IGN"); ++ break; ++ default: ++ mkstring(buf1, UVADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(handler)); ++ break; ++ } ++ ++ mask = sigaction_mask((ulong)uaddr); ++ sa_flags = ULONG(uaddr + OFFSET(sigaction_sa_flags)); ++ ++ fprintf(fp, "%s%s %s %016llx %lx ", ++ space(MINSPACE-1), ++ mkstring(buf2, ++ UVADDR_PRLEN,LJUST|LONG_HEX,MKSTR(kaddr)), ++ buf1, ++ mask, ++ sa_flags); ++ ++ if (sa_flags) { ++ others = 0; translate = 1; ++ if (sa_flags & SA_NOCLDSTOP) ++ fprintf(fp, "%s%sSA_NOCLDSTOP", ++ translate-- > 0 ? "(" : "", ++ others++ ? "|" : ""); + #ifdef SA_RESTORER +- if (flags & SA_RESTORER) +- fprintf(fp, "%s%sSA_RESTORER", +- translate-- > 0 ? "(" : "", +- others++ ? "|" : ""); ++ if (sa_flags & SA_RESTORER) ++ fprintf(fp, "%s%sSA_RESTORER", ++ translate-- > 0 ? "(" : "", ++ others++ ? "|" : ""); + #endif + #ifdef SA_NOCLDWAIT +- if (flags & SA_NOCLDWAIT) +- fprintf(fp, "%s%sSA_NOCLDWAIT", +- translate-- > 0 ? "(" : "", +- others++ ? "|" : ""); ++ if (sa_flags & SA_NOCLDWAIT) ++ fprintf(fp, "%s%sSA_NOCLDWAIT", ++ translate-- > 0 ? "(" : "", ++ others++ ? "|" : ""); + #endif +- if (flags & SA_SIGINFO) +- fprintf(fp, "%s%sSA_SIGINFO", +- translate-- > 0 ? "(" : "", +- others++ ? "|" : ""); +- if (flags & SA_ONSTACK) +- fprintf(fp, "%s%sSA_ONSTACK", +- translate-- > 0 ? "(" : "", +- others++ ? "|" : ""); +- if (flags & SA_RESTART) +- fprintf(fp, "%s%sSA_RESTART", +- translate-- > 0 ? "(" : "", +- others++ ? "|" : ""); +- if (flags & SA_NODEFER) +- fprintf(fp, "%s%sSA_NODEFER", +- translate-- > 0 ? "(" : "", +- others++ ? "|" : ""); +- if (flags & SA_RESETHAND) +- fprintf(fp, "%s%sSA_RESETHAND", +- translate-- > 0 ? "(" : "", +- others++ ? "|" : ""); +- if (translate < 1) +- fprintf(fp, ")"); +- } +- +- fprintf(fp, "\n"); +- } +- +- if (VALID_MEMBER(task_struct_sigqueue)) +- sigqueue = ULONG(tt->task_struct + +- OFFSET(task_struct_sigqueue)); +- +- else if (VALID_MEMBER(task_struct_pending)) +- sigqueue = ULONG(tt->task_struct + +- OFFSET(task_struct_pending) + +- OFFSET_OPTION(sigpending_head, sigpending_list)); +- +- if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue)) +- sigqueue = 0; +- +- if (sigqueue) +- fprintf(fp, "SIGQUEUE: SIG %s\n", +- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO")); +- else +- fprintf(fp, "SIGQUEUE: (empty)\n"); ++ if (sa_flags & SA_SIGINFO) ++ fprintf(fp, "%s%sSA_SIGINFO", ++ translate-- > 0 ? "(" : "", ++ others++ ? "|" : ""); ++ if (sa_flags & SA_ONSTACK) ++ fprintf(fp, "%s%sSA_ONSTACK", ++ translate-- > 0 ? "(" : "", ++ others++ ? "|" : ""); ++ if (sa_flags & SA_RESTART) ++ fprintf(fp, "%s%sSA_RESTART", ++ translate-- > 0 ? "(" : "", ++ others++ ? "|" : ""); ++ if (sa_flags & SA_NODEFER) ++ fprintf(fp, "%s%sSA_NODEFER", ++ translate-- > 0 ? "(" : "", ++ others++ ? 
"|" : ""); ++ if (sa_flags & SA_RESETHAND) ++ fprintf(fp, "%s%sSA_RESETHAND", ++ translate-- > 0 ? "(" : "", ++ others++ ? "|" : ""); ++ if (translate < 1) ++ fprintf(fp, ")"); ++ } ++ ++ fprintf(fp, "\n"); ++ } ++ } ++ ++ if (flags & TASK_LEVEL) { ++ /* ++ * Pending signals (task level). ++ */ ++ if (VALID_MEMBER(task_struct_sigpending)) ++ sigpending = INT(tt->task_struct + ++ OFFSET(task_struct_sigpending)); ++ else if (VALID_MEMBER(thread_info_flags)) { ++ fill_thread_info(tc->thread_info); ++ ti_flags = UINT(tt->thread_info + OFFSET(thread_info_flags)); ++ sigpending = ti_flags & (1<task); ++ if (flags & TASK_INDENT) ++ INDENT(2); ++ fprintf(fp, " BLOCKED: %016llx\n", blocked); ++ ++ /* ++ * Pending queue (task level). ++ */ ++ ++ if (flags & TASK_INDENT) ++ INDENT(2); ++ if (VALID_MEMBER(signal_struct_shared_pending)) { ++ fprintf(fp, "PRIVATE_PENDING\n"); ++ if (flags & TASK_INDENT) ++ INDENT(2); ++ } ++ fprintf(fp, " SIGNAL: %016llx\n", sigset); ++ ++ if (VALID_MEMBER(task_struct_sigqueue)) ++ sigqueue = ULONG(tt->task_struct + ++ OFFSET(task_struct_sigqueue)); ++ ++ else if (VALID_MEMBER(task_struct_pending)) ++ sigqueue = ULONG(tt->task_struct + ++ OFFSET(task_struct_pending) + ++ OFFSET_OPTION(sigpending_head, ++ sigpending_list)); ++ ++ if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue)) ++ sigqueue = 0; ++ ++ if (flags & TASK_INDENT) ++ INDENT(2); ++ if (sigqueue) { ++ fprintf(fp, " SIGQUEUE: SIG %s\n", ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO")); ++ sigqueue_list(sigqueue); ++ } else ++ fprintf(fp, " SIGQUEUE: (empty)\n"); ++ } ++ ++ /* ++ * Pending queue (thread group level). ++ */ ++ if ((flags & THREAD_GROUP_LEVEL) && ++ VALID_MEMBER(signal_struct_shared_pending)) { ++ ++ fprintf(fp, "SHARED_PENDING\n"); ++ shared_pending = signal_struct + OFFSET(signal_struct_shared_pending); ++ signal = shared_pending + OFFSET(sigpending_signal); ++ readmem(signal, KVADDR, signal_buf,SIZE(sigpending_signal), ++ "signal", FAULT_ON_ERROR); ++ sigset = task_signal(0, (ulong*)signal_buf); ++ if (flags & TASK_INDENT) ++ INDENT(2); ++ fprintf(fp, " SIGNAL: %016llx\n", sigset); ++ sigqueue = (shared_pending + ++ OFFSET_OPTION(sigpending_head, sigpending_list) + ++ OFFSET(list_head_next)); ++ readmem(sigqueue,KVADDR, signal_buf, ++ SIZE(sigqueue), "sigqueue", FAULT_ON_ERROR); ++ sigqueue = ULONG(signal_buf); ++ ++ if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue)) ++ sigqueue = 0; ++ if (flags & TASK_INDENT) ++ INDENT(2); ++ if (sigqueue) { ++ fprintf(fp, " SIGQUEUE: SIG %s\n", ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO")); ++ sigqueue_list(sigqueue); ++ } else ++ fprintf(fp, " SIGQUEUE: (empty)\n"); ++ } ++ FREEBUF(signal_buf); ++} ++ ++/* ++ * Dump a pending signal queue (private/shared). ++ */ ++ ++static void sigqueue_list(ulong sigqueue) { ++ ulong sigqueue_save, next; ++ int sig; ++ char *signal_buf; ++ long size; ++ size = VALID_SIZE(signal_queue) ? 
SIZE(signal_queue) : SIZE(sigqueue); ++ signal_buf = GETBUF(size); + ++ sigqueue_save = sigqueue; + while (sigqueue) { + readmem(sigqueue, KVADDR, signal_buf, + SIZE_OPTION(signal_queue, sigqueue), +@@ -5597,14 +7376,17 @@ + OFFSET(siginfo_si_signo)); + } + +- fprintf(fp, " %3d %lx\n", ++ if (sigqueue_save == next) ++ break; ++ ++ fprintf(fp, " %3d %lx\n", + sig, sigqueue + + OFFSET_OPTION(signal_queue_info, sigqueue_info)); + + sigqueue = next; + } +- + FREEBUF(signal_buf); ++ + } + + /* +@@ -5614,12 +7396,13 @@ + */ + + static ulonglong +-task_signal(ulong task) ++task_signal(ulong task, ulong *signal) + { + ulonglong sigset; + ulong *sigset_ptr; + +- fill_task_struct(task); ++ if (task) { ++ fill_task_struct(task); + + if (!tt->last_task_read) + return 0; +@@ -5633,6 +7416,10 @@ + OFFSET(task_struct_signal)); + } else + return 0; ++ } else if (signal) { ++ sigset_ptr = signal; ++ } else ++ return 0; + + switch (_NSIG_WORDS) + { +--- crash/memory.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/memory.c 2008-01-11 11:58:54.000000000 -0500 +@@ -1,8 +1,8 @@ + /* memory.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. + * Copyright (C) 2002 Silicon Graphics, Inc. + * + * This program is free software; you can redistribute it and/or modify +@@ -35,34 +35,46 @@ + ulong order; + ulong slabsize; + ulong num_slabs; ++ ulong objects; + ulonglong spec_addr; + ulong flags; + ulong size; ++ ulong objsize; + int memtype; + int free; ++ int slab_offset; + char *reqname; + char *curname; + ulong *addrlist; + int *kmem_bufctl; + ulong *cpudata[NR_CPUS]; ++ ulong *shared_array_cache; ++ int current_cache_index; + ulong found; + ulong retval; + char *ignore; + int errors; + int calls; + int cpu; ++ int cache_count; + ulong get_shared; + ulong get_totalram; + ulong get_buffers; + ulong get_slabs; + char *slab_buf; + char *cache_buf; ++ ulong *cache_list; ++ struct vmlist { ++ ulong addr; ++ ulong size; ++ } *vmlist; + }; + + static char *memtype_string(int, int); + static char *error_handle_string(ulong); + static void dump_mem_map(struct meminfo *); +-static void fill_mem_map_cache(ulong, char *); ++static void dump_mem_map_SPARSEMEM(struct meminfo *); ++static void fill_mem_map_cache(ulong, ulong, char *); + static void dump_free_pages(struct meminfo *); + static int dump_zone_page_usage(void); + static void dump_multidimensional_free_pages(struct meminfo *); +@@ -72,19 +84,27 @@ + static void dump_page_hash_table(struct meminfo *); + static void kmem_search(struct meminfo *); + static void kmem_cache_init(void); ++static void kmem_cache_init_slub(void); + static ulong max_cpudata_limit(ulong, ulong *); + static int ignore_cache(struct meminfo *, char *); + static char *is_kmem_cache_addr(ulong, char *); ++static char *is_kmem_cache_addr_slub(ulong, char *); + static void kmem_cache_list(void); + static void dump_kmem_cache(struct meminfo *); + static void dump_kmem_cache_percpu_v1(struct meminfo *); + static void dump_kmem_cache_percpu_v2(struct meminfo *); ++static void dump_kmem_cache_slub(struct meminfo *); + static void dump_kmem_cache_info_v2(struct meminfo *); ++static void kmem_cache_list_slub(void); ++static ulong get_cpu_slab_ptr(struct meminfo *, int); + 
static char *vaddr_to_kmem_cache(ulong, char *); + static ulong vaddr_to_slab(ulong); + static void do_slab_chain(int, struct meminfo *); + static void do_slab_chain_percpu_v1(long, struct meminfo *); + static void do_slab_chain_percpu_v2(long, struct meminfo *); ++static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *); ++static void do_slab_slub(struct meminfo *, int); ++static void do_kmem_cache_slub(struct meminfo *); + static void save_slab_data(struct meminfo *); + static int slab_data_saved(struct meminfo *); + static void dump_saved_slab_data(void); +@@ -97,7 +117,9 @@ + static void gather_slab_free_list_percpu(struct meminfo *); + static void gather_cpudata_list_v1(struct meminfo *); + static void gather_cpudata_list_v2(struct meminfo *); ++static void gather_cpudata_list_v2_nodes(struct meminfo *, int); + static int check_cpudata_list(struct meminfo *, ulong); ++static int check_shared_list(struct meminfo *, ulong); + static void gather_slab_cached_count(struct meminfo *); + static void dump_slab_objects(struct meminfo *); + static void dump_slab_objects_percpu(struct meminfo *); +@@ -110,6 +132,7 @@ + static void search(ulong, ulong, ulong, int, ulong *, int); + static int next_upage(struct task_context *, ulong, ulong *); + static int next_kpage(ulong, ulong *); ++static ulong next_vmlist_vaddr(struct meminfo *, ulong); + static int vm_area_page_dump(ulong, ulong, ulong, ulong, void *, + struct reference *); + static int dump_swap_info(ulong, ulong *, ulong *); +@@ -118,15 +141,44 @@ + static char *vma_file_offset(ulong, ulong, char *); + static ssize_t read_dev_kmem(ulong, char *, long); + static void dump_memory_nodes(int); ++static void dump_zone_stats(void); + #define MEMORY_NODES_DUMP (0) + #define MEMORY_NODES_INITIALIZE (1) + static void node_table_init(void); + static int compare_node_data(const void *, const void *); + static void do_vm_flags(ulong); + static void PG_reserved_flag_init(void); ++static void PG_slab_flag_init(void); + static ulong nr_blockdev_pages(void); +- +- ++void sparse_mem_init(void); ++void dump_mem_sections(void); ++void list_mem_sections(void); ++ulong sparse_decode_mem_map(ulong, ulong); ++char *read_mem_section(ulong); ++ulong nr_to_section(ulong); ++int valid_section(ulong); ++int section_has_mem_map(ulong); ++ulong section_mem_map_addr(ulong); ++ulong valid_section_nr(ulong); ++ulong pfn_to_map(ulong); ++static int get_nodes_online(void); ++static int next_online_node(int); ++static ulong next_online_pgdat(int); ++static int vm_stat_init(void); ++static int vm_event_state_init(void); ++static int dump_vm_stat(char *, long *, ulong); ++static int dump_vm_event_state(void); ++static int dump_page_states(void); ++static int generic_read_dumpfile(ulonglong, void *, long, char *, ulong); ++static int generic_write_dumpfile(ulonglong, void *, long, char *, ulong); ++static int page_to_nid(ulong); ++static int get_kmem_cache_list(ulong **); ++static int get_kmem_cache_slub_data(long, struct meminfo *); ++static ulong compound_head(ulong); ++static long count_partial(ulong); ++static ulong get_freepointer(struct meminfo *, void *); ++char *is_slab_page(struct meminfo *, char *); ++static void do_node_lists_slub(struct meminfo *, ulong, int); + + /* + * Memory display modes specific to this file. 
+@@ -142,6 +194,7 @@ + #define DECIMAL (0x100) + #define UDECIMAL (0x200) + #define ASCII_ENDLINE (0x400) ++#define NO_ASCII (0x800) + + static ulong DISPLAY_DEFAULT; + +@@ -182,6 +235,10 @@ + MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap"); + MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); + MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss"); ++ if (!VALID_MEMBER(mm_struct_rss)) ++ MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "_rss"); ++ MEMBER_OFFSET_INIT(mm_struct_anon_rss, "mm_struct", "_anon_rss"); ++ MEMBER_OFFSET_INIT(mm_struct_file_rss, "mm_struct", "_file_rss"); + MEMBER_OFFSET_INIT(mm_struct_total_vm, "mm_struct", "total_vm"); + MEMBER_OFFSET_INIT(mm_struct_start_code, "mm_struct", "start_code"); + MEMBER_OFFSET_INIT(vm_area_struct_vm_mm, "vm_area_struct", "vm_mm"); +@@ -222,7 +279,16 @@ + MEMBER_OFFSET_INIT(page_count, "page", "_count"); + MEMBER_OFFSET_INIT(page_flags, "page", "flags"); + MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); ++ if (INVALID_MEMBER(page_mapping)) ++ ANON_MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); ++ if (INVALID_MEMBER(page_mapping) && ++ (THIS_KERNEL_VERSION < LINUX(2,6,17)) && ++ MEMBER_EXISTS("page", "_mapcount")) ++ ASSIGN_OFFSET(page_mapping) = MEMBER_OFFSET("page", "_mapcount") + ++ STRUCT_SIZE("atomic_t") + sizeof(ulong); + MEMBER_OFFSET_INIT(page_index, "page", "index"); ++ if (INVALID_MEMBER(page_index)) ++ ANON_MEMBER_OFFSET_INIT(page_index, "page", "index"); + MEMBER_OFFSET_INIT(page_buffers, "page", "buffers"); + MEMBER_OFFSET_INIT(page_lru, "page", "lru"); + MEMBER_OFFSET_INIT(page_pte, "page", "pte"); +@@ -270,6 +336,7 @@ + STRUCT_SIZE_INIT(kmem_slab_s, "kmem_slab_s"); + STRUCT_SIZE_INIT(slab_s, "slab_s"); + STRUCT_SIZE_INIT(slab, "slab"); ++ STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s"); + STRUCT_SIZE_INIT(pgd_t, "pgd_t"); + + if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) { +@@ -310,17 +377,49 @@ + !VALID_STRUCT(slab_s) && VALID_STRUCT(slab)) { + vt->flags |= PERCPU_KMALLOC_V2; + +- MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); +- MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); +- MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); +- MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", +- "colour_off"); +- MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", +- "objsize"); +- MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); +- MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, +- "kmem_cache_s", "gfporder"); +- ++ if (VALID_STRUCT(kmem_cache_s)) { ++ MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", ++ "colour_off"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", ++ "objsize"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, ++ "kmem_cache_s", "gfporder"); ++ ++ MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array"); ++ ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0); ++ } else { ++ STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache", "num"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "next"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, 
"kmem_cache", ++ "colour_off"); ++ if (MEMBER_EXISTS("kmem_cache", "objsize")) ++ MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", ++ "objsize"); ++ else if (MEMBER_EXISTS("kmem_cache", "buffer_size")) ++ MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", ++ "buffer_size"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags"); ++ MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, ++ "kmem_cache", "gfporder"); ++ ++ if (MEMBER_EXISTS("kmem_cache", "lists")) ++ MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists"); ++ else if (MEMBER_EXISTS("kmem_cache", "nodelists")) { ++ vt->flags |= PERCPU_KMALLOC_V2_NODES; ++ MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "nodelists"); ++ ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, ++ "kmem_cache.nodelists", NULL, 0); ++ } ++ MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array"); ++ ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0); ++ } + MEMBER_OFFSET_INIT(slab_list, "slab", "list"); + MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); + MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); +@@ -330,10 +429,6 @@ + MEMBER_OFFSET_INIT(array_cache_limit, "array_cache", "limit"); + STRUCT_SIZE_INIT(array_cache, "array_cache"); + +- MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists"); +- MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array"); +- ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0); +- + MEMBER_OFFSET_INIT(kmem_list3_slabs_partial, + "kmem_list3", "slabs_partial"); + MEMBER_OFFSET_INIT(kmem_list3_slabs_full, +@@ -343,6 +438,47 @@ + MEMBER_OFFSET_INIT(kmem_list3_free_objects, + "kmem_list3", "free_objects"); + MEMBER_OFFSET_INIT(kmem_list3_shared, "kmem_list3", "shared"); ++ } else if (MEMBER_EXISTS("kmem_cache", "cpu_slab") && ++ STRUCT_EXISTS("kmem_cache_node")) { ++ vt->flags |= KMALLOC_SLUB; ++ ++ STRUCT_SIZE_INIT(kmem_cache, "kmem_cache"); ++ MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size"); ++ MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "objsize"); ++ MEMBER_OFFSET_INIT(kmem_cache_offset, "kmem_cache", "offset"); ++ MEMBER_OFFSET_INIT(kmem_cache_order, "kmem_cache", "order"); ++ MEMBER_OFFSET_INIT(kmem_cache_local_node, "kmem_cache", "local_node"); ++ MEMBER_OFFSET_INIT(kmem_cache_objects, "kmem_cache", "objects"); ++ MEMBER_OFFSET_INIT(kmem_cache_inuse, "kmem_cache", "inuse"); ++ MEMBER_OFFSET_INIT(kmem_cache_align, "kmem_cache", "align"); ++ MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node"); ++ MEMBER_OFFSET_INIT(kmem_cache_cpu_slab, "kmem_cache", "cpu_slab"); ++ MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list"); ++ MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name"); ++ MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist"); ++ MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page"); ++ MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node"); ++ ANON_MEMBER_OFFSET_INIT(page_inuse, "page", "inuse"); ++ ANON_MEMBER_OFFSET_INIT(page_offset, "page", "offset"); ++ ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab"); ++ ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); ++ ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); ++ if (VALID_MEMBER(kmem_cache_node)) { ++ ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.node", NULL, 0); ++ vt->flags |= CONFIG_NUMA; ++ } ++ ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.cpu_slab", NULL, 0); ++ ++ STRUCT_SIZE_INIT(kmem_cache_node, "kmem_cache_node"); ++ STRUCT_SIZE_INIT(kmem_cache_cpu, "kmem_cache_cpu"); ++ 
MEMBER_OFFSET_INIT(kmem_cache_node_nr_partial, ++ "kmem_cache_node", "nr_partial"); ++ MEMBER_OFFSET_INIT(kmem_cache_node_nr_slabs, ++ "kmem_cache_node", "nr_slabs"); ++ MEMBER_OFFSET_INIT(kmem_cache_node_partial, ++ "kmem_cache_node", "partial"); ++ MEMBER_OFFSET_INIT(kmem_cache_node_full, ++ "kmem_cache_node", "full"); + } else { + MEMBER_OFFSET_INIT(kmem_cache_s_c_nextp, + "kmem_cache_s", "c_nextp"); +@@ -381,6 +517,22 @@ + "kmem_slab_s", "s_magic"); + } + ++ if (!kt->kernel_NR_CPUS) { ++ if (ARRAY_LENGTH(kmem_cache_s_cpudata)) ++ kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_cpudata); ++ else if (ARRAY_LENGTH(kmem_cache_s_array)) ++ kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_array); ++ else if (ARRAY_LENGTH(kmem_cache_cpu_slab)) ++ kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_cpu_slab); ++ } ++ ++ if (kt->kernel_NR_CPUS > NR_CPUS) { ++ error(WARNING, ++ "kernel-configured NR_CPUS (%d) greater than compiled-in NR_CPUS (%d)\n", ++ kt->kernel_NR_CPUS, NR_CPUS); ++ error(FATAL, "recompile crash with larger NR_CPUS\n"); ++ } ++ + if (machdep->init_kernel_pgd) + machdep->init_kernel_pgd(); + else if (symbol_exists("swapper_pg_dir")) { +@@ -415,10 +567,17 @@ + error(FATAL, "no swapper_pg_dir or cpu_pgd symbols exist?\n"); + + get_symbol_data("high_memory", sizeof(ulong), &vt->high_memory); +- if (kernel_symbol_exists("mem_map")) ++ ++ if (kernel_symbol_exists("mem_section")) ++ vt->flags |= SPARSEMEM; ++ else if (kernel_symbol_exists("mem_map")) { + get_symbol_data("mem_map", sizeof(char *), &vt->mem_map); +- else ++ vt->flags |= FLATMEM; ++ } else + vt->flags |= DISCONTIGMEM; ++ ++ sparse_mem_init(); ++ + vt->vmalloc_start = machdep->vmalloc_start(); + if (IS_VMALLOC_ADDR(vt->mem_map)) + vt->flags |= V_MEM_MAP; +@@ -478,7 +637,6 @@ + STRUCT_SIZE_INIT(free_area_struct, "free_area_struct"); + STRUCT_SIZE_INIT(zone, "zone"); + STRUCT_SIZE_INIT(zone_struct, "zone_struct"); +- STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s"); + STRUCT_SIZE_INIT(kmem_bufctl_t, "kmem_bufctl_t"); + STRUCT_SIZE_INIT(swap_info_struct, "swap_info_struct"); + STRUCT_SIZE_INIT(mm_struct, "mm_struct"); +@@ -488,13 +646,20 @@ + if (VALID_STRUCT(pglist_data)) { + vt->flags |= ZONES; + +- if (symbol_exists("pgdat_list")) ++ if (symbol_exists("pgdat_list") && !IS_SPARSEMEM()) + vt->flags |= NODES; + ++ /* ++ * Determine the number of nodes the best way possible, ++ * starting with a default of 1. 
++ */ ++ vt->numnodes = 1; ++ + if (symbol_exists("numnodes")) + get_symbol_data("numnodes", sizeof(int), &vt->numnodes); +- else +- vt->numnodes = 1; ++ ++ if (get_nodes_online()) ++ vt->flags |= NODES_ONLINE; + + MEMBER_OFFSET_INIT(pglist_data_node_zones, + "pglist_data", "node_zones"); +@@ -524,6 +689,7 @@ + ARRAY_LENGTH_INIT(vt->nr_zones, pglist_data_node_zones, + "pglist_data.node_zones", NULL, + SIZE_OPTION(zone_struct, zone)); ++ vt->ZONE_HIGHMEM = vt->nr_zones - 1; + + if (VALID_STRUCT(zone_struct)) { + MEMBER_OFFSET_INIT(zone_struct_free_pages, +@@ -539,6 +705,8 @@ + if (INVALID_MEMBER(zone_struct_size)) + MEMBER_OFFSET_INIT(zone_struct_memsize, + "zone_struct", "memsize"); ++ MEMBER_OFFSET_INIT(zone_struct_zone_start_pfn, ++ "zone_struct", "zone_start_pfn"); + MEMBER_OFFSET_INIT(zone_struct_zone_start_paddr, + "zone_struct", "zone_start_paddr"); + MEMBER_OFFSET_INIT(zone_struct_zone_start_mapnr, +@@ -565,8 +733,17 @@ + vt->dump_free_pages = dump_free_pages_zones_v1; + + } else if (VALID_STRUCT(zone)) { +- MEMBER_OFFSET_INIT(zone_free_pages, +- "zone", "free_pages"); ++ MEMBER_OFFSET_INIT(zone_vm_stat, "zone", "vm_stat"); ++ MEMBER_OFFSET_INIT(zone_free_pages, "zone", "free_pages"); ++ if (INVALID_MEMBER(zone_free_pages) && ++ VALID_MEMBER(zone_vm_stat)) { ++ long nr_free_pages = 0; ++ if (!enumerator_value("NR_FREE_PAGES", &nr_free_pages)) ++ error(WARNING, ++ "cannot determine NR_FREE_PAGES enumerator\n"); ++ ASSIGN_OFFSET(zone_free_pages) = OFFSET(zone_vm_stat) + ++ (nr_free_pages * sizeof(long)); ++ } + MEMBER_OFFSET_INIT(zone_free_area, + "zone", "free_area"); + MEMBER_OFFSET_INIT(zone_zone_pgdat, +@@ -579,12 +756,23 @@ + "zone", "zone_start_pfn"); + MEMBER_OFFSET_INIT(zone_spanned_pages, + "zone", "spanned_pages"); ++ MEMBER_OFFSET_INIT(zone_present_pages, ++ "zone", "present_pages"); + MEMBER_OFFSET_INIT(zone_pages_min, + "zone", "pages_min"); + MEMBER_OFFSET_INIT(zone_pages_low, + "zone", "pages_low"); + MEMBER_OFFSET_INIT(zone_pages_high, + "zone", "pages_high"); ++ MEMBER_OFFSET_INIT(zone_nr_active, ++ "zone", "nr_active"); ++ MEMBER_OFFSET_INIT(zone_nr_inactive, ++ "zone", "nr_inactive"); ++ MEMBER_OFFSET_INIT(zone_all_unreclaimable, ++ "zone", "all_unreclaimable"); ++ MEMBER_OFFSET_INIT(zone_flags, "zone", "flags"); ++ MEMBER_OFFSET_INIT(zone_pages_scanned, "zone", ++ "pages_scanned"); + ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_free_area, + "zone.free_area", NULL, SIZE(free_area)); + vt->dump_free_pages = dump_free_pages_zones_v2; +@@ -603,6 +791,8 @@ + vt->dump_kmem_cache = dump_kmem_cache_percpu_v1; + else if (vt->flags & PERCPU_KMALLOC_V2) + vt->dump_kmem_cache = dump_kmem_cache_percpu_v2; ++ else if (vt->flags & KMALLOC_SLUB) ++ vt->dump_kmem_cache = dump_kmem_cache_slub; + else + vt->dump_kmem_cache = dump_kmem_cache; + +@@ -640,13 +830,7 @@ + kmem_cache_init(); + + PG_reserved_flag_init(); +- +- if (VALID_MEMBER(page_pte)) { +- if (THIS_KERNEL_VERSION < LINUX(2,6,0)) +- vt->PG_slab = 10; +- else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) +- vt->PG_slab = 7; +- } ++ PG_slab_flag_init(); + } + + /* +@@ -685,7 +869,7 @@ + memtype = KVADDR; + count = -1; + +- while ((c = getopt(argcnt, args, "e:pudDuso:81:3:6:")) != EOF) { ++ while ((c = getopt(argcnt, args, "xme:pfudDuso:81:3:6:")) != EOF) { + switch(c) + { + case '8': +@@ -748,12 +932,12 @@ + break; + + case 'p': +- memtype &= ~(UVADDR|KVADDR); ++ memtype &= ~(UVADDR|KVADDR|XENMACHADDR|FILEADDR); + memtype = PHYSADDR; + break; + + case 'u': +- memtype &= ~(KVADDR|PHYSADDR); ++ memtype &= 
~(KVADDR|PHYSADDR|XENMACHADDR|FILEADDR); + memtype = UVADDR; + break; + +@@ -767,6 +951,25 @@ + flag |= UDECIMAL; + break; + ++ case 'm': ++ if (!(kt->flags & ARCH_XEN)) ++ error(FATAL, "-m option only applies to xen architecture\n"); ++ memtype &= ~(UVADDR|KVADDR|FILEADDR); ++ memtype = XENMACHADDR; ++ break; ++ ++ case 'f': ++ if (!pc->dumpfile) ++ error(FATAL, ++ "-f option requires a dumpfile\n"); ++ memtype &= ~(KVADDR|UVADDR|PHYSADDR|XENMACHADDR); ++ memtype = FILEADDR; ++ break; ++ ++ case 'x': ++ flag |= NO_ASCII; ++ break; ++ + default: + argerrs++; + break; +@@ -830,7 +1033,7 @@ + error(WARNING, + "ending address ignored when count is specified\n"); + +- if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC)) ++ if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC) && !(flag & NO_ASCII)) + flag |= ASCII_ENDLINE; + + if (memtype == KVADDR) { +@@ -839,7 +1042,6 @@ + } + + display_memory(addr, count, flag, memtype); +- + } + + /* +@@ -903,6 +1105,12 @@ + case PHYSADDR: + addrtype = "PHYSADDR"; + break; ++ case XENMACHADDR: ++ addrtype = "XENMACHADDR"; ++ break; ++ case FILEADDR: ++ addrtype = "FILEADDR"; ++ break; + } + + if (CRASHDEBUG(4)) +@@ -970,7 +1178,8 @@ + case DISPLAY_64: + if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == + (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { +- if (in_ksymbol_range(mem.u64)) { ++ if (in_ksymbol_range(mem.u64) && ++ strlen(value_to_symstr(mem.u64, buf, 0))) { + fprintf(fp, "%-16s ", + value_to_symstr(mem.u64, buf, 0)); + linelen += strlen(buf)+1; +@@ -993,7 +1202,8 @@ + case DISPLAY_32: + if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == + (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { +- if (in_ksymbol_range(mem.u32)) { ++ if (in_ksymbol_range(mem.u32) && ++ strlen(value_to_symstr(mem.u32, buf, 0))) { + fprintf(fp, INT_PRLEN == 16 ? + "%-16s " : "%-8s ", + value_to_symstr(mem.u32, +@@ -1138,7 +1348,7 @@ + size = sizeof(void*); + addr_entered = value_entered = FALSE; + +- while ((c = getopt(argcnt, args, "ukp81:3:6:")) != EOF) { ++ while ((c = getopt(argcnt, args, "fukp81:3:6:")) != EOF) { + switch(c) + { + case '8': +@@ -1173,17 +1383,33 @@ + break; + + case 'p': ++ memtype &= ~(UVADDR|KVADDR|FILEADDR); + memtype = PHYSADDR; + break; + + case 'u': ++ memtype &= ~(PHYSADDR|KVADDR|FILEADDR); + memtype = UVADDR; + break; + + case 'k': ++ memtype &= ~(PHYSADDR|UVADDR|FILEADDR); + memtype = KVADDR; + break; + ++ case 'f': ++ /* ++ * Unsupported, but can be forcibly implemented ++ * by removing the DUMPFILE() check above and ++ * recompiling. ++ */ ++ if (!pc->dumpfile) ++ error(FATAL, ++ "-f option requires a dumpfile\n"); ++ memtype &= ~(PHYSADDR|UVADDR|KVADDR); ++ memtype = FILEADDR; ++ break; ++ + default: + argerrs++; + break; +@@ -1262,6 +1488,9 @@ + case PHYSADDR: + break; + ++ case FILEADDR: ++ break; ++ + case AMBIGUOUS: + error(INFO, + "ambiguous address: %llx (requires -p, -u or -k)\n", +@@ -1309,6 +1538,8 @@ + raw_data_dump(ulong addr, long count, int symbolic) + { + long wordcnt; ++ ulonglong address; ++ int memtype; + + switch (sizeof(long)) + { +@@ -1328,9 +1559,20 @@ + break; + } + +- display_memory(addr, wordcnt, ++ if (pc->curcmd_flags & MEMTYPE_FILEADDR) { ++ address = pc->curcmd_private; ++ memtype = FILEADDR; ++ } else if (pc->curcmd_flags & MEMTYPE_UVADDR) { ++ address = (ulonglong)addr; ++ memtype = UVADDR; ++ } else { ++ address = (ulonglong)addr; ++ memtype = KVADDR; ++ } ++ ++ display_memory(address, wordcnt, + HEXADECIMAL|DISPLAY_DEFAULT|(symbolic ? 
SYMBOLIC : ASCII_ENDLINE), +- KVADDR); ++ memtype); + } + + /* +@@ -1351,7 +1593,7 @@ + * is appropriate: + * + * addr a user, kernel or physical memory address. +- * memtype addr type: UVADDR, KVADDR or PHYSADDR. ++ * memtype addr type: UVADDR, KVADDR, PHYSADDR, XENMACHADDR or FILEADDR + * buffer supplied buffer to read the data into. + * size number of bytes to read. + * type string describing the request -- helpful when the read fails. +@@ -1368,6 +1610,7 @@ + #define SEEK_ERRMSG "seek error: %s address: %llx type: \"%s\"\n" + #define READ_ERRMSG "read error: %s address: %llx type: \"%s\"\n" + #define WRITE_ERRMSG "write error: %s address: %llx type: \"%s\"\n" ++#define PAGE_EXCLUDED_ERRMSG "page excluded: %s address: %llx type: \"%s\"\n" + + int + readmem(ulonglong addr, int memtype, void *buffer, long size, +@@ -1376,6 +1619,7 @@ + int fd; + long cnt; + physaddr_t paddr; ++ ulonglong pseudo; + char *bufptr; + + if (CRASHDEBUG(4)) +@@ -1424,7 +1668,11 @@ + break; + + case PHYSADDR: ++ case XENMACHADDR: + break; ++ ++ case FILEADDR: ++ return generic_read_dumpfile(addr, buffer, size, type, error_handle); + } + + while (size > 0) { +@@ -1449,6 +1697,17 @@ + case PHYSADDR: + paddr = addr; + break; ++ ++ case XENMACHADDR: ++ pseudo = xen_m2p(addr); ++ ++ if (pseudo == XEN_MACHADDR_NOT_FOUND) { ++ pc->curcmd_flags |= XEN_MACHINE_ADDR; ++ paddr = addr; ++ } else ++ paddr = pseudo | PAGEOFFSET(addr); ++ ++ break; + } + + /* +@@ -1460,7 +1719,7 @@ + cnt = size; + + switch (READMEM(fd, bufptr, cnt, +- memtype == PHYSADDR ? 0 : addr, paddr)) ++ (memtype == PHYSADDR) || (memtype == XENMACHADDR) ? 0 : addr, paddr)) + { + case SEEK_ERROR: + if (PRINT_ERROR_MESSAGE) +@@ -1472,6 +1731,11 @@ + error(INFO, READ_ERRMSG, memtype_string(memtype, 0), addr, type); + goto readmem_error; + ++ case PAGE_EXCLUDED: ++ if (PRINT_ERROR_MESSAGE) ++ error(INFO, PAGE_EXCLUDED_ERRMSG, memtype_string(memtype, 0), addr, type); ++ goto readmem_error; ++ + default: + break; + } +@@ -1610,6 +1874,9 @@ + int + read_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) + { ++ if (pc->curcmd_flags & XEN_MACHINE_ADDR) ++ return READ_ERROR; ++ + if (!machdep->verify_paddr(paddr)) { + if (CRASHDEBUG(1)) + error(INFO, "verify_paddr(%lx) failed\n", paddr); +@@ -1754,6 +2021,12 @@ + case PHYSADDR: + sprintf(membuf, debug ? "PHYSADDR" : "physical"); + break; ++ case XENMACHADDR: ++ sprintf(membuf, debug ? "XENMACHADDR" : "xen machine"); ++ break; ++ case FILEADDR: ++ sprintf(membuf, debug ? "FILEADDR" : "dumpfile"); ++ break; + default: + if (debug) + sprintf(membuf, "0x%x (?)", memtype); +@@ -1849,6 +2122,10 @@ + + case PHYSADDR: + break; ++ ++ ++ case FILEADDR: ++ return generic_write_dumpfile(addr, buffer, size, type, error_handle); + } + + while (size > 0) { +@@ -1946,6 +2223,77 @@ + } + + /* ++ * Generic dumpfile read/write functions to handle FILEADDR ++ * memtype arguments to readmem() and writemem(). These are ++ * not to be confused with pc->readmem/writemem plug-ins. 
++ */ ++static int ++generic_read_dumpfile(ulonglong addr, void *buffer, long size, char *type, ++ ulong error_handle) ++{ ++ int fd; ++ int retval; ++ ++ retval = TRUE; ++ ++ if (!pc->dumpfile) ++ error(FATAL, "command requires a dumpfile\n"); ++ ++ if ((fd = open(pc->dumpfile, O_RDONLY)) < 0) ++ error(FATAL, "%s: %s\n", pc->dumpfile, ++ strerror(errno)); ++ ++ if (lseek(fd, addr, SEEK_SET) == -1) { ++ if (PRINT_ERROR_MESSAGE) ++ error(INFO, SEEK_ERRMSG, ++ memtype_string(FILEADDR, 0), addr, type); ++ retval = FALSE; ++ } else if (read(fd, buffer, size) != size) { ++ if (PRINT_ERROR_MESSAGE) ++ error(INFO, READ_ERRMSG, ++ memtype_string(FILEADDR, 0), addr, type); ++ retval = FALSE; ++ } ++ ++ close(fd); ++ ++ return retval; ++} ++ ++static int ++generic_write_dumpfile(ulonglong addr, void *buffer, long size, char *type, ++ ulong error_handle) ++{ ++ int fd; ++ int retval; ++ ++ retval = TRUE; ++ ++ if (!pc->dumpfile) ++ error(FATAL, "command requires a dumpfile\n"); ++ ++ if ((fd = open(pc->dumpfile, O_WRONLY)) < 0) ++ error(FATAL, "%s: %s\n", pc->dumpfile, ++ strerror(errno)); ++ ++ if (lseek(fd, addr, SEEK_SET) == -1) { ++ if (PRINT_ERROR_MESSAGE) ++ error(INFO, SEEK_ERRMSG, ++ memtype_string(FILEADDR, 0), addr, type); ++ retval = FALSE; ++ } else if (write(fd, buffer, size) != size) { ++ if (PRINT_ERROR_MESSAGE) ++ error(INFO, WRITE_ERRMSG, ++ memtype_string(FILEADDR, 0), addr, type); ++ retval = FALSE; ++ } ++ ++ close(fd); ++ ++ return retval; ++} ++ ++/* + * Translates a kernel virtual address to its physical address. cmd_vtop() + * sets the verbose flag so that the pte translation gets displayed; all + * other callers quietly accept the translation. +@@ -2113,6 +2461,8 @@ + break; + } + ++ paddr = 0; ++ + switch (memtype) { + case UVADDR: + fprintf(fp, "%s %s\n", +@@ -2126,9 +2476,12 @@ + return; + } + if (!uvtop(tc, vaddr, &paddr, 0)) { +- fprintf(fp, "%s (not mapped)\n\n", ++ fprintf(fp, "%s %s\n\n", + mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, +- MKSTR(vaddr))); ++ MKSTR(vaddr)), ++ (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ? ++ "(page not available)" : "(not mapped)"); ++ + page_exists = FALSE; + } else { + fprintf(fp, "%s %s\n\n", +@@ -2161,9 +2514,13 @@ + } + if (vtop_flags & USE_USER_PGD) { + if (!uvtop(tc, vaddr, &paddr, 0)) { +- fprintf(fp, "%s (not mapped)\n\n", ++ fprintf(fp, "%s %s\n\n", + mkstring(buf1, UVADDR_PRLEN, +- LJUST|LONG_HEX, MKSTR(vaddr))); ++ LJUST|LONG_HEX, MKSTR(vaddr)), ++ (XEN() && ++ (paddr == PADDR_NOT_AVAILABLE)) ? ++ "(page not available)" : ++ "(not mapped)"); + page_exists = FALSE; + } else { + fprintf(fp, "%s %s\n\n", +@@ -2176,9 +2533,13 @@ + uvtop(tc, vaddr, &paddr, VERBOSE); + } else { + if (!kvtop(tc, vaddr, &paddr, 0)) { +- fprintf(fp, "%s (not mapped)\n\n", ++ fprintf(fp, "%s %s\n\n", + mkstring(buf1, VADDR_PRLEN, +- LJUST|LONG_HEX, MKSTR(vaddr))); ++ LJUST|LONG_HEX, MKSTR(vaddr)), ++ (XEN() && ++ (paddr == PADDR_NOT_AVAILABLE)) ? ++ "(page not available)" : ++ "(not mapped)"); + page_exists = FALSE; + } else { + fprintf(fp, "%s %s\n\n", +@@ -2839,7 +3200,8 @@ + + if (DO_REF_SEARCH(ref)) { + if (VM_REF_CHECK_DECVAL(ref, +- SWP_OFFSET(paddr))) { ++ THIS_KERNEL_VERSION >= LINUX(2,6,0) ? ++ __swp_offset(paddr) : SWP_OFFSET(paddr))) { + if (DO_REF_DISPLAY(ref)) + display = TRUE; + else { +@@ -2979,7 +3341,20 @@ + if (!task_mm(task, TRUE)) + return; + +- tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss)); ++ if (VALID_MEMBER(mm_struct_rss)) ++ /* ++ * mm_struct.rss or mm_struct._rss exist. 
++ */ ++ tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss)); ++ else { ++ /* ++ * mm_struct._anon_rss and mm_struct._file_rss should exist. ++ */ ++ if (VALID_MEMBER(mm_struct_anon_rss)) ++ tm->rss += ULONG(tt->mm_struct + OFFSET(mm_struct_anon_rss)); ++ if (VALID_MEMBER(mm_struct_file_rss)) ++ tm->rss += ULONG(tt->mm_struct + OFFSET(mm_struct_file_rss)); ++ } + tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); + tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd)); + +@@ -3036,6 +3411,12 @@ + #define GET_INACTIVE_DIRTY (ADDRESS_SPECIFIED << 13) /* obsolete */ + #define SLAB_GET_COUNTS (ADDRESS_SPECIFIED << 14) + #define SLAB_WALKTHROUGH (ADDRESS_SPECIFIED << 15) ++#define GET_VMLIST_COUNT (ADDRESS_SPECIFIED << 16) ++#define GET_VMLIST (ADDRESS_SPECIFIED << 17) ++#define SLAB_DATA_NOSAVE (ADDRESS_SPECIFIED << 18) ++#define GET_SLUB_SLABS (ADDRESS_SPECIFIED << 19) ++#define GET_SLUB_OBJECTS (ADDRESS_SPECIFIED << 20) ++#define VMLIST_VERIFY (ADDRESS_SPECIFIED << 21) + + #define GET_ALL \ + (GET_SHARED_PAGES|GET_TOTALRAM_PAGES|GET_BUFFERS_PAGES|GET_SLAB_PAGES) +@@ -3045,8 +3426,8 @@ + { + int i; + int c; +- int sflag, Sflag, pflag, fflag, Fflag, vflag; +- int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag; ++ int sflag, Sflag, pflag, fflag, Fflag, vflag, zflag; ++ int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag, Vflag; + struct meminfo meminfo; + ulonglong value[MAXARGS]; + char buf[BUFSIZE]; +@@ -3054,18 +3435,26 @@ + int spec_addr; + + spec_addr = 0; +- sflag = Sflag = pflag = fflag = Fflag = Pflag = 0; +- vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = 0; ++ sflag = Sflag = pflag = fflag = Fflag = Pflag = zflag = 0; ++ vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = Vflag = 0; + BZERO(&meminfo, sizeof(struct meminfo)); + BZERO(&value[0], sizeof(ulonglong)*MAXARGS); + +- while ((c = getopt(argcnt, args, "I:sSFfpvcCinl:L:P")) != EOF) { ++ while ((c = getopt(argcnt, args, "I:sSFfpvczCinl:L:PV")) != EOF) { + switch(c) + { ++ case 'V': ++ Vflag = 1; ++ break; ++ + case 'n': + nflag = 1; + break; + ++ case 'z': ++ zflag = 1; ++ break; ++ + case 'i': + iflag = 1; + break; +@@ -3153,13 +3542,13 @@ + if (argerrs) + cmd_usage(pc->curcmd, SYNOPSIS); + +- if ((sflag + Sflag + pflag + fflag + Fflag + ++ if ((sflag + Sflag + pflag + fflag + Fflag + Vflag + + vflag + Cflag + cflag + iflag + lflag + Lflag) > 1) { + error(INFO, "only one flag allowed!\n"); + cmd_usage(pc->curcmd, SYNOPSIS); + } + +- if (sflag || Sflag) ++ if (sflag || Sflag || !(vt->flags & KMEM_CACHE_INIT)) + kmem_cache_init(); + + while (args[optind]) { +@@ -3198,8 +3587,6 @@ + if (pflag) { + meminfo.spec_addr = value[i]; + meminfo.flags = ADDRESS_SPECIFIED; +- if (meminfo.calls++) +- fprintf(fp, "\n"); + dump_mem_map(&meminfo); + pflag++; + } +@@ -3234,6 +3621,8 @@ + } else { + meminfo.spec_addr = value[i]; + meminfo.flags = ADDRESS_SPECIFIED; ++ if (Sflag && (vt->flags & KMALLOC_SLUB)) ++ meminfo.flags |= VERBOSE; + if (meminfo.calls++) + fprintf(fp, "\n"); + vt->dump_kmem_cache(&meminfo); +@@ -3248,8 +3637,6 @@ + if (vflag) { + meminfo.spec_addr = value[i]; + meminfo.flags = ADDRESS_SPECIFIED; +- if (meminfo.calls++) +- fprintf(fp, "\n"); + dump_vmlist(&meminfo); + vflag++; + } +@@ -3275,7 +3662,7 @@ + /* + * no value arguments allowed! 
+ */ +- if (nflag || iflag || Fflag || Cflag || Lflag) { ++ if (zflag || nflag || iflag || Fflag || Cflag || Lflag || Vflag) { + error(INFO, + "no address arguments allowed with this option\n"); + cmd_usage(pc->curcmd, SYNOPSIS); +@@ -3309,24 +3696,25 @@ + } + + if (sflag == 1) { +- if (vt->flags & KMEM_CACHE_UNAVAIL) +- error(FATAL, +- "kmem cache slab subsystem not available\n"); + if (STREQ(meminfo.reqname, "list")) + kmem_cache_list(); ++ else if (vt->flags & KMEM_CACHE_UNAVAIL) ++ error(FATAL, ++ "kmem cache slab subsystem not available\n"); + else + vt->dump_kmem_cache(&meminfo); + } + + if (Sflag == 1) { +- if (vt->flags & KMEM_CACHE_UNAVAIL) +- error(FATAL, +- "kmem cache slab subsystem not available\n"); +- meminfo.flags = VERBOSE; + if (STREQ(meminfo.reqname, "list")) + kmem_cache_list(); +- else ++ else if (vt->flags & KMEM_CACHE_UNAVAIL) ++ error(FATAL, ++ "kmem cache slab subsystem not available\n"); ++ else { ++ meminfo.flags = VERBOSE; + vt->dump_kmem_cache(&meminfo); ++ } + } + + if (vflag == 1) +@@ -3343,6 +3731,9 @@ + if (nflag == 1) + dump_memory_nodes(MEMORY_NODES_DUMP); + ++ if (zflag == 1) ++ dump_zone_stats(); ++ + if (lflag == 1) { + dump_page_lists(&meminfo); + } +@@ -3352,7 +3743,13 @@ + dump_page_lists(&meminfo); + } + +- if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + ++ if (Vflag == 1) { ++ dump_vm_stat(NULL, NULL, 0); ++ dump_page_states(); ++ dump_vm_event_state(); ++ } ++ ++ if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + Vflag + zflag + + cflag + Cflag + iflag + nflag + lflag + Lflag + meminfo.calls)) + cmd_usage(pc->curcmd, SYNOPSIS); + +@@ -3373,12 +3770,13 @@ + buf = (char *)GETBUF(SIZE(page)); + + if (!readmem(pageptr, KVADDR, buf, SIZE(page), +- "reserved page", RETURN_ON_ERROR|QUIET)) ++ "reserved page", RETURN_ON_ERROR|QUIET)) { ++ FREEBUF(buf); + return; ++ } + + flags = ULONG(buf + OFFSET(page_flags)); + +- + if (count_bits_long(flags) == 1) + vt->PG_reserved = flags; + else +@@ -3386,12 +3784,64 @@ + + if (CRASHDEBUG(2)) + fprintf(fp, +- "PG_reserved bit: vaddr: %lx page: %lx flags: %lx => %lx\n", ++ "PG_reserved: vaddr: %lx page: %lx flags: %lx => %lx\n", + vaddr, pageptr, flags, vt->PG_reserved); + + FREEBUF(buf); + } + ++static void ++PG_slab_flag_init(void) ++{ ++ int bit; ++ ulong pageptr; ++ ulong vaddr, flags; ++ char buf[BUFSIZE]; /* safe for a page struct */ ++ ++ /* ++ * Set the old defaults in case the search below fails. ++ */ ++ if (VALID_MEMBER(page_pte)) { ++ if (THIS_KERNEL_VERSION < LINUX(2,6,0)) ++ vt->PG_slab = 10; ++ else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) ++ vt->PG_slab = 7; ++ } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) ++ vt->PG_slab = 7; ++ ++ if (vt->flags & KMALLOC_SLUB) { ++ /* ++ * PG_slab and the following are hardwired for ++ * now -- at least until I can come up with ++ * better way. 
(PG_slab test below fails because ++ * slub.c uses lower-bit PG_active and PG_error) ++ */ ++#define PG_compound 14 /* Part of a compound page */ ++#define PG_reclaim 17 /* To be reclaimed asap */ ++ vt->PG_head_tail_mask = ((1L << PG_compound) | (1L << PG_reclaim)); ++ ++ return; ++ } ++ ++ if (try_get_symbol_data("vm_area_cachep", sizeof(void *), &vaddr) && ++ phys_to_page((physaddr_t)VTOP(vaddr), &pageptr) && ++ readmem(pageptr, KVADDR, buf, SIZE(page), ++ "vm_area_cachep page", RETURN_ON_ERROR|QUIET)) { ++ ++ flags = ULONG(buf + OFFSET(page_flags)); ++ ++ if ((bit = ffsl(flags))) { ++ vt->PG_slab = bit - 1; ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, ++ "PG_slab bit: vaddr: %lx page: %lx flags: %lx => %ld\n", ++ vaddr, pageptr, flags, vt->PG_slab); ++ ++ } ++ } ++} ++ + /* + * dump_mem_map() displays basic data about each entry in the mem_map[] + * array, or if an address is specified, just the mem_map[] entry for that +@@ -3438,22 +3888,20 @@ + #define PGMM_CACHED (512) + + static void +-dump_mem_map(struct meminfo *mi) ++dump_mem_map_SPARSEMEM(struct meminfo *mi) + { +- long i, n; ++ ulong i; + long total_pages; +- int others, page_not_mapped, phys_not_mapped; ++ int others, page_not_mapped, phys_not_mapped, page_mapping; + ulong pp, ppend; + physaddr_t phys, physend; + ulong tmp, reserved, shared, slabs; + ulong PG_reserved_flag; + long buffers; + ulong inode, offset, flags, mapping, index; +- ulong node_size; + uint count; + int print_hdr, pg_spec, phys_spec, done; + int v22; +- struct node_table *nt; + char hdr[BUFSIZE]; + char buf0[BUFSIZE]; + char buf1[BUFSIZE]; +@@ -3462,6 +3910,7 @@ + char buf4[BUFSIZE]; + char *page_cache; + char *pcache; ++ ulong section, section_nr, nr_mem_sections, section_size; + + v22 = VALID_MEMBER(page_inode); /* page.inode vs. page.mapping */ + +@@ -3549,22 +3998,62 @@ + done = FALSE; + total_pages = 0; + +- for (n = 0; n < vt->numnodes; n++) { ++ nr_mem_sections = NR_MEM_SECTIONS(); ++ ++ /* ++ * Iterate over all possible sections ++ */ ++ for (section_nr = 0; section_nr < nr_mem_sections ; section_nr++) { ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "section_nr = %ld\n", section_nr); ++ ++ /* ++ * If we are looking up a specific address, jump directly ++ * to the section with that page ++ */ ++ if (mi->flags & ADDRESS_SPECIFIED) { ++ ulong pfn; ++ physaddr_t tmp; ++ ++ if (pg_spec) { ++ if (!page_to_phys(mi->spec_addr, &tmp)) ++ return; ++ pfn = tmp >> PAGESHIFT(); ++ } else ++ pfn = mi->spec_addr >> PAGESHIFT(); ++ section_nr = pfn_to_section_nr(pfn); ++ } ++ ++ if (!(section = valid_section_nr(section_nr))) { ++#ifdef NOTDEF ++ break; /* On a real sparsemem system we need to check ++ * every section as gaps may exist. But this ++ * can be slow. If we know we don't have gaps ++ * just stop validating sections when we ++ * get to the end of the valid ones. ++ * In the future find a way to short circuit ++ * this loop. ++ */ ++#endif ++ if (mi->flags & ADDRESS_SPECIFIED) ++ break; ++ continue; ++ } ++ + if (print_hdr) { +- fprintf(fp, "%s%s", n ? 
"\n" : "", hdr); ++ if (!(pc->curcmd_flags & HEADER_PRINTED)) ++ fprintf(fp, "%s", hdr); + print_hdr = FALSE; ++ pc->curcmd_flags |= HEADER_PRINTED; + } + +- nt = &vt->node_table[n]; +- total_pages += nt->size; +- pp = nt->mem_map; +- phys = nt->start_paddr; +- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) +- node_size = vt->max_mapnr; +- else +- node_size = nt->size; ++ pp = section_mem_map_addr(section); ++ pp = sparse_decode_mem_map(pp, section_nr); ++ phys = (physaddr_t) section_nr * PAGES_PER_SECTION() * PAGESIZE(); ++ section_size = PAGES_PER_SECTION(); + +- for (i = 0; i < node_size; ++ for (i = 0; i < section_size; + i++, pp += SIZE(page), phys += PAGESIZE()) { + + if ((i % PGMM_CACHED) == 0) { +@@ -3581,7 +4070,7 @@ + continue; + } + +- fill_mem_map_cache(pp, page_cache); ++ fill_mem_map_cache(pp, ppend, page_cache); + } + + pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); +@@ -3653,11 +4142,12 @@ + } + continue; + } ++ page_mapping = VALID_MEMBER(page_mapping); + + if (v22) { + inode = ULONG(pcache + OFFSET(page_inode)); + offset = ULONG(pcache + OFFSET(page_offset)); +- } else { ++ } else if (page_mapping) { + mapping = ULONG(pcache + + OFFSET(page_mapping)); + index = ULONG(pcache + OFFSET(page_index)); +@@ -3700,6 +4190,20 @@ + space(MINSPACE), + mkstring(buf4, 8, CENTER|RJUST, " "), + " "); ++ else if (!page_mapping) ++ fprintf(fp, "%s%s%s%s%s%s%s %2d ", ++ mkstring(buf0, VADDR_PRLEN, ++ LJUST|LONG_HEX, MKSTR(pp)), ++ space(MINSPACE), ++ mkstring(buf1, MAX(PADDR_PRLEN, ++ strlen("PHYSICAL")), ++ RJUST|LONGLONG_HEX, MKSTR(&phys)), ++ space(MINSPACE), ++ mkstring(buf3, VADDR_PRLEN, ++ CENTER|RJUST, "-------"), ++ space(MINSPACE), ++ mkstring(buf4, 8, CENTER|RJUST, "-----"), ++ count); + else + fprintf(fp, "%s%s%s%s%s%s%8ld %2d ", + mkstring(buf0, VADDR_PRLEN, +@@ -3862,193 +4366,640 @@ + FREEBUF(page_cache); + } + +-/* +- * Stash a chunk of PGMM_CACHED page structures, starting at addr, into the +- * passed-in buffer. The mem_map array is normally guaranteed to be +- * readable except in the case of virtual mem_map usage. When V_MEM_MAP +- * is in place, read all pages consumed by PGMM_CACHED page structures +- * that are currently mapped, leaving the unmapped ones just zeroed out. +- */ + static void +-fill_mem_map_cache(ulong pp, char *page_cache) ++dump_mem_map(struct meminfo *mi) + { +- long size, cnt; +- ulong addr; +- char *bufptr; ++ long i, n; ++ long total_pages; ++ int others, page_not_mapped, phys_not_mapped, page_mapping; ++ ulong pp, ppend; ++ physaddr_t phys, physend; ++ ulong tmp, reserved, shared, slabs; ++ ulong PG_reserved_flag; ++ long buffers; ++ ulong inode, offset, flags, mapping, index; ++ ulong node_size; ++ uint count; ++ int print_hdr, pg_spec, phys_spec, done; ++ int v22; ++ struct node_table *nt; ++ char hdr[BUFSIZE]; ++ char buf0[BUFSIZE]; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char buf3[BUFSIZE]; ++ char buf4[BUFSIZE]; ++ char *page_cache; ++ char *pcache; + +- /* +- * Try to read it in one fell swoop. +- */ +- if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED, +- "page struct cache", RETURN_ON_ERROR|QUIET)) ++ if (IS_SPARSEMEM()) { ++ dump_mem_map_SPARSEMEM(mi); + return; ++ } + +- /* +- * Break it into page-size-or-less requests, warning if it's +- * not a virtual mem_map. +- */ +- size = SIZE(page) * PGMM_CACHED; +- addr = pp; +- bufptr = page_cache; +- +- while (size > 0) { +- /* +- * Compute bytes till end of page. 
+- */ +- cnt = PAGESIZE() - PAGEOFFSET(addr); +- +- if (cnt > size) +- cnt = size; +- +- if (!readmem(addr, KVADDR, bufptr, size, +- "virtual page struct cache", RETURN_ON_ERROR|QUIET)) { +- BZERO(bufptr, size); +- if (!(vt->flags & V_MEM_MAP)) +- error(WARNING, +- "mem_map[] from %lx to %lx not accessible\n", +- addr, addr+size); +- } ++ v22 = VALID_MEMBER(page_inode); /* page.inode vs. page.mapping */ + +- addr += cnt; +- bufptr += cnt; +- size -= cnt; ++ if (v22) { ++ sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", ++ mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), ++ space(MINSPACE), ++ mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), ++ RJUST, "PHYSICAL"), ++ space(MINSPACE), ++ mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), ++ space(MINSPACE), ++ mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), ++ space(MINSPACE-1)); ++ } else { ++ sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", ++ mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), ++ space(MINSPACE), ++ mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), ++ RJUST, "PHYSICAL"), ++ space(MINSPACE), ++ mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), ++ space(MINSPACE), ++ mkstring(buf4, 8, CENTER|RJUST, "INDEX")); + } +-} + ++ pg_spec = phys_spec = print_hdr = FALSE; ++ ++ switch (mi->flags) ++ { ++ case ADDRESS_SPECIFIED: ++ switch (mi->memtype) ++ { ++ case KVADDR: ++ if (is_page_ptr(mi->spec_addr, NULL)) ++ pg_spec = TRUE; ++ else { ++ if (kvtop(NULL, mi->spec_addr, &phys, 0)) { ++ mi->spec_addr = phys; ++ phys_spec = TRUE; ++ } ++ else ++ return; ++ } ++ break; ++ case PHYSADDR: ++ phys_spec = TRUE; ++ break; ++ default: ++ error(FATAL, "dump_mem_map: no memtype specified\n"); ++ break; ++ } ++ print_hdr = TRUE; ++ break; + +-/* +- * dump_page_hash_table() displays the entries in each page_hash_table. 
+- */ ++ case GET_ALL: ++ shared = 0; ++ reserved = 0; ++ buffers = 0; ++ slabs = 0; ++ break; + +-#define PGHASH_CACHED (1024) ++ case GET_SHARED_PAGES: ++ shared = 0; ++ break; + +-static void +-dump_page_hash_table(struct meminfo *hi) +-{ +- int i; +- int len, entry_len; +- ulong page_hash_table, head; +- struct list_data list_data, *ld; +- struct gnu_request req; +- long total_cached; +- long page_cache_size; +- ulong this_addr, searchpage; +- int errflag, found, cnt, populated, verbose; +- uint ival; +- ulong buffer_pages; +- char buf[BUFSIZE]; +- char hash_table[BUFSIZE]; +- char *pcache, *pghash_cache; ++ case GET_TOTALRAM_PAGES: ++ reserved = 0; ++ break; + +- if (!vt->page_hash_table) { +- if (hi->flags & VERBOSE) +- error(FATAL, +- "address_space page cache radix tree not supported\n"); +- +- if (symbol_exists("nr_pagecache")) { +- buffer_pages = nr_blockdev_pages(); +- get_symbol_data("nr_pagecache", sizeof(int), &ival); +- page_cache_size = (ulong)ival; +- page_cache_size -= buffer_pages; +- fprintf(fp, "page cache size: %ld\n", page_cache_size); +- if (hi->flags & ADDRESS_SPECIFIED) +- error(INFO, +- "address_space page cache radix tree not supported: %lx: ignored\n", +- hi->spec_addr); +- } else +- error(FATAL, "cannot determine page cache size\n"); +- return; +- } ++ case GET_BUFFERS_PAGES: ++ buffers = 0; ++ break; + +- ld = &list_data; ++ case GET_SLAB_PAGES: ++ slabs = 0; ++ break; + +- if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) { +- verbose = TRUE; +- searchpage = hi->spec_addr; +- } else if (hi->flags & VERBOSE) { +- verbose = TRUE; +- searchpage = 0; +- } else { +- verbose = FALSE; +- searchpage = 0; ++ default: ++ print_hdr = TRUE; ++ break; + } + +- if (vt->page_hash_table_len == 0) +- error(FATAL, "cannot determine size of page_hash_table\n"); +- +- page_hash_table = vt->page_hash_table; +- len = vt->page_hash_table_len; +- entry_len = VALID_STRUCT(page_cache_bucket) ? +- SIZE(page_cache_bucket) : sizeof(void *); ++ page_cache = GETBUF(SIZE(page) * PGMM_CACHED); ++ done = FALSE; ++ total_pages = 0; + +- if (CRASHDEBUG(1)) { +- populated = 0; +- fprintf(fp, "page_hash_table length: %d\n", len); +- } ++ for (n = 0; n < vt->numnodes; n++) { ++ if (print_hdr) { ++ if (!(pc->curcmd_flags & HEADER_PRINTED)) ++ fprintf(fp, "%s%s", n ? 
"\n" : "", hdr); ++ print_hdr = FALSE; ++ pc->curcmd_flags |= HEADER_PRINTED; ++ } + +- get_symbol_type("page_cache_size", NULL, &req); +- if (req.length == sizeof(int)) { +- get_symbol_data("page_cache_size", sizeof(int), &ival); +- page_cache_size = (long)ival; +- } else +- get_symbol_data("page_cache_size", sizeof(long), +- &page_cache_size); ++ nt = &vt->node_table[n]; ++ total_pages += nt->size; ++ pp = nt->mem_map; ++ phys = nt->start_paddr; ++ if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) ++ node_size = vt->max_mapnr; ++ else ++ node_size = nt->size; + +- pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED); ++ for (i = 0; i < node_size; ++ i++, pp += SIZE(page), phys += PAGESIZE()) { + +- if (searchpage) +- open_tmpfile(); ++ if ((i % PGMM_CACHED) == 0) { ++ ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); ++ physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); + +- hq_open(); +- for (i = total_cached = 0; i < len; i++, +- page_hash_table += entry_len) { ++ if ((pg_spec && (mi->spec_addr > ppend)) || ++ (phys_spec && ++ (PHYSPAGEBASE(mi->spec_addr) > physend))) { ++ i += (PGMM_CACHED-1); ++ pp = ppend; ++ phys = physend; ++ continue; ++ } + +- if ((i % PGHASH_CACHED) == 0) { +- readmem(page_hash_table, KVADDR, pghash_cache, +- entry_len * PGHASH_CACHED, +- "page hash cache", FAULT_ON_ERROR); +- } ++ fill_mem_map_cache(pp, ppend, page_cache); ++ } + +- pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len); +- if (VALID_STRUCT(page_cache_bucket)) +- pcache += OFFSET(page_cache_bucket_chain); +- +- head = ULONG(pcache); ++ pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); + +- if (!head) +- continue; ++ if (received_SIGINT()) ++ restart(0); ++ ++ if ((pg_spec && (pp == mi->spec_addr)) || ++ (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) ++ done = TRUE; + +- if (verbose) +- fprintf(fp, "page_hash_table[%d]\n", i); +- +- if (CRASHDEBUG(1)) +- populated++; ++ if (!done && (pg_spec || phys_spec)) ++ continue; ++ ++ flags = ULONG(pcache + OFFSET(page_flags)); ++ count = UINT(pcache + OFFSET(page_count)); + +- BZERO(ld, sizeof(struct list_data)); +- ld->flags = verbose; +- ld->start = head; +- ld->searchfor = searchpage; +- ld->member_offset = OFFSET(page_next_hash); +- cnt = do_list(ld); +- total_cached += cnt; ++ switch (mi->flags) ++ { ++ case GET_ALL: ++ case GET_BUFFERS_PAGES: ++ if (VALID_MEMBER(page_buffers)) { ++ tmp = ULONG(pcache + ++ OFFSET(page_buffers)); ++ if (tmp) ++ buffers++; ++ } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { ++ if ((flags >> v26_PG_private) & 1) ++ buffers++; ++ } else ++ error(FATAL, ++ "cannot determine whether pages have buffers\n"); + +- if (ld->searchfor) +- break; ++ if (mi->flags != GET_ALL) ++ continue; + +- if (received_SIGINT()) +- restart(0); +- } +- hq_close(); ++ /* FALLTHROUGH */ + +- fprintf(fp, "%spage_cache_size: %ld ", verbose ? "\n" : "", +- page_cache_size); +- if (page_cache_size != total_cached) +- fprintf(fp, "(found %ld)\n", total_cached); +- else +- fprintf(fp, "(verified)\n"); ++ case GET_SLAB_PAGES: ++ if (v22) { ++ if ((flags >> v22_PG_Slab) & 1) ++ slabs++; ++ } else if (vt->PG_slab) { ++ if ((flags >> vt->PG_slab) & 1) ++ slabs++; ++ } else { ++ if ((flags >> v24_PG_slab) & 1) ++ slabs++; ++ } ++ if (mi->flags != GET_ALL) ++ continue; + +- if (CRASHDEBUG(1)) +- fprintf(fp, "heads containing page(s): %d\n", populated); ++ /* FALLTHROUGH */ ++ ++ case GET_SHARED_PAGES: ++ case GET_TOTALRAM_PAGES: ++ if (vt->PG_reserved) ++ PG_reserved_flag = vt->PG_reserved; ++ else ++ PG_reserved_flag = v22 ? 
++ 1 << v22_PG_reserved : ++ 1 << v24_PG_reserved; ++ ++ if (flags & PG_reserved_flag) { ++ reserved++; ++ } else { ++ if (count > 1) ++ shared++; ++ } ++ continue; ++ } ++ ++ page_mapping = VALID_MEMBER(page_mapping); ++ ++ if (v22) { ++ inode = ULONG(pcache + OFFSET(page_inode)); ++ offset = ULONG(pcache + OFFSET(page_offset)); ++ } else if (page_mapping) { ++ mapping = ULONG(pcache + ++ OFFSET(page_mapping)); ++ index = ULONG(pcache + OFFSET(page_index)); ++ } ++ ++ page_not_mapped = phys_not_mapped = FALSE; ++ ++ if (v22) { ++ fprintf(fp, "%lx%s%s%s%s%s%8lx %2d%s", ++ pp, ++ space(MINSPACE), ++ mkstring(buf1, MAX(PADDR_PRLEN, ++ strlen("PHYSICAL")), ++ RJUST|LONGLONG_HEX, MKSTR(&phys)), ++ space(MINSPACE), ++ mkstring(buf2, VADDR_PRLEN, ++ RJUST|LONG_HEX, MKSTR(inode)), ++ space(MINSPACE), ++ offset, ++ count, ++ space(MINSPACE)); ++ } else { ++ if ((vt->flags & V_MEM_MAP)) { ++ if (!machdep->verify_paddr(phys)) ++ phys_not_mapped = TRUE; ++ if (!kvtop(NULL, pp, NULL, 0)) ++ page_not_mapped = TRUE; ++ } ++ if (page_not_mapped) ++ fprintf(fp, "%s%s%s%s%s%s%s %2s ", ++ mkstring(buf0, VADDR_PRLEN, ++ LJUST|LONG_HEX, MKSTR(pp)), ++ space(MINSPACE), ++ mkstring(buf1, MAX(PADDR_PRLEN, ++ strlen("PHYSICAL")), ++ RJUST|LONGLONG_HEX, MKSTR(&phys)), ++ space(MINSPACE), ++ mkstring(buf3, VADDR_PRLEN, ++ CENTER|RJUST, " "), ++ space(MINSPACE), ++ mkstring(buf4, 8, CENTER|RJUST, " "), ++ " "); ++ else if (!page_mapping) ++ fprintf(fp, "%s%s%s%s%s%s%s %2d ", ++ mkstring(buf0, VADDR_PRLEN, ++ LJUST|LONG_HEX, MKSTR(pp)), ++ space(MINSPACE), ++ mkstring(buf1, MAX(PADDR_PRLEN, ++ strlen("PHYSICAL")), ++ RJUST|LONGLONG_HEX, MKSTR(&phys)), ++ space(MINSPACE), ++ mkstring(buf3, VADDR_PRLEN, ++ CENTER|RJUST, "-------"), ++ space(MINSPACE), ++ mkstring(buf4, 8, CENTER|RJUST, "-----"), ++ count); ++ else ++ fprintf(fp, "%s%s%s%s%s%s%8ld %2d ", ++ mkstring(buf0, VADDR_PRLEN, ++ LJUST|LONG_HEX, MKSTR(pp)), ++ space(MINSPACE), ++ mkstring(buf1, MAX(PADDR_PRLEN, ++ strlen("PHYSICAL")), ++ RJUST|LONGLONG_HEX, MKSTR(&phys)), ++ space(MINSPACE), ++ mkstring(buf2, VADDR_PRLEN, ++ RJUST|LONG_HEX, MKSTR(mapping)), ++ space(MINSPACE), ++ index, ++ count); ++ } ++ ++ others = 0; ++ ++ if (v22) { ++ if ((flags >> v22_PG_DMA) & 1) ++ fprintf(fp, "%sDMA", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_locked) & 1) ++ fprintf(fp, "%slocked", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_error) & 1) ++ fprintf(fp, "%serror", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_referenced) & 1) ++ fprintf(fp, "%sreferenced", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_dirty) & 1) ++ fprintf(fp, "%sdirty", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_uptodate) & 1) ++ fprintf(fp, "%suptodate", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_free_after) & 1) ++ fprintf(fp, "%sfree_after", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_decr_after) & 1) ++ fprintf(fp, "%sdecr_after", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_swap_unlock_after) & 1) ++ fprintf(fp, "%sswap_unlock_after", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_Slab) & 1) ++ fprintf(fp, "%sslab", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_swap_cache) & 1) ++ fprintf(fp, "%sswap_cache", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_skip) & 1) ++ fprintf(fp, "%sskip", ++ others++ ? "," : ""); ++ if ((flags >> v22_PG_reserved) & 1) ++ fprintf(fp, "%sreserved", ++ others++ ? 
"," : ""); ++ fprintf(fp, "\n"); ++ } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { ++ fprintf(fp, "%lx\n", flags); ++ } else { ++ ++ if ((flags >> v24_PG_locked) & 1) ++ fprintf(fp, "%slocked", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_error) & 1) ++ fprintf(fp, "%serror", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_referenced) & 1) ++ fprintf(fp, "%sreferenced", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_uptodate) & 1) ++ fprintf(fp, "%suptodate", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_dirty) & 1) ++ fprintf(fp, "%sdirty", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_decr_after) & 1) ++ fprintf(fp, "%sdecr_after", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_active) & 1) ++ fprintf(fp, "%sactive", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_inactive_dirty) & 1) ++ fprintf(fp, "%sinactive_dirty", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_slab) & 1) ++ fprintf(fp, "%sslab", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_swap_cache) & 1) ++ fprintf(fp, "%sswap_cache", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_skip) & 1) ++ fprintf(fp, "%sskip", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_inactive_clean) & 1) ++ fprintf(fp, "%sinactive_clean", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_highmem) & 1) ++ fprintf(fp, "%shighmem", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_checked) & 1) ++ fprintf(fp, "%schecked", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_bigpage) & 1) ++ fprintf(fp, "%sbigpage", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_arch_1) & 1) ++ fprintf(fp, "%sarch_1", ++ others++ ? "," : ""); ++ if ((flags >> v24_PG_reserved) & 1) ++ fprintf(fp, "%sreserved", ++ others++ ? "," : ""); ++ if (phys_not_mapped) ++ fprintf(fp, "%s[NOT MAPPED]", ++ others++ ? " " : ""); ++ ++ fprintf(fp, "\n"); ++ } ++ ++ if (done) ++ break; ++ } ++ ++ if (done) ++ break; ++ } ++ ++ switch (mi->flags) ++ { ++ case GET_TOTALRAM_PAGES: ++ mi->retval = total_pages - reserved; ++ break; ++ ++ case GET_SHARED_PAGES: ++ mi->retval = shared; ++ break; ++ ++ case GET_BUFFERS_PAGES: ++ mi->retval = buffers; ++ break; ++ ++ case GET_SLAB_PAGES: ++ mi->retval = slabs; ++ break; ++ ++ case GET_ALL: ++ mi->get_totalram = total_pages - reserved; ++ mi->get_shared = shared; ++ mi->get_buffers = buffers; ++ mi->get_slabs = slabs; ++ break; ++ ++ case ADDRESS_SPECIFIED: ++ mi->retval = done; ++ break; ++ } ++ ++ FREEBUF(page_cache); ++} ++ ++/* ++ * Stash a chunk of PGMM_CACHED page structures, starting at addr, into the ++ * passed-in buffer. The mem_map array is normally guaranteed to be ++ * readable except in the case of virtual mem_map usage. When V_MEM_MAP ++ * is in place, read all pages consumed by PGMM_CACHED page structures ++ * that are currently mapped, leaving the unmapped ones just zeroed out. ++ */ ++static void ++fill_mem_map_cache(ulong pp, ulong ppend, char *page_cache) ++{ ++ long size, cnt; ++ ulong addr; ++ char *bufptr; ++ ++ /* ++ * Try to read it in one fell swoop. ++ */ ++ if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED, ++ "page struct cache", RETURN_ON_ERROR|QUIET)) ++ return; ++ ++ /* ++ * Break it into page-size-or-less requests, warning if it's ++ * not a virtual mem_map. ++ */ ++ size = SIZE(page) * PGMM_CACHED; ++ addr = pp; ++ bufptr = page_cache; ++ ++ while (size > 0) { ++ /* ++ * Compute bytes till end of page. 
++ */ ++ cnt = PAGESIZE() - PAGEOFFSET(addr); ++ ++ if (cnt > size) ++ cnt = size; ++ ++ if (!readmem(addr, KVADDR, bufptr, size, ++ "virtual page struct cache", RETURN_ON_ERROR|QUIET)) { ++ BZERO(bufptr, size); ++ if (!(vt->flags & V_MEM_MAP) && ((addr+size) < ppend)) ++ error(WARNING, ++ "mem_map[] from %lx to %lx not accessible\n", ++ addr, addr+size); ++ } ++ ++ addr += cnt; ++ bufptr += cnt; ++ size -= cnt; ++ } ++} ++ ++ ++/* ++ * dump_page_hash_table() displays the entries in each page_hash_table. ++ */ ++ ++#define PGHASH_CACHED (1024) ++ ++static void ++dump_page_hash_table(struct meminfo *hi) ++{ ++ int i; ++ int len, entry_len; ++ ulong page_hash_table, head; ++ struct list_data list_data, *ld; ++ struct gnu_request req; ++ long total_cached; ++ long page_cache_size; ++ ulong this_addr, searchpage; ++ int errflag, found, cnt, populated, verbose; ++ uint ival; ++ ulong buffer_pages; ++ char buf[BUFSIZE]; ++ char hash_table[BUFSIZE]; ++ char *pcache, *pghash_cache; ++ ++ if (!vt->page_hash_table) { ++ if (hi->flags & VERBOSE) ++ error(FATAL, ++ "address_space page cache radix tree not supported\n"); ++ ++ if (symbol_exists("nr_pagecache")) { ++ buffer_pages = nr_blockdev_pages(); ++ get_symbol_data("nr_pagecache", sizeof(int), &ival); ++ page_cache_size = (ulong)ival; ++ page_cache_size -= buffer_pages; ++ fprintf(fp, "page cache size: %ld\n", page_cache_size); ++ if (hi->flags & ADDRESS_SPECIFIED) ++ error(INFO, ++ "address_space page cache radix tree not supported: %lx: ignored\n", ++ hi->spec_addr); ++ } else ++ error(FATAL, "cannot determine page cache size\n"); ++ return; ++ } ++ ++ ld = &list_data; ++ ++ if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) { ++ verbose = TRUE; ++ searchpage = hi->spec_addr; ++ } else if (hi->flags & VERBOSE) { ++ verbose = TRUE; ++ searchpage = 0; ++ } else { ++ verbose = FALSE; ++ searchpage = 0; ++ } ++ ++ if (vt->page_hash_table_len == 0) ++ error(FATAL, "cannot determine size of page_hash_table\n"); ++ ++ page_hash_table = vt->page_hash_table; ++ len = vt->page_hash_table_len; ++ entry_len = VALID_STRUCT(page_cache_bucket) ? 
++ SIZE(page_cache_bucket) : sizeof(void *); ++ ++ if (CRASHDEBUG(1)) { ++ populated = 0; ++ fprintf(fp, "page_hash_table length: %d\n", len); ++ } ++ ++ get_symbol_type("page_cache_size", NULL, &req); ++ if (req.length == sizeof(int)) { ++ get_symbol_data("page_cache_size", sizeof(int), &ival); ++ page_cache_size = (long)ival; ++ } else ++ get_symbol_data("page_cache_size", sizeof(long), ++ &page_cache_size); ++ ++ pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED); ++ ++ if (searchpage) ++ open_tmpfile(); ++ ++ hq_open(); ++ for (i = total_cached = 0; i < len; i++, ++ page_hash_table += entry_len) { ++ ++ if ((i % PGHASH_CACHED) == 0) { ++ readmem(page_hash_table, KVADDR, pghash_cache, ++ entry_len * PGHASH_CACHED, ++ "page hash cache", FAULT_ON_ERROR); ++ } ++ ++ pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len); ++ if (VALID_STRUCT(page_cache_bucket)) ++ pcache += OFFSET(page_cache_bucket_chain); ++ ++ head = ULONG(pcache); ++ ++ if (!head) ++ continue; ++ ++ if (verbose) ++ fprintf(fp, "page_hash_table[%d]\n", i); ++ ++ if (CRASHDEBUG(1)) ++ populated++; ++ ++ BZERO(ld, sizeof(struct list_data)); ++ ld->flags = verbose; ++ ld->start = head; ++ ld->searchfor = searchpage; ++ ld->member_offset = OFFSET(page_next_hash); ++ cnt = do_list(ld); ++ total_cached += cnt; ++ ++ if (ld->searchfor) ++ break; ++ ++ if (received_SIGINT()) ++ restart(0); ++ } ++ hq_close(); ++ ++ fprintf(fp, "%spage_cache_size: %ld ", verbose ? "\n" : "", ++ page_cache_size); ++ if (page_cache_size != total_cached) ++ fprintf(fp, "(found %ld)\n", total_cached); ++ else ++ fprintf(fp, "(verified)\n"); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "heads containing page(s): %d\n", populated); + + if (searchpage) { + rewind(pc->tmpfile); +@@ -4057,5262 +5008,8014 @@ + if (CRASHDEBUG(1) && STRNEQ(buf, "retval = TRUE; ++ } ++ } ++} ++ ++/* ++ * dump_free_pages() displays basic data about pages currently resident ++ * in the free_area[] memory lists. If the flags contains the VERBOSE ++ * bit, each page slab base address is dumped. If an address is specified ++ * only the free_area[] data containing that page is displayed, along with ++ * the page slab base address. Specified addresses can either be physical ++ * address or page structure pointers. 
++ */ ++char *free_area_hdr1 = \ ++ "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; ++char *free_area_hdr2 = \ ++ "AREA SIZE FREE_AREA_STRUCT\n"; ++ ++static void ++dump_free_pages(struct meminfo *fi) ++{ ++ int i; ++ int order; ++ ulong free_area; ++ char *free_area_buf; ++ ulong *pp; ++ int nr_mem_lists; ++ struct list_data list_data, *ld; ++ long cnt, total_free, chunk_size; ++ int nr_free_pages; ++ char buf[BUFSIZE]; ++ char last_free[BUFSIZE]; ++ char last_free_hdr[BUFSIZE]; ++ int verbose, errflag, found; ++ physaddr_t searchphys; ++ ulong this_addr; ++ physaddr_t this_phys; ++ int do_search; ++ ulong kfp, offset; ++ int flen, dimension; ++ ++ if (vt->flags & (NODES|ZONES)) ++ error(FATAL, "dump_free_pages called with (NODES|ZONES)\n"); ++ ++ nr_mem_lists = ARRAY_LENGTH(free_area); ++ dimension = ARRAY_LENGTH(free_area_DIMENSION); ++ ++ if (nr_mem_lists == 0) ++ error(FATAL, "cannot determine size/dimensions of free_area\n"); ++ ++ if (dimension) ++ error(FATAL, ++ "dump_free_pages called with multidimensional free area\n"); ++ ++ ld = &list_data; ++ total_free = 0; ++ searchphys = 0; ++ do_search = FALSE; ++ get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); ++ ++ switch (fi->flags) ++ { ++ case GET_FREE_HIGHMEM_PAGES: ++ error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); ++ ++ case GET_FREE_PAGES: ++ fi->retval = (ulong)nr_free_pages; ++ return; ++ ++ case ADDRESS_SPECIFIED: ++ switch (fi->memtype) ++ { ++ case KVADDR: ++ if (!page_to_phys(fi->spec_addr, &searchphys)) { ++ if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) ++ return; ++ } ++ break; ++ case PHYSADDR: ++ searchphys = fi->spec_addr; ++ break; ++ default: ++ error(FATAL, "dump_free_pages: no memtype specified\n"); ++ } ++ do_search = TRUE; ++ break; ++ } ++ ++ verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; ++ ++ free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct)); ++ kfp = free_area = symbol_value("free_area"); ++ flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); ++ readmem(free_area, KVADDR, free_area_buf, ++ SIZE(free_area_struct) * nr_mem_lists, ++ "free_area_struct", FAULT_ON_ERROR); ++ ++ if (do_search) ++ open_tmpfile(); ++ ++ if (!verbose) ++ fprintf(fp, free_area_hdr1); ++ ++ hq_open(); ++ for (i = 0; i < nr_mem_lists; i++) { ++ pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i)); ++ ++ chunk_size = power(2, i); ++ ++ if (verbose) ++ fprintf(fp, free_area_hdr2); ++ ++ fprintf(fp, "%3d ", i); ++ sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); ++ fprintf(fp, "%5s ", buf); ++ ++ fprintf(fp, "%s %s", ++ mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), ++ verbose ? 
"\n" : ""); ++ ++ if (is_page_ptr(*pp, NULL)) { ++ BZERO(ld, sizeof(struct list_data)); ++ ld->flags = verbose; ++ ld->start = *pp; ++ ld->end = free_area; ++ cnt = do_list(ld); ++ total_free += (cnt * chunk_size); ++ } else ++ cnt = 0; ++ ++ if (!verbose) ++ fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); ++ ++ free_area += SIZE(free_area_struct); ++ kfp += SIZE(free_area_struct); ++ } ++ hq_close(); ++ ++ fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages); ++ if (total_free != nr_free_pages) ++ fprintf(fp, "(found %ld)\n", total_free); ++ else ++ fprintf(fp, "(verified)\n"); ++ ++ if (!do_search) ++ return; ++ ++ found = FALSE; ++ rewind(pc->tmpfile); ++ order = offset = 0; ++ ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (CRASHDEBUG(1) && STRNEQ(buf, "= this_phys) && ++ (searchphys < (this_phys+chunk_size))) { ++ if (searchphys > this_phys) ++ offset = (searchphys - this_phys)/PAGESIZE(); ++ found = TRUE; ++ break; ++ } ++ } ++ close_tmpfile(); ++ ++ if (found) { ++ order--; ++ ++ fprintf(fp, last_free_hdr); ++ fprintf(fp, last_free); ++ fprintf(fp, "%lx ", this_addr); ++ if (order) { ++ switch (fi->memtype) ++ { ++ case KVADDR: ++ fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); ++ break; ++ case PHYSADDR: ++ fprintf(fp, "(%llx is %s", fi->spec_addr, ++ PAGEOFFSET(fi->spec_addr) ? "in " : ""); ++ break; ++ } ++ fprintf(fp, "%s of %ld pages) ", ++ ordinal(offset+1, buf), power(2, order)); ++ } ++ ++ fi->retval = TRUE; ++ fprintf(fp, "\n"); ++ } ++} ++ ++/* ++ * Dump free pages on kernels with a multi-dimensional free_area array. ++ */ ++char *free_area_hdr5 = \ ++ " AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; ++char *free_area_hdr6 = \ ++ " AREA SIZE FREE_AREA_STRUCT\n"; ++ ++static void ++dump_multidimensional_free_pages(struct meminfo *fi) ++{ ++ int i, j; ++ struct list_data list_data, *ld; ++ long cnt, total_free; ++ ulong kfp, free_area; ++ physaddr_t searchphys; ++ int flen, errflag, verbose, nr_free_pages; ++ int nr_mem_lists, dimension, order, do_search; ++ ulong sum, found, offset; ++ char *free_area_buf, *p; ++ ulong *pp; ++ long chunk_size; ++ ulong this_addr; ++ physaddr_t this_phys; ++ char buf[BUFSIZE]; ++ char last_area[BUFSIZE]; ++ char last_area_hdr[BUFSIZE]; ++ ++ ++ if (vt->flags & (NODES|ZONES)) ++ error(FATAL, ++ "dump_multidimensional_free_pages called with (NODES|ZONES)\n"); ++ ++ ld = &list_data; ++ if (SIZE(free_area_struct) % sizeof(ulong)) ++ error(FATAL, "free_area_struct not long-word aligned?\n"); ++ ++ total_free = 0; ++ searchphys = 0; ++ do_search = FALSE; ++ get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); ++ ++ switch (fi->flags) ++ { ++ case GET_FREE_HIGHMEM_PAGES: ++ error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); ++ ++ case GET_FREE_PAGES: ++ fi->retval = (ulong)nr_free_pages; ++ return; ++ ++ case ADDRESS_SPECIFIED: ++ switch (fi->memtype) ++ { ++ case KVADDR: ++ if (!page_to_phys(fi->spec_addr, &searchphys)) { ++ if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) ++ return; ++ } ++ break; ++ case PHYSADDR: ++ searchphys = fi->spec_addr; ++ break; ++ default: ++ error(FATAL, ++ "dump_multidimensional_free_pages: no memtype specified\n"); ++ } ++ do_search = TRUE; ++ break; ++ } ++ ++ verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; ++ ++ flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); ++ nr_mem_lists = ARRAY_LENGTH(free_area); ++ dimension = ARRAY_LENGTH(free_area_DIMENSION); ++ if (!nr_mem_lists || !dimension) ++ error(FATAL, "cannot determine free_area dimensions\n"); ++ free_area_buf = ++ GETBUF((nr_mem_lists * SIZE(free_area_struct)) * dimension); ++ kfp = free_area = symbol_value("free_area"); ++ readmem(free_area, KVADDR, free_area_buf, ++ (SIZE(free_area_struct) * nr_mem_lists) * dimension, ++ "free_area arrays", FAULT_ON_ERROR); ++ ++ if (do_search) ++ open_tmpfile(); ++ ++ hq_open(); ++ for (i = sum = found = 0; i < dimension; i++) { ++ if (!verbose) ++ fprintf(fp, free_area_hdr5); ++ pp = (ulong *)(free_area_buf + ++ ((SIZE(free_area_struct)*nr_mem_lists)*i)); ++ for (j = 0; j < nr_mem_lists; j++) { ++ if (verbose) ++ fprintf(fp, free_area_hdr6); ++ ++ sprintf(buf, "[%d][%d]", i, j); ++ fprintf(fp, "%7s ", buf); ++ ++ chunk_size = power(2, j); ++ ++ sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); ++ fprintf(fp, "%5s ", buf); ++ ++ fprintf(fp, "%s %s", ++ mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), ++ verbose ? "\n" : ""); ++ ++ if (is_page_ptr(*pp, NULL)) { ++ BZERO(ld, sizeof(struct list_data)); ++ ld->flags = verbose; ++ ld->start = *pp; ++ ld->end = free_area; ++ cnt = do_list(ld); ++ total_free += (cnt * chunk_size); ++ } else ++ cnt = 0; ++ ++ if (!verbose) ++ fprintf(fp, ++ "%6ld %6ld\n", cnt, cnt * chunk_size ); ++ ++ pp += (SIZE(free_area_struct)/sizeof(ulong)); ++ free_area += SIZE(free_area_struct); ++ kfp += SIZE(free_area_struct); ++ } ++ fprintf(fp, "\n"); ++ } ++ hq_close(); ++ ++ fprintf(fp, "nr_free_pages: %d ", nr_free_pages); ++ if (total_free != nr_free_pages) ++ fprintf(fp, "(found %ld)\n", total_free); ++ else ++ fprintf(fp, "(verified)\n"); ++ ++ if (!do_search) ++ return; ++ ++ found = FALSE; ++ rewind(pc->tmpfile); ++ order = offset = 0; ++ ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); ++ strcpy(last_area, strip_linefeeds(buf)); ++ p = strstr(buf, "k"); ++ *p = NULLCHAR; ++ while (*p != ' ') ++ p--; ++ chunk_size = atol(p+1) * 1024; ++ if (chunk_size == PAGESIZE()) ++ order = 0; ++ else ++ order++; ++ continue; ++ } ++ ++ errflag = 0; ++ this_addr = htol(strip_linefeeds(buf), ++ RETURN_ON_ERROR, &errflag); ++ if (errflag) ++ continue; ++ ++ if (!page_to_phys(this_addr, &this_phys)) ++ continue; ++ ++ if ((searchphys >= this_phys) && ++ (searchphys < (this_phys+chunk_size))) { ++ if (searchphys > this_phys) ++ offset = (searchphys - this_phys)/PAGESIZE(); ++ found = TRUE; ++ break; ++ } ++ ++ } ++ close_tmpfile(); ++ ++ if (found) { ++ fprintf(fp, last_area_hdr); ++ fprintf(fp, "%s\n", last_area); ++ fprintf(fp, "%lx ", this_addr); ++ if (order) { ++ switch (fi->memtype) ++ { ++ case KVADDR: ++ fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); ++ break; ++ case PHYSADDR: ++ fprintf(fp, "(%llx is %s", fi->spec_addr, ++ PAGEOFFSET(fi->spec_addr) ? "in " : ""); ++ break; ++ } ++ fprintf(fp, "%s of %ld pages) ", ++ ordinal(offset+1, buf), power(2, order)); ++ } ++ ++ fi->retval = TRUE; ++ fprintf(fp, "\n"); ++ } ++} ++ ++ ++/* ++ * Dump free pages in newer kernels that have zones. This is a work in ++ * progress, because although the framework for memory nodes has been laid ++ * down, complete support has not been put in place. 
++ */ ++static char *zone_hdr = "ZONE NAME SIZE FREE"; ++ ++static void ++dump_free_pages_zones_v1(struct meminfo *fi) ++{ ++ int i, n; ++ ulong node_zones; ++ ulong size; ++ long zone_size_offset; ++ long chunk_size; ++ int order, errflag, do_search; ++ ulong offset, verbose, value, sum, found; ++ ulong this_addr; ++ physaddr_t this_phys, searchphys; ++ ulong zone_mem_map; ++ ulong zone_start_paddr; ++ ulong zone_start_mapnr; ++ struct node_table *nt; ++ char buf[BUFSIZE], *p; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char buf3[BUFSIZE]; ++ char last_node[BUFSIZE]; ++ char last_zone[BUFSIZE]; ++ char last_area[BUFSIZE]; ++ char last_area_hdr[BUFSIZE]; ++ ++ if (!(vt->flags & (NODES|ZONES))) ++ error(FATAL, ++ "dump_free_pages_zones_v1 called without (NODES|ZONES)\n"); ++ ++ if (fi->flags & ADDRESS_SPECIFIED) { ++ switch (fi->memtype) ++ { ++ case KVADDR: ++ if (!page_to_phys(fi->spec_addr, &searchphys)) { ++ if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) ++ return; ++ } ++ break; ++ case PHYSADDR: ++ searchphys = fi->spec_addr; ++ break; ++ default: ++ error(FATAL, ++ "dump_free_pages_zones_v1: no memtype specified\n"); ++ } ++ do_search = TRUE; ++ } else { ++ searchphys = 0; ++ do_search = FALSE; ++ } ++ verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; ++ ++ if (VALID_MEMBER(zone_struct_size)) ++ zone_size_offset = OFFSET(zone_struct_size); ++ else if (VALID_MEMBER(zone_struct_memsize)) ++ zone_size_offset = OFFSET(zone_struct_memsize); ++ else ++ error(FATAL, ++ "zone_struct has neither size nor memsize field\n"); ++ ++ if (do_search) ++ open_tmpfile(); ++ ++ hq_open(); ++ ++ for (n = sum = found = 0; n < vt->numnodes; n++) { ++ nt = &vt->node_table[n]; ++ node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ ++ for (i = 0; i < vt->nr_zones; i++) { ++ ++ if (fi->flags == GET_FREE_PAGES) { ++ readmem(node_zones+ ++ OFFSET(zone_struct_free_pages), ++ KVADDR, &value, sizeof(ulong), ++ "node_zones free_pages", ++ FAULT_ON_ERROR); ++ sum += value; ++ node_zones += SIZE(zone_struct); ++ continue; ++ } ++ ++ if (fi->flags == GET_FREE_HIGHMEM_PAGES) { ++ if (i == vt->ZONE_HIGHMEM) { ++ readmem(node_zones+ ++ OFFSET(zone_struct_free_pages), ++ KVADDR, &value, sizeof(ulong), ++ "node_zones free_pages", ++ FAULT_ON_ERROR); ++ sum += value; ++ } ++ node_zones += SIZE(zone_struct); ++ continue; ++ } ++ ++ if (fi->flags == GET_ZONE_SIZES) { ++ readmem(node_zones+zone_size_offset, ++ KVADDR, &size, sizeof(ulong), ++ "node_zones {mem}size", FAULT_ON_ERROR); ++ sum += size; ++ node_zones += SIZE(zone_struct); ++ continue; ++ } ++ ++ if ((i == 0) && (vt->flags & NODES)) { ++ if (n) { ++ fprintf(fp, "\n"); ++ pad_line(fp, ++ VADDR_PRLEN > 8 ? 74 : 66, '-'); ++ fprintf(fp, "\n"); ++ } ++ fprintf(fp, "%sNODE\n %2d\n", ++ n ? "\n" : "", nt->node_id); ++ } ++ ++ fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", ++ i > 0 ? 
"\n" : "", ++ zone_hdr, ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, ++ "MEM_MAP")); ++ ++ fprintf(fp, "%3d ", i); ++ ++ readmem(node_zones+OFFSET(zone_struct_name), KVADDR, ++ &value, sizeof(void *), ++ "node_zones name", FAULT_ON_ERROR); ++ if (read_string(value, buf, BUFSIZE-1)) ++ fprintf(fp, "%-9s ", buf); ++ else ++ fprintf(fp, "(unknown) "); ++ ++ readmem(node_zones+zone_size_offset, KVADDR, ++ &size, sizeof(ulong), ++ "node_zones {mem}size", FAULT_ON_ERROR); ++ fprintf(fp, "%6ld ", size); ++ ++ readmem(node_zones+OFFSET(zone_struct_free_pages), ++ KVADDR, &value, sizeof(ulong), ++ "node_zones free_pages", FAULT_ON_ERROR); ++ ++ fprintf(fp, "%6ld ", value); ++ ++ readmem(node_zones+OFFSET(zone_struct_zone_start_paddr), ++ KVADDR, &zone_start_paddr, sizeof(ulong), ++ "node_zones zone_start_paddr", FAULT_ON_ERROR); ++ readmem(node_zones+OFFSET(zone_struct_zone_start_mapnr), ++ KVADDR, &zone_start_mapnr, sizeof(ulong), ++ "node_zones zone_start_mapnr", FAULT_ON_ERROR); ++ readmem(node_zones+OFFSET(zone_struct_zone_mem_map), ++ KVADDR, &zone_mem_map, sizeof(ulong), ++ "node_zones zone_mem_map", FAULT_ON_ERROR); ++ ++ fprintf(fp, "%s %s %s\n", ++ mkstring(buf1, VADDR_PRLEN, ++ CENTER|LONG_HEX,MKSTR(zone_mem_map)), ++ mkstring(buf2, strlen("START_PADDR"), ++ CENTER|LONG_HEX|RJUST, ++ MKSTR(zone_start_paddr)), ++ mkstring(buf3, strlen("START_MAPNR"), ++ CENTER|LONG_DEC|RJUST, ++ MKSTR(zone_start_mapnr))); ++ ++ sum += value; ++ ++ if (value) ++ found += dump_zone_free_area(node_zones+ ++ OFFSET(zone_struct_free_area), ++ vt->nr_free_areas, verbose); ++ ++ node_zones += SIZE(zone_struct); ++ } ++ } ++ ++ hq_close(); ++ ++ if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { ++ fi->retval = sum; ++ return; ++ } ++ ++ fprintf(fp, "\nnr_free_pages: %ld ", sum); ++ if (sum == found) ++ fprintf(fp, "(verified)\n"); ++ else ++ fprintf(fp, "(found %ld)\n", found); ++ ++ if (!do_search) ++ return; ++ ++ found = FALSE; ++ rewind(pc->tmpfile); ++ order = offset = 0; ++ last_node[0] = NULLCHAR; ++ last_zone[0] = NULLCHAR; ++ last_area[0] = NULLCHAR; ++ last_area_hdr[0] = NULLCHAR; ++ ++ ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); ++ strcpy(last_node, strip_linefeeds(buf)); ++ continue; ++ } ++ if (STRNEQ(buf, "ZONE")) { ++ fgets(buf, BUFSIZE, pc->tmpfile); ++ strcpy(last_zone, strip_linefeeds(buf)); ++ continue; ++ } ++ if (STRNEQ(buf, "AREA")) { ++ strcpy(last_area_hdr, buf); ++ fgets(buf, BUFSIZE, pc->tmpfile); ++ strcpy(last_area, strip_linefeeds(buf)); ++ p = strstr(buf, "k"); ++ *p = NULLCHAR; ++ while (*p != ' ') ++ p--; ++ chunk_size = atol(p+1) * 1024; ++ if (chunk_size == PAGESIZE()) ++ order = 0; ++ else ++ order++; ++ continue; ++ } ++ ++ if (CRASHDEBUG(0) && ++ !hexadecimal(strip_linefeeds(buf), 0)) ++ continue; ++ ++ errflag = 0; ++ this_addr = htol(strip_linefeeds(buf), ++ RETURN_ON_ERROR, &errflag); ++ if (errflag) ++ continue; ++ ++ if (!page_to_phys(this_addr, &this_phys)) ++ continue; ++ ++ if ((searchphys >= this_phys) && ++ (searchphys < (this_phys+chunk_size))) { ++ if (searchphys > this_phys) ++ offset = (searchphys - this_phys)/PAGESIZE(); ++ found = TRUE; ++ break; ++ } ++ ++ } ++ close_tmpfile(); ++ ++ if (found) { ++ if (strlen(last_node)) ++ fprintf(fp, "NODE\n%s\n", last_node); ++ fprintf(fp, "%s %s START_PADDR START_MAPNR\n", ++ zone_hdr, ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); ++ fprintf(fp, "%s\n", last_zone); ++ fprintf(fp, last_area_hdr); ++ fprintf(fp, "%s\n", last_area); ++ 
fprintf(fp, "%lx ", this_addr); ++ if (order) { ++ switch (fi->memtype) ++ { ++ case KVADDR: ++ fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); ++ break; ++ case PHYSADDR: ++ fprintf(fp, "(%llx is %s", fi->spec_addr, ++ PAGEOFFSET(fi->spec_addr) ? "in " : ""); ++ break; ++ } ++ fprintf(fp, "%s of %ld pages) ", ++ ordinal(offset+1, buf), power(2, order)); ++ } ++ ++ fi->retval = TRUE; ++ fprintf(fp, "\n"); ++ } ++} ++ ++ ++/* ++ * Same as dump_free_pages_zones_v1(), but updated for numerous 2.6 zone ++ * and free_area related data structure changes. ++ */ ++static void ++dump_free_pages_zones_v2(struct meminfo *fi) ++{ ++ int i, n; ++ ulong node_zones; ++ ulong size; ++ long zone_size_offset; ++ long chunk_size; ++ int order, errflag, do_search; ++ ulong offset, verbose, value, sum, found; ++ ulong this_addr; ++ physaddr_t phys, this_phys, searchphys; ++ ulong pp; ++ ulong zone_mem_map; ++ ulong zone_start_paddr; ++ ulong zone_start_pfn; ++ ulong zone_start_mapnr; ++ struct node_table *nt; ++ char buf[BUFSIZE], *p; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char buf3[BUFSIZE]; ++ char last_node[BUFSIZE]; ++ char last_zone[BUFSIZE]; ++ char last_area[BUFSIZE]; ++ char last_area_hdr[BUFSIZE]; ++ ++ if (!(vt->flags & (NODES|ZONES))) ++ error(FATAL, ++ "dump_free_pages_zones_v2 called without (NODES|ZONES)\n"); ++ ++ if (fi->flags & ADDRESS_SPECIFIED) { ++ switch (fi->memtype) ++ { ++ case KVADDR: ++ if (!page_to_phys(fi->spec_addr, &searchphys)) { ++ if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) ++ return; ++ } ++ break; ++ case PHYSADDR: ++ searchphys = fi->spec_addr; ++ break; ++ default: ++ error(FATAL, ++ "dump_free_pages_zones_v2: no memtype specified\n"); ++ } ++ do_search = TRUE; ++ } else { ++ searchphys = 0; ++ do_search = FALSE; ++ } ++ ++ verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; ++ ++ if (VALID_MEMBER(zone_spanned_pages)) ++ zone_size_offset = OFFSET(zone_spanned_pages); ++ else ++ error(FATAL, "zone struct has no spanned_pages field\n"); ++ ++ if (do_search) ++ open_tmpfile(); ++ ++ hq_open(); ++ ++ for (n = sum = found = 0; n < vt->numnodes; n++) { ++ nt = &vt->node_table[n]; ++ node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ ++ for (i = 0; i < vt->nr_zones; i++) { ++ if (fi->flags == GET_FREE_PAGES) { ++ readmem(node_zones+ ++ OFFSET(zone_free_pages), ++ KVADDR, &value, sizeof(ulong), ++ "node_zones free_pages", ++ FAULT_ON_ERROR); ++ sum += value; ++ node_zones += SIZE(zone); ++ continue; ++ } ++ ++ if (fi->flags == GET_FREE_HIGHMEM_PAGES) { ++ if (i == vt->ZONE_HIGHMEM) { ++ readmem(node_zones+ ++ OFFSET(zone_free_pages), ++ KVADDR, &value, sizeof(ulong), ++ "node_zones free_pages", ++ FAULT_ON_ERROR); ++ sum += value; ++ } ++ node_zones += SIZE(zone); ++ continue; ++ } ++ ++ if (fi->flags == GET_ZONE_SIZES) { ++ readmem(node_zones+zone_size_offset, ++ KVADDR, &size, sizeof(ulong), ++ "node_zones size", FAULT_ON_ERROR); ++ sum += size; ++ node_zones += SIZE(zone); ++ continue; ++ } ++ ++ if ((i == 0) && ((vt->flags & NODES) || (vt->numnodes > 1))) { ++ if (n) { ++ fprintf(fp, "\n"); ++ pad_line(fp, ++ VADDR_PRLEN > 8 ? 74 : 66, '-'); ++ fprintf(fp, "\n"); ++ } ++ fprintf(fp, "%sNODE\n %2d\n", ++ n ? "\n" : "", nt->node_id); ++ } ++ ++ fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", ++ i > 0 ? 
"\n" : "", ++ zone_hdr, ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, ++ "MEM_MAP")); ++ ++ fprintf(fp, "%3d ", i); ++ ++ readmem(node_zones+OFFSET(zone_name), KVADDR, ++ &value, sizeof(void *), ++ "node_zones name", FAULT_ON_ERROR); ++ if (read_string(value, buf, BUFSIZE-1)) ++ fprintf(fp, "%-9s ", buf); ++ else ++ fprintf(fp, "(unknown) "); ++ ++ readmem(node_zones+zone_size_offset, KVADDR, ++ &size, sizeof(ulong), ++ "node_zones size", FAULT_ON_ERROR); ++ fprintf(fp, "%6ld ", size); ++ ++ readmem(node_zones+OFFSET(zone_free_pages), ++ KVADDR, &value, sizeof(ulong), ++ "node_zones free_pages", FAULT_ON_ERROR); ++ ++ fprintf(fp, "%6ld ", value); ++ ++ if (VALID_MEMBER(zone_zone_mem_map)) { ++ readmem(node_zones+OFFSET(zone_zone_mem_map), ++ KVADDR, &zone_mem_map, sizeof(ulong), ++ "node_zones zone_mem_map", FAULT_ON_ERROR); ++ } ++ ++ readmem(node_zones+ OFFSET(zone_zone_start_pfn), ++ KVADDR, &zone_start_pfn, sizeof(ulong), ++ "node_zones zone_start_pfn", FAULT_ON_ERROR); ++ zone_start_paddr = PTOB(zone_start_pfn); ++ ++ if (!VALID_MEMBER(zone_zone_mem_map)) { ++ if (IS_SPARSEMEM() || IS_DISCONTIGMEM()) { ++ zone_mem_map = 0; ++ if (size) { ++ phys = PTOB(zone_start_pfn); ++ if (phys_to_page(phys, &pp)) ++ zone_mem_map = pp; ++ } ++ } else if (vt->flags & FLATMEM) { ++ zone_mem_map = 0; ++ if (size) ++ zone_mem_map = nt->mem_map + ++ (zone_start_pfn * SIZE(page)); ++ } else ++ error(FATAL, "\ncannot determine zone mem_map: TBD\n"); ++ } ++ ++ if (zone_mem_map) ++ zone_start_mapnr = ++ (zone_mem_map - nt->mem_map) / ++ SIZE(page); ++ else ++ zone_start_mapnr = 0; ++ ++ fprintf(fp, "%s %s %s\n", ++ mkstring(buf1, VADDR_PRLEN, ++ CENTER|LONG_HEX,MKSTR(zone_mem_map)), ++ mkstring(buf2, strlen("START_PADDR"), ++ CENTER|LONG_HEX|RJUST, ++ MKSTR(zone_start_paddr)), ++ mkstring(buf3, strlen("START_MAPNR"), ++ CENTER|LONG_DEC|RJUST, ++ MKSTR(zone_start_mapnr))); ++ ++ sum += value; ++ ++ if (value) ++ found += dump_zone_free_area(node_zones+ ++ OFFSET(zone_free_area), ++ vt->nr_free_areas, verbose); ++ ++ node_zones += SIZE(zone); ++ } ++ } ++ ++ hq_close(); ++ ++ if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { ++ fi->retval = sum; ++ return; ++ } ++ ++ fprintf(fp, "\nnr_free_pages: %ld ", sum); ++ if (sum == found) ++ fprintf(fp, "(verified)\n"); ++ else ++ fprintf(fp, "(found %ld)\n", found); ++ ++ if (!do_search) ++ return; ++ ++ found = FALSE; ++ rewind(pc->tmpfile); ++ order = offset = 0; ++ last_node[0] = NULLCHAR; ++ last_zone[0] = NULLCHAR; ++ last_area[0] = NULLCHAR; ++ last_area_hdr[0] = NULLCHAR; ++ ++ ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); ++ strcpy(last_node, strip_linefeeds(buf)); ++ continue; ++ } ++ if (STRNEQ(buf, "ZONE")) { ++ fgets(buf, BUFSIZE, pc->tmpfile); ++ strcpy(last_zone, strip_linefeeds(buf)); ++ continue; ++ } ++ if (STRNEQ(buf, "AREA")) { ++ strcpy(last_area_hdr, buf); ++ fgets(buf, BUFSIZE, pc->tmpfile); ++ strcpy(last_area, strip_linefeeds(buf)); ++ p = strstr(buf, "k"); ++ *p = NULLCHAR; ++ while (*p != ' ') ++ p--; ++ chunk_size = atol(p+1) * 1024; ++ if (chunk_size == PAGESIZE()) ++ order = 0; ++ else ++ order++; ++ continue; ++ } ++ ++ if (CRASHDEBUG(0) && ++ !hexadecimal(strip_linefeeds(buf), 0)) ++ continue; ++ ++ errflag = 0; ++ this_addr = htol(strip_linefeeds(buf), ++ RETURN_ON_ERROR, &errflag); ++ if (errflag) ++ continue; ++ ++ if (!page_to_phys(this_addr, &this_phys)) ++ continue; ++ ++ if ((searchphys >= this_phys) && ++ (searchphys < (this_phys+chunk_size))) { ++ if 
(searchphys > this_phys) ++ offset = (searchphys - this_phys)/PAGESIZE(); ++ found = TRUE; ++ break; ++ } ++ ++ } ++ close_tmpfile(); ++ ++ if (found) { ++ if (strlen(last_node)) ++ fprintf(fp, "NODE\n%s\n", last_node); ++ fprintf(fp, "%s %s START_PADDR START_MAPNR\n", ++ zone_hdr, ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); ++ fprintf(fp, "%s\n", last_zone); ++ fprintf(fp, last_area_hdr); ++ fprintf(fp, "%s\n", last_area); ++ fprintf(fp, "%lx ", this_addr); ++ if (order) { ++ switch (fi->memtype) ++ { ++ case KVADDR: ++ fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); ++ break; ++ case PHYSADDR: ++ fprintf(fp, "(%llx is %s", fi->spec_addr, ++ PAGEOFFSET(fi->spec_addr) ? "in " : ""); ++ break; ++ } ++ fprintf(fp, "%s of %ld pages)", ++ ordinal(offset+1, buf), chunk_size/PAGESIZE()); ++ } ++ ++ fi->retval = TRUE; ++ fprintf(fp, "\n"); ++ } ++} ++ ++ ++static char * ++page_usage_hdr = "ZONE NAME FREE ACTIVE INACTIVE_DIRTY INACTIVE_CLEAN MIN/LOW/HIGH"; ++ ++/* ++ * Display info about the non-free pages in each zone. ++ */ ++static int ++dump_zone_page_usage(void) ++{ ++ int i, n; ++ ulong value, node_zones; ++ struct node_table *nt; ++ ulong inactive_dirty_pages, inactive_clean_pages, active_pages; ++ ulong free_pages, pages_min, pages_low, pages_high; ++ char namebuf[BUFSIZE]; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char buf3[BUFSIZE]; ++ ++ if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) || ++ !VALID_MEMBER(zone_struct_inactive_clean_pages) || ++ !VALID_MEMBER(zone_struct_active_pages) || ++ !VALID_MEMBER(zone_struct_pages_min) || ++ !VALID_MEMBER(zone_struct_pages_low) || ++ !VALID_MEMBER(zone_struct_pages_high)) ++ return FALSE; ++ ++ fprintf(fp, "\n"); ++ ++ for (n = 0; n < vt->numnodes; n++) { ++ nt = &vt->node_table[n]; ++ node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ ++ if ((i == 0) && (vt->flags & NODES)) { ++ fprintf(fp, "%sNODE\n %2d\n", ++ n ? 
"\n" : "", nt->node_id); ++ } ++ fprintf(fp, "%s\n", page_usage_hdr); ++ ++ for (i = 0; i < vt->nr_zones; i++) { ++ readmem(node_zones+OFFSET(zone_struct_free_pages), ++ KVADDR, &free_pages, sizeof(ulong), ++ "node_zones free_pages", FAULT_ON_ERROR); ++ readmem(node_zones+ ++ OFFSET(zone_struct_inactive_dirty_pages), ++ KVADDR, &inactive_dirty_pages, sizeof(ulong), ++ "node_zones inactive_dirty_pages", ++ FAULT_ON_ERROR); ++ readmem(node_zones+ ++ OFFSET(zone_struct_inactive_clean_pages), ++ KVADDR, &inactive_clean_pages, sizeof(ulong), ++ "node_zones inactive_clean_pages", ++ FAULT_ON_ERROR); ++ readmem(node_zones+OFFSET(zone_struct_active_pages), ++ KVADDR, &active_pages, sizeof(ulong), ++ "node_zones active_pages", FAULT_ON_ERROR); ++ readmem(node_zones+OFFSET(zone_struct_pages_min), ++ KVADDR, &pages_min, sizeof(ulong), ++ "node_zones pages_min", FAULT_ON_ERROR); ++ readmem(node_zones+OFFSET(zone_struct_pages_low), ++ KVADDR, &pages_low, sizeof(ulong), ++ "node_zones pages_low", FAULT_ON_ERROR); ++ readmem(node_zones+OFFSET(zone_struct_pages_high), ++ KVADDR, &pages_high, sizeof(ulong), ++ "node_zones pages_high", FAULT_ON_ERROR); ++ ++ readmem(node_zones+OFFSET(zone_struct_name), KVADDR, ++ &value, sizeof(void *), ++ "node_zones name", FAULT_ON_ERROR); ++ if (read_string(value, buf1, BUFSIZE-1)) ++ sprintf(namebuf, "%-8s", buf1); ++ else ++ sprintf(namebuf, "(unknown)"); ++ ++ sprintf(buf2, "%ld/%ld/%ld", ++ pages_min, pages_low, pages_high); ++ fprintf(fp, "%3d %s %7ld %7ld %15ld %15ld %s\n", ++ i, ++ namebuf, ++ free_pages, ++ active_pages, ++ inactive_dirty_pages, ++ inactive_clean_pages, ++ mkstring(buf3, strlen("MIN/LOW/HIGH"), ++ CENTER, buf2)); ++ ++ node_zones += SIZE(zone_struct); ++ } ++ } ++ ++ return TRUE; ++} ++ ++ ++/* ++ * Dump the num "order" contents of the zone_t free_area array. 
++ */ ++char *free_area_hdr3 = "AREA SIZE FREE_AREA_STRUCT\n"; ++char *free_area_hdr4 = "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; ++ ++static int ++dump_zone_free_area(ulong free_area, int num, ulong verbose) ++{ ++ int i, j; ++ long chunk_size; ++ int flen, total_free, cnt; ++ char buf[BUFSIZE]; ++ ulong free_area_buf[3]; ++ char *free_area_buf2; ++ char *free_list_buf; ++ ulong free_list; ++ struct list_data list_data, *ld; ++ int list_count; ++ ulong *free_ptr; ++ ++ if (VALID_STRUCT(free_area_struct)) { ++ if (SIZE(free_area_struct) != (3 * sizeof(ulong))) ++ error(FATAL, ++ "unrecognized free_area_struct size: %ld\n", ++ SIZE(free_area_struct)); ++ list_count = 1; ++ } else if (VALID_STRUCT(free_area)) { ++ if (SIZE(free_area) == (3 * sizeof(ulong))) ++ list_count = 1; ++ else { ++ list_count = MEMBER_SIZE("free_area", ++ "free_list")/SIZE(list_head); ++ free_area_buf2 = GETBUF(SIZE(free_area)); ++ free_list_buf = GETBUF(SIZE(list_head)); ++ readmem(free_area, KVADDR, free_area_buf2, ++ SIZE(free_area), "free_area struct", ++ FAULT_ON_ERROR); ++ } ++ } else error(FATAL, ++ "neither free_area_struct or free_area structures exist\n"); ++ ++ ld = &list_data; ++ ++ if (!verbose) ++ fprintf(fp, free_area_hdr4); ++ ++ total_free = 0; ++ flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); ++ ++ if (list_count > 1) ++ goto multiple_lists; ++ ++ for (i = 0; i < num; i++, ++ free_area += SIZE_OPTION(free_area_struct, free_area)) { ++ if (verbose) ++ fprintf(fp, free_area_hdr3); ++ fprintf(fp, "%3d ", i); ++ chunk_size = power(2, i); ++ sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); ++ fprintf(fp, " %7s ", buf); ++ ++ readmem(free_area, KVADDR, free_area_buf, ++ sizeof(ulong) * 3, "free_area_struct", FAULT_ON_ERROR); ++ ++ fprintf(fp, "%s ", ++ mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_area))); ++ ++ if (free_area_buf[0] == free_area) { ++ if (verbose) ++ fprintf(fp, "\n"); ++ else ++ fprintf(fp, "%6d %6d\n", 0, 0); ++ continue; ++ } ++ ++ if (verbose) ++ fprintf(fp, "\n"); ++ ++ BZERO(ld, sizeof(struct list_data)); ++ ld->flags = verbose | RETURN_ON_DUPLICATE; ++ ld->start = free_area_buf[0]; ++ ld->end = free_area; ++ if (VALID_MEMBER(page_list_next)) ++ ld->list_head_offset = OFFSET(page_list); ++ else if (VALID_MEMBER(page_lru)) ++ ld->list_head_offset = OFFSET(page_lru)+ ++ OFFSET(list_head_next); ++ else error(FATAL, ++ "neither page.list or page.lru exist?\n"); ++ ++ cnt = do_list(ld); ++ if (cnt < 0) ++ error(FATAL, ++ "corrupted free list from free_area_struct: %lx\n", ++ free_area); ++ ++ if (!verbose) ++ fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size); ++ ++ total_free += (cnt * chunk_size); ++ } ++ ++ return total_free; ++ ++multiple_lists: ++ ++ for (i = 0; i < num; i++, ++ free_area += SIZE_OPTION(free_area_struct, free_area)) { ++ ++ readmem(free_area, KVADDR, free_area_buf2, ++ SIZE(free_area), "free_area struct", FAULT_ON_ERROR); ++ ++ for (j = 0, free_list = free_area; j < list_count; ++ j++, free_list += SIZE(list_head)) { ++ ++ if (verbose) ++ fprintf(fp, free_area_hdr3); ++ ++ fprintf(fp, "%3d ", i); ++ chunk_size = power(2, i); ++ sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); ++ fprintf(fp, " %7s ", buf); ++ ++ readmem(free_list, KVADDR, free_list_buf, ++ SIZE(list_head), "free_area free_list", ++ FAULT_ON_ERROR); ++ fprintf(fp, "%s ", ++ mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_list))); ++ ++ free_ptr = (ulong *)free_list_buf; ++ ++ if (*free_ptr == free_list) { ++ if (verbose) ++ fprintf(fp, "\n"); ++ else ++ fprintf(fp, "%6d %6d\n", 0, 0); 
++ continue; ++ } ++ ++ BZERO(ld, sizeof(struct list_data)); ++ ld->flags = verbose | RETURN_ON_DUPLICATE; ++ ld->start = *free_ptr; ++ ld->end = free_list; ++ ld->list_head_offset = OFFSET(page_lru) + ++ OFFSET(list_head_next); ++ ++ cnt = do_list(ld); ++ if (cnt < 0) ++ error(FATAL, ++ "corrupted free list %d from free_area struct: %lx\n", ++ j, free_area); ++ ++ if (!verbose) ++ fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size); ++ ++ total_free += (cnt * chunk_size); ++ } ++ } ++ ++ FREEBUF(free_area_buf2); ++ FREEBUF(free_list_buf); ++ return total_free; ++} ++ ++/* ++ * dump_kmeminfo displays basic memory use information typically shown ++ * by /proc/meminfo, and then some... ++ */ ++ ++char *kmeminfo_hdr = " PAGES TOTAL PERCENTAGE\n"; ++ ++static void ++dump_kmeminfo(void) ++{ ++ ulong totalram_pages; ++ ulong freeram_pages; ++ ulong used_pages; ++ ulong shared_pages; ++ ulong buffer_pages; ++ ulong subtract_buffer_pages; ++ ulong totalswap_pages, totalused_pages; ++ ulong totalhigh_pages; ++ ulong freehighmem_pages; ++ ulong totallowmem_pages; ++ ulong freelowmem_pages; ++ long nr_file_pages, nr_slab; ++ ulong swapper_space_nrpages; ++ ulong pct; ++ ulong value1, value2; ++ uint tmp; ++ struct meminfo meminfo; ++ struct gnu_request req; ++ long page_cache_size; ++ ulong get_totalram; ++ ulong get_buffers; ++ ulong get_slabs; ++ struct syment *sp_array[2]; ++ char buf[BUFSIZE]; ++ ++ ++ BZERO(&meminfo, sizeof(struct meminfo)); ++ meminfo.flags = GET_ALL; ++ dump_mem_map(&meminfo); ++ get_totalram = meminfo.get_totalram; ++ shared_pages = meminfo.get_shared; ++ get_buffers = meminfo.get_buffers; ++ get_slabs = meminfo.get_slabs; ++ ++ /* ++ * If vm_stat array exists, override page search info. ++ */ ++ if (vm_stat_init()) { ++ if (dump_vm_stat("NR_SLAB", &nr_slab, 0)) ++ get_slabs = nr_slab; ++ else if (dump_vm_stat("NR_SLAB_RECLAIMABLE", &nr_slab, 0)) { ++ get_slabs = nr_slab; ++ if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE", &nr_slab, 0)) ++ get_slabs += nr_slab; ++ } ++ } ++ ++ fprintf(fp, kmeminfo_hdr); ++ /* ++ * Get total RAM based upon how the various versions of si_meminfo() ++ * have done it, latest to earliest: ++ * ++ * Prior to 2.3.36, count all mem_map pages minus the reserved ones. ++ * From 2.3.36 onwards, use "totalram_pages" if set. ++ */ ++ if (symbol_exists("totalram_pages")) { ++ totalram_pages = vt->totalram_pages ? ++ vt->totalram_pages : get_totalram; ++ } else ++ totalram_pages = get_totalram; ++ ++ fprintf(fp, "%10s %7ld %11s ----\n", "TOTAL MEM", ++ totalram_pages, pages_to_size(totalram_pages, buf)); ++ ++ /* ++ * Get free pages from dump_free_pages() or its associates. ++ * Used pages are a free-bee... ++ */ ++ meminfo.flags = GET_FREE_PAGES; ++ vt->dump_free_pages(&meminfo); ++ freeram_pages = meminfo.retval; ++ pct = (freeram_pages * 100)/totalram_pages; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", ++ "FREE", freeram_pages, pages_to_size(freeram_pages, buf), pct); ++ ++ used_pages = totalram_pages - freeram_pages; ++ pct = (used_pages * 100)/totalram_pages; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", ++ "USED", used_pages, pages_to_size(used_pages, buf), pct); ++ ++ /* ++ * Get shared pages from dump_mem_map(). Note that this is done ++ * differently than the kernel -- it just tallies the non-reserved ++ * pages that have a count of greater than 1. 
++ */ ++ pct = (shared_pages * 100)/totalram_pages; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", ++ "SHARED", shared_pages, pages_to_size(shared_pages, buf), pct); ++ ++ subtract_buffer_pages = 0; ++ if (symbol_exists("buffermem_pages")) { ++ get_symbol_data("buffermem_pages", sizeof(int), &tmp); ++ buffer_pages = (ulong)tmp; ++ } else if (symbol_exists("buffermem")) { ++ get_symbol_data("buffermem", sizeof(int), &tmp); ++ buffer_pages = BTOP(tmp); ++ } else if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && ++ symbol_exists("nr_blockdev_pages")) { ++ subtract_buffer_pages = buffer_pages = nr_blockdev_pages(); ++ } else ++ buffer_pages = 0; ++ ++ pct = (buffer_pages * 100)/totalram_pages; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", ++ "BUFFERS", buffer_pages, pages_to_size(buffer_pages, buf), pct); ++ ++ if (CRASHDEBUG(1)) ++ error(NOTE, "pages with buffers: %ld\n", get_buffers); ++ ++ /* ++ * page_cache_size has evolved from a long to an atomic_t to ++ * not existing at all. ++ */ ++ ++ if (symbol_exists("page_cache_size")) { ++ get_symbol_type("page_cache_size", NULL, &req); ++ if (req.length == sizeof(int)) { ++ get_symbol_data("page_cache_size", sizeof(int), &tmp); ++ page_cache_size = (long)tmp; ++ } else ++ get_symbol_data("page_cache_size", sizeof(long), ++ &page_cache_size); ++ page_cache_size -= subtract_buffer_pages; ++ } else if (symbol_exists("nr_pagecache")) { ++ get_symbol_data("nr_pagecache", sizeof(int), &tmp); ++ page_cache_size = (long)tmp; ++ page_cache_size -= subtract_buffer_pages; ++ } else if (dump_vm_stat("NR_FILE_PAGES", &nr_file_pages, 0)) { ++ char *swapper_space = GETBUF(SIZE(address_space)); ++ ++ if (!readmem(symbol_value("swapper_space"), KVADDR, swapper_space, ++ SIZE(address_space), "swapper_space", RETURN_ON_ERROR)) ++ swapper_space_nrpages = 0; ++ else ++ swapper_space_nrpages = ULONG(swapper_space + ++ OFFSET(address_space_nrpages)); ++ ++ page_cache_size = nr_file_pages - swapper_space_nrpages - ++ buffer_pages; ++ FREEBUF(swapper_space); ++ } ++ ++ ++ pct = (page_cache_size * 100)/totalram_pages; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", ++ "CACHED", page_cache_size, ++ pages_to_size(page_cache_size, buf), pct); ++ ++ /* ++ * Although /proc/meminfo doesn't show it, show how much memory ++ * the slabs take up. ++ */ ++ ++ pct = (get_slabs * 100)/totalram_pages; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", ++ "SLAB", get_slabs, pages_to_size(get_slabs, buf), pct); ++ ++ if (symbol_exists("totalhigh_pages")) { ++ switch (get_syment_array("totalhigh_pages", sp_array, 2)) ++ { ++ case 1: ++ get_symbol_data("totalhigh_pages", sizeof(ulong), ++ &totalhigh_pages); ++ break; ++ case 2: ++ if (!(readmem(sp_array[0]->value, KVADDR, ++ &value1, sizeof(ulong), ++ "totalhigh_pages #1", RETURN_ON_ERROR))) ++ break; ++ if (!(readmem(sp_array[1]->value, KVADDR, ++ &value2, sizeof(ulong), ++ "totalhigh_pages #2", RETURN_ON_ERROR))) ++ break; ++ totalhigh_pages = MAX(value1, value2); ++ break; ++ } ++ ++ pct = totalhigh_pages ? ++ (totalhigh_pages * 100)/totalram_pages : 0; ++ fprintf(fp, "\n%10s %7ld %11s %3ld%% of TOTAL MEM\n", ++ "TOTAL HIGH", totalhigh_pages, ++ pages_to_size(totalhigh_pages, buf), pct); ++ ++ meminfo.flags = GET_FREE_HIGHMEM_PAGES; ++ vt->dump_free_pages(&meminfo); ++ freehighmem_pages = meminfo.retval; ++ pct = freehighmem_pages ? 
++ (freehighmem_pages * 100)/totalhigh_pages : 0; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL HIGH\n", ++ "FREE HIGH", freehighmem_pages, ++ pages_to_size(freehighmem_pages, buf), pct); ++ ++ totallowmem_pages = totalram_pages - totalhigh_pages; ++ pct = (totallowmem_pages * 100)/totalram_pages; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", ++ "TOTAL LOW", totallowmem_pages, ++ pages_to_size(totallowmem_pages, buf), pct); ++ ++ freelowmem_pages = freeram_pages - freehighmem_pages; ++ pct = (freelowmem_pages * 100)/totallowmem_pages; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL LOW\n", ++ "FREE LOW", freelowmem_pages, ++ pages_to_size(freelowmem_pages, buf), pct); ++ } ++ ++ /* ++ * get swap data from dump_swap_info(). ++ */ ++ fprintf(fp, "\n"); ++ if (dump_swap_info(RETURN_ON_ERROR, &totalswap_pages, ++ &totalused_pages)) { ++ fprintf(fp, "%10s %7ld %11s ----\n", ++ "TOTAL SWAP", totalswap_pages, ++ pages_to_size(totalswap_pages, buf)); ++ pct = totalswap_pages ? (totalused_pages * 100) / ++ totalswap_pages : 100; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL SWAP\n", ++ "SWAP USED", totalused_pages, ++ pages_to_size(totalused_pages, buf), pct); ++ pct = totalswap_pages ? ((totalswap_pages - totalused_pages) * ++ 100) / totalswap_pages : 0; ++ fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL SWAP\n", ++ "SWAP FREE", ++ totalswap_pages - totalused_pages, ++ pages_to_size(totalswap_pages - totalused_pages, buf), ++ pct); ++ } else ++ error(INFO, "swap_info[%ld].swap_map at %lx is unaccessible\n", ++ totalused_pages, totalswap_pages); ++ ++ dump_zone_page_usage(); ++} ++ ++/* ++ * Emulate 2.6 nr_blockdev_pages() function. ++ */ ++static ulong ++nr_blockdev_pages(void) ++{ ++ struct list_data list_data, *ld; ++ ulong *bdevlist; ++ int i, bdevcnt; ++ ulong inode, address_space; ++ ulong nrpages; ++ char *block_device_buf, *inode_buf, *address_space_buf; ++ ++ ld = &list_data; ++ BZERO(ld, sizeof(struct list_data)); ++ get_symbol_data("all_bdevs", sizeof(void *), &ld->start); ++ if (empty_list(ld->start)) ++ return 0; ++ ld->end = symbol_value("all_bdevs"); ++ ld->list_head_offset = OFFSET(block_device_bd_list); ++ ++ block_device_buf = GETBUF(SIZE(block_device)); ++ inode_buf = GETBUF(SIZE(inode)); ++ address_space_buf = GETBUF(SIZE(address_space)); ++ ++ hq_open(); ++ bdevcnt = do_list(ld); ++ bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong)); ++ bdevcnt = retrieve_list(bdevlist, bdevcnt); ++ hq_close(); ++ ++ /* ++ * go through the block_device list, emulating: ++ * ++ * ret += bdev->bd_inode->i_mapping->nrpages; ++ */ ++ for (i = nrpages = 0; i < bdevcnt; i++) { ++ readmem(bdevlist[i], KVADDR, block_device_buf, ++ SIZE(block_device), "block_device buffer", ++ FAULT_ON_ERROR); ++ inode = ULONG(block_device_buf + OFFSET(block_device_bd_inode)); ++ readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", ++ FAULT_ON_ERROR); ++ address_space = ULONG(inode_buf + OFFSET(inode_i_mapping)); ++ readmem(address_space, KVADDR, address_space_buf, ++ SIZE(address_space), "address_space buffer", ++ FAULT_ON_ERROR); ++ nrpages += ULONG(address_space_buf + ++ OFFSET(address_space_nrpages)); ++ } ++ ++ FREEBUF(bdevlist); ++ FREEBUF(block_device_buf); ++ FREEBUF(inode_buf); ++ FREEBUF(address_space_buf); ++ ++ return nrpages; ++} ++ ++/* ++ * dump_vmlist() displays information from the vmlist. 
++ */ ++ ++static void ++dump_vmlist(struct meminfo *vi) ++{ ++ char buf[BUFSIZE]; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ ulong vmlist; ++ ulong addr, size, next, pcheck, count, verified; ++ physaddr_t paddr; ++ ++ get_symbol_data("vmlist", sizeof(void *), &vmlist); ++ next = vmlist; ++ count = verified = 0; ++ ++ while (next) { ++ if (!(pc->curcmd_flags & HEADER_PRINTED) && (next == vmlist) && ++ !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC| ++ GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) { ++ fprintf(fp, "%s ", ++ mkstring(buf, MAX(strlen("VM_STRUCT"), VADDR_PRLEN), ++ CENTER|LJUST, "VM_STRUCT")); ++ fprintf(fp, "%s SIZE\n", ++ mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "), ++ CENTER|LJUST, "ADDRESS RANGE")); ++ pc->curcmd_flags |= HEADER_PRINTED; ++ } ++ ++ readmem(next+OFFSET(vm_struct_addr), KVADDR, ++ &addr, sizeof(void *), ++ "vmlist addr", FAULT_ON_ERROR); ++ readmem(next+OFFSET(vm_struct_size), KVADDR, ++ &size, sizeof(ulong), ++ "vmlist size", FAULT_ON_ERROR); ++ ++ if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) { ++ /* ++ * Preceding GET_VMLIST_COUNT set vi->retval. ++ */ ++ if (vi->flags & GET_VMLIST) { ++ if (count < vi->retval) { ++ vi->vmlist[count].addr = addr; ++ vi->vmlist[count].size = size; ++ } ++ } ++ count++; ++ goto next_entry; ++ } ++ ++ if (!(vi->flags & ADDRESS_SPECIFIED) || ++ ((vi->memtype == KVADDR) && ++ ((vi->spec_addr >= addr) && (vi->spec_addr < (addr+size))))) { ++ if (vi->flags & VMLIST_VERIFY) { ++ verified++; ++ break; ++ } ++ fprintf(fp, "%s%s %s - %s %6ld\n", ++ mkstring(buf,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, ++ MKSTR(next)), space(MINSPACE-1), ++ mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST, ++ MKSTR(addr)), ++ mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST, ++ MKSTR(addr+size)), ++ size); ++ } ++ ++ if ((vi->flags & ADDRESS_SPECIFIED) && ++ (vi->memtype == PHYSADDR)) { ++ for (pcheck = addr; pcheck < (addr+size); ++ pcheck += PAGESIZE()) { ++ if (!kvtop(NULL, pcheck, &paddr, 0)) ++ continue; ++ if ((vi->spec_addr >= paddr) && ++ (vi->spec_addr < (paddr+PAGESIZE()))) { ++ if (vi->flags & GET_PHYS_TO_VMALLOC) { ++ vi->retval = pcheck + ++ PAGEOFFSET(paddr); ++ return; ++ } else ++ fprintf(fp, ++ "%s%s %s - %s %6ld\n", ++ mkstring(buf, VADDR_PRLEN, ++ LONG_HEX|CENTER|LJUST, ++ MKSTR(next)), space(MINSPACE-1), ++ mkstring(buf1, VADDR_PRLEN, ++ LONG_HEX|RJUST, MKSTR(addr)), ++ mkstring(buf2, VADDR_PRLEN, ++ LONG_HEX|LJUST, ++ MKSTR(addr+size)), size); ++ break; ++ } ++ } ++ ++ } ++next_entry: ++ readmem(next+OFFSET(vm_struct_next), ++ KVADDR, &next, sizeof(void *), ++ "vmlist next", FAULT_ON_ERROR); ++ } ++ ++ if (vi->flags & GET_HIGHEST) ++ vi->retval = addr+size; ++ ++ if (vi->flags & GET_VMLIST_COUNT) ++ vi->retval = count; ++ ++ if (vi->flags & VMLIST_VERIFY) ++ vi->retval = verified; ++} ++ ++/* ++ * dump_page_lists() displays information from the active_list, ++ * inactive_dirty_list and inactive_clean_list from each zone. 
++ */ ++static int ++dump_page_lists(struct meminfo *mi) ++{ ++ int i, c, n, retval; ++ ulong node_zones, pgdat; ++ struct node_table *nt; ++ struct list_data list_data, *ld; ++ char buf[BUFSIZE]; ++ ulong value; ++ ulong inactive_clean_pages, inactive_clean_list; ++ int nr_active_pages, nr_inactive_pages; ++ int nr_inactive_dirty_pages; ++ ++ ld = &list_data; ++ ++ retval = FALSE; ++ nr_active_pages = nr_inactive_dirty_pages = -1; ++ ++ BZERO(ld, sizeof(struct list_data)); ++ ld->list_head_offset = OFFSET(page_lru); ++ if (mi->flags & ADDRESS_SPECIFIED) ++ ld->searchfor = mi->spec_addr; ++ else if (mi->flags & VERBOSE) ++ ld->flags |= VERBOSE; ++ ++ if (mi->flags & GET_ACTIVE_LIST) { ++ if (!symbol_exists("active_list")) ++ error(FATAL, ++ "active_list does not exist in this kernel\n"); ++ ++ if (symbol_exists("nr_active_pages")) ++ get_symbol_data("nr_active_pages", sizeof(int), ++ &nr_active_pages); ++ else ++ error(FATAL, ++ "nr_active_pages does not exist in this kernel\n"); ++ ++ ld->end = symbol_value("active_list"); ++ readmem(ld->end, KVADDR, &ld->start, sizeof(void *), ++ "LIST_HEAD contents", FAULT_ON_ERROR); ++ ++ if (mi->flags & VERBOSE) ++ fprintf(fp, "active_list:\n"); ++ ++ if (ld->start == ld->end) { ++ c = 0; ++ ld->searchfor = 0; ++ if (mi->flags & VERBOSE) ++ fprintf(fp, "(empty)\n"); ++ } else { ++ hq_open(); ++ c = do_list(ld); ++ hq_close(); ++ } ++ ++ if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { ++ fprintf(fp, "%lx\n", ld->searchfor); ++ retval = TRUE; ++ } else { ++ fprintf(fp, "%snr_active_pages: %d ", ++ mi->flags & VERBOSE ? "\n" : "", ++ nr_active_pages); ++ if (c != nr_active_pages) ++ fprintf(fp, "(found %d)\n", c); ++ else ++ fprintf(fp, "(verified)\n"); ++ } ++ } ++ ++ if (mi->flags & GET_INACTIVE_LIST) { ++ if (!symbol_exists("inactive_list")) ++ error(FATAL, ++ "inactive_list does not exist in this kernel\n"); ++ ++ if (symbol_exists("nr_inactive_pages")) ++ get_symbol_data("nr_inactive_pages", sizeof(int), ++ &nr_inactive_pages); ++ else ++ error(FATAL, ++ "nr_active_pages does not exist in this kernel\n"); ++ ++ ld->end = symbol_value("inactive_list"); ++ readmem(ld->end, KVADDR, &ld->start, sizeof(void *), ++ "LIST_HEAD contents", FAULT_ON_ERROR); ++ ++ if (mi->flags & VERBOSE) ++ fprintf(fp, "inactive_list:\n"); ++ ++ if (ld->start == ld->end) { ++ c = 0; ++ ld->searchfor = 0; ++ if (mi->flags & VERBOSE) ++ fprintf(fp, "(empty)\n"); ++ } else { ++ hq_open(); ++ c = do_list(ld); ++ hq_close(); ++ } ++ ++ if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { ++ fprintf(fp, "%lx\n", ld->searchfor); ++ retval = TRUE; ++ } else { ++ fprintf(fp, "%snr_inactive_pages: %d ", ++ mi->flags & VERBOSE ? "\n" : "", ++ nr_inactive_pages); ++ if (c != nr_inactive_pages) ++ fprintf(fp, "(found %d)\n", c); ++ else ++ fprintf(fp, "(verified)\n"); ++ } ++ } ++ ++ if (mi->flags & GET_INACTIVE_DIRTY) { ++ if (!symbol_exists("inactive_dirty_list")) ++ error(FATAL, ++ "inactive_dirty_list does not exist in this kernel\n"); ++ ++ if (symbol_exists("nr_inactive_dirty_pages")) ++ get_symbol_data("nr_inactive_dirty_pages", sizeof(int), ++ &nr_inactive_dirty_pages); ++ else ++ error(FATAL, ++ "nr_inactive_dirty_pages does not exist in this kernel\n"); ++ ++ ld->end = symbol_value("inactive_dirty_list"); ++ readmem(ld->end, KVADDR, &ld->start, sizeof(void *), ++ "LIST_HEAD contents", FAULT_ON_ERROR); ++ ++ if (mi->flags & VERBOSE) ++ fprintf(fp, "%sinactive_dirty_list:\n", ++ mi->flags & GET_ACTIVE_LIST ? 
"\n" : ""); ++ ++ if (ld->start == ld->end) { ++ c = 0; ++ ld->searchfor = 0; ++ if (mi->flags & VERBOSE) ++ fprintf(fp, "(empty)\n"); ++ } else { ++ hq_open(); ++ c = do_list(ld); ++ hq_close(); ++ } ++ ++ if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { ++ fprintf(fp, "%lx\n", ld->searchfor); ++ retval = TRUE; ++ } else { ++ fprintf(fp, "%snr_inactive_dirty_pages: %d ", ++ mi->flags & VERBOSE ? "\n" : "", ++ nr_inactive_dirty_pages); ++ if (c != nr_inactive_dirty_pages) ++ fprintf(fp, "(found %d)\n", c); ++ else ++ fprintf(fp, "(verified)\n"); ++ } ++ } ++ ++ if (mi->flags & GET_INACTIVE_CLEAN) { ++ if (INVALID_MEMBER(zone_struct_inactive_clean_list)) ++ error(FATAL, ++ "inactive_clean_list(s) do not exist in this kernel\n"); ++ ++ get_symbol_data("pgdat_list", sizeof(void *), &pgdat); ++ ++ if ((mi->flags & VERBOSE) && ++ (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY))) ++ fprintf(fp, "\n"); ++ ++ for (n = 0; pgdat; n++) { ++ nt = &vt->node_table[n]; ++ ++ node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ ++ for (i = 0; i < vt->nr_zones; i++) { ++ readmem(node_zones+OFFSET(zone_struct_name), ++ KVADDR, &value, sizeof(void *), ++ "zone_struct name", FAULT_ON_ERROR); ++ if (!read_string(value, buf, BUFSIZE-1)) ++ sprintf(buf, "(unknown) "); ++ ++ if (mi->flags & VERBOSE) { ++ if (vt->numnodes > 1) ++ fprintf(fp, "NODE %d ", n); ++ fprintf(fp, ++ "\"%s\" inactive_clean_list:\n", ++ buf); ++ } ++ ++ readmem(node_zones + ++ OFFSET(zone_struct_inactive_clean_pages), ++ KVADDR, &inactive_clean_pages, ++ sizeof(ulong), "inactive_clean_pages", ++ FAULT_ON_ERROR); ++ ++ readmem(node_zones + ++ OFFSET(zone_struct_inactive_clean_list), ++ KVADDR, &inactive_clean_list, ++ sizeof(ulong), "inactive_clean_list", ++ FAULT_ON_ERROR); ++ ++ ld->start = inactive_clean_list; ++ ld->end = node_zones + ++ OFFSET(zone_struct_inactive_clean_list); ++ if (mi->flags & ADDRESS_SPECIFIED) ++ ld->searchfor = mi->spec_addr; ++ ++ if (ld->start == ld->end) { ++ c = 0; ++ ld->searchfor = 0; ++ if (mi->flags & VERBOSE) ++ fprintf(fp, "(empty)\n"); ++ } else { ++ hq_open(); ++ c = do_list(ld); ++ hq_close(); ++ } ++ ++ if ((mi->flags & ADDRESS_SPECIFIED) && ++ ld->searchfor) { ++ fprintf(fp, "%lx\n", ld->searchfor); ++ retval = TRUE; ++ } else { ++ if (vt->numnodes > 1) ++ fprintf(fp, "NODE %d ", n); ++ fprintf(fp, "\"%s\" ", buf); ++ fprintf(fp, ++ "inactive_clean_pages: %ld ", ++ inactive_clean_pages); ++ if (c != inactive_clean_pages) ++ fprintf(fp, "(found %d)\n", c); ++ else ++ fprintf(fp, "(verified)\n"); ++ } ++ ++ node_zones += SIZE(zone_struct); + } +- if (strstr(buf, "page_cache_size")) +- continue; + +- if (CRASHDEBUG(1) && +- !hexadecimal(strip_linefeeds(buf), 0)) +- continue; ++ readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, ++ pglist_data_pgdat_next), KVADDR, ++ &pgdat, sizeof(void *), "pglist_data node_next", ++ FAULT_ON_ERROR); ++ } ++ } + +- this_addr = htol(strip_linefeeds(buf), +- RETURN_ON_ERROR, &errflag); ++ return retval; ++} ++ ++ ++ ++/* ++ * Check whether an address is a kmem_cache_t address, and if so, return ++ * a pointer to the static buffer containing its name string. Otherwise ++ * return NULL on failure. 
++ */ ++ ++#define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n" ++ ++static char * ++is_kmem_cache_addr(ulong vaddr, char *kbuf) ++{ ++ ulong cache, cache_cache, name; ++ long next_offset, name_offset; ++ char *cache_buf; ++ ++ if (vt->flags & KMEM_CACHE_UNAVAIL) { ++ error(INFO, "kmem cache slab subsystem not available\n"); ++ return NULL; ++ } ++ ++ if (vt->flags & KMALLOC_SLUB) ++ return is_kmem_cache_addr_slub(vaddr, kbuf); ++ ++ name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? ++ OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); ++ next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? ++ OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); ++ ++ cache = cache_cache = symbol_value("cache_cache"); ++ ++ cache_buf = GETBUF(SIZE(kmem_cache_s)); ++ ++ do { ++ readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), ++ "kmem_cache_s buffer", FAULT_ON_ERROR); ++ ++ if (cache == vaddr) { ++ if (vt->kmem_cache_namelen) { ++ BCOPY(cache_buf+name_offset, kbuf, ++ vt->kmem_cache_namelen); ++ } else { ++ name = ULONG(cache_buf + name_offset); ++ if (!read_string(name, kbuf, BUFSIZE-1)) { ++ if (vt->flags & ++ (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) ++ error(WARNING, ++ "cannot read kmem_cache_s.name string at %lx\n", ++ name); ++ else ++ error(WARNING, ++ "cannot read kmem_cache_s.c_name string at %lx\n", ++ name); ++ sprintf(kbuf, "(unknown)"); ++ } ++ } ++ FREEBUF(cache_buf); ++ return kbuf; ++ } ++ ++ cache = ULONG(cache_buf + next_offset); ++ ++ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) ++ cache -= next_offset; ++ ++ } while (cache != cache_cache); ++ ++ FREEBUF(cache_buf); ++ return NULL; ++} ++ ++/* ++ * Note same functionality as above, but instead it just ++ * dumps all slab cache names and their addresses. ++ */ ++static void ++kmem_cache_list(void) ++{ ++ ulong cache, cache_cache, name; ++ long next_offset, name_offset; ++ char *cache_buf; ++ char buf[BUFSIZE]; ++ ++ if (vt->flags & KMEM_CACHE_UNAVAIL) { ++ error(INFO, "kmem cache slab subsystem not available\n"); ++ return; ++ } ++ ++ if (vt->flags & KMALLOC_SLUB) { ++ kmem_cache_list_slub(); ++ return; ++ } ++ ++ name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? ++ OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); ++ next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? ++ OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); ++ ++ cache = cache_cache = symbol_value("cache_cache"); ++ ++ cache_buf = GETBUF(SIZE(kmem_cache_s)); ++ ++ do { ++ readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), ++ "kmem_cache_s buffer", FAULT_ON_ERROR); ++ ++ if (vt->kmem_cache_namelen) { ++ BCOPY(cache_buf+name_offset, buf, ++ vt->kmem_cache_namelen); ++ } else { ++ name = ULONG(cache_buf + name_offset); ++ if (!read_string(name, buf, BUFSIZE-1)) { ++ if (vt->flags & ++ (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) ++ error(WARNING, ++ "cannot read kmem_cache_s.name string at %lx\n", ++ name); ++ else ++ error(WARNING, ++ "cannot read kmem_cache_s.c_name string at %lx\n", ++ name); ++ sprintf(buf, "(unknown)"); ++ } ++ } ++ ++ fprintf(fp, "%lx %s\n", cache, buf); ++ ++ cache = ULONG(cache_buf + next_offset); ++ ++ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) ++ cache -= next_offset; ++ ++ } while (cache != cache_cache); ++ ++ FREEBUF(cache_buf); ++} ++ ++/* ++ * Translate an address to its physical page number, verify that the ++ * page in fact belongs to the slab subsystem, and if so, return the ++ * name of the cache to which it belongs. 
++ */ ++static char * ++vaddr_to_kmem_cache(ulong vaddr, char *buf) ++{ ++ physaddr_t paddr; ++ ulong page; ++ ulong cache; + +- if (this_addr == searchpage) { +- found = TRUE; +- break; +- } +- } +- close_tmpfile(); ++ if (!kvtop(NULL, vaddr, &paddr, 0)) { ++ error(WARNING, ++ "cannot make virtual-to-physical translation: %lx\n", ++ vaddr); ++ return NULL; ++ } + +- if (found) { +- fprintf(fp, hash_table); +- fprintf(fp, "%lx\n", searchpage); +- hi->retval = TRUE; +- } ++ if (!phys_to_page(paddr, &page)) { ++ error(WARNING, "cannot find mem_map page for address: %lx\n", ++ vaddr); ++ return NULL; + } ++ ++ if (vt->flags & KMALLOC_SLUB) { ++ readmem(compound_head(page)+OFFSET(page_slab), ++ KVADDR, &cache, sizeof(void *), ++ "page.slab", FAULT_ON_ERROR); ++ } else if (VALID_MEMBER(page_next)) ++ readmem(page+OFFSET(page_next), ++ KVADDR, &cache, sizeof(void *), ++ "page.next", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(page_list_next)) ++ readmem(page+OFFSET(page_list_next), ++ KVADDR, &cache, sizeof(void *), ++ "page.list.next", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(page_lru)) ++ readmem(page+OFFSET(page_lru)+OFFSET(list_head_next), ++ KVADDR, &cache, sizeof(void *), ++ "page.lru.next", FAULT_ON_ERROR); ++ else ++ error(FATAL, "cannot determine slab cache from page struct\n"); ++ ++ return(is_kmem_cache_addr(cache, buf)); + } + + /* +- * dump_free_pages() displays basic data about pages currently resident +- * in the free_area[] memory lists. If the flags contains the VERBOSE +- * bit, each page slab base address is dumped. If an address is specified +- * only the free_area[] data containing that page is displayed, along with +- * the page slab base address. Specified addresses can either be physical +- * address or page structure pointers. ++ * Translate an address to its physical page number, verify that the ++ * page in fact belongs to the slab subsystem, and if so, return the ++ * address of the slab to which it belongs. 
+ */ +-char *free_area_hdr1 = \ +- "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; +-char *free_area_hdr2 = \ +- "AREA SIZE FREE_AREA_STRUCT\n"; +- +-static void +-dump_free_pages(struct meminfo *fi) ++static ulong ++vaddr_to_slab(ulong vaddr) + { +- int i; +- int order; +- ulong free_area; +- char *free_area_buf; +- ulong *pp; +- int nr_mem_lists; +- struct list_data list_data, *ld; +- long cnt, total_free, chunk_size; +- int nr_free_pages; +- char buf[BUFSIZE]; +- char last_free[BUFSIZE]; +- char last_free_hdr[BUFSIZE]; +- int verbose, errflag, found; +- physaddr_t searchphys; +- ulong this_addr; +- physaddr_t this_phys; +- int do_search; +- ulong kfp, offset; +- int flen, dimension; ++ physaddr_t paddr; ++ ulong page; ++ ulong slab; + +- if (vt->flags & (NODES|ZONES)) +- error(FATAL, "dump_free_pages called with (NODES|ZONES)\n"); ++ if (!kvtop(NULL, vaddr, &paddr, 0)) { ++ error(WARNING, ++ "cannot make virtual-to-physical translation: %lx\n", ++ vaddr); ++ return 0; ++ } + +- nr_mem_lists = ARRAY_LENGTH(free_area); +- dimension = ARRAY_LENGTH(free_area_DIMENSION); ++ if (!phys_to_page(paddr, &page)) { ++ error(WARNING, "cannot find mem_map page for address: %lx\n", ++ vaddr); ++ return 0; ++ } + +- if (nr_mem_lists == 0) +- error(FATAL, "cannot determine size/dimensions of free_area\n"); ++ slab = 0; + +- if (dimension) +- error(FATAL, +- "dump_free_pages called with multidimensional free area\n"); ++ if (vt->flags & KMALLOC_SLUB) ++ slab = compound_head(page); ++ else if (VALID_MEMBER(page_prev)) ++ readmem(page+OFFSET(page_prev), ++ KVADDR, &slab, sizeof(void *), ++ "page.prev", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(page_list_prev)) ++ readmem(page+OFFSET(page_list_prev), ++ KVADDR, &slab, sizeof(void *), ++ "page.list.prev", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(page_lru)) ++ readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev), ++ KVADDR, &slab, sizeof(void *), ++ "page.lru.prev", FAULT_ON_ERROR); ++ else ++ error(FATAL, "unknown definition of struct page?\n"); + +- ld = &list_data; +- total_free = 0; +- searchphys = 0; +- do_search = FALSE; +- get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); +- +- switch (fi->flags) +- { +- case GET_FREE_HIGHMEM_PAGES: +- error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); ++ return slab; ++} + +- case GET_FREE_PAGES: +- fi->retval = (ulong)nr_free_pages; +- return; + +- case ADDRESS_SPECIFIED: +- switch (fi->memtype) +- { +- case KVADDR: +- if (!page_to_phys(fi->spec_addr, &searchphys)) { +- if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) +- return; +- } +- break; +- case PHYSADDR: +- searchphys = fi->spec_addr; +- break; +- default: +- error(FATAL, "dump_free_pages: no memtype specified\n"); +- } +- do_search = TRUE; +- break; +- } ++/* ++ * Initialize any data required for scouring the kmalloc subsystem more ++ * efficiently. ++ */ ++char slab_hdr[100] = { 0 }; ++char kmem_cache_hdr[100] = { 0 }; ++char free_inuse_hdr[100] = { 0 }; + +- verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; ++static void ++kmem_cache_init(void) ++{ ++ ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2; ++ long cache_count, num_offset, next_offset; ++ char *cache_buf; + +- free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct)); +- kfp = free_area = symbol_value("free_area"); +- flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); +- readmem(free_area, KVADDR, free_area_buf, +- SIZE(free_area_struct) * nr_mem_lists, +- "free_area_struct", FAULT_ON_ERROR); ++ if (vt->flags & KMEM_CACHE_UNAVAIL) ++ return; + +- if (do_search) +- open_tmpfile(); ++ if ((vt->flags & KMEM_CACHE_DELAY) && !(pc->flags & RUNTIME)) ++ return; + +- if (!verbose) +- fprintf(fp, free_area_hdr1); ++ if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) ++ return; + +- hq_open(); +- for (i = 0; i < nr_mem_lists; i++) { +- pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i)); ++ please_wait("gathering kmem slab cache data"); + +- chunk_size = power(2, i); ++ if (!strlen(slab_hdr)) { ++ if (vt->flags & KMALLOC_SLUB) ++ sprintf(slab_hdr, ++ "SLAB%sMEMORY%sNODE TOTAL ALLOCATED FREE\n", ++ space(VADDR_PRLEN > 8 ? 14 : 6), ++ space(VADDR_PRLEN > 8 ? 12 : 4)); ++ else ++ sprintf(slab_hdr, ++ "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", ++ space(VADDR_PRLEN > 8 ? 14 : 6), ++ space(VADDR_PRLEN > 8 ? 12 : 4)); ++ } + +- if (verbose) +- fprintf(fp, free_area_hdr2); ++ if (!strlen(kmem_cache_hdr)) ++ sprintf(kmem_cache_hdr, ++ "CACHE%sNAME OBJSIZE ALLOCATED TOTAL SLABS SSIZE\n", ++ space(VADDR_PRLEN > 8 ? 12 : 4)); + +- fprintf(fp, "%3d ", i); +- sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); +- fprintf(fp, "%5s ", buf); ++ if (!strlen(free_inuse_hdr)) ++ sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n"); + +- fprintf(fp, "%s %s", +- mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), +- verbose ? "\n" : ""); ++ if (vt->flags & KMALLOC_SLUB) { ++ kmem_cache_init_slub(); ++ return; ++ } + +- if (is_page_ptr(*pp, NULL)) { +- BZERO(ld, sizeof(struct list_data)); +- ld->flags = verbose; +- ld->start = *pp; +- ld->end = free_area; +- cnt = do_list(ld); +- total_free += (cnt * chunk_size); +- } else +- cnt = 0; ++ num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? ++ OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num); ++ next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? ++ OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); ++ max_cnum = max_limit = max_cpus = cache_count = 0; + +- if (!verbose) +- fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); ++ /* ++ * Pre-2.6 versions used the "cache_cache" as the head of the ++ * slab chain list. 2.6 uses the "cache_chain" list_head. ++ */ ++ if (vt->flags & PERCPU_KMALLOC_V2) { ++ get_symbol_data("cache_chain", sizeof(ulong), &cache); ++ cache -= next_offset; ++ cache_end = symbol_value("cache_chain"); ++ } else ++ cache = cache_end = symbol_value("cache_cache"); + +- free_area += SIZE(free_area_struct); +- kfp += SIZE(free_area_struct); +- } +- hq_close(); ++ cache_buf = GETBUF(SIZE(kmem_cache_s)); + +- fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages); +- if (total_free != nr_free_pages) +- fprintf(fp, "(found %ld)\n", total_free); +- else +- fprintf(fp, "(verified)\n"); ++ do { ++ cache_count++; + +- if (!do_search) +- return; ++ if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), ++ "kmem_cache_s buffer", RETURN_ON_ERROR)) { ++ FREEBUF(cache_buf); ++ vt->flags |= KMEM_CACHE_UNAVAIL; ++ error(INFO, ++ "%sunable to initialize kmem slab cache subsystem\n\n", ++ DUMPFILE() ? 
"\n" : ""); ++ return; ++ } + +- found = FALSE; +- rewind(pc->tmpfile); +- order = offset = 0; ++ tmp = (ulong)(UINT(cache_buf + num_offset)); + +- while (fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (CRASHDEBUG(1) && STRNEQ(buf, " max_cnum) ++ max_cnum = tmp; ++ ++ if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) ++ max_limit = tmp; ++ /* ++ * Recognize and bail out on any max_cpudata_limit() failures. ++ */ ++ if (vt->flags & KMEM_CACHE_UNAVAIL) { ++ FREEBUF(cache_buf); ++ return; ++ } + +- if (strstr(buf, "nr_free_pages") || +- STREQ(buf, "\n")) +- continue; ++ if (tmp2 > max_cpus) ++ max_cpus = tmp2; + +- if (strstr(buf, "AREA")) { +- strcpy(last_free_hdr, buf); +- continue; +- } ++ cache = ULONG(cache_buf + next_offset); + +- if (strstr(buf, "k")) { +- strcpy(last_free, buf); +- chunk_size = power(2, order) * PAGESIZE(); +- order++; +- continue; ++ switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) ++ { ++ case PERCPU_KMALLOC_V1: ++ cache -= next_offset; ++ break; ++ case PERCPU_KMALLOC_V2: ++ if (cache != cache_end) ++ cache -= next_offset; ++ break; + } + +- if (CRASHDEBUG(1) && !hexadecimal(strip_linefeeds(buf), 0)) +- continue; ++ } while (cache != cache_end); + +- errflag = 0; +- this_addr = htol(strip_linefeeds(buf), +- RETURN_ON_ERROR, &errflag); +- if (errflag) +- continue; ++ FREEBUF(cache_buf); + +- if (!page_to_phys(this_addr, &this_phys)) +- continue; ++ vt->kmem_max_c_num = max_cnum; ++ vt->kmem_max_limit = max_limit; ++ vt->kmem_max_cpus = max_cpus; ++ vt->kmem_cache_count = cache_count; + +- if ((searchphys >= this_phys) && +- (searchphys < (this_phys+chunk_size))) { +- if (searchphys > this_phys) +- offset = (searchphys - this_phys)/PAGESIZE(); +- found = TRUE; +- break; +- } ++ if (CRASHDEBUG(2)) { ++ fprintf(fp, "kmem_cache_init:\n"); ++ fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); ++ fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); ++ fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); ++ fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); + } +- close_tmpfile(); + +- if (found) { +- order--; ++ if (!(vt->flags & KMEM_CACHE_INIT)) { ++ if (vt->flags & PERCPU_KMALLOC_V1) ++ ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, ++ kmem_cache_s_name, "kmem_cache_s.name", ++ NULL, sizeof(char)); ++ else if (vt->flags & PERCPU_KMALLOC_V2) ++ vt->kmem_cache_namelen = 0; ++ else ++ ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, ++ kmem_cache_s_c_name, "kmem_cache_s.c_name", ++ NULL, 0); ++ } + +- fprintf(fp, last_free_hdr); +- fprintf(fp, last_free); +- fprintf(fp, "%lx ", this_addr); +- if (order) { +- switch (fi->memtype) +- { +- case KVADDR: +- fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); +- break; +- case PHYSADDR: +- fprintf(fp, "(%llx is %s", fi->spec_addr, +- PAGEOFFSET(fi->spec_addr) ? "in " : ""); +- break; +- } +- fprintf(fp, "%s of %ld pages) ", +- ordinal(offset+1, buf), power(2, order)); +- } ++ please_wait_done(); + +- fi->retval = TRUE; +- fprintf(fp, "\n"); +- } ++ vt->flags |= KMEM_CACHE_INIT; + } + + /* +- * Dump free pages on kernels with a multi-dimensional free_area array. ++ * Determine the largest cpudata limit for a given cache. 
+ */ +-char *free_area_hdr5 = \ +- " AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; +-char *free_area_hdr6 = \ +- " AREA SIZE FREE_AREA_STRUCT\n"; +- +-static void +-dump_multidimensional_free_pages(struct meminfo *fi) ++static ulong ++max_cpudata_limit(ulong cache, ulong *cpus) + { +- int i, j; +- struct list_data list_data, *ld; +- long cnt, total_free; +- ulong kfp, free_area; +- physaddr_t searchphys; +- int flen, errflag, verbose, nr_free_pages; +- int nr_mem_lists, dimension, order, do_search; +- ulong sum, found, offset; +- char *free_area_buf, *p; +- ulong *pp; +- long chunk_size; +- ulong this_addr; +- physaddr_t this_phys; +- char buf[BUFSIZE]; +- char last_area[BUFSIZE]; +- char last_area_hdr[BUFSIZE]; ++ int i; ++ ulong cpudata[NR_CPUS]; ++ int limit; ++ ulong max_limit; ++ ulong shared; ++ ulong *start_address; ++ ++ if (vt->flags & PERCPU_KMALLOC_V2_NODES) ++ goto kmem_cache_s_array_nodes; ++ ++ if (vt->flags & PERCPU_KMALLOC_V2) ++ goto kmem_cache_s_array; ++ ++ if (INVALID_MEMBER(kmem_cache_s_cpudata)) { ++ *cpus = 0; ++ return 0; ++ } + ++ if (!readmem(cache+OFFSET(kmem_cache_s_cpudata), ++ KVADDR, &cpudata[0], ++ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), ++ "cpudata array", RETURN_ON_ERROR)) ++ goto bail_out; + +- if (vt->flags & (NODES|ZONES)) +- error(FATAL, +- "dump_multidimensional_free_pages called with (NODES|ZONES)\n"); ++ for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && ++ cpudata[i]; i++) { ++ if (!readmem(cpudata[i]+OFFSET(cpucache_s_limit), ++ KVADDR, &limit, sizeof(int), ++ "cpucache limit", RETURN_ON_ERROR)) ++ goto bail_out; ++ if (limit > max_limit) ++ max_limit = limit; ++ } + +- ld = &list_data; +- if (SIZE(free_area_struct) % sizeof(ulong)) +- error(FATAL, "free_area_struct not long-word aligned?\n"); ++ *cpus = i; + +- total_free = 0; +- searchphys = 0; +- do_search = FALSE; +- get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); ++ return max_limit; + +- switch (fi->flags) +- { +- case GET_FREE_HIGHMEM_PAGES: +- error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); ++kmem_cache_s_array: + +- case GET_FREE_PAGES: +- fi->retval = (ulong)nr_free_pages; +- return; ++ if (!readmem(cache+OFFSET(kmem_cache_s_array), ++ KVADDR, &cpudata[0], ++ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), ++ "array cache array", RETURN_ON_ERROR)) ++ goto bail_out; + +- case ADDRESS_SPECIFIED: +- switch (fi->memtype) +- { +- case KVADDR: +- if (!page_to_phys(fi->spec_addr, &searchphys)) { +- if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) +- return; +- } +- break; +- case PHYSADDR: +- searchphys = fi->spec_addr; +- break; +- default: +- error(FATAL, +- "dump_multidimensional_free_pages: no memtype specified\n"); +- } +- do_search = TRUE; +- break; ++ for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && ++ cpudata[i]; i++) { ++ if (!readmem(cpudata[i]+OFFSET(array_cache_limit), ++ KVADDR, &limit, sizeof(int), ++ "array cache limit", RETURN_ON_ERROR)) ++ goto bail_out; ++ if (limit > max_limit) ++ max_limit = limit; ++ } ++ ++ /* ++ * If the shared list can be accessed, check its size as well. 
++ */ ++ if (VALID_MEMBER(kmem_list3_shared) && ++ VALID_MEMBER(kmem_cache_s_lists) && ++ readmem(cache+OFFSET(kmem_cache_s_lists)+OFFSET(kmem_list3_shared), ++ KVADDR, &shared, sizeof(void *), "kmem_list3 shared", ++ RETURN_ON_ERROR|QUIET) && ++ readmem(shared+OFFSET(array_cache_limit), ++ KVADDR, &limit, sizeof(int), "shared array_cache limit", ++ RETURN_ON_ERROR|QUIET)) { ++ if (limit > max_limit) ++ max_limit = limit; + } ++ ++ *cpus = i; ++ return max_limit; + +- verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; ++kmem_cache_s_array_nodes: + +- flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); +- nr_mem_lists = ARRAY_LENGTH(free_area); +- dimension = ARRAY_LENGTH(free_area_DIMENSION); +- if (!nr_mem_lists || !dimension) +- error(FATAL, "cannot determine free_area dimensions\n"); +- free_area_buf = +- GETBUF((nr_mem_lists * SIZE(free_area_struct)) * dimension); +- kfp = free_area = symbol_value("free_area"); +- readmem(free_area, KVADDR, free_area_buf, +- (SIZE(free_area_struct) * nr_mem_lists) * dimension, +- "free_area arrays", FAULT_ON_ERROR); ++ if (!readmem(cache+OFFSET(kmem_cache_s_array), ++ KVADDR, &cpudata[0], ++ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), ++ "array cache array", RETURN_ON_ERROR)) ++ goto bail_out; + +- if (do_search) +- open_tmpfile(); ++ for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && ++ cpudata[i]; i++) { ++ if (!readmem(cpudata[i]+OFFSET(array_cache_limit), ++ KVADDR, &limit, sizeof(int), ++ "array cache limit", RETURN_ON_ERROR)) ++ goto bail_out; ++ if (limit > max_limit) ++ max_limit = limit; ++ } + +- hq_open(); +- for (i = sum = found = 0; i < dimension; i++) { +- if (!verbose) +- fprintf(fp, free_area_hdr5); +- pp = (ulong *)(free_area_buf + +- ((SIZE(free_area_struct)*nr_mem_lists)*i)); +- for (j = 0; j < nr_mem_lists; j++) { +- if (verbose) +- fprintf(fp, free_area_hdr6); ++ *cpus = i; + +- sprintf(buf, "[%d][%d]", i, j); +- fprintf(fp, "%7s ", buf); ++ /* ++ * Check the shared list of all the nodes. ++ */ ++ start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); ++ ++ if (VALID_MEMBER(kmem_list3_shared) && VALID_MEMBER(kmem_cache_s_lists) && ++ readmem(cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], ++ sizeof(ulong) * vt->kmem_cache_len_nodes, "array nodelist array", ++ RETURN_ON_ERROR)) { ++ for (i = 0; i < vt->kmem_cache_len_nodes && start_address[i]; i++) { ++ if (readmem(start_address[i] + OFFSET(kmem_list3_shared), ++ KVADDR, &shared, sizeof(void *), ++ "kmem_list3 shared", RETURN_ON_ERROR|QUIET) && ++ readmem(shared + OFFSET(array_cache_limit), ++ KVADDR, &limit, sizeof(int), "shared array_cache limit", ++ RETURN_ON_ERROR|QUIET)) { ++ if (limit > max_limit) ++ max_limit = limit; ++ } ++ } ++ } ++ FREEBUF(start_address); ++ return max_limit; + +- chunk_size = power(2, j); ++bail_out: ++ vt->flags |= KMEM_CACHE_UNAVAIL; ++ error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); ++ *cpus = 0; ++ return 0; ++} + +- sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); +- fprintf(fp, "%5s ", buf); ++/* ++ * Determine whether the current slab cache is contained in ++ * the comma-separated list from a "kmem -I list1,list2 ..." ++ * command entry. ++ */ ++static int ++ignore_cache(struct meminfo *si, char *name) ++{ ++ int i, argc; ++ char *p1; ++ char *arglist[MAXARGS]; ++ char buf[BUFSIZE]; + +- fprintf(fp, "%s %s", +- mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), +- verbose ? 
"\n" : ""); ++ if (!si->ignore) ++ return FALSE; + +- if (is_page_ptr(*pp, NULL)) { +- BZERO(ld, sizeof(struct list_data)); +- ld->flags = verbose; +- ld->start = *pp; +- ld->end = free_area; +- cnt = do_list(ld); +- total_free += (cnt * chunk_size); +- } else +- cnt = 0; ++ strcpy(buf, si->ignore); ++ ++ p1 = buf; ++ while (*p1) { ++ if (*p1 == ',') ++ *p1 = ' '; ++ p1++; ++ } + +- if (!verbose) +- fprintf(fp, +- "%6ld %6ld\n", cnt, cnt * chunk_size ); ++ argc = parse_line(buf, arglist); + +- pp += (SIZE(free_area_struct)/sizeof(ulong)); +- free_area += SIZE(free_area_struct); +- kfp += SIZE(free_area_struct); +- } +- fprintf(fp, "\n"); ++ for (i = 0; i < argc; i++) { ++ if (STREQ(name, arglist[i])) ++ return TRUE; + } +- hq_close(); + +- fprintf(fp, "nr_free_pages: %d ", nr_free_pages); +- if (total_free != nr_free_pages) +- fprintf(fp, "(found %ld)\n", total_free); +- else +- fprintf(fp, "(verified)\n"); ++ return FALSE; ++} + +- if (!do_search) +- return; + +- found = FALSE; +- rewind(pc->tmpfile); +- order = offset = 0; ++/* ++ * dump_kmem_cache() displays basic information about kmalloc() slabs. ++ * At this point, only kmem_cache_s structure data for each slab is dumped. ++ * ++ * TBD: Given a specified physical address, and determine which slab it came ++ * from, and whether it's in use or not. ++ */ + +- while (fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); +- strcpy(last_area, strip_linefeeds(buf)); +- p = strstr(buf, "k"); +- *p = NULLCHAR; +- while (*p != ' ') +- p--; +- chunk_size = atol(p+1) * 1024; +- if (chunk_size == PAGESIZE()) +- order = 0; +- else +- order++; +- continue; +- } ++#define KMEM_SLAB_ADDR (1) ++#define KMEM_BUFCTL_ADDR (2) ++#define KMEM_OBJECT_ADDR_FREE (3) ++#define KMEM_OBJECT_ADDR_INUSE (4) ++#define KMEM_OBJECT_ADDR_CACHED (5) ++#define KMEM_ON_SLAB (6) ++#define KMEM_OBJECT_ADDR_SHARED (7) + +- errflag = 0; +- this_addr = htol(strip_linefeeds(buf), +- RETURN_ON_ERROR, &errflag); +- if (errflag) +- continue; ++#define DUMP_KMEM_CACHE_INFO_V1() \ ++ { \ ++ char b1[BUFSIZE]; \ ++ fprintf(fp, "%s %-18s %8ld ", \ ++ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache)), \ ++ buf, si->size); \ ++ fprintf(fp, "%9ld %8ld %5ld %3ldk\n", \ ++ vt->flags & PERCPU_KMALLOC_V1 ? \ ++ si->inuse - si->cpucached_cache : \ ++ si->inuse, si->num_slabs * si->c_num, \ ++ si->num_slabs, si->slabsize/1024); \ ++ } + +- if (!page_to_phys(this_addr, &this_phys)) +- continue; ++#define DUMP_KMEM_CACHE_INFO_V2() dump_kmem_cache_info_v2(si) + +- if ((searchphys >= this_phys) && +- (searchphys < (this_phys+chunk_size))) { +- if (searchphys > this_phys) +- offset = (searchphys - this_phys)/PAGESIZE(); +- found = TRUE; +- break; +- } ++static void ++dump_kmem_cache_info_v2(struct meminfo *si) ++{ ++ char b1[BUFSIZE]; ++ char b2[BUFSIZE]; ++ int namelen, sizelen, spacelen; + +- } +- close_tmpfile(); ++ fprintf(fp, "%s ", ++ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); + +- if (found) { +- fprintf(fp, last_area_hdr); +- fprintf(fp, "%s\n", last_area); +- fprintf(fp, "%lx ", this_addr); +- if (order) { +- switch (fi->memtype) +- { +- case KVADDR: +- fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); +- break; +- case PHYSADDR: +- fprintf(fp, "(%llx is %s", fi->spec_addr, +- PAGEOFFSET(fi->spec_addr) ? 
"in " : ""); +- break; +- } +- fprintf(fp, "%s of %ld pages) ", +- ordinal(offset+1, buf), power(2, order)); +- } ++ namelen = strlen(si->curname); ++ sprintf(b2, "%ld", si->size); ++ sizelen = strlen(b2); ++ spacelen = 0; + +- fi->retval = TRUE; +- fprintf(fp, "\n"); ++ if (namelen++ > 18) { ++ spacelen = 29 - namelen - sizelen; ++ fprintf(fp, "%s%s%ld ", si->curname, ++ space(spacelen <= 0 ? 1 : spacelen), si->size); ++ if (spacelen > 0) ++ spacelen = 1; ++ sprintf(b1, "%c%dld ", '%', 9 + spacelen - 1); ++ } else { ++ fprintf(fp, "%-18s %8ld ", si->curname, si->size); ++ sprintf(b1, "%c%dld ", '%', 9); + } +-} + ++ fprintf(fp, b1, vt->flags & (PERCPU_KMALLOC_V2) ? ++ si->inuse - si->cpucached_cache : si->inuse); + +-/* +- * Dump free pages in newer kernels that have zones. This is a work in +- * progress, because although the framework for memory nodes has been laid +- * down, complete support has not been put in place. +- */ +-static char *zone_hdr = "ZONE NAME SIZE FREE"; ++ fprintf(fp, "%8ld %5ld %3ldk\n", ++ si->num_slabs * si->c_num, ++ si->num_slabs, si->slabsize/1024); ++} + +-/* +- * From linux/mmzone.h +- */ +-#define ZONE_DMA 0 +-#define ZONE_NORMAL 1 +-#define ZONE_HIGHMEM 2 ++#define DUMP_SLAB_INFO() \ ++ { \ ++ char b1[BUFSIZE], b2[BUFSIZE]; \ ++ ulong allocated, freeobjs; \ ++ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { \ ++ allocated = si->s_inuse - si->cpucached_slab; \ ++ freeobjs = si->c_num - allocated - si->cpucached_slab; \ ++ } else { \ ++ allocated = si->s_inuse; \ ++ freeobjs = si->c_num - si->s_inuse; \ ++ } \ ++ fprintf(fp, "%s %s %5ld %9ld %4ld\n", \ ++ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ ++ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \ ++ si->c_num, allocated, \ ++ vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? \ ++ freeobjs + si->cpucached_slab : freeobjs); \ ++ } + + static void +-dump_free_pages_zones_v1(struct meminfo *fi) ++dump_kmem_cache(struct meminfo *si) + { +- int i, n; +- ulong node_zones; +- ulong size; +- long zone_size_offset; +- long chunk_size; +- int order, errflag, do_search; +- ulong offset, verbose, value, sum, found; +- ulong this_addr; +- physaddr_t this_phys, searchphys; +- ulong zone_mem_map; +- ulong zone_start_paddr; +- ulong zone_start_mapnr; +- struct node_table *nt; +- char buf[BUFSIZE], *p; +- char buf1[BUFSIZE]; +- char buf2[BUFSIZE]; +- char buf3[BUFSIZE]; +- char last_node[BUFSIZE]; +- char last_zone[BUFSIZE]; +- char last_area[BUFSIZE]; +- char last_area_hdr[BUFSIZE]; ++ char buf[BUFSIZE]; ++ char kbuf[BUFSIZE]; ++ char *reqname; ++ ulong cache_cache; ++ ulong name, magic; ++ int cnt; ++ char *p1; + +- if (!(vt->flags & (NODES|ZONES))) ++ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) + error(FATAL, +- "dump_free_pages_zones_v1 called without (NODES|ZONES)\n"); ++ "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); + +- if (fi->flags & ADDRESS_SPECIFIED) { +- switch (fi->memtype) +- { +- case KVADDR: +- if (!page_to_phys(fi->spec_addr, &searchphys)) { +- if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) +- return; +- } +- break; +- case PHYSADDR: +- searchphys = fi->spec_addr; +- break; +- default: +- error(FATAL, +- "dump_free_pages_zones_v1: no memtype specified\n"); +- } +- do_search = TRUE; +- } else { +- searchphys = 0; +- do_search = FALSE; +- } +- verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; ++ si->found = si->retval = 0; ++ reqname = NULL; + +- if (VALID_MEMBER(zone_struct_size)) +- zone_size_offset = OFFSET(zone_struct_size); +- else if (VALID_MEMBER(zone_struct_memsize)) +- zone_size_offset = OFFSET(zone_struct_memsize); +- else +- error(FATAL, +- "zone_struct has neither size nor memsize field\n"); ++ if ((!(si->flags & VERBOSE) || si->reqname) && ++ !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) ++ fprintf(fp, kmem_cache_hdr); + +- if (do_search) +- open_tmpfile(); ++ si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); ++ cnt = 0; ++ si->cache = cache_cache = symbol_value("cache_cache"); ++ ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { ++ error(INFO, ++ "address is not allocated in slab subsystem: %lx\n", ++ si->spec_addr); ++ return; ++ } ++ ++ if (si->reqname && (si->reqname != p1)) ++ error(INFO, ++ "ignoring pre-selected %s cache for address: %lx\n", ++ si->reqname, si->spec_addr, si->reqname); ++ ++ reqname = p1; ++ } else ++ reqname = si->reqname; + +- hq_open(); ++ si->cache_buf = GETBUF(SIZE(kmem_cache_s)); + +- for (n = sum = found = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ do { ++ if ((si->flags & VERBOSE) && !si->reqname && ++ !(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); + +- for (i = 0; i < vt->nr_zones; i++) { +- +- if (fi->flags == GET_FREE_PAGES) { +- readmem(node_zones+ +- OFFSET(zone_struct_free_pages), +- KVADDR, &value, sizeof(ulong), +- "node_zones free_pages", +- FAULT_ON_ERROR); +- sum += value; +- node_zones += SIZE(zone_struct); +- continue; +- } +- +- if (fi->flags == GET_FREE_HIGHMEM_PAGES) { +- if (i == ZONE_HIGHMEM) { +- readmem(node_zones+ +- OFFSET(zone_struct_free_pages), +- KVADDR, &value, sizeof(ulong), +- "node_zones free_pages", +- FAULT_ON_ERROR); +- sum += value; +- } +- node_zones += SIZE(zone_struct); +- continue; +- } +- +- if (fi->flags == GET_ZONE_SIZES) { +- readmem(node_zones+zone_size_offset, +- KVADDR, &size, sizeof(ulong), +- "node_zones {mem}size", FAULT_ON_ERROR); +- sum += size; +- node_zones += SIZE(zone_struct); +- continue; +- } ++ readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), ++ "kmem_cache_s buffer", FAULT_ON_ERROR); + +- if ((i == 0) && (vt->flags & NODES)) { +- if (n) { +- fprintf(fp, "\n"); +- pad_line(fp, +- VADDR_PRLEN > 8 ? 74 : 66, '-'); +- fprintf(fp, "\n"); +- } +- fprintf(fp, "%sNODE\n %2d\n", +- n ? "\n" : "", nt->node_id); ++ if (vt->kmem_cache_namelen) { ++ BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), ++ buf, vt->kmem_cache_namelen); ++ } else { ++ name = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_name)); ++ if (!read_string(name, buf, BUFSIZE-1)) { ++ error(WARNING, ++ "cannot read kmem_cache_s.c_name string at %lx\n", ++ name); ++ sprintf(buf, "(unknown)"); + } ++ } + +- fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", +- i > 0 ? 
"\n" : "", +- zone_hdr, +- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, +- "MEM_MAP")); +- +- fprintf(fp, "%3d ", i); +- +- readmem(node_zones+OFFSET(zone_struct_name), KVADDR, +- &value, sizeof(void *), +- "node_zones name", FAULT_ON_ERROR); +- if (read_string(value, buf, BUFSIZE-1)) +- fprintf(fp, "%-9s ", buf); +- else +- fprintf(fp, "(unknown) "); +- +- readmem(node_zones+zone_size_offset, KVADDR, +- &size, sizeof(ulong), +- "node_zones {mem}size", FAULT_ON_ERROR); +- fprintf(fp, "%6ld ", size); +- +- readmem(node_zones+OFFSET(zone_struct_free_pages), +- KVADDR, &value, sizeof(ulong), +- "node_zones free_pages", FAULT_ON_ERROR); +- +- fprintf(fp, "%6ld ", value); +- +- readmem(node_zones+OFFSET(zone_struct_zone_start_paddr), +- KVADDR, &zone_start_paddr, sizeof(ulong), +- "node_zones zone_start_paddr", FAULT_ON_ERROR); +- readmem(node_zones+OFFSET(zone_struct_zone_start_mapnr), +- KVADDR, &zone_start_mapnr, sizeof(ulong), +- "node_zones zone_start_mapnr", FAULT_ON_ERROR); +- readmem(node_zones+OFFSET(zone_struct_zone_mem_map), +- KVADDR, &zone_mem_map, sizeof(ulong), +- "node_zones zone_mem_map", FAULT_ON_ERROR); +- +- fprintf(fp, "%s %s %s\n", +- mkstring(buf1, VADDR_PRLEN, +- CENTER|LONG_HEX,MKSTR(zone_mem_map)), +- mkstring(buf2, strlen("START_PADDR"), +- CENTER|LONG_HEX|RJUST, +- MKSTR(zone_start_paddr)), +- mkstring(buf3, strlen("START_MAPNR"), +- CENTER|LONG_DEC|RJUST, +- MKSTR(zone_start_mapnr))); +- +- sum += value; +- +- if (value) +- found += dump_zone_free_area(node_zones+ +- OFFSET(zone_struct_free_area), +- vt->nr_free_areas, verbose); ++ if (reqname && !STREQ(reqname, buf)) ++ goto next_cache; + +- node_zones += SIZE(zone_struct); ++ if (ignore_cache(si, buf)) { ++ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); ++ goto next_cache; + } +- } + +- hq_close(); ++ si->curname = buf; + +- if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){ +- fi->retval = sum; +- return; +- } ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); ++ console("cache: %lx %s\n", si->cache, si->curname); + +- fprintf(fp, "\nnr_free_pages: %ld ", sum); +- if (sum == found) +- fprintf(fp, "(verified)\n"); +- else +- fprintf(fp, "(found %ld)\n", found); ++ magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); + +- if (!do_search) +- return; ++ if (magic == SLAB_C_MAGIC) { + +- found = FALSE; +- rewind(pc->tmpfile); +- order = offset = 0; +- last_node[0] = NULLCHAR; +- last_zone[0] = NULLCHAR; +- last_area[0] = NULLCHAR; +- last_area_hdr[0] = NULLCHAR; ++ si->size = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_org_size)); ++ if (!si->size) { ++ if (STREQ(si->curname, "kmem_cache")) ++ si->size = SIZE(kmem_cache_s); ++ else { ++ error(INFO, ++ "\"%s\" cache: c_org_size: %ld\n", ++ si->curname, si->size); ++ si->errors++; ++ } ++ } ++ si->c_flags = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_flags)); ++ si->c_offset = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_offset)); ++ si->order = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_gfporder)); ++ si->c_num = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_num)); + ++ do_slab_chain(SLAB_GET_COUNTS, si); + +- while (fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (CRASHDEBUG(1) && STRNEQ(buf, "flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) ++ DUMP_KMEM_CACHE_INFO_V1(); + +- if (STRNEQ(buf, "nr_free_pages:")) +- continue; ++ if (si->flags == GET_SLAB_PAGES) ++ si->retval += (si->num_slabs * ++ (si->slabsize/PAGESIZE())); + +- if (STRNEQ(buf, "NODE")) { +- fgets(buf, BUFSIZE, pc->tmpfile); +- 
strcpy(last_node, strip_linefeeds(buf)); +- continue; +- } +- if (STRNEQ(buf, "ZONE")) { +- fgets(buf, BUFSIZE, pc->tmpfile); +- strcpy(last_zone, strip_linefeeds(buf)); +- continue; +- } +- if (STRNEQ(buf, "AREA")) { +- strcpy(last_area_hdr, buf); +- fgets(buf, BUFSIZE, pc->tmpfile); +- strcpy(last_area, strip_linefeeds(buf)); +- p = strstr(buf, "k"); +- *p = NULLCHAR; +- while (*p != ' ') +- p--; +- chunk_size = atol(p+1) * 1024; +- if (chunk_size == PAGESIZE()) +- order = 0; +- else +- order++; +- continue; +- } ++ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { ++ si->slab = (si->flags & ADDRESS_SPECIFIED) ? ++ vaddr_to_slab(si->spec_addr) : 0; ++ ++ do_slab_chain(SLAB_WALKTHROUGH, si); + +- if (CRASHDEBUG(0) && +- !hexadecimal(strip_linefeeds(buf), 0)) +- continue; ++ if (si->found) { ++ fprintf(fp, kmem_cache_hdr); ++ DUMP_KMEM_CACHE_INFO_V1(); ++ fprintf(fp, slab_hdr); ++ DUMP_SLAB_INFO(); ++ ++ switch (si->found) ++ { ++ case KMEM_BUFCTL_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, ++ "(ON-SLAB kmem_bufctl_t)\n"); ++ break; ++ ++ case KMEM_SLAB_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, ++ "(ON-SLAB kmem_slab_t)\n"); ++ break; ++ ++ case KMEM_ON_SLAB: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, ++ "(unused part of slab)\n"); ++ break; ++ ++ case KMEM_OBJECT_ADDR_FREE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " %lx\n", ++ (ulong)si->spec_addr); ++ break; ++ ++ case KMEM_OBJECT_ADDR_INUSE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " [%lx]\n", ++ (ulong)si->spec_addr); ++ break; ++ } ++ ++ break; ++ } ++ } + +- errflag = 0; +- this_addr = htol(strip_linefeeds(buf), +- RETURN_ON_ERROR, &errflag); +- if (errflag) +- continue; ++ } else { ++ error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", ++ si->curname, magic); ++ si->errors++; ++ } + +- if (!page_to_phys(this_addr, &this_phys)) +- continue; ++next_cache: ++ si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp)); + +- if ((searchphys >= this_phys) && +- (searchphys < (this_phys+chunk_size))) { +- if (searchphys > this_phys) +- offset = (searchphys - this_phys)/PAGESIZE(); +- found = TRUE; +- break; +- } ++ } while (si->cache != cache_cache); + +- } +- close_tmpfile(); ++ FREEBUF(si->cache_buf); + +- if (found) { +- if (strlen(last_node)) +- fprintf(fp, "NODE\n%s\n", last_node); +- fprintf(fp, "%s %s START_PADDR START_MAPNR\n", +- zone_hdr, +- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); +- fprintf(fp, "%s\n", last_zone); +- fprintf(fp, last_area_hdr); +- fprintf(fp, "%s\n", last_area); +- fprintf(fp, "%lx ", this_addr); +- if (order) { +- switch (fi->memtype) +- { +- case KVADDR: +- fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); +- break; +- case PHYSADDR: +- fprintf(fp, "(%llx is %s", fi->spec_addr, +- PAGEOFFSET(fi->spec_addr) ? "in " : ""); +- break; +- } +- fprintf(fp, "%s of %ld pages) ", +- ordinal(offset+1, buf), power(2, order)); +- } ++ if ((si->flags & ADDRESS_SPECIFIED) && !si->found) ++ error(INFO, "%s: address not found in cache: %lx\n", ++ reqname, si->spec_addr); ++ ++ if (si->errors) ++ error(INFO, "%ld error%s encountered\n", ++ si->errors, si->errors > 1 ? "s" : ""); + +- fi->retval = TRUE; +- fprintf(fp, "\n"); +- } ++ FREEBUF(si->addrlist); + } + +- + /* +- * Same as dump_free_pages_zones_v1(), but updated for numerous 2.6 zone +- * and free_area related data structure changes. ++ * dump_kmem_cache() adapted for newer percpu slab format. 
+ */ ++ + static void +-dump_free_pages_zones_v2(struct meminfo *fi) ++dump_kmem_cache_percpu_v1(struct meminfo *si) + { +- int i, n; +- ulong node_zones; +- ulong size; +- long zone_size_offset; +- long chunk_size; +- int order, errflag, do_search; +- ulong offset, verbose, value, sum, found; +- ulong this_addr; +- physaddr_t this_phys, searchphys; +- ulong zone_mem_map; +- ulong zone_start_paddr; +- ulong zone_start_pfn; +- ulong zone_start_mapnr; +- struct node_table *nt; +- char buf[BUFSIZE], *p; +- char buf1[BUFSIZE]; +- char buf2[BUFSIZE]; +- char buf3[BUFSIZE]; +- char last_node[BUFSIZE]; +- char last_zone[BUFSIZE]; +- char last_area[BUFSIZE]; +- char last_area_hdr[BUFSIZE]; ++ int i; ++ char buf[BUFSIZE]; ++ char kbuf[BUFSIZE]; ++ char *reqname; ++ ulong cache_cache; ++ ulong name; ++ int cnt; ++ uint tmp_val; /* Used as temporary variable to read sizeof(int) and ++ assigned to ulong variable. We are doing this to mask ++ the endian issue */ ++ char *p1; + +- if (!(vt->flags & (NODES|ZONES))) +- error(FATAL, +- "dump_free_pages_zones_v2 called without (NODES|ZONES)\n"); ++ if (!(vt->flags & PERCPU_KMALLOC_V1)) ++ error(FATAL, ++ "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n"); + +- if (fi->flags & ADDRESS_SPECIFIED) { +- switch (fi->memtype) +- { +- case KVADDR: +- if (!page_to_phys(fi->spec_addr, &searchphys)) { +- if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) +- return; +- } +- break; +- case PHYSADDR: +- searchphys = fi->spec_addr; +- break; +- default: +- error(FATAL, +- "dump_free_pages_zones_v2: no memtype specified\n"); +- } +- do_search = TRUE; +- } else { +- searchphys = 0; +- do_search = FALSE; +- } ++ si->found = si->retval = 0; ++ reqname = NULL; + +- verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; ++ if ((!(si->flags & VERBOSE) || si->reqname) && ++ !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) ++ fprintf(fp, kmem_cache_hdr); + +- if (VALID_MEMBER(zone_spanned_pages)) +- zone_size_offset = OFFSET(zone_spanned_pages); +- else +- error(FATAL, "zone struct has no spanned_pages field\n"); ++ si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); ++ si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); ++ for (i = 0; i < vt->kmem_max_cpus; i++) ++ si->cpudata[i] = (ulong *) ++ GETBUF(vt->kmem_max_limit * sizeof(ulong)); + +- if (do_search) +- open_tmpfile(); ++ cnt = 0; ++ si->cache = cache_cache = symbol_value("cache_cache"); + +- hq_open(); ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { ++ error(INFO, ++ "address is not allocated in slab subsystem: %lx\n", ++ si->spec_addr); ++ return; ++ } ++ ++ if (si->reqname && (si->reqname != p1)) ++ error(INFO, ++ "ignoring pre-selected %s cache for address: %lx\n", ++ si->reqname, si->spec_addr, si->reqname); ++ reqname = p1; ++ } else ++ reqname = si->reqname; + +- for (n = sum = found = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ do { ++ if ((si->flags & VERBOSE) && !si->reqname && ++ !(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, "%s%s", cnt++ ? 
"\n" : "", kmem_cache_hdr); + +- for (i = 0; i < vt->nr_zones; i++) { +- +- if (fi->flags == GET_FREE_PAGES) { +- readmem(node_zones+ +- OFFSET(zone_free_pages), +- KVADDR, &value, sizeof(ulong), +- "node_zones free_pages", +- FAULT_ON_ERROR); +- sum += value; +- node_zones += SIZE(zone); +- continue; ++ if (vt->kmem_cache_namelen) { ++ readmem(si->cache+OFFSET(kmem_cache_s_name), ++ KVADDR, buf, vt->kmem_cache_namelen, ++ "name array", FAULT_ON_ERROR); ++ } else { ++ readmem(si->cache+OFFSET(kmem_cache_s_name), ++ KVADDR, &name, sizeof(ulong), ++ "name", FAULT_ON_ERROR); ++ if (!read_string(name, buf, BUFSIZE-1)) { ++ error(WARNING, ++ "cannot read kmem_cache_s.name string at %lx\n", ++ name); ++ sprintf(buf, "(unknown)"); + } +- +- if (fi->flags == GET_FREE_HIGHMEM_PAGES) { +- if (i == ZONE_HIGHMEM) { +- readmem(node_zones+ +- OFFSET(zone_free_pages), +- KVADDR, &value, sizeof(ulong), +- "node_zones free_pages", +- FAULT_ON_ERROR); +- sum += value; +- } +- node_zones += SIZE(zone); +- continue; +- } +- +- if (fi->flags == GET_ZONE_SIZES) { +- readmem(node_zones+zone_size_offset, +- KVADDR, &size, sizeof(ulong), +- "node_zones size", FAULT_ON_ERROR); +- sum += size; +- node_zones += SIZE(zone); +- continue; ++ } ++ ++ if (reqname && !STREQ(reqname, buf)) ++ goto next_cache; ++ ++ if (ignore_cache(si, buf)) { ++ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); ++ goto next_cache; ++ } ++ ++ si->curname = buf; ++ ++ readmem(si->cache+OFFSET(kmem_cache_s_objsize), ++ KVADDR, &tmp_val, sizeof(uint), ++ "objsize", FAULT_ON_ERROR); ++ si->size = (ulong)tmp_val; ++ ++ if (!si->size) { ++ if (STREQ(si->curname, "kmem_cache")) ++ si->size = SIZE(kmem_cache_s); ++ else { ++ error(INFO, "\"%s\" cache: objsize: %ld\n", ++ si->curname, si->size); ++ si->errors++; + } ++ } ++ ++ readmem(si->cache+OFFSET(kmem_cache_s_flags), ++ KVADDR, &tmp_val, sizeof(uint), ++ "kmem_cache_s flags", FAULT_ON_ERROR); ++ si->c_flags = (ulong)tmp_val; ++ ++ readmem(si->cache+OFFSET(kmem_cache_s_gfporder), ++ KVADDR, &tmp_val, sizeof(uint), ++ "gfporder", FAULT_ON_ERROR); ++ si->order = (ulong)tmp_val; ++ ++ readmem(si->cache+OFFSET(kmem_cache_s_num), ++ KVADDR, &tmp_val, sizeof(uint), ++ "kmem_cache_s num", FAULT_ON_ERROR); ++ si->c_num = (ulong)tmp_val; ++ ++ do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si); ++ ++ if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { ++ DUMP_KMEM_CACHE_INFO_V1(); ++ if (CRASHDEBUG(3)) ++ dump_struct("kmem_cache_s", si->cache, 0); ++ } + +- if ((i == 0) && (vt->flags & NODES)) { +- if (n) { +- fprintf(fp, "\n"); +- pad_line(fp, +- VADDR_PRLEN > 8 ? 74 : 66, '-'); +- fprintf(fp, "\n"); +- } +- fprintf(fp, "%sNODE\n %2d\n", +- n ? "\n" : "", nt->node_id); +- } ++ if (si->flags == GET_SLAB_PAGES) ++ si->retval += (si->num_slabs * ++ (si->slabsize/PAGESIZE())); + +- fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", +- i > 0 ? 
"\n" : "", +- zone_hdr, +- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, +- "MEM_MAP")); +- +- fprintf(fp, "%3d ", i); +- +- readmem(node_zones+OFFSET(zone_name), KVADDR, +- &value, sizeof(void *), +- "node_zones name", FAULT_ON_ERROR); +- if (read_string(value, buf, BUFSIZE-1)) +- fprintf(fp, "%-9s ", buf); +- else +- fprintf(fp, "(unknown) "); +- +- readmem(node_zones+zone_size_offset, KVADDR, +- &size, sizeof(ulong), +- "node_zones size", FAULT_ON_ERROR); +- fprintf(fp, "%6ld ", size); +- +- readmem(node_zones+OFFSET(zone_free_pages), +- KVADDR, &value, sizeof(ulong), +- "node_zones free_pages", FAULT_ON_ERROR); +- +- fprintf(fp, "%6ld ", value); +- +- readmem(node_zones+OFFSET(zone_zone_mem_map), +- KVADDR, &zone_mem_map, sizeof(ulong), +- "node_zones zone_mem_map", FAULT_ON_ERROR); ++ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { + +- readmem(node_zones+ OFFSET(zone_zone_start_pfn), +- KVADDR, &zone_start_pfn, sizeof(ulong), +- "node_zones zone_start_pfn", FAULT_ON_ERROR); +- zone_start_paddr = PTOB(zone_start_pfn); ++ gather_cpudata_list_v1(si); + +- if (zone_mem_map) +- zone_start_mapnr = +- (zone_mem_map - nt->mem_map) / +- SIZE(page); +- else +- zone_start_mapnr = 0; +- +- fprintf(fp, "%s %s %s\n", +- mkstring(buf1, VADDR_PRLEN, +- CENTER|LONG_HEX,MKSTR(zone_mem_map)), +- mkstring(buf2, strlen("START_PADDR"), +- CENTER|LONG_HEX|RJUST, +- MKSTR(zone_start_paddr)), +- mkstring(buf3, strlen("START_MAPNR"), +- CENTER|LONG_DEC|RJUST, +- MKSTR(zone_start_mapnr))); +- +- sum += value; ++ si->slab = (si->flags & ADDRESS_SPECIFIED) ? ++ vaddr_to_slab(si->spec_addr) : 0; + +- if (value) +- found += dump_zone_free_area(node_zones+ +- OFFSET(zone_free_area), +- vt->nr_free_areas, verbose); ++ do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si); + +- node_zones += SIZE(zone); ++ if (si->found) { ++ fprintf(fp, kmem_cache_hdr); ++ DUMP_KMEM_CACHE_INFO_V1(); ++ fprintf(fp, slab_hdr); ++ gather_slab_cached_count(si); ++ DUMP_SLAB_INFO(); ++ ++ switch (si->found) ++ { ++ case KMEM_BUFCTL_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp,"(kmem_bufctl_t)\n"); ++ break; ++ ++ case KMEM_SLAB_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, "(slab_s)\n"); ++ break; ++ ++ case KMEM_ON_SLAB: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, "(unused part of slab)\n"); ++ break; ++ ++ case KMEM_OBJECT_ADDR_FREE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " %lx\n", ++ (ulong)si->spec_addr); ++ break; ++ ++ case KMEM_OBJECT_ADDR_INUSE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " [%lx]\n", ++ (ulong)si->spec_addr); ++ break; ++ ++ case KMEM_OBJECT_ADDR_CACHED: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, ++ " %lx (cpu %d cache)\n", ++ (ulong)si->spec_addr, si->cpu); ++ break; ++ } ++ ++ break; ++ } + } +- } + +- hq_close(); ++next_cache: ++ readmem(si->cache+OFFSET(kmem_cache_s_next), ++ KVADDR, &si->cache, sizeof(ulong), ++ "kmem_cache_s next", FAULT_ON_ERROR); + +- if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){ +- fi->retval = sum; +- return; +- } ++ si->cache -= OFFSET(kmem_cache_s_next); + +- fprintf(fp, "\nnr_free_pages: %ld ", sum); +- if (sum == found) +- fprintf(fp, "(verified)\n"); +- else +- fprintf(fp, "(found %ld)\n", found); ++ } while (si->cache != cache_cache); + +- if (!do_search) +- return; ++ if ((si->flags & ADDRESS_SPECIFIED) && !si->found) ++ error(INFO, "%s: address not found in cache: %lx\n", ++ reqname, si->spec_addr); ++ ++ if (si->errors) ++ error(INFO, "%ld error%s encountered\n", ++ si->errors, 
si->errors > 1 ? "s" : ""); + +- found = FALSE; +- rewind(pc->tmpfile); +- order = offset = 0; +- last_node[0] = NULLCHAR; +- last_zone[0] = NULLCHAR; +- last_area[0] = NULLCHAR; +- last_area_hdr[0] = NULLCHAR; ++ FREEBUF(si->addrlist); ++ FREEBUF(si->kmem_bufctl); ++ for (i = 0; i < vt->kmem_max_cpus; i++) ++ FREEBUF(si->cpudata[i]); + ++} + +- while (fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); +- strcpy(last_node, strip_linefeeds(buf)); +- continue; +- } +- if (STRNEQ(buf, "ZONE")) { +- fgets(buf, BUFSIZE, pc->tmpfile); +- strcpy(last_zone, strip_linefeeds(buf)); +- continue; +- } +- if (STRNEQ(buf, "AREA")) { +- strcpy(last_area_hdr, buf); +- fgets(buf, BUFSIZE, pc->tmpfile); +- strcpy(last_area, strip_linefeeds(buf)); +- p = strstr(buf, "k"); +- *p = NULLCHAR; +- while (*p != ' ') +- p--; +- chunk_size = atol(p+1) * 1024; +- if (chunk_size == PAGESIZE()) +- order = 0; +- else +- order++; +- continue; +- } ++ if (!(vt->flags & PERCPU_KMALLOC_V2)) ++ error(FATAL, ++ "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n"); + +- if (CRASHDEBUG(0) && +- !hexadecimal(strip_linefeeds(buf), 0)) +- continue; ++ si->found = si->retval = 0; ++ reqname = NULL; + +- errflag = 0; +- this_addr = htol(strip_linefeeds(buf), +- RETURN_ON_ERROR, &errflag); +- if (errflag) +- continue; ++ if ((!(si->flags & VERBOSE) || si->reqname) && ++ !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) ++ fprintf(fp, kmem_cache_hdr); + +- if (!page_to_phys(this_addr, &this_phys)) +- continue; ++ si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); ++ si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); ++ for (i = 0; i < vt->kmem_max_cpus; i++) ++ si->cpudata[i] = (ulong *) ++ GETBUF(vt->kmem_max_limit * sizeof(ulong)); ++ if(vt->flags & PERCPU_KMALLOC_V2_NODES) ++ si->shared_array_cache = (ulong *) ++ GETBUF(vt->kmem_cache_len_nodes * ++ (vt->kmem_max_limit+1) * sizeof(ulong)); ++ else ++ si->shared_array_cache = (ulong *) ++ GETBUF((vt->kmem_max_limit+1) * sizeof(ulong)); + +- if ((searchphys >= this_phys) && +- (searchphys < (this_phys+chunk_size))) { +- if (searchphys > this_phys) +- offset = (searchphys - this_phys)/PAGESIZE(); +- found = TRUE; +- break; +- } ++ cnt = 0; ++ ++ get_symbol_data("cache_chain", sizeof(ulong), &si->cache); ++ si->cache -= OFFSET(kmem_cache_s_next); ++ cache_end = symbol_value("cache_chain"); ++ ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { ++ error(INFO, ++ "address is not allocated in slab subsystem: %lx\n", ++ si->spec_addr); ++ return; ++ } ++ ++ if (si->reqname && (si->reqname != p1)) ++ error(INFO, ++ "ignoring pre-selected %s cache for address: %lx\n", ++ si->reqname, si->spec_addr, si->reqname); ++ reqname = p1; ++ } else ++ reqname = si->reqname; + +- } +- close_tmpfile(); ++ do { ++ if ((si->flags & VERBOSE) && !si->reqname && ++ !(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, "%s%s", cnt++ ? 
"\n" : "", kmem_cache_hdr); + +- if (found) { +- if (strlen(last_node)) +- fprintf(fp, "NODE\n%s\n", last_node); +- fprintf(fp, "%s %s START_PADDR START_MAPNR\n", +- zone_hdr, +- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); +- fprintf(fp, "%s\n", last_zone); +- fprintf(fp, last_area_hdr); +- fprintf(fp, "%s\n", last_area); +- fprintf(fp, "%lx ", this_addr); +- if (order) { +- switch (fi->memtype) +- { +- case KVADDR: +- fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); +- break; +- case PHYSADDR: +- fprintf(fp, "(%llx is %s", fi->spec_addr, +- PAGEOFFSET(fi->spec_addr) ? "in " : ""); +- break; ++ if (vt->kmem_cache_namelen) { ++ readmem(si->cache+OFFSET(kmem_cache_s_name), ++ KVADDR, buf, vt->kmem_cache_namelen, ++ "name array", FAULT_ON_ERROR); ++ } else { ++ readmem(si->cache+OFFSET(kmem_cache_s_name), ++ KVADDR, &name, sizeof(ulong), ++ "name", FAULT_ON_ERROR); ++ if (!read_string(name, buf, BUFSIZE-1)) { ++ error(WARNING, ++ "cannot read kmem_cache_s.name string at %lx\n", ++ name); ++ sprintf(buf, "(unknown)"); + } +- fprintf(fp, "%s of %ld pages) ", +- ordinal(offset+1, buf), power(2, order)); +- } +- +- fi->retval = TRUE; +- fprintf(fp, "\n"); +- } +-} ++ } + ++ if (reqname && !STREQ(reqname, buf)) ++ goto next_cache; + +-static char * +-page_usage_hdr = "ZONE NAME FREE ACTIVE INACTIVE_DIRTY INACTIVE_CLEAN MIN/LOW/HIGH"; ++ if (ignore_cache(si, buf)) { ++ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); ++ goto next_cache; ++ } + +-/* +- * Display info about the non-free pages in each zone. +- */ +-static int +-dump_zone_page_usage(void) +-{ +- int i, n; +- ulong value, node_zones; +- struct node_table *nt; +- ulong inactive_dirty_pages, inactive_clean_pages, active_pages; +- ulong free_pages, pages_min, pages_low, pages_high; +- char namebuf[BUFSIZE]; +- char buf1[BUFSIZE]; +- char buf2[BUFSIZE]; +- char buf3[BUFSIZE]; ++ si->curname = buf; + +- if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) || +- !VALID_MEMBER(zone_struct_inactive_clean_pages) || +- !VALID_MEMBER(zone_struct_active_pages) || +- !VALID_MEMBER(zone_struct_pages_min) || +- !VALID_MEMBER(zone_struct_pages_low) || +- !VALID_MEMBER(zone_struct_pages_high)) +- return FALSE; ++ readmem(si->cache+OFFSET(kmem_cache_s_objsize), ++ KVADDR, &tmp_val, sizeof(uint), ++ "objsize", FAULT_ON_ERROR); ++ si->size = (ulong)tmp_val; + +- fprintf(fp, "\n"); ++ if (!si->size) { ++ if (STREQ(si->curname, "kmem_cache")) ++ si->size = SIZE(kmem_cache_s); ++ else { ++ error(INFO, "\"%s\" cache: objsize: %ld\n", ++ si->curname, si->size); ++ si->errors++; ++ } ++ } + +- for (n = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); +- +- if ((i == 0) && (vt->flags & NODES)) { +- fprintf(fp, "%sNODE\n %2d\n", +- n ? 
"\n" : "", nt->node_id); +- } +- fprintf(fp, "%s\n", page_usage_hdr); ++ readmem(si->cache+OFFSET(kmem_cache_s_flags), ++ KVADDR, &tmp_val, sizeof(uint), ++ "kmem_cache_s flags", FAULT_ON_ERROR); ++ si->c_flags = (ulong)tmp_val; + +- for (i = 0; i < vt->nr_zones; i++) { +- readmem(node_zones+OFFSET(zone_struct_free_pages), +- KVADDR, &free_pages, sizeof(ulong), +- "node_zones free_pages", FAULT_ON_ERROR); +- readmem(node_zones+ +- OFFSET(zone_struct_inactive_dirty_pages), +- KVADDR, &inactive_dirty_pages, sizeof(ulong), +- "node_zones inactive_dirty_pages", +- FAULT_ON_ERROR); +- readmem(node_zones+ +- OFFSET(zone_struct_inactive_clean_pages), +- KVADDR, &inactive_clean_pages, sizeof(ulong), +- "node_zones inactive_clean_pages", +- FAULT_ON_ERROR); +- readmem(node_zones+OFFSET(zone_struct_active_pages), +- KVADDR, &active_pages, sizeof(ulong), +- "node_zones active_pages", FAULT_ON_ERROR); +- readmem(node_zones+OFFSET(zone_struct_pages_min), +- KVADDR, &pages_min, sizeof(ulong), +- "node_zones pages_min", FAULT_ON_ERROR); +- readmem(node_zones+OFFSET(zone_struct_pages_low), +- KVADDR, &pages_low, sizeof(ulong), +- "node_zones pages_low", FAULT_ON_ERROR); +- readmem(node_zones+OFFSET(zone_struct_pages_high), +- KVADDR, &pages_high, sizeof(ulong), +- "node_zones pages_high", FAULT_ON_ERROR); ++ readmem(si->cache+OFFSET(kmem_cache_s_gfporder), ++ KVADDR, &tmp_val, sizeof(uint), ++ "gfporder", FAULT_ON_ERROR); ++ si->order = (ulong)tmp_val; + +- readmem(node_zones+OFFSET(zone_struct_name), KVADDR, +- &value, sizeof(void *), +- "node_zones name", FAULT_ON_ERROR); +- if (read_string(value, buf1, BUFSIZE-1)) +- sprintf(namebuf, "%-8s", buf1); +- else +- sprintf(namebuf, "(unknown)"); ++ readmem(si->cache+OFFSET(kmem_cache_s_num), ++ KVADDR, &tmp_val, sizeof(uint), ++ "kmem_cache_s num", FAULT_ON_ERROR); ++ si->c_num = (ulong)tmp_val; + +- sprintf(buf2, "%ld/%ld/%ld", +- pages_min, pages_low, pages_high); +- fprintf(fp, "%3d %s %7ld %7ld %15ld %15ld %s\n", +- i, +- namebuf, +- free_pages, +- active_pages, +- inactive_dirty_pages, +- inactive_clean_pages, +- mkstring(buf3, strlen("MIN/LOW/HIGH"), +- CENTER, buf2)); ++ if( vt->flags & PERCPU_KMALLOC_V2_NODES ) ++ do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si); ++ else ++ do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); + +- node_zones += SIZE(zone_struct); ++ if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { ++ DUMP_KMEM_CACHE_INFO_V2(); ++ if (CRASHDEBUG(3)) ++ dump_struct("kmem_cache_s", si->cache, 0); + } +- } + +- return TRUE; +-} ++ if (si->flags == GET_SLAB_PAGES) ++ si->retval += (si->num_slabs * ++ (si->slabsize/PAGESIZE())); + ++ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { + +-/* +- * Dump the num "order" contents of the zone_t free_area array. +- */ +-char *free_area_hdr3 = "AREA SIZE FREE_AREA_STRUCT\n"; +-char *free_area_hdr4 = "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; ++ if (!(vt->flags & PERCPU_KMALLOC_V2_NODES)) ++ gather_cpudata_list_v2(si); + +-static int +-dump_zone_free_area(ulong free_area, int num, ulong verbose) +-{ +- int i; +- long chunk_size; +- int flen, total_free, cnt; +- char buf[BUFSIZE]; +- ulong free_area_buf[3]; +- struct list_data list_data, *ld; ++ si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
++ vaddr_to_slab(si->spec_addr) : 0; + +- if (VALID_STRUCT(free_area_struct)) { +- if (SIZE(free_area_struct) != (3 * sizeof(ulong))) +- error(FATAL, +- "unrecognized free_area_struct size: %ld\n", +- SIZE(free_area_struct)); +- } else if (VALID_STRUCT(free_area)) { +- if (SIZE(free_area) != (3 * sizeof(ulong))) +- error(FATAL, +- "unrecognized free_area struct size: %ld\n", +- SIZE(free_area)); +- } else error(FATAL, +- "neither free_area_struct or free_area structures exist\n"); ++ if (vt->flags & PERCPU_KMALLOC_V2_NODES) ++ do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si); ++ else ++ do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); + +- ld = &list_data; ++ if (si->found) { ++ fprintf(fp, kmem_cache_hdr); ++ DUMP_KMEM_CACHE_INFO_V2(); ++ fprintf(fp, slab_hdr); ++ gather_slab_cached_count(si); ++ DUMP_SLAB_INFO(); + +- if (!verbose) +- fprintf(fp, free_area_hdr4); ++ switch (si->found) ++ { ++ case KMEM_BUFCTL_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp,"(kmem_bufctl_t)\n"); ++ break; + +- total_free = 0; +- flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); ++ case KMEM_SLAB_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, "(slab)\n"); ++ break; + +- for (i = 0; i < num; i++, +- free_area += SIZE_OPTION(free_area_struct, free_area)) { +- if (verbose) +- fprintf(fp, free_area_hdr3); +- fprintf(fp, "%3d ", i); +- chunk_size = power(2, i); +- sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); +- fprintf(fp, " %7s ", buf); ++ case KMEM_ON_SLAB: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, "(unused part of slab)\n"); ++ break; ++ ++ case KMEM_OBJECT_ADDR_FREE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " %lx\n", ++ (ulong)si->spec_addr); ++ break; + +- readmem(free_area, KVADDR, free_area_buf, +- sizeof(ulong) * 3, "free_area_struct", FAULT_ON_ERROR); ++ case KMEM_OBJECT_ADDR_INUSE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " [%lx]\n", ++ (ulong)si->spec_addr); ++ break; + +- fprintf(fp, "%s ", +- mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_area))); ++ case KMEM_OBJECT_ADDR_CACHED: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, ++ " %lx (cpu %d cache)\n", ++ (ulong)si->spec_addr, si->cpu); ++ break; ++ ++ case KMEM_OBJECT_ADDR_SHARED: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, ++ " %lx (shared cache)\n", ++ (ulong)si->spec_addr); ++ break; ++ } + +- if (free_area_buf[0] == free_area) { +- if (verbose) +- fprintf(fp, "\n"); +- else +- fprintf(fp, "%6d %6d\n", 0, 0); +- continue; ++ break; ++ } + } +- +- if (verbose) +- fprintf(fp, "\n"); + +- BZERO(ld, sizeof(struct list_data)); +- ld->flags = verbose | RETURN_ON_DUPLICATE; +- ld->start = free_area_buf[0]; +- ld->end = free_area; +- if (VALID_MEMBER(page_list_next)) +- ld->list_head_offset = OFFSET(page_list); +- else if (VALID_MEMBER(page_lru)) +- ld->list_head_offset = OFFSET(page_lru)+ +- OFFSET(list_head_next); +- else error(FATAL, +- "neither page.list or page.lru exist?\n"); ++next_cache: ++ readmem(si->cache+OFFSET(kmem_cache_s_next), ++ KVADDR, &si->cache, sizeof(ulong), ++ "kmem_cache_s next", FAULT_ON_ERROR); + +- cnt = do_list(ld); +- if (cnt < 0) +- error(FATAL, +- "corrupted free list from free_area_struct: %lx\n", +- free_area); ++ if (si->cache != cache_end) ++ si->cache -= OFFSET(kmem_cache_s_next); + +- if (!verbose) +- fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size); ++ } while (si->cache != cache_end); + +- total_free += (cnt * chunk_size); +- } ++ if ((si->flags & ADDRESS_SPECIFIED) && !si->found) ++ error(INFO, "%s: address not found 
in cache: %lx\n", ++ reqname, si->spec_addr); ++ ++ if (si->errors) ++ error(INFO, "%ld error%s encountered\n", ++ si->errors, si->errors > 1 ? "s" : ""); ++ ++ FREEBUF(si->addrlist); ++ FREEBUF(si->kmem_bufctl); ++ for (i = 0; i < vt->kmem_max_cpus; i++) ++ FREEBUF(si->cpudata[i]); ++ FREEBUF(si->shared_array_cache); + +- return total_free; + } + ++ + /* +- * dump_kmeminfo displays basic memory use information typically shown +- * by /proc/meminfo, and then some... ++ * Walk through the slab chain hanging off a kmem_cache_s structure, ++ * gathering basic statistics. ++ * ++ * TBD: Given a specified physical address, determine whether it's in this ++ * slab chain, and whether it's in use or not. + */ + +-char *kmeminfo_hdr = " PAGES TOTAL PERCENTAGE\n"; ++#define INSLAB(obj, si) \ ++ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem) + + static void +-dump_kmeminfo(void) ++do_slab_chain(int cmd, struct meminfo *si) + { +- ulong totalram_pages; +- ulong freeram_pages; +- ulong used_pages; +- ulong shared_pages; +- ulong buffer_pages; +- ulong subtract_buffer_pages; +- ulong totalswap_pages, totalused_pages; +- ulong totalhigh_pages; +- ulong freehighmem_pages; +- ulong totallowmem_pages; +- ulong freelowmem_pages; +- ulong pct; +- ulong value1, value2; +- uint tmp; +- struct meminfo meminfo; +- struct gnu_request req; +- long page_cache_size; +- ulong get_totalram; +- ulong get_buffers; +- ulong get_slabs; +- struct syment *sp_array[2]; +- char buf[BUFSIZE]; ++ ulong tmp, magic; ++ ulong kmem_slab_end; ++ char *kmem_slab_s_buf; + ++ si->slabsize = (power(2, si->order) * PAGESIZE()); + +- BZERO(&meminfo, sizeof(struct meminfo)); +- meminfo.flags = GET_ALL; +- dump_mem_map(&meminfo); +- get_totalram = meminfo.get_totalram; +- shared_pages = meminfo.get_shared; +- get_buffers = meminfo.get_buffers; +- get_slabs = meminfo.get_slabs; ++ kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); + +- fprintf(fp, kmeminfo_hdr); +- /* +- * Get total RAM based upon how the various versions of si_meminfo() +- * have done it, latest to earliest: +- * +- * Prior to 2.3.36, count all mem_map pages minus the reserved ones. +- * From 2.3.36 onwards, use "totalram_pages" if set. +- */ +- if (symbol_exists("totalram_pages")) { +- totalram_pages = vt->totalram_pages ? +- vt->totalram_pages : get_totalram; +- } else +- totalram_pages = get_totalram; ++ switch (cmd) ++ { ++ case SLAB_GET_COUNTS: ++ si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); + +- fprintf(fp, "%10s %7ld %11s ----\n", "TOTAL MEM", +- totalram_pages, pages_to_size(totalram_pages, buf)); ++ if (slab_data_saved(si)) ++ return; + +- /* +- * Get free pages from dump_free_pages() or its associates. +- * Used pages are a free-bee... +- */ +- meminfo.flags = GET_FREE_PAGES; +- vt->dump_free_pages(&meminfo); +- freeram_pages = meminfo.retval; +- pct = (freeram_pages * 100)/totalram_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", +- "FREE", freeram_pages, pages_to_size(freeram_pages, buf), pct); ++ si->num_slabs = si->inuse = 0; + +- used_pages = totalram_pages - freeram_pages; +- pct = (used_pages * 100)/totalram_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", +- "USED", used_pages, pages_to_size(used_pages, buf), pct); ++ if (si->slab == kmem_slab_end) ++ return; + +- /* +- * Get shared pages from dump_mem_map(). Note that this is done +- * differently than the kernel -- it just tallies the non-reserved +- * pages that have a count of greater than 1. 
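As an aside for readers new to this code: the INSLAB() test added above relies on the slab size being a power of two, so masking an object address with ~(slabsize-1) recovers the base of the slab that contains it (the per-cpu variant additionally page-aligns s_mem via SLAB_BASE()). The following standalone sketch is not part of the patch and uses made-up addresses; it only illustrates the arithmetic:

#include <stdio.h>

/* Minimal illustration of the INSLAB()-style mask test: with a
 * power-of-two slab size, clearing the low bits of an object
 * address recovers the slab base it was carved from. */
int main(void)
{
	unsigned long slabsize = 8192;             /* assumed 2-page slab */
	unsigned long s_mem    = 0xc4018000UL;     /* hypothetical slab base */
	unsigned long obj      = s_mem + 3 * 256;  /* third 256-byte object */

	unsigned long base = obj & ~(slabsize - 1);

	printf("object %#lx -> slab base %#lx (%s)\n",
	       obj, base, base == s_mem ? "INSLAB" : "not in slab");
	return 0;
}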
+- */ +- pct = (shared_pages * 100)/totalram_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", +- "SHARED", shared_pages, pages_to_size(shared_pages, buf), pct); ++ kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); + +- subtract_buffer_pages = 0; +- if (symbol_exists("buffermem_pages")) { +- get_symbol_data("buffermem_pages", sizeof(int), &tmp); +- buffer_pages = (ulong)tmp; +- } else if (symbol_exists("buffermem")) { +- get_symbol_data("buffermem", sizeof(int), &tmp); +- buffer_pages = BTOP(tmp); +- } else if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && +- symbol_exists("nr_blockdev_pages")) { +- subtract_buffer_pages = buffer_pages = nr_blockdev_pages(); +- } else +- buffer_pages = 0; ++ do { ++ if (received_SIGINT()) { ++ FREEBUF(kmem_slab_s_buf); ++ restart(0); ++ } + +- pct = (buffer_pages * 100)/totalram_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", +- "BUFFERS", buffer_pages, pages_to_size(buffer_pages, buf), pct); ++ readmem(si->slab, KVADDR, kmem_slab_s_buf, ++ SIZE(kmem_slab_s), "kmem_slab_s buffer", ++ FAULT_ON_ERROR); + +- if (CRASHDEBUG(1)) +- error(NOTE, "pages with buffers: %ld\n", get_buffers); ++ magic = ULONG(kmem_slab_s_buf + ++ OFFSET(kmem_slab_s_s_magic)); + +- /* +- * page_cache_size has evolved from a long to an atomic_t to +- * not existing at all. +- */ ++ if (magic == SLAB_MAGIC_ALLOC) { + +- if (symbol_exists("page_cache_size")) { +- get_symbol_type("page_cache_size", NULL, &req); +- if (req.length == sizeof(int)) { +- get_symbol_data("page_cache_size", sizeof(int), &tmp); +- page_cache_size = (long)tmp; +- } else +- get_symbol_data("page_cache_size", sizeof(long), +- &page_cache_size); +- } else if (symbol_exists("nr_pagecache")) { +- get_symbol_data("nr_pagecache", sizeof(int), &tmp); +- page_cache_size = (long)tmp; +- } +- +- page_cache_size -= subtract_buffer_pages; +- +- pct = (page_cache_size * 100)/totalram_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", +- "CACHED", page_cache_size, +- pages_to_size(page_cache_size, buf), pct); +- +- /* +- * Although /proc/meminfo doesn't show it, show how much memory +- * the slabs take up. +- */ +- +- pct = (get_slabs * 100)/totalram_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", +- "SLAB", get_slabs, pages_to_size(get_slabs, buf), pct); +- +- if (symbol_exists("totalhigh_pages")) { +- switch (get_syment_array("totalhigh_pages", sp_array, 2)) +- { +- case 1: +- get_symbol_data("totalhigh_pages", sizeof(ulong), +- &totalhigh_pages); +- break; +- case 2: +- if (!(readmem(sp_array[0]->value, KVADDR, +- &value1, sizeof(ulong), +- "totalhigh_pages #1", RETURN_ON_ERROR))) +- break; +- if (!(readmem(sp_array[1]->value, KVADDR, +- &value2, sizeof(ulong), +- "totalhigh_pages #2", RETURN_ON_ERROR))) +- break; +- totalhigh_pages = MAX(value1, value2); +- break; +- } +- +- pct = totalhigh_pages ? +- (totalhigh_pages * 100)/totalram_pages : 0; +- fprintf(fp, "\n%10s %7ld %11s %3ld%% of TOTAL MEM\n", +- "TOTAL HIGH", totalhigh_pages, +- pages_to_size(totalhigh_pages, buf), pct); +- +- meminfo.flags = GET_FREE_HIGHMEM_PAGES; +- vt->dump_free_pages(&meminfo); +- freehighmem_pages = meminfo.retval; +- pct = freehighmem_pages ? 
+- (freehighmem_pages * 100)/totalhigh_pages : 0; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL HIGH\n", +- "FREE HIGH", freehighmem_pages, +- pages_to_size(freehighmem_pages, buf), pct); +- +- totallowmem_pages = totalram_pages - totalhigh_pages; +- pct = (totallowmem_pages * 100)/totalram_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", +- "TOTAL LOW", totallowmem_pages, +- pages_to_size(totallowmem_pages, buf), pct); +- +- freelowmem_pages = freeram_pages - freehighmem_pages; +- pct = (freelowmem_pages * 100)/totallowmem_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL LOW\n", +- "FREE LOW", freelowmem_pages, +- pages_to_size(freelowmem_pages, buf), pct); +- } ++ tmp = ULONG(kmem_slab_s_buf + ++ OFFSET(kmem_slab_s_s_inuse)); ++ ++ si->inuse += tmp; ++ si->num_slabs++; ++ } else { ++ fprintf(fp, ++ "\"%s\" cache: invalid s_magic: %lx\n", ++ si->curname, magic); ++ si->errors++; ++ FREEBUF(kmem_slab_s_buf); ++ return; ++ } ++ ++ si->slab = ULONG(kmem_slab_s_buf + ++ OFFSET(kmem_slab_s_s_nextp)); ++ ++ } while (si->slab != kmem_slab_end); ++ ++ FREEBUF(kmem_slab_s_buf); ++ save_slab_data(si); ++ break; + +- /* +- * get swap data from dump_swap_info(). +- */ +- fprintf(fp, "\n"); +- if (dump_swap_info(RETURN_ON_ERROR, &totalswap_pages, +- &totalused_pages)) { +- fprintf(fp, "%10s %7ld %11s ----\n", +- "TOTAL SWAP", totalswap_pages, +- pages_to_size(totalswap_pages, buf)); +- pct = totalswap_pages ? (totalused_pages * 100) / +- totalswap_pages : 100; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL SWAP\n", +- "SWAP USED", totalused_pages, +- pages_to_size(totalused_pages, buf), pct); +- pct = totalswap_pages ? ((totalswap_pages - totalused_pages) * +- 100) / totalswap_pages : 0; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL SWAP\n", +- "SWAP FREE", +- totalswap_pages - totalused_pages, +- pages_to_size(totalswap_pages - totalused_pages, buf), +- pct); +- } else +- error(INFO, "swap_info[%ld].swap_map at %lx is unaccessible\n", +- totalused_pages, totalswap_pages); ++ case SLAB_WALKTHROUGH: ++ if (!si->slab) ++ si->slab = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_firstp)); + +- dump_zone_page_usage(); +-} ++ if (si->slab == kmem_slab_end) ++ return; + +-/* +- * Emulate 2.6 nr_blockdev_pages() function. 
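The removed comment above refers to emulating 2.6's nr_blockdev_pages(), which sums bdev->bd_inode->i_mapping->nrpages over every block device on the all_bdevs list. Purely as an illustration of that pointer chase, a userspace sketch might look like the following; the struct layouts are invented stand-ins, not the kernel's:

#include <stdio.h>

/* Invented stand-ins for the structures that nr_blockdev_pages()
 * walks: block_device -> inode -> address_space -> nrpages. */
struct address_space { unsigned long nrpages; };
struct inode         { struct address_space *i_mapping; };
struct block_device  { struct inode *bd_inode; struct block_device *next; };

static unsigned long nr_blockdev_pages(struct block_device *head)
{
	unsigned long total = 0;
	for (struct block_device *bdev = head; bdev; bdev = bdev->next)
		total += bdev->bd_inode->i_mapping->nrpages;
	return total;
}

int main(void)
{
	struct address_space a1 = { 120 }, a2 = { 34 };
	struct inode i1 = { &a1 }, i2 = { &a2 };
	struct block_device b2 = { &i2, NULL }, b1 = { &i1, &b2 };

	printf("buffer pages: %lu\n", nr_blockdev_pages(&b1));  /* 154 */
	return 0;
}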
+- */ +-static ulong +-nr_blockdev_pages(void) +-{ +- struct list_data list_data, *ld; +- ulong *bdevlist; +- int i, bdevcnt; +- ulong inode, address_space; +- ulong nrpages; +- char *block_device_buf, *inode_buf, *address_space_buf; ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "search cache: [%s] ", si->curname); ++ if (si->flags & ADDRESS_SPECIFIED) ++ fprintf(fp, "for %llx", si->spec_addr); ++ fprintf(fp, "\n"); ++ } + +- block_device_buf = GETBUF(SIZE(block_device)); +- inode_buf = GETBUF(SIZE(inode)); +- address_space_buf = GETBUF(SIZE(address_space)); ++ si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); + +- ld = &list_data; +- BZERO(ld, sizeof(struct list_data)); ++ do { ++ if (received_SIGINT()) { ++ FREEBUF(kmem_slab_s_buf); ++ restart(0); ++ } + +- get_symbol_data("all_bdevs", sizeof(void *), &ld->start); +- ld->end = symbol_value("all_bdevs"); +- ld->list_head_offset = OFFSET(block_device_bd_list); ++ readmem(si->slab, KVADDR, kmem_slab_s_buf, ++ SIZE(kmem_slab_s), "kmem_slab_s buffer", ++ FAULT_ON_ERROR); + +- hq_open(); +- bdevcnt = do_list(ld); +- bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong)); +- bdevcnt = retrieve_list(bdevlist, bdevcnt); +- hq_close(); ++ dump_slab(si); ++ ++ if (si->found) { ++ FREEBUF(kmem_slab_s_buf); ++ return; ++ } ++ ++ si->slab = ULONG(kmem_slab_s_buf + ++ OFFSET(kmem_slab_s_s_nextp)); ++ ++ } while (si->slab != kmem_slab_end); + +- /* +- * go through the block_device list, emulating: +- * +- * ret += bdev->bd_inode->i_mapping->nrpages; +- */ +- for (i = nrpages = 0; i < bdevcnt; i++) { +- readmem(bdevlist[i], KVADDR, block_device_buf, +- SIZE(block_device), "block_device buffer", +- FAULT_ON_ERROR); +- inode = ULONG(block_device_buf + OFFSET(block_device_bd_inode)); +- readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", +- FAULT_ON_ERROR); +- address_space = ULONG(inode_buf + OFFSET(inode_i_mapping)); +- readmem(address_space, KVADDR, address_space_buf, +- SIZE(address_space), "address_space buffer", +- FAULT_ON_ERROR); +- nrpages += ULONG(address_space_buf + +- OFFSET(address_space_nrpages)); ++ FREEBUF(kmem_slab_s_buf); ++ break; + } ++} + +- FREEBUF(bdevlist); +- FREEBUF(block_device_buf); +- FREEBUF(inode_buf); +- FREEBUF(address_space_buf); +- +- return nrpages; +-} + + /* +- * dump_vmlist() displays information from the vmlist. ++ * do_slab_chain() adapted for newer percpu slab format. 
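For orientation, the per-cpu slab walkers introduced in this hunk all follow one pattern: each cache carries three circular lists (full, partial, free), and SLAB_GET_COUNTS simply sums slab->inuse while counting slabs on each list. A minimal userspace model of that tally, using simplified list nodes rather than crash's own types, is:

#include <stdio.h>

/* Simplified model of a slab chain: each of the three chains is a
 * circular list whose head node lives in the cache itself. */
struct slab  { int inuse; struct slab *next; };
struct chain { struct slab head; };     /* head.next points at the first slab */

static void tally(struct chain *c, int *slabs, int *inuse)
{
	for (struct slab *s = c->head.next; s != &c->head; s = s->next) {
		(*slabs)++;
		*inuse += s->inuse;
	}
}

int main(void)
{
	struct chain full, partial, freec;
	struct slab s1 = { 32 }, s2 = { 32 }, s3 = { 7 }, s4 = { 0 };

	/* full: s1 -> s2, partial: s3, free: s4; each list wraps to its head */
	full.head.next = &s1;    s1.next = &s2;  s2.next = &full.head;
	partial.head.next = &s3; s3.next = &partial.head;
	freec.head.next = &s4;   s4.next = &freec.head;

	int slabs = 0, inuse = 0;
	tally(&full, &slabs, &inuse);
	tally(&partial, &slabs, &inuse);
	tally(&freec, &slabs, &inuse);
	printf("%d slabs, %d objects in use\n", slabs, inuse);  /* 4 slabs, 71 */
	return 0;
}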
+ */ + +-static void +-dump_vmlist(struct meminfo *vi) +-{ +- char buf[BUFSIZE]; +- char buf1[BUFSIZE]; +- char buf2[BUFSIZE]; +- ulong vmlist; +- ulong addr, size, next, pcheck; +- physaddr_t paddr; ++#define SLAB_BASE(X) (PTOB(BTOP(X))) + +- get_symbol_data("vmlist", sizeof(void *), &vmlist); +- next = vmlist; ++#define INSLAB_PERCPU(obj, si) \ ++ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem)) + +- while (next) { +- if ((next == vmlist) && +- !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC))) { +- fprintf(fp, "%s ", +- mkstring(buf, MAX(strlen("VM_STRUCT"), VADDR_PRLEN), +- CENTER|LJUST, "VM_STRUCT")); +- fprintf(fp, "%s SIZE\n", +- mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "), +- CENTER|LJUST, "ADDRESS RANGE")); +- } ++#define SLAB_CHAINS (3) + +- readmem(next+OFFSET(vm_struct_addr), KVADDR, +- &addr, sizeof(void *), +- "vmlist addr", FAULT_ON_ERROR); +- readmem(next+OFFSET(vm_struct_size), KVADDR, +- &size, sizeof(ulong), +- "vmlist size", FAULT_ON_ERROR); ++static char *slab_chain_name_v1[] = {"full", "partial", "free"}; + +- if (!(vi->flags & ADDRESS_SPECIFIED) || +- ((vi->memtype == KVADDR) && +- ((vi->spec_addr >= addr) && (vi->spec_addr < (addr+size))))) +- fprintf(fp, "%s%s %s - %s %6ld\n", +- mkstring(buf,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, +- MKSTR(next)), space(MINSPACE-1), +- mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST, +- MKSTR(addr)), +- mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST, +- MKSTR(addr+size)), +- size); ++static void ++do_slab_chain_percpu_v1(long cmd, struct meminfo *si) ++{ ++ int i, tmp, s; ++ int list_borked; ++ char *slab_s_buf; ++ ulong specified_slab; ++ ulong last; ++ ulong slab_chains[SLAB_CHAINS]; + +- if ((vi->flags & ADDRESS_SPECIFIED) && +- (vi->memtype == PHYSADDR)) { +- for (pcheck = addr; pcheck < (addr+size); +- pcheck += PAGESIZE()) { +- if (!kvtop(NULL, pcheck, &paddr, 0)) +- continue; +- if ((vi->spec_addr >= paddr) && +- (vi->spec_addr < (paddr+PAGESIZE()))) { +- if (vi->flags & GET_PHYS_TO_VMALLOC) { +- vi->retval = pcheck + +- PAGEOFFSET(paddr); +- return; +- } else +- fprintf(fp, +- "%s%s %s - %s %6ld\n", +- mkstring(buf, VADDR_PRLEN, +- LONG_HEX|CENTER|LJUST, +- MKSTR(next)), space(MINSPACE-1), +- mkstring(buf1, VADDR_PRLEN, +- LONG_HEX|RJUST, MKSTR(addr)), +- mkstring(buf2, VADDR_PRLEN, +- LONG_HEX|LJUST, +- MKSTR(addr+size)), size); +- break; +- } +- } ++ list_borked = 0; ++ si->slabsize = (power(2, si->order) * PAGESIZE()); ++ si->cpucached_slab = 0; + +- } ++ if (VALID_MEMBER(kmem_cache_s_slabs)) { ++ slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs); ++ slab_chains[1] = 0; ++ slab_chains[2] = 0; ++ } else { ++ slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full); ++ slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial); ++ slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free); ++ } + +- readmem(next+OFFSET(vm_struct_next), +- KVADDR, &next, sizeof(void *), +- "vmlist next", FAULT_ON_ERROR); ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "[ %s: %lx ", si->curname, si->cache); ++ fprintf(fp, "full: %lx partial: %lx free: %lx ]\n", ++ slab_chains[0], slab_chains[1], slab_chains[2]); + } + +- if (vi->flags & GET_HIGHEST) +- vi->retval = addr+size; +-} ++ switch (cmd) ++ { ++ case SLAB_GET_COUNTS: ++ si->flags |= SLAB_GET_COUNTS; ++ si->flags &= ~SLAB_WALKTHROUGH; ++ si->cpucached_cache = 0; ++ si->num_slabs = si->inuse = 0; ++ gather_cpudata_list_v1(si); + +-/* +- * dump_page_lists() displays information from the active_list, +- * inactive_dirty_list and inactive_clean_list from each zone. 
+- */ +-static int +-dump_page_lists(struct meminfo *mi) +-{ +- int i, c, n, retval; +- ulong node_zones, pgdat; +- struct node_table *nt; +- struct list_data list_data, *ld; +- char buf[BUFSIZE]; +- ulong value; +- ulong inactive_clean_pages, inactive_clean_list; +- int nr_active_pages, nr_inactive_pages; +- int nr_inactive_dirty_pages; ++ slab_s_buf = GETBUF(SIZE(slab_s)); + +- ld = &list_data; ++ for (s = 0; s < SLAB_CHAINS; s++) { + +- retval = FALSE; +- nr_active_pages = nr_inactive_dirty_pages = -1; ++ if (!slab_chains[s]) ++ continue; + +- BZERO(ld, sizeof(struct list_data)); +- ld->list_head_offset = OFFSET(page_lru); +- if (mi->flags & ADDRESS_SPECIFIED) +- ld->searchfor = mi->spec_addr; +- else if (mi->flags & VERBOSE) +- ld->flags |= VERBOSE; ++ if (!readmem(slab_chains[s], ++ KVADDR, &si->slab, sizeof(ulong), ++ "first slab", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, ++ "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, slab_chain_name_v1[s], ++ slab_chains[s]); ++ list_borked = 1; ++ continue; ++ } + +- if (mi->flags & GET_ACTIVE_LIST) { +- if (!symbol_exists("active_list")) +- error(FATAL, +- "active_list does not exist in this kernel\n"); ++ if (slab_data_saved(si)) { ++ FREEBUF(slab_s_buf); ++ return; ++ } ++ ++ if (si->slab == slab_chains[s]) ++ continue; ++ ++ last = slab_chains[s]; + +- if (symbol_exists("nr_active_pages")) +- get_symbol_data("nr_active_pages", sizeof(int), +- &nr_active_pages); +- else +- error(FATAL, +- "nr_active_pages does not exist in this kernel\n"); ++ do { ++ if (received_SIGINT()) { ++ FREEBUF(slab_s_buf); ++ restart(0); ++ } + +- ld->end = symbol_value("active_list"); +- readmem(ld->end, KVADDR, &ld->start, sizeof(void *), +- "LIST_HEAD contents", FAULT_ON_ERROR); ++ if (!verify_slab_v1(si, last, s)) { ++ list_borked = 1; ++ continue; ++ } ++ last = si->slab - OFFSET(slab_s_list); ++ ++ readmem(si->slab, KVADDR, slab_s_buf, ++ SIZE(slab_s), "slab_s buffer", ++ FAULT_ON_ERROR); ++ ++ tmp = INT(slab_s_buf + OFFSET(slab_s_inuse)); ++ si->inuse += tmp; ++ ++ if (ACTIVE()) ++ gather_cpudata_list_v1(si); ++ ++ si->s_mem = ULONG(slab_s_buf + ++ OFFSET(slab_s_s_mem)); ++ gather_slab_cached_count(si); ++ ++ si->num_slabs++; + +- if (mi->flags & VERBOSE) +- fprintf(fp, "active_list:\n"); ++ si->slab = ULONG(slab_s_buf + ++ OFFSET(slab_s_list)); ++ si->slab -= OFFSET(slab_s_list); + +- if (ld->start == ld->end) { +- c = 0; +- ld->searchfor = 0; +- if (mi->flags & VERBOSE) +- fprintf(fp, "(empty)\n"); +- } else { +- hq_open(); +- c = do_list(ld); +- hq_close(); ++ /* ++ * Check for slab transition. (Tony Dziedzic) ++ */ ++ for (i = 0; i < SLAB_CHAINS; i++) { ++ if ((i != s) && ++ (si->slab == slab_chains[i])) { ++ error(NOTE, ++ "%s: slab chain inconsistency: %s list\n", ++ si->curname, ++ slab_chain_name_v1[s]); ++ list_borked = 1; ++ } ++ } ++ ++ } while (si->slab != slab_chains[s] && !list_borked); + } + +- if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { +- fprintf(fp, "%lx\n", ld->searchfor); +- retval = TRUE; +- } else { +- fprintf(fp, "%snr_active_pages: %d ", +- mi->flags & VERBOSE ? 
"\n" : "", +- nr_active_pages); +- if (c != nr_active_pages) +- fprintf(fp, "(found %d)\n", c); +- else +- fprintf(fp, "(verified)\n"); +- } +- } ++ FREEBUF(slab_s_buf); ++ if (!list_borked) ++ save_slab_data(si); ++ break; + +- if (mi->flags & GET_INACTIVE_LIST) { +- if (!symbol_exists("inactive_list")) +- error(FATAL, +- "inactive_list does not exist in this kernel\n"); ++ case SLAB_WALKTHROUGH: ++ specified_slab = si->slab; ++ si->flags |= SLAB_WALKTHROUGH; ++ si->flags &= ~SLAB_GET_COUNTS; + +- if (symbol_exists("nr_inactive_pages")) +- get_symbol_data("nr_inactive_pages", sizeof(int), +- &nr_inactive_pages); +- else +- error(FATAL, +- "nr_active_pages does not exist in this kernel\n"); ++ for (s = 0; s < SLAB_CHAINS; s++) { ++ if (!slab_chains[s]) ++ continue; + +- ld->end = symbol_value("inactive_list"); +- readmem(ld->end, KVADDR, &ld->start, sizeof(void *), +- "LIST_HEAD contents", FAULT_ON_ERROR); +- +- if (mi->flags & VERBOSE) +- fprintf(fp, "inactive_list:\n"); ++ if (!specified_slab) { ++ if (!readmem(slab_chains[s], ++ KVADDR, &si->slab, sizeof(ulong), ++ "slabs", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, ++ "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, ++ slab_chain_name_v1[s], ++ slab_chains[s]); ++ list_borked = 1; ++ continue; ++ } ++ last = slab_chains[s]; ++ } else ++ last = 0; ++ ++ if (si->slab == slab_chains[s]) ++ continue; + +- if (ld->start == ld->end) { +- c = 0; +- ld->searchfor = 0; +- if (mi->flags & VERBOSE) +- fprintf(fp, "(empty)\n"); +- } else { +- hq_open(); +- c = do_list(ld); +- hq_close(); +- } ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "search cache: [%s] ", si->curname); ++ if (si->flags & ADDRESS_SPECIFIED) ++ fprintf(fp, "for %llx", si->spec_addr); ++ fprintf(fp, "\n"); ++ } ++ ++ do { ++ if (received_SIGINT()) ++ restart(0); + +- if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { +- fprintf(fp, "%lx\n", ld->searchfor); +- retval = TRUE; +- } else { +- fprintf(fp, "%snr_inactive_pages: %d ", +- mi->flags & VERBOSE ? "\n" : "", +- nr_inactive_pages); +- if (c != nr_inactive_pages) +- fprintf(fp, "(found %d)\n", c); +- else +- fprintf(fp, "(verified)\n"); ++ if (!verify_slab_v1(si, last, s)) { ++ list_borked = 1; ++ continue; ++ } ++ last = si->slab - OFFSET(slab_s_list); ++ ++ dump_slab_percpu_v1(si); ++ ++ if (si->found) { ++ return; ++ } ++ ++ readmem(si->slab+OFFSET(slab_s_list), ++ KVADDR, &si->slab, sizeof(ulong), ++ "slab list", FAULT_ON_ERROR); ++ ++ si->slab -= OFFSET(slab_s_list); ++ ++ } while (si->slab != slab_chains[s] && !list_borked); + } ++ ++ break; + } ++} + +- if (mi->flags & GET_INACTIVE_DIRTY) { +- if (!symbol_exists("inactive_dirty_list")) +- error(FATAL, +- "inactive_dirty_list does not exist in this kernel\n"); ++/* ++ * Try to preclude any attempt to translate a bogus slab structure. ++ */ + +- if (symbol_exists("nr_inactive_dirty_pages")) +- get_symbol_data("nr_inactive_dirty_pages", sizeof(int), +- &nr_inactive_dirty_pages); +- else +- error(FATAL, +- "nr_inactive_dirty_pages does not exist in this kernel\n"); ++static int ++verify_slab_v1(struct meminfo *si, ulong last, int s) ++{ ++ char slab_s_buf[BUFSIZE]; ++ struct kernel_list_head *list_head; ++ unsigned int inuse; ++ ulong s_mem; ++ char *list; ++ int errcnt; + +- ld->end = symbol_value("inactive_dirty_list"); +- readmem(ld->end, KVADDR, &ld->start, sizeof(void *), +- "LIST_HEAD contents", FAULT_ON_ERROR); ++ list = slab_chain_name_v1[s]; + +- if (mi->flags & VERBOSE) +- fprintf(fp, "%sinactive_dirty_list:\n", +- mi->flags & GET_ACTIVE_LIST ? 
"\n" : ""); ++ errcnt = 0; + +- if (ld->start == ld->end) { +- c = 0; +- ld->searchfor = 0; +- if (mi->flags & VERBOSE) +- fprintf(fp, "(empty)\n"); +- } else { +- hq_open(); +- c = do_list(ld); +- hq_close(); +- } ++ if (!readmem(si->slab, KVADDR, slab_s_buf, ++ SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, list, si->slab); ++ return FALSE; ++ } + +- if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { +- fprintf(fp, "%lx\n", ld->searchfor); +- retval = TRUE; +- } else { +- fprintf(fp, "%snr_inactive_dirty_pages: %d ", +- mi->flags & VERBOSE ? "\n" : "", +- nr_inactive_dirty_pages); +- if (c != nr_inactive_dirty_pages) +- fprintf(fp, "(found %d)\n", c); +- else +- fprintf(fp, "(verified)\n"); +- } +- } ++ list_head = (struct kernel_list_head *) ++ (slab_s_buf + OFFSET(slab_s_list)); + +- if (mi->flags & GET_INACTIVE_CLEAN) { +- if (INVALID_MEMBER(zone_struct_inactive_clean_list)) +- error(FATAL, +- "inactive_clean_list(s) do not exist in this kernel\n"); ++ if (!IS_KVADDR((ulong)list_head->next) || ++ !accessible((ulong)list_head->next)) { ++ error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", ++ si->curname, list, si->slab, ++ (ulong)list_head->next); ++ errcnt++; ++ } + +- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); ++ if (last && (last != (ulong)list_head->prev)) { ++ error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", ++ si->curname, list, si->slab, ++ (ulong)list_head->prev); ++ errcnt++; ++ } + +- if ((mi->flags & VERBOSE) && +- (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY))) +- fprintf(fp, "\n"); ++ inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); ++ if (inuse > si->c_num) { ++ error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } + +- for (n = 0; pgdat; n++) { +- nt = &vt->node_table[n]; ++ if (!last) ++ goto no_inuse_check_v1; + +- node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ switch (s) ++ { ++ case 0: /* full -- but can be one singular list */ ++ if (VALID_MEMBER(kmem_cache_s_slabs_full) && ++ (inuse != si->c_num)) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; + +- for (i = 0; i < vt->nr_zones; i++) { +- readmem(node_zones+OFFSET(zone_struct_name), +- KVADDR, &value, sizeof(void *), +- "zone_struct name", FAULT_ON_ERROR); +- if (!read_string(value, buf, BUFSIZE-1)) +- sprintf(buf, "(unknown) "); ++ case 1: /* partial */ ++ if ((inuse == 0) || (inuse == si->c_num)) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; + +- if (mi->flags & VERBOSE) { +- if (vt->numnodes > 1) +- fprintf(fp, "NODE %d ", n); +- fprintf(fp, +- "\"%s\" inactive_clean_list:\n", +- buf); +- } ++ case 2: /* free */ ++ if (inuse > 0) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; ++ } + +- readmem(node_zones + +- OFFSET(zone_struct_inactive_clean_pages), +- KVADDR, &inactive_clean_pages, +- sizeof(ulong), "inactive_clean_pages", +- FAULT_ON_ERROR); ++no_inuse_check_v1: ++ s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); ++ if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { ++ error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", ++ si->curname, list, si->slab, s_mem); ++ errcnt++; ++ } + +- readmem(node_zones + +- 
OFFSET(zone_struct_inactive_clean_list), +- KVADDR, &inactive_clean_list, +- sizeof(ulong), "inactive_clean_list", +- FAULT_ON_ERROR); ++ return(errcnt ? FALSE : TRUE); ++} + +- ld->start = inactive_clean_list; +- ld->end = node_zones + +- OFFSET(zone_struct_inactive_clean_list); +- if (mi->flags & ADDRESS_SPECIFIED) +- ld->searchfor = mi->spec_addr; ++/* ++ * Updated for 2.6 slab substructure. ++ */ + +- if (ld->start == ld->end) { +- c = 0; +- ld->searchfor = 0; +- if (mi->flags & VERBOSE) +- fprintf(fp, "(empty)\n"); +- } else { +- hq_open(); +- c = do_list(ld); +- hq_close(); +- } ++static char *slab_chain_name_v2[] = {"partial", "full", "free"}; + +- if ((mi->flags & ADDRESS_SPECIFIED) && +- ld->searchfor) { +- fprintf(fp, "%lx\n", ld->searchfor); +- retval = TRUE; +- } else { +- if (vt->numnodes > 1) +- fprintf(fp, "NODE %d ", n); +- fprintf(fp, "\"%s\" ", buf); +- fprintf(fp, +- "inactive_clean_pages: %ld ", +- inactive_clean_pages); +- if (c != inactive_clean_pages) +- fprintf(fp, "(found %d)\n", c); +- else +- fprintf(fp, "(verified)\n"); +- } ++static void ++do_slab_chain_percpu_v2(long cmd, struct meminfo *si) ++{ ++ int i, tmp, s; ++ int list_borked; ++ char *slab_buf; ++ ulong specified_slab; ++ ulong last; ++ ulong slab_chains[SLAB_CHAINS]; + +- node_zones += SIZE(zone_struct); +- } ++ list_borked = 0; ++ si->slabsize = (power(2, si->order) * PAGESIZE()); ++ si->cpucached_slab = 0; + +- readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, +- pglist_data_pgdat_next), KVADDR, +- &pgdat, sizeof(void *), "pglist_data node_next", +- FAULT_ON_ERROR); +- } ++ slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) + ++ OFFSET(kmem_list3_slabs_partial); ++ slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) + ++ OFFSET(kmem_list3_slabs_full); ++ slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) + ++ OFFSET(kmem_list3_slabs_free); ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "[ %s: %lx ", si->curname, si->cache); ++ fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", ++ slab_chains[0], slab_chains[1], slab_chains[2]); + } + +- return retval; +-} ++ switch (cmd) ++ { ++ case SLAB_GET_COUNTS: ++ si->flags |= SLAB_GET_COUNTS; ++ si->flags &= ~SLAB_WALKTHROUGH; ++ si->cpucached_cache = 0; ++ si->num_slabs = si->inuse = 0; ++ gather_cpudata_list_v2(si); + ++ slab_buf = GETBUF(SIZE(slab)); + ++ for (s = 0; s < SLAB_CHAINS; s++) { ++ if (!slab_chains[s]) ++ continue; + +-/* +- * Check whether an address is a kmem_cache_t address, and if so, return +- * a pointer to the static buffer containing its name string. Otherwise +- * return NULL on failure. 
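The verify_slab_v1()/verify_slab_v2() helpers guard these walks by checking that a slab's inuse counter is consistent with the list it was found on: full implies inuse == c_num, partial implies 0 < inuse < c_num, free implies inuse == 0 (v1 relaxes the "full" check when the cache only has a single slabs list). A tiny standalone restatement of just that rule, with invented names and no crash types:

#include <stdio.h>

enum chain { CHAIN_FULL, CHAIN_PARTIAL, CHAIN_FREE };

/* Returns 1 when the inuse counter is plausible for the chain the
 * slab was found on, mirroring the verify_slab_*() checks. */
static int inuse_ok(enum chain c, unsigned int inuse, unsigned int c_num)
{
	if (inuse > c_num)
		return 0;
	switch (c) {
	case CHAIN_FULL:    return inuse == c_num;
	case CHAIN_PARTIAL: return inuse > 0 && inuse < c_num;
	case CHAIN_FREE:    return inuse == 0;
	}
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       inuse_ok(CHAIN_FULL, 32, 32),    /* 1 */
	       inuse_ok(CHAIN_PARTIAL, 0, 32),  /* 0: empty slab on partial list */
	       inuse_ok(CHAIN_FREE, 5, 32));    /* 0: free slab with live objects */
	return 0;
}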
+- */ ++ if (!readmem(slab_chains[s], ++ KVADDR, &si->slab, sizeof(ulong), ++ "first slab", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, ++ "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, ++ slab_chain_name_v2[s], ++ slab_chains[s]); ++ list_borked = 1; ++ continue; ++ } ++ ++ if (slab_data_saved(si)) { ++ FREEBUF(slab_buf); ++ return; ++ } ++ ++ if (si->slab == slab_chains[s]) ++ continue; ++ ++ last = slab_chains[s]; + +-#define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n" ++ do { ++ if (received_SIGINT()) { ++ FREEBUF(slab_buf); ++ restart(0); ++ } + +-static char * +-is_kmem_cache_addr(ulong vaddr, char *kbuf) +-{ +- ulong cache, cache_cache, name; +- long next_offset, name_offset; +- char *cache_buf; ++ if (!verify_slab_v2(si, last, s)) { ++ list_borked = 1; ++ continue; ++ } ++ last = si->slab - OFFSET(slab_list); ++ ++ readmem(si->slab, KVADDR, slab_buf, ++ SIZE(slab), "slab buffer", ++ FAULT_ON_ERROR); ++ ++ tmp = INT(slab_buf + OFFSET(slab_inuse)); ++ si->inuse += tmp; ++ ++ if (ACTIVE()) ++ gather_cpudata_list_v2(si); + +- if (vt->flags & KMEM_CACHE_UNAVAIL) { +- error(INFO, "kmem cache slab subsystem not available\n"); +- return NULL; +- } ++ si->s_mem = ULONG(slab_buf + ++ OFFSET(slab_s_mem)); ++ gather_slab_cached_count(si); ++ ++ si->num_slabs++; ++ ++ si->slab = ULONG(slab_buf + ++ OFFSET(slab_list)); ++ si->slab -= OFFSET(slab_list); ++ ++ /* ++ * Check for slab transition. (Tony Dziedzic) ++ */ ++ for (i = 0; i < SLAB_CHAINS; i++) { ++ if ((i != s) && ++ (si->slab == slab_chains[i])) { ++ error(NOTE, ++ "%s: slab chain inconsistency: %s list\n", ++ si->curname, ++ slab_chain_name_v2[s]); ++ list_borked = 1; ++ } ++ } ++ ++ } while (si->slab != slab_chains[s] && !list_borked); ++ } + +- name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? +- OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); +- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
+- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); ++ FREEBUF(slab_buf); ++ if (!list_borked) ++ save_slab_data(si); ++ break; + +- cache = cache_cache = symbol_value("cache_cache"); ++ case SLAB_WALKTHROUGH: ++ specified_slab = si->slab; ++ si->flags |= SLAB_WALKTHROUGH; ++ si->flags &= ~SLAB_GET_COUNTS; + +- cache_buf = GETBUF(SIZE(kmem_cache_s)); ++ for (s = 0; s < SLAB_CHAINS; s++) { ++ if (!slab_chains[s]) ++ continue; + +- do { +- readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), +- "kmem_cache_s buffer", FAULT_ON_ERROR); ++ if (!specified_slab) { ++ if (!readmem(slab_chains[s], ++ KVADDR, &si->slab, sizeof(ulong), ++ "slabs", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, ++ "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, ++ slab_chain_name_v2[s], ++ slab_chains[s]); ++ list_borked = 1; ++ continue; ++ } ++ last = slab_chains[s]; ++ } else ++ last = 0; ++ ++ if (si->slab == slab_chains[s]) ++ continue; ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "search cache: [%s] ", si->curname); ++ if (si->flags & ADDRESS_SPECIFIED) ++ fprintf(fp, "for %llx", si->spec_addr); ++ fprintf(fp, "\n"); ++ } ++ ++ do { ++ if (received_SIGINT()) ++ restart(0); ++ ++ if (!verify_slab_v2(si, last, s)) { ++ list_borked = 1; ++ continue; ++ } ++ last = si->slab - OFFSET(slab_list); + +- if (cache == vaddr) { +- if (vt->kmem_cache_namelen) { +- BCOPY(cache_buf+name_offset, kbuf, +- vt->kmem_cache_namelen); +- } else { +- name = ULONG(cache_buf + name_offset); +- if (!read_string(name, kbuf, BUFSIZE-1)) { +- if (vt->flags & +- (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- error(FATAL, +- "cannot read kmem_cache_s.name string at %lx\n", +- name); +- else +- error(FATAL, +- "cannot read kmem_cache_s.c_name string at %lx\n", +- name); ++ dump_slab_percpu_v2(si); ++ ++ if (si->found) { ++ return; + } +- } +- FREEBUF(cache_buf); +- return kbuf; ++ ++ readmem(si->slab+OFFSET(slab_list), ++ KVADDR, &si->slab, sizeof(ulong), ++ "slab list", FAULT_ON_ERROR); ++ ++ si->slab -= OFFSET(slab_list); ++ ++ } while (si->slab != slab_chains[s] && !list_borked); + } + +- cache = ULONG(cache_buf + next_offset); +- +- if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- cache -= next_offset; ++ break; ++ } ++} + +- } while (cache != cache_cache); + +- FREEBUF(cache_buf); +- return NULL; +-} ++/* ++* Added To Traverse the Nodelists ++*/ + +-/* +- * Note same functionality as above, but instead it just +- * dumps all slab cache names and their addresses. +- */ + static void +-kmem_cache_list(void) ++do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si) + { +- ulong cache, cache_cache, name; +- long next_offset, name_offset; +- char *cache_buf; +- char buf[BUFSIZE]; +- +- if (vt->flags & KMEM_CACHE_UNAVAIL) { +- error(INFO, "kmem cache slab subsystem not available\n"); +- return; +- } +- +- name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? +- OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); +- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
+- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); +- +- cache = cache_cache = symbol_value("cache_cache"); ++ int i, tmp, s; ++ int list_borked; ++ char *slab_buf; ++ ulong specified_slab; ++ ulong last; ++ ulong slab_chains[SLAB_CHAINS]; ++ ulong *start_address; ++ int index; + +- cache_buf = GETBUF(SIZE(kmem_cache_s)); ++ list_borked = 0; ++ si->slabsize = (power(2, si->order) * PAGESIZE()); ++ si->cpucached_slab = 0; ++ start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); + +- do { +- readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), +- "kmem_cache_s buffer", FAULT_ON_ERROR); ++ if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, ++ &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, ++ "array nodelist array", RETURN_ON_ERROR)) ++ error(INFO, "cannot read kmem_cache nodelists array"); + +- if (vt->kmem_cache_namelen) { +- BCOPY(cache_buf+name_offset, buf, +- vt->kmem_cache_namelen); +- } else { +- name = ULONG(cache_buf + name_offset); +- if (!read_string(name, buf, BUFSIZE-1)) { +- if (vt->flags & +- (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- error(FATAL, +- "cannot read kmem_cache_s.name string at %lx\n", +- name); +- else +- error(FATAL, +- "cannot read kmem_cache_s.c_name string at %lx\n", +- name); ++ switch (cmd) ++ { ++ case SLAB_GET_COUNTS: ++ si->flags |= SLAB_GET_COUNTS; ++ si->flags &= ~SLAB_WALKTHROUGH; ++ si->cpucached_cache = 0; ++ si->num_slabs = si->inuse = 0; ++ slab_buf = GETBUF(SIZE(slab)); ++ for (index=0; (index < vt->kmem_cache_len_nodes) && start_address[index]; index++) ++ { ++ slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); ++ slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); ++ slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); ++ ++ gather_cpudata_list_v2_nodes(si, index); ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "[ %s: %lx ", si->curname, si->cache); ++ fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", ++ slab_chains[0], slab_chains[1], slab_chains[2]); + } +- } + +- fprintf(fp, "%lx %s\n", cache, buf); ++ for (s = 0; s < SLAB_CHAINS; s++) { ++ if (!slab_chains[s]) ++ continue; ++ ++ if (!readmem(slab_chains[s], ++ KVADDR, &si->slab, sizeof(ulong), ++ "first slab", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, ++ "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, ++ slab_chain_name_v2[s], ++ slab_chains[s]); ++ list_borked = 1; ++ continue; ++ } ++ ++ if (slab_data_saved(si)) { ++ FREEBUF(slab_buf); ++ FREEBUF(start_address); ++ return; ++ } ++ ++ if (si->slab == slab_chains[s]) ++ continue; ++ ++ last = slab_chains[s]; + +- cache = ULONG(cache_buf + next_offset); ++ do { ++ if (received_SIGINT()) { ++ FREEBUF(slab_buf); ++ FREEBUF(start_address); ++ restart(0); ++ } + +- if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- cache -= next_offset; ++ if (!verify_slab_v2(si, last, s)) { ++ list_borked = 1; ++ continue; ++ } ++ last = si->slab - OFFSET(slab_list); ++ ++ readmem(si->slab, KVADDR, slab_buf, ++ SIZE(slab), "slab buffer", ++ FAULT_ON_ERROR); ++ ++ tmp = INT(slab_buf + OFFSET(slab_inuse)); ++ si->inuse += tmp; ++ ++ if (ACTIVE()) ++ gather_cpudata_list_v2_nodes(si, index); + +- } while (cache != cache_cache); ++ si->s_mem = ULONG(slab_buf + ++ OFFSET(slab_s_mem)); ++ gather_slab_cached_count(si); ++ ++ si->num_slabs++; ++ ++ si->slab = ULONG(slab_buf + ++ OFFSET(slab_list)); ++ si->slab -= OFFSET(slab_list); + +- FREEBUF(cache_buf); +-} ++ /* ++ * Check for slab transition. 
(Tony Dziedzic) ++ */ ++ for (i = 0; i < SLAB_CHAINS; i++) { ++ if ((i != s) && ++ (si->slab == slab_chains[i])) { ++ error(NOTE, ++ "%s: slab chain inconsistency: %s list\n", ++ si->curname, ++ slab_chain_name_v2[s]); ++ list_borked = 1; ++ } ++ } ++ ++ } while (si->slab != slab_chains[s] && !list_borked); ++ } ++ } + +-/* +- * Translate an address to its physical page number, verify that the +- * page in fact belongs to the slab subsystem, and if so, return the +- * name of the cache to which it belongs. +- */ +-static char * +-vaddr_to_kmem_cache(ulong vaddr, char *buf) +-{ +- physaddr_t paddr; +- ulong page; +- ulong cache; ++ if (!list_borked) ++ save_slab_data(si); ++ break; + +- if (!kvtop(NULL, vaddr, &paddr, 0)) { +- error(WARNING, +- "cannot make virtual-to-physical translation: %lx\n", +- vaddr); +- return NULL; +- } ++ case SLAB_WALKTHROUGH: ++ specified_slab = si->slab; ++ si->flags |= SLAB_WALKTHROUGH; ++ si->flags &= ~SLAB_GET_COUNTS; ++ slab_buf = GETBUF(SIZE(slab)); ++ for (index=0; (index < vt->kmem_cache_len_nodes) && start_address[index]; index++) ++ { ++ slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); ++ slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); ++ slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); ++ ++ gather_cpudata_list_v2_nodes(si, index); ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "[ %s: %lx ", si->curname, si->cache); ++ fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", ++ slab_chains[0], slab_chains[1], slab_chains[2]); ++ } + +- if (!phys_to_page(paddr, &page)) { +- error(WARNING, "cannot find mem_map page for address: %lx\n", +- vaddr); +- return NULL; +- } ++ for (s = 0; s < SLAB_CHAINS; s++) { ++ if (!slab_chains[s]) ++ continue; + +- if (VALID_MEMBER(page_next)) +- readmem(page+OFFSET(page_next), +- KVADDR, &cache, sizeof(void *), +- "page.next", FAULT_ON_ERROR); +- else if (VALID_MEMBER(page_list_next)) +- readmem(page+OFFSET(page_list_next), +- KVADDR, &cache, sizeof(void *), +- "page.list.next", FAULT_ON_ERROR); +- else if (VALID_MEMBER(page_lru)) +- readmem(page+OFFSET(page_lru)+OFFSET(list_head_next), +- KVADDR, &cache, sizeof(void *), +- "page.lru.next", FAULT_ON_ERROR); +- else +- error(FATAL, "cannot determine slab cache from page struct\n"); ++ if (!specified_slab) { ++ if (!readmem(slab_chains[s], ++ KVADDR, &si->slab, sizeof(ulong), ++ "slabs", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, ++ "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, ++ slab_chain_name_v2[s], ++ slab_chains[s]); ++ list_borked = 1; ++ continue; ++ } ++ last = slab_chains[s]; ++ } else ++ last = 0; ++ ++ if (si->slab == slab_chains[s]) ++ continue; ++ ++ readmem(si->slab, KVADDR, slab_buf, ++ SIZE(slab), "slab buffer", ++ FAULT_ON_ERROR); ++ ++ si->s_mem = ULONG(slab_buf + ++ OFFSET(slab_s_mem)); + +- return(is_kmem_cache_addr(cache, buf)); ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "search cache: [%s] ", si->curname); ++ if (si->flags & ADDRESS_SPECIFIED) ++ fprintf(fp, "for %llx", si->spec_addr); ++ fprintf(fp, "\n"); ++ } ++ ++ do { ++ if (received_SIGINT()) ++ { ++ FREEBUF(start_address); ++ FREEBUF(slab_buf); ++ restart(0); ++ } ++ ++ if (!verify_slab_v2(si, last, s)) { ++ list_borked = 1; ++ continue; ++ } ++ last = si->slab - OFFSET(slab_list); ++ ++ dump_slab_percpu_v2(si); ++ ++ if (si->found) { ++ FREEBUF(start_address); ++ FREEBUF(slab_buf); ++ return; ++ } ++ ++ readmem(si->slab+OFFSET(slab_list), ++ KVADDR, &si->slab, sizeof(ulong), ++ "slab list", FAULT_ON_ERROR); ++ ++ si->slab -= 
OFFSET(slab_list); ++ ++ } while (si->slab != slab_chains[s] && !list_borked); ++ } ++ } ++ ++ break; ++ } ++ FREEBUF(slab_buf); ++ FREEBUF(start_address); + } + + /* +- * Translate an address to its physical page number, verify that the +- * page in fact belongs to the slab subsystem, and if so, return the +- * address of the slab to which it belongs. ++ * Try to preclude any attempt to translate a bogus slab structure. + */ +-static ulong +-vaddr_to_slab(ulong vaddr) ++static int ++verify_slab_v2(struct meminfo *si, ulong last, int s) + { +- physaddr_t paddr; +- ulong page; +- ulong slab; ++ char slab_buf[BUFSIZE]; ++ struct kernel_list_head *list_head; ++ unsigned int inuse; ++ ulong s_mem; ++ char *list; ++ int errcnt; + +- if (!kvtop(NULL, vaddr, &paddr, 0)) { +- error(WARNING, +- "cannot make virtual-to-physical translation: %lx\n", +- vaddr); +- return 0; +- } ++ list = slab_chain_name_v2[s]; + +- if (!phys_to_page(paddr, &page)) { +- error(WARNING, "cannot find mem_map page for address: %lx\n", +- vaddr); +- return 0; +- } ++ errcnt = 0; + +- slab = 0; ++ if (!readmem(si->slab, KVADDR, slab_buf, ++ SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, list, si->slab); ++ return FALSE; ++ } + +- if (VALID_MEMBER(page_prev)) +- readmem(page+OFFSET(page_prev), +- KVADDR, &slab, sizeof(void *), +- "page.prev", FAULT_ON_ERROR); +- else if (VALID_MEMBER(page_list_prev)) +- readmem(page+OFFSET(page_list_prev), +- KVADDR, &slab, sizeof(void *), +- "page.list.prev", FAULT_ON_ERROR); +- else if (VALID_MEMBER(page_lru)) +- readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev), +- KVADDR, &slab, sizeof(void *), +- "page.lru.prev", FAULT_ON_ERROR); +- else +- error(FATAL, "unknown definition of struct page?\n"); ++ list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); ++ if (!IS_KVADDR((ulong)list_head->next) || ++ !accessible((ulong)list_head->next)) { ++ error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", ++ si->curname, list, si->slab, ++ (ulong)list_head->next); ++ errcnt++; ++ } + +- return slab; +-} ++ if (last && (last != (ulong)list_head->prev)) { ++ error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", ++ si->curname, list, si->slab, ++ (ulong)list_head->prev); ++ errcnt++; ++ } + ++ inuse = UINT(slab_buf + OFFSET(slab_inuse)); ++ if (inuse > si->c_num) { ++ error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } + +-/* +- * Initialize any data required for scouring the kmalloc subsystem more +- * efficiently. 
+- */ +-char slab_hdr[BUFSIZE] = { 0 }; +-char kmem_cache_hdr[BUFSIZE] = { 0 }; +-char free_inuse_hdr[BUFSIZE] = { 0 }; ++ if (!last) ++ goto no_inuse_check_v2; + +-static void +-kmem_cache_init(void) +-{ +- ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2; +- long cache_count, num_offset, next_offset; +- char *cache_buf; ++ switch (s) ++ { ++ case 0: /* partial */ ++ if ((inuse == 0) || (inuse == si->c_num)) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; + +- if (vt->flags & KMEM_CACHE_UNAVAIL) +- return; ++ case 1: /* full */ ++ if (inuse != si->c_num) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; + +- if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) +- return; ++ case 2: /* free */ ++ if (inuse > 0) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; ++ } + +- if (!strlen(slab_hdr)) +- sprintf(slab_hdr, +- "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", +- space(VADDR_PRLEN > 8 ? 14 : 6), +- space(VADDR_PRLEN > 8 ? 12 : 4)); ++no_inuse_check_v2: ++ s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); ++ if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { ++ error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", ++ si->curname, list, si->slab, s_mem); ++ errcnt++; ++ } + +- if (!strlen(kmem_cache_hdr)) +- sprintf(kmem_cache_hdr, +- "CACHE%sNAME OBJSIZE ALLOCATED TOTAL SLABS SSIZE\n", +- space(VADDR_PRLEN > 8 ? 12 : 4)); ++ return(errcnt ? FALSE : TRUE); ++} + +- if (!strlen(free_inuse_hdr)) +- sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n"); ++/* ++ * If it's a dumpfile, save the essential slab data to avoid re-reading ++ * the whole slab chain more than once. This may seem like overkill, but ++ * if the problem is a memory leak, or just the over-use of the buffer_head ++ * cache, it's painful to wait each time subsequent kmem -s or -i commands ++ * simply need the basic slab counts. ++ */ ++struct slab_data { ++ ulong cache_addr; ++ int num_slabs; ++ int inuse; ++ ulong cpucached_cache; ++}; + +- num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? +- OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num); +- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? +- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); +- max_cnum = max_limit = max_cpus = cache_count = 0; ++#define NO_SLAB_DATA ((void *)(-1)) + +- /* +- * Pre-2.6 versions used the "cache_cache" as the head of the +- * slab chain list. 2.6 uses the "cache_chain" list_head. 
+- */ +- if (vt->flags & PERCPU_KMALLOC_V2) { +- get_symbol_data("cache_chain", sizeof(ulong), &cache); +- cache -= next_offset; +- cache_end = symbol_value("cache_chain"); +- } else +- cache = cache_end = symbol_value("cache_cache"); ++static void ++save_slab_data(struct meminfo *si) ++{ ++ int i; + +- cache_buf = GETBUF(SIZE(kmem_cache_s)); ++ if (si->flags & SLAB_DATA_NOSAVE) { ++ si->flags &= ~SLAB_DATA_NOSAVE; ++ return; ++ } + +- do { +- cache_count++; ++ if (ACTIVE()) ++ return; + +- if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), +- "kmem_cache_s buffer", RETURN_ON_ERROR)) { +- vt->flags |= KMEM_CACHE_UNAVAIL; +- error(INFO, +- "unable to initialize kmem slab cache subsystem\n\n"); ++ if (vt->slab_data == NO_SLAB_DATA) ++ return; ++ ++ if (!vt->slab_data) { ++ if (!(vt->slab_data = (struct slab_data *) ++ malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { ++ error(INFO, "cannot malloc slab_data table"); ++ vt->slab_data = NO_SLAB_DATA; + return; + } ++ for (i = 0; i < vt->kmem_cache_count; i++) { ++ vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; ++ vt->slab_data[i].num_slabs = 0; ++ vt->slab_data[i].inuse = 0; ++ vt->slab_data[i].cpucached_cache = 0; ++ } ++ } + +- tmp = (ulong)(UINT(cache_buf + num_offset)); +- +- if (tmp > max_cnum) +- max_cnum = tmp; ++ for (i = 0; i < vt->kmem_cache_count; i++) { ++ if (vt->slab_data[i].cache_addr == si->cache) ++ break; + +- if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) +- max_limit = tmp; ++ if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { ++ vt->slab_data[i].cache_addr = si->cache; ++ vt->slab_data[i].num_slabs = si->num_slabs; ++ vt->slab_data[i].inuse = si->inuse; ++ vt->slab_data[i].cpucached_cache = si->cpucached_cache; ++ break; ++ } ++ } ++} + +- if (tmp2 > max_cpus) +- max_cpus = tmp2; ++static int ++slab_data_saved(struct meminfo *si) ++{ ++ int i; + +- cache = ULONG(cache_buf + next_offset); ++ if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) ++ return FALSE; + +- switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- { +- case PERCPU_KMALLOC_V1: +- cache -= next_offset; +- break; +- case PERCPU_KMALLOC_V2: +- if (cache != cache_end) +- cache -= next_offset; +- break; ++ for (i = 0; i < vt->kmem_cache_count; i++) { ++ if (vt->slab_data[i].cache_addr == si->cache) { ++ si->inuse = vt->slab_data[i].inuse; ++ si->num_slabs = vt->slab_data[i].num_slabs; ++ si->cpucached_cache = vt->slab_data[i].cpucached_cache; ++ return TRUE; + } ++ } + +- } while (cache != cache_end); ++ return FALSE; ++} + +- FREEBUF(cache_buf); ++static void ++dump_saved_slab_data(void) ++{ ++ int i; + +- vt->kmem_max_c_num = max_cnum; +- vt->kmem_max_limit = max_limit; +- vt->kmem_max_cpus = max_cpus; +- vt->kmem_cache_count = cache_count; ++ if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) ++ return; + +- if (CRASHDEBUG(2)) { +- fprintf(fp, "kmem_cache_init:\n"); +- fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); +- fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); +- fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); +- fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); +- } ++ for (i = 0; i < vt->kmem_cache_count; i++) { ++ if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) ++ break; + +- if (!(vt->flags & KMEM_CACHE_INIT)) { +- if (vt->flags & PERCPU_KMALLOC_V1) +- ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, +- kmem_cache_s_name, "kmem_cache_s.name", +- NULL, sizeof(char)); +- else if (vt->flags & PERCPU_KMALLOC_V2) +- vt->kmem_cache_namelen = 0; 
+- else +- ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, +- kmem_cache_s_c_name, "kmem_cache_s.c_name", +- NULL, 0); ++ fprintf(fp, ++ " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n", ++ vt->slab_data[i].cache_addr, ++ vt->slab_data[i].inuse, ++ vt->slab_data[i].num_slabs, ++ vt->slab_data[i].cpucached_cache); + } +- +- vt->flags |= KMEM_CACHE_INIT; + } + + /* +- * Determine the largest cpudata limit for a given cache. ++ * Dump the contents of a kmem slab. + */ +-static ulong +-max_cpudata_limit(ulong cache, ulong *cpus) ++ ++static void ++dump_slab(struct meminfo *si) + { +- int i; +- ulong cpudata[NR_CPUS]; +- int limit; +- ulong max_limit; ++ uint16_t s_offset; + +- if (vt->flags & PERCPU_KMALLOC_V2) +- goto kmem_cache_s_array; ++ si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem)); ++ si->s_mem = PTOB(BTOP(si->s_mem)); + +- if (INVALID_MEMBER(kmem_cache_s_cpudata)) { +- *cpus = 0; +- return 0; ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) && ++ (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))) { ++ si->found = KMEM_SLAB_ADDR; ++ return; ++ } ++ if (INSLAB(si->spec_addr, si)) ++ si->found = KMEM_ON_SLAB; /* But don't return yet... */ ++ else ++ return; + } + +- readmem(cache+OFFSET(kmem_cache_s_cpudata), +- KVADDR, &cpudata[0], +- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), +- "cpudata array", FAULT_ON_ERROR); ++ si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep)); ++ si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse)); ++ si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index)); ++ s_offset = USHORT(si->slab_buf + OFFSET(kmem_slab_s_s_offset)); + +- for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && +- cpudata[i]; i++) { +- readmem(cpudata[i]+OFFSET(cpucache_s_limit), +- KVADDR, &limit, sizeof(int), +- "cpucache limit", FAULT_ON_ERROR); +- if (limit > max_limit) +- max_limit = limit; ++ if (!(si->flags & ADDRESS_SPECIFIED)) { ++ fprintf(fp, slab_hdr); ++ DUMP_SLAB_INFO(); + } + +- *cpus = i; ++ dump_slab_objects(si); ++} + +- return max_limit; ++/* ++ * dump_slab() adapted for newer percpu slab format. ++ */ + +-kmem_cache_s_array: ++static void ++dump_slab_percpu_v1(struct meminfo *si) ++{ ++ int tmp; + +- readmem(cache+OFFSET(kmem_cache_s_array), +- KVADDR, &cpudata[0], +- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), +- "array cache array", FAULT_ON_ERROR); ++ readmem(si->slab+OFFSET(slab_s_s_mem), ++ KVADDR, &si->s_mem, sizeof(ulong), ++ "s_mem", FAULT_ON_ERROR); + +- for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && +- cpudata[i]; i++) { +- readmem(cpudata[i]+OFFSET(array_cache_limit), +- KVADDR, &limit, sizeof(int), +- "array cache limit", FAULT_ON_ERROR); +- if (limit > max_limit) +- max_limit = limit; ++ /* ++ * Include the array of kmem_bufctl_t's appended to slab. ++ */ ++ tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num); ++ ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INSLAB_PERCPU(si->slab, si) && ++ (si->spec_addr >= si->slab) && ++ (si->spec_addr < (si->slab+tmp))) { ++ if (si->spec_addr >= (si->slab + SIZE(slab_s))) ++ si->found = KMEM_BUFCTL_ADDR; ++ else ++ si->found = KMEM_SLAB_ADDR; ++ } else if (INSLAB_PERCPU(si->spec_addr, si)) ++ si->found = KMEM_ON_SLAB; /* But don't return yet... 
*/ ++ else ++ return; + } + +- *cpus = i; +- return max_limit; ++ readmem(si->slab+OFFSET(slab_s_inuse), ++ KVADDR, &tmp, sizeof(int), ++ "inuse", FAULT_ON_ERROR); ++ si->s_inuse = tmp; ++ ++ readmem(si->slab+OFFSET(slab_s_free), ++ KVADDR, &si->free, SIZE(kmem_bufctl_t), ++ "kmem_bufctl_t", FAULT_ON_ERROR); ++ ++ gather_slab_free_list_percpu(si); ++ gather_slab_cached_count(si); ++ ++ if (!(si->flags & ADDRESS_SPECIFIED)) { ++ fprintf(fp, slab_hdr); ++ DUMP_SLAB_INFO(); ++ } ++ ++ dump_slab_objects_percpu(si); + } + ++ + /* +- * Determine whether the current slab cache is contained in +- * the comma-separated list from a "kmem -I list1,list2 ..." +- * command entry. ++ * Updated for 2.6 slab substructure. + */ +-static int +-ignore_cache(struct meminfo *si, char *name) ++static void ++dump_slab_percpu_v2(struct meminfo *si) + { +- int i, argc; +- char *p1; +- char *arglist[MAXARGS]; +- char buf[BUFSIZE]; ++ int tmp; + +- if (!si->ignore) +- return FALSE; ++ readmem(si->slab+OFFSET(slab_s_mem), ++ KVADDR, &si->s_mem, sizeof(ulong), ++ "s_mem", FAULT_ON_ERROR); + +- strcpy(buf, si->ignore); ++ /* ++ * Include the array of kmem_bufctl_t's appended to slab. ++ */ ++ tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num); + +- p1 = buf; +- while (*p1) { +- if (*p1 == ',') +- *p1 = ' '; +- p1++; +- } ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INSLAB_PERCPU(si->slab, si) && ++ (si->spec_addr >= si->slab) && ++ (si->spec_addr < (si->slab+tmp))) { ++ if (si->spec_addr >= (si->slab + SIZE(slab))) ++ si->found = KMEM_BUFCTL_ADDR; ++ else ++ si->found = KMEM_SLAB_ADDR; ++ } else if (INSLAB_PERCPU(si->spec_addr, si)) ++ si->found = KMEM_ON_SLAB; /* But don't return yet... */ ++ else ++ return; ++ } + +- argc = parse_line(buf, arglist); ++ readmem(si->slab+OFFSET(slab_inuse), ++ KVADDR, &tmp, sizeof(int), ++ "inuse", FAULT_ON_ERROR); ++ si->s_inuse = tmp; + +- for (i = 0; i < argc; i++) { +- if (STREQ(name, arglist[i])) +- return TRUE; ++ readmem(si->slab+OFFSET(slab_free), ++ KVADDR, &si->free, SIZE(kmem_bufctl_t), ++ "kmem_bufctl_t", FAULT_ON_ERROR); ++ ++ gather_slab_free_list_percpu(si); ++ gather_slab_cached_count(si); ++ ++ if (!(si->flags & ADDRESS_SPECIFIED)) { ++ fprintf(fp, slab_hdr); ++ DUMP_SLAB_INFO(); + } + +- return FALSE; ++ dump_slab_objects_percpu(si); + } + + ++ + /* +- * dump_kmem_cache() displays basic information about kmalloc() slabs. +- * At this point, only kmem_cache_s structure data for each slab is dumped. +- * +- * TBD: Given a specified physical address, and determine which slab it came +- * from, and whether it's in use or not. ++ * Gather the free objects in a slab into the si->addrlist, checking for ++ * specified addresses that are in-slab kmem_bufctls, and making error checks ++ * along the way. Object address checks are deferred to dump_slab_objects(). 
+ */ + +-#define SLAB_C_MAGIC 0x4F17A36DUL +-#define SLAB_MAGIC_ALLOC 0xA5C32F2BUL /* slab is alive */ +-#define SLAB_MAGIC_DESTROYED 0xB2F23C5AUL /* slab has been destroyed */ ++#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size))) + +-#define SLAB_CFLGS_BUFCTL 0x020000UL /* bufctls in own cache */ ++static void ++gather_slab_free_list(struct meminfo *si) ++{ ++ ulong *next, obj; ++ ulong expected, cnt; + +-#define KMEM_SLAB_ADDR (1) +-#define KMEM_BUFCTL_ADDR (2) +-#define KMEM_OBJECT_ADDR_FREE (3) +-#define KMEM_OBJECT_ADDR_INUSE (4) +-#define KMEM_OBJECT_ADDR_CACHED (5) +-#define KMEM_ON_SLAB (6) ++ BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); ++ ++ if (!si->s_freep) ++ return; ++ ++ cnt = 0; ++ expected = si->c_num - si->s_inuse; ++ ++ next = si->s_freep; ++ do { ++ ++ if (cnt == si->c_num) { ++ error(INFO, ++ "\"%s\" cache: too many objects found in slab free list\n", ++ si->curname); ++ si->errors++; ++ return; ++ } ++ ++ /* ++ * Off-slab kmem_bufctls are contained in arrays of object ++ * pointers that point to: ++ * 1. next kmem_bufctl (or NULL) if the object is free. ++ * 2. to the object if it the object is in use. ++ * ++ * On-slab kmem_bufctls resides just after the object itself, ++ * and point to: ++ * 1. next kmem_bufctl (or NULL) if object is free. ++ * 2. the containing slab if the object is in use. ++ */ ++ ++ if (si->c_flags & SLAB_CFLGS_BUFCTL) ++ obj = si->s_mem + ((next - si->s_index) * si->c_offset); ++ else ++ obj = (ulong)next - si->c_offset; + +-#define DUMP_KMEM_CACHE_INFO_V1() \ +- { \ +- char b1[BUFSIZE]; \ +- fprintf(fp, "%s %-18s %8ld ", \ +- mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache)), \ +- buf, si->size); \ +- fprintf(fp, "%9ld %8ld %5ld %3ldk\n", \ +- vt->flags & PERCPU_KMALLOC_V1 ? \ +- si->inuse - si->cpucached_cache : \ +- si->inuse, si->num_slabs * si->c_num, \ +- si->num_slabs, si->slabsize/1024); \ +- } ++ si->addrlist[cnt] = obj; + +-#define DUMP_KMEM_CACHE_INFO_V2() dump_kmem_cache_info_v2(si) ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INSLAB(next, si) && ++ (si->spec_addr >= (ulong)next) && ++ (si->spec_addr < (ulong)(next + 1))) { ++ si->found = KMEM_BUFCTL_ADDR; ++ return; ++ } ++ } + +-static void +-dump_kmem_cache_info_v2(struct meminfo *si) +-{ +- char b1[BUFSIZE]; +- char b2[BUFSIZE]; +- int namelen, sizelen, spacelen; ++ cnt++; + +- fprintf(fp, "%s ", +- mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); ++ if (!INSLAB(obj, si)) { ++ error(INFO, ++ "\"%s\" cache: address not contained within slab: %lx\n", ++ si->curname, obj); ++ si->errors++; ++ } + +- namelen = strlen(si->curname); +- sprintf(b2, "%ld", si->size); +- sizelen = strlen(b2); +- spacelen = 0; ++ readmem((ulong)next, KVADDR, &next, sizeof(void *), ++ "s_freep chain entry", FAULT_ON_ERROR); ++ } while (next); + +- if (namelen++ > 18) { +- spacelen = 29 - namelen - sizelen; +- fprintf(fp, "%s%s%ld ", si->curname, +- space(spacelen <= 0 ? 1 : spacelen), si->size); +- if (spacelen > 0) +- spacelen = 1; +- sprintf(b1, "%c%dld ", '%', 9 + spacelen - 1); +- } else { +- fprintf(fp, "%-18s %8ld ", si->curname, si->size); +- sprintf(b1, "%c%dld ", '%', 9); ++ if (cnt != expected) { ++ error(INFO, ++ "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", ++ si->curname, expected, cnt); ++ si->errors++; + } ++} + +- fprintf(fp, b1, vt->flags & (PERCPU_KMALLOC_V2) ? 
+- si->inuse - si->cpucached_cache : si->inuse); + +- fprintf(fp, "%8ld %5ld %3ldk\n", +- si->num_slabs * si->c_num, +- si->num_slabs, si->slabsize/1024); +-} ++/* ++ * gather_slab_free_list() adapted for newer percpu slab format. ++ */ + +-#define DUMP_SLAB_INFO() \ +- { \ +- char b1[BUFSIZE], b2[BUFSIZE]; \ +- ulong allocated, freeobjs; \ +- if (vt->flags & PERCPU_KMALLOC_V1) { \ +- allocated = si->s_inuse - si->cpucached_slab; \ +- freeobjs = si->c_num - allocated - si->cpucached_slab; \ +- } else { \ +- allocated = si->s_inuse; \ +- freeobjs = si->c_num - si->s_inuse; \ +- } \ +- fprintf(fp, "%s %s %5ld %9ld %4ld\n", \ +- mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ +- mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \ +- si->c_num, allocated, \ +- vt->flags & PERCPU_KMALLOC_V1 ? freeobjs + si->cpucached_slab :\ +- freeobjs); \ +- } ++#define BUFCTL_END 0xffffFFFF + + static void +-dump_kmem_cache(struct meminfo *si) ++gather_slab_free_list_percpu(struct meminfo *si) + { +- char buf[BUFSIZE]; +- char kbuf[BUFSIZE]; +- char *reqname; +- ulong cache_cache; +- ulong name, magic; +- int cnt; +- char *p1; +- +- if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- error(FATAL, +- "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); +- +- si->found = si->retval = 0; +- reqname = NULL; +- +- if ((!(si->flags & VERBOSE) || si->reqname) && +- !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) +- fprintf(fp, kmem_cache_hdr); ++ int i; ++ ulong obj; ++ ulong expected, cnt; ++ int free_index; ++ ulong kmembp; ++ short *kbp; + +- si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); +- cnt = 0; +- si->cache = cache_cache = symbol_value("cache_cache"); ++ BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { +- error(INFO, +- "address is not allocated in slab subsystem: %lx\n", +- si->spec_addr); +- return; +- } +- +- if (si->reqname && (si->reqname != p1)) +- error(INFO, +- "ignoring pre-selected %s cache for address: %lx\n", +- si->reqname, si->spec_addr, si->reqname); ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n", ++ si->slab, si->s_inuse, si->c_num); + +- reqname = p1; +- } else +- reqname = si->reqname; ++ if (si->s_inuse == si->c_num ) ++ return; + +- si->cache_buf = GETBUF(SIZE(kmem_cache_s)); ++ kmembp = si->slab + SIZE_OPTION(slab_s, slab); ++ readmem((ulong)kmembp, KVADDR, si->kmem_bufctl, ++ SIZE(kmem_bufctl_t) * si->c_num, ++ "kmem_bufctl array", FAULT_ON_ERROR); + +- do { +- if ((si->flags & VERBOSE) && !si->reqname && +- !(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, "%s%s", cnt++ ? 
"\n" : "", kmem_cache_hdr); ++ if (CRASHDEBUG(1)) { ++ for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) && ++ (i < si->c_num); i++) ++ fprintf(fp, "%d ", si->kmem_bufctl[i]); + +- readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), +- "kmem_cache_s buffer", FAULT_ON_ERROR); ++ for (kbp = (short *)&si->kmem_bufctl[0], i = 0; ++ (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num); ++ i++) ++ fprintf(fp, "%d ", *(kbp + i)); + +- if (vt->kmem_cache_namelen) { +- BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), +- buf, vt->kmem_cache_namelen); +- } else { +- name = ULONG(si->cache_buf + +- OFFSET(kmem_cache_s_c_name)); +- if (!read_string(name, buf, BUFSIZE-1)) +- error(FATAL, +- "cannot read kmem_cache_s.c_name string at %lx\n", +- name); +- } ++ fprintf(fp, "\n"); ++ } + +- if (reqname && !STREQ(reqname, buf)) +- goto next_cache; ++ cnt = 0; ++ expected = si->c_num - si->s_inuse; + +- if (ignore_cache(si, buf)) { +- fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); +- goto next_cache; ++ if (SIZE(kmem_bufctl_t) == sizeof(int)) { ++ for (free_index = si->free; free_index != BUFCTL_END; ++ free_index = si->kmem_bufctl[free_index]) { ++ ++ if (cnt == si->c_num) { ++ error(INFO, ++ "\"%s\" cache: too many objects found in slab free list\n", ++ si->curname); ++ si->errors++; ++ return; ++ } ++ ++ obj = si->s_mem + (free_index*si->size); ++ si->addrlist[cnt] = obj; ++ cnt++; + } ++ } else if (SIZE(kmem_bufctl_t) == sizeof(short)) { ++ kbp = (short *)&si->kmem_bufctl[0]; + +- si->curname = buf; ++ for (free_index = si->free; free_index != BUFCTL_END; ++ free_index = (int)*(kbp + free_index)) { + +- if (CRASHDEBUG(1)) +- fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); +- console("cache: %lx %s\n", si->cache, si->curname); ++ if (cnt == si->c_num) { ++ error(INFO, ++ "\"%s\" cache: too many objects found in slab free list\n", si->curname); ++ si->errors++; ++ return; ++ } + +- magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); ++ obj = si->s_mem + (free_index*si->size); ++ si->addrlist[cnt] = obj; ++ cnt++; ++ } ++ } else ++ error(FATAL, ++ "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n", ++ SIZE(kmem_bufctl_t)); + +- if (magic == SLAB_C_MAGIC) { ++ if (cnt != expected) { ++ error(INFO, ++ "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", ++ si->curname, expected, cnt); ++ si->errors++; ++ } ++} + +- si->size = ULONG(si->cache_buf + +- OFFSET(kmem_cache_s_c_org_size)); +- if (!si->size) { +- if (STREQ(si->curname, "kmem_cache")) +- si->size = SIZE(kmem_cache_s); +- else { +- error(INFO, +- "\"%s\" cache: c_org_size: %ld\n", +- si->curname, si->size); +- si->errors++; +- } +- } +- si->c_flags = ULONG(si->cache_buf + +- OFFSET(kmem_cache_s_c_flags)); +- si->c_offset = ULONG(si->cache_buf + +- OFFSET(kmem_cache_s_c_offset)); +- si->order = ULONG(si->cache_buf + +- OFFSET(kmem_cache_s_c_gfporder)); +- si->c_num = ULONG(si->cache_buf + +- OFFSET(kmem_cache_s_c_num)); + +- do_slab_chain(SLAB_GET_COUNTS, si); + +- if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) +- DUMP_KMEM_CACHE_INFO_V1(); ++/* ++ * Dump the FREE, [ALLOCATED] and objects of a slab. 
++ */ + +- if (si->flags == GET_SLAB_PAGES) +- si->retval += (si->num_slabs * +- (si->slabsize/PAGESIZE())); ++#define DUMP_SLAB_OBJECT() \ ++ for (j = on_free_list = 0; j < si->c_num; j++) { \ ++ if (obj == si->addrlist[j]) { \ ++ on_free_list = TRUE; \ ++ break; \ ++ } \ ++ } \ ++ \ ++ if (on_free_list) { \ ++ if (!(si->flags & ADDRESS_SPECIFIED)) \ ++ fprintf(fp, " %lx\n", obj); \ ++ if (si->flags & ADDRESS_SPECIFIED) { \ ++ if (INOBJECT(si->spec_addr, obj)) { \ ++ si->found = \ ++ KMEM_OBJECT_ADDR_FREE; \ ++ return; \ ++ } \ ++ } \ ++ } else { \ ++ if (!(si->flags & ADDRESS_SPECIFIED)) \ ++ fprintf(fp, " [%lx]\n", obj); \ ++ cnt++; \ ++ if (si->flags & ADDRESS_SPECIFIED) { \ ++ if (INOBJECT(si->spec_addr, obj)) { \ ++ si->found = \ ++ KMEM_OBJECT_ADDR_INUSE; \ ++ return; \ ++ } \ ++ } \ ++ } + +- if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { +- si->slab = (si->flags & ADDRESS_SPECIFIED) ? +- vaddr_to_slab(si->spec_addr) : 0; +- +- do_slab_chain(SLAB_WALKTHROUGH, si); ++static void ++dump_slab_objects(struct meminfo *si) ++{ ++ int i, j; ++ ulong *next; ++ int on_free_list; ++ ulong cnt, expected; ++ ulong bufctl, obj; + +- if (si->found) { +- fprintf(fp, kmem_cache_hdr); +- DUMP_KMEM_CACHE_INFO_V1(); +- fprintf(fp, slab_hdr); +- DUMP_SLAB_INFO(); ++ gather_slab_free_list(si); + +- switch (si->found) +- { +- case KMEM_BUFCTL_ADDR: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp, +- "(ON-SLAB kmem_bufctl_t)\n"); +- break; ++ if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) ++ return; + +- case KMEM_SLAB_ADDR: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp, +- "(ON-SLAB kmem_slab_t)\n"); +- break; ++ cnt = 0; ++ expected = si->s_inuse; + +- case KMEM_ON_SLAB: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp, +- "(unused part of slab)\n"); +- break; +- +- case KMEM_OBJECT_ADDR_FREE: +- fprintf(fp, free_inuse_hdr); +- fprintf(fp, " %lx\n", +- (ulong)si->spec_addr); +- break; ++ if (CRASHDEBUG(1)) ++ for (i = 0; i < si->c_num; i++) { ++ fprintf(fp, "si->addrlist[%d]: %lx\n", ++ i, si->addrlist[i]); ++ } + +- case KMEM_OBJECT_ADDR_INUSE: +- fprintf(fp, free_inuse_hdr); +- fprintf(fp, " [%lx]\n", +- (ulong)si->spec_addr); +- break; +- } ++ if (!(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, free_inuse_hdr); + +- break; +- } +- } ++ /* For on-slab bufctls, c_offset is the distance between the start of ++ * an obj and its related bufctl. For off-slab bufctls, c_offset is ++ * the distance between objs in the slab. ++ */ + +- } else { +- error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", +- si->curname, magic); +- si->errors++; ++ if (si->c_flags & SLAB_CFLGS_BUFCTL) { ++ for (i = 0, next = si->s_index; i < si->c_num; i++, next++) { ++ obj = si->s_mem + ++ ((next - si->s_index) * si->c_offset); ++ DUMP_SLAB_OBJECT(); + } ++ } else { ++ /* ++ * Get the "real" s_mem, i.e., without the offset stripped off. ++ * It contains the address of the first object. 
++ */ ++ readmem(si->slab+OFFSET(kmem_slab_s_s_mem), ++ KVADDR, &obj, sizeof(ulong), ++ "s_mem", FAULT_ON_ERROR); + +-next_cache: +- si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp)); ++ for (i = 0; i < si->c_num; i++) { ++ DUMP_SLAB_OBJECT(); + +- } while (si->cache != cache_cache); ++ if (si->flags & ADDRESS_SPECIFIED) { ++ bufctl = obj + si->c_offset; + +- FREEBUF(si->cache_buf); ++ if ((si->spec_addr >= bufctl) && ++ (si->spec_addr < ++ (bufctl + SIZE(kmem_bufctl_t)))) { ++ si->found = KMEM_BUFCTL_ADDR; ++ return; ++ } ++ } + +- if ((si->flags & ADDRESS_SPECIFIED) && !si->found) +- error(INFO, "%s: address not found in cache: %lx\n", +- reqname, si->spec_addr); +- +- if (si->errors) +- error(INFO, "%ld error%s encountered\n", +- si->errors, si->errors > 1 ? "s" : ""); ++ obj += (si->c_offset + SIZE(kmem_bufctl_t)); ++ } ++ } ++ ++ if (cnt != expected) { ++ error(INFO, ++ "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", ++ si->curname, expected, cnt); ++ si->errors++; ++ } + +- FREEBUF(si->addrlist); + } + ++ + /* +- * dump_kmem_cache() adapted for newer percpu slab format. ++ * dump_slab_objects() adapted for newer percpu slab format. + */ + + static void +-dump_kmem_cache_percpu_v1(struct meminfo *si) ++dump_slab_objects_percpu(struct meminfo *si) + { +- int i; +- char buf[BUFSIZE]; +- char kbuf[BUFSIZE]; +- char *reqname; +- ulong cache_cache; +- ulong name; +- int cnt; +- uint tmp_val; /* Used as temporary variable to read sizeof(int) and +- assigned to ulong variable. We are doing this to mask +- the endian issue */ +- char *p1; ++ int i, j; ++ int on_free_list, on_cpudata_list, on_shared_list; ++ ulong cnt, expected; ++ ulong obj; + +- if (!(vt->flags & PERCPU_KMALLOC_V1)) +- error(FATAL, +- "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n"); ++ if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) ++ return; + +- si->found = si->retval = 0; +- reqname = NULL; ++ cnt = 0; ++ expected = si->s_inuse; + +- if ((!(si->flags & VERBOSE) || si->reqname) && +- !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) +- fprintf(fp, kmem_cache_hdr); ++ if (CRASHDEBUG(1)) ++ for (i = 0; i < si->c_num; i++) { ++ fprintf(fp, "si->addrlist[%d]: %lx\n", ++ i, si->addrlist[i]); ++ } + +- si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); +- si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); +- for (i = 0; i < vt->kmem_max_cpus; i++) +- si->cpudata[i] = (ulong *) +- GETBUF(vt->kmem_max_limit * sizeof(ulong)); ++ if (!(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, free_inuse_hdr); + +- cnt = 0; +- si->cache = cache_cache = symbol_value("cache_cache"); ++ for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { ++ on_free_list = FALSE; ++ on_cpudata_list = FALSE; ++ on_shared_list = FALSE; + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { ++ for (j = 0; j < si->c_num; j++) { ++ if (obj == si->addrlist[j]) { ++ on_free_list = TRUE; ++ break; ++ } ++ } ++ ++ on_cpudata_list = check_cpudata_list(si, obj); ++ on_shared_list = check_shared_list(si, obj); ++ ++ if (on_free_list && on_cpudata_list) { + error(INFO, +- "address is not allocated in slab subsystem: %lx\n", +- si->spec_addr); +- return; ++ "\"%s\" cache: object %lx on both free and cpu %d lists\n", ++ si->curname, obj, si->cpu); ++ si->errors++; + } +- +- if (si->reqname && (si->reqname != p1)) ++ if (on_free_list && on_shared_list) { + error(INFO, +- "ignoring pre-selected %s cache for 
address: %lx\n", +- si->reqname, si->spec_addr, si->reqname); +- reqname = p1; +- } else +- reqname = si->reqname; +- +- do { +- if ((si->flags & VERBOSE) && !si->reqname && +- !(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); +- +- if (vt->kmem_cache_namelen) { +- readmem(si->cache+OFFSET(kmem_cache_s_name), +- KVADDR, buf, vt->kmem_cache_namelen, +- "name array", FAULT_ON_ERROR); +- } else { +- readmem(si->cache+OFFSET(kmem_cache_s_name), +- KVADDR, &name, sizeof(ulong), +- "name", FAULT_ON_ERROR); +- if (!read_string(name, buf, BUFSIZE-1)) +- error(FATAL, +- "cannot read kmem_cache_s.name string at %lx\n", +- name); ++ "\"%s\" cache: object %lx on both free and shared lists\n", ++ si->curname, obj); ++ si->errors++; ++ } ++ if (on_cpudata_list && on_shared_list) { ++ error(INFO, ++ "\"%s\" cache: object %lx on both cpu %d and shared lists\n", ++ si->curname, obj, si->cpu); ++ si->errors++; + } ++ ++ if (on_free_list) { ++ if (!(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, " %lx\n", obj); ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INOBJECT(si->spec_addr, obj)) { ++ si->found = ++ KMEM_OBJECT_ADDR_FREE; ++ return; ++ } ++ } ++ } else if (on_cpudata_list) { ++ if (!(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, " %lx (cpu %d cache)\n", obj, ++ si->cpu); ++ cnt++; ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INOBJECT(si->spec_addr, obj)) { ++ si->found = ++ KMEM_OBJECT_ADDR_CACHED; ++ return; ++ } ++ } ++ } else if (on_shared_list) { ++ if (!(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, " %lx (shared cache)\n", obj); ++ cnt++; ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INOBJECT(si->spec_addr, obj)) { ++ si->found = ++ KMEM_OBJECT_ADDR_SHARED; ++ return; ++ } ++ } ++ } else { ++ if (!(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, " [%lx]\n", obj); ++ cnt++; ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INOBJECT(si->spec_addr, obj)) { ++ si->found = ++ KMEM_OBJECT_ADDR_INUSE; ++ return; ++ } ++ } ++ } ++ } + +- if (reqname && !STREQ(reqname, buf)) +- goto next_cache; ++ if (cnt != expected) { ++ error(INFO, ++ "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", ++ si->curname, expected, cnt); ++ si->errors++; ++ } ++} + +- if (ignore_cache(si, buf)) { +- fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); +- goto next_cache; +- } ++/* ++ * Determine how many of the "inuse" slab objects are actually cached ++ * in the kmem_cache_s header. Set the per-slab count and update the ++ * cumulative per-cache count. With the addition of the shared list ++ * check, the terms "cpucached_cache" and "cpucached_slab" are somewhat ++ * misleading. But they both are types of objects that are cached ++ * in the kmem_cache_s header, just not necessarily per-cpu. 
++ */ + +- si->curname = buf; ++static void ++gather_slab_cached_count(struct meminfo *si) ++{ ++ int i; ++ ulong obj; ++ int in_cpudata, in_shared; + +- readmem(si->cache+OFFSET(kmem_cache_s_objsize), +- KVADDR, &tmp_val, sizeof(uint), +- "objsize", FAULT_ON_ERROR); +- si->size = (ulong)tmp_val; ++ si->cpucached_slab = 0; + +- if (!si->size) { +- if (STREQ(si->curname, "kmem_cache")) +- si->size = SIZE(kmem_cache_s); +- else { +- error(INFO, "\"%s\" cache: objsize: %ld\n", +- si->curname, si->size); +- si->errors++; ++ for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { ++ in_cpudata = in_shared = 0; ++ if (check_cpudata_list(si, obj)) { ++ in_cpudata = TRUE; ++ si->cpucached_slab++; ++ if (si->flags & SLAB_GET_COUNTS) { ++ si->cpucached_cache++; ++ } ++ } ++ if (check_shared_list(si, obj)) { ++ in_shared = TRUE; ++ if (!in_cpudata) { ++ si->cpucached_slab++; ++ if (si->flags & SLAB_GET_COUNTS) { ++ si->cpucached_cache++; ++ } + } ++ } ++ if (in_cpudata && in_shared) { ++ si->flags |= SLAB_DATA_NOSAVE; ++ if (!(si->flags & VERBOSE)) ++ error(INFO, ++ "\"%s\" cache: object %lx on both cpu %d and shared lists\n", ++ si->curname, obj, si->cpu); + } ++ } ++} + +- readmem(si->cache+OFFSET(kmem_cache_s_flags), +- KVADDR, &tmp_val, sizeof(uint), +- "kmem_cache_s flags", FAULT_ON_ERROR); +- si->c_flags = (ulong)tmp_val; ++/* ++ * Populate the percpu object list for a given slab. ++ */ + +- readmem(si->cache+OFFSET(kmem_cache_s_gfporder), +- KVADDR, &tmp_val, sizeof(uint), +- "gfporder", FAULT_ON_ERROR); +- si->order = (ulong)tmp_val; ++static void ++gather_cpudata_list_v1(struct meminfo *si) ++{ ++ int i, j; ++ int avail; ++ ulong cpudata[NR_CPUS]; + +- readmem(si->cache+OFFSET(kmem_cache_s_num), +- KVADDR, &tmp_val, sizeof(uint), +- "kmem_cache_s num", FAULT_ON_ERROR); +- si->c_num = (ulong)tmp_val; ++ if (INVALID_MEMBER(kmem_cache_s_cpudata)) ++ return; + +- do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si); ++ readmem(si->cache+OFFSET(kmem_cache_s_cpudata), ++ KVADDR, &cpudata[0], ++ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), ++ "cpudata array", FAULT_ON_ERROR); + +- if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { +- DUMP_KMEM_CACHE_INFO_V1(); +- if (CRASHDEBUG(3)) +- dump_struct("kmem_cache_s", si->cache, 0); +- } ++ for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && ++ cpudata[i]; i++) { ++ BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); + +- if (si->flags == GET_SLAB_PAGES) +- si->retval += (si->num_slabs * +- (si->slabsize/PAGESIZE())); ++ readmem(cpudata[i]+OFFSET(cpucache_s_avail), ++ KVADDR, &avail, sizeof(int), ++ "cpucache avail", FAULT_ON_ERROR); + +- if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { ++ if (!avail) ++ continue; + +- gather_cpudata_list_v1(si); ++ if (avail > vt->kmem_max_limit) { ++ error(INFO, ++ "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n", ++ si->curname, avail, vt->kmem_max_limit); ++ si->errors++; ++ } + +- si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
+- vaddr_to_slab(si->spec_addr) : 0; ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: cpu[%d] avail: %d\n", ++ si->curname, i, avail); + +- do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si); ++ readmem(cpudata[i]+SIZE(cpucache_s), ++ KVADDR, si->cpudata[i], ++ sizeof(void *) * avail, ++ "cpucache avail", FAULT_ON_ERROR); + +- if (si->found) { +- fprintf(fp, kmem_cache_hdr); +- DUMP_KMEM_CACHE_INFO_V1(); +- fprintf(fp, slab_hdr); +- gather_slab_cached_count(si); +- DUMP_SLAB_INFO(); ++ if (CRASHDEBUG(2)) ++ for (j = 0; j < avail; j++) ++ fprintf(fp, " %lx\n", si->cpudata[i][j]); ++ } ++} + +- switch (si->found) +- { +- case KMEM_BUFCTL_ADDR: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp,"(kmem_bufctl_t)\n"); +- break; ++/* ++ * Updated for 2.6 slab percpu data structure, this also gathers ++ * the shared array_cache list as well. ++ */ ++static void ++gather_cpudata_list_v2(struct meminfo *si) ++{ ++ int i, j; ++ int avail; ++ ulong cpudata[NR_CPUS]; ++ ulong shared; + +- case KMEM_SLAB_ADDR: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp, "(slab_s)\n"); +- break; ++ readmem(si->cache+OFFSET(kmem_cache_s_array), ++ KVADDR, &cpudata[0], ++ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), ++ "array_cache array", FAULT_ON_ERROR); + +- case KMEM_ON_SLAB: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp, "(unused part of slab)\n"); +- break; +- +- case KMEM_OBJECT_ADDR_FREE: +- fprintf(fp, free_inuse_hdr); +- fprintf(fp, " %lx\n", +- (ulong)si->spec_addr); +- break; ++ for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && ++ cpudata[i]; i++) { ++ BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); + +- case KMEM_OBJECT_ADDR_INUSE: +- fprintf(fp, free_inuse_hdr); +- fprintf(fp, " [%lx]\n", +- (ulong)si->spec_addr); +- break; ++ readmem(cpudata[i]+OFFSET(array_cache_avail), ++ KVADDR, &avail, sizeof(int), ++ "array cache avail", FAULT_ON_ERROR); + +- case KMEM_OBJECT_ADDR_CACHED: +- fprintf(fp, free_inuse_hdr); +- fprintf(fp, +- " %lx (cpu %d cache)\n", +- (ulong)si->spec_addr, si->cpu); +- break; +- } ++ if (!avail) ++ continue; + +- break; +- } ++ if (avail > vt->kmem_max_limit) { ++ error(INFO, ++ "\"%s\" cache: array_cache.avail %d greater than limit %ld\n", ++ si->curname, avail, vt->kmem_max_limit); ++ si->errors++; + } + +-next_cache: +- readmem(si->cache+OFFSET(kmem_cache_s_next), +- KVADDR, &si->cache, sizeof(ulong), +- "kmem_cache_s next", FAULT_ON_ERROR); +- +- si->cache -= OFFSET(kmem_cache_s_next); +- +- } while (si->cache != cache_cache); ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: cpu[%d] avail: %d\n", ++ si->curname, i, avail); + +- if ((si->flags & ADDRESS_SPECIFIED) && !si->found) +- error(INFO, "%s: address not found in cache: %lx\n", +- reqname, si->spec_addr); +- +- if (si->errors) +- error(INFO, "%ld error%s encountered\n", +- si->errors, si->errors > 1 ? "s" : ""); ++ readmem(cpudata[i]+SIZE(array_cache), ++ KVADDR, si->cpudata[i], ++ sizeof(void *) * avail, ++ "array_cache avail", FAULT_ON_ERROR); + +- FREEBUF(si->addrlist); +- FREEBUF(si->kmem_bufctl); +- for (i = 0; i < vt->kmem_max_cpus; i++) +- FREEBUF(si->cpudata[i]); ++ if (CRASHDEBUG(2)) ++ for (j = 0; j < avail; j++) ++ fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); ++ } + +-} ++ /* ++ * If the shared list contains anything, gather them as well. 
++ */ ++ BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit); + ++ if (!VALID_MEMBER(kmem_list3_shared) || ++ !VALID_MEMBER(kmem_cache_s_lists) || ++ !readmem(si->cache+OFFSET(kmem_cache_s_lists)+ ++ OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *), ++ "kmem_list3 shared", RETURN_ON_ERROR|QUIET) || ++ !readmem(shared+OFFSET(array_cache_avail), ++ KVADDR, &avail, sizeof(int), "shared array_cache avail", ++ RETURN_ON_ERROR|QUIET) || !avail) ++ return; + +-/* +- * Updated for 2.6 slab substructure. +- */ +-static void +-dump_kmem_cache_percpu_v2(struct meminfo *si) +-{ +- int i; +- char buf[BUFSIZE]; +- char kbuf[BUFSIZE]; +- char *reqname; +- ulong cache_end; +- ulong name; +- int cnt; +- uint tmp_val; /* Used as temporary variable to read sizeof(int) and +- assigned to ulong variable. We are doing this to mask +- the endian issue */ +- char *p1; ++ if (avail > vt->kmem_max_limit) { ++ error(INFO, ++ "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", ++ si->curname, avail, vt->kmem_max_limit); ++ si->errors++; ++ return; ++ } + +- if (!(vt->flags & PERCPU_KMALLOC_V2)) +- error(FATAL, +- "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n"); ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: shared avail: %d\n", ++ si->curname, avail); + +- si->found = si->retval = 0; +- reqname = NULL; ++ readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache, ++ sizeof(void *) * avail, "shared array_cache avail", ++ FAULT_ON_ERROR); + +- if ((!(si->flags & VERBOSE) || si->reqname) && +- !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) +- fprintf(fp, kmem_cache_hdr); ++ if (CRASHDEBUG(2)) ++ for (j = 0; j < avail; j++) ++ fprintf(fp, " %lx (shared list)\n", si->shared_array_cache[j]); ++} + +- si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); +- si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); +- for (i = 0; i < vt->kmem_max_cpus; i++) +- si->cpudata[i] = (ulong *) +- GETBUF(vt->kmem_max_limit * sizeof(ulong)); + +- cnt = 0; + +- get_symbol_data("cache_chain", sizeof(ulong), &si->cache); +- si->cache -= OFFSET(kmem_cache_s_next); +- cache_end = symbol_value("cache_chain"); ++/* ++ * Updated gather_cpudata_list_v2 for per-node kmem_list3's in kmem_cache ++ */ ++static void ++gather_cpudata_list_v2_nodes(struct meminfo *si, int index) ++{ ++ int i, j; ++ int avail; ++ ulong cpudata[NR_CPUS]; ++ ulong shared; ++ ulong *start_address; + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { ++ start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); ++ readmem(si->cache+OFFSET(kmem_cache_s_array), ++ KVADDR, &cpudata[0], ++ sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), ++ "array_cache array", FAULT_ON_ERROR); ++ ++ for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && ++ (cpudata[i]) && !(index); i++) { ++ BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); ++ ++ readmem(cpudata[i]+OFFSET(array_cache_avail), ++ KVADDR, &avail, sizeof(int), ++ "array cache avail", FAULT_ON_ERROR); ++ ++ if (!avail) ++ continue; ++ ++ if (avail > vt->kmem_max_limit) { + error(INFO, +- "address is not allocated in slab subsystem: %lx\n", +- si->spec_addr); +- return; ++ "\"%s\" cache: array_cache.avail %d greater than limit %ld\n", ++ si->curname, avail, vt->kmem_max_limit); ++ si->errors++; + } ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: cpu[%d] avail: %d\n", ++ si->curname, i, avail); + +- if (si->reqname && (si->reqname != p1)) +- error(INFO, +- "ignoring 
pre-selected %s cache for address: %lx\n", +- si->reqname, si->spec_addr, si->reqname); +- reqname = p1; +- } else +- reqname = si->reqname; ++ readmem(cpudata[i]+SIZE(array_cache), ++ KVADDR, si->cpudata[i], ++ sizeof(void *) * avail, ++ "array_cache avail", FAULT_ON_ERROR); + +- do { +- if ((si->flags & VERBOSE) && !si->reqname && +- !(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); ++ if (CRASHDEBUG(2)) ++ for (j = 0; j < avail; j++) ++ fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); ++ } + +- if (vt->kmem_cache_namelen) { +- readmem(si->cache+OFFSET(kmem_cache_s_name), +- KVADDR, buf, vt->kmem_cache_namelen, +- "name array", FAULT_ON_ERROR); +- } else { +- readmem(si->cache+OFFSET(kmem_cache_s_name), +- KVADDR, &name, sizeof(ulong), +- "name", FAULT_ON_ERROR); +- if (!read_string(name, buf, BUFSIZE-1)) +- error(FATAL, +- "cannot read kmem_cache_s.name string at %lx\n", +- name); +- } ++ /* ++ * If the shared list contains anything, gather them as well. ++ */ ++ if (!index) { ++ BZERO(si->shared_array_cache, sizeof(ulong) * ++ vt->kmem_max_limit * vt->kmem_cache_len_nodes); ++ si->current_cache_index = 0; ++ } + +- if (reqname && !STREQ(reqname, buf)) +- goto next_cache; ++ if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], ++ sizeof(ulong) * vt->kmem_cache_len_nodes , "array nodelist array", ++ RETURN_ON_ERROR) || ++ !readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR, &shared, ++ sizeof(void *), "kmem_list3 shared", RETURN_ON_ERROR|QUIET) || ++ !readmem(shared + OFFSET(array_cache_avail), KVADDR, &avail, sizeof(int), ++ "shared array_cache avail", RETURN_ON_ERROR|QUIET) || !avail) { ++ FREEBUF(start_address); ++ return; ++ } + +- if (ignore_cache(si, buf)) { +- fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); +- goto next_cache; +- } ++ if (avail > vt->kmem_max_limit) { ++ error(INFO, ++ "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", ++ si->curname, avail, vt->kmem_max_limit); ++ si->errors++; ++ FREEBUF(start_address); ++ return; ++ } + +- si->curname = buf; ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: shared avail: %d\n", ++ si->curname, avail); + +- readmem(si->cache+OFFSET(kmem_cache_s_objsize), +- KVADDR, &tmp_val, sizeof(uint), +- "objsize", FAULT_ON_ERROR); +- si->size = (ulong)tmp_val; ++ readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache + si->current_cache_index, ++ sizeof(void *) * avail, "shared array_cache avail", ++ FAULT_ON_ERROR); + +- if (!si->size) { +- if (STREQ(si->curname, "kmem_cache")) +- si->size = SIZE(kmem_cache_s); +- else { +- error(INFO, "\"%s\" cache: objsize: %ld\n", +- si->curname, si->size); +- si->errors++; +- } +- } ++ if ((si->current_cache_index + avail) > ++ (vt->kmem_max_limit * vt->kmem_cache_len_nodes)) { ++ error(INFO, ++ "\"%s\" cache: total shared array_cache.avail %d greater than total limit %ld\n", ++ si->curname, ++ si->current_cache_index + avail, ++ vt->kmem_max_limit * vt->kmem_cache_len_nodes); ++ si->errors++; ++ FREEBUF(start_address); ++ return; ++ } + +- readmem(si->cache+OFFSET(kmem_cache_s_flags), +- KVADDR, &tmp_val, sizeof(uint), +- "kmem_cache_s flags", FAULT_ON_ERROR); +- si->c_flags = (ulong)tmp_val; ++ if (CRASHDEBUG(2)) ++ for (j = si->current_cache_index; j < (si->current_cache_index + avail); j++) ++ fprintf(fp, " %lx (shared list)\n", si->shared_array_cache[j]); ++ ++ si->current_cache_index += avail; ++ FREEBUF(start_address); ++} + +- readmem(si->cache+OFFSET(kmem_cache_s_gfporder), +- 
KVADDR, &tmp_val, sizeof(uint), +- "gfporder", FAULT_ON_ERROR); +- si->order = (ulong)tmp_val; ++/* ++ * Check whether a given address is contained in the previously-gathered ++ * percpu object cache. ++ */ + +- readmem(si->cache+OFFSET(kmem_cache_s_num), +- KVADDR, &tmp_val, sizeof(uint), +- "kmem_cache_s num", FAULT_ON_ERROR); +- si->c_num = (ulong)tmp_val; ++static int ++check_cpudata_list(struct meminfo *si, ulong obj) ++{ ++ int i, j; + +- do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); ++ for (i = 0; i < vt->kmem_max_cpus; i++) { ++ for (j = 0; si->cpudata[i][j]; j++) ++ if (si->cpudata[i][j] == obj) { ++ si->cpu = i; ++ return TRUE; ++ } ++ } + +- if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { +- DUMP_KMEM_CACHE_INFO_V2(); +- if (CRASHDEBUG(3)) +- dump_struct("kmem_cache_s", si->cache, 0); +- } ++ return FALSE; ++} + +- if (si->flags == GET_SLAB_PAGES) +- si->retval += (si->num_slabs * +- (si->slabsize/PAGESIZE())); ++/* ++ * Check whether a given address is contained in the previously-gathered ++ * shared object cache. ++ */ + +- if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { ++static int ++check_shared_list(struct meminfo *si, ulong obj) ++{ ++ int i; + +- gather_cpudata_list_v2(si); ++ if (INVALID_MEMBER(kmem_list3_shared) || ++ !si->shared_array_cache) ++ return FALSE; + +- si->slab = (si->flags & ADDRESS_SPECIFIED) ? +- vaddr_to_slab(si->spec_addr) : 0; ++ for (i = 0; si->shared_array_cache[i]; i++) { ++ if (si->shared_array_cache[i] == obj) ++ return TRUE; ++ } + +- do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); ++ return FALSE; ++} + +- if (si->found) { +- fprintf(fp, kmem_cache_hdr); +- DUMP_KMEM_CACHE_INFO_V2(); +- fprintf(fp, slab_hdr); +- gather_slab_cached_count(si); +- DUMP_SLAB_INFO(); ++/* ++ * Search the various memory subsystems for instances of this address. ++ * Start with the most specific areas, ending up with at least the ++ * mem_map page data. ++ */ ++static void ++kmem_search(struct meminfo *mi) ++{ ++ struct syment *sp; ++ struct meminfo tmp_meminfo; ++ char buf[BUFSIZE]; ++ ulong vaddr, orig_flags; ++ physaddr_t paddr; ++ ulong offset; ++ ulong task; ++ struct task_context *tc; + +- switch (si->found) +- { +- case KMEM_BUFCTL_ADDR: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp,"(kmem_bufctl_t)\n"); +- break; ++ pc->curcmd_flags &= ~HEADER_PRINTED; + +- case KMEM_SLAB_ADDR: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp, "(slab)\n"); +- break; ++ switch (mi->memtype) ++ { ++ case KVADDR: ++ vaddr = mi->spec_addr; ++ break; + +- case KMEM_ON_SLAB: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp, "(unused part of slab)\n"); +- break; +- +- case KMEM_OBJECT_ADDR_FREE: +- fprintf(fp, free_inuse_hdr); +- fprintf(fp, " %lx\n", +- (ulong)si->spec_addr); +- break; ++ case PHYSADDR: ++ vaddr = mi->spec_addr < VTOP(vt->high_memory) ? ++ PTOV(mi->spec_addr) : BADADDR; ++ break; ++ } + +- case KMEM_OBJECT_ADDR_INUSE: +- fprintf(fp, free_inuse_hdr); +- fprintf(fp, " [%lx]\n", +- (ulong)si->spec_addr); +- break; ++ orig_flags = mi->flags; ++ mi->retval = 0; + +- case KMEM_OBJECT_ADDR_CACHED: +- fprintf(fp, free_inuse_hdr); +- fprintf(fp, +- " %lx (cpu %d cache)\n", +- (ulong)si->spec_addr, si->cpu); +- break; +- } ++ /* ++ * Check first for a possible symbolic display of the virtual ++ * address associated with mi->spec_addr or PTOV(mi->spec_addr). 
++ */ ++ if (((vaddr >= kt->stext) && (vaddr <= kt->end)) || ++ IS_MODULE_VADDR(mi->spec_addr)) { ++ if ((sp = value_search(vaddr, &offset))) { ++ show_symbol(sp, offset, SHOW_LINENUM | SHOW_RADIX()); ++ fprintf(fp, "\n"); ++ } ++ } + +- break; ++ /* ++ * Check for a valid mapped address. ++ */ ++ if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) { ++ if (kvtop(NULL, mi->spec_addr, &paddr, 0)) { ++ mi->flags = orig_flags | VMLIST_VERIFY; ++ dump_vmlist(mi); ++ if (mi->retval) { ++ mi->flags = orig_flags; ++ dump_vmlist(mi); ++ fprintf(fp, "\n"); ++ mi->spec_addr = paddr; ++ mi->memtype = PHYSADDR; ++ goto mem_map; + } + } ++ } + +-next_cache: +- readmem(si->cache+OFFSET(kmem_cache_s_next), +- KVADDR, &si->cache, sizeof(ulong), +- "kmem_cache_s next", FAULT_ON_ERROR); ++ /* ++ * If the address is physical, check whether it's in vmalloc space. ++ */ ++ if (mi->memtype == PHYSADDR) { ++ mi->flags = orig_flags; ++ mi->flags |= GET_PHYS_TO_VMALLOC; ++ mi->retval = 0; ++ dump_vmlist(mi); ++ mi->flags &= ~GET_PHYS_TO_VMALLOC; + +- if (si->cache != cache_end) +- si->cache -= OFFSET(kmem_cache_s_next); ++ if (mi->retval) { ++ if ((sp = value_search(mi->retval, &offset))) { ++ show_symbol(sp, offset, ++ SHOW_LINENUM | SHOW_RADIX()); ++ fprintf(fp, "\n"); ++ } ++ dump_vmlist(mi); ++ fprintf(fp, "\n"); ++ goto mem_map; ++ } ++ } + +- } while (si->cache != cache_end); ++ /* ++ * Check whether the containing page belongs to the slab subsystem. ++ */ ++ mi->flags = orig_flags; ++ mi->retval = 0; ++ if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf)) { ++ BZERO(&tmp_meminfo, sizeof(struct meminfo)); ++ tmp_meminfo.spec_addr = vaddr; ++ tmp_meminfo.memtype = KVADDR; ++ tmp_meminfo.flags = mi->flags; ++ vt->dump_kmem_cache(&tmp_meminfo); ++ fprintf(fp, "\n"); ++ } ++ if ((vaddr != BADADDR) && is_slab_page(mi, buf)) { ++ BZERO(&tmp_meminfo, sizeof(struct meminfo)); ++ tmp_meminfo.spec_addr = vaddr; ++ tmp_meminfo.memtype = KVADDR; ++ tmp_meminfo.flags = mi->flags; ++ vt->dump_kmem_cache(&tmp_meminfo); ++ fprintf(fp, "\n"); ++ } + +- if ((si->flags & ADDRESS_SPECIFIED) && !si->found) +- error(INFO, "%s: address not found in cache: %lx\n", +- reqname, si->spec_addr); +- +- if (si->errors) +- error(INFO, "%ld error%s encountered\n", +- si->errors, si->errors > 1 ? "s" : ""); ++ /* ++ * Check free list. ++ */ ++ mi->flags = orig_flags; ++ mi->retval = 0; ++ vt->dump_free_pages(mi); ++ if (mi->retval) ++ fprintf(fp, "\n"); + +- FREEBUF(si->addrlist); +- FREEBUF(si->kmem_bufctl); +- for (i = 0; i < vt->kmem_max_cpus; i++) +- FREEBUF(si->cpudata[i]); ++ if (vt->page_hash_table) { ++ /* ++ * Check the page cache. ++ */ ++ mi->flags = orig_flags; ++ mi->retval = 0; ++ dump_page_hash_table(mi); ++ if (mi->retval) ++ fprintf(fp, "\n"); ++ } + +-} ++ /* ++ * Check whether it's a current task or stack address. ++ */ ++ if ((mi->memtype == KVADDR) && (task = vaddr_in_task_struct(vaddr)) && ++ (tc = task_to_context(task))) { ++ show_context(tc); ++ fprintf(fp, "\n"); ++ } else if ((mi->memtype == KVADDR) && (task = stkptr_to_task(vaddr)) && ++ (tc = task_to_context(task))) { ++ show_context(tc); ++ fprintf(fp, "\n"); ++ } ++ ++mem_map: ++ mi->flags = orig_flags; ++ pc->curcmd_flags &= ~HEADER_PRINTED; ++ dump_mem_map(mi); + ++ if (!mi->retval) ++ fprintf(fp, "%llx: %s address not found in mem map\n", ++ mi->spec_addr, memtype_string(mi->memtype, 0)); ++} + + /* +- * Walk through the slab chain hanging off a kmem_cache_s structure, +- * gathering basic statistics. 
+- * +- * TBD: Given a specified physical address, determine whether it's in this +- * slab chain, and whether it's in use or not. ++ * Determine whether an address is a page pointer from the mem_map[] array. ++ * If the caller requests it, return the associated physical address. + */ +- +-#define INSLAB(obj, si) \ +- ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem) +- +-static void +-do_slab_chain(int cmd, struct meminfo *si) ++int ++is_page_ptr(ulong addr, physaddr_t *phys) + { +- ulong tmp, magic; +- ulong kmem_slab_end; +- char *kmem_slab_s_buf; +- +- si->slabsize = (power(2, si->order) * PAGESIZE()); +- +- kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); +- +- switch (cmd) +- { +- case SLAB_GET_COUNTS: +- si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); +- +- if (slab_data_saved(si)) +- return; +- +- si->num_slabs = si->inuse = 0; +- +- if (si->slab == kmem_slab_end) +- return; +- +- kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); ++ int n; ++ ulong ppstart, ppend; ++ struct node_table *nt; ++ ulong pgnum, node_size; ++ ulong nr, sec_addr; ++ ulong nr_mem_sections; ++ ulong coded_mem_map, mem_map, end_mem_map; ++ physaddr_t section_paddr; ++ ++ if (IS_SPARSEMEM()) { ++ nr_mem_sections = NR_MEM_SECTIONS(); ++ for (nr = 0; nr <= nr_mem_sections ; nr++) { ++ if ((sec_addr = valid_section_nr(nr))) { ++ coded_mem_map = section_mem_map_addr(sec_addr); ++ mem_map = sparse_decode_mem_map(coded_mem_map, nr); ++ end_mem_map = mem_map + (PAGES_PER_SECTION() * SIZE(page)); ++ ++ if ((addr >= mem_map) && (addr < end_mem_map)) { ++ if ((addr - mem_map) % SIZE(page)) ++ return FALSE; ++ if (phys) { ++ section_paddr = PTOB(section_nr_to_pfn(nr)); ++ pgnum = (addr - mem_map) / SIZE(page); ++ *phys = section_paddr + ((physaddr_t)pgnum * PAGESIZE()); ++ } ++ return TRUE; ++ } ++ } ++ } ++ return FALSE; ++ } + +- do { +- if (received_SIGINT()) { +- FREEBUF(kmem_slab_s_buf); +- restart(0); +- } ++ for (n = 0; n < vt->numnodes; n++) { ++ nt = &vt->node_table[n]; ++ if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) ++ node_size = vt->max_mapnr; ++ else ++ node_size = nt->size; + +- readmem(si->slab, KVADDR, kmem_slab_s_buf, +- SIZE(kmem_slab_s), "kmem_slab_s buffer", +- FAULT_ON_ERROR); ++ ppstart = nt->mem_map; ++ ppend = ppstart + (node_size * SIZE(page)); + +- magic = ULONG(kmem_slab_s_buf + +- OFFSET(kmem_slab_s_s_magic)); ++ if ((addr < ppstart) || (addr >= ppend)) ++ continue; + +- if (magic == SLAB_MAGIC_ALLOC) { +- +- tmp = ULONG(kmem_slab_s_buf + +- OFFSET(kmem_slab_s_s_inuse)); +- +- si->inuse += tmp; +- si->num_slabs++; +- } else { +- fprintf(fp, +- "\"%s\" cache: invalid s_magic: %lx\n", +- si->curname, magic); +- si->errors++; +- FREEBUF(kmem_slab_s_buf); +- return; +- } +- +- si->slab = ULONG(kmem_slab_s_buf + +- OFFSET(kmem_slab_s_s_nextp)); +- +- } while (si->slab != kmem_slab_end); +- +- FREEBUF(kmem_slab_s_buf); +- save_slab_data(si); +- break; ++ /* ++ * We're in the mem_map range -- but it is a page pointer? 
++ */ ++ if ((addr - ppstart) % SIZE(page)) ++ return FALSE; + +- case SLAB_WALKTHROUGH: +- if (!si->slab) +- si->slab = ULONG(si->cache_buf + +- OFFSET(kmem_cache_s_c_firstp)); ++ if (phys) { ++ pgnum = (addr - nt->mem_map) / SIZE(page); ++ *phys = ((physaddr_t)pgnum * PAGESIZE()) + nt->start_paddr; ++ } + +- if (si->slab == kmem_slab_end) +- return; ++ return TRUE; ++ } + +- if (CRASHDEBUG(1)) { +- fprintf(fp, "search cache: [%s] ", si->curname); +- if (si->flags & ADDRESS_SPECIFIED) +- fprintf(fp, "for %llx", si->spec_addr); +- fprintf(fp, "\n"); +- } ++ return FALSE; + +- si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); ++#ifdef PRE_NODES ++ ppstart = vt->mem_map; ++ ppend = ppstart + (vt->total_pages * vt->page_struct_len); + +- do { +- if (received_SIGINT()) { +- FREEBUF(kmem_slab_s_buf); +- restart(0); +- } ++ if ((addr < ppstart) || (addr >= ppend)) ++ return FALSE; + +- readmem(si->slab, KVADDR, kmem_slab_s_buf, +- SIZE(kmem_slab_s), "kmem_slab_s buffer", +- FAULT_ON_ERROR); ++ if ((addr - ppstart) % vt->page_struct_len) ++ return FALSE; + +- dump_slab(si); +- +- if (si->found) { +- FREEBUF(kmem_slab_s_buf); +- return; +- } +- +- si->slab = ULONG(kmem_slab_s_buf + +- OFFSET(kmem_slab_s_s_nextp)); +- +- } while (si->slab != kmem_slab_end); ++ return TRUE; ++#endif ++} + +- FREEBUF(kmem_slab_s_buf); +- break; +- } ++/* ++ * Return the physical address associated with this page pointer. ++ */ ++static int ++page_to_phys(ulong pp, physaddr_t *phys) ++{ ++ return(is_page_ptr(pp, phys)); + } + + + /* +- * do_slab_chain() adapted for newer percpu slab format. ++ * Return the page pointer associated with this physical address. + */ ++static int ++phys_to_page(physaddr_t phys, ulong *pp) ++{ ++ int n; ++ ulong pgnum; ++ struct node_table *nt; ++ physaddr_t pstart, pend; ++ ulong node_size; + +-#define SLAB_BASE(X) (PTOB(BTOP(X))) +- +-#define INSLAB_PERCPU(obj, si) \ +- ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem)) ++ if (IS_SPARSEMEM()) { ++ ulong map; ++ map = pfn_to_map(phys >> PAGESHIFT()); ++ if (map) { ++ *pp = map; ++ return TRUE; ++ } ++ return FALSE; ++ } + +-#define SLAB_CHAINS (3) ++ for (n = 0; n < vt->numnodes; n++) { ++ nt = &vt->node_table[n]; ++ if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) ++ node_size = vt->max_mapnr; ++ else ++ node_size = nt->size; + +-static char *slab_chain_name_v1[] = {"full", "partial", "free"}; ++ pstart = nt->start_paddr; ++ pend = pstart + ((ulonglong)node_size * PAGESIZE()); + +-static void +-do_slab_chain_percpu_v1(long cmd, struct meminfo *si) +-{ +- int i, tmp, s; +- int list_borked; +- char *slab_s_buf; +- ulong specified_slab; +- ulong last; +- ulong slab_chains[SLAB_CHAINS]; ++ if ((phys < pstart) || (phys >= pend)) ++ continue; ++ /* ++ * We're in the physical range -- calculate the page. 
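
The non-sparsemem branch above treats an address as a page pointer only if it lands inside a node's mem_map[] array on a struct-page boundary, and then converts the array index into a physical address. A minimal standalone sketch of that arithmetic, using hypothetical values for the page size, sizeof(struct page) and the node layout:

#include <stdio.h>
#include <stdint.h>

#define PAGESIZE    4096UL          /* hypothetical page size */
#define STRUCT_PAGE 64UL            /* hypothetical sizeof(struct page) */

/* One memory node, as in the node_table[] entries used above. */
struct node {
    unsigned long mem_map;          /* virtual address of first struct page */
    unsigned long size;             /* number of pages in the node */
    uint64_t      start_paddr;      /* physical address of the first page */
};

/* Return 1 and fill *phys if addr points at a struct page in this node. */
static int page_ptr_to_phys(const struct node *nt, unsigned long addr,
                            uint64_t *phys)
{
    unsigned long ppstart = nt->mem_map;
    unsigned long ppend   = ppstart + nt->size * STRUCT_PAGE;

    if (addr < ppstart || addr >= ppend)
        return 0;                   /* outside this node's mem_map[] */
    if ((addr - ppstart) % STRUCT_PAGE)
        return 0;                   /* inside, but not on a page boundary */

    *phys = nt->start_paddr +
            (uint64_t)((addr - ppstart) / STRUCT_PAGE) * PAGESIZE;
    return 1;
}

int main(void)
{
    struct node nt = { 0xc1000000UL, 0x8000UL, 0x100000000ULL };
    uint64_t phys;

    if (page_ptr_to_phys(&nt, nt.mem_map + 5 * STRUCT_PAGE, &phys))
        printf("physical address: %#llx\n", (unsigned long long)phys);
    return 0;
}

The sparsemem case in the patch applies the same idea per memory section, with section_nr_to_pfn() supplying the section's base page frame number instead of a node's start_paddr.
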
++ */ ++ pgnum = BTOP(phys - pstart); ++ *pp = nt->mem_map + (pgnum * SIZE(page)); + +- list_borked = 0; +- si->slabsize = (power(2, si->order) * PAGESIZE()); +- si->cpucached_slab = 0; ++ return TRUE; ++ } + +- if (VALID_MEMBER(kmem_cache_s_slabs)) { +- slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs); +- slab_chains[1] = 0; +- slab_chains[2] = 0; +- } else { +- slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full); +- slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial); +- slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free); +- } ++ return FALSE; + +- if (CRASHDEBUG(1)) { +- fprintf(fp, "[ %s: %lx ", si->curname, si->cache); +- fprintf(fp, "full: %lx partial: %lx free: %lx ]\n", +- slab_chains[0], slab_chains[1], slab_chains[2]); +- } ++#ifdef PRE_NODES ++ if (phys >= (vt->total_pages * PAGESIZE())) ++ return FALSE; + +- switch (cmd) +- { +- case SLAB_GET_COUNTS: +- si->flags |= SLAB_GET_COUNTS; +- si->flags &= ~SLAB_WALKTHROUGH; +- si->cpucached_cache = 0; +- si->num_slabs = si->inuse = 0; +- gather_cpudata_list_v1(si); ++ pgnum = PTOB(BTOP(phys)) / PAGESIZE(); ++ *pp = vt->mem_map + (pgnum * vt->page_struct_len); ++ ++ return TRUE; ++#endif ++} + +- slab_s_buf = GETBUF(SIZE(slab_s)); + +- for (s = 0; s < SLAB_CHAINS; s++) { ++/* ++ * Try to read a string of non-NULL characters from a memory location, ++ * returning the number of characters read. ++ */ ++int ++read_string(ulong kvaddr, char *buf, int maxlen) ++{ ++ char strbuf[MIN_PAGE_SIZE]; ++ ulong kp; ++ char *bufptr; ++ long cnt, size; + +- if (!slab_chains[s]) +- continue; ++ BZERO(buf, maxlen); ++ BZERO(strbuf, MIN_PAGE_SIZE); + +- if (!readmem(slab_chains[s], +- KVADDR, &si->slab, sizeof(ulong), +- "first slab", QUIET|RETURN_ON_ERROR)) { +- error(INFO, +- "%s: %s list: bad slab pointer: %lx\n", +- si->curname, slab_chain_name_v1[s], +- slab_chains[s]); +- list_borked = 1; +- continue; +- } +- +- if (slab_data_saved(si)) { +- FREEBUF(slab_s_buf); +- return; +- } +- +- if (si->slab == slab_chains[s]) +- continue; +- +- last = slab_chains[s]; ++ kp = kvaddr; ++ bufptr = strbuf; ++ size = maxlen; + +- do { +- if (received_SIGINT()) { +- FREEBUF(slab_s_buf); +- restart(0); +- } ++ while (size > 0) { ++ cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); ++ ++ if (cnt > size) ++ cnt = size; + +- if (!verify_slab_v1(si, last, s)) { +- list_borked = 1; +- continue; +- } +- last = si->slab - OFFSET(slab_s_list); +- +- readmem(si->slab, KVADDR, slab_s_buf, +- SIZE(slab_s), "slab_s buffer", +- FAULT_ON_ERROR); +- +- tmp = INT(slab_s_buf + OFFSET(slab_s_inuse)); +- si->inuse += tmp; +- +- if (ACTIVE()) +- gather_cpudata_list_v1(si); ++ if (!readmem(kp, KVADDR, bufptr, cnt, ++ "readstring characters", QUIET|RETURN_ON_ERROR)) ++ break; + +- si->s_mem = ULONG(slab_s_buf + +- OFFSET(slab_s_s_mem)); +- gather_slab_cached_count(si); +- +- si->num_slabs++; +- +- si->slab = ULONG(slab_s_buf + +- OFFSET(slab_s_list)); +- si->slab -= OFFSET(slab_s_list); ++ if (count_buffer_chars(bufptr, NULLCHAR, cnt)) ++ break; + +- /* +- * Check for slab transition. 
(Tony Dziedzic) +- */ +- for (i = 0; i < SLAB_CHAINS; i++) { +- if ((i != s) && +- (si->slab == slab_chains[i])) { +- error(NOTE, +- "%s: slab chain inconsistency: %s list\n", +- si->curname, +- slab_chain_name_v1[s]); +- list_borked = 1; +- } +- } +- +- } while (si->slab != slab_chains[s] && !list_borked); +- } ++ kp += cnt; ++ bufptr += cnt; ++ size -= cnt; ++ } + +- FREEBUF(slab_s_buf); +- if (!list_borked) +- save_slab_data(si); +- break; ++ strcpy(buf, strbuf); ++ return (strlen(buf)); ++} + +- case SLAB_WALKTHROUGH: +- specified_slab = si->slab; +- si->flags |= SLAB_WALKTHROUGH; +- si->flags &= ~SLAB_GET_COUNTS; ++/* ++ * "help -v" output ++ */ ++void ++dump_vm_table(int verbose) ++{ ++ int i; ++ struct node_table *nt; ++ int others; ++ ulong *up; + +- for (s = 0; s < SLAB_CHAINS; s++) { +- if (!slab_chains[s]) +- continue; ++ others = 0; ++ fprintf(fp, " flags: %lx %s(", ++ vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : ""); ++ if (vt->flags & NODES) ++ fprintf(fp, "%sNODES", others++ ? "|" : ""); ++ if (vt->flags & NODES_ONLINE) ++ fprintf(fp, "%sNODES_ONLINE", others++ ? "|" : ""); ++ if (vt->flags & ZONES) ++ fprintf(fp, "%sZONES", others++ ? "|" : ""); ++ if (vt->flags & PERCPU_KMALLOC_V1) ++ fprintf(fp, "%sPERCPU_KMALLOC_V1", others++ ? "|" : ""); ++ if (vt->flags & PERCPU_KMALLOC_V2) ++ fprintf(fp, "%sPERCPU_KMALLOC_V2", others++ ? "|" : ""); ++ if (vt->flags & COMMON_VADDR) ++ fprintf(fp, "%sCOMMON_VADDR", others++ ? "|" : ""); ++ if (vt->flags & KMEM_CACHE_INIT) ++ fprintf(fp, "%sKMEM_CACHE_INIT", others++ ? "|" : ""); ++ if (vt->flags & V_MEM_MAP) ++ fprintf(fp, "%sV_MEM_MAP", others++ ? "|" : ""); ++ if (vt->flags & KMEM_CACHE_UNAVAIL) ++ fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : ""); ++ if (vt->flags & DISCONTIGMEM) ++ fprintf(fp, "%sDISCONTIGMEM", others++ ? "|" : ""); ++ if (vt->flags & FLATMEM) ++ fprintf(fp, "%sFLATMEM", others++ ? "|" : ""); ++ if (vt->flags & SPARSEMEM) ++ fprintf(fp, "%sSPARSEMEM", others++ ? "|" : "");\ ++ if (vt->flags & SPARSEMEM_EX) ++ fprintf(fp, "%sSPARSEMEM_EX", others++ ? "|" : "");\ ++ if (vt->flags & KMEM_CACHE_DELAY) ++ fprintf(fp, "%sKMEM_CACHE_DELAY", others++ ? "|" : "");\ ++ if (vt->flags & PERCPU_KMALLOC_V2_NODES) ++ fprintf(fp, "%sPERCPU_KMALLOC_V2_NODES", others++ ? "|" : "");\ ++ if (vt->flags & VM_STAT) ++ fprintf(fp, "%sVM_STAT", others++ ? "|" : "");\ ++ if (vt->flags & KMALLOC_SLUB) ++ fprintf(fp, "%sKMALLOC_SLUB", others++ ? "|" : "");\ ++ if (vt->flags & CONFIG_NUMA) ++ fprintf(fp, "%sCONFIG_NUMA", others++ ? "|" : "");\ ++ if (vt->flags & VM_EVENT) ++ fprintf(fp, "%sVM_EVENT", others++ ? 
"|" : "");\ + +- if (!specified_slab) { +- if (!readmem(slab_chains[s], +- KVADDR, &si->slab, sizeof(ulong), +- "slabs", QUIET|RETURN_ON_ERROR)) { +- error(INFO, +- "%s: %s list: bad slab pointer: %lx\n", +- si->curname, +- slab_chain_name_v1[s], +- slab_chains[s]); +- list_borked = 1; +- continue; +- } +- last = slab_chains[s]; +- } else +- last = 0; +- +- if (si->slab == slab_chains[s]) +- continue; ++ fprintf(fp, ")\n"); ++ if (vt->kernel_pgd[0] == vt->kernel_pgd[1]) ++ fprintf(fp, " kernel_pgd[NR_CPUS]: %lx ...\n", ++ vt->kernel_pgd[0]); ++ else { ++ fprintf(fp, " kernel_pgd[NR_CPUS]: "); ++ for (i = 0; i < NR_CPUS; i++) { ++ if ((i % 4) == 0) ++ fprintf(fp, "\n "); ++ fprintf(fp, "%lx ", vt->kernel_pgd[i]); ++ } ++ fprintf(fp, "\n"); ++ } ++ fprintf(fp, " high_memory: %lx\n", vt->high_memory); ++ fprintf(fp, " vmalloc_start: %lx\n", vt->vmalloc_start); ++ fprintf(fp, " mem_map: %lx\n", vt->mem_map); ++ fprintf(fp, " total_pages: %ld\n", vt->total_pages); ++ fprintf(fp, " max_mapnr: %ld\n", vt->max_mapnr); ++ fprintf(fp, " totalram_pages: %ld\n", vt->totalram_pages); ++ fprintf(fp, " totalhigh_pages: %ld\n", vt->totalhigh_pages); ++ fprintf(fp, " num_physpages: %ld\n", vt->num_physpages); ++ fprintf(fp, " page_hash_table: %lx\n", vt->page_hash_table); ++ fprintf(fp, "page_hash_table_len: %d\n", vt->page_hash_table_len); ++ fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); ++ fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); ++ fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); ++ fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); ++ fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen); ++ fprintf(fp, "kmem_cache_nodelist_len: %ld\n", vt->kmem_cache_len_nodes); ++ fprintf(fp, " PG_reserved: %lx\n", vt->PG_reserved); ++ fprintf(fp, " PG_slab: %ld (%lx)\n", vt->PG_slab, ++ (ulong)1 << vt->PG_slab); ++ fprintf(fp, " PG_head_tail_mask: %lx\n", vt->PG_head_tail_mask); ++ fprintf(fp, " paddr_prlen: %d\n", vt->paddr_prlen); ++ fprintf(fp, " numnodes: %d\n", vt->numnodes); ++ fprintf(fp, " nr_zones: %d\n", vt->nr_zones); ++ fprintf(fp, " nr_free_areas: %d\n", vt->nr_free_areas); ++ for (i = 0; i < vt->numnodes; i++) { ++ nt = &vt->node_table[i]; ++ fprintf(fp, " node_table[%d]: \n", i); ++ fprintf(fp, " id: %d\n", nt->node_id); ++ fprintf(fp, " pgdat: %lx\n", nt->pgdat); ++ fprintf(fp, " size: %ld\n", nt->size); ++ fprintf(fp, " present: %ld\n", nt->present); ++ fprintf(fp, " mem_map: %lx\n", nt->mem_map); ++ fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); ++ fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); ++ } + +- if (CRASHDEBUG(1)) { +- fprintf(fp, "search cache: [%s] ", si->curname); +- if (si->flags & ADDRESS_SPECIFIED) +- fprintf(fp, "for %llx", si->spec_addr); +- fprintf(fp, "\n"); +- } +- +- do { +- if (received_SIGINT()) +- restart(0); ++ fprintf(fp, " dump_free_pages: "); ++ if (vt->dump_free_pages == dump_free_pages) ++ fprintf(fp, "dump_free_pages()\n"); ++ else if (vt->dump_free_pages == dump_free_pages_zones_v1) ++ fprintf(fp, "dump_free_pages_zones_v1()\n"); ++ else if (vt->dump_free_pages == dump_free_pages_zones_v2) ++ fprintf(fp, "dump_free_pages_zones_v2()\n"); ++ else if (vt->dump_free_pages == dump_multidimensional_free_pages) ++ fprintf(fp, "dump_multidimensional_free_pages()\n"); ++ else ++ fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages); + +- if (!verify_slab_v1(si, last, s)) { +- list_borked = 1; +- continue; +- } +- last = si->slab - OFFSET(slab_s_list); +- +- dump_slab_percpu_v1(si); +- +- if 
(si->found) { +- return; +- } +- +- readmem(si->slab+OFFSET(slab_s_list), +- KVADDR, &si->slab, sizeof(ulong), +- "slab list", FAULT_ON_ERROR); +- +- si->slab -= OFFSET(slab_s_list); +- +- } while (si->slab != slab_chains[s] && !list_borked); ++ fprintf(fp, " dump_kmem_cache: "); ++ if (vt->dump_kmem_cache == dump_kmem_cache) ++ fprintf(fp, "dump_kmem_cache()\n"); ++ else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1) ++ fprintf(fp, "dump_kmem_cache_percpu_v1()\n"); ++ else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2) ++ fprintf(fp, "dump_kmem_cache_percpu_v2()\n"); ++ else if (vt->dump_kmem_cache == dump_kmem_cache_slub) ++ fprintf(fp, "dump_kmem_cache_slub()\n"); ++ else ++ fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache); ++ fprintf(fp, " slab_data: %lx\n", (ulong)vt->slab_data); ++ if (verbose) ++ dump_saved_slab_data(); ++ fprintf(fp, " cpu_slab_type: %d\n", vt->cpu_slab_type); ++ fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles); ++ fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read); ++ fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct); ++ fprintf(fp, " mem_sec: %lx\n", (ulong)vt->mem_sec); ++ fprintf(fp, " mem_section: %lx\n", (ulong)vt->mem_section); ++ fprintf(fp, " ZONE_HIGHMEM: %d\n", vt->ZONE_HIGHMEM); ++ fprintf(fp, "node_online_map_len: %d\n", vt->node_online_map_len); ++ if (vt->node_online_map_len) { ++ fprintf(fp, " node_online_map: "); ++ up = (ulong *)vt->node_online_map; ++ for (i = 0; i < vt->node_online_map_len; i++) { ++ fprintf(fp, "%s%lx", i ? ", " : "[", *up); ++ up++; + } +- +- break; ++ fprintf(fp, "]\n"); ++ } else { ++ fprintf(fp, " node_online_map: (unused)\n"); + } ++ fprintf(fp, " nr_vm_stat_items: %d\n", vt->nr_vm_stat_items); ++ fprintf(fp, " vm_stat_items: %s", (vt->flags & VM_STAT) ? ++ "\n" : "(not used)\n"); ++ for (i = 0; i < vt->nr_vm_stat_items; i++) ++ fprintf(fp, " [%d] %s\n", i, vt->vm_stat_items[i]); ++ ++ fprintf(fp, " nr_vm_event_items: %d\n", vt->nr_vm_event_items); ++ fprintf(fp, " vm_event_items: %s", (vt->flags & VM_EVENT) ? ++ "\n" : "(not used)\n"); ++ for (i = 0; i < vt->nr_vm_event_items; i++) ++ fprintf(fp, " [%d] %s\n", i, vt->vm_event_items[i]); ++ ++ dump_vma_cache(VERBOSE); + } + + /* +- * Try to preclude any attempt to translate a bogus slab structure. ++ * Calculate the amount of memory referenced in the kernel-specific "nodes". 
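
The flag dump above relies on a small printing idiom: a post-incremented counter selects between an empty string and a "|" separator, so the first flag name prints bare and each later one is preceded by "|". A tiny standalone illustration with made-up flag bits:

#include <stdio.h>

#define NODES   0x1
#define ZONES   0x2
#define VM_STAT 0x4

int main(void)
{
    unsigned long flags = NODES | VM_STAT;
    int others = 0;

    printf("flags: %lx (", flags);
    if (flags & NODES)
        printf("%sNODES", others++ ? "|" : "");
    if (flags & ZONES)
        printf("%sZONES", others++ ? "|" : "");
    if (flags & VM_STAT)
        printf("%sVM_STAT", others++ ? "|" : "");
    printf(")\n");                  /* -> flags: 5 (NODES|VM_STAT) */
    return 0;
}
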
+ */ +- +-static int +-verify_slab_v1(struct meminfo *si, ulong last, int s) ++uint64_t ++total_node_memory() + { +- char slab_s_buf[BUFSIZE]; +- struct kernel_list_head *list_head; +- unsigned int inuse; +- ulong s_mem; +- char *list; +- int errcnt; +- +- list = slab_chain_name_v1[s]; +- +- errcnt = 0; +- +- if (!readmem(si->slab, KVADDR, slab_s_buf, +- SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { +- error(INFO, "%s: %s list: bad slab pointer: %lx\n", +- si->curname, list, si->slab); +- return FALSE; +- } +- +- list_head = (struct kernel_list_head *) +- (slab_s_buf + OFFSET(slab_s_list)); ++ int i; ++ struct node_table *nt; ++ uint64_t total; + +- if (!IS_KVADDR((ulong)list_head->next) || +- !accessible((ulong)list_head->next)) { +- error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", +- si->curname, list, si->slab, +- (ulong)list_head->next); +- errcnt++; +- } ++ for (i = total = 0; i < vt->numnodes; i++) { ++ nt = &vt->node_table[i]; + +- if (last && (last != (ulong)list_head->prev)) { +- error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", +- si->curname, list, si->slab, +- (ulong)list_head->prev); +- errcnt++; +- } ++ if (CRASHDEBUG(1)) { ++ console("node_table[%d]: \n", i); ++ console(" id: %d\n", nt->node_id); ++ console(" pgdat: %lx\n", nt->pgdat); ++ console(" size: %ld\n", nt->size); ++ console(" present: %ld\n", nt->present); ++ console(" mem_map: %lx\n", nt->mem_map); ++ console(" start_paddr: %lx\n", nt->start_paddr); ++ console(" start_mapnr: %ld\n", nt->start_mapnr); ++ } + +- inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); +- if (inuse > si->c_num) { +- error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; +- } ++ if (nt->present) ++ total += (uint64_t)((uint64_t)nt->present * (uint64_t)PAGESIZE()); ++ else ++ total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); ++ } + +- if (!last) +- goto no_inuse_check_v1; ++ return total; ++} + +- switch (s) +- { +- case 0: /* full -- but can be one singular list */ +- if (VALID_MEMBER(kmem_cache_s_slabs_full) && +- (inuse != si->c_num)) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; +- } +- break; ++/* ++ * Dump just the vm_area_struct cache table data so that it can be ++ * called from above or for debug purposes. 
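
total_node_memory() above is just a per-node sum of page counts times the page size, preferring the node's "present" count when it is non-zero and falling back to its size otherwise. A minimal sketch with invented node figures and an assumed 4 KB page:

#include <stdio.h>
#include <stdint.h>

#define PAGESIZE 4096ULL            /* hypothetical page size */

struct node { unsigned long size, present; };

static uint64_t total_node_memory(const struct node *tbl, int numnodes)
{
    uint64_t total = 0;
    int i;

    for (i = 0; i < numnodes; i++) {
        /* prefer the "present" page count when the kernel provides one */
        unsigned long pages = tbl[i].present ? tbl[i].present : tbl[i].size;
        total += (uint64_t)pages * PAGESIZE;
    }
    return total;
}

int main(void)
{
    struct node tbl[] = { { 262144, 261800 }, { 262144, 0 } };

    printf("%llu bytes\n",
           (unsigned long long)total_node_memory(tbl, 2));
    return 0;
}
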
++ */ ++void ++dump_vma_cache(ulong verbose) ++{ ++ int i; ++ ulong vhits; + +- case 1: /* partial */ +- if ((inuse == 0) || (inuse == si->c_num)) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; +- } +- break; ++ if (!verbose) ++ goto show_hits; + +- case 2: /* free */ +- if (inuse > 0) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; +- } +- break; +- } ++ for (i = 0; i < VMA_CACHE; i++) ++ fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", ++ i, vt->cached_vma[i], ++ vt->cached_vma_hits[i]); ++ fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); ++ fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); ++ fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); ++ fflush(fp); + +-no_inuse_check_v1: +- s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); +- if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { +- error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", +- si->curname, list, si->slab, s_mem); +- errcnt++; +- } ++show_hits: ++ if (vt->vma_cache_fills) { ++ for (i = vhits = 0; i < VMA_CACHE; i++) ++ vhits += vt->cached_vma_hits[i]; + +- return(errcnt ? FALSE : TRUE); ++ fprintf(stderr, "%s vma hit rate: %2ld%% (%ld of %ld)\n", ++ verbose ? "" : " ", ++ (vhits * 100)/vt->vma_cache_fills, ++ vhits, vt->vma_cache_fills); ++ } + } + + /* +- * Updated for 2.6 slab substructure. ++ * Guess at the "real" amount of physical memory installed, formatting ++ * it in a MB or GB based string. + */ +- +-static char *slab_chain_name_v2[] = {"partial", "full", "free"}; +- +-static void +-do_slab_chain_percpu_v2(long cmd, struct meminfo *si) ++char * ++get_memory_size(char *buf) + { +- int i, tmp, s; +- int list_borked; +- char *slab_buf; +- ulong specified_slab; +- ulong last; +- ulong slab_chains[SLAB_CHAINS]; ++ uint64_t total; ++ ulong next_gig; ++#ifdef OLDWAY ++ ulong mbs, gbs; ++#endif + +- list_borked = 0; +- si->slabsize = (power(2, si->order) * PAGESIZE()); +- si->cpucached_slab = 0; ++ total = machdep->memory_size(); + +- slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) + +- OFFSET(kmem_list3_slabs_partial); +- slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) + +- OFFSET(kmem_list3_slabs_full); +- slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) + +- OFFSET(kmem_list3_slabs_free); ++ if ((next_gig = roundup(total, GIGABYTES(1)))) { ++ if ((next_gig - total) <= MEGABYTES(64)) ++ total = next_gig; ++ } + +- if (CRASHDEBUG(1)) { +- fprintf(fp, "[ %s: %lx ", si->curname, si->cache); +- fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", +- slab_chains[0], slab_chains[1], slab_chains[2]); +- } ++ return (pages_to_size((ulong)(total/PAGESIZE()), buf)); + +- switch (cmd) +- { +- case SLAB_GET_COUNTS: +- si->flags |= SLAB_GET_COUNTS; +- si->flags &= ~SLAB_WALKTHROUGH; +- si->cpucached_cache = 0; +- si->num_slabs = si->inuse = 0; +- gather_cpudata_list_v2(si); ++#ifdef OLDWAY ++ gbs = (ulong)(total/GIGABYTES(1)); ++ mbs = (ulong)(total/MEGABYTES(1)); ++ if (gbs) ++ mbs = (total % GIGABYTES(1))/MEGABYTES(1); ++ ++ if (total%MEGABYTES(1)) ++ mbs++; + +- slab_buf = GETBUF(SIZE(slab)); ++ if (gbs) ++ sprintf(buf, mbs ? 
"%ld GB %ld MB" : "%ld GB", gbs, mbs); ++ else ++ sprintf(buf, "%ld MB", mbs); + +- for (s = 0; s < SLAB_CHAINS; s++) { +- if (!slab_chains[s]) +- continue; ++ return buf; ++#endif ++} + +- if (!readmem(slab_chains[s], +- KVADDR, &si->slab, sizeof(ulong), +- "first slab", QUIET|RETURN_ON_ERROR)) { +- error(INFO, +- "%s: %s list: bad slab pointer: %lx\n", +- si->curname, +- slab_chain_name_v2[s], +- slab_chains[s]); +- list_borked = 1; +- continue; +- } +- +- if (slab_data_saved(si)) { +- FREEBUF(slab_buf); +- return; +- } +- +- if (si->slab == slab_chains[s]) +- continue; +- +- last = slab_chains[s]; ++/* ++ * For use by architectures not having machine-specific manners for ++ * best determining physical memory size. ++ */ ++uint64_t ++generic_memory_size(void) ++{ ++ if (machdep->memsize) ++ return machdep->memsize; + +- do { +- if (received_SIGINT()) { +- FREEBUF(slab_buf); +- restart(0); +- } ++ return (machdep->memsize = total_node_memory()); ++} + +- if (!verify_slab_v2(si, last, s)) { +- list_borked = 1; +- continue; +- } +- last = si->slab - OFFSET(slab_list); +- +- readmem(si->slab, KVADDR, slab_buf, +- SIZE(slab), "slab buffer", +- FAULT_ON_ERROR); +- +- tmp = INT(slab_buf + OFFSET(slab_inuse)); +- si->inuse += tmp; +- +- if (ACTIVE()) +- gather_cpudata_list_v2(si); ++/* ++ * Determine whether a virtual address is user or kernel or ambiguous. ++ */ ++int ++vaddr_type(ulong vaddr, struct task_context *tc) ++{ ++ int memtype, found; + +- si->s_mem = ULONG(slab_buf + +- OFFSET(slab_s_mem)); +- gather_slab_cached_count(si); +- +- si->num_slabs++; +- +- si->slab = ULONG(slab_buf + +- OFFSET(slab_list)); +- si->slab -= OFFSET(slab_list); ++ if (!tc) ++ tc = CURRENT_CONTEXT(); ++ memtype = found = 0; + +- /* +- * Check for slab transition. (Tony Dziedzic) +- */ +- for (i = 0; i < SLAB_CHAINS; i++) { +- if ((i != s) && +- (si->slab == slab_chains[i])) { +- error(NOTE, +- "%s: slab chain inconsistency: %s list\n", +- si->curname, +- slab_chain_name_v2[s]); +- list_borked = 1; +- } +- } +- +- } while (si->slab != slab_chains[s] && !list_borked); +- } ++ if (machdep->is_uvaddr(vaddr, tc)) { ++ memtype |= UVADDR; ++ found++; ++ } + +- FREEBUF(slab_buf); +- if (!list_borked) +- save_slab_data(si); +- break; ++ if (machdep->is_kvaddr(vaddr)) { ++ memtype |= KVADDR; ++ found++; ++ } + +- case SLAB_WALKTHROUGH: +- specified_slab = si->slab; +- si->flags |= SLAB_WALKTHROUGH; +- si->flags &= ~SLAB_GET_COUNTS; ++ if (found == 1) ++ return memtype; ++ else ++ return AMBIGUOUS; ++} + +- for (s = 0; s < SLAB_CHAINS; s++) { +- if (!slab_chains[s]) +- continue; ++/* ++ * Determine the first valid user space address ++ */ ++static int ++address_space_start(struct task_context *tc, ulong *addr) ++{ ++ ulong vma; ++ char *vma_buf; + +- if (!specified_slab) { +- if (!readmem(slab_chains[s], +- KVADDR, &si->slab, sizeof(ulong), +- "slabs", QUIET|RETURN_ON_ERROR)) { +- error(INFO, +- "%s: %s list: bad slab pointer: %lx\n", +- si->curname, +- slab_chain_name_v2[s], +- slab_chains[s]); +- list_borked = 1; +- continue; +- } +- last = slab_chains[s]; +- } else +- last = 0; +- +- if (si->slab == slab_chains[s]) +- continue; +- +- if (CRASHDEBUG(1)) { +- fprintf(fp, "search cache: [%s] ", si->curname); +- if (si->flags & ADDRESS_SPECIFIED) +- fprintf(fp, "for %llx", si->spec_addr); +- fprintf(fp, "\n"); +- } +- +- do { +- if (received_SIGINT()) +- restart(0); +- +- if (!verify_slab_v2(si, last, s)) { +- list_borked = 1; +- continue; +- } +- last = si->slab - OFFSET(slab_list); ++ if (!tc->mm_struct) ++ return FALSE; 
+ +- dump_slab_percpu_v2(si); +- +- if (si->found) { +- return; +- } +- +- readmem(si->slab+OFFSET(slab_list), +- KVADDR, &si->slab, sizeof(ulong), +- "slab list", FAULT_ON_ERROR); +- +- si->slab -= OFFSET(slab_list); ++ fill_mm_struct(tc->mm_struct); ++ vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); ++ if (!vma) ++ return FALSE; ++ vma_buf = fill_vma_cache(vma); ++ *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); + +- } while (si->slab != slab_chains[s] && !list_borked); +- } +- +- break; +- } ++ return TRUE; + } + + /* +- * Try to preclude any attempt to translate a bogus slab structure. ++ * Search for a given value between a starting and ending address range, ++ * applying an optional mask for "don't care" bits. As an alternative ++ * to entering the starting address value, -k means "start of kernel address ++ * space". For processors with ambiguous user/kernel address spaces, ++ * -u or -k must be used (with or without -s) as a differentiator. + */ +-static int +-verify_slab_v2(struct meminfo *si, ulong last, int s) ++void ++cmd_search(void) + { +- char slab_buf[BUFSIZE]; +- struct kernel_list_head *list_head; +- unsigned int inuse; +- ulong s_mem; +- char *list; +- int errcnt; ++ int c; ++ ulong start, end, mask, memtype, len; ++ ulong uvaddr_end; ++ int sflag; ++ struct meminfo meminfo; ++ ulong value_array[MAXARGS]; ++ struct syment *sp; + +- list = slab_chain_name_v2[s]; ++ start = end = mask = sflag = memtype = len = 0; ++ uvaddr_end = COMMON_VADDR_SPACE() ? (ulong)(-1) : machdep->kvbase; ++ BZERO(value_array, sizeof(ulong) * MAXARGS); + +- errcnt = 0; ++ while ((c = getopt(argcnt, args, "l:uks:e:v:m:")) != EOF) { ++ switch(c) ++ { ++ case 'u': ++ if (!sflag) { ++ address_space_start(CURRENT_CONTEXT(),&start); ++ sflag++; ++ } ++ memtype = UVADDR; ++ sflag++; ++ break; + +- if (!readmem(si->slab, KVADDR, slab_buf, +- SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { +- error(INFO, "%s: %s list: bad slab pointer: %lx\n", +- si->curname, list, si->slab); +- return FALSE; +- } ++ case 'k': ++ if (!sflag) { ++ start = machdep->kvbase; ++ sflag++; ++ } ++ memtype = KVADDR; ++ sflag++; ++ break; ++ ++ case 's': ++ if ((sp = symbol_search(optarg))) ++ start = sp->value; ++ else ++ start = htol(optarg, FAULT_ON_ERROR, NULL); ++ sflag++; ++ break; ++ ++ case 'e': ++ if ((sp = symbol_search(optarg))) ++ end = sp->value; ++ else ++ end = htol(optarg, FAULT_ON_ERROR, NULL); ++ break; + +- list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); +- if (!IS_KVADDR((ulong)list_head->next) || +- !accessible((ulong)list_head->next)) { +- error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", +- si->curname, list, si->slab, +- (ulong)list_head->next); +- errcnt++; +- } ++ case 'l': ++ len = stol(optarg, FAULT_ON_ERROR, NULL); ++ break; + +- if (last && (last != (ulong)list_head->prev)) { +- error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", +- si->curname, list, si->slab, +- (ulong)list_head->prev); +- errcnt++; +- } ++ case 'm': ++ mask = htol(optarg, FAULT_ON_ERROR, NULL); ++ break; + +- inuse = UINT(slab_buf + OFFSET(slab_inuse)); +- if (inuse > si->c_num) { +- error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; +- } ++ default: ++ argerrs++; ++ break; ++ } ++ } + +- if (!last) +- goto no_inuse_check_v2; ++ if (argerrs || !sflag || !args[optind] || (len && end)) ++ cmd_usage(pc->curcmd, SYNOPSIS); + +- switch (s) ++ if (!memtype) ++ memtype = vaddr_type(start, CURRENT_CONTEXT()); ++ 
++ switch (memtype) + { +- case 0: /* partial */ +- if ((inuse == 0) || (inuse == si->c_num)) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; ++ case UVADDR: ++ if (!IS_UVADDR(start, CURRENT_CONTEXT())) { ++ error(INFO, "invalid user virtual address: %lx\n", ++ start); ++ cmd_usage(pc->curcmd, SYNOPSIS); + } + break; + +- case 1: /* full */ +- if (inuse != si->c_num) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; ++ case KVADDR: ++ if (!IS_KVADDR(start)) { ++ error(INFO, "invalid kernel virtual address: %lx\n", ++ start); ++ cmd_usage(pc->curcmd, SYNOPSIS); + } + break; + +- case 2: /* free */ +- if (inuse > 0) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; ++ case AMBIGUOUS: ++ error(INFO, ++ "ambiguous virtual address: %lx (requires -u or -k)\n", ++ start); ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ } ++ ++ if (!end && !len) { ++ switch (memtype) ++ { ++ case UVADDR: ++ end = uvaddr_end; ++ break; ++ ++ case KVADDR: ++ if (vt->vmalloc_start < machdep->identity_map_base) ++ end = (ulong)(-1); ++ else { ++ meminfo.memtype = KVADDR; ++ meminfo.spec_addr = 0; ++ meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); ++ dump_vmlist(&meminfo); ++ end = meminfo.retval; ++ } ++ break; ++ } ++ } else if (len) ++ end = start + len; ++ ++ switch (memtype) ++ { ++ case UVADDR: ++ if (end > uvaddr_end) { ++ error(INFO, ++ "address range starts in user space and ends kernel space\n"); ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ } ++ /* FALLTHROUGH */ ++ case KVADDR: ++ if (end < start) { ++ error(INFO, ++ "ending address %lx is below starting address %lx\n", ++ end, start); ++ cmd_usage(pc->curcmd, SYNOPSIS); + } + break; + } + +-no_inuse_check_v2: +- s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); +- if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { +- error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", +- si->curname, list, si->slab, s_mem); +- errcnt++; ++ c = 0; ++ while (args[optind]) { ++ value_array[c] = htol(args[optind], FAULT_ON_ERROR, NULL); ++ c++; ++ optind++; + } + +- return(errcnt ? FALSE : TRUE); ++ search(start, end, mask, memtype, value_array, c); + } + + /* +- * If it's a dumpfile, save the essential slab data to avoid re-reading +- * the whole slab chain more than once. This may seem like overkill, but +- * if the problem is a memory leak, or just the over-use of the buffer_head +- * cache, it's painful to wait each time subsequent kmem -s or -i commands +- * simply need the basic slab counts. ++ * Do the work for cmd_search(). 
+ */ +-struct slab_data { +- ulong cache_addr; +- int num_slabs; +- int inuse; +- ulong cpucached_cache; +-}; + +-#define NO_SLAB_DATA ((void *)(-1)) ++#define SEARCHMASK(X) ((X) | mask) + +-static void +-save_slab_data(struct meminfo *si) ++static void ++search(ulong start, ulong end, ulong mask, int memtype, ulong *value, int vcnt) + { +- int i; ++ int i, j; ++ ulong pp, next, *ubp; ++ int wordcnt, lastpage; ++ ulong page; ++ physaddr_t paddr; ++ char *pagebuf; + +- if (ACTIVE()) +- return; ++ if (start & (sizeof(long)-1)) { ++ start &= ~(sizeof(long)-1); ++ error(INFO, "rounding down start address to: %lx\n", start); ++ } + +- if (vt->slab_data == NO_SLAB_DATA) +- return; ++ pagebuf = GETBUF(PAGESIZE()); ++ next = start; + +- if (!vt->slab_data) { +- if (!(vt->slab_data = (struct slab_data *) +- malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { +- error(INFO, "cannot malloc slab_data table"); +- vt->slab_data = NO_SLAB_DATA; +- return; +- } +- for (i = 0; i < vt->kmem_cache_count; i++) { +- vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; +- vt->slab_data[i].num_slabs = 0; +- vt->slab_data[i].inuse = 0; +- vt->slab_data[i].cpucached_cache = 0; ++ for (pp = VIRTPAGEBASE(start); next < end; next = pp) { ++ lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end)); ++ if (LKCD_DUMPFILE()) ++ set_lkcd_nohash(); ++ ++ switch (memtype) ++ { ++ case UVADDR: ++ if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || ++ !phys_to_page(paddr, &page)) { ++ if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) ++ return; ++ continue; ++ } ++ break; ++ ++ case KVADDR: ++ if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || ++ !phys_to_page(paddr, &page)) { ++ if (!next_kpage(pp, &pp)) ++ return; ++ continue; ++ } ++ break; ++ } ++ ++ if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(), ++ "search page", RETURN_ON_ERROR|QUIET)) { ++ pp += PAGESIZE(); ++ continue; + } +- } + +- for (i = 0; i < vt->kmem_cache_count; i++) { +- if (vt->slab_data[i].cache_addr == si->cache) +- break; ++ ubp = (ulong *)&pagebuf[next - pp]; ++ if (lastpage) { ++ if (end == (ulong)(-1)) ++ wordcnt = PAGESIZE()/sizeof(long); ++ else ++ wordcnt = (end - next)/sizeof(long); ++ } else ++ wordcnt = (PAGESIZE() - (next - pp))/sizeof(long); + +- if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { +- vt->slab_data[i].cache_addr = si->cache; +- vt->slab_data[i].num_slabs = si->num_slabs; +- vt->slab_data[i].inuse = si->inuse; +- vt->slab_data[i].cpucached_cache = si->cpucached_cache; +- break; ++ for (i = 0; i < wordcnt; i++, ubp++, next += sizeof(long)) { ++ for (j = 0; j < vcnt; j++) { ++ if (SEARCHMASK(*ubp) == SEARCHMASK(value[j])) ++ fprintf(fp, "%lx: %lx\n", next, *ubp); ++ } + } ++ ++ if (CRASHDEBUG(1)) ++ if ((pp % (1024*1024)) == 0) ++ console("%lx\n", pp); ++ ++ pp += PAGESIZE(); + } + } + +-static int +-slab_data_saved(struct meminfo *si) ++ ++/* ++ * Return the next mapped user virtual address page that comes after ++ * the passed-in address. 
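
The inner loop of search() above compares one long at a time, with SEARCHMASK() OR-ing the user-supplied mask into both the memory word and the target value so that masked bits can never cause a mismatch. A minimal user-space sketch of that comparison over a made-up page buffer:

#include <stdio.h>

/* OR the mask into both sides so masked bits never cause a mismatch. */
#define SEARCHMASK(x, mask) ((x) | (mask))

static void search_words(const unsigned long *buf, int wordcnt,
                         unsigned long base, unsigned long value,
                         unsigned long mask)
{
    int i;

    for (i = 0; i < wordcnt; i++)
        if (SEARCHMASK(buf[i], mask) == SEARCHMASK(value, mask))
            printf("%lx: %lx\n",
                   (unsigned long)(base + i * sizeof(long)), buf[i]);
}

int main(void)
{
    unsigned long page[4] = { 0xdead0001UL, 0xdead0002UL, 0xbeef0003UL, 0xdead0004UL };

    /* low 16 bits are "don't care": matches the three 0xdeadXXXX words */
    search_words(page, 4, 0xc0000000UL, 0xdead0000UL, 0xffffUL);
    return 0;
}

As in the patch, the starting address is assumed to be rounded down to a long boundary before the word-by-word scan begins.
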
++ */ ++static int ++next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr) + { +- int i; ++ ulong vma, total_vm; ++ int found; ++ char *vma_buf; ++ ulong vm_start, vm_end; ++ void *vm_next; ++ ++ if (!tc->mm_struct) ++ return FALSE; ++ ++ fill_mm_struct(tc->mm_struct); ++ vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); ++ total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); + +- if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) ++ if (!vma || (total_vm == 0)) + return FALSE; + +- for (i = 0; i < vt->kmem_cache_count; i++) { +- if (vt->slab_data[i].cache_addr == si->cache) { +- si->inuse = vt->slab_data[i].inuse; +- si->num_slabs = vt->slab_data[i].num_slabs; +- si->cpucached_cache = vt->slab_data[i].cpucached_cache; +- return TRUE; +- } +- } +- +- return FALSE; +-} ++ vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ + +-static void +-dump_saved_slab_data(void) +-{ +- int i; ++ for (found = FALSE; vma; vma = (ulong)vm_next) { ++ vma_buf = fill_vma_cache(vma); + +- if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) +- return; ++ vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); ++ vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); ++ vm_next = VOID_PTR(vma_buf + OFFSET(vm_area_struct_vm_next)); + +- for (i = 0; i < vt->kmem_cache_count; i++) { +- if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) +- break; ++ if (vaddr <= vm_start) { ++ *nextvaddr = vm_start; ++ return TRUE; ++ } + +- fprintf(fp, +- " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n", +- vt->slab_data[i].cache_addr, +- vt->slab_data[i].inuse, +- vt->slab_data[i].num_slabs, +- vt->slab_data[i].cpucached_cache); ++ if ((vaddr > vm_start) && (vaddr < vm_end)) { ++ *nextvaddr = vaddr; ++ return TRUE; ++ } + } ++ ++ return FALSE; + } + + /* +- * Dump the contents of a kmem slab. ++ * Return the next mapped kernel virtual address in the vmlist ++ * that is equal to or comes after the passed-in address. + */ +- +-static void +-dump_slab(struct meminfo *si) ++static ulong ++next_vmlist_vaddr(struct meminfo *mi, ulong vaddr) + { +- uint16_t s_offset; ++ ulong i, count; + +- si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem)); +- si->s_mem = PTOB(BTOP(si->s_mem)); ++ BZERO(mi, sizeof(struct meminfo)); + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) && +- (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))){ +- si->found = KMEM_SLAB_ADDR; +- return; +- } +- if (INSLAB(si->spec_addr, si)) +- si->found = KMEM_ON_SLAB; /* But don't return yet... */ +- else +- return; +- } ++ mi->flags = GET_VMLIST_COUNT; ++ dump_vmlist(mi); ++ count = mi->retval; + +- si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep)); +- si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse)); +- si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index)); +- s_offset = USHORT(si->slab_buf + OFFSET(kmem_slab_s_s_offset)); ++ if (!count) ++ return vaddr; + +- if (!(si->flags & ADDRESS_SPECIFIED)) { +- fprintf(fp, slab_hdr); +- DUMP_SLAB_INFO(); ++ mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*count); ++ mi->flags = GET_VMLIST; ++ dump_vmlist(mi); ++ ++ for (i = 0; i < count; i++) { ++ if (vaddr <= mi->vmlist[i].addr) { ++ vaddr = mi->vmlist[i].addr; ++ break; ++ } ++ if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size)) ++ break; + } + +- dump_slab_objects(si); ++ FREEBUF(mi->vmlist); ++ ++ return vaddr; + } + ++ + /* +- * dump_slab() adapted for newer percpu slab format. 
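
next_upage() above bumps the address to the next page boundary and then walks the task's VMA list: an address that falls before a mapping snaps forward to that mapping's vm_start, and one already inside a mapping is returned as-is. A simplified sketch that walks a plain array instead of the cached vm_area_struct chain (page size assumed):

#include <stdio.h>

#define PAGESIZE 4096UL                 /* hypothetical page size */
#define VIRTPAGEBASE(x) ((x) & ~(PAGESIZE - 1))

struct vma { unsigned long vm_start, vm_end; };   /* sorted, non-overlapping */

static int next_upage(const struct vma *v, int nvma,
                      unsigned long vaddr, unsigned long *next)
{
    int i;

    vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE;   /* first possible next page */

    for (i = 0; i < nvma; i++) {
        if (vaddr <= v[i].vm_start) {         /* gap: jump to next mapping */
            *next = v[i].vm_start;
            return 1;
        }
        if (vaddr < v[i].vm_end) {            /* still inside this mapping */
            *next = vaddr;
            return 1;
        }
    }
    return 0;                                 /* nothing mapped above vaddr */
}

int main(void)
{
    struct vma v[] = { { 0x400000, 0x401000 }, { 0x600000, 0x604000 } };
    unsigned long next;

    if (next_upage(v, 2, 0x400100, &next))
        printf("next mapped page: %#lx\n", next);   /* prints 0x600000 */
    return 0;
}
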
++ * Return the next kernel virtual address page that comes after ++ * the passed-in address. + */ +- +-static void +-dump_slab_percpu_v1(struct meminfo *si) ++static int ++next_kpage(ulong vaddr, ulong *nextvaddr) + { +- int tmp; +- +- readmem(si->slab+OFFSET(slab_s_s_mem), +- KVADDR, &si->s_mem, sizeof(ulong), +- "s_mem", FAULT_ON_ERROR); ++ int n; ++ ulong paddr, vaddr_orig, node_size; ++ struct node_table *nt; ++ ulonglong pstart, pend; ++ ulong vmalloc_limit; ++ struct meminfo meminfo; + +- /* +- * Include the array of kmem_bufctl_t's appended to slab. +- */ +- tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num); ++ vaddr_orig = vaddr; ++ vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INSLAB_PERCPU(si->slab, si) && +- (si->spec_addr >= si->slab) && +- (si->spec_addr < (si->slab+tmp))) { +- if (si->spec_addr >= (si->slab + SIZE(slab_s))) +- si->found = KMEM_BUFCTL_ADDR; +- else +- si->found = KMEM_SLAB_ADDR; +- } else if (INSLAB_PERCPU(si->spec_addr, si)) +- si->found = KMEM_ON_SLAB; /* But don't return yet... */ +- else +- return; +- } ++ if (vaddr < vaddr_orig) /* wrapped back to zero? */ ++ return FALSE; + +- readmem(si->slab+OFFSET(slab_s_inuse), +- KVADDR, &tmp, sizeof(int), +- "inuse", FAULT_ON_ERROR); +- si->s_inuse = tmp; ++ meminfo.memtype = KVADDR; ++ meminfo.spec_addr = 0; ++ meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); ++ dump_vmlist(&meminfo); ++ vmalloc_limit = meminfo.retval; + +- readmem(si->slab+OFFSET(slab_s_free), +- KVADDR, &si->free, SIZE(kmem_bufctl_t), +- "kmem_bufctl_t", FAULT_ON_ERROR); ++ if (IS_VMALLOC_ADDR(vaddr_orig)) { ++ if (IS_VMALLOC_ADDR(vaddr) && (vaddr < vmalloc_limit)) { ++ if (machine_type("X86_64")) ++ vaddr = next_vmlist_vaddr(&meminfo, vaddr); ++ *nextvaddr = vaddr; ++ return TRUE; ++ } + +- gather_slab_free_list_percpu(si); +- gather_slab_cached_count(si); ++ if (vt->vmalloc_start < machdep->identity_map_base) { ++ *nextvaddr = machdep->identity_map_base; ++ return TRUE; ++ } + +- if (!(si->flags & ADDRESS_SPECIFIED)) { +- fprintf(fp, slab_hdr); +- DUMP_SLAB_INFO(); ++ return FALSE; + } + +- dump_slab_objects_percpu(si); +-} ++ paddr = VTOP(vaddr); ++ ++ for (n = 0; n < vt->numnodes; n++) { ++ nt = &vt->node_table[n]; ++ if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) ++ node_size = vt->max_mapnr; ++ else ++ node_size = nt->size; ++ ++ pstart = nt->start_paddr; ++ pend = pstart + ((ulonglong)node_size * PAGESIZE()); ++ ++ if ((paddr < pstart) || (paddr >= pend)) ++ continue; ++ /* ++ * We're in the physical range. ++ */ ++ *nextvaddr = vaddr; ++ return TRUE; ++ } + ++ if (vt->vmalloc_start > vaddr) { ++ *nextvaddr = vt->vmalloc_start; ++ return TRUE; ++ } else ++ return FALSE; ++} + + /* +- * Updated for 2.6 slab substructure. ++ * Display swap statistics. + */ +-static void +-dump_slab_percpu_v2(struct meminfo *si) ++void ++cmd_swap(void) + { +- int tmp; ++ int c; + +- readmem(si->slab+OFFSET(slab_s_mem), +- KVADDR, &si->s_mem, sizeof(ulong), +- "s_mem", FAULT_ON_ERROR); ++ while ((c = getopt(argcnt, args, "")) != EOF) { ++ switch(c) ++ { ++ default: ++ argerrs++; ++ break; ++ } ++ } + +- /* +- * Include the array of kmem_bufctl_t's appended to slab. 
+- */ +- tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num); ++ if (argerrs) ++ cmd_usage(pc->curcmd, SYNOPSIS); + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INSLAB_PERCPU(si->slab, si) && +- (si->spec_addr >= si->slab) && +- (si->spec_addr < (si->slab+tmp))) { +- if (si->spec_addr >= (si->slab + SIZE(slab))) +- si->found = KMEM_BUFCTL_ADDR; +- else +- si->found = KMEM_SLAB_ADDR; +- } else if (INSLAB_PERCPU(si->spec_addr, si)) +- si->found = KMEM_ON_SLAB; /* But don't return yet... */ +- else +- return; +- } ++ dump_swap_info(VERBOSE, NULL, NULL); ++} + +- readmem(si->slab+OFFSET(slab_inuse), +- KVADDR, &tmp, sizeof(int), +- "inuse", FAULT_ON_ERROR); +- si->s_inuse = tmp; ++/* ++ * Do the work for cmd_swap(). ++ */ + +- readmem(si->slab+OFFSET(slab_free), +- KVADDR, &si->free, SIZE(kmem_bufctl_t), +- "kmem_bufctl_t", FAULT_ON_ERROR); ++#define SWP_USED 1 ++#define SWAP_MAP_BAD 0x8000 + +- gather_slab_free_list_percpu(si); +- gather_slab_cached_count(si); ++char *swap_info_hdr = \ ++"FILENAME TYPE SIZE USED PCT PRIORITY\n"; + +- if (!(si->flags & ADDRESS_SPECIFIED)) { +- fprintf(fp, slab_hdr); +- DUMP_SLAB_INFO(); +- } ++static int ++dump_swap_info(ulong swapflags, ulong *totalswap_pages, ulong *totalused_pages) ++{ ++ int i, j; ++ int flags, swap_device, pages, prio, usedswap; ++ ulong swap_file, max, swap_map, pct; ++ ulong vfsmnt; ++ ulong swap_info; ++ ushort *map; ++ ulong totalswap, totalused; ++ char buf[BUFSIZE]; + +- dump_slab_objects_percpu(si); +-} ++ if (!symbol_exists("nr_swapfiles")) ++ error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); + ++ if (!symbol_exists("swap_info")) ++ error(FATAL, "swap_info doesn't exist in this kernel!\n"); + ++ swap_info = symbol_value("swap_info"); + +-/* +- * Gather the free objects in a slab into the si->addrlist, checking for +- * specified addresses that are in-slab kmem_bufctls, and making error checks +- * along the way. Object address checks are deferred to dump_slab_objects(). +- */ ++ if (swapflags & VERBOSE) ++ fprintf(fp, swap_info_hdr); + +-#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size))) ++ totalswap = totalused = 0; + +-static void +-gather_slab_free_list(struct meminfo *si) +-{ +- ulong *next, obj; +- ulong expected, cnt; ++ for (i = 0; i < vt->nr_swapfiles; i++, ++ swap_info += SIZE(swap_info_struct)) { ++ fill_swap_info(swap_info); + +- BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); ++ flags = INT(vt->swap_info_struct + ++ OFFSET(swap_info_struct_flags)); + +- if (!si->s_freep) +- return; ++ if (!(flags & SWP_USED)) ++ continue; + +- cnt = 0; +- expected = si->c_num - si->s_inuse; ++ swap_file = ULONG(vt->swap_info_struct + ++ OFFSET(swap_info_struct_swap_file)); + +- next = si->s_freep; +- do { ++ swap_device = INT(vt->swap_info_struct + ++ OFFSET_OPTION(swap_info_struct_swap_device, ++ swap_info_struct_old_block_size)); + +- if (cnt == si->c_num) { +- error(INFO, +- "\"%s\" cache: too many objects found in slab free list\n", +- si->curname); +- si->errors++; +- return; +- } ++ pages = INT(vt->swap_info_struct + ++ OFFSET(swap_info_struct_pages)); + +- /* +- * Off-slab kmem_bufctls are contained in arrays of object +- * pointers that point to: +- * 1. next kmem_bufctl (or NULL) if the object is free. +- * 2. to the object if it the object is in use. +- * +- * On-slab kmem_bufctls resides just after the object itself, +- * and point to: +- * 1. next kmem_bufctl (or NULL) if object is free. +- * 2. the containing slab if the object is in use. 
+- */ ++ totalswap += pages; ++ pages <<= (PAGESHIFT() - 10); + +- if (si->c_flags & SLAB_CFLGS_BUFCTL) +- obj = si->s_mem + ((next - si->s_index) * si->c_offset); +- else +- obj = (ulong)next - si->c_offset; ++ prio = INT(vt->swap_info_struct + ++ OFFSET(swap_info_struct_prio)); + +- si->addrlist[cnt] = obj; ++ if (MEMBER_SIZE("swap_info_struct", "max") == sizeof(int)) ++ max = UINT(vt->swap_info_struct + ++ OFFSET(swap_info_struct_max)); ++ else ++ max = ULONG(vt->swap_info_struct + ++ OFFSET(swap_info_struct_max)); + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INSLAB(next, si) && +- (si->spec_addr >= (ulong)next) && +- (si->spec_addr < (ulong)(next + 1))) { +- si->found = KMEM_BUFCTL_ADDR; +- return; ++ swap_map = ULONG(vt->swap_info_struct + ++ OFFSET(swap_info_struct_swap_map)); ++ ++ if (swap_file) { ++ if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { ++ vfsmnt = ULONG(vt->swap_info_struct + ++ OFFSET(swap_info_struct_swap_vfsmnt)); ++ get_pathname(swap_file, buf, BUFSIZE, ++ 1, vfsmnt); ++ } else if (VALID_MEMBER ++ (swap_info_struct_old_block_size)) { ++ get_pathname(file_to_dentry(swap_file), ++ buf, BUFSIZE, 1, file_to_vfsmnt(swap_file)); ++ } else { ++ get_pathname(swap_file, buf, BUFSIZE, 1, 0); + } +- } ++ } else ++ sprintf(buf, "(unknown)"); + +- cnt++; ++ map = (ushort *)GETBUF(sizeof(ushort) * max); + +- if (!INSLAB(obj, si)) { +- error(INFO, +- "\"%s\" cache: address not contained within slab: %lx\n", +- si->curname, obj); +- si->errors++; ++ if (!readmem(swap_map, KVADDR, map, ++ sizeof(ushort) * max, "swap_info swap_map data", ++ RETURN_ON_ERROR|QUIET)) { ++ if (swapflags & RETURN_ON_ERROR) { ++ *totalswap_pages = swap_map; ++ *totalused_pages = i; ++ return FALSE; ++ } else ++ error(FATAL, ++ "swap_info[%d].swap_map at %lx is unaccessible\n", ++ i, swap_map); + } + +- readmem((ulong)next, KVADDR, &next, sizeof(void *), +- "s_freep chain entry", FAULT_ON_ERROR); +- } while (next); ++ usedswap = 0; ++ for (j = 0; j < max; j++) { ++ switch (map[j]) ++ { ++ case SWAP_MAP_BAD: ++ case 0: ++ continue; ++ default: ++ usedswap++; ++ } ++ } + +- if (cnt != expected) { +- error(INFO, +- "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", +- si->curname, expected, cnt); +- si->errors++; ++ FREEBUF(map); ++ ++ totalused += usedswap; ++ usedswap <<= (PAGESHIFT() - 10); ++ pct = (usedswap * 100)/pages; ++ ++ if (swapflags & VERBOSE) ++ fprintf(fp, "%-15s %s %7dk %7dk %2ld%% %d\n", ++ buf, swap_device ? "PARTITION" : " FILE ", ++ pages, usedswap, pct, prio); + } +-} + ++ if (totalswap_pages) ++ *totalswap_pages = totalswap; ++ if (totalused_pages) ++ *totalused_pages = totalused; ++ ++ return TRUE; ++} + + /* +- * gather_slab_free_list() adapted for newer percpu slab format. ++ * Translate a PTE into a swap device and offset string. 
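
dump_swap_info() above measures usage by scanning each device's swap_map array -- any slot that is neither free (0) nor SWAP_MAP_BAD counts as used -- then converts page counts to kilobytes with pages << (PAGESHIFT - 10) and derives the percentage from that. A standalone sketch of the accounting with a made-up eight-entry map and an assumed 4 KB page:

#include <stdio.h>

#define PAGESHIFT     12            /* hypothetical 4 KB pages */
#define SWAP_MAP_BAD  0x8000

static void swap_usage(const unsigned short *map, unsigned long max,
                       unsigned long pages)
{
    unsigned long j, used = 0;

    for (j = 0; j < max; j++)
        if (map[j] && map[j] != SWAP_MAP_BAD)
            used++;                             /* slot holds swapped data */

    printf("SIZE %luk USED %luk PCT %lu%%\n",
           pages << (PAGESHIFT - 10),
           used  << (PAGESHIFT - 10),
           (used * 100) / pages);
}

int main(void)
{
    unsigned short map[8] = { 0, 1, 2, SWAP_MAP_BAD, 0, 5, 0, 1 };

    swap_usage(map, 8, 8);          /* 4 of 8 pages in use -> 50% */
    return 0;
}

Note that the patch's RETURN_ON_ERROR path reuses the two output parameters to report the failing swap_map address and index, which is worth keeping in mind when calling it programmatically.
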
+ */ ++char * ++swap_location(ulonglong pte, char *buf) ++{ ++ char swapdev[BUFSIZE]; + +-#define BUFCTL_END 0xffffFFFF ++ if (!pte) ++ return NULL; + +-static void +-gather_slab_free_list_percpu(struct meminfo *si) +-{ +- int i; +- ulong obj; +- ulong expected, cnt; +- int free_index; +- ulong kmembp; +- short *kbp; ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) ++ sprintf(buf, "%s OFFSET: %lld", ++ get_swapdev(__swp_type(pte), swapdev), __swp_offset(pte)); ++ else ++ sprintf(buf, "%s OFFSET: %llx", ++ get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte)); + +- BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); ++ return buf; ++} + +- if (CRASHDEBUG(1)) +- fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n", +- si->slab, si->s_inuse, si->c_num); ++/* ++ * Given the type field from a PTE, return the name of the swap device. ++ */ ++static char * ++get_swapdev(ulong type, char *buf) ++{ ++ unsigned int i, swap_info_len; ++ ulong swap_info, swap_file; ++ ulong vfsmnt; + +- if (si->s_inuse == si->c_num ) +- return; ++ if (!symbol_exists("nr_swapfiles")) ++ error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); + +- kmembp = si->slab + SIZE_OPTION(slab_s, slab); +- readmem((ulong)kmembp, KVADDR, si->kmem_bufctl, +- SIZE(kmem_bufctl_t) * si->c_num, +- "kmem_bufctl array", FAULT_ON_ERROR); ++ if (!symbol_exists("swap_info")) ++ error(FATAL, "swap_info doesn't exist in this kernel!\n"); + +- if (CRASHDEBUG(1)) { +- for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) && +- (i < si->c_num); i++) +- fprintf(fp, "%d ", si->kmem_bufctl[i]); ++ swap_info = symbol_value("swap_info"); + +- for (kbp = (short *)&si->kmem_bufctl[0], i = 0; +- (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num); +- i++) +- fprintf(fp, "%d ", *(kbp + i)); ++ swap_info_len = (i = ARRAY_LENGTH(swap_info)) ? 
++ i : get_array_length("swap_info", NULL, 0); + +- fprintf(fp, "\n"); +- } ++ sprintf(buf, "(unknown swap location)"); + +- cnt = 0; +- expected = si->c_num - si->s_inuse; ++ if (type >= swap_info_len) ++ return buf; + +- if (SIZE(kmem_bufctl_t) == sizeof(int)) { +- for (free_index = si->free; free_index != BUFCTL_END; +- free_index = si->kmem_bufctl[free_index]) { +- +- if (cnt == si->c_num) { +- error(INFO, +- "\"%s\" cache: too many objects found in slab free list\n", +- si->curname); +- si->errors++; +- return; +- } +- +- obj = si->s_mem + (free_index*si->size); +- si->addrlist[cnt] = obj; +- cnt++; +- } +- } else if (SIZE(kmem_bufctl_t) == sizeof(short)) { +- kbp = (short *)&si->kmem_bufctl[0]; ++ swap_info += (SIZE(swap_info_struct) * type); ++ fill_swap_info(swap_info); ++ swap_file = ULONG(vt->swap_info_struct + ++ OFFSET(swap_info_struct_swap_file)); + +- for (free_index = si->free; free_index != BUFCTL_END; +- free_index = (int)*(kbp + free_index)) { ++ if (swap_file) { ++ if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { ++ vfsmnt = ULONG(vt->swap_info_struct + ++ OFFSET(swap_info_struct_swap_vfsmnt)); ++ get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt); ++ } else if (VALID_MEMBER (swap_info_struct_old_block_size)) { ++ get_pathname(file_to_dentry(swap_file), ++ buf, BUFSIZE, 1, 0); ++ } else { ++ get_pathname(swap_file, buf, BUFSIZE, 1, 0); ++ } ++ } + +- if (cnt == si->c_num) { +- error(INFO, +- "\"%s\" cache: too many objects found in slab free list\n", si->curname); +- si->errors++; +- return; +- } ++ return buf; ++} + +- obj = si->s_mem + (free_index*si->size); +- si->addrlist[cnt] = obj; +- cnt++; +- } +- } else +- error(FATAL, +- "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n", +- SIZE(kmem_bufctl_t)); ++/* ++ * If not currently stashed, cache the passed-in swap_info_struct. ++ */ ++static void ++fill_swap_info(ulong swap_info) ++{ ++ if (vt->last_swap_read == swap_info) ++ return; + +- if (cnt != expected) { +- error(INFO, +- "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", +- si->curname, expected, cnt); +- si->errors++; +- } ++ if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *) ++ malloc(SIZE(swap_info_struct)))) ++ error(FATAL, "cannot malloc swap_info_struct space\n"); ++ ++ readmem(swap_info, KVADDR, vt->swap_info_struct, SIZE(swap_info_struct), ++ "fill_swap_info", FAULT_ON_ERROR); ++ ++ vt->last_swap_read = swap_info; + } + ++/* ++ * If active, clear references to the swap_info references. ++ */ ++void ++clear_swap_info_cache(void) ++{ ++ if (ACTIVE()) ++ vt->last_swap_read = 0; ++} + + + /* +- * Dump the FREE, [ALLOCATED] and objects of a slab. +- */ ++ * Translage a vm_area_struct and virtual address into a filename ++ * and offset string. ++ */ + +-#define DUMP_SLAB_OBJECT() \ +- for (j = on_free_list = 0; j < si->c_num; j++) { \ +- if (obj == si->addrlist[j]) { \ +- on_free_list = TRUE; \ +- break; \ +- } \ +- } \ +- \ +- if (on_free_list) { \ +- if (!(si->flags & ADDRESS_SPECIFIED)) \ +- fprintf(fp, " %lx\n", obj); \ +- if (si->flags & ADDRESS_SPECIFIED) { \ +- if (INOBJECT(si->spec_addr, obj)) { \ +- si->found = \ +- KMEM_OBJECT_ADDR_FREE; \ +- return; \ +- } \ +- } \ +- } else { \ +- if (!(si->flags & ADDRESS_SPECIFIED)) \ +- fprintf(fp, " [%lx]\n", obj); \ +- cnt++; \ +- if (si->flags & ADDRESS_SPECIFIED) { \ +- if (INOBJECT(si->spec_addr, obj)) { \ +- si->found = \ +- KMEM_OBJECT_ADDR_INUSE; \ +- return; \ +- } \ +- } \ +- } ++#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change! 
*/ + +-static void +-dump_slab_objects(struct meminfo *si) ++static char * ++vma_file_offset(ulong vma, ulong vaddr, char *buf) + { +- int i, j; +- ulong *next; +- int on_free_list; +- ulong cnt, expected; +- ulong bufctl, obj; ++ ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset; ++ ulong vfsmnt; ++ char file[BUFSIZE]; ++ char *vma_buf, *file_buf; + +- gather_slab_free_list(si); ++ if (!vma) ++ return NULL; + +- if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) +- return; ++ vma_buf = fill_vma_cache(vma); + +- cnt = 0; +- expected = si->s_inuse; ++ vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); + +- if (CRASHDEBUG(1)) +- for (i = 0; i < si->c_num; i++) { +- fprintf(fp, "si->addrlist[%d]: %lx\n", +- i, si->addrlist[i]); +- } ++ if (!vm_file) ++ goto no_file_offset; + +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, free_inuse_hdr); ++ file_buf = fill_file_cache(vm_file); ++ dentry = ULONG(file_buf + OFFSET(file_f_dentry)); + +- /* For on-slab bufctls, c_offset is the distance between the start of +- * an obj and its related bufctl. For off-slab bufctls, c_offset is +- * the distance between objs in the slab. +- */ ++ if (!dentry) ++ goto no_file_offset; + +- if (si->c_flags & SLAB_CFLGS_BUFCTL) { +- for (i = 0, next = si->s_index; i < si->c_num; i++, next++){ +- obj = si->s_mem + +- ((next - si->s_index) * si->c_offset); +- DUMP_SLAB_OBJECT(); +- } +- } else { +- /* +- * Get the "real" s_mem, i.e., without the offset stripped off. +- * It contains the address of the first object. +- */ +- readmem(si->slab+OFFSET(kmem_slab_s_s_mem), +- KVADDR, &obj, sizeof(ulong), +- "s_mem", FAULT_ON_ERROR); ++ file[0] = NULLCHAR; ++ if (VALID_MEMBER(file_f_vfsmnt)) { ++ vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); ++ get_pathname(dentry, file, BUFSIZE, 1, vfsmnt); ++ } else ++ get_pathname(dentry, file, BUFSIZE, 1, 0); + +- for (i = 0; i < si->c_num; i++) { +- DUMP_SLAB_OBJECT(); ++ if (!strlen(file)) ++ goto no_file_offset; + +- if (si->flags & ADDRESS_SPECIFIED) { +- bufctl = obj + si->c_offset; ++ vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); + +- if ((si->spec_addr >= bufctl) && +- (si->spec_addr < +- (bufctl + SIZE(kmem_bufctl_t)))) { +- si->found = KMEM_BUFCTL_ADDR; +- return; +- } +- } ++ vm_offset = vm_pgoff = 0xdeadbeef; + +- obj += (si->c_offset + SIZE(kmem_bufctl_t)); +- } ++ if (VALID_MEMBER(vm_area_struct_vm_offset)) ++ vm_offset = ULONG(vma_buf + ++ OFFSET(vm_area_struct_vm_offset)); ++ else if (VALID_MEMBER(vm_area_struct_vm_pgoff)) ++ vm_pgoff = ULONG(vma_buf + ++ OFFSET(vm_area_struct_vm_pgoff)); ++ else ++ goto no_file_offset; ++ ++ if (vm_offset != 0xdeadbeef) ++ offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset; ++ else if (vm_pgoff != 0xdeadbeef) { ++ offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff; ++ offset <<= PAGE_CACHE_SHIFT; + } + +- if (cnt != expected) { +- error(INFO, +- "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", +- si->curname, expected, cnt); +- si->errors++; +- } ++ sprintf(buf, "%s OFFSET: %lx", file, offset); + +-} ++ return buf; + ++no_file_offset: ++ return NULL; ++} + + /* +- * dump_slab_objects() adapted for newer percpu slab format. ++ * Translate a PTE into its physical address and flags. 
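
For the vm_pgoff case, vma_file_offset() above converts the user address into a file offset by taking its page index within the VMA, adding vm_pgoff, and shifting the sum back up by PAGE_CACHE_SHIFT, which also page-aligns the result. A minimal sketch of that calculation with an assumed 4 KB page-cache page:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12         /* hypothetical 4 KB page-cache pages */

/* Byte offset in the mapped file corresponding to user address vaddr. */
static unsigned long file_offset(unsigned long vaddr, unsigned long vm_start,
                                 unsigned long vm_pgoff)
{
    unsigned long offset;

    offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff;
    return offset << PAGE_CACHE_SHIFT;
}

int main(void)
{
    /* mapping starts 8 pages into the file (vm_pgoff = 8) */
    printf("OFFSET: %lx\n", file_offset(0x40003123UL, 0x40000000UL, 8));
    return 0;
}

The older vm_offset field, when present, is a byte offset already, so the patch simply adds it to the page-aligned distance from vm_start instead.
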
+ */ +- +-static void +-dump_slab_objects_percpu(struct meminfo *si) ++void ++cmd_pte(void) + { +- int i, j; +- int on_free_list, on_cpudata_list; +- ulong cnt, expected; +- ulong obj; +- +- if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) +- return; +- +- cnt = 0; +- expected = si->s_inuse; ++ int c; ++ ulonglong pte; + +- if (CRASHDEBUG(1)) +- for (i = 0; i < si->c_num; i++) { +- fprintf(fp, "si->addrlist[%d]: %lx\n", +- i, si->addrlist[i]); ++ while ((c = getopt(argcnt, args, "")) != EOF) { ++ switch(c) ++ { ++ default: ++ argerrs++; ++ break; + } ++ } + +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, free_inuse_hdr); +- +- for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { +- on_free_list = FALSE; +- on_cpudata_list = FALSE; +- +- for (j = 0; j < si->c_num; j++) { +- if (obj == si->addrlist[j]) { +- on_free_list = TRUE; +- break; +- } +- } +- +- on_cpudata_list = check_cpudata_list(si, obj); ++ if (argerrs) ++ cmd_usage(pc->curcmd, SYNOPSIS); + +- if (on_free_list && on_cpudata_list) { +- error(INFO, +- "\"%s\" cache: object %lx on both free and cpudata lists\n", +- si->curname, obj); +- si->errors++; +- } +- +- if (on_free_list) { +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, " %lx\n", obj); +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INOBJECT(si->spec_addr, obj)) { +- si->found = +- KMEM_OBJECT_ADDR_FREE; +- return; +- } +- } +- } else if (on_cpudata_list) { +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, " %lx (cpu %d cache)\n", obj, +- si->cpu); +- cnt++; +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INOBJECT(si->spec_addr, obj)) { +- si->found = +- KMEM_OBJECT_ADDR_CACHED; +- return; +- } +- } +- } else { +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, " [%lx]\n", obj); +- cnt++; +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INOBJECT(si->spec_addr, obj)) { +- si->found = +- KMEM_OBJECT_ADDR_INUSE; +- return; +- } +- } +- } ++ while (args[optind]) { ++ pte = htoll(args[optind], FAULT_ON_ERROR, NULL); ++ machdep->translate_pte((ulong)pte, NULL, pte); ++ optind++; + } + +- if (cnt != expected) { +- error(INFO, +- "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", +- si->curname, expected, cnt); +- si->errors++; +- } + } + ++static char *node_zone_hdr = "ZONE NAME SIZE"; ++ + /* +- * Determine how many of the "inuse" slab objects are actually cached +- * in the kmem_cache_s header. Set the per-slab count and update the +- * cumulative per-cache count. ++ * On systems supporting memory nodes, display the basic per-node data. 
+ */ +- + static void +-gather_slab_cached_count(struct meminfo *si) ++dump_memory_nodes(int initialize) + { +- int i; +- ulong obj; ++ int i, j; ++ int n, id, node, flen, slen, badaddr; ++ ulong node_mem_map; ++ ulong node_start_paddr; ++ ulong node_start_pfn; ++ ulong node_start_mapnr; ++ ulong node_spanned_pages, node_present_pages; ++ ulong free_pages, zone_size, node_size, cum_zone_size; ++ ulong zone_start_paddr, zone_start_mapnr, zone_mem_map; ++ physaddr_t phys; ++ ulong pp; ++ ulong zone_start_pfn; ++ ulong bdata; ++ ulong pgdat; ++ ulong node_zones; ++ ulong value; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char buf3[BUFSIZE]; ++ char buf4[BUFSIZE]; ++ char buf5[BUFSIZE]; ++ struct node_table *nt; + +- si->cpucached_slab = 0; ++ if (!(vt->flags & (NODES|NODES_ONLINE)) && initialize) { ++ nt = &vt->node_table[0]; ++ nt->node_id = 0; ++ if (symbol_exists("contig_page_data")) ++ nt->pgdat = symbol_value("contig_page_data"); ++ else ++ nt->pgdat = 0; ++ nt->size = vt->total_pages; ++ nt->mem_map = vt->mem_map; ++ nt->start_paddr = 0; ++ nt->start_mapnr = 0; ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "node_table[%d]: \n", 0); ++ fprintf(fp, " id: %d\n", nt->node_id); ++ fprintf(fp, " pgdat: %lx\n", nt->pgdat); ++ fprintf(fp, " size: %ld\n", nt->size); ++ fprintf(fp, " present: %ld\n", nt->present); ++ fprintf(fp, " mem_map: %lx\n", nt->mem_map); ++ fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); ++ fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); ++ } ++ return; ++ } + +- for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { +- if (check_cpudata_list(si, obj)) { +- si->cpucached_slab++; +- if (si->flags & SLAB_GET_COUNTS) { +- si->cpucached_cache++; ++ if (initialize) { ++ pgdat = UNINITIALIZED; ++ /* ++ * This order may have to change based upon architecture... ++ */ ++ if (symbol_exists("pgdat_list") && ++ (VALID_MEMBER(pglist_data_node_next) || ++ VALID_MEMBER(pglist_data_pgdat_next))) { ++ get_symbol_data("pgdat_list", sizeof(void *), &pgdat); ++ vt->flags &= ~NODES_ONLINE; ++ } else if (vt->flags & NODES_ONLINE) { ++ if ((node = next_online_node(0)) < 0) { ++ error(WARNING, ++ "cannot determine first node from node_online_map\n\n"); ++ return; ++ } ++ if (!(pgdat = next_online_pgdat(node))) { ++ error(WARNING, ++ "cannot determine pgdat list for this kernel/architecture\n\n"); ++ return; + } +- } ++ } ++ } else ++ pgdat = vt->node_table[0].pgdat; ++ ++ if (initialize && (pgdat == UNINITIALIZED)) { ++ error(WARNING, "cannot initialize pgdat list\n\n"); ++ return; + } +-} + +-/* +- * Populate the percpu object list for a given slab. 
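
The readmem(pgdat + OFFSET(member)) calls that follow, like the ULONG(buf + OFFSET(member)) extractions earlier in this hunk, locate structure members by debuginfo offsets rather than by compiled-in layout. The standalone model below shows the same idea against a mock struct; pglist_model, get_ulong() and the sample values are illustrative stand-ins, not crash's machinery.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct pglist_model {                   /* stand-in for the target struct */
        unsigned long node_start_pfn;
        unsigned long node_spanned_pages;
        int node_id;
};

static unsigned long get_ulong(const char *buf, size_t offset)
{
        unsigned long value;

        memcpy(&value, buf + offset, sizeof(value));    /* ULONG() equivalent */
        return value;
}

int main(void)
{
        struct pglist_model src = { 0x100000, 0x40000, 0 };
        char buf[sizeof(src)];

        memcpy(buf, &src, sizeof(src));                 /* readmem() stand-in */

        printf("spanned pages: %lx\n",
            get_ulong(buf, offsetof(struct pglist_model, node_spanned_pages)));
        return 0;
}
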
+- */ ++ for (n = 0, badaddr = FALSE; pgdat; n++) { ++ if (n >= vt->numnodes) ++ error(FATAL, "numnodes out of sync with pgdat_list?\n"); + +-static void +-gather_cpudata_list_v1(struct meminfo *si) +-{ +- int i, j; +- int avail; +- ulong cpudata[NR_CPUS]; ++ nt = &vt->node_table[n]; + +- if (INVALID_MEMBER(kmem_cache_s_cpudata)) +- return; ++ readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, ++ sizeof(int), "pglist node_id", FAULT_ON_ERROR); + +- readmem(si->cache+OFFSET(kmem_cache_s_cpudata), +- KVADDR, &cpudata[0], +- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), +- "cpudata array", FAULT_ON_ERROR); ++ if (VALID_MEMBER(pglist_data_node_mem_map)) { ++ readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, ++ &node_mem_map, sizeof(ulong), ++ "node_mem_map", FAULT_ON_ERROR); ++ } else { ++ node_mem_map = BADADDR; ++ badaddr = TRUE; ++ } + +- for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && +- cpudata[i]; i++) { +- BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); ++ if (VALID_MEMBER(pglist_data_node_start_paddr)) ++ readmem(pgdat+OFFSET(pglist_data_node_start_paddr), ++ KVADDR, &node_start_paddr, sizeof(ulong), ++ "pglist node_start_paddr", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(pglist_data_node_start_pfn)) { ++ readmem(pgdat+OFFSET(pglist_data_node_start_pfn), ++ KVADDR, &node_start_pfn, sizeof(ulong), ++ "pglist node_start_pfn", FAULT_ON_ERROR); ++ node_start_mapnr = node_start_pfn; ++ node_start_paddr = PTOB(node_start_pfn); ++ if (badaddr && IS_SPARSEMEM()) { ++ phys = PTOB(node_start_pfn); ++ if (phys_to_page(phys, &pp)) ++ node_mem_map = pp; ++ } ++ } else error(INFO, ++ "cannot determine zone starting physical address\n"); + +- readmem(cpudata[i]+OFFSET(cpucache_s_avail), +- KVADDR, &avail, sizeof(int), +- "cpucache avail", FAULT_ON_ERROR); ++ if (VALID_MEMBER(pglist_data_node_start_mapnr)) ++ readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), ++ KVADDR, &node_start_mapnr, sizeof(ulong), ++ "pglist node_start_mapnr", FAULT_ON_ERROR); + +- if (!avail) +- continue; ++ if (VALID_MEMBER(pglist_data_node_size)) ++ readmem(pgdat+OFFSET(pglist_data_node_size), ++ KVADDR, &node_size, sizeof(ulong), ++ "pglist node_size", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(pglist_data_node_spanned_pages)) { ++ readmem(pgdat+OFFSET(pglist_data_node_spanned_pages), ++ KVADDR, &node_spanned_pages, sizeof(ulong), ++ "pglist node_spanned_pages", FAULT_ON_ERROR); ++ node_size = node_spanned_pages; ++ } else error(INFO, "cannot determine zone size\n"); + +- if (avail > vt->kmem_max_limit) { +- error(INFO, +- "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n", +- si->curname, avail, vt->kmem_max_limit); +- si->errors++; ++ if (VALID_MEMBER(pglist_data_node_present_pages)) ++ readmem(pgdat+OFFSET(pglist_data_node_present_pages), ++ KVADDR, &node_present_pages, sizeof(ulong), ++ "pglist node_present_pages", FAULT_ON_ERROR); ++ else ++ node_present_pages = 0; ++ ++ readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata, ++ sizeof(ulong), "pglist bdata", FAULT_ON_ERROR); ++ ++ if (initialize) { ++ nt->node_id = id; ++ nt->pgdat = pgdat; ++ if (VALID_MEMBER(zone_struct_memsize)) ++ nt->size = 0; /* initialize below */ ++ else ++ nt->size = node_size; ++ nt->present = node_present_pages; ++ nt->mem_map = node_mem_map; ++ nt->start_paddr = node_start_paddr; ++ nt->start_mapnr = node_start_mapnr; ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "node_table[%d]: \n", n); ++ fprintf(fp, " id: %d\n", nt->node_id); ++ fprintf(fp, " pgdat: %lx\n", nt->pgdat); ++ fprintf(fp, " size: 
%ld\n", nt->size); ++ fprintf(fp, " present: %ld\n", nt->present); ++ fprintf(fp, " mem_map: %lx\n", nt->mem_map); ++ fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); ++ fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); ++ } + } + +- if (CRASHDEBUG(2)) +- fprintf(fp, "%s: cpu[%d] avail: %d\n", +- si->curname, i, avail); ++ if (!initialize) { ++ if (n) { ++ fprintf(fp, "\n"); ++ pad_line(fp, slen, '-'); ++ } ++ flen = MAX(VADDR_PRLEN, strlen("BOOTMEM_DATA")); ++ fprintf(fp, "%sNODE %s %s %s %s\n", ++ n ? "\n\n" : "", ++ mkstring(buf1, 8, CENTER, "SIZE"), ++ mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"), ++ mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"), ++ mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES")); + +- readmem(cpudata[i]+SIZE(cpucache_s), +- KVADDR, si->cpudata[i], +- sizeof(void *) * avail, +- "cpucache avail", FAULT_ON_ERROR); ++ node_zones = pgdat + OFFSET(pglist_data_node_zones); ++ sprintf(buf5, " %2d %s %s %s %s\n", id, ++ mkstring(buf1, 8, CENTER|LJUST|LONG_DEC, ++ MKSTR(node_size)), ++ mkstring(buf2, flen, CENTER|LJUST|LONG_HEX, ++ MKSTR(pgdat)), ++ mkstring(buf3, flen, CENTER|LONG_HEX, ++ MKSTR(bdata)), ++ mkstring(buf4, flen, CENTER|LJUST|LONG_HEX, ++ MKSTR(node_zones))); ++ fprintf(fp, "%s", buf5); + +- if (CRASHDEBUG(2)) +- for (j = 0; j < avail; j++) +- fprintf(fp, " %lx\n", si->cpudata[i][j]); +- } +-} ++ j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) + ++ count_leading_spaces(buf4); ++ for (i = 1; i < vt->nr_zones; i++) { ++ node_zones += SIZE_OPTION(zone_struct, zone); ++ INDENT(j); ++ fprintf(fp, "%lx\n", node_zones); ++ } ++ ++ fprintf(fp, "%s START_PADDR START_MAPNR\n", ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, ++ "MEM_MAP")); ++ fprintf(fp, "%s %s %s\n", ++ mkstring(buf1, VADDR_PRLEN, ++ CENTER|LONG_HEX, MKSTR(node_mem_map)), ++ mkstring(buf2, strlen("START_PADDR"), ++ CENTER|LONG_HEX|RJUST, MKSTR(node_start_paddr)), ++ mkstring(buf3, strlen("START_MAPNR"), ++ CENTER|LONG_DEC|RJUST, ++ MKSTR(node_start_mapnr))); ++ ++ sprintf(buf2, "%s %s START_PADDR START_MAPNR", ++ node_zone_hdr, ++ mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, ++ "MEM_MAP")); ++ slen = strlen(buf2); ++ fprintf(fp, "\n%s\n", buf2); ++ } + +-/* +- * Updated for 2.6 slab percpu data structure. 
+- */ +-static void +-gather_cpudata_list_v2(struct meminfo *si) +-{ +- int i, j; +- int avail; +- ulong cpudata[NR_CPUS]; ++ node_zones = pgdat + OFFSET(pglist_data_node_zones); ++ cum_zone_size = 0; ++ for (i = 0; i < vt->nr_zones; i++) { ++ if (CRASHDEBUG(7)) ++ fprintf(fp, "zone %d at %lx\n", i, node_zones); ++ ++ if (VALID_MEMBER(zone_struct_size)) ++ readmem(node_zones+OFFSET(zone_struct_size), ++ KVADDR, &zone_size, sizeof(ulong), ++ "zone_struct size", FAULT_ON_ERROR); ++ else if (VALID_MEMBER(zone_struct_memsize)) { ++ readmem(node_zones+OFFSET(zone_struct_memsize), ++ KVADDR, &zone_size, sizeof(ulong), ++ "zone_struct memsize", FAULT_ON_ERROR); ++ nt->size += zone_size; ++ } else if (VALID_MEMBER(zone_spanned_pages)) { ++ readmem(node_zones+ OFFSET(zone_spanned_pages), ++ KVADDR, &zone_size, sizeof(ulong), ++ "zone spanned_pages", FAULT_ON_ERROR); ++ } else error(FATAL, ++ "zone_struct has neither size nor memsize field\n"); ++ ++ readmem(node_zones+ ++ OFFSET_OPTION(zone_struct_free_pages, ++ zone_free_pages), KVADDR, &free_pages, ++ sizeof(ulong), "zone[_struct] free_pages", ++ FAULT_ON_ERROR); ++ readmem(node_zones+OFFSET_OPTION(zone_struct_name, ++ zone_name), KVADDR, &value, sizeof(void *), ++ "zone[_struct] name", FAULT_ON_ERROR); ++ if (!read_string(value, buf1, BUFSIZE-1)) ++ sprintf(buf1, "(unknown) "); ++ if (VALID_STRUCT(zone_struct)) { ++ if (VALID_MEMBER(zone_struct_zone_start_paddr)) ++ { ++ readmem(node_zones+OFFSET ++ (zone_struct_zone_start_paddr), ++ KVADDR, &zone_start_paddr, ++ sizeof(ulong), ++ "node_zones zone_start_paddr", ++ FAULT_ON_ERROR); ++ } else { ++ readmem(node_zones+ ++ OFFSET(zone_struct_zone_start_pfn), ++ KVADDR, &zone_start_pfn, ++ sizeof(ulong), ++ "node_zones zone_start_pfn", ++ FAULT_ON_ERROR); ++ zone_start_paddr = ++ PTOB(zone_start_pfn); ++ } ++ readmem(node_zones+ ++ OFFSET(zone_struct_zone_start_mapnr), ++ KVADDR, &zone_start_mapnr, ++ sizeof(ulong), ++ "node_zones zone_start_mapnr", ++ FAULT_ON_ERROR); ++ } else { ++ readmem(node_zones+ ++ OFFSET(zone_zone_start_pfn), ++ KVADDR, &zone_start_pfn, ++ sizeof(ulong), ++ "node_zones zone_start_pfn", ++ FAULT_ON_ERROR); ++ zone_start_paddr = PTOB(zone_start_pfn); ++ ++ if (IS_SPARSEMEM()) { ++ zone_mem_map = 0; ++ zone_start_mapnr = 0; ++ if (zone_size) { ++ phys = PTOB(zone_start_pfn); ++ zone_start_mapnr = phys/PAGESIZE(); ++ } ++ ++ } else if (!(vt->flags & NODES) && ++ INVALID_MEMBER(zone_zone_mem_map)) { ++ readmem(pgdat+OFFSET(pglist_data_node_mem_map), ++ KVADDR, &zone_mem_map, sizeof(void *), ++ "contig_page_data mem_map", FAULT_ON_ERROR); ++ if (zone_size) ++ zone_mem_map += cum_zone_size * SIZE(page); ++ } else readmem(node_zones+ ++ OFFSET(zone_zone_mem_map), ++ KVADDR, &zone_mem_map, ++ sizeof(ulong), ++ "node_zones zone_mem_map", ++ FAULT_ON_ERROR); + +- readmem(si->cache+OFFSET(kmem_cache_s_array), +- KVADDR, &cpudata[0], +- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), +- "array_cache array", FAULT_ON_ERROR); ++ if (zone_mem_map) ++ zone_start_mapnr = ++ (zone_mem_map - node_mem_map) / ++ SIZE(page); ++ else if (!IS_SPARSEMEM()) ++ zone_start_mapnr = 0; ++ } + +- for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && +- cpudata[i]; i++) { +- BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); ++ if (IS_SPARSEMEM()) { ++ zone_mem_map = 0; ++ if (zone_size) { ++ phys = PTOB(zone_start_pfn); ++ if (phys_to_page(phys, &pp)) ++ zone_mem_map = pp; ++ } ++ } else if (!(vt->flags & NODES) && ++ INVALID_MEMBER(zone_struct_zone_mem_map) && ++ 
INVALID_MEMBER(zone_zone_mem_map)) { ++ readmem(pgdat+OFFSET(pglist_data_node_mem_map), ++ KVADDR, &zone_mem_map, sizeof(void *), ++ "contig_page_data mem_map", FAULT_ON_ERROR); ++ if (zone_size) ++ zone_mem_map += cum_zone_size * SIZE(page); ++ else ++ zone_mem_map = 0; ++ } else ++ readmem(node_zones+ ++ OFFSET_OPTION(zone_struct_zone_mem_map, ++ zone_zone_mem_map), KVADDR, &zone_mem_map, ++ sizeof(ulong), "node_zones zone_mem_map", ++ FAULT_ON_ERROR); + +- readmem(cpudata[i]+OFFSET(array_cache_avail), +- KVADDR, &avail, sizeof(int), +- "array cache avail", FAULT_ON_ERROR); ++ if (!initialize) { ++ fprintf(fp, " %2d %-9s %7ld ", ++ i, buf1, zone_size); ++ cum_zone_size += zone_size; ++ fprintf(fp, "%s %s %s\n", ++ mkstring(buf1, VADDR_PRLEN, ++ RJUST|LONG_HEX,MKSTR(zone_mem_map)), ++ mkstring(buf2, strlen("START_PADDR"), ++ LONG_HEX|RJUST,MKSTR(zone_start_paddr)), ++ mkstring(buf3, strlen("START_MAPNR"), ++ LONG_DEC|RJUST, ++ MKSTR(zone_start_mapnr))); ++ } + +- if (!avail) +- continue; ++ node_zones += SIZE_OPTION(zone_struct, zone); ++ } + +- if (avail > vt->kmem_max_limit) { +- error(INFO, +- "\"%s\" cache: array_cache.avail %d greater than limit %ld\n", +- si->curname, avail, vt->kmem_max_limit); +- si->errors++; ++ if (initialize) { ++ if (vt->flags & NODES_ONLINE) { ++ if ((node = next_online_node(node+1)) < 0) ++ pgdat = 0; ++ else if (!(pgdat = next_online_pgdat(node))) { ++ error(WARNING, ++ "cannot determine pgdat list for this kernel/architecture (node %d)\n\n", ++ node); ++ pgdat = 0; ++ } ++ } else ++ readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, ++ pglist_data_pgdat_next), KVADDR, ++ &pgdat, sizeof(void *), "pglist_data node_next", ++ FAULT_ON_ERROR); ++ } else { ++ if ((n+1) < vt->numnodes) ++ pgdat = vt->node_table[n+1].pgdat; ++ else ++ pgdat = 0; + } ++ } + ++ if (n != vt->numnodes) { + if (CRASHDEBUG(2)) +- fprintf(fp, "%s: cpu[%d] avail: %d\n", +- si->curname, i, avail); +- +- readmem(cpudata[i]+SIZE(array_cache), +- KVADDR, si->cpudata[i], +- sizeof(void *) * avail, +- "array_cache avail", FAULT_ON_ERROR); ++ error(NOTE, "changing numnodes from %d to %d\n", ++ vt->numnodes, n); ++ vt->numnodes = n; ++ } + +- if (CRASHDEBUG(2)) +- for (j = 0; j < avail; j++) +- fprintf(fp, " %lx\n", si->cpudata[i][j]); +- } ++ if (!initialize && IS_SPARSEMEM()) ++ dump_mem_sections(); + } + +-/* +- * Check whether a given address is contained in the previously-gathered +- * percpu object cache. +- */ +- +-static int +-check_cpudata_list(struct meminfo *si, ulong obj) ++static void ++dump_zone_stats(void) + { +- int i, j; +- +- for (i = 0; i < vt->kmem_max_cpus; i++) { +- for (j = 0; si->cpudata[i][j]; j++) +- if (si->cpudata[i][j] == obj) { +- si->cpu = i; +- return TRUE; +- } +- } ++ int i, n; ++ ulong pgdat, node_zones; ++ char *zonebuf; ++ char buf1[BUFSIZE]; ++ int ivalue; ++ ulong value1; ++ ulong value2; ++ ulong value3; ++ ulong value4; ++ ulong value5; ++ ulong value6; ++ ++ pgdat = vt->node_table[0].pgdat; ++ zonebuf = GETBUF(SIZE_OPTION(zone_struct, zone)); ++ vm_stat_init(); + +- return FALSE; +-} ++ for (n = 0; pgdat; n++) { ++ node_zones = pgdat + OFFSET(pglist_data_node_zones); + ++ for (i = 0; i < vt->nr_zones; i++) { + +-/* +- * Search the various memory subsystems for instances of this address. +- * Start with the most specific areas, ending up with at least the +- * mem_map page data. 
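
The mem_map arithmetic in the hunk above (zone_start_mapnr as the distance from the node's mem_map to the zone's, and the cum_zone_size adjustment) is measured in struct page units. A minimal standalone model, with an assumed page-struct size and made-up addresses:

#include <stdio.h>

#define PAGE_STRUCT_SIZE 56UL   /* illustrative sizeof(struct page) */

int main(void)
{
        unsigned long node_mem_map = 0xc1000000UL;
        unsigned long zone_mem_map = node_mem_map + (4096UL * PAGE_STRUCT_SIZE);
        unsigned long zone_start_mapnr;

        /* first page index of the zone, relative to the node's mem_map */
        zone_start_mapnr = (zone_mem_map - node_mem_map) / PAGE_STRUCT_SIZE;
        printf("zone_start_mapnr: %lu\n", zone_start_mapnr);   /* prints 4096 */
        return 0;
}
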
+- */ +-static void +-kmem_search(struct meminfo *mi) +-{ +- struct syment *sp; +- struct meminfo tmp_meminfo; +- char buf[BUFSIZE]; +- ulong vaddr, orig_flags; +- physaddr_t paddr; +- ulong offset; ++ if (!readmem(node_zones, KVADDR, zonebuf, ++ SIZE_OPTION(zone_struct, zone), ++ "zone buffer", FAULT_ON_ERROR)) ++ break; + +- switch (mi->memtype) +- { +- case KVADDR: +- vaddr = mi->spec_addr; +- break; ++ value1 = ULONG(zonebuf + ++ OFFSET_OPTION(zone_struct_name, zone_name)); + +- case PHYSADDR: +- vaddr = mi->spec_addr < VTOP(vt->high_memory) ? +- PTOV(mi->spec_addr) : BADADDR; +- break; +- } ++ if (!read_string(value1, buf1, BUFSIZE-1)) ++ sprintf(buf1, "(unknown) "); + +- orig_flags = mi->flags; +- mi->retval = 0; ++ if (VALID_MEMBER(zone_struct_size)) ++ value1 = value6 = ULONG(zonebuf + ++ OFFSET(zone_struct_size)); ++ else if (VALID_MEMBER(zone_struct_memsize)) { ++ value1 = value6 = ULONG(zonebuf + ++ OFFSET(zone_struct_memsize)); ++ } else if (VALID_MEMBER(zone_spanned_pages)) { ++ value1 = ULONG(zonebuf + ++ OFFSET(zone_spanned_pages)); ++ value6 = ULONG(zonebuf + ++ OFFSET(zone_present_pages)); ++ } else error(FATAL, ++ "zone struct has unknown size field\n"); + +- /* +- * Check first for a possible symbolic display of the virtual +- * address associated with mi->spec_addr or PTOV(mi->spec_addr). +- */ +- if (((vaddr >= kt->stext) && (vaddr <= kt->end)) || +- IS_MODULE_VADDR(mi->spec_addr)) { +- if ((sp = value_search(vaddr, &offset))) { +- show_symbol(sp, offset, SHOW_LINENUM | SHOW_RADIX()); +- fprintf(fp, "\n"); +- } +- } ++ value2 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_min, ++ zone_struct_pages_min)); ++ value3 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_low, ++ zone_struct_pages_low)); ++ value4 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_high, ++ zone_struct_pages_high)); ++ value5 = ULONG(zonebuf + OFFSET_OPTION(zone_free_pages, ++ zone_struct_free_pages)); ++ ++ fprintf(fp, ++ "NODE: %d ZONE: %d ADDR: %lx NAME: \"%s\"\n", ++ n, i, node_zones, buf1); ++ ++ if (!value1) { ++ fprintf(fp, " [unpopulated]\n"); ++ goto next_zone; ++ } ++ fprintf(fp, " SIZE: %ld", value1); ++ if (value6 < value1) ++ fprintf(fp, " PRESENT: %ld", value6); ++ fprintf(fp, " MIN/LOW/HIGH: %ld/%ld/%ld", ++ value2, value3, value4); ++ ++ if (VALID_MEMBER(zone_vm_stat)) ++ dump_vm_stat("NR_FREE_PAGES", (long *)&value5, ++ node_zones + OFFSET(zone_vm_stat)); ++ ++ if (VALID_MEMBER(zone_nr_active) && ++ VALID_MEMBER(zone_nr_inactive)) { ++ value1 = ULONG(zonebuf + ++ OFFSET(zone_nr_active)); ++ value2 = ULONG(zonebuf + ++ OFFSET(zone_nr_inactive)); ++ fprintf(fp, ++ "\n NR_ACTIVE: %ld NR_INACTIVE: %ld FREE: %ld\n", ++ value1, value2, value5); ++ if (VALID_MEMBER(zone_vm_stat)) { ++ fprintf(fp, " VM_STAT:\n"); ++ dump_vm_stat(NULL, NULL, node_zones + ++ OFFSET(zone_vm_stat)); ++ } ++ } else if (VALID_MEMBER(zone_vm_stat) && ++ dump_vm_stat("NR_ACTIVE", (long *)&value1, ++ node_zones + OFFSET(zone_vm_stat)) && ++ dump_vm_stat("NR_INACTIVE", (long *)&value2, ++ node_zones + OFFSET(zone_vm_stat))) { ++ fprintf(fp, "\n VM_STAT:\n"); ++ dump_vm_stat(NULL, NULL, node_zones + ++ OFFSET(zone_vm_stat)); ++ } else { ++ fprintf(fp, " FREE: %ld\n", value5); ++ goto next_zone; ++ } + +- /* +- * Check for a valid mapped address. 
+- */ +- if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) { +- if (kvtop(NULL, mi->spec_addr, &paddr, 0)) { +- mi->flags = orig_flags; +- dump_vmlist(mi); ++ if (VALID_MEMBER(zone_all_unreclaimable)) { ++ ivalue = UINT(zonebuf + ++ OFFSET(zone_all_unreclaimable)); ++ fprintf(fp, " ALL_UNRECLAIMABLE: %s ", ++ ivalue ? "yes" : "no"); ++ } else if (VALID_MEMBER(zone_flags) && ++ enumerator_value("ZONE_ALL_UNRECLAIMABLE", ++ (long *)&value1)) { ++ value2 = ULONG(zonebuf + OFFSET(zone_flags)); ++ value3 = value2 & (1 << value1); ++ fprintf(fp, " ALL_UNRECLAIMABLE: %s ", ++ value3 ? "yes" : "no"); ++ } ++ ++ if (VALID_MEMBER(zone_pages_scanned)) { ++ value1 = ULONG(zonebuf + ++ OFFSET(zone_pages_scanned)); ++ fprintf(fp, "PAGES_SCANNED: %ld ", value1); ++ } + fprintf(fp, "\n"); +- mi->spec_addr = paddr; +- mi->memtype = PHYSADDR; +- } +- goto mem_map; +- } +- /* +- * If the address is physical, check whether it's in vmalloc space. +- */ +- +- if (mi->memtype == PHYSADDR) { +- mi->flags = orig_flags; +- mi->flags |= GET_PHYS_TO_VMALLOC; +- mi->retval = 0; +- dump_vmlist(mi); +- mi->flags &= ~GET_PHYS_TO_VMALLOC; + +- if (mi->retval) { +- if ((sp = value_search(mi->retval, &offset))) { +- show_symbol(sp, offset, +- SHOW_LINENUM | SHOW_RADIX()); +- fprintf(fp, "\n"); +- } +- dump_vmlist(mi); ++next_zone: + fprintf(fp, "\n"); +- goto mem_map; ++ node_zones += SIZE_OPTION(zone_struct, zone); + } +- } +- +- /* +- * Check whether the containing page belongs to the slab subsystem. +- */ +- mi->flags = orig_flags; +- mi->retval = 0; +- if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf)) { +- BZERO(&tmp_meminfo, sizeof(struct meminfo)); +- tmp_meminfo.spec_addr = vaddr; +- tmp_meminfo.memtype = KVADDR; +- tmp_meminfo.flags = mi->flags; +- vt->dump_kmem_cache(&tmp_meminfo); +- fprintf(fp, "\n"); +- } +- +- /* +- * Check free list. +- */ +- mi->flags = orig_flags; +- mi->retval = 0; +- vt->dump_free_pages(mi); +- if (mi->retval) +- fprintf(fp, "\n"); + +- if (vt->page_hash_table) { +- /* +- * Check the page cache. +- */ +- mi->flags = orig_flags; +- mi->retval = 0; +- dump_page_hash_table(mi); +- if (mi->retval) +- fprintf(fp, "\n"); ++ if ((n+1) < vt->numnodes) ++ pgdat = vt->node_table[n+1].pgdat; ++ else ++ pgdat = 0; + } + +-mem_map: +- mi->flags = orig_flags; +- dump_mem_map(mi); +- +- if (!mi->retval) +- fprintf(fp, "%llx: address not found\n", mi->spec_addr); ++ FREEBUF(zonebuf); + + } + + /* +- * Determine whether an address is a page pointer from the mem_map[] array. +- * If the caller requests it, return the associated physical address. ++ * Gather essential information regarding each memory node. + */ +-int +-is_page_ptr(ulong addr, physaddr_t *phys) ++static void ++node_table_init(void) + { + int n; +- ulong ppstart, ppend; +- struct node_table *nt; +- ulong pgnum, node_size; +- +- for (n = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) +- node_size = vt->max_mapnr; +- else +- node_size = nt->size; +- +- ppstart = nt->mem_map; +- ppend = ppstart + (node_size * SIZE(page)); +- +- if ((addr < ppstart) || (addr >= ppend)) +- continue; ++ ulong pgdat; + +- /* +- * We're in the mem_map range -- but it is a page pointer? +- */ +- if ((addr - ppstart) % SIZE(page)) +- return FALSE; ++ /* ++ * Override numnodes -- some kernels may leave it at 1 on a system ++ * with multiple memory nodes. 
++ */ ++ if ((vt->flags & NODES) && (VALID_MEMBER(pglist_data_node_next) || ++ VALID_MEMBER(pglist_data_pgdat_next))) { + +- if (phys) { +- pgnum = (addr - nt->mem_map) / SIZE(page); +- *phys = (pgnum * PAGESIZE()) + nt->start_paddr; ++ get_symbol_data("pgdat_list", sizeof(void *), &pgdat); ++ ++ for (n = 0; pgdat; n++) { ++ readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, ++ pglist_data_pgdat_next), KVADDR, ++ &pgdat, sizeof(void *), "pglist_data node_next", ++ FAULT_ON_ERROR); ++ } ++ if (n != vt->numnodes) { ++ if (CRASHDEBUG(2)) ++ error(NOTE, "changing numnodes from %d to %d\n", ++ vt->numnodes, n); ++ vt->numnodes = n; + } ++ } else ++ vt->flags &= ~NODES; + +- return TRUE; +- } +- +- return FALSE; ++ if (!(vt->node_table = (struct node_table *) ++ malloc(sizeof(struct node_table) * vt->numnodes))) ++ error(FATAL, "cannot malloc node_table %s(%d nodes)", ++ vt->numnodes > 1 ? "array " : "", vt->numnodes); + +-#ifdef PRE_NODES +- ppstart = vt->mem_map; +- ppend = ppstart + (vt->total_pages * vt->page_struct_len); ++ BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes); + +- if ((addr < ppstart) || (addr >= ppend)) +- return FALSE; ++ dump_memory_nodes(MEMORY_NODES_INITIALIZE); + +- if ((addr - ppstart) % vt->page_struct_len) +- return FALSE; ++ qsort((void *)vt->node_table, (size_t)vt->numnodes, ++ sizeof(struct node_table), compare_node_data); + +- return TRUE; +-#endif ++ if (CRASHDEBUG(2)) ++ dump_memory_nodes(MEMORY_NODES_DUMP); + } + + /* +- * Return the physical address associated with this page pointer. ++ * The comparison function must return an integer less than, ++ * equal to, or greater than zero if the first argument is ++ * considered to be respectively less than, equal to, or ++ * greater than the second. If two members compare as equal, ++ * their order in the sorted array is undefined. + */ +-static int +-page_to_phys(ulong pp, physaddr_t *phys) ++ ++static int ++compare_node_data(const void *v1, const void *v2) + { +- return(is_page_ptr(pp, phys)); ++ struct node_table *t1, *t2; ++ ++ t1 = (struct node_table *)v1; ++ t2 = (struct node_table *)v2; ++ ++ return (t1->node_id < t2->node_id ? -1 : ++ t1->node_id == t2->node_id ? 0 : 1); + } + + + /* +- * Return the page pointer associated with this physical address. ++ * Depending upon the processor, and whether we're running live or on a ++ * dumpfile, get the system page size. + */ +-static int +-phys_to_page(physaddr_t phys, ulong *pp) ++uint ++memory_page_size(void) + { +- int n; +- ulong pgnum; +- struct node_table *nt; +- physaddr_t pstart, pend; +- ulong node_size; +- +- for (n = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) +- node_size = vt->max_mapnr; +- else +- node_size = nt->size; +- +- pstart = nt->start_paddr; +- pend = pstart + ((ulonglong)node_size * PAGESIZE()); +- +- if ((phys < pstart) || (phys >= pend)) +- continue; +- /* +- * We're in the physical range -- calculate the page. 
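
The node_table sort in node_table_init() above relies on a conventional three-way qsort() comparator. A self-contained demonstration with a mock record type (node_rec and the sample table are illustrative, not crash structures):

#include <stdio.h>
#include <stdlib.h>

struct node_rec {
        int node_id;
        unsigned long pgdat;
};

static int compare_node_rec(const void *v1, const void *v2)
{
        const struct node_rec *t1 = v1, *t2 = v2;

        /* negative, zero or positive, exactly as qsort() expects */
        return (t1->node_id < t2->node_id) ? -1 :
               (t1->node_id == t2->node_id) ? 0 : 1;
}

int main(void)
{
        struct node_rec tbl[] = { {2, 0xc2}, {0, 0xc0}, {1, 0xc1} };
        int i;

        qsort(tbl, 3, sizeof(tbl[0]), compare_node_rec);
        for (i = 0; i < 3; i++)
                printf("node %d pgdat %lx\n", tbl[i].node_id, tbl[i].pgdat);
        return 0;
}
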
+- */ +- pgnum = BTOP(phys - pstart); +- *pp = nt->mem_map + (pgnum * SIZE(page)); +- +- return TRUE; +- } ++ uint psz; + +- return FALSE; ++ if (machdep->pagesize) ++ return machdep->pagesize; + +-#ifdef PRE_NODES +- if (phys >= (vt->total_pages * PAGESIZE())) +- return FALSE; ++ if (REMOTE_MEMSRC()) ++ return remote_page_size(); + +- pgnum = PTOB(BTOP(phys)) / PAGESIZE(); +- *pp = vt->mem_map + (pgnum * vt->page_struct_len); +- +- return TRUE; +-#endif +-} ++ switch (pc->flags & MEMORY_SOURCES) ++ { ++ case DISKDUMP: ++ psz = diskdump_page_size(); ++ break; + ++ case XENDUMP: ++ psz = xendump_page_size(); ++ break; + +-/* +- * Try to read a string of non-NULL characters from a memory location, +- * returning the number of characters read. +- */ +-int +-read_string(ulong kvaddr, char *buf, int maxlen) +-{ +- char strbuf[MIN_PAGE_SIZE]; +- ulong kp; +- char *bufptr; +- long cnt, size; ++ case KDUMP: ++ psz = kdump_page_size(); ++ break; + +- BZERO(buf, maxlen); +- BZERO(strbuf, MIN_PAGE_SIZE); ++ case NETDUMP: ++ psz = netdump_page_size(); ++ break; + +- kp = kvaddr; +- bufptr = strbuf; +- size = maxlen; ++ case MCLXCD: ++ psz = (uint)mclx_page_size(); ++ break; + +- while (size > 0) { +- cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); +- +- if (cnt > size) +- cnt = size; ++ case LKCD: ++#if 0 /* REMIND: */ ++ psz = lkcd_page_size(); /* dh_dump_page_size is HW page size; should add dh_page_size */ ++#else ++ psz = (uint)getpagesize(); ++#endif ++ break; + +- if (!readmem(kp, KVADDR, bufptr, cnt, +- "readstring characters", QUIET|RETURN_ON_ERROR)) +- break; ++ case DEVMEM: ++ case MEMMOD: ++ psz = (uint)getpagesize(); ++ break; + +- if (count_buffer_chars(bufptr, NULLCHAR, cnt)) +- break; ++ case S390D: ++ psz = s390_page_size(); ++ break; + +- kp += cnt; +- bufptr += cnt; +- size -= cnt; ++ default: ++ error(FATAL, "memory_page_size: invalid pc->flags: %lx\n", ++ pc->flags & MEMORY_SOURCES); + } + +- strcpy(buf, strbuf); +- return (strlen(buf)); ++ return psz; + } + + /* +- * "help -v" output ++ * If the page size cannot be determined by the dumpfile (like kdump), ++ * and the processor default cannot be used, allow the force-feeding ++ * of a crash command-line page size option. + */ + void +-dump_vm_table(int verbose) ++force_page_size(char *s) + { +- int i; +- struct node_table *nt; +- int others; ++ int k, err; ++ ulong psize; + +- others = 0; +- fprintf(fp, " flags: %lx %s(", +- vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : ""); +- if (vt->flags & NODES) +- fprintf(fp, "%sNODES", others++ ? "|" : ""); +- if (vt->flags & ZONES) +- fprintf(fp, "%sZONES", others++ ? "|" : ""); +- if (vt->flags & PERCPU_KMALLOC_V1) +- fprintf(fp, "%sPERCPU_KMALLOC_V1", others++ ? "|" : ""); +- if (vt->flags & PERCPU_KMALLOC_V2) +- fprintf(fp, "%sPERCPU_KMALLOC_V2", others++ ? "|" : ""); +- if (vt->flags & COMMON_VADDR) +- fprintf(fp, "%sCOMMON_VADDR", others++ ? "|" : ""); +- if (vt->flags & KMEM_CACHE_INIT) +- fprintf(fp, "%sKMEM_CACHE_INIT", others++ ? "|" : ""); +- if (vt->flags & V_MEM_MAP) +- fprintf(fp, "%sV_MEM_MAP", others++ ? "|" : ""); +- if (vt->flags & KMEM_CACHE_UNAVAIL) +- fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : ""); +- if (vt->flags & DISCONTIGMEM) +- fprintf(fp, "%sDISCONTIGMEM", others++ ? 
"|" : ""); +- fprintf(fp, ")\n"); +- if (vt->kernel_pgd[0] == vt->kernel_pgd[1]) +- fprintf(fp, " kernel_pgd[NR_CPUS]: %lx ...\n", +- vt->kernel_pgd[0]); +- else { +- fprintf(fp, " kernel_pgd[NR_CPUS]: "); +- for (i = 0; i < NR_CPUS; i++) { +- if ((i % 4) == 0) +- fprintf(fp, "\n "); +- fprintf(fp, "%lx ", vt->kernel_pgd[i]); ++ k = 1; ++ err = FALSE; ++ ++ switch (LASTCHAR(s)) ++ { ++ case 'k': ++ case 'K': ++ LASTCHAR(s) = NULLCHAR; ++ if (!decimal(s, 0)) { ++ err = TRUE; ++ break; + } +- fprintf(fp, "\n"); +- } +- fprintf(fp, " high_memory: %lx\n", vt->high_memory); +- fprintf(fp, " vmalloc_start: %lx\n", vt->vmalloc_start); +- fprintf(fp, " mem_map: %lx\n", vt->mem_map); +- fprintf(fp, " total_pages: %ld\n", vt->total_pages); +- fprintf(fp, " max_mapnr: %ld\n", vt->max_mapnr); +- fprintf(fp, " totalram_pages: %ld\n", vt->totalram_pages); +- fprintf(fp, " totalhigh_pages: %ld\n", vt->totalhigh_pages); +- fprintf(fp, " num_physpages: %ld\n", vt->num_physpages); +- fprintf(fp, " page_hash_table: %lx\n", vt->page_hash_table); +- fprintf(fp, "page_hash_table_len: %d\n", vt->page_hash_table_len); +- fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); +- fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); +- fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); +- fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); +- fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen); +- fprintf(fp, " PG_reserved: %lx\n", vt->PG_reserved); +- fprintf(fp, " PG_slab: %ld\n", vt->PG_slab); +- fprintf(fp, " paddr_prlen: %d\n", vt->paddr_prlen); +- fprintf(fp, " numnodes: %d\n", vt->numnodes); +- fprintf(fp, " nr_zones: %d\n", vt->nr_zones); +- fprintf(fp, " nr_free_areas: %d\n", vt->nr_free_areas); +- for (i = 0; i < vt->numnodes; i++) { +- nt = &vt->node_table[i]; +- fprintf(fp, " node_table[%d]: \n", i); +- fprintf(fp, " id: %d\n", nt->node_id); +- fprintf(fp, " pgdat: %lx\n", nt->pgdat); +- fprintf(fp, " size: %ld\n", nt->size); +- fprintf(fp, " mem_map: %lx\n", nt->mem_map); +- fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); +- fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); +- } ++ k = 1024; + +- fprintf(fp, " dump_free_pages: "); +- if (vt->dump_free_pages == dump_free_pages) +- fprintf(fp, "dump_free_pages()\n"); +- else if (vt->dump_free_pages == dump_free_pages_zones_v1) +- fprintf(fp, "dump_free_pages_zones_v1()\n"); +- else if (vt->dump_free_pages == dump_free_pages_zones_v2) +- fprintf(fp, "dump_free_pages_zones_v2()\n"); +- else if (vt->dump_free_pages == dump_multidimensional_free_pages) +- fprintf(fp, "dump_multidimensional_free_pages()\n"); +- else +- fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages); ++ /* FALLTHROUGH */ + +- fprintf(fp, " dump_kmem_cache: "); +- if (vt->dump_kmem_cache == dump_kmem_cache) +- fprintf(fp, "dump_kmem_cache()\n"); +- else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1) +- fprintf(fp, "dump_kmem_cache_percpu_v1()\n"); +- else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2) +- fprintf(fp, "dump_kmem_cache_percpu_v2()\n"); +- else +- fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache); +- fprintf(fp, " slab_data: %lx\n", (ulong)vt->slab_data); +- if (verbose) +- dump_saved_slab_data(); +- fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles); +- fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read); +- fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct); ++ default: ++ if (decimal(s, 0)) ++ psize = dtol(s, QUIET|RETURN_ON_ERROR, &err); ++ else if (hexadecimal(s, 0)) ++ 
psize = htol(s, QUIET|RETURN_ON_ERROR, &err); ++ else ++ err = TRUE; ++ break; ++ } + +- dump_vma_cache(VERBOSE); ++ if (err) ++ error(INFO, "invalid page size: %s\n", s); ++ else ++ machdep->pagesize = psize * k; + } + ++ + /* +- * Calculate the amount of memory referenced in the kernel-specific "nodes". ++ * Return the vmalloc address referenced by the first vm_struct ++ * on the vmlist. This can normally be used by the machine-specific ++ * xxx_vmalloc_start() routines. + */ +-uint64_t +-total_node_memory() +-{ +- int i; +- struct node_table *nt; +- uint64_t total; +- +- for (i = total = 0; i < vt->numnodes; i++) { +- nt = &vt->node_table[i]; + +- if (CRASHDEBUG(1)) { +- console("node_table[%d]: \n", i); +- console(" id: %d\n", nt->node_id); +- console(" pgdat: %lx\n", nt->pgdat); +- console(" size: %ld\n", nt->size); +- console(" mem_map: %lx\n", nt->mem_map); +- console(" start_paddr: %lx\n", nt->start_paddr); +- console(" start_mapnr: %ld\n", nt->start_mapnr); +- } ++ulong ++first_vmalloc_address(void) ++{ ++ ulong vmlist, addr; + +- total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); +- } ++ get_symbol_data("vmlist", sizeof(void *), &vmlist); + +- return total; ++ if (!vmlist) ++ return 0; ++ ++ if (!readmem(vmlist+OFFSET(vm_struct_addr), KVADDR, &addr, ++ sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) ++ non_matching_kernel(); ++ ++ return addr; + } + + /* +- * Dump just the vm_area_struct cache table data so that it can be +- * called from above or for debug purposes. ++ * Return the L1 cache size in bytes, which can be found stored in the ++ * cache_cache. + */ +-void +-dump_vma_cache(ulong verbose) +-{ +- int i; +- ulong vhits; + +- if (!verbose) +- goto show_hits; ++int ++l1_cache_size(void) ++{ ++ ulong cache; ++ ulong c_align; ++ int colour_off; ++ int retval; + +- for (i = 0; i < VMA_CACHE; i++) +- fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", +- i, vt->cached_vma[i], +- vt->cached_vma_hits[i]); +- fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); +- fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); +- fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); +- fflush(fp); ++ retval = -1; + +-show_hits: +- if (vt->vma_cache_fills) { +- for (i = vhits = 0; i < VMA_CACHE; i++) +- vhits += vt->cached_vma_hits[i]; ++ if (VALID_MEMBER(kmem_cache_s_c_align)) { ++ cache = symbol_value("cache_cache"); ++ readmem(cache+OFFSET(kmem_cache_s_c_align), ++ KVADDR, &c_align, sizeof(ulong), ++ "c_align", FAULT_ON_ERROR); ++ retval = (int)c_align; ++ } else if (VALID_MEMBER(kmem_cache_s_colour_off)) { ++ cache = symbol_value("cache_cache"); ++ readmem(cache+OFFSET(kmem_cache_s_colour_off), ++ KVADDR, &colour_off, sizeof(int), ++ "colour_off", FAULT_ON_ERROR); ++ retval = colour_off; ++ } + +- fprintf(stderr, "%s vma hit rate: %2ld%% (%ld of %ld)\n", +- verbose ? "" : " ", +- (vhits * 100)/vt->vma_cache_fills, +- vhits, vt->vma_cache_fills); +- } ++ return retval; + } + + /* +- * Guess at the "real" amount of physical memory installed, formatting +- * it in a MB or GB based string. ++ * Multi-purpose routine used to query/control dumpfile memory usage. 
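
The suffix handling in force_page_size() above can be modeled in a few lines. The sketch below uses strtoul() with base 0 as a rough stand-in for crash's decimal()/htol() helpers, so it is a simplification of the parsing the patch performs, not a copy of it.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_page_size(char *s)
{
        unsigned long k = 1;
        size_t len = strlen(s);

        /* a trailing 'k'/'K' scales the value by 1024 */
        if (len && (s[len-1] == 'k' || s[len-1] == 'K')) {
                s[len-1] = '\0';
                k = 1024;
        }
        return strtoul(s, NULL, 0) * k;   /* base 0: decimal or 0x-prefixed hex */
}

int main(void)
{
        char a[] = "16k", b[] = "0x4000";

        printf("%lu %lu\n", parse_page_size(a), parse_page_size(b));
        return 0;
}
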
+ */ +-char * +-get_memory_size(char *buf) ++int ++dumpfile_memory(int cmd) + { +- uint64_t total; +- ulong next_gig; +-#ifdef OLDWAY +- ulong mbs, gbs; +-#endif +- +- total = machdep->memory_size(); ++ int retval; + +- if ((next_gig = roundup(total, GIGABYTES(1)))) { +- if ((next_gig - total) <= MEGABYTES(64)) +- total = next_gig; +- } ++ retval = 0; + +- return (pages_to_size((ulong)(total/PAGESIZE()), buf)); ++ if (!DUMPFILE()) ++ return retval; + +-#ifdef OLDWAY +- gbs = (ulong)(total/GIGABYTES(1)); +- mbs = (ulong)(total/MEGABYTES(1)); +- if (gbs) +- mbs = (total % GIGABYTES(1))/MEGABYTES(1); ++ switch (cmd) ++ { ++ case DUMPFILE_MEM_USED: ++ if (REMOTE_DUMPFILE()) ++ retval = remote_memory_used(); ++ else if (pc->flags & NETDUMP) ++ retval = netdump_memory_used(); ++ else if (pc->flags & KDUMP) ++ retval = kdump_memory_used(); ++ else if (pc->flags & XENDUMP) ++ retval = xendump_memory_used(); ++ else if (pc->flags & DISKDUMP) ++ retval = diskdump_memory_used(); ++ else if (pc->flags & LKCD) ++ retval = lkcd_memory_used(); ++ else if (pc->flags & MCLXCD) ++ retval = vas_memory_used(); ++ else if (pc->flags & S390D) ++ retval = s390_memory_used(); ++ break; + +- if (total%MEGABYTES(1)) +- mbs++; ++ case DUMPFILE_FREE_MEM: ++ if (REMOTE_DUMPFILE()) ++ retval = remote_free_memory(); ++ else if (pc->flags & NETDUMP) ++ retval = netdump_free_memory(); ++ else if (pc->flags & KDUMP) ++ retval = kdump_free_memory(); ++ else if (pc->flags & XENDUMP) ++ retval = xendump_free_memory(); ++ else if (pc->flags & DISKDUMP) ++ retval = diskdump_free_memory(); ++ else if (pc->flags & LKCD) ++ retval = lkcd_free_memory(); ++ else if (pc->flags & MCLXCD) ++ retval = vas_free_memory(NULL); ++ else if (pc->flags & S390D) ++ retval = s390_free_memory(); ++ break; + +- if (gbs) +- sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs); +- else +- sprintf(buf, "%ld MB", mbs); ++ case DUMPFILE_MEM_DUMP: ++ if (REMOTE_DUMPFILE()) ++ retval = remote_memory_dump(0); ++ else if (pc->flags & NETDUMP) ++ retval = netdump_memory_dump(fp); ++ else if (pc->flags & KDUMP) ++ retval = kdump_memory_dump(fp); ++ else if (pc->flags & XENDUMP) ++ retval = xendump_memory_dump(fp); ++ else if (pc->flags & DISKDUMP) ++ retval = diskdump_memory_dump(fp); ++ else if (pc->flags & LKCD) ++ retval = lkcd_memory_dump(set_lkcd_fp(fp)); ++ else if (pc->flags & MCLXCD) ++ retval = vas_memory_dump(fp); ++ else if (pc->flags & S390D) ++ retval = s390_memory_dump(fp); ++ break; ++ ++ case DUMPFILE_ENVIRONMENT: ++ if (pc->flags & LKCD) { ++ set_lkcd_fp(fp); ++ dump_lkcd_environment(0); ++ } else if (pc->flags & REM_LKCD) ++ retval = remote_memory_dump(VERBOSE); ++ break; ++ } + +- return buf; +-#endif ++ return retval; + } + +-/* +- * For use by architectures not having machine-specific manners for +- * best determining physical memory size. +- */ +-uint64_t +-generic_memory_size(void) ++/* ++ * Functions for sparse mem support ++ */ ++ulong ++sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr) + { +- if (machdep->memsize) +- return machdep->memsize; +- +- return (machdep->memsize = total_node_memory()); ++ return coded_mem_map + ++ (section_nr_to_pfn(section_nr) * SIZE(page)); + } + +-/* +- * Determine whether a virtual address is user or kernel or ambiguous. 
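
For sparse_decode_mem_map() above: the kernel encodes each section's mem_map relative to the section's first pfn, so decoding only needs to add section_nr_to_pfn(nr) page structs back. A standalone model of that arithmetic; PAGE_STRUCT_SIZE, PFN_SECTION_SHIFT and the sample coded address are illustrative assumptions.

#include <stdio.h>

#define PAGE_STRUCT_SIZE 56UL           /* illustrative sizeof(struct page) */
#define PFN_SECTION_SHIFT 15UL          /* illustrative pages per section   */

static unsigned long section_nr_to_pfn(unsigned long nr)
{
        return nr << PFN_SECTION_SHIFT;
}

static unsigned long decode_mem_map(unsigned long coded, unsigned long nr)
{
        /* rebase the coded value to the section's real mem_map address */
        return coded + section_nr_to_pfn(nr) * PAGE_STRUCT_SIZE;
}

int main(void)
{
        /* section 2 with a sample coded value */
        printf("mem_map: %lx\n", decode_mem_map(0xc1000000UL, 2));
        return 0;
}
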
+- */ +-int +-vaddr_type(ulong vaddr, struct task_context *tc) ++void ++sparse_mem_init(void) + { +- int memtype, found; ++ ulong addr; ++ ulong mem_section_size; + +- if (!tc) +- tc = CURRENT_CONTEXT(); +- memtype = found = 0; ++ if (!IS_SPARSEMEM()) ++ return; + +- if (machdep->is_uvaddr(vaddr, tc)) { +- memtype |= UVADDR; +- found++; +- } ++ MEMBER_OFFSET_INIT(mem_section_section_mem_map, "mem_section", ++ "section_mem_map"); ++ STRUCT_SIZE_INIT(mem_section, "mem_section"); + +- if (machdep->is_kvaddr(vaddr)) { +- memtype |= KVADDR; +- found++; ++ if (!MAX_PHYSMEM_BITS()) ++ error(FATAL, ++ "CONFIG_SPARSEMEM kernels not supported for this architecture\n"); ++ ++ if (get_array_length("mem_section", NULL, 0) == ++ (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) ++ vt->flags |= SPARSEMEM_EX; ++ ++ if (IS_SPARSEMEM_EX()) { ++ machdep->sections_per_root = _SECTIONS_PER_ROOT_EXTREME(); ++ mem_section_size = sizeof(void *) * NR_SECTION_ROOTS(); ++ } else { ++ machdep->sections_per_root = _SECTIONS_PER_ROOT(); ++ mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS(); + } + +- if (found == 1) +- return memtype; +- else +- return AMBIGUOUS; ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "PAGESIZE=%d\n",PAGESIZE()); ++ fprintf(fp,"mem_section_size = %ld\n", mem_section_size); ++ fprintf(fp, "NR_SECTION_ROOTS = %ld\n", NR_SECTION_ROOTS()); ++ fprintf(fp, "NR_MEM_SECTIONS = %ld\n", NR_MEM_SECTIONS()); ++ fprintf(fp, "SECTIONS_PER_ROOT = %ld\n", SECTIONS_PER_ROOT() ); ++ fprintf(fp, "SECTION_ROOT_MASK = 0x%lx\n", SECTION_ROOT_MASK()); ++ fprintf(fp, "PAGES_PER_SECTION = %ld\n", PAGES_PER_SECTION()); ++ } ++ ++ if (!(vt->mem_sec = (void *)malloc(mem_section_size))) ++ error(FATAL, "cannot malloc mem_sec cache\n"); ++ if (!(vt->mem_section = (char *)malloc(SIZE(mem_section)))) ++ error(FATAL, "cannot malloc mem_section cache\n"); ++ ++ addr = symbol_value("mem_section"); ++ readmem(addr, KVADDR,vt->mem_sec ,mem_section_size, ++ "memory section root table", FAULT_ON_ERROR); + } + +-/* +- * Determine the first valid user space address +- */ +-static int +-address_space_start(struct task_context *tc, ulong *addr) ++char * ++read_mem_section(ulong addr) + { +- ulong vma; +- char *vma_buf; ++ if (!IS_KVADDR(addr)) ++ return 0; ++ ++ readmem(addr, KVADDR, vt->mem_section, SIZE(mem_section), ++ "memory section", FAULT_ON_ERROR); + +- if (!tc->mm_struct) +- return FALSE; ++ return vt->mem_section; ++} + +- fill_mm_struct(tc->mm_struct); +- vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); +- if (!vma) +- return FALSE; +- vma_buf = fill_vma_cache(vma); +- *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); +- +- return TRUE; ++ulong ++nr_to_section(ulong nr) ++{ ++ ulong addr; ++ ulong *mem_sec = vt->mem_sec; ++ ++ if (!IS_KVADDR(mem_sec[SECTION_NR_TO_ROOT(nr)])) ++ return 0; ++ ++ if (IS_SPARSEMEM_EX()) ++ addr = mem_sec[SECTION_NR_TO_ROOT(nr)] + ++ (nr & SECTION_ROOT_MASK()) * SIZE(mem_section); ++ else ++ addr = symbol_value("mem_section") + ++ (SECTIONS_PER_ROOT() * SECTION_NR_TO_ROOT(nr) + ++ (nr & SECTION_ROOT_MASK())) * SIZE(mem_section); ++ ++ if (!IS_KVADDR(addr)) ++ return 0; ++ ++ return addr; + } + + /* +- * Search for a given value between a starting and ending address range, +- * applying an optional mask for "don't care" bits. As an alternative +- * to entering the starting address value, -k means "start of kernel address +- * space". For processors with ambiguous user/kernel address spaces, +- * -u or -k must be used (with or without -s) as a differentiator. 
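
nr_to_section() above splits a section number into a root index and an entry within that root for the SPARSEMEM_EXTREME case, where only the root table is statically allocated. A standalone model of the index math (the SECTIONS_PER_ROOT value of 256 is illustrative):

#include <stdio.h>

#define SECTIONS_PER_ROOT 256UL                 /* illustrative */
#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)

int main(void)
{
        unsigned long nr = 515;
        unsigned long root = nr / SECTIONS_PER_ROOT;    /* SECTION_NR_TO_ROOT() */
        unsigned long entry = nr & SECTION_ROOT_MASK;   /* index within the root */

        printf("section %lu -> root %lu, entry %lu\n", nr, root, entry);
        return 0;
}
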
++ * We use the lower bits of the mem_map pointer to store ++ * a little bit of information. There should be at least ++ * 3 bits here due to 32-bit alignment. + */ +-void +-cmd_search(void) +-{ +- int c; +- ulong start, end, mask, memtype, len; +- ulong uvaddr_end; +- int sflag; +- struct meminfo meminfo; +- ulong value_array[MAXARGS]; +- struct syment *sp; +- +- start = end = mask = sflag = memtype = len = 0; +- uvaddr_end = COMMON_VADDR_SPACE() ? (ulong)(-1) : machdep->kvbase; +- BZERO(value_array, sizeof(ulong) * MAXARGS); ++#define SECTION_MARKED_PRESENT (1UL<<0) ++#define SECTION_HAS_MEM_MAP (1UL<<1) ++#define SECTION_MAP_LAST_BIT (1UL<<2) ++#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) + +- while ((c = getopt(argcnt, args, "l:uks:e:v:m:")) != EOF) { +- switch(c) +- { +- case 'u': +- if (!sflag) { +- address_space_start(CURRENT_CONTEXT(),&start); +- sflag++; +- } +- memtype = UVADDR; +- sflag++; +- break; + +- case 'k': +- if (!sflag) { +- start = machdep->kvbase; +- sflag++; +- } +- memtype = KVADDR; +- sflag++; +- break; ++int ++valid_section(ulong addr) ++{ ++ char *mem_section; + +- case 's': +- if ((sp = symbol_search(optarg))) +- start = sp->value; +- else +- start = htol(optarg, FAULT_ON_ERROR, NULL); +- sflag++; +- break; ++ if ((mem_section = read_mem_section(addr))) ++ return (ULONG(mem_section + ++ OFFSET(mem_section_section_mem_map)) && ++ SECTION_MARKED_PRESENT); ++ return 0; ++} + +- case 'e': +- if ((sp = symbol_search(optarg))) +- end = sp->value; +- else +- end = htol(optarg, FAULT_ON_ERROR, NULL); +- break; ++int ++section_has_mem_map(ulong addr) ++{ ++ char *mem_section; + +- case 'l': +- len = stol(optarg, FAULT_ON_ERROR, NULL); +- break; ++ if ((mem_section = read_mem_section(addr))) ++ return (ULONG(mem_section + ++ OFFSET(mem_section_section_mem_map)) ++ && SECTION_HAS_MEM_MAP); ++ return 0; ++} + +- case 'm': +- mask = htol(optarg, FAULT_ON_ERROR, NULL); +- break; ++ulong ++section_mem_map_addr(ulong addr) ++{ ++ char *mem_section; ++ ulong map; + +- default: +- argerrs++; +- break; +- } +- } ++ if ((mem_section = read_mem_section(addr))) { ++ map = ULONG(mem_section + ++ OFFSET(mem_section_section_mem_map)); ++ map &= SECTION_MAP_MASK; ++ return map; ++ } ++ return 0; ++} + +- if (argerrs || !sflag || !args[optind] || (len && end)) +- cmd_usage(pc->curcmd, SYNOPSIS); + +- if (!memtype) +- memtype = vaddr_type(start, CURRENT_CONTEXT()); ++ulong ++valid_section_nr(ulong nr) ++{ ++ ulong addr = nr_to_section(nr); + +- switch (memtype) +- { +- case UVADDR: +- if (!IS_UVADDR(start, CURRENT_CONTEXT())) { +- error(INFO, "invalid user virtual address: %lx\n", +- start); +- cmd_usage(pc->curcmd, SYNOPSIS); +- } +- break; ++ if (valid_section(addr)) ++ return addr; + +- case KVADDR: +- if (!IS_KVADDR(start)) { +- error(INFO, "invalid kernel virtual address: %lx\n", +- start); +- cmd_usage(pc->curcmd, SYNOPSIS); +- } +- break; ++ return 0; ++} + +- case AMBIGUOUS: +- error(INFO, +- "ambiguous virtual address: %lx (requires -u or -k)\n", +- start); +- cmd_usage(pc->curcmd, SYNOPSIS); ++ulong ++pfn_to_map(ulong pfn) ++{ ++ ulong section, page_offset; ++ ulong section_nr; ++ ulong coded_mem_map, mem_map; ++ ++ section_nr = pfn_to_section_nr(pfn); ++ if (!(section = valid_section_nr(section_nr))) ++ return 0; ++ ++ if (section_has_mem_map(section)) { ++ page_offset = pfn - section_nr_to_pfn(section_nr); ++ coded_mem_map = section_mem_map_addr(section); ++ mem_map = sparse_decode_mem_map(coded_mem_map, section_nr) + ++ (page_offset * SIZE(page)); ++ return mem_map; + 
} + +- if (!end && !len) { +- switch (memtype) +- { +- case UVADDR: +- end = uvaddr_end; +- break; ++ return 0; ++} + +- case KVADDR: +- if (vt->vmalloc_start < machdep->identity_map_base) +- end = (ulong)(-1); +- else { +- meminfo.memtype = KVADDR; +- meminfo.spec_addr = 0; +- meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); +- dump_vmlist(&meminfo); +- end = meminfo.retval; +- } +- break; +- } +- } else if (len) +- end = start + len; ++void ++dump_mem_sections(void) ++{ ++ ulong nr,addr; ++ ulong nr_mem_sections; ++ ulong coded_mem_map, mem_map, pfn; ++ char buf1[BUFSIZE]; ++ char buf2[BUFSIZE]; ++ char buf3[BUFSIZE]; ++ char buf4[BUFSIZE]; + +- switch (memtype) +- { +- case UVADDR: +- if (end > uvaddr_end) { +- error(INFO, +- "address range starts in user space and ends kernel space\n"); +- cmd_usage(pc->curcmd, SYNOPSIS); +- } +- /* FALLTHROUGH */ +- case KVADDR: +- if (end < start) { +- error(INFO, +- "ending address %lx is below starting address %lx\n", +- end, start); +- cmd_usage(pc->curcmd, SYNOPSIS); ++ nr_mem_sections = NR_MEM_SECTIONS(); ++ ++ fprintf(fp, "\n"); ++ pad_line(fp, BITS32() ? 59 : 67, '-'); ++ fprintf(fp, "\n\nNR %s %s %s PFN\n", ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SECTION"), ++ mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "CODED_MEM_MAP"), ++ mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); ++ ++ for (nr = 0; nr <= nr_mem_sections ; nr++) { ++ if ((addr = valid_section_nr(nr))) { ++ coded_mem_map = section_mem_map_addr(addr); ++ mem_map = sparse_decode_mem_map(coded_mem_map,nr); ++ pfn = section_nr_to_pfn(nr); ++ ++ fprintf(fp, "%2ld %s %s %s %s\n", ++ nr, ++ mkstring(buf1, VADDR_PRLEN, ++ CENTER|LONG_HEX, MKSTR(addr)), ++ mkstring(buf2, VADDR_PRLEN, ++ CENTER|LONG_HEX|RJUST, MKSTR(coded_mem_map)), ++ mkstring(buf3, VADDR_PRLEN, ++ CENTER|LONG_HEX|RJUST, MKSTR(mem_map)), ++ pc->output_radix == 10 ? ++ mkstring(buf4, VADDR_PRLEN, ++ LONG_DEC|LJUST, MKSTR(pfn)) : ++ mkstring(buf4, VADDR_PRLEN, ++ LONG_HEX|LJUST, MKSTR(pfn))); + } +- break; + } ++} + +- c = 0; +- while (args[optind]) { +- value_array[c] = htol(args[optind], FAULT_ON_ERROR, NULL); +- c++; +- optind++; ++void ++list_mem_sections(void) ++{ ++ ulong nr,addr; ++ ulong nr_mem_sections = NR_MEM_SECTIONS(); ++ ulong coded_mem_map; ++ ++ for (nr = 0; nr <= nr_mem_sections ; nr++) { ++ if ((addr = valid_section_nr(nr))) { ++ coded_mem_map = section_mem_map_addr(addr); ++ fprintf(fp, ++ "nr=%ld section = %lx coded_mem_map=%lx pfn=%ld mem_map=%lx\n", ++ nr, ++ addr, ++ coded_mem_map, ++ section_nr_to_pfn(nr), ++ sparse_decode_mem_map(coded_mem_map,nr)); ++ } + } +- +- search(start, end, mask, memtype, value_array, c); + } + + /* +- * Do the work for cmd_search(). ++ * For kernels containing the node_online_map or node_states[], ++ * return the number of online node bits set. 
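
get_nodes_online() below boils down to popcounting the words of the node mask. A self-contained model; count_bits_long() here is a local stand-in for crash's helper of the same name, and the mask contents are made up.

#include <stdio.h>

static int count_bits_long(unsigned long word)
{
        int n = 0;

        while (word) {
                n += word & 1;
                word >>= 1;
        }
        return n;
}

int main(void)
{
        unsigned long node_online_map[] = { 0x5UL, 0x0UL };   /* nodes 0 and 2 */
        int i, online = 0;

        for (i = 0; i < 2; i++)
                online += count_bits_long(node_online_map[i]);
        printf("nodes online: %d\n", online);   /* prints 2 */
        return 0;
}
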
+ */ ++static int ++get_nodes_online(void) ++{ ++ int i, len, online; ++ struct gnu_request req; ++ ulong *maskptr; ++ long N_ONLINE; ++ ulong mapaddr; + +-#define SEARCHMASK(X) ((X) | mask) ++ if (!symbol_exists("node_online_map") && ++ !symbol_exists("node_states")) ++ return 0; + +-static void +-search(ulong start, ulong end, ulong mask, int memtype, ulong *value, int vcnt) +-{ +- int i, j; +- ulong pp, next, *ubp; +- int wordcnt, lastpage; +- ulong page; +- physaddr_t paddr; +- char *pagebuf; ++ if (LKCD_KERNTYPES()) { ++ if ((len = STRUCT_SIZE("nodemask_t")) < 0) ++ error(FATAL, "cannot determine type nodemask_t\n"); ++ mapaddr = symbol_value("node_online_map"); ++ } else if (symbol_exists("node_online_map")) { ++ len = get_symbol_type("node_online_map", NULL, &req) ++ == TYPE_CODE_UNDEF ? sizeof(ulong) : req.length; ++ mapaddr = symbol_value("node_online_map"); ++ } else if (symbol_exists("node_states")) { ++ if ((get_symbol_type("node_states", NULL, &req) != TYPE_CODE_ARRAY) || ++ !(len = get_array_length("node_states", NULL, 0)) || ++ !enumerator_value("N_ONLINE", &N_ONLINE)) ++ return 0; ++ len = req.length / len; ++ mapaddr = symbol_value("node_states") + (N_ONLINE * len); ++ } ++ ++ if (!(vt->node_online_map = (ulong *)malloc(len))) ++ error(FATAL, "cannot malloc node_online_map\n"); ++ ++ if (!readmem(mapaddr, KVADDR, ++ (void *)&vt->node_online_map[0], len, "node_online_map", ++ QUIET|RETURN_ON_ERROR)) ++ error(FATAL, "cannot read node_online_map/node_states\n"); ++ ++ vt->node_online_map_len = len/sizeof(ulong); ++ ++ online = 0; ++ ++ maskptr = (ulong *)vt->node_online_map; ++ for (i = 0; i < vt->node_online_map_len; i++, maskptr++) ++ online += count_bits_long(*maskptr); + +- if (start & (sizeof(long)-1)) { +- start &= ~(sizeof(long)-1); +- error(INFO, "rounding down start address to: %lx\n", start); ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "node_online_map: ["); ++ for (i = 0; i < vt->node_online_map_len; i++) ++ fprintf(fp, "%s%lx", i ? ", " : "", vt->node_online_map[i]); ++ fprintf(fp, "] -> nodes online: %d\n", online); + } + +- pagebuf = GETBUF(PAGESIZE()); +- next = start; ++ if (online) ++ vt->numnodes = online; + +- for (pp = VIRTPAGEBASE(start); next < end; next = pp) { +- lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end)); +- if (LKCD_DUMPFILE()) +- set_lkcd_nohash(); ++ return online; ++} + +- switch (memtype) +- { +- case UVADDR: +- if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || +- !phys_to_page(paddr, &page)) { +- if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) +- return; +- continue; +- } +- break; ++/* ++ * Return the next node index, with "first" being the first acceptable node. 
++ */ ++static int ++next_online_node(int first) ++{ ++ int i, j, node; ++ ulong mask, *maskptr; + +- case KVADDR: +- if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || +- !phys_to_page(paddr, &page)) { +- if (!next_kpage(pp, &pp)) +- return; +- continue; ++ if ((first/BITS_PER_LONG) >= vt->node_online_map_len) { ++ error(INFO, "next_online_node: %d is too large!\n", first); ++ return -1; ++ } ++ ++ maskptr = (ulong *)vt->node_online_map; ++ for (i = node = 0; i < vt->node_online_map_len; i++, maskptr++) { ++ mask = *maskptr; ++ for (j = 0; j < BITS_PER_LONG; j++, node++) { ++ if (mask & 1) { ++ if (node >= first) ++ return node; + } +- break; +- } ++ mask >>= 1; ++ } ++ } + +- if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(), +- "search page", RETURN_ON_ERROR|QUIET)) { +- pp += PAGESIZE(); +- continue; +- } ++ return -1; ++} + +- ubp = (ulong *)&pagebuf[next - pp]; +- if (lastpage) { +- if (end == (ulong)(-1)) +- wordcnt = PAGESIZE()/sizeof(long); +- else +- wordcnt = (end - next)/sizeof(long); +- } else +- wordcnt = (PAGESIZE() - (next - pp))/sizeof(long); ++/* ++ * Modify appropriately for architecture/kernel nuances. ++ */ ++static ulong ++next_online_pgdat(int node) ++{ ++ char buf[BUFSIZE]; ++ ulong pgdat; + +- for (i = 0; i < wordcnt; i++, ubp++, next += sizeof(long)) { +- for (j = 0; j < vcnt; j++) { +- if (SEARCHMASK(*ubp) == SEARCHMASK(value[j])) +- fprintf(fp, "%lx: %lx\n", next, *ubp); +- } +- } ++ /* ++ * Default -- look for type: struct pglist_data node_data[] ++ */ ++ if (LKCD_KERNTYPES()) { ++ if (!kernel_symbol_exists("node_data")) ++ goto pgdat2; ++ /* ++ * Just index into node_data[] without checking that it is ++ * an array; kerntypes have no such symbol information. ++ */ ++ } else { ++ if (get_symbol_type("node_data", NULL, NULL) != TYPE_CODE_ARRAY) ++ goto pgdat2; + +- if (CRASHDEBUG(1)) +- if ((pp % (1024*1024)) == 0) +- console("%lx\n", pp); ++ open_tmpfile(); ++ sprintf(buf, "whatis node_data"); ++ if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { ++ close_tmpfile(); ++ goto pgdat2; ++ } ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (STRNEQ(buf, "type = ")) ++ break; ++ } ++ close_tmpfile(); + +- pp += PAGESIZE(); ++ if ((!strstr(buf, "struct pglist_data *") && ++ !strstr(buf, "pg_data_t *")) || ++ (count_chars(buf, '[') != 1) || ++ (count_chars(buf, ']') != 1)) ++ goto pgdat2; ++ } ++ ++ if (!readmem(symbol_value("node_data") + (node * sizeof(void *)), ++ KVADDR, &pgdat, sizeof(void *), "node_data", RETURN_ON_ERROR) || ++ !IS_KVADDR(pgdat)) ++ goto pgdat2; ++ ++ return pgdat; ++ ++pgdat2: ++ if (LKCD_KERNTYPES()) { ++ if (!kernel_symbol_exists("pgdat_list")) ++ goto pgdat3; ++ } else { ++ if (get_symbol_type("pgdat_list",NULL,NULL) != TYPE_CODE_ARRAY) ++ goto pgdat3; ++ ++ open_tmpfile(); ++ sprintf(buf, "whatis pgdat_list"); ++ if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { ++ close_tmpfile(); ++ goto pgdat3; ++ } ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (STRNEQ(buf, "type = ")) ++ break; ++ } ++ close_tmpfile(); ++ ++ if ((!strstr(buf, "struct pglist_data *") && ++ !strstr(buf, "pg_data_t *")) || ++ (count_chars(buf, '[') != 1) || ++ (count_chars(buf, ']') != 1)) ++ goto pgdat3; + } +-} + ++ if (!readmem(symbol_value("pgdat_list") + (node * sizeof(void *)), ++ KVADDR, &pgdat, sizeof(void *), "pgdat_list", RETURN_ON_ERROR) || ++ !IS_KVADDR(pgdat)) ++ goto pgdat3; ++ ++ return pgdat; ++ ++pgdat3: ++ if (symbol_exists("contig_page_data") && (node == 0)) ++ return 
symbol_value("contig_page_data"); ++ ++ return 0; ++} + + /* +- * Return the next mapped user virtual address page that comes after +- * the passed-in address. ++ * Make the vm_stat[] array contents easily accessible. + */ + static int +-next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr) ++vm_stat_init(void) + { +- ulong vma, total_vm; +- int found; +- char *vma_buf; +- ulong vm_start, vm_end; +- void *vm_next; ++ char buf[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ int i, c, stringlen, total; ++ struct gnu_request *req; ++ char *start; + +- if (!tc->mm_struct) +- return FALSE; ++ if (vt->flags & VM_STAT) ++ return TRUE; ++ ++ if ((vt->nr_vm_stat_items == -1) || !symbol_exists("vm_stat")) ++ goto bailout; ++ ++ /* ++ * look for type: type = atomic_long_t [] ++ */ ++ if (LKCD_KERNTYPES()) { ++ if (!symbol_exists("vm_stat")) ++ goto bailout; ++ /* ++ * Just assume that vm_stat is an array; there is ++ * no symbol info in a kerntypes file. ++ */ ++ } else { ++ if (!symbol_exists("vm_stat") || ++ get_symbol_type("vm_stat", NULL, NULL) != TYPE_CODE_ARRAY) ++ goto bailout; ++ ++ open_tmpfile(); ++ sprintf(buf, "whatis vm_stat"); ++ if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { ++ close_tmpfile(); ++ goto bailout; ++ } ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (STRNEQ(buf, "type = ")) ++ break; ++ } ++ close_tmpfile(); ++ ++ if (!strstr(buf, "atomic_long_t") || ++ (count_chars(buf, '[') != 1) || ++ (count_chars(buf, ']') != 1)) ++ goto bailout; ++ } ++ ++ open_tmpfile(); ++ req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); ++ req->command = GNU_GET_DATATYPE; ++ req->name = "zone_stat_item"; ++ req->flags = GNU_PRINT_ENUMERATORS; ++ gdb_interface(req); ++ FREEBUF(req); ++ ++ stringlen = 1; ++ ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "{") || strstr(buf, "}")) ++ continue; ++ clean_line(buf); ++ c = parse_line(buf, arglist); ++ if (STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) { ++ vt->nr_vm_stat_items = atoi(arglist[2]); ++ break; ++ } else ++ stringlen += strlen(arglist[0]); ++ } ++ ++ total = stringlen + vt->nr_vm_stat_items + ++ (sizeof(void *) * vt->nr_vm_stat_items); ++ if (!(vt->vm_stat_items = (char **)malloc(total))) { ++ close_tmpfile(); ++ error(FATAL, "cannot malloc vm_stat_items cache\n"); ++ } ++ ++ start = (char *)&vt->vm_stat_items[vt->nr_vm_stat_items]; ++ ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "{") || strstr(buf, "}")) ++ continue; ++ c = parse_line(buf, arglist); ++ i = atoi(arglist[2]); ++ if (i < vt->nr_vm_stat_items) { ++ vt->vm_stat_items[i] = start; ++ strcpy(start, arglist[0]); ++ start += strlen(arglist[0]) + 1; ++ } ++ } ++ close_tmpfile(); ++ ++ vt->flags |= VM_STAT; ++ return TRUE; ++ ++bailout: ++ vt->nr_vm_stat_items = -1; ++ return FALSE; ++} + +- fill_mm_struct(tc->mm_struct); +- vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); +- total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); ++/* ++ * Either dump all vm_stat entries, or return the value of ++ * the specified vm_stat item. Use the global counter unless ++ * a zone-specific address is passed. 
++ */ ++static int ++dump_vm_stat(char *item, long *retval, ulong zone) ++{ ++ char *buf; ++ ulong *vp; ++ ulong location; ++ int i; + +- if (!vma || (total_vm == 0)) ++ if (!vm_stat_init()) { ++ if (!item) ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "vm_stat not available in this kernel\n"); + return FALSE; ++ } + +- vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ +- +- for (found = FALSE; vma; vma = (ulong)vm_next) { +- vma_buf = fill_vma_cache(vma); ++ buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items); + +- vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); +- vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); +- vm_next = VOID_PTR(vma_buf + OFFSET(vm_area_struct_vm_next)); ++ location = zone ? zone : symbol_value("vm_stat"); + +- if (vaddr <= vm_start) { +- *nextvaddr = vm_start; +- return TRUE; +- } ++ readmem(location, KVADDR, buf, ++ sizeof(ulong) * vt->nr_vm_stat_items, ++ "vm_stat", FAULT_ON_ERROR); ++ ++ if (!item) { ++ if (!zone) ++ fprintf(fp, " VM_STAT:\n"); ++ vp = (ulong *)buf; ++ for (i = 0; i < vt->nr_vm_stat_items; i++) ++ fprintf(fp, "%23s: %ld\n", vt->vm_stat_items[i], vp[i]); ++ return TRUE; ++ } + +- if ((vaddr > vm_start) && (vaddr < vm_end)) { +- *nextvaddr = vaddr; ++ vp = (ulong *)buf; ++ for (i = 0; i < vt->nr_vm_stat_items; i++) { ++ if (STREQ(vt->vm_stat_items[i], item)) { ++ *retval = vp[i]; + return TRUE; + } + } +@@ -9321,920 +13024,1044 @@ + } + + /* +- * Return the next kernel virtual address page that comes after +- * the passed-in address. ++ * Dump the cumulative totals of the per_cpu__page_states counters. + */ +-static int +-next_kpage(ulong vaddr, ulong *nextvaddr) ++int ++dump_page_states(void) + { +- int n; +- ulong paddr, vaddr_orig, node_size; +- struct node_table *nt; +- ulonglong pstart, pend; +- ulong vmalloc_limit; +- struct meminfo meminfo; ++ struct syment *sp; ++ ulong addr, value; ++ int i, c, fd, len, instance, members; ++ char buf[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ struct entry { ++ char *name; ++ ulong value; ++ } *entry_list; ++ struct stat stat; ++ char *namebuf, *nameptr; + +- vaddr_orig = vaddr; +- vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ ++ if (!(sp = symbol_search("per_cpu__page_states"))) { ++ if (CRASHDEBUG(1)) ++ error(INFO, "per_cpu__page_states" ++ "not available in this kernel\n"); ++ return FALSE; ++ } + +- if (vaddr < vaddr_orig) /* wrapped back to zero? 
*/ +- return FALSE; ++ instance = members = len = 0; + +- meminfo.memtype = KVADDR; +- meminfo.spec_addr = 0; +- meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); +- dump_vmlist(&meminfo); +- vmalloc_limit = meminfo.retval; ++ sprintf(buf, "ptype struct page_state"); + +- if (IS_VMALLOC_ADDR(vaddr_orig)) { +- if (IS_VMALLOC_ADDR(vaddr) && (vaddr < vmalloc_limit)) { +- *nextvaddr = vaddr; +- return TRUE; +- } ++ open_tmpfile(); ++ if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { ++ close_tmpfile(); ++ return FALSE; ++ } + +- if (vt->vmalloc_start < machdep->identity_map_base) { +- *nextvaddr = machdep->identity_map_base; +- return TRUE; +- } ++ fflush(pc->tmpfile); ++ fd = fileno(pc->tmpfile); ++ fstat(fd, &stat); ++ namebuf = GETBUF(stat.st_size); ++ nameptr = namebuf; + +- return FALSE; ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "struct page_state") || ++ strstr(buf, "}")) ++ continue; ++ members++; + } + +- paddr = VTOP(vaddr); ++ entry_list = (struct entry *) ++ GETBUF(sizeof(struct entry) * members); + +- for (n = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) +- node_size = vt->max_mapnr; +- else +- node_size = nt->size; ++ rewind(pc->tmpfile); ++ i = 0; ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "struct page_state") || ++ strstr(buf, "}")) ++ continue; ++ strip_ending_char(strip_linefeeds(buf), ';'); ++ c = parse_line(buf, arglist); ++ strcpy(nameptr, arglist[c-1]); ++ entry_list[i].name = nameptr; ++ if (strlen(nameptr) > len) ++ len = strlen(nameptr); ++ nameptr += strlen(nameptr)+2; ++ i++; ++ } ++ close_tmpfile(); + +- pstart = nt->start_paddr; +- pend = pstart + ((ulonglong)node_size * PAGESIZE()); ++ open_tmpfile(); + +- if ((paddr < pstart) || (paddr >= pend)) +- continue; +- /* +- * We're in the physical range. +- */ +- return TRUE; ++ for (c = 0; c < kt->cpus; c++) { ++ addr = sp->value + kt->__per_cpu_offset[c]; ++ dump_struct("page_state", addr, RADIX(16)); + } + +- if (vt->vmalloc_start > vaddr) { +- *nextvaddr = vt->vmalloc_start; +- return TRUE; +- } else +- return FALSE; ++ i = 0; ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "struct page_state")) { ++ instance++; ++ i = 0; ++ continue; ++ } ++ if (strstr(buf, "}")) ++ continue; ++ strip_linefeeds(buf); ++ extract_hex(buf, &value, ',', TRUE); ++ entry_list[i].value += value; ++ i++; ++ } ++ ++ close_tmpfile(); ++ ++ fprintf(fp, " PAGE_STATES:\n"); ++ for (i = 0; i < members; i++) { ++ sprintf(buf, "%s", entry_list[i].name); ++ fprintf(fp, "%s", mkstring(buf, len+2, RJUST, 0)); ++ fprintf(fp, ": %ld\n", entry_list[i].value); ++ } ++ ++ FREEBUF(namebuf); ++ FREEBUF(entry_list); ++ ++ return TRUE; + } + +-/* +- * Display swap statistics. ++ ++/* ++ * Dump the cumulative totals of the per_cpu__vm_event_state ++ * counters. 
+ */ +-void +-cmd_swap(void) ++static int ++dump_vm_event_state(void) + { +- int c; ++ int i, c; ++ struct syment *sp; ++ ulong addr; ++ ulong *events, *cumulative; + +- while ((c = getopt(argcnt, args, "")) != EOF) { +- switch(c) +- { +- default: +- argerrs++; +- break; +- } +- } ++ if (!vm_event_state_init()) ++ return FALSE; + +- if (argerrs) +- cmd_usage(pc->curcmd, SYNOPSIS); ++ events = (ulong *)GETBUF((sizeof(ulong) * vt->nr_vm_event_items) * 2); ++ cumulative = &events[vt->nr_vm_event_items]; + +- dump_swap_info(VERBOSE, NULL, NULL); +-} ++ sp = symbol_search("per_cpu__vm_event_states"); + +-/* +- * Do the work for cmd_swap(). +- */ ++ for (c = 0; c < kt->cpus; c++) { ++ addr = sp->value + kt->__per_cpu_offset[c]; ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "[%d]: %lx\n", c, addr); ++ dump_struct("vm_event_state", addr, RADIX(16)); ++ } ++ readmem(addr, KVADDR, events, ++ sizeof(ulong) * vt->nr_vm_event_items, ++ "vm_event_states buffer", FAULT_ON_ERROR); ++ for (i = 0; i < vt->nr_vm_event_items; i++) ++ cumulative[i] += events[i]; ++ } + +-#define SWP_USED 1 +-#define SWAP_MAP_BAD 0x8000 ++ fprintf(fp, "\n VM_EVENT_STATES:\n"); ++ for (i = 0; i < vt->nr_vm_event_items; i++) ++ fprintf(fp, "%23s: %ld\n", vt->vm_event_items[i], cumulative[i]); + +-char *swap_info_hdr = \ +-"FILENAME TYPE SIZE USED PCT PRIORITY\n"; ++ FREEBUF(events); ++ ++ return TRUE; ++} + + static int +-dump_swap_info(ulong swapflags, ulong *totalswap_pages, ulong *totalused_pages) ++vm_event_state_init(void) + { +- int i, j; +- int flags, swap_device, pages, prio, usedswap; +- ulong swap_file, max, swap_map, pct; +- ulong vfsmnt; +- ulong swap_info; +- ushort *map; +- ulong totalswap, totalused; ++ int i, c, stringlen, total; ++ long count; ++ struct gnu_request *req; ++ char *arglist[MAXARGS]; + char buf[BUFSIZE]; ++ char *start; + +- if (!symbol_exists("nr_swapfiles")) +- error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); +- +- if (!symbol_exists("swap_info")) +- error(FATAL, "swap_info doesn't exist in this kernel!\n"); ++ if (vt->flags & VM_EVENT) ++ return TRUE; + +- swap_info = symbol_value("swap_info"); ++ if ((vt->nr_vm_event_items == -1) || ++ !symbol_exists("per_cpu__vm_event_states")) ++ goto bailout; + +- if (swapflags & VERBOSE) +- fprintf(fp, swap_info_hdr); ++ if (!enumerator_value("NR_VM_EVENT_ITEMS", &count)) ++ return FALSE; + +- totalswap = totalused = 0; ++ vt->nr_vm_event_items = count; + +- for (i = 0; i < vt->nr_swapfiles; i++, +- swap_info += SIZE(swap_info_struct)){ +- fill_swap_info(swap_info); ++ open_tmpfile(); ++ req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); ++ req->command = GNU_GET_DATATYPE; ++ req->name = "vm_event_item"; ++ req->flags = GNU_PRINT_ENUMERATORS; ++ gdb_interface(req); ++ FREEBUF(req); + +- flags = INT(vt->swap_info_struct + +- OFFSET(swap_info_struct_flags)); ++ stringlen = 1; + +- if (!(flags & SWP_USED)) ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "{") || strstr(buf, "}")) + continue; ++ clean_line(buf); ++ c = parse_line(buf, arglist); ++ if (STREQ(arglist[0], "NR_VM_EVENT_ITEMS")) ++ break; ++ else ++ stringlen += strlen(arglist[0]); ++ } + +- swap_file = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_swap_file)); ++ total = stringlen + vt->nr_vm_event_items + ++ (sizeof(void *) * vt->nr_vm_event_items); ++ if (!(vt->vm_event_items = (char **)malloc(total))) { ++ close_tmpfile(); ++ error(FATAL, "cannot malloc vm_event_items cache\n"); ++ } + +- swap_device = INT(vt->swap_info_struct + 
+- OFFSET_OPTION(swap_info_struct_swap_device, +- swap_info_struct_old_block_size)); ++ start = (char *)&vt->vm_event_items[vt->nr_vm_event_items]; + +- pages = INT(vt->swap_info_struct + +- OFFSET(swap_info_struct_pages)); ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "{") || strstr(buf, "}")) ++ continue; ++ c = parse_line(buf, arglist); ++ i = atoi(arglist[2]); ++ if (i < vt->nr_vm_event_items) { ++ vt->vm_event_items[i] = start; ++ strcpy(start, arglist[0]); ++ start += strlen(arglist[0]) + 1; ++ } ++ } ++ close_tmpfile(); + +- totalswap += pages; +- pages <<= (PAGESHIFT() - 10); ++ vt->flags |= VM_EVENT; ++ return TRUE; + +- prio = INT(vt->swap_info_struct + +- OFFSET(swap_info_struct_prio)); ++bailout: ++ vt->nr_vm_event_items = -1; ++ return FALSE; ++} + +- max = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_max)); + +- swap_map = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_swap_map)); ++/* ++ * Support for slub.c slab cache. ++ */ ++static void ++kmem_cache_init_slub(void) ++{ ++ if (CRASHDEBUG(1) && ++ !(vt->flags & CONFIG_NUMA) && (vt->numnodes > 1)) ++ error(WARNING, ++ "kmem_cache_init_slub: numnodes: %d without CONFIG_NUMA\n", ++ vt->numnodes); + +- if (swap_file) { +- if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { +- vfsmnt = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_swap_vfsmnt)); +- get_pathname(swap_file, buf, BUFSIZE, +- 1, vfsmnt); +- } else if (VALID_MEMBER +- (swap_info_struct_old_block_size)) { +- get_pathname(file_to_dentry(swap_file), +- buf, BUFSIZE, 1, 0); +- } else { +- get_pathname(swap_file, buf, BUFSIZE, 1, 0); +- } +- } else +- sprintf(buf, "(unknown)"); ++ vt->cpu_slab_type = MEMBER_TYPE("kmem_cache", "cpu_slab"); ++ ++ vt->flags |= KMEM_CACHE_INIT; ++} ++ ++static void ++kmem_cache_list_slub(void) ++{ ++ int i, cnt; ++ ulong *cache_list; ++ ulong name; ++ char *cache_buf; ++ char buf[BUFSIZE]; ++ ++ cnt = get_kmem_cache_list(&cache_list); ++ cache_buf = GETBUF(SIZE(kmem_cache)); ++ ++ for (i = 0; i < cnt; i++) { ++ fprintf(fp, "%lx ", cache_list[i]); ++ ++ readmem(cache_list[i], KVADDR, cache_buf, ++ SIZE(kmem_cache), "kmem_cache buffer", ++ FAULT_ON_ERROR); ++ ++ name = ULONG(cache_buf + OFFSET(kmem_cache_name)); ++ if (!read_string(name, buf, BUFSIZE-1)) ++ sprintf(buf, "(unknown)\n"); ++ ++ fprintf(fp, "%s\n", buf); ++ } ++ ++ FREEBUF(cache_list); ++ FREEBUF(cache_buf); ++} ++ ++#define DUMP_KMEM_CACHE_INFO_SLUB() dump_kmem_cache_info_slub(si) ++ ++static void ++dump_kmem_cache_info_slub(struct meminfo *si) ++{ ++ char b1[BUFSIZE]; ++ char b2[BUFSIZE]; ++ int namelen, sizelen, spacelen; ++ ++ fprintf(fp, "%s ", ++ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); ++ ++ namelen = strlen(si->curname); ++ sprintf(b2, "%ld", si->objsize); ++ sizelen = strlen(b2); ++ spacelen = 0; ++ ++ if (namelen++ > 18) { ++ spacelen = 29 - namelen - sizelen; ++ fprintf(fp, "%s%s%ld ", si->curname, ++ space(spacelen <= 0 ? 
1 : spacelen), si->objsize); ++ if (spacelen > 0) ++ spacelen = 1; ++ sprintf(b1, "%c%dld ", '%', 9 + spacelen - 1); ++ } else { ++ fprintf(fp, "%-18s %8ld ", si->curname, si->objsize); ++ sprintf(b1, "%c%dld ", '%', 9); ++ } ++ ++ fprintf(fp, b1, si->inuse); + +- map = (ushort *)GETBUF(sizeof(ushort) * max); ++ fprintf(fp, "%8ld %5ld %4ldk\n", ++ si->num_slabs * si->objects, ++ si->num_slabs, si->slabsize/1024); ++} + +- if (!readmem(swap_map, KVADDR, map, +- sizeof(ushort) * max, "swap_info swap_map data", +- RETURN_ON_ERROR|QUIET)) { +- if (swapflags & RETURN_ON_ERROR) { +- *totalswap_pages = swap_map; +- *totalused_pages = i; +- return FALSE; +- } else +- error(FATAL, +- "swap_info[%d].swap_map at %lx is unaccessible\n", +- i, swap_map); ++static void ++dump_kmem_cache_slub(struct meminfo *si) ++{ ++ int i; ++ ulong name; ++ unsigned int size, objsize, objects, order, offset; ++ char *reqname, *p1; ++ char kbuf[BUFSIZE]; ++ char buf[BUFSIZE]; ++ ++ si->cache_count = get_kmem_cache_list(&si->cache_list); ++ si->cache_buf = GETBUF(SIZE(kmem_cache)); ++ ++ if (!si->reqname && ++ !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) ++ fprintf(fp, kmem_cache_hdr); ++ ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if ((p1 = is_slab_page(si, kbuf))) { ++ si->flags |= VERBOSE; ++ si->slab = (ulong)si->spec_addr; ++ } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { ++ error(INFO, ++ "address is not allocated in slab subsystem: %lx\n", ++ si->spec_addr); ++ goto bailout; + } ++ ++ if (si->reqname && (si->reqname != p1)) ++ error(INFO, ++ "ignoring pre-selected %s cache for address: %lx\n", ++ si->reqname, si->spec_addr, si->reqname); ++ reqname = p1; ++ } else ++ reqname = si->reqname; + +- usedswap = 0; +- for (j = 0; j < max; j++) { +- switch (map[j]) +- { +- case SWAP_MAP_BAD: +- case 0: +- continue; +- default: +- usedswap++; +- } ++ for (i = 0; i < si->cache_count; i++) { ++ if (!readmem(si->cache_list[i], KVADDR, si->cache_buf, ++ SIZE(kmem_cache), "kmem_cache buffer", RETURN_ON_ERROR)) ++ goto next_cache; ++ ++ name = ULONG(si->cache_buf + OFFSET(kmem_cache_name)); ++ if (!read_string(name, buf, BUFSIZE-1)) ++ sprintf(buf, "(unknown)"); ++ if (reqname) { ++ if (!STREQ(reqname, buf)) ++ continue; ++ fprintf(fp, kmem_cache_hdr); ++ } ++ if (ignore_cache(si, buf)) { ++ fprintf(fp, "%lx %-18s [IGNORED]\n", ++ si->cache_list[i], buf); ++ goto next_cache; + } + +- FREEBUF(map); ++ objsize = UINT(si->cache_buf + OFFSET(kmem_cache_objsize)); ++ size = UINT(si->cache_buf + OFFSET(kmem_cache_size)); ++ objects = UINT(si->cache_buf + OFFSET(kmem_cache_objects)); ++ order = UINT(si->cache_buf + OFFSET(kmem_cache_order)); ++ offset = UINT(si->cache_buf + OFFSET(kmem_cache_offset)); + +- totalused += usedswap; +- usedswap <<= (PAGESHIFT() - 10); +- pct = (usedswap * 100)/pages; ++ si->cache = si->cache_list[i]; ++ si->curname = buf; ++ si->objsize = objsize; ++ si->size = size; ++ si->objects = objects; ++ si->slabsize = (PAGESIZE() << order); ++ si->inuse = si->num_slabs = 0; ++ si->slab_offset = offset; ++ if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, si) || ++ !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, si)) ++ goto next_cache; + +- if (swapflags & VERBOSE) +- fprintf(fp, "%-15s %s %7dk %7dk %2ld%% %d\n", +- buf, swap_device ? 
"PARTITION" : " FILE ", +- pages, usedswap, pct, prio); +- } ++ DUMP_KMEM_CACHE_INFO_SLUB(); + +- if (totalswap_pages) +- *totalswap_pages = totalswap; +- if (totalused_pages) +- *totalused_pages = totalused; ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (!si->slab) ++ si->slab = vaddr_to_slab(si->spec_addr); ++ do_slab_slub(si, VERBOSE); ++ } else if (si->flags & VERBOSE) { ++ do_kmem_cache_slub(si); ++ if (!reqname && ((i+1) < si->cache_count)) ++ fprintf(fp, kmem_cache_hdr); ++ } + +- return TRUE; ++next_cache: ++ if (reqname) ++ break; ++ } ++ ++bailout: ++ FREEBUF(si->cache_list); ++ FREEBUF(si->cache_buf); + } + + /* +- * Translate a PTE into a swap device and offset string. +- */ +-char * +-swap_location(ulonglong pte, char *buf) ++ * Emulate the total count calculation done by the ++ * slab_objects() sysfs function in slub.c. ++ */ ++static int ++get_kmem_cache_slub_data(long cmd, struct meminfo *si) + { +- char swapdev[BUFSIZE]; ++ int i, n, node; ++ ulong total_objects, total_slabs; ++ ulong cpu_slab_ptr, node_ptr; ++ ulong node_nr_partial, node_nr_slabs; ++ int full_slabs, objects; ++ long p; ++ short inuse; ++ ulong *nodes, *per_cpu; + +- if (!pte) +- return NULL; ++ /* ++ * nodes[n] is not being used (for now) ++ * per_cpu[n] is a count of cpu_slab pages per node. ++ */ ++ nodes = (ulong *)GETBUF(2 * sizeof(ulong) * vt->numnodes); ++ per_cpu = nodes + vt->numnodes; + +- sprintf(buf, "%s OFFSET: %lld", +- get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte)); ++ total_slabs = total_objects = 0; + +- return buf; +-} ++ for (i = 0; i < kt->cpus; i++) { ++ cpu_slab_ptr = get_cpu_slab_ptr(si, i); + +-/* +- * Given the type field from a PTE, return the name of the swap device. +- */ +-static char * +-get_swapdev(ulong type, char *buf) +-{ +- unsigned int i, swap_info_len; +- ulong swap_info, swap_file; +- ulong vfsmnt; ++ if (!cpu_slab_ptr) ++ continue; + +- if (!symbol_exists("nr_swapfiles")) +- error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); ++ if ((node = page_to_nid(cpu_slab_ptr)) < 0) ++ goto bailout; + +- if (!symbol_exists("swap_info")) +- error(FATAL, "swap_info doesn't exist in this kernel!\n"); ++ switch (cmd) ++ { ++ case GET_SLUB_OBJECTS: ++ if (!readmem(cpu_slab_ptr + OFFSET(page_inuse), ++ KVADDR, &inuse, sizeof(short), ++ "page inuse", RETURN_ON_ERROR)) ++ return FALSE; ++ total_objects += inuse; ++ break; + +- swap_info = symbol_value("swap_info"); ++ case GET_SLUB_SLABS: ++ total_slabs++; ++ break; ++ } ++ per_cpu[node]++; ++ } ++ ++ for (n = 0; n < vt->numnodes; n++) { ++ if (vt->flags & CONFIG_NUMA) ++ node_ptr = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_node) + ++ (sizeof(void *)*n)); ++ else ++ node_ptr = si->cache + ++ OFFSET(kmem_cache_local_node); + +- swap_info_len = (i = ARRAY_LENGTH(swap_info)) ? 
+- i : get_array_length("swap_info", NULL, 0); ++ if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), ++ KVADDR, &node_nr_partial, sizeof(ulong), ++ "kmem_cache_node nr_partial", RETURN_ON_ERROR)) ++ goto bailout; ++ if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), ++ KVADDR, &node_nr_slabs, sizeof(ulong), ++ "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) ++ goto bailout; + +- sprintf(buf, "(unknown swap location)"); ++ switch (cmd) ++ { ++ case GET_SLUB_OBJECTS: ++ if ((p = count_partial(node_ptr)) < 0) ++ return FALSE; ++ total_objects += p; ++ break; + +- if (type >= swap_info_len) +- return buf; ++ case GET_SLUB_SLABS: ++ total_slabs += node_nr_partial; ++ break; ++ } + +- swap_info += (SIZE(swap_info_struct) * type); +- fill_swap_info(swap_info); +- swap_file = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_swap_file)); ++ full_slabs = node_nr_slabs - per_cpu[n] - node_nr_partial; ++ objects = INT(si->cache_buf + OFFSET(kmem_cache_objects)); + +- if (swap_file) { +- if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { +- vfsmnt = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_swap_vfsmnt)); +- get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt); +- } else if (VALID_MEMBER (swap_info_struct_old_block_size)) { +- get_pathname(file_to_dentry(swap_file), +- buf, BUFSIZE, 1, 0); +- } else { +- get_pathname(swap_file, buf, BUFSIZE, 1, 0); ++ switch (cmd) ++ { ++ case GET_SLUB_OBJECTS: ++ total_objects += (full_slabs * objects); ++ break; ++ ++ case GET_SLUB_SLABS: ++ total_slabs += full_slabs; ++ break; + } +- } + +- return buf; +-} ++ if (!(vt->flags & CONFIG_NUMA)) ++ break; ++ } + +-/* +- * If not currently stashed, cache the passed-in swap_info_struct. +- */ +-static void +-fill_swap_info(ulong swap_info) +-{ +- if (vt->last_swap_read == swap_info) +- return; ++ switch (cmd) ++ { ++ case GET_SLUB_OBJECTS: ++ si->inuse = total_objects; ++ break; + +- if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *) +- malloc(SIZE(swap_info_struct)))) +- error(FATAL, "cannot malloc swap_info_struct space\n"); +- +- readmem(swap_info, KVADDR, vt->swap_info_struct, SIZE(swap_info_struct), +- "fill_swap_info", FAULT_ON_ERROR); ++ case GET_SLUB_SLABS: ++ si->num_slabs = total_slabs; ++ break; ++ } + +- vt->last_swap_read = swap_info; ++ FREEBUF(nodes); ++ return TRUE; ++ ++bailout: ++ FREEBUF(nodes); ++ return FALSE; + } + +-/* +- * If active, clear references to the swap_info references. +- */ +-void +-clear_swap_info_cache(void) ++ ++static void ++do_kmem_cache_slub(struct meminfo *si) + { +- if (ACTIVE()) +- vt->last_swap_read = 0; +-} ++ int i, n; ++ ulong cpu_slab_ptr, node_ptr; ++ ulong node_nr_partial, node_nr_slabs; ++ ulong *per_cpu; + ++ per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes); + +-/* +- * Translage a vm_area_struct and virtual address into a filename +- * and offset string. +- */ ++ for (i = 0; i < kt->cpus; i++) { ++ cpu_slab_ptr = get_cpu_slab_ptr(si, i); + +-#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change! */ ++ fprintf(fp, "CPU %d SLAB:\n%s", i, ++ cpu_slab_ptr ? 
"" : " (empty)\n"); + +-static char * +-vma_file_offset(ulong vma, ulong vaddr, char *buf) +-{ +- ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset; +- ulong vfsmnt; +- char file[BUFSIZE]; +- char *vma_buf, *file_buf; ++ if (!cpu_slab_ptr) ++ continue; + +- if (!vma) +- return NULL; ++ if ((n = page_to_nid(cpu_slab_ptr)) >= 0) ++ per_cpu[n]++; + +- vma_buf = fill_vma_cache(vma); ++ si->slab = cpu_slab_ptr; ++ do_slab_slub(si, VERBOSE); + +- vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); ++ if (received_SIGINT()) ++ restart(0); ++ } + +- if (!vm_file) +- goto no_file_offset; ++ for (n = 0; n < vt->numnodes; n++) { ++ if (vt->flags & CONFIG_NUMA) ++ node_ptr = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_node) + ++ (sizeof(void *)*n)); ++ else ++ node_ptr = si->cache + ++ OFFSET(kmem_cache_local_node); + +- file_buf = fill_file_cache(vm_file); +- dentry = ULONG(file_buf + OFFSET(file_f_dentry)); ++ if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), ++ KVADDR, &node_nr_partial, sizeof(ulong), ++ "kmem_cache_node nr_partial", RETURN_ON_ERROR)) ++ break; ++ if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), ++ KVADDR, &node_nr_slabs, sizeof(ulong), ++ "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) ++ break; + +- if (!dentry) +- goto no_file_offset; ++ fprintf(fp, "KMEM_CACHE_NODE NODE SLABS PARTIAL PER-CPU\n"); + +- file[0] = NULLCHAR; +- if (VALID_MEMBER(file_f_vfsmnt)) { +- vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); +- get_pathname(dentry, file, BUFSIZE, 1, vfsmnt); +- } else +- get_pathname(dentry, file, BUFSIZE, 1, 0); ++ fprintf(fp, "%lx%s", node_ptr, space(VADDR_PRLEN > 8 ? 2 : 10)); ++ fprintf(fp, "%4d %5ld %7ld %7ld\n", ++ n, node_nr_slabs, node_nr_partial, per_cpu[n]); + +- if (!strlen(file)) +- goto no_file_offset; ++ do_node_lists_slub(si, node_ptr, n); + +- vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); ++ if (!(vt->flags & CONFIG_NUMA)) ++ break; ++ } + +- vm_offset = vm_pgoff = 0xdeadbeef; ++ fprintf(fp, "\n"); + +- if (VALID_MEMBER(vm_area_struct_vm_offset)) +- vm_offset = ULONG(vma_buf + +- OFFSET(vm_area_struct_vm_offset)); +- else if (VALID_MEMBER(vm_area_struct_vm_pgoff)) +- vm_pgoff = ULONG(vma_buf + +- OFFSET(vm_area_struct_vm_pgoff)); +- else +- goto no_file_offset; ++ FREEBUF(per_cpu); ++} + +- if (vm_offset != 0xdeadbeef) +- offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset; +- else if (vm_pgoff != 0xdeadbeef) { +- offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff; +- offset <<= PAGE_CACHE_SHIFT; ++#define DUMP_SLAB_INFO_SLUB() \ ++ { \ ++ char b1[BUFSIZE], b2[BUFSIZE]; \ ++ fprintf(fp, " %s %s %4d %5ld %9d %4ld\n", \ ++ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ ++ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), \ ++ node, si->objects, inuse, si->objects - inuse); \ ++ } ++ ++static void ++do_slab_slub(struct meminfo *si, int verbose) ++{ ++ physaddr_t paddr; ++ ulong vaddr; ++ ushort inuse; ++ ulong freelist, cpu_slab_ptr; ++ int i, cpu_slab, is_free, node; ++ ulong p, q; ++ ++ if (!si->slab) { ++ if (CRASHDEBUG(1)) ++ error(INFO, "-S option not supported for CONFIG_SLUB\n"); ++ return; + } + +- sprintf(buf, "%s OFFSET: %lx", file, offset); ++ if (!page_to_phys(si->slab, &paddr)) { ++ error(WARNING, ++ "%lx: cannot tranlate slab page to physical address\n", ++ si->slab); ++ return; ++ } ++ ++ node = page_to_nid(si->slab); ++ ++ vaddr = PTOV(paddr); + +- return buf; ++ if (verbose) ++ fprintf(fp, " %s", slab_hdr); + +-no_file_offset: +- return NULL; +-} ++ if 
(!readmem(si->slab + OFFSET(page_inuse), KVADDR, &inuse, ++ sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) ++ return; ++ if (!readmem(si->slab + OFFSET(page_freelist), KVADDR, &freelist, ++ sizeof(void *), "page.freelist", RETURN_ON_ERROR)) ++ return; + +-/* +- * Translate a PTE into its physical address and flags. +- */ +-void +-cmd_pte(void) +-{ +- int c; +- ulonglong pte; ++ DUMP_SLAB_INFO_SLUB(); + +- while ((c = getopt(argcnt, args, "")) != EOF) { +- switch(c) +- { +- default: +- argerrs++; +- break; +- } +- } ++ if (!verbose) ++ return; + +- if (argerrs) +- cmd_usage(pc->curcmd, SYNOPSIS); ++ for (i = 0, cpu_slab = -1; i < kt->cpus; i++) { ++ cpu_slab_ptr = get_cpu_slab_ptr(si, i); + +- while (args[optind]) { +- pte = htoll(args[optind], FAULT_ON_ERROR, NULL); +- machdep->translate_pte((ulong)pte, NULL, pte); +- optind++; ++ if (!cpu_slab_ptr) ++ continue; ++ if (cpu_slab_ptr == si->slab) { ++ cpu_slab = i; ++ break; ++ } + } + +-} +- +-static char *node_zone_hdr = "ZONE NAME SIZE"; ++ fprintf(fp, " %s", free_inuse_hdr); + +-/* +- * On systems supporting memory nodes, display the basic per-node data. +- */ +-static void +-dump_memory_nodes(int initialize) +-{ +- int i, j; +- int n, id, flen, slen; +- ulong node_mem_map; +- ulong node_start_paddr; +- ulong node_start_pfn; +- ulong node_start_mapnr; +- ulong node_spanned_pages; +- ulong free_pages, zone_size, node_size; +- ulong zone_start_paddr, zone_start_mapnr, zone_mem_map; +- ulong zone_start_pfn; +- ulong bdata; +- ulong pgdat; +- ulong node_zones; +- ulong value; +- char buf1[BUFSIZE]; +- char buf2[BUFSIZE]; +- char buf3[BUFSIZE]; +- char buf4[BUFSIZE]; +- char buf5[BUFSIZE]; +- struct node_table *nt; ++ for (p = vaddr; p < vaddr + si->objects * si->size; p += si->size) { ++ is_free = FALSE; ++ for (is_free = 0, q = freelist; q; ++ q = get_freepointer(si, (void *)q)) { ++ if (q == BADADDR) ++ return; ++ if (p == q) { ++ is_free = TRUE; ++ break; ++ } ++ } + +- if (!(vt->flags & NODES)) { +- if (!initialize) +- error(FATAL, +- "memory nodes not supported by this kernel\n\n"); +- else { +- nt = &vt->node_table[0]; +- nt->node_id = 0; +- if (symbol_exists("contig_page_data")) +- nt->pgdat = symbol_value("contig_page_data"); +- else +- nt->pgdat = 0; +- nt->size = vt->total_pages; +- nt->mem_map = vt->mem_map; +- nt->start_paddr = 0; +- nt->start_mapnr = 0; +- return; ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if ((si->spec_addr < p) || ++ (si->spec_addr >= (p + si->size))) { ++ if (!(si->flags & VERBOSE)) ++ continue; ++ } + } +- } + +- if (initialize) +- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); +- else +- pgdat = vt->node_table[0].pgdat; ++ fprintf(fp, " %s%lx%s", ++ is_free ? " " : "[", ++ p, is_free ? 
" " : "]"); ++ if (is_free && (cpu_slab >= 0)) ++ fprintf(fp, "(cpu %d cache)", cpu_slab); ++ fprintf(fp, "\n"); + +- for (n = 0; pgdat; n++) { +- if (n >= vt->numnodes) +- error(FATAL, "numnodes out of sync with pgdat_list?\n"); ++ } ++} + +- nt = &vt->node_table[n]; ++static ulong ++get_freepointer(struct meminfo *si, void *object) ++{ ++ ulong vaddr, nextfree; ++ ++ vaddr = (ulong)(object + si->slab_offset); ++ if (!readmem(vaddr, KVADDR, &nextfree, ++ sizeof(void *), "get_freepointer", RETURN_ON_ERROR)) ++ return BADADDR; + +- readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, +- sizeof(int), "pglist node_id", FAULT_ON_ERROR); ++ return nextfree; ++} + +- readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, +- &node_mem_map, sizeof(ulong), +- "node_mem_map", FAULT_ON_ERROR); ++static void ++do_node_lists_slub(struct meminfo *si, ulong node_ptr, int node) ++{ ++ ulong next, list_head; ++ int first; + +- if (VALID_MEMBER(pglist_data_node_start_paddr)) +- readmem(pgdat+OFFSET(pglist_data_node_start_paddr), +- KVADDR, &node_start_paddr, sizeof(ulong), +- "pglist node_start_paddr", FAULT_ON_ERROR); +- else if (VALID_MEMBER(pglist_data_node_start_pfn)) { +- readmem(pgdat+OFFSET(pglist_data_node_start_pfn), +- KVADDR, &node_start_pfn, sizeof(ulong), +- "pglist node_start_pfn", FAULT_ON_ERROR); +- node_start_mapnr = node_start_pfn; +- node_start_paddr = PTOB(node_start_pfn); +- } else error(INFO, +- "cannot determine zone starting physical address\n"); ++ list_head = node_ptr + OFFSET(kmem_cache_node_partial); ++ if (!readmem(list_head, KVADDR, &next, sizeof(ulong), ++ "kmem_cache_node partial", RETURN_ON_ERROR)) ++ return; + +- if (VALID_MEMBER(pglist_data_node_start_mapnr)) +- readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), +- KVADDR, &node_start_mapnr, sizeof(ulong), +- "pglist node_start_mapnr", FAULT_ON_ERROR); ++ fprintf(fp, "NODE %d PARTIAL:\n%s", node, ++ next == list_head ? 
" (empty)\n" : ""); ++ first = 0; ++ while (next != list_head) { ++ si->slab = next - OFFSET(page_lru); ++ if (first++ == 0) ++ fprintf(fp, " %s", slab_hdr); ++ do_slab_slub(si, !VERBOSE); ++ ++ if (received_SIGINT()) ++ restart(0); + +- if (VALID_MEMBER(pglist_data_node_size)) +- readmem(pgdat+OFFSET(pglist_data_node_size), +- KVADDR, &node_size, sizeof(ulong), +- "pglist node_size", FAULT_ON_ERROR); +- else if (VALID_MEMBER(pglist_data_node_spanned_pages)) { +- readmem(pgdat+OFFSET(pglist_data_node_spanned_pages), +- KVADDR, &node_spanned_pages, sizeof(ulong), +- "pglist node_spanned_pages", FAULT_ON_ERROR); +- node_size = node_spanned_pages; +- } else error(INFO, "cannot determine zone size\n"); ++ if (!readmem(next, KVADDR, &next, sizeof(ulong), ++ "page.lru.next", RETURN_ON_ERROR)) ++ return; ++ } + +- readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata, +- sizeof(ulong), "pglist bdata", FAULT_ON_ERROR); ++ if (INVALID_MEMBER(kmem_cache_node_full)) { ++ fprintf(fp, "NODE %d FULL:\n (not tracked)\n", node); ++ return; ++ } + +- if (initialize) { +- nt->node_id = id; +- nt->pgdat = pgdat; +- if (VALID_MEMBER(zone_struct_memsize)) +- nt->size = 0; /* initialize below */ +- else +- nt->size = node_size; +- nt->mem_map = node_mem_map; +- nt->start_paddr = node_start_paddr; +- nt->start_mapnr = node_start_mapnr; +- } ++ list_head = node_ptr + OFFSET(kmem_cache_node_full); ++ if (!readmem(list_head, KVADDR, &next, sizeof(ulong), ++ "kmem_cache_node full", RETURN_ON_ERROR)) ++ return; + +- if (!initialize) { +- if (n) { +- fprintf(fp, "\n"); +- pad_line(fp, slen, '-'); +- } +- flen = MAX(VADDR_PRLEN, strlen("BOOTMEM_DATA")); +- fprintf(fp, "%sNODE %s %s %s %s\n", +- n ? "\n\n" : "", +- mkstring(buf1, 8, CENTER, "SIZE"), +- mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"), +- mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"), +- mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES")); ++ fprintf(fp, "NODE %d FULL:\n%s", node, ++ next == list_head ? 
" (empty)\n" : ""); ++ first = 0; ++ while (next != list_head) { ++ si->slab = next - OFFSET(page_lru); ++ if (first++ == 0) ++ fprintf(fp, " %s", slab_hdr); ++ do_slab_slub(si, !VERBOSE); + +- node_zones = pgdat + OFFSET(pglist_data_node_zones); +- sprintf(buf5, " %2d %s %s %s %s\n", id, +- mkstring(buf1, 8, CENTER|LJUST|LONG_DEC, +- MKSTR(node_size)), +- mkstring(buf2, flen, CENTER|LJUST|LONG_HEX, +- MKSTR(pgdat)), +- mkstring(buf3, flen, CENTER|LONG_HEX, +- MKSTR(bdata)), +- mkstring(buf4, flen, CENTER|LJUST|LONG_HEX, +- MKSTR(node_zones))); +- fprintf(fp, "%s", buf5); ++ if (received_SIGINT()) ++ restart(0); + +- j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) + +- count_leading_spaces(buf4); +- for (i = 1; i < vt->nr_zones; i++) { +- node_zones += SIZE_OPTION(zone_struct, zone); +- INDENT(j); +- fprintf(fp, "%lx\n", node_zones); +- } +- +- fprintf(fp, "%s START_PADDR START_MAPNR\n", +- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, +- "MEM_MAP")); +- fprintf(fp, "%s %s %s\n", +- mkstring(buf1, VADDR_PRLEN, +- CENTER|LONG_HEX, MKSTR(node_mem_map)), +- mkstring(buf2, strlen("START_PADDR"), +- CENTER|LONG_HEX|RJUST, MKSTR(node_start_paddr)), +- mkstring(buf3, strlen("START_MAPNR"), +- CENTER|LONG_DEC|RJUST, +- MKSTR(node_start_mapnr))); +- +- sprintf(buf2, "%s %s START_PADDR START_MAPNR", +- node_zone_hdr, +- mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, +- "MEM_MAP")); +- slen = strlen(buf2); +- fprintf(fp, "\n%s\n", buf2); +- } ++ if (!readmem(next, KVADDR, &next, sizeof(ulong), ++ "page.lru.next", RETURN_ON_ERROR)) ++ return; ++ } ++} + +- node_zones = pgdat + OFFSET(pglist_data_node_zones); +- for (i = 0; i < vt->nr_zones; i++) { +- if (CRASHDEBUG(7)) +- fprintf(fp, "zone at %lx\n", node_zones); + +- if (VALID_MEMBER(zone_struct_size)) +- readmem(node_zones+OFFSET(zone_struct_size), +- KVADDR, &zone_size, sizeof(ulong), +- "zone_struct size", FAULT_ON_ERROR); +- else if (VALID_MEMBER(zone_struct_memsize)) { +- readmem(node_zones+OFFSET(zone_struct_memsize), +- KVADDR, &zone_size, sizeof(ulong), +- "zone_struct memsize", FAULT_ON_ERROR); +- nt->size += zone_size; +- } else if (VALID_MEMBER(zone_spanned_pages)) { +- readmem(node_zones+ OFFSET(zone_spanned_pages), +- KVADDR, &zone_size, sizeof(ulong), +- "zone spanned_pages", FAULT_ON_ERROR); +- } else error(FATAL, +- "zone_struct has neither size nor memsize field\n"); +- readmem(node_zones+ +- OFFSET_OPTION(zone_struct_free_pages, +- zone_free_pages), KVADDR, &free_pages, +- sizeof(ulong), "zone[_struct] free_pages", +- FAULT_ON_ERROR); +- readmem(node_zones+OFFSET_OPTION(zone_struct_name, +- zone_name), KVADDR, &value, sizeof(void *), +- "zone[_struct] name", FAULT_ON_ERROR); +- if (!read_string(value, buf1, BUFSIZE-1)) +- sprintf(buf1, "(unknown) "); +- if (VALID_STRUCT(zone_struct)) { +- readmem(node_zones+ +- OFFSET(zone_struct_zone_start_paddr), +- KVADDR, &zone_start_paddr, +- sizeof(ulong), +- "node_zones zone_start_paddr", +- FAULT_ON_ERROR); +- readmem(node_zones+ +- OFFSET(zone_struct_zone_start_mapnr), +- KVADDR, &zone_start_mapnr, +- sizeof(ulong), +- "node_zones zone_start_mapnr", +- FAULT_ON_ERROR); +- } else { +- readmem(node_zones+ +- OFFSET(zone_zone_start_pfn), +- KVADDR, &zone_start_pfn, +- sizeof(ulong), +- "node_zones zone_start_pfn", +- FAULT_ON_ERROR); +- zone_start_paddr = PTOB(zone_start_pfn); +- readmem(node_zones+ +- OFFSET(zone_zone_mem_map), +- KVADDR, &zone_mem_map, +- sizeof(ulong), +- "node_zones zone_mem_map", +- FAULT_ON_ERROR); +- if (zone_mem_map) +- zone_start_mapnr = +- (zone_mem_map - node_mem_map) / 
+- SIZE(page); +- else +- zone_start_mapnr = 0; +- } +- readmem(node_zones+ +- OFFSET_OPTION(zone_struct_zone_mem_map, +- zone_zone_mem_map), KVADDR, &zone_mem_map, +- sizeof(ulong), "node_zones zone_mem_map", +- FAULT_ON_ERROR); ++static char * ++is_kmem_cache_addr_slub(ulong vaddr, char *kbuf) ++{ ++ int i, cnt; ++ ulong *cache_list; ++ ulong name; ++ char *cache_buf; ++ int found; ++ ++ cnt = get_kmem_cache_list(&cache_list); ++ cache_buf = GETBUF(SIZE(kmem_cache)); ++ ++ for (i = 0, found = FALSE; i < cnt; i++) { ++ if (cache_list[i] != vaddr) ++ continue; + +- if (!initialize) { +- fprintf(fp, " %2d %-9s %7ld ", +- i, buf1, zone_size); +- fprintf(fp, "%s %s %s\n", +- mkstring(buf1, VADDR_PRLEN, +- RJUST|LONG_HEX,MKSTR(zone_mem_map)), +- mkstring(buf2, strlen("START_PADDR"), +- LONG_HEX|RJUST,MKSTR(zone_start_paddr)), +- mkstring(buf3, strlen("START_MAPNR"), +- LONG_DEC|RJUST, +- MKSTR(zone_start_mapnr))); +- } ++ if (!readmem(cache_list[i], KVADDR, cache_buf, ++ SIZE(kmem_cache), "kmem_cache buffer", ++ RETURN_ON_ERROR)) ++ break; + +- node_zones += SIZE_OPTION(zone_struct, zone); +- } ++ name = ULONG(cache_buf + OFFSET(kmem_cache_name)); ++ if (!read_string(name, kbuf, BUFSIZE-1)) ++ sprintf(kbuf, "(unknown)"); + +- if (initialize) +- readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, +- pglist_data_pgdat_next), KVADDR, +- &pgdat, sizeof(void *), "pglist_data node_next", +- FAULT_ON_ERROR); +- else { +- if ((n+1) < vt->numnodes) +- pgdat = vt->node_table[n+1].pgdat; +- else +- pgdat = 0; +- } +- } ++ found = TRUE; ++ break; ++ } + +- if (n != vt->numnodes) +- error(FATAL, "numnodes out of sync with pgdat_list?\n"); ++ FREEBUF(cache_list); ++ FREEBUF(cache_buf); ++ ++ return (found ? kbuf : NULL); + } + + /* +- * Gather essential information regarding each memory node. ++ * Kernel-config-neutral page-to-node evaluator. + */ +-static void +-node_table_init(void) ++static int ++page_to_nid(ulong page) + { +- int n; +- ulong pgdat; +- +- /* +- * Override numnodes -- some kernels may leave it at 1 on a system +- * with multiple memory nodes. +- */ +- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); ++ int i; ++ physaddr_t paddr; ++ struct node_table *nt; ++ physaddr_t end_paddr; + +- for (n = 0; pgdat; n++) { +- readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, +- pglist_data_pgdat_next), KVADDR, +- &pgdat, sizeof(void *), "pglist_data node_next", +- FAULT_ON_ERROR); +- } +- if (n != vt->numnodes) { +- if (CRASHDEBUG(2)) +- error(NOTE, "changing numnodes from %d to %d\n", +- vt->numnodes, n); +- vt->numnodes = n; ++ if (!page_to_phys(page, &paddr)) { ++ error(INFO, "page_to_nid: invalid page: %lx\n", page); ++ return -1; + } + +- if (!(vt->node_table = (struct node_table *) +- malloc(sizeof(struct node_table) * vt->numnodes))) +- error(FATAL, "cannot malloc node_table %s(%d nodes)", +- vt->numnodes > 1 ? 
"array " : "", vt->numnodes); +- +- BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes); ++ for (i = 0; i < vt->numnodes; i++) { ++ nt = &vt->node_table[i]; + +- dump_memory_nodes(MEMORY_NODES_INITIALIZE); ++ end_paddr = nt->start_paddr + ++ ((physaddr_t)nt->size * (physaddr_t)PAGESIZE()); ++ ++ if ((paddr >= nt->start_paddr) && (paddr < end_paddr)) ++ return i; ++ } + +- qsort((void *)vt->node_table, (size_t)vt->numnodes, +- sizeof(struct node_table), compare_node_data); ++ error(INFO, "page_to_nid: cannot determine node for pages: %lx\n", ++ page); + +- if (CRASHDEBUG(2)) +- dump_memory_nodes(MEMORY_NODES_DUMP); ++ return -1; + } + + /* +- * The comparison function must return an integer less than, +- * equal to, or greater than zero if the first argument is +- * considered to be respectively less than, equal to, or +- * greater than the second. If two members compare as equal, +- * their order in the sorted array is undefined. ++ * Allocate and fill the passed-in buffer with a list of ++ * the current kmem_cache structures. + */ +- + static int +-compare_node_data(const void *v1, const void *v2) ++get_kmem_cache_list(ulong **cache_buf) + { +- struct node_table *t1, *t2; ++ int cnt; ++ ulong vaddr; ++ struct list_data list_data, *ld; + +- t1 = (struct node_table *)v1; +- t2 = (struct node_table *)v2; ++ get_symbol_data("slab_caches", sizeof(void *), &vaddr); + +- return (t1->node_id < t2->node_id ? -1 : +- t1->node_id == t2->node_id ? 0 : 1); ++ ld = &list_data; ++ BZERO(ld, sizeof(struct list_data)); ++ ld->start = vaddr; ++ ld->list_head_offset = OFFSET(kmem_cache_list); ++ ld->end = symbol_value("slab_caches"); ++ if (CRASHDEBUG(3)) ++ ld->flags |= VERBOSE; ++ ++ hq_open(); ++ cnt = do_list(ld); ++ *cache_buf = (ulong *)GETBUF(cnt * sizeof(ulong)); ++ cnt = retrieve_list(*cache_buf, cnt); ++ hq_close(); ++ ++ return cnt; + } + + + /* +- * Depending upon the processor, and whether we're running live or on a +- * dumpfile, get the system page size. ++ * Get the address of the head page of a compound page. 
+ */ +-uint +-memory_page_size(void) ++static ulong ++compound_head(ulong page) + { +- uint psz; ++ ulong flags, first_page;; + +- if (REMOTE_MEMSRC()) +- return remote_page_size(); ++ first_page = page; + +- switch (pc->flags & MEMORY_SOURCES) +- { +- case DISKDUMP: +- psz = diskdump_page_size(); +- break; ++ if (!readmem(page+OFFSET(page_flags), KVADDR, &flags, sizeof(ulong), ++ "page.flags", RETURN_ON_ERROR)) ++ return first_page; + +- case NETDUMP: +- psz = netdump_page_size(); +- break; ++ if ((flags & vt->PG_head_tail_mask) == vt->PG_head_tail_mask) ++ readmem(page+OFFSET(page_first_page), KVADDR, &first_page, ++ sizeof(ulong), "page.first_page", RETURN_ON_ERROR); ++ ++ return first_page; ++} + +- case MCLXCD: +- psz = (uint)mclx_page_size(); +- break; ++long ++count_partial(ulong node) ++{ ++ ulong list_head, next; ++ short inuse; ++ ulong total_inuse; ++ ++ total_inuse = 0; ++ list_head = node + OFFSET(kmem_cache_node_partial); ++ if (!readmem(list_head, KVADDR, &next, sizeof(ulong), ++ "kmem_cache_node.partial", RETURN_ON_ERROR)) ++ return -1; ++ ++ while (next != list_head) { ++ if (!readmem(next - OFFSET(page_lru) + OFFSET(page_inuse), KVADDR, &inuse, ++ sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) ++ return -1; ++ total_inuse += inuse; ++ if (!readmem(next, KVADDR, &next, sizeof(ulong), ++ "page.lru.next", RETURN_ON_ERROR)) ++ return -1; ++ } ++ return total_inuse; ++} + +- case LKCD: +-#if 0 /* REMIND: */ +- psz = lkcd_page_size(); /* dh_dump_page_size is HW page size; should add dh_page_size */ +-#else +- psz = (uint)getpagesize(); +-#endif +- break; ++char * ++is_slab_page(struct meminfo *si, char *buf) ++{ ++ int i, cnt; ++ ulong page_slab, page_flags, name; ++ ulong *cache_list; ++ char *cache_buf, *retval; + +- case DEVMEM: +- case MEMMOD: +- psz = (uint)getpagesize(); +- break; ++ if (!(vt->flags & KMALLOC_SLUB)) ++ return NULL; + +- case S390D: +- psz = s390_page_size(); +- break; ++ if (!is_page_ptr((ulong)si->spec_addr, NULL)) ++ return NULL; + +- default: +- error(FATAL, "memory_page_size: invalid pc->flags: %lx\n", +- pc->flags & MEMORY_SOURCES); +- } ++ if (!readmem(si->spec_addr + OFFSET(page_flags), KVADDR, ++ &page_flags, sizeof(ulong), "page.flags", ++ RETURN_ON_ERROR|QUIET)) ++ return NULL; + +- return psz; +-} ++ if (!(page_flags & vt->PG_slab)) ++ return NULL; + +-/* +- * Return the vmalloc address referenced by the first vm_struct +- * on the vmlist. This can normally be used by the machine-specific +- * xxx_vmalloc_start() routines. 
+- */ ++ if (!readmem(si->spec_addr + OFFSET(page_slab), KVADDR, ++ &page_slab, sizeof(ulong), "page.slab", ++ RETURN_ON_ERROR|QUIET)) ++ return NULL; + +-ulong +-first_vmalloc_address(void) +-{ +- ulong vmlist, addr; ++ retval = NULL; ++ cnt = get_kmem_cache_list(&cache_list); ++ cache_buf = GETBUF(SIZE(kmem_cache)); ++ ++ for (i = 0; i < cnt; i++) { ++ if (page_slab == cache_list[i]) { ++ if (!readmem(cache_list[i], KVADDR, cache_buf, ++ SIZE(kmem_cache), "kmem_cache buffer", ++ QUIET|RETURN_ON_ERROR)) ++ goto bailout; ++ ++ name = ULONG(cache_buf + OFFSET(kmem_cache_name)); ++ if (!read_string(name, buf, BUFSIZE-1)) ++ goto bailout; + +- get_symbol_data("vmlist", sizeof(void *), &vmlist); ++ retval = buf; ++ break; ++ } ++ } + +- if (!readmem(vmlist+OFFSET(vm_struct_addr), KVADDR, &addr, +- sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) +- non_matching_kernel(); ++bailout: ++ FREEBUF(cache_list); ++ FREEBUF(cache_buf); + +- return addr; ++ return retval; + } + + /* +- * Return the L1 cache size in bytes, which can be found stored in the +- * cache_cache. ++ * Figure out which of the kmem_cache.cpu_slab declarations ++ * is used by this kernel, and return a pointer to the slab ++ * page being used. + */ +- +-int +-l1_cache_size(void) ++static ulong ++get_cpu_slab_ptr(struct meminfo *si, int cpu) + { +- ulong cache_cache; +- ulong c_align; +- int colour_off; +- int retval; ++ ulong cpu_slab_ptr, page; + +- cache_cache = symbol_value("cache_cache"); ++ switch (vt->cpu_slab_type) ++ { ++ case TYPE_CODE_STRUCT: ++ cpu_slab_ptr = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_cpu_slab) + ++ OFFSET(kmem_cache_cpu_page)); ++ break; + +- retval = -1; ++ case TYPE_CODE_ARRAY: ++ cpu_slab_ptr = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_cpu_slab) + (sizeof(void *)*cpu)); ++ ++ if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) { ++ if (!readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_page), ++ KVADDR, &page, sizeof(void *), ++ "kmem_cache_cpu.page", RETURN_ON_ERROR)) ++ cpu_slab_ptr = 0; ++ else ++ cpu_slab_ptr = page; ++ } ++ break; + +- if (VALID_MEMBER(kmem_cache_s_c_align)) { +- readmem(cache_cache+OFFSET(kmem_cache_s_c_align), +- KVADDR, &c_align, sizeof(ulong), +- "c_align", FAULT_ON_ERROR); +- retval = (int)c_align; +- } else if (VALID_MEMBER(kmem_cache_s_colour_off)) { +- readmem(cache_cache+OFFSET(kmem_cache_s_colour_off), +- KVADDR, &colour_off, sizeof(int), +- "colour_off", FAULT_ON_ERROR); +- retval = colour_off; ++ default: ++ error(FATAL, "cannot determine location of kmem_cache.cpu_slab page\n"); + } + +- return retval; ++ return cpu_slab_ptr; + } + +-/* +- * Multi-purpose routine used to query/control dumpfile memory usage. 
+- */ +-int +-dumpfile_memory(int cmd) ++#ifdef NOT_USED ++ulong ++slab_to_kmem_cache_node(struct meminfo *si, ulong slab_page) + { +- int retval; ++ int node; ++ ulong node_ptr; + +- retval = 0; ++ if (vt->flags & CONFIG_NUMA) { ++ node = page_to_nid(slab_page); ++ node_ptr = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_node) + ++ (sizeof(void *)*node)); ++ } else ++ node_ptr = si->cache + OFFSET(kmem_cache_local_node); + +- if (!DUMPFILE()) +- return retval; ++ return node_ptr; ++} + +- switch (cmd) +- { +- case DUMPFILE_MEM_USED: +- if (REMOTE_DUMPFILE()) +- retval = remote_memory_used(); +- else if (pc->flags & NETDUMP) +- retval = netdump_memory_used(); +- else if (pc->flags & DISKDUMP) +- retval = diskdump_memory_used(); +- else if (pc->flags & LKCD) +- retval = lkcd_memory_used(); +- else if (pc->flags & MCLXCD) +- retval = vas_memory_used(); +- else if (pc->flags & S390D) +- retval = s390_memory_used(); +- break; ++ulong ++get_kmem_cache_by_name(char *request) ++{ ++ int i, cnt; ++ ulong *cache_list; ++ ulong name; ++ char *cache_buf; ++ char buf[BUFSIZE]; ++ ulong found; ++ ++ cnt = get_kmem_cache_list(&cache_list); ++ cache_buf = GETBUF(SIZE(kmem_cache)); ++ found = 0; ++ ++ for (i = 0; i < cnt; i++) { ++ readmem(cache_list[i], KVADDR, cache_buf, ++ SIZE(kmem_cache), "kmem_cache buffer", ++ FAULT_ON_ERROR); + +- case DUMPFILE_FREE_MEM: +- if (REMOTE_DUMPFILE()) +- retval = remote_free_memory(); +- else if (pc->flags & NETDUMP) +- retval = netdump_free_memory(); +- else if (pc->flags & DISKDUMP) +- retval = diskdump_free_memory(); +- else if (pc->flags & LKCD) +- retval = lkcd_free_memory(); +- else if (pc->flags & MCLXCD) +- retval = vas_free_memory(NULL); +- else if (pc->flags & S390D) +- retval = s390_free_memory(); +- break; ++ name = ULONG(cache_buf + OFFSET(kmem_cache_name)); ++ if (!read_string(name, buf, BUFSIZE-1)) ++ continue; + +- case DUMPFILE_MEM_DUMP: +- if (REMOTE_DUMPFILE()) +- retval = remote_memory_dump(0); +- else if (pc->flags & NETDUMP) +- retval = netdump_memory_dump(fp); +- else if (pc->flags & DISKDUMP) +- retval = diskdump_memory_dump(fp); +- else if (pc->flags & LKCD) +- retval = lkcd_memory_dump(set_lkcd_fp(fp)); +- else if (pc->flags & MCLXCD) +- retval = vas_memory_dump(fp); +- else if (pc->flags & S390D) +- retval = s390_memory_dump(fp); +- break; +- +- case DUMPFILE_ENVIRONMENT: +- if (pc->flags & LKCD) { +- set_lkcd_fp(fp); +- dump_lkcd_environment(0); +- } else if (pc->flags & REM_LKCD) +- retval = remote_memory_dump(VERBOSE); +- break; +- } ++ if (STREQ(buf, request)) { ++ found = cache_list[i]; ++ break; ++ } ++ } + +- return retval; +-} ++ FREEBUF(cache_list); ++ FREEBUF(cache_buf); + ++ return found; ++} ++#endif /* NOT_USED */ +--- crash/unwind_x86.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/unwind_x86.h 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,2 @@ ++ ++ +--- crash/extensions/Makefile.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/Makefile 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,47 @@ ++# ++# Makefile for building crash shared object extensions ++# ++# Copyright (C) 2005, 2007 David Anderson ++# Copyright (C) 2005, 2007 Red Hat, Inc. All rights reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 2 of the License, or ++# (at your option) any later version. 
++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# To build the extension shared objects in this directory, run ++# "make extensions" from the top-level directory. ++# ++# To add a new extension object, simply copy your module's .c file ++# to this directory, and it will be built automatically using ++# the "standard" compile line. If that compile line does not ++# suffice, create a .mk file with the same prefix as the .c file, ++# and that makefile will be invoked. ++# ++ ++CONTRIB_SO := $(patsubst %.c,%.so,$(wildcard *.c)) ++ ++all: link_defs $(CONTRIB_SO) ++ ++link_defs: ++ @if [ ! -f defs.h ]; then \ ++ ln -s ../defs.h; fi ++ ++$(CONTRIB_SO): %.so: %.c ++ @if [ -f $*.mk ]; then \ ++ make -f $*.mk; \ ++ else \ ++ echo "gcc -nostartfiles -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS)"; \ ++ gcc -nostartfiles -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS); \ ++ fi ++ ++clean: ++ rm -f $(CONTRIB_SO) ++ @for MAKEFILE in `grep -sl "^clean:" *.mk`; \ ++ do make --no-print-directory -f $$MAKEFILE clean; \ ++ done +--- crash/extensions/libsial/sial_input.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_input.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,802 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "sial.h" ++ ++char *sialpp_create_buffer(void *, int); ++void sialpp_switch_to_buffer(void *); ++void sialpp_delete_buffer(void *); ++ ++typedef void fdone(void *data); ++extern void* sial_create_buffer(FILE *, int); ++typedef struct inbuf_s { ++ srcpos_t pos; /* current filename,line,col */ ++ int cursor; /* position of next input() character */ ++ int len; /* length of the buffer */ ++ char *buf; /* buffer */ ++ void *data; /* opaque data for callback */ ++ void *mac; /* for nested defines substitutions */ ++ fdone *donefunc; /* function to call when done with buffer */ ++ int space; ++ int eofonpop; /* terminate parsing at end of buffer ? */ ++#if linux ++ void* yybuf; ++#endif ++ ++} inbuf_t; ++ ++void sial_switch_to_buffer(void *); ++void sial_delete_buffer(void *); ++#define MAXIN 20 ++static inbuf_t inlist[MAXIN]; ++static inbuf_t *in=0; ++static int nin=0; ++static int eol=0, virgin=1; ++#if linux ++static int inpp=0; ++#endif ++ ++extern void *sial_getmac(char *, int); ++ ++/* this function is called by the macro macro functions to set ++ and test the current buffer level in order to take care of ++ imbedded macros w/ the same parameter names. ++ see sial_getmac(). ++*/ ++void *sial_getcurmac(void) ++{ ++ return in ? in->mac : 0 ; ++} ++ ++static void ++sial_pusherror(void) ++{ ++ sial_error("Too many level of input stream"); ++} ++ ++/* ++ Push a buffer onto the parser input stream. 
++*/ ++void ++sial_pushbuf(char *buf, char *fname, void (*vf)(void *), void *d, void *m) ++{ ++fdone *f=(fdone*)vf; ++ ++ if(nin==MAXIN) sial_pusherror(); ++ ++ /* if we are pushing a macro then use upper level coordinates */ ++ if(fname) { ++ ++ inlist[nin].pos.line=1; ++ inlist[nin].pos.col=1; ++ inlist[nin].pos.file=fname; ++ ++ } else sial_setpos(&inlist[nin].pos); ++ ++ /* set it */ ++ if(nin) { ++ ++ sial_curpos(&inlist[nin].pos, &inlist[nin-1].pos); ++ ++ } else { ++ ++ sial_curpos(&inlist[nin].pos, 0); ++ ++ } ++ ++ inlist[nin].buf=buf; ++ inlist[nin].donefunc=f; ++ inlist[nin].space=0; ++ inlist[nin].data=d; ++ inlist[nin].mac=m; ++ inlist[nin].cursor=0; ++ inlist[nin].len=strlen(buf); ++ inlist[nin].eofonpop=0; ++#if linux ++ if(inpp) { ++ inlist[nin].yybuf=sialpp_create_buffer(0, inlist[nin].len); ++ sialpp_switch_to_buffer(inlist[nin].yybuf); ++ }else{ ++ inlist[nin].yybuf=sial_create_buffer(0, inlist[nin].len); ++ sial_switch_to_buffer(inlist[nin].yybuf); ++ } ++#endif ++ in=&inlist[nin]; ++ nin++; ++} ++ ++/* read the rest of the "#include" line from the parser input stream ++ open the corresponding file, push it's contain on the parser input ++ stream. ++*/ ++int ++sial_pushfile(char *name) ++{ ++struct stat s; ++char *fname; ++ ++ if(nin==MAXIN) sial_pusherror(); ++ ++ fname=sial_fileipath(name); ++ ++ if(fname) { ++ ++ if(!stat(fname, &s)) { ++ ++ char *buf=sial_alloc(s.st_size+1); ++ int fd; ++ ++ if((fd=open(fname, O_RDONLY))==-1) { ++ ++ sial_msg("%s: %s", fname, strerror(errno)); ++ ++ } ++ else { ++ ++ if(read(fd, buf, s.st_size) != s.st_size) { ++ ++ if(errno != EISDIR) ++ sial_msg("%s: read error : %s", fname, strerror(errno)); ++ ++ } ++ else { ++ ++ ++ buf[s.st_size]='\0'; ++ sial_pushbuf(buf, fname, sial_free, buf, 0); ++ close(fd); ++ return 1; ++ ++ } ++ close(fd); ++ } ++ ++ sial_free(buf); ++ ++ } ++ sial_free(fname); ++ } ++ return 0; ++ ++} ++ ++/* ++ Done with the current buffer. ++ Go back to previous on the stack. ++*/ ++static int ++sial_popin(void) ++{ ++ ++ if(eol || !nin) { ++ ++ if(!nin) in=0; ++ return 1; ++ ++ } else { ++ ++ nin--; ++ ++ /* call back */ ++ if(inlist[nin].donefunc) { ++ ++ inlist[nin].donefunc(inlist[nin].data); ++ } ++ if(inlist[nin].eofonpop) { ++ ++ eol=1; ++#if linux ++ inpp=0; ++#endif ++ } ++ if(!nin) in=0; ++ else { ++ ++ in=&inlist[nin-1]; ++ if(!eol) { ++#if linux ++ if(inpp) { ++ sialpp_switch_to_buffer(inlist[nin-1].yybuf); ++ sialpp_delete_buffer(inlist[nin].yybuf); ++ } else { ++ sial_switch_to_buffer(inlist[nin-1].yybuf); ++ sial_delete_buffer(inlist[nin].yybuf); ++ } ++#endif ++ } ++ sial_curpos(&in->pos, 0); ++ } ++ return 0; ++ } ++} ++ ++/* ++ With linux we need to use the wrap function ++ so that the flex buffer stuff is keaped in the game. 
++*/ ++int ++sialwrap(void) ++{ ++ return sial_popin(); ++} ++ ++int ++sialppwrap(void) ++{ ++ if(eol) return 1; ++ return sial_popin(); ++} ++ ++void ++sial_popallin(void) ++{ ++ while(nin) { ++ eol=0; ++ sial_popin(); ++ } ++} ++ ++#define BLK_IFDEF 1 ++#define BLK_IFNDEF 2 ++#define BLK_IF 3 ++#define BLK_ELIF 4 ++#define BLK_ELSE 5 ++ ++typedef struct ifblk { ++ int type; /* type of block */ ++ int exprpos; /* curpor to start of corresponding expression */ ++ int bstart; /* curpor position at start of block */ ++ int dirlen; /* length of the directive name */ ++ int bend; /* cursor position at end of block */ ++ struct ifblk *next; ++} ifblk_t; ++ ++static int ++sial_isif(int pos) ++{ ++ if( ++ (in->len-pos>6 && !strncmp(in->buf+pos, "ifndef", 6)) ++ || (in->len-pos>5 && !strncmp(in->buf+pos, "ifdef", 5)) ++ || (in->len-pos>2 && !strncmp(in->buf+pos, "if", 2)) ++ ++ ) return 1; ++ ++ return 0; ++} ++ ++/* ++ Get directly to next block, skipping nested blocks. ++*/ ++static int ++sial_nxtblk(int pos, int lev) ++{ ++int virgin=0; ++ ++ while(1) { ++ ++ if(pos==in->len) { ++ ++ sial_error("Block without endif"); ++ } ++ ++ if(virgin && in->buf[pos]=='#') { ++ ++ pos++; ++ ++ /* nested if ? */ ++ if(in->buf[pos]=='i' && sial_isif(pos)) { ++ ++ while(1) { ++ pos=sial_nxtblk(pos, lev+1); ++ if(in->len-pos>5 && !strncmp(in->buf+pos, "endif", 5)) break; ++ } ++ ++ } else if(in->buf[pos]=='e') return pos; ++ ++ } else if(in->buf[pos]=='\n') { ++ ++ virgin=1; ++ ++ } else if(in->buf[pos] != ' ' && in->buf[pos] != '\t') { ++ ++ virgin=0; ++ } ++ pos++; ++ } ++} ++ ++static ifblk_t * ++sial_getblklst(void) ++{ ++ifblk_t *lst, *last; ++int doneelse=0, pos; ++ ++ lst=sial_alloc(sizeof(ifblk_t)); ++ ++ lst->bstart=in->cursor-1; ++ if(!strncmp(in->buf+in->cursor, "ifdef", 5)) { ++ ++ lst->type=BLK_IFDEF; ++ lst->exprpos=lst->bstart+6; ++ lst->dirlen=6; ++ ++ } else if(!strncmp(in->buf+in->cursor, "ifndef", 6)){ ++ ++ lst->type=BLK_IFNDEF; ++ lst->exprpos=lst->bstart+7; ++ lst->dirlen=7; ++ ++ } else { ++ ++ lst->type=BLK_IF; ++ lst->exprpos=lst->bstart+3; ++ lst->dirlen=3; ++ } ++ ++ last=lst; ++ pos=in->cursor; ++ ++ while(1) { ++ ++ ifblk_t *new=sial_alloc(sizeof(ifblk_t)); ++ ++ pos=sial_nxtblk(pos, 0); ++ ++ last->bend=pos-2; ++ new->bstart=pos-1; ++ if(!strncmp(in->buf+pos, "elif", 4)) { ++ ++ if(doneelse) { ++ ++ sial_error("Additional block found after #else directive"); ++ } ++ new->type=BLK_ELIF; ++ new->exprpos=new->bstart+5; ++ new->dirlen=5; ++ ++ } else if(!strncmp(in->buf+pos, "else", 4)) { ++ ++ if(doneelse) { ++ ++ sial_error("#else already done"); ++ } ++ new->type=BLK_ELSE; ++ new->exprpos=new->bstart+5; ++ new->dirlen=5; ++ doneelse=1; ++ ++ } else if(!strncmp(in->buf+pos, "endif", 5)) { ++ ++ sial_free(new); ++ last->next=0; ++ break; ++ } ++ last->next=new; ++ last=new; ++ } ++ return lst; ++} ++ ++/* ++ Zap a complete block. ++ We put spaces everywhere but over the newline. ++ Hey, it works. It's good enough for me. ++*/ ++static void ++sial_zapblk(ifblk_t *blk) ++{ ++int i; ++ ++ for(i=blk->bstart;ibend;i++) { ++ ++ if(in->buf[i]!='\n') in->buf[i]=' '; ++ } ++} ++ ++int sial_eol(char c) { return (!c || c=='\n') ? 1 : 0; } ++ ++/* ++ This function is called by sial_input() when a #if[def] is found. ++ We gather all blocks of the if/then/else into a list. ++ Parsing and execution of the expression is done only when needed. 
++*/ ++void sial_rsteofoneol(void) ++{ ++ eol=0; ++ virgin=1; ++#if linux ++ inpp=0; ++#endif ++} ++ ++void ++sial_zapif(void) ++{ ++ifblk_t *lst=sial_getblklst(); ++ifblk_t *last=lst; ++int b=0; ++ ++ /* we scan the entire list untill a condition is true or we ++ reach #else or we reach the end */ ++ while(lst) { ++ ++ switch(lst->type) { ++ ++ case BLK_IFDEF: ++ case BLK_IFNDEF: ++ { ++ char mname[MAX_SYMNAMELEN+1], c; ++ int i=0, j=lst->bstart+lst->dirlen; ++ int v; ++ ++ /* get the macro name and see if it exists */ ++ /* skip all white spaces */ ++ while((c=in->buf[j]) == ' ' || c == '\t') if(c=='\n' || !c) { ++ ++ sial_error("Macro name not found!"); ++ ++ } else j++; ++ ++ /* get the constant or macro name */ ++ while((c=in->buf[j]) != ' ' && c != '\t' && c != '(') { ++ ++ if(c=='\n' || !c) break; ++ ++ if(i==MAX_SYMNAMELEN) break; ++ ++ mname[i++]=c; ++ j++; ++ } ++ mname[i]='\0'; ++ lst->dirlen += (j-lst->bstart-lst->dirlen); ++ if(sial_getmac(mname,0)) v=1; ++ else v=0; ++ b=lst->type==BLK_IFDEF?v:!v; ++ ++ } ++ break; ++ ++ case BLK_IF: case BLK_ELIF: ++ { ++ node_t*n; ++ void sialpprestart(int); ++ void sialppparse(void); ++ char *expr=sial_getline(); ++ int len=lst->dirlen; ++ ++#if linux ++ sialpprestart(0); ++ inpp=1; ++#endif ++ lst->dirlen += (in->cursor-lst->exprpos-1); ++ sial_pushbuf(expr, 0, sial_free, expr, 0); ++ in->eofonpop=1; ++ in->cursor += len; ++ sialppparse(); ++ ++ sial_rsteofoneol(); ++ eol=0; ++ ++ /* get the resulting node_t*/ ++ n=sial_getppnode(); ++ ++ /* execute it */ ++ { ++ ++ int *exval; ++ jmp_buf exitjmp; ++ void *sa; ++ value_t *v; ++ ++ sa=sial_setexcept(); ++ ++ if(!setjmp(exitjmp)) { ++ ++ sial_pushjmp(J_EXIT, &exitjmp, &exval); ++ v=NODE_EXE(n); ++ sial_rmexcept(sa); ++ sial_popjmp(J_EXIT); ++ b=sial_bool(v); ++ sial_freeval(v); ++ ++ } else { ++ ++ sial_rmexcept(sa); ++ sial_parseback(); ++ } ++ } ++ } ++ break; ++ ++ case BLK_ELSE: ++ { ++ ++ b=1; ++ ++ } ++ break; ++ } ++ ++ last=lst; ++ if(b) break; ++ ++ /* count new lines */ ++ { ++ while(in->cursor < lst->bend+1) { ++ ++ if(sial_eol(in->buf[in->cursor])) ++ sial_line(1); ++ in->cursor++; ++ } ++ ++ } ++ lst=lst->next; ++ } ++ ++ if(lst) { ++ ++ /* remove the # directive itself */ ++ memset(in->buf+lst->bstart, ' ', lst->dirlen); ++ ++ /* zap all remaining blocks */ ++ while((lst=lst->next)) { sial_zapblk(lst); last=lst; } ++ } ++ ++ /* most remove the #endif */ ++ memset(in->buf+last->bend+1, ' ', 6); ++} ++ ++static int rawinput=0; ++void sial_rawinput(int on) { rawinput=on; } ++ ++/* ++ Get the next character from the input stream tack. ++*/ ++int ++sial_input(void) ++{ ++register char c; ++ ++redo: ++ ++ if(!in || eol) { ++ ++ return 0; ++ } ++ ++ if(in->cursor==in->len) { ++ ++#if linux ++ return (-1); ++#else ++ sial_popin(); ++ goto redo; ++#endif ++ } ++ ++ c=in->buf[in->cursor++]; ++ if(!rawinput) { ++ if(c=='\\') { ++ ++ if(in->cursor==in->len) return c; ++ else if(in->buf[in->cursor]=='\n') { ++ ++ sial_line(1); ++ in->cursor++; ++ goto redo; ++ } ++ ++ } else if(c=='/') { ++ ++ if(in->cursor==in->len) return c; ++ else if(in->buf[in->cursor]=='/') { ++ ++ /* C++ stype comment. Eat it. 
*/ ++ in->cursor++; ++ while(in->cursorlen) { ++ ++ c=in->buf[in->cursor++]; ++ if(c=='\n') { ++ /* leave the newline in there */ ++ in->cursor--; ++ break; ++ } ++ } ++ goto redo; ++ ++ }else if(in->buf[in->cursor]=='*') { ++ ++ /* C style comment, eat it */ ++ in->cursor++; ++ while(in->cursorlen) { ++ ++ c=in->buf[in->cursor++]; ++ if(c=='*' && (in->cursorlen)) { ++ ++ if(in->buf[in->cursor]=='/') { ++ ++ in->cursor++; ++ break; ++ ++ } ++ ++ } else if(c=='/' && (in->cursorlen)) { ++ ++ if(in->buf[in->cursor]=='*') { ++ ++ sial_warning("Nested comment"); ++ ++ } ++ ++ } ++ if(c=='\n') sial_line(1); ++ } ++ goto redo; ++ } ++ ++ }else if(virgin && c=='#') { ++ ++ char *p=in->buf+in->cursor; ++ char *end=in->buf+in->len; ++ int c=0; ++ ++ /* skip white spaces '# define ... ' */ ++ while(p<(end-4) && (*p==' ' || *p=='\t')) { p++; c++; } ++ ++ /* this must be a preprocessor command */ ++ /* we trigger on the if, ifdef only. #define, #undef, #include are ++ handled by the lexer */ ++ ++ if(!strncmp(p, "if", 2)) { ++ ++ in->cursor += c; ++ sial_zapif(); ++ /* zapif sets the cursor correctly */ ++ goto redo; ++ } ++ } ++ } ++ ++ if(c=='\n') { ++ ++ virgin=1; ++ sial_line(1); ++ ++ }else if(c != ' ' && c != '\t') { ++ ++ virgin=0; ++ ++ } ++ else if(!rawinput){ ++ ++ register char c2=c; ++ ++ /* return one white space for a group of them */ ++ while((in->cursor < in->len) ++ && in->buf[in->cursor]==c2) in->cursor++; ++ ++ } ++ ++ return c; ++} ++ ++char * ++sial_cursorp() ++{ ++ if(!in) return 0; ++ return in->buf+in->cursor; ++} ++ ++void ++sial_unput(char c) ++{ ++ ++ if(!c) return; ++ if(!nin) return; ++ if(!in->cursor) { ++ ++ sial_error("Fatal unput error"); ++ ++ } ++ in->buf[--in->cursor]=c; ++ if(c=='\n') { ++ ++ sial_line(-1); ++ } ++} ++ ++/* ++ Get a single line from the parser stream. ++*/ ++char * ++sial_getline() ++{ ++char *buf2=0; ++ ++ /* use the current input stream for that */ ++ if(in) { ++ ++ /* save the cursor */ ++ int n=0, c; ++ char *buf=sial_alloc(in->len-in->cursor+1); ++ ++ while(!sial_eol(c=sial_input())) ++ buf[n++]=c; ++ buf[n]='\0'; ++ buf2=sial_alloc(n+2); ++ strcpy(buf2,buf); ++ buf2[n]=' '; ++ buf2[n+1]='\0'; ++ sial_free(buf); ++ /* leave the newline there */ ++ sial_unput(c); ++ } ++ return buf2; ++} ++ ++ ++/* read a complete line from the input stream */ ++void ++sial_include(void) ++{ ++char name[MAX_SYMNAMELEN+1]; ++int n=0; ++int c; ++int found=0; ++ ++ while((c=sial_input())) { ++ ++ if(c=='"') { ++ ++ if(!found) found++; ++ else break; ++ continue; ++ } ++ ++ if(c=='<') { ++ ++ found++; ++ continue; ++ ++ } ++ if(c=='>') break; ++ if(sial_eol(c)) { ++ ++ sial_error("Unexpected EOL on #include"); ++ } ++ if(found) { ++ ++ if(n==MAX_SYMNAMELEN) { ++ ++ sial_error("Filename too long"); ++ } ++ name[n++]=c; ++ ++ } ++ } ++ name[n]='\0'; ++ ++ /* flush the rest of the line */ ++ while((c=sial_input())) { ++ ++ if(sial_eol(c)) break; ++ } ++ sial_unput(c); ++ if(sial_fileipath(name)) { ++ ++ sial_pushfile(name); ++ ++ } else { ++ ++ sial_msg("Include file not found: '%s' [include path is '%s']", name, sial_getipath()); ++ } ++} +--- crash/extensions/libsial/Makefile.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/Makefile 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,94 @@ ++# ++# Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++# ++# ++# Makefile for LIBSIAL ++# ++ ++# Must be berkeley yacc. 
Bison will not cut it ++YACC = bison ++ ++LDIRT = lex.sial.c lex.sialpp.c sial.tab.c sial.tab.h sialpp.tab.c \ ++ sialpp.tab.h y.output mkbaseop baseops.c y.tab.c y.tab.h \ ++ libsial.so* *.output ++ ++LIBDIR = /usr/lib ++TARGETS = libsial.a ++ ++CFLAGS += -O3 -g -fPIC ++ifeq ($(TARGET), PPC64) ++ CFLAGS += -m64 ++endif ++ ++CFILES = sial_util.c sial_node.c sial_var.c sial_func.c sial_str.c \ ++ sial_op.c sial_num.c sial_stat.c sial_builtin.c sial_type.c \ ++ sial_case.c sial_api.c sial_member.c sial_alloc.c sial_define.c \ ++ sial_input.c sial_print.c ++ ++OFILES = $(CFILES:.c=.o) sialpp.tab.o sial.tab.o lex.sial.o lex.sialpp.o \ ++ baseops.o ++ ++HFILES = sial.h sial_api.h ++ ++LSOURCES = sial-lsed sialpp-lsed sial.l sialpp.l sial.y sialpp.y mkbaseop.c ++ ++all: default ++ ++showfiles: ++ @echo $(RELDIR)/$(CFILES) $(RELDIR)/$(HFILES) $(RELDIR)/$(LSOURCES) ++ ++exports: all ++ install $(TARGETS) $(ROOT)$(LIBDIR) ++ ++headers: ++ install -m 644 $(HFILES) $(ROOT)/usr/include ++ ++install: headers exports ++ (cd scripts ; $(MAKE) install ) ++ ++baseops.o: mkbaseop.c ++ $(CC) $(CFLAGS) -o mkbaseop mkbaseop.c ++ ./mkbaseop > baseops.c ++ $(CC) $(CFLAGS) -c baseops.c ++ ++mkbaseop.c sial_member.o sial_op.o sial_stat.o sial_type.o y.tab.o : sial.tab.h ++ ++lex.sial.o: lex.sial.c sial.tab.c sial.h ++ $(CC) $(CFLAGS) -c lex.sial.c ++ ++lex.sial.c: sial.l ++ flex -L -Psial -t sial.l > lex.sial.c ++ ++sial.tab.c: sial.tab.h ++ ++sialpp.tab.o: sialpp.tab.c ++ $(CC) $(CFLAGS) -c sialpp.tab.c ++ ++sial.tab.o: sial.tab.c ++ $(CC) $(CFLAGS) -c sial.tab.c ++ ++sial.tab.h : sial.y ++ $(YACC) -psial -v -t -d sial.y ++ ++lex.sialpp.o: lex.sialpp.c sialpp.tab.c sial.h ++ $(CC) $(CFLAGS) -c lex.sialpp.c ++ ++lex.sialpp.c: sialpp.l ++ flex -Psialpp -t sialpp.l > lex.sialpp.c ++ ++sialpp.tab.c: sialpp.tab.h sial.tab.h ++ ++sialpp.tab.h : sialpp.y sial.tab.h ++ $(YACC) -psialpp -v -t -d sialpp.y ++ ++default: $(TARGETS) ++ ++$(CFILES): $(HFILES) sial.tab.h ++ ++$(TARGETS): $(OFILES) ++ $(AR) ccurl $(TARGETS) $(OFILES) ++ ++clean: ++ -/bin/rm -f *.o $(TARGETS) $(LDIRT) ++ ++clobber: clean +--- crash/extensions/libsial/sial_str.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_str.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,185 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include ++#include ++#include "sial.h" ++ ++/* ++ Create a new string node from a string. 
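++
++ As a usage sketch, sial_makestr("hello") below wraps a fresh value_t
++ around a private copy of the text; sial_setstrval() duplicates the
++ buffer with sial_strdup(), so the caller keeps ownership of its own
++ copy.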
++*/ ++ ++value_t * ++sial_setstrval(value_t *val, char *buf) ++{ ++char *newbuf=sial_strdup(buf); ++ ++ val->v.data=(void*)newbuf; ++ val->type.type=V_STRING; ++ val->type.size=strlen(buf)+1; ++ val->set=0; ++ return val; ++} ++ ++value_t * ++sial_makestr(char *s) ++{ ++ return sial_setstrval(sial_newval(), s); ++} ++ ++static value_t* ++sial_exestr(char *buf) ++{ ++value_t *v=sial_newval(); ++ ++ sial_setstrval(v, buf); ++ return v; ++} ++ ++void ++sial_freestrnode(char *buf) ++{ ++ sial_free(buf); ++} ++ ++node_t* ++sial_allocstr(char *buf) ++{ ++node_t*n=sial_newnode(); ++ ++ n->exe=(xfct_t)sial_exestr; ++ n->free=(ffct_t)sial_freestrnode; ++ n->data=buf; ++ sial_setpos(&n->pos); ++ ++ return n; ++} ++ ++node_t* ++sial_strconcat(node_t*n1, node_t*n2) ++{ ++char *newbuf=sial_alloc(strlen(n1->data)+strlen(n2->data)+1); ++ ++ strcpy(newbuf, n1->data); ++ strcat(newbuf, n2->data); ++ sial_free(n1->data); ++ n1->data=newbuf; ++ sial_freenode(n2); ++ return n1; ++} ++ ++static int ++is_valid(int c, int base) ++{ ++ switch(base) ++ { ++ case 16: return (c>='0' && c<='9') || (toupper(c) >= 'A' && toupper(c) <= 'F'); ++ case 10: return (c>='0' && c<='9'); ++ case 8: return (c>='0' && c<='7'); ++ } ++ return 0; ++} ++ ++/* extract a number value_t from the input stream */ ++static int sial_getnum(int base) ++{ ++int val=0; ++ while(1) ++ { ++ char c=sial_input(), C; ++ ++ C=toupper(c); ++ if(is_valid(C, base)) { ++ ++ val=val*base; ++ val+=(C>='A')?10+('F'-C):'9'-C; ++ } ++ else ++ { ++ sial_unput(c); ++ break; ++ } ++ } ++ return val; ++} ++ ++int ++sial_getseq(int c) ++{ ++int i; ++static struct { ++ int code; ++ int value; ++} seqs[] = { ++ { 'n', '\n' }, ++ { 't', '\t' }, ++ { 'f', '\f' }, ++ { 'r', '\r' }, ++ { 'n', '\n' }, ++ { 'v', '\v' }, ++ { '\\', '\007' }, ++}; ++ for(i=0;i ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "sial.h" ++ ++/* ++ The next few functions manege the files and associated functions. ++*/ ++struct fdata; ++ ++typedef struct fctype_t { ++ int idx; ++ struct fctype_t*next; ++ ++} fctype_t; ++ ++typedef struct func { ++ ++ char *name; /* name of the function */ ++ var_t*varlist; /* parameters information */ ++ var_t*rvar; /* return value_t information */ ++ node_t*body; /* execution node for body */ ++ int local; /* load i.e. static ? */ ++ srcpos_t pos; /* source position of function declaration */ ++ struct fdata *file; /* back pointer to corresponding file */ ++ struct func *next; /* linked list */ ++ ++} func; ++ ++typedef struct fdata { ++ ++ char *fname; /* name of the file */ ++ int isdso; /* is this from a loadable module ? 
++ `globs' becomes the handle */ ++ time_t time; /* load time */ ++ var_t*fsvs; /* associated list of static variables */ ++ var_t*fgvs; /* associated list of global variables */ ++ void *globs; /* handle for these globals */ ++ func *funcs; /* chained list of functions */ ++ fctype_t *ctypes; /* ctypes declared by this function */ ++ struct fdata *next; /* chained list of files */ ++ ++} fdata; ++ ++static fdata *fall=0; ++void sialparse(void); ++static func * sial_getfbyname(char *name, fdata *thisfd); ++value_t * sial_execmcfunc(func *f, value_t **vp); ++ ++ull ++sial_getval(value_t*v) ++{ ++ull ret=0; ++ ++ if(!v) return 0; ++ ++ /* need to cast properly here */ ++ if(v->type.type==V_BASE || v->type.type==V_REF) { ++ ++ if(v->type.type==V_REF || !sial_issigned(v->type.typattr)) { ++ ++ switch(TYPE_SIZE(&v->type)) { ++ case 1: ret= (ull) v->v.uc; break; ++ case 2: ret= (ull) v->v.us; break; ++ case 4: ret= (ull) v->v.ul; break; ++ case 8: ret= (ull) v->v.ull; break; ++ default: sial_error("Oops getval base"); ++ } ++ ++ } else { ++ ++ switch(TYPE_SIZE(&v->type)) { ++ case 1: ret= (ull) v->v.sc; break; ++ case 2: ret= (ull) v->v.ss; break; ++ case 4: ret= (ull) v->v.sl; break; ++ case 8: ret= (ull) v->v.sll; break; ++ default: sial_error("Oops getval base"); ++ } ++ } ++ } ++ /* in the case of a struct/union we pass a pointer to it */ ++ else ret = (unsigned long)v->v.data; ++ return ret; ++} ++ ++static int ++sial_dohelp(char *fname) ++{ ++char buf[MAX_SYMNAMELEN+1]; ++char *hstr; ++ ++ sprintf(buf, "%s_help", fname); ++ ++ if(sial_chkfname(buf, 0)) { ++ ++ char buf2[MAX_SYMNAMELEN+1]; ++ char *ustr; ++ ++ sprintf(buf2, "%s_usage", fname); ++ ustr=(char*)(unsigned long)sial_exefunc(buf2, 0); ++ sial_msg("COMMAND: %s %s\n\n", fname , ustr?ustr:""); ++ hstr=(char*)(unsigned long)sial_exefunc(buf, 0); ++ sial_format(1, hstr); ++ sial_format(0, "\n"); ++ sial_msg("\n"); ++ return 1; ++ } ++ return 0; ++} ++ ++void ++sial_showallhelp() ++{ ++fdata *filep; ++ ++ for(filep=fall; filep; filep=filep->next) { ++ ++ func *funcp; ++ ++ for(funcp=filep->funcs;funcp;funcp=funcp->next) { ++ ++ (void)sial_dohelp(funcp->name); ++ ++ } ++ } ++} ++ ++int ++sial_showhelp(char *fname) ++{ ++ return sial_dohelp(fname); ++} ++ ++void* ++sial_getcurfile() { return fall; } ++ ++int ++sial_isnew(void *p) ++{ ++fdata *fd=(fdata *)p; ++struct stat stats; ++ ++ if(!stat(fd->fname, &stats)) { ++ ++ if(stats.st_mtime > fd->time) { ++ ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++void * ++sial_findfile(char *name, int unlink) ++{ ++fdata *fd; ++fdata *last=0; ++ ++ for(fd=fall; fd; last=fd, fd=fd->next) { ++ ++ if(!strcmp(fd->fname, name)) { ++ ++ /* remove from the list ?*/ ++ if(unlink) { ++ ++ if(!last) fall=fd->next; ++ else last->next=fd->next; ++ ++ } ++ return fd; ++ } ++ ++ } ++ return 0; ++} ++ ++void ++sial_freefunc(func *fn) ++{ ++ sial_free(fn->name); ++ NODE_FREE(fn->body); ++ if(fn->varlist) sial_freesvs(fn->varlist); ++ sial_freevar(fn->rvar); ++ sial_free(fn); ++} ++ ++static void ++sial_unloadso(fdata *fd) ++{ ++typedef int (*fp_t)(void); ++fp_t fp; ++func *f; ++ ++ if((fp=(fp_t)dlsym(fd->globs, BT_ENDDSO_SYM))) { ++ ++ fp(); ++ } ++ for(f=fd->funcs; f; ) { ++ ++ func *n=f->next; ++ sial_rmbuiltin(f->varlist); ++ sial_freevar(f->varlist); ++ sial_free(f); ++ f=n; ++ } ++ ++ dlclose(fd->globs); ++ ++ if(fall==fd) fall=fd->next; ++ else { ++ ++ fdata *last=fall; ++ ++ while(last->next) { ++ ++ if(last->next==fd) { ++ ++ last->next=fd->next; ++ break; ++ } ++ last=last->next; ++ } ++ } ++ ++ /* free the 
associated static and global variables */ ++ if(fd->fsvs) sial_freesvs(fd->fsvs); ++ if(fd->fgvs) sial_freesvs(fd->fgvs); ++ sial_free(fd->fname); ++ sial_free(fd); ++} ++ ++static void (*cb)(char *, int)=0; ++void sial_setcallback(void (*scb)(char *, int)) ++{ cb=scb; } ++static void ++sial_docallback( fdata *fd, int load) ++{ ++func *f; ++ ++ if(!cb) return; ++ ++ for(f=fd->funcs; f; f=f->next) { ++ ++ cb(f->name, load); ++ } ++} ++ ++void ++sial_freefile(fdata *fd) ++{ ++ if(fd) { ++ ++ func *fct, *nxt; ++ fctype_t *ct, *nct; ++ ++ if(fd->isdso) { ++ ++ sial_unloadso(fd); ++ return; ++ } ++ ++ /* free the associated static and global variables */ ++ if(fd->fsvs) sial_freesvs(fd->fsvs); ++ if(fd->fgvs) sial_freesvs(fd->fgvs); ++ ++ /* free all function nodes */ ++ // let debugger know ... ++ sial_docallback(fd, 0); ++ for(fct=fd->funcs; fct; fct=nxt) { ++ ++ nxt=fct->next; ++ sial_freefunc(fct); ++ } ++ ++ for(ct=fd->ctypes; ct; ct=nct) { ++ ++ nct=ct->next; ++ sial_free(ct); ++ } ++ sial_free(fd->fname); ++ if(fd->globs) sial_rm_globals(fd->globs); ++ sial_free(fd); ++ } ++ else sial_warning("Oops freefile!"); ++} ++ ++int ++sial_deletefile(char *name) ++{ ++fdata *fd=sial_findfile(name, 0); ++ ++ if(fd) { ++ ++ sial_freefile(fd); ++ (void)sial_findfile(name, 1); ++ return 1; ++ ++ } ++ return 0; ++} ++ ++static int parsing=0; ++static jmp_buf parjmp; ++ ++void ++sial_parseback(void) ++{ ++ if(parsing) { ++ ++ parsing=0; ++ longjmp(parjmp, 1); ++ } ++} ++ ++/* link in a new set of static file variables */ ++int ++sial_file_decl(var_t*svs) ++{ ++ sial_validate_vars(svs); ++ ++ if(!fall->fsvs) ++ fall->fsvs=(void*)sial_newvlist(); ++ ++ if(!fall->fgvs) ++ fall->fgvs=(void*)sial_newvlist(); ++ ++ (void)sial_addnewsvs(fall->fgvs, fall->fsvs, svs); ++ ++ return 1; ++} ++ ++typedef struct sigaction sact; ++static int sigs[]={SIGSEGV, SIGILL, SIGTRAP, SIGINT, SIGPIPE}; ++#define S_NSIG (sizeof(sigs)/sizeof(sigs[0])) ++ ++void ++sial_except_handler(int sig) ++{ ++static int i=0; ++ if(sig != SIGPIPE && sig != SIGINT) sial_error("Exception caught!"); ++ sial_dojmp(J_EXIT, &i); ++} ++ ++void * ++sial_setexcept() ++{ ++int i; ++sact *osa=sial_alloc(S_NSIG*sizeof(sact)); ++#if linux ++sact na; ++ ++ memset(&na, 0, sizeof(na)); ++ na.sa_handler=sial_except_handler; ++ na.sa_flags=SA_NODEFER; ++ ++#else ++sact na={ SA_NODEFER+SA_SIGINFO, sial_except_handler, 0, 0 }; ++#endif ++ ++ ++ for(i=0;ifname="__expr__"; ++ fd->next=fall; ++ fall=fd; ++ ++ sial_pushbuf(exp2, "stdin", 0, 0, 0); ++ parsing=1; ++ if(!setjmp(parjmp)) { ++ ++ sial_rsteofoneol(); ++ sial_settakeproto(1); ++ sialparse(); ++ sial_settakeproto(0); ++ ++ /* remove longjump for parsing */ ++ parsing=0; ++ ++ if(!fall->fgvs) { ++ ++ sial_error("Invalid function declaration."); ++ ++ } ++ ++ ret=fall->fgvs->next; ++ ++ } else { ++ ++ sial_popallin(); ++ ret=0; ++ ++ } ++ sial_free(exp2); ++ /* only free the top of the fgvs list to keep 'ret' */ ++ if(fall->fgvs) sial_freevar(fall->fgvs); ++ if(fall->fsvs) sial_freesvs(fall->fsvs); ++ fall=fd->next; ++ sial_free(fd); ++ return ret; ++} ++ ++/* ++ Load a dso file. ++ We are looking for the btinit() and btshutdown() functions. ++ ++ btinit() will should initialized the package and call sial_builtin() ++ to install the sial functions. ++ ++ btshutdown(), if it exists, will be called when an unload of the ++ file is requested. The dso should deallocate memory etc... at that ++ time. 
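++
++ A minimal extension therefore only has to export an init entry point
++ that reports success and, optionally, a cleanup entry point, roughly
++ (return values as tested by sial_loadso() below):
++
++     int btinit(void)     { return 1; }
++     int btshutdown(void) { return 1; }
++
++ the table of builtin prototypes is looked up separately through the
++ BT_SPEC_SYM symbol and each entry is handed to sial_builtin().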
++*/ ++static int ++sial_loadso(char *fname, int silent) ++{ ++void *h; ++ ++ if((h=dlopen(fname, RTLD_LAZY))) { ++ ++ typedef int (*fp_t)(void); ++ fp_t fp; ++ ++ if((fp=(fp_t)dlsym(h, BT_INIDSO_SYM))) { ++ ++ btspec_t *sp; ++ ++ if(fp()) { ++ ++ if((sp=(btspec_t *)dlsym(h, BT_SPEC_SYM))) { ++ ++ int i; ++ fdata *fd=sial_calloc(sizeof(fdata)); ++ func **ff=&fd->funcs; ++ ++ fd->fname=fname; ++ fd->isdso=1; ++ fd->globs=h; ++ ++ for(i=0;sp[i].proto;i++) { ++ ++ var_t*v; ++ ++ if((v=sial_builtin(sp[i].proto, sp[i].fp))) { ++ ++ func *f=sial_alloc(sizeof(func)); ++ ++ f->varlist=v; ++ f->next=*ff; ++ *ff=f; ++ } ++ } ++ fd->next=fall; ++ fall=fd; ++ return 1; ++ ++ } else if(!silent) { ++ ++ sial_msg("Missing '%s' table in dso [%s]", BT_SPEC_SYM, fname); ++ ++ } ++ ++ } else if(!silent) { ++ ++ sial_msg("Could not initialize dso [%s]", fname); ++ ++ } ++ ++ } else if(!silent) { ++ ++ sial_msg("Missing '%s' function in dso [%s]", BT_INIDSO_SYM, fname); ++ } ++ dlclose(h); ++ } ++ else if(!silent) sial_msg(dlerror()); ++ sial_free(fname); ++ return 0; ++} ++ ++void ++sial_addfunc_ctype(int idx) ++{ ++fctype_t *fct=sial_alloc(sizeof(fctype_t)); ++ ++ fct->idx=idx; ++ fct->next=fall->ctypes; ++ fall->ctypes=fct; ++} ++ ++int ++sial_newfile(char *name, int silent) ++{ ++fdata *fd; ++fdata *oldf; ++char *fname=sial_strdup(name); ++void *mtag; ++ ++ /* check if this is a dso type file */ ++ if(!strcmp(fname+strlen(fname)-3, ".so")) { ++ ++ if(sial_findfile(name,0)) { ++ ++ if(!silent) ++ sial_msg("Warning: dso must be unloaded before reload\n"); ++ return 0; ++ } ++ return sial_loadso(fname, silent); ++ ++ } ++ ++ fd=sial_calloc(sizeof(fdata)); ++ oldf=sial_findfile(name,1); ++ ++ /* push this file onto the parser stack */ ++ if(!sial_pushfile(fname)) { ++ ++ sial_free(fname); ++ if(!silent && errno != EISDIR) sial_msg("File %s : %s\n", name, strerror(errno)); ++ return 0; ++ } ++ ++ /* we also need to remove the globals for this file ++ before starting the parsing */ ++ if(oldf && oldf->globs) { ++ ++ sial_rm_globals(oldf->globs); ++ oldf->globs=0; ++ ++ } ++ ++ needvar=instruct=0; ++ ++ fd->fname=fname; ++ ++ /* put it on the list */ ++ fd->next=fall; ++ fall=fd; ++ ++ /* we tag the current ctype list so we know later what to clean up */ ++ sial_tagst(); ++ ++ /* we also tag the macro stack so we can erase out defines and ++ keep the compiler and api ones. */ ++ mtag=sial_curmac(); ++ ++ parsing=1; ++ if(!setjmp(parjmp)) { ++ ++ func *fct; ++ int ret=1; ++ ++ /* parse it */ ++ sial_rsteofoneol(); ++ ++ sialparse(); ++ ++ /* remove longjump for parsing */ ++ parsing=0; ++ ++ /* before adding the globals we need to push all the static ++ variables for this file since the initialization expressions ++ might use them (e.g. sizeof('a static var')). Eh, as long as ++ we keep the interpreter handling a superset of the 'standard' C ++ I don't have a problem with it. Do you ? 
*/ ++ ++ { ++ int lev; ++ ++ lev=sial_addsvs(S_STAT, fd->fsvs); ++ ++ /* ok to add the resulting globals now */ ++ fall->globs=sial_add_globals(fall->fgvs); ++ ++ sial_setsvlev(lev); ++ } ++ ++ /* ok to free olf version */ ++ if(oldf) sial_freefile(oldf); ++ ++ sial_flushtdefs(); ++ sial_flushmacs(mtag); ++ ++ /* we proceed with the callback */ ++ sial_docallback(fd, 1); ++ ++ fd->time=time(0); ++ ++ /* compilation was ok , check for a __init() function to execute */ ++ if((fct=sial_getfbyname("__init", fd))) { ++ ++ int *exval; ++ jmp_buf exitjmp; ++ sact *sa; ++ ++ sa=sial_setexcept(); ++ ++ if(!setjmp(exitjmp)) { ++ ++ sial_pushjmp(J_EXIT, &exitjmp, &exval); ++ sial_freeval(sial_execmcfunc(fct, 0)); ++ sial_rmexcept(sa); ++ sial_popjmp(J_EXIT); ++ ++ } ++ else { ++ ++ sial_rmexcept(sa); ++ ret=0; ++ } ++ ++ } ++ return ret; ++ } ++ else { ++ ++ /* remove all streams from the stack */ ++ sial_popallin(); ++ ++ /* error, free this partial one and reinstall old one */ ++ if(oldf) { ++ /* we zap the top pointer (it's fd) */ ++ oldf->next=fall->next; ++ fall=oldf; ++ oldf->globs=sial_add_globals(oldf->fgvs); ++ } ++ else { ++ ++ fall=fall->next; ++ } ++ ++ /* and free fd */ ++ sial_freefile(fd); ++ } ++ sial_flushtdefs(); ++ sial_flushmacs(mtag); ++ return 0; ++} ++ ++/* scan the current list of functions for the one named name */ ++static func * ++sial_getfbyname(char *name, fdata *thisfd) ++{ ++fdata *fd; ++ ++ /* check localy first */ ++ if(thisfd) { ++ ++ for(fd=fall; fd; fd=fd->next) { ++ ++ func *f; ++ ++ if(fd->isdso) continue; ++ ++ /* skip non-local function */ ++ if(thisfd != fd) continue; ++ ++ for(f=fd->funcs; f; f=f->next) { ++ ++ if(!strcmp(f->name, name)) return f; ++ } ++ } ++ } ++ ++ /* check global function */ ++ for(fd=fall; fd; fd=fd->next) { ++ ++ func *f; ++ ++ if(fd->isdso) continue; ++ ++ for(f=fd->funcs; f; f=f->next) { ++ ++ /* skip static functions not local */ ++ if(f->local) continue; ++ ++ if(!strcmp(f->name, name)) return f; ++ } ++ } ++ return 0; ++} ++ ++/* external boolean to check if a function exists */ ++int sial_funcexists(char *name) ++{ ++ return !(!(sial_getfbyname(name, 0))); ++} ++ ++/* ++ This combined set of functions enables the aplication to ++ get alist of currently defined commands that have a help. 
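++
++ The expected calling pattern is a plain scan, for instance:
++
++     char *name;
++     sial_rstscan();
++     while((name = sial_getnxtfct()) != 0) {
++         ... each "name" has both name() and name_help() defined ...
++     }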
++*/ ++static fdata *nxtfdata=0; ++static func *nxtfunc; ++void ++sial_rstscan(void) ++{ ++ nxtfdata=0; ++} ++char * ++sial_getnxtfct(void) ++{ ++ if(!nxtfdata) { ++ ++ if(!fall) return 0; ++ nxtfdata=fall; ++ nxtfunc=nxtfdata->funcs;; ++ } ++ ++ while(nxtfdata) { ++ ++ if(!nxtfdata->isdso) for(; nxtfunc; nxtfunc=nxtfunc->next) { ++ ++ int l=strlen(nxtfunc->name); ++ ++ if(l > 5) { ++ ++ if(!strcmp(nxtfunc->name+l-5, "_help")) { ++ ++ char buf[MAX_SYMNAMELEN+1]; ++ func *ret; ++ ++ strncpy(buf, nxtfunc->name, l-5); ++ buf[l-5]='\0'; ++ ++ /* make sure we do have the function */ ++ if((ret=sial_getfbyname(buf, 0))) { ++ ++ nxtfunc=nxtfunc->next; ++ return ret->name; ++ } ++ } ++ } ++ } ++ nxtfdata=nxtfdata->next; ++ if(nxtfdata) nxtfunc=nxtfdata->funcs; ++ } ++ sial_rstscan(); ++ return 0; ++} ++ ++/* ++ This is the entry point for the error handling ++*/ ++void ++sial_exevi(char *fname, int line) ++{ ++char buf[200]; ++char *ed=getenv("EDITOR"); ++ ++ if(!ed) ed="vi"; ++ snprintf(buf, sizeof(buf), "%s +%d %s", ed, line, fname); ++ system(buf); ++ sial_load(fname); ++} ++ ++/* ++ This funciton is called to start a vi session on a function ++ (file=0) or a file (file=1); ++*/ ++void ++sial_vi(char *fname, int file) ++{ ++int line, freeit=0; ++char *filename; ++ ++ if(file) { ++ ++ filename=sial_filempath(fname); ++ ++ if(!filename) { ++ ++ sial_msg("File not found : %s\n", fname); ++ return; ++ ++ } ++ ++ line=1; ++ freeit=1; ++ ++ ++ } else { ++ ++ func *f=sial_getfbyname(fname, 0); ++ ++ if(!f) { ++ ++ sial_msg("Function not found : %s\n", fname); ++ return; ++ ++ } else { ++ ++ filename=f->pos.file; ++ line=f->pos.line; ++ ++ } ++ } ++ ++ sial_exevi(filename, line); ++ ++ if(freeit) sial_free(filename); ++ ++} ++ ++char * ++sial_getfile(char *fname) ++{ ++func *f; ++ ++ if((f=sial_getfbyname(fname, 0))) return f->file->fname; ++ return 0; ++} ++ ++static void ++sial_insertfunc(func *f) ++{ ++ f->next=fall->funcs; ++ fall->funcs=f; ++} ++ ++value_t * ++sial_execmcfunc(func *f, value_t **vp) ++{ ++value_t *retval; ++jmp_buf env; ++var_t*parm=0; ++int i=0; ++char *ocurp, *curp; ++ ++ /* set the current path */ ++ { ++ char *p; ++ ++ curp=sial_strdup(f->file->fname); ++ if((p=strrchr(curp, '/'))) *p='\0'; ++ ocurp=sial_curp(curp); ++ } ++ ++ ++ if(!(setjmp(env))) { ++ ++ /* push a return level */ ++ sial_pushjmp(J_RETURN, &env, &retval); ++ ++ /* Now it's ok to add any static vars for this file */ ++ sial_addsvs(S_FILE, f->file->fsvs); ++ ++ /* we need to create brand new variables with ++ the name of the declared arguments */ ++ if(f->varlist) { ++ ++ for(i=0, parm=f->varlist->next; ++ vp && (parm != f->varlist) && vp[i]; ++ parm=parm->next, i++) { ++ ++ var_t*var=sial_newvar(parm->name); ++ ++ var->v=sial_cloneval(parm->v); ++ sial_chkandconvert(var->v, vp[i]); ++ sial_add_auto(var); ++ sial_freeval(vp[i]); ++ ++ } ++ } ++ if(vp && vp[i]) { ++ ++ sial_warning("Too many parameters to function call"); ++ ++ } else if(parm != f->varlist) { ++ ++ sial_warning("Not enough parameters for function call"); ++ } ++ ++ /* we execute the buddy of the function */ ++ retval=NODE_EXE(f->body); ++ ++ sial_freeval(retval); ++ ++ retval=0; ++ ++ sial_popjmp(J_RETURN); ++ } ++ ++ /* make sure non void function do return something */ ++ if(!retval) { ++ ++ if(!sial_isvoid(f->rvar->v->type.typattr)) ++ ++ sial_rwarning(&f->pos, "Non void function should return a value."); ++ ++ } else { ++ ++ /* type checking here ... 
*/ ++ } ++ ++ sial_curp(ocurp); ++ sial_free(curp); ++ ++ return retval; ++} ++ ++/* this is the externalized function that the API users call to execute ++ a function */ ++ull ++sial_exefunc(char *fname, value_t **vp) ++{ ++func *f; ++ull ret; ++ ++ if(!sial_chkfname(fname, 0)) ++ sial_warning("Unknown function called: %s\n", fname); ++ ++ /* builtin vs cmc ...*/ ++ if((f=sial_getfbyname(fname, 0))) ret=sial_getval(sial_execmcfunc(f, vp)); ++ else ret=sial_getval(sial_exebfunc(fname, vp)); ++ /* sial_freeval(v); */ ++ return ret; ++} ++ ++value_t * ++sial_exefunc_common(char *fname, node_t*parms, fdata *fd) ++{ ++int i; ++node_t*args; ++value_t *vp[BT_MAXARGS+1]; ++func *f; ++ ++ /* We most execute before pushing the S_FILE vars so the the ++ local variable for the caller can still be accessed */ ++ for(i=0,args=parms; args; args=args->next) { ++ ++ if(i==BT_MAXARGS) { ++ ++ sial_error("Max number of parameters exceeded [%d]", BT_MAXARGS); ++ } ++ vp[i++]=NODE_EXE(args); ++ ++ } ++ ++ /* null out the rest */ ++ for(;i<=BT_MAXARGS;i++) vp[i]=0; ++ ++ /* builtin vs cmc ...*/ ++ if((f=sial_getfbyname(fname, fd))) return sial_execmcfunc(f, vp); ++ else return sial_exebfunc(fname, vp); ++} ++ ++ ++/* this function is called by the sial_exeop() through a CALL op. */ ++value_t * ++sial_docall(node_t*name, node_t*parms, void *arg) ++{ ++fdata *fd = arg; ++char *sname=sial_vartofunc(name); ++value_t *v=0; ++ ++ if(sial_chkfname(sname, fd)) { ++ ++ v=sial_exefunc_common(sname, parms, fd); ++ ++ } ++ else sial_rerror(&name->pos, "Unknown function being called:[%s]", sname, fd); ++ /* sial_vartofunc() allocates the name */ ++ /* we don't free this item if mem debug has been set */ ++ if(!sial_ismemdebug()) sial_free(sname); ++ return v; ++ ++} ++ ++int ++sial_newfunc(var_t*fvar, node_t* body) ++{ ++var_t*v=fvar->next; ++ ++ if(v == fvar) { ++ ++ sial_freevar(v); ++ NODE_FREE(body); ++ sial_error("Syntax error in function declaration"); ++ ++ }else{ ++ ++ func *fn, *fi ; ++ ++ sial_freevar(fvar); ++ ++ /* we do the func insertion first so that if we have a problem ++ we can jump our of the parser using the sial_parback() function ++ which will deallocate the stuff */ ++ ++ fn=sial_alloc(sizeof(func)); ++ if(sial_isstatic(v->v->type.typattr)) fn->local=1; ++ fn->rvar=v; ++ fn->varlist=v->dv->fargs; ++ ++ /* check for func(void) */ ++ if(fn->varlist && fn->varlist->next != fn->varlist) { ++ ++ var_t*v=fn->varlist->next; ++ ++ if(v->v->type.type != V_REF && sial_isvoid(v->v->type.typattr)) { ++ ++ /* cut the chain here */ ++ if(v->next != fn->varlist) { ++ ++ sial_error("function parameter cannot have 'void' type"); ++ } ++ sial_freesvs(fn->varlist); ++ fn->varlist=0; ++ } ++ } ++ ++ v->dv->fargs=0; ++ fn->name=sial_strdup(v->name); ++ fn->local=sial_isstatic(v->v->type.typattr)?1:0; ++ fn->body=body; ++ fn->file=fall; ++ ++ /* the position of the function is the position of the var_t*/ ++ memcpy(&fn->pos, &v->dv->pos, sizeof(srcpos_t)); ++ ++ /* emit a warning for variables in the main statement group that ++ shadow ont of the parameters */ ++ if(fn->varlist) { ++ ++ var_t*v; ++ ++ for(v=fn->varlist->next; v!=fn->varlist; v=v->next) { ++ ++ var_t*vs; ++ ++ if((vs=sial_inlist(v->name, sial_getsgrp_avs(body))) || ++ (vs=sial_inlist(v->name, sial_getsgrp_svs(body)))) { ++ ++ sial_rwarning(&vs->dv->pos, "variable '%s' shadow's a function parameter" ++ , v->name); ++ ++ } ++ } ++ } ++ ++ if((fi=sial_getfbyname(fn->name, fall))) { ++ ++ /* check for local conflicts */ ++ if(fi->file == fn->file) { ++ ++ 
sial_insertfunc(fn); ++ sial_rerror(&fn->pos, "Function '%s' redefinition, first defined in file '%s' line %d" ++ , fn->name, fi->pos.file, fi->pos.line); ++ ++ /* check for global conflicts */ ++ } else if(!fn->local) { ++ ++ sial_insertfunc(fn); ++ sial_rerror(&fn->pos, "Function '%s' already defined in file %s, line %d" ++ , fn->name, fi->pos.file, fi->pos.line); ++ ++ } /* else... it's a static that shadows a global somewhere else. So it's ok */ ++ ++ } ++ ++ /* Searching is all done, so insert it */ ++ sial_insertfunc(fn); ++ ++ /* check out the storage class. Only 'static' is supported */ ++ if(!sial_isjuststatic(v->v->type.typattr)) { ++ ++ sial_error("Only 'static' storage class is valid for a function"); ++ } ++ } ++ return 1; ++} ++ ++/* check for the existance of a function in the list */ ++int ++sial_chkfname(char *fname, void *vfd) ++{ ++fdata *fd=(fdata *)vfd; ++ ++ /* check script functions */ ++ if(!sial_getfbyname(fname, fd)) { ++ ++ /* check builtin list */ ++ if(sial_chkbuiltin(fname)) return 1; ++ return 0; ++ ++ } ++ return 1; ++} ++ ++/* ++ ++ Thsi is the interface function with the command interpreter. ++ It needs to be able to execute a function giving a name and ++ passing some random parameters to it. ++ ++ A return of 0 means "no such function". ++*/ ++int ++sial_runcmd(char *fname, var_t*args) ++{ ++ if(sial_chkfname(fname, 0)) { ++ ++ value_t *val; ++ int *exval; ++ jmp_buf exitjmp; ++ void *vp; ++ ull ret; ++ sact *sa; ++ ++ /* make sure arguments are available in the global vars */ ++ vp=sial_add_globals(args); ++ ++ /* we set the exception handler too... */ ++ sa=sial_setexcept(); ++ ++ if(!setjmp(exitjmp)) { ++ ++ sial_pushjmp(J_EXIT, &exitjmp, &exval); ++ ++ /* we need to create a var with that name */ ++ val=sial_exefunc_common(fname, 0, 0); ++ ++ sial_popjmp(J_EXIT); ++ ++ if(val) { ++ ++ ret=unival(val); ++ sial_freeval(val); ++ } ++ else ret=0; ++ } ++ else { ++ ++ ret=*exval; ++ } ++ ++ /* remove exception handlers and restore previous handlers */ ++ sial_rmexcept(sa); ++ ++ /* remove args from global vars */ ++ sial_rm_globals(vp); ++ return ret; ++ } ++ return 0; ++} ++ +--- crash/extensions/libsial/sial_op.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_op.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,904 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. 
++ */ ++#include "sial.h" ++#include "sial.tab.h" ++#include ++#include ++ ++#define MAXPARMS 10 ++ ++typedef struct { ++ ++ int op; /* operator */ ++ int np; /* number of operands */ ++ node_t*parms[MAXPARMS]; /* operands */ ++ ++ srcpos_t pos; ++ ++} oper; ++ ++#define P1 (o->parms[0]) ++#define P2 (o->parms[1]) ++#define P3 (o->parms[2]) ++#define P4 (o->parms[3]) ++ ++#define V1 (v1?v1:(v1=NODE_EXE(P1))) ++#define V2 (v2?v2:(v2=NODE_EXE(P2))) ++#define V3 (v3?v3:(v3=NODE_EXE(P3))) ++#define V4 (v4?v4:(v4=NODE_EXE(P4))) ++ ++#define L1 (unival(V1)) ++#define L2 (unival(V2)) ++#define L3 (unival(V3)) ++#define L4 (unival(V4)) ++ ++#define S1 ((V1)->v.data) ++#define S2 ((V2)->v.data) ++#define S3 ((V3)->v.data) ++#define S4 ((V4)->v.data) ++ ++void sial_do_deref(int n, value_t *v, value_t *ref); ++ul ++sial_bool(value_t *v) ++{ ++ switch(v->type.type) { ++ ++ case V_BASE: ++ switch(v->type.size) { ++ case 1: return !(!(v->v.uc)); ++ case 2: return !(!(v->v.us)); ++ case 4: return !(!(v->v.ul)); ++ case 8: return !(!(v->v.ull)); ++ default: sial_error("Oops sial_bool()[%d]", v->type.size); break; ++ } ++ case V_STRING : return !(!(*((char*)(v->v.data)))); ++ case V_REF: return sial_defbsize()==8?(!(!(v->v.ull))):(!(!(v->v.ul))); ++ default : ++ ++ sial_error("Invalid operand for boolean expression"); ++ return 0; ++ } ++} ++ ++static int cops[]={BAND,BOR,NOT,LT,LE,EQ,GE,GT,NE,CEXPR}; ++#define NCOPS (sizeof(cops)/sizeof(cops[0])) ++ ++static int ++is_cond(int op) ++{ ++int i; ++ ++ for(i=0;itype)) { ++ ++ case 1: v1->v.uc=rl; break; ++ case 2: v1->v.us=rl; break; ++ case 4: v1->v.ul=rl; break; ++ case 8: v1->v.ull=rl; break; ++ ++ } ++ /* the result of an assignment cannot be a lvalue_t */ ++ v1->set=0; ++} ++ ++#define anyop(t) (V1->type.type==t || (o->np>1 && V2->type.type==t)) ++ ++typedef struct { ++ node_t*index; ++ node_t*var; ++ srcpos_t pos; ++} index_t ; ++ ++static value_t * ++sial_exeindex(index_t *i) ++{ ++value_t *var; ++value_t *vi=NODE_EXE(i->index); ++value_t *v; ++srcpos_t p; ++ ++ sial_curpos(&i->pos, &p); ++ ++ /* we need to make believe it's been initiazed */ ++ sial_setini(i->var); ++ var=NODE_EXE(i->var); ++ ++ /* check the type of the variable */ ++ /* if it's a pointer then index through the image */ ++ if(var->type.type==V_REF) { ++ ++ int size; ++ int n=sial_getval(vi); ++ value_t *ref; ++ ++ /* if this is an array and we're not at the rightmost index */ ++ if(var->type.idxlst && var->type.idxlst[1]) { ++ ++ int i, size=var->type.size; ++ ++ v=sial_cloneval(var); ++ ++ v->type.idxlst[0]=0; ++ for(i=1; var->type.idxlst[i]; i++) { ++ ++ size *= var->type.idxlst[i]; ++ v->type.idxlst[i]=var->type.idxlst[i+1]; ++ } ++ ++ if(sial_defbsize()==4) { ++ ++ v->v.ul+=size*n; ++ v->mem=v->v.ul; ++ ++ } else { ++ ++ v->v.ull+=size*n; ++ v->mem=v->v.ull; ++ } ++ ++ ++ } else { ++ ++ v=sial_newval(); ++ ref=sial_cloneval(var); ++ ++ if(var->type.ref==1) size=var->type.size; ++ else size=sial_defbsize(); ++ ++ if(sial_defbsize()==4) { ++ ++ ref->v.ul+=size*n; ++ ref->mem=ref->v.ul; ++ ++ } else { ++ ++ ref->v.ull+=size*n; ++ ref->mem=ref->v.ull; ++ } ++ sial_do_deref(1, v, ref); ++ sial_freeval(ref); ++ } ++ ++ } else { ++ ++ v=sial_newval(); ++ ++ /* use dynamic indexing aka awk indexing */ ++ sial_valindex(var, vi, v); ++ } ++ ++ /* discard expression results */ ++ sial_freeval(var); ++ sial_freeval(vi); ++ sial_curpos(&p, 0); ++ ++ return v; ++} ++ ++void ++sial_freeindex(index_t *i) ++{ ++ NODE_FREE(i->index); ++ NODE_FREE(i->var); ++ sial_free(i); ++} ++ ++node_t* 
++sial_newindex(node_t*var, node_t*idx) ++{ ++index_t *i=sial_alloc(sizeof(index_t )); ++node_t*n=sial_newnode(); ++ ++ i->index=idx; ++ i->var=var; ++ n->exe=(xfct_t)sial_exeindex; ++ n->free=(ffct_t)sial_freeindex; ++ n->data=i; ++ sial_setpos(&i->pos); ++ return n; ++} ++ ++typedef struct { ++ node_t*fname; ++ node_t*parms; ++ srcpos_t pos; ++ void *file; ++} call; ++ ++static value_t * ++sial_execall(call *c) ++{ ++value_t *rv; ++srcpos_t p; ++ ++ sial_curpos(&c->pos, &p); ++ rv=sial_docall(c->fname, c->parms, c->file); ++ sial_curpos(&p, 0); ++ return rv; ++} ++ ++void ++sial_freecall(call *c) ++{ ++ NODE_FREE(c->fname); ++ sial_free_siblings(c->parms); ++ sial_free(c); ++} ++ ++node_t* ++sial_newcall(node_t* fname, node_t* parms) ++{ ++node_t*n=sial_newnode(); ++call *c=sial_alloc(sizeof(call)); ++ ++ c->fname=fname; ++ c->file=sial_getcurfile(); ++ c->parms=parms; ++ n->exe=(xfct_t)sial_execall; ++ n->free=(ffct_t)sial_freecall; ++ n->data=c; ++ sial_setpos(&c->pos); ++ return n; ++} ++ ++typedef struct { ++ node_t*expr; ++ srcpos_t pos; ++} adrof; ++ ++static value_t * ++sial_exeadrof(adrof *a) ++{ ++value_t *rv, *v=NODE_EXE(a->expr); ++ ++#if 0 ++ /* we can only do this op on something that came from system image ++ Must not allow creation of references to local variable */ ++ if(!v->mem) { ++ ++ sial_freeval(v); ++ sial_rerror(&a->pos, "Invalid operand to '&' operator"); ++ ++ } ++#endif ++ /* create the reference */ ++ rv=sial_newval(); ++ sial_duptype(&rv->type, &v->type); ++ sial_pushref(&rv->type, 1); ++ ++ /* remmember position in image */ ++ if(sial_defbsize()==8) rv->v.ull=v->mem; ++ else rv->v.ul=v->mem; ++ rv->mem=0; ++ ++ sial_freeval(v); ++ ++ return rv; ++} ++ ++void ++sial_freeadrof(adrof *a) ++{ ++ NODE_FREE(a->expr); ++ sial_free(a); ++} ++ ++node_t* ++sial_newadrof(node_t* expr) ++{ ++node_t*n=sial_newnode(); ++adrof *a=sial_alloc(sizeof(adrof)); ++ ++ a->expr=expr; ++ n->exe=(xfct_t)sial_exeadrof; ++ n->free=(ffct_t)sial_freeadrof; ++ n->data=a; ++ sial_setpos(&a->pos); ++ return n; ++} ++ ++static int ++sial_reftobase(value_t *v) ++{ ++int idx= v->type.idx; ++ ++ if(v->type.type==V_REF) { ++ ++ if(sial_defbsize()==4) ++ v->type.idx=B_UL; ++ else ++ v->type.idx=B_ULL; ++ } ++ return idx; ++} ++ ++static value_t* ++sial_docomp(int op, value_t *v1, value_t *v2) ++{ ++ ++ /* if one parameter is string then both must be */ ++ if(v1->type.type == V_STRING || v2->type.type == V_STRING) { ++ ++ if(v1->type.type != V_STRING || v2->type.type != V_STRING) { ++ ++ sial_error("Invalid condition arguments"); ++ } ++ else { ++ ++ switch(op) { ++ ++ case EQ: { /* expr == expr */ ++ ++ return sial_makebtype(!strcmp(v1->v.data, v2->v.data)); ++ ++ } ++ case GT: case GE: { /* expr > expr */ ++ ++ return sial_makebtype(strcmp(v1->v.data, v2->v.data) > 0); ++ ++ } ++ case LE: case LT: { /* expr <= expr */ ++ ++ return sial_makebtype(strcmp(v1->v.data, v2->v.data) < 0); ++ ++ } ++ case NE: { /* expr != expr */ ++ ++ return sial_makebtype(strcmp(v1->v.data, v2->v.data)); ++ ++ } ++ default: { ++ ++ sial_error("Oops conditional unknown 1"); ++ ++ } ++ } ++ } ++ ++ } ++ else { ++ ++ int idx1, idx2; ++ value_t *v=sial_newval(); ++ ++ /* make sure pointers are forced to proper basetype ++ before calling sial_baseop()*/ ++ idx1=sial_reftobase(v1); ++ idx2=sial_reftobase(v2); ++ ++ ++ switch(op) { ++ ++ case EQ: ++ case GT: ++ case GE: ++ case LE: ++ case LT: ++ case NE: ++ sial_baseop(op, v1, v2, v); ++ break; ++ default: { ++ ++ sial_error("Oops conditional unknown 2"); ++ ++ } ++ } ++ 
v1->type.idx=idx1; ++ v2->type.idx=idx2; ++ return v; ++ } ++ return 0; ++} ++ ++static value_t * ++sial_exeop(oper *o) ++{ ++value_t *v=0, *v1=0, *v2=0, *v3=0, *v4=0; ++int top; ++srcpos_t p; ++ ++ sial_curpos(&o->pos, &p); ++ ++ /* if ME (op on myself) operator, translate to normal operator ++ we will re-assign onto self when done */ ++ ++ top=getop(o->op); ++ ++ if(top == ASSIGN) { ++ ++ goto doop; ++ ++ } else if(top == IN) { ++ ++ /* the val in array[] test is valid for anything but struct/union */ ++ v=sial_makebtype((ull)sial_lookuparray(P1,P2)); ++ ++ } ++ else if(is_cond(top)) { ++ ++ /* the operands are eithr BASE (integer) or REF (pointer) */ ++ /* all conditional operators accept a mixture of pointers and integer */ ++ /* set the return as a basetype even if bool */ ++ ++ switch(top) { ++ ++ case CEXPR: { /* conditional expression expr ? : stmt : stmt */ ++ ++ if(sial_bool(V1)) { ++ ++ v=sial_cloneval(V2); ++ ++ } else { ++ ++ v=sial_cloneval(V3); ++ ++ } ++ ++ } ++ break; ++ case BOR: { /* a || b */ ++ ++ v=sial_makebtype((ull)(sial_bool(V1) || sial_bool(V2))); ++ ++ } ++ break; ++ case BAND: { /* a && b */ ++ ++ v=sial_makebtype((ull)(sial_bool(V1) && sial_bool(V2))); ++ ++ } ++ break; ++ case NOT: { /* ! expr */ ++ ++ v=sial_makebtype((ull)(! sial_bool(V1))); ++ ++ } ++ break; ++ default: { ++ ++ v=sial_docomp(top, V1, V2); ++ ++ } ++ } ++ ++ } else if(anyop(V_STRING)) { ++ ++ if(top == ADD) ++ { ++ char *buf; ++ ++ if(V1->type.type != V_STRING || V2->type.type != V_STRING) { ++ ++ sial_rerror(&P1->pos, "String concatenation needs two strings!"); ++ ++ } ++ buf=sial_alloc(strlen(S1)+strlen(S2)+1); ++ strcpy(buf, S1); ++ strcat(buf, S2); ++ v=sial_makestr(buf); ++ sial_free(buf); ++ } ++ else { ++ ++ sial_rerror(&P1->pos, "Invalid string operator"); ++ ++ } ++ } ++ /* arithmetic operator */ ++ else if(anyop(V_REF)) { ++ ++ int size; ++ value_t *vt; ++ ++ /* make sure we have the base type second */ ++ if(V1->type.type != V_REF) { vt=V1; v1=V2; v2=vt; } ++ ++ ++ if(V1->type.type == V_BASE) { ++inval: ++ sial_error("Invalid operand on pointer operation"); ++ } ++ ++ /* get the size of whas we reference */ ++ size=V1->type.size; ++ ++ switch(top) { ++ case ADD: { /* expr + expr */ ++ /* adding two pointers ? */ ++ if(V2->type.type == V_REF) goto inval; ++ ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) + L2 * size); ++ } ++ break; ++ case SUB: { /* expr - expr */ ++ /* different results if mixed types. 
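++ pointer minus integer steps by the size of the referenced type,
++ while pointer minus pointer gives the raw difference of the two
++ addresses;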
++ if both are pointers then result is a V_BASE */ ++ if(V2->type.type == V_REF) ++ v=sial_makebtype(L1 - L2); ++ ++ else { ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) - L2 * size); ++ } ++ } ++ break; ++ case PREDECR: { /* pre is easy */ ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) - size); ++ sial_setval(v1, v); ++ } ++ break; ++ case PREINCR: { ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) + size); ++ sial_setval(v1, v); ++ } ++ break; ++ case POSTINCR: { ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) + size); ++ sial_setval(v1, v); ++ sial_transfer(v, v1, unival(v1)); ++ } ++ break; ++ case POSTDECR: { ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) - size); ++ sial_setval(v1, v); ++ sial_transfer(v, v1, unival(v1)); ++ } ++ break; ++ default: ++ sial_error("Invalid operation on pointer [%d]",top); ++ } ++ } ++ else { ++ ++ /* both operands are V_BASE */ ++ switch(top) { ++ ++ /* for mod and div, we check for divide by zero */ ++ case MOD: case DIV: ++ if(!L2) { ++ sial_rerror(&P1->pos, "Mod by zero"); ++ } ++ case ADD: case SUB: case MUL: case XOR: ++ case OR: case AND: case SHL: case SHR: ++ { ++ sial_baseop(top, V1, V2, v=sial_newval()); ++ } ++ break; ++ case UMINUS: { ++ ++ value_t *v0=sial_newval(); ++ sial_defbtype(v0, (ull)0); ++ /* keep original type of v1 */ ++ v=sial_newval(); ++ sial_duptype(&v0->type, &V1->type); ++ sial_duptype(&v->type, &V1->type); ++ sial_baseop(SUB, v0, V1, v); ++ sial_freeval(v0); ++ /* must make result signed */ ++ sial_mkvsigned(v); ++ } ++ break; ++ case FLIP: { ++ ++ value_t *v0=sial_newval(); ++ sial_defbtype(v0, (ull)0xffffffffffffffffll); ++ /* keep original type of v1 */ ++ sial_duptype(&v0->type, &V1->type); ++ sial_baseop(XOR, v0, V1, v=sial_newval()); ++ sial_freeval(v0); ++ } ++ break; ++ case PREDECR: { /* pre is easy */ ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) - 1); ++ sial_setval(v1, v); ++ } ++ break; ++ case PREINCR: { ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) + 1); ++ sial_setval(v1, v); ++ } ++ break; ++ case POSTINCR: { ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) + 1); ++ sial_setval(v1, v); ++ sial_transfer(v, v1, unival(v1)); ++ } ++ break; ++ case POSTDECR: { ++ V1; ++ sial_transfer(v=sial_newval(), v1, ++ unival(v1) - 1); ++ sial_setval(v1, v); ++ sial_transfer(v, v1, unival(v1)); ++ } ++ break; ++ default: sial_rerror(&P1->pos, "Oops ops ! 
[%d]", top); ++ } ++ } ++doop: ++ /* need to assign the value_t back to P1 */ ++ if(top != o->op || top==ASSIGN) { ++ ++ /* in the case the Lvalue_t is a variable , bypass execution and set ini */ ++ if(P1->exe == sial_exevar) { ++ ++ char *name=NODE_NAME(P1); ++ var_t*va=sial_getvarbyname(name, 0, 0); ++ value_t *vp; ++ ++ sial_free(name); ++ ++ if(top != o->op) vp=v; ++ else vp=V2; ++ ++ sial_chkandconvert(va->v, vp); ++ ++ sial_freeval(v); ++ v=sial_cloneval(va->v); ++ va->ini=1; ++ ++ } else { ++ ++ if(!(V1->set)) { ++ ++ sial_rerror(&P1->pos, "Not Lvalue_t on assignment"); ++ ++ } ++ else { ++ ++ /* if it's a Me-op then v is already set */ ++ V1; ++ if(top != o->op) { ++ sial_setval(v1, v); ++ } else { ++ sial_setval(v1, V2); ++ v=sial_cloneval(V2); ++ } ++ ++ } ++ } ++ /* the result of a assignment if not an Lvalue_t */ ++ v->set=0; ++ } ++ sial_freeval(v1); ++ sial_freeval(v2); ++ sial_freeval(v3); ++ sial_freeval(v4); ++ sial_setpos(&p); ++ return v; ++} ++ ++void ++sial_freeop(oper *o) ++{ ++int i; ++ ++ for(i=0;inp;i++) NODE_FREE(o->parms[i]); ++ sial_free(o); ++} ++ ++node_t* ++sial_newop(int op, int nargs, ...) ++{ ++va_list ap; ++node_t*n=sial_newnode(); ++oper *o=sial_alloc(sizeof(oper)); ++int i; ++ ++ o->op=op; ++ o->np=nargs; ++ ++ sial_setpos(&o->pos); ++ ++ va_start(ap, nargs); ++ ++ for(i=0 ; iparms[i]=va_arg(ap, node_t*))) break;; ++ } ++ ++ n->exe=(xfct_t)sial_exeop; ++ n->free=(ffct_t)sial_freeop; ++ n->data=o; ++ ++ va_end(ap); ++ return n; ++} ++ ++/* mult is a special case since the parse always return a PTR token ++ for the '*' signed. The PTR token value_t is the number of '* found. ++*/ ++node_t* ++sial_newmult(node_t*n1, node_t*n2, int n) ++{ ++ if(n>1) { ++ ++ sial_error("Syntax error"); ++ } ++ return sial_newop(MUL, 2, n1, n2); ++} ++/* ++ This function is called when we want to set a value_t in live memory ++ using a pointer to it. ++*/ ++static void ++sial_setderef(value_t *v1, value_t *v2) ++{ ++ void *sial_adrval(value_t *); ++ sial_transval(v2->type.size, v1->type.size, v2, sial_issigned(v2->type.typattr)); ++ API_PUTMEM(v1->mem, sial_adrval(v2), v2->type.size); ++} ++ ++/* ++ Do a de-referencing from a pointer (ref) and put the result in v. ++*/ ++typedef struct { ++ int lev; ++ node_t*n; ++} ptrto; ++ ++void ++sial_do_deref(int n, value_t *v, value_t *ref) ++{ ++ull madr, new_madr; ++ ++ if(n > ref->type.ref) { ++ ++ sial_error("Too many levels of dereference"); ++ ++ }else { ++ ++ ++ if(sial_defbsize()==4) madr=(ull)ref->v.ul; ++ else madr=ref->v.ull; ++ ++ /* copy the target type to the returned value_t's type_t*/ ++ sial_duptype(&v->type, &ref->type); ++ ++ /* do a number of deferences according to PTR value_t */ ++ while(n--) { ++ ++ sial_popref(&v->type, 1); ++ ++ if(!v->type.ref) { ++ ++ /* make sure the pointer is pointing into the vmcore */ ++ if(is_ctype(v->type.type)) { ++ ++ v->v.data=sial_alloc(v->type.size); ++ sial_getmem(madr, v->v.data, v->type.size); ++ ++ } else { ++ ++ /* get the data from the system image */ ++ switch(TYPE_SIZE(&v->type)) { ++ ++ case 1: sial_getmem(madr, &v->v.uc, 1); ++ break; ++ case 2: sial_getmem(madr, &v->v.us, 2); ++ break; ++ case 4: sial_getmem(madr, &v->v.ul, 4); ++ break; ++ case 8: sial_getmem(madr, &v->v.ull, 8); ++ break; ++ ++ } ++ } ++ } ++ else { ++ ++ /* get the pointer at this address */ ++ if(sial_defbsize()==4) { ++ ++ sial_getmem(madr, &v->v.ul, 4); ++ new_madr=v->v.ul; ++ ++ } else { ++ ++ sial_getmem(madr, &v->v.ull, 8); ++ new_madr=v->v.ull; ++ } ++ } ++ ++ /* remember this address. 
For the '&' operator */ ++ v->mem=madr; ++ madr=new_madr; ++ } ++ } ++ ++ /* we can always assign to a reference */ ++ v->set=1; ++ v->setval=v; ++ v->setfct=sial_setderef; ++} ++ ++static value_t * ++sial_exepto(ptrto *pto) ++{ ++value_t *v=sial_newval(); ++int n=pto->lev; ++value_t *ref=NODE_EXE(pto->n); ++ ++ sial_do_deref(n, v, ref); ++ sial_freeval(ref); ++ return v; ++} ++ ++static void ++sial_freepto(ptrto *pto) ++{ ++ NODE_FREE(pto->n); ++ sial_free(pto); ++} ++ ++ ++/* same thing for the ptrto operator */ ++node_t* ++sial_newptrto(int lev, node_t*n) ++{ ++ptrto *pto=sial_alloc(sizeof(ptrto)); ++node_t*nn=sial_newnode(); ++ ++ pto->lev=lev; ++ pto->n=n; ++ nn->exe=(xfct_t)sial_exepto; ++ nn->free=(ffct_t)sial_freepto; ++ nn->data=pto; ++ return nn; ++} +--- crash/extensions/libsial/sial_member.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_member.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,321 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include ++#include "sial.h" ++#include "sial.tab.h" ++ ++/* these function are used to access and set members in structs */ ++ ++/* define a member access */ ++typedef struct mem { ++ char *name; /* member name */ ++ int dir; /* direct/indirect access */ ++ node_t*expr; /* expression node_t*/ ++ stmember_t*stm; /* associated member information */ ++ char *local; /* local memory or ... */ ++ ull mem; /* ... system memory access */ ++ srcpos_t p; ++} mem; ++ ++void * ++sial_adrval(value_t *v) ++{ ++ switch(v->type.size) { ++ ++ case 1: return &v->v.uc; ++ case 2: return &v->v.us; ++ case 4: return &v->v.ul; ++ case 8: return &v->v.ull; ++ } ++ sial_error("Oops sial_adrval"); ++ return 0; ++} ++ ++/* some API secondary entry points */ ++void sial_member_soffset(member_t*m, int offset) { m->offset=offset; } ++void sial_member_ssize(member_t*m, int size) { m->size=size; } ++void sial_member_sfbit(member_t*m, int fbit) { m->fbit=fbit; } ++void sial_member_snbits(member_t*m, int nbits) { m->nbits=nbits; } ++void sial_member_sname(member_t*m, char *name) { m->name=sial_strdup(name); } ++ ++ ++void ++sial_setmem(mem *m, value_t *v) ++{ ++stmember_t*stm=m->stm; ++ ++ /* check type compatibility. 
Ctypes should point to the same stinfo...*/ ++ if(stm->type.type != v->type.type ++ /* pointer most point to the same type of object */ ++ || (v->type.type==V_REF && v->type.rtype != stm->type.rtype) ++ /* ctypes should point to the same stinfo */ ++ || (is_ctype(v->type.type) && v->type.idx != stm->type.idx)) { ++ ++ sial_error("Incompatible types for assignment"); ++ ++ } ++ ++ if(stm->m.nbits) { ++ ++ ull dvalue_t=0; ++ ++ if(v->type.type!=V_BASE) { ++ ++ sial_error("Invalid assignment to bit field"); ++ ++ } ++ ++ /* do the bit gymnastic */ ++ /* we need to create a ull that contain the current ++ bit of teh destination */ ++ if(m->local) { ++ ++ memmove(m->local+stm->m.offset, ((char*)(&dvalue_t))+8-stm->m.size, stm->m.size); ++ dvalue_t=set_bit_value_t(dvalue_t, v->v.ull, stm->m.nbits, stm->m.fbit); ++ memmove(((char*)(&dvalue_t))+8-stm->m.size, m->local+stm->m.offset, stm->m.size); ++ ++ } ++ ++ if(m->mem) { ++ ++ API_GETMEM(m->mem+stm->m.offset, ((char*)(&dvalue_t))+8-stm->m.size, stm->m.size); ++ dvalue_t=set_bit_value_t(dvalue_t, v->v.ull, stm->m.nbits, stm->m.fbit); ++ API_PUTMEM(m->mem+stm->m.offset, ((char*)(&dvalue_t))+8-stm->m.size, stm->m.size); ++ ++ } ++ ++ ++ } else { ++ ++ /* move the data */ ++ if(is_ctype(v->type.type)) { ++ ++ if(m->local) { ++ ++ memmove(m->local+stm->m.offset, v->v.data, stm->m.size); ++ ++ } ++ if(m->mem) { ++ ++ API_PUTMEM(m->mem+stm->m.offset, v->v.data, stm->m.size); ++ } ++ ++ } else { ++ ++ sial_transval(v->type.size, stm->m.size, v, sial_issigned(v->type.typattr)); ++ ++ if(m->local) { ++ ++ memmove(m->local+stm->m.offset, sial_adrval(v), stm->m.size); ++ ++ } ++ ++ if(m->mem) { ++ ++ API_PUTMEM(m->mem+stm->m.offset, sial_adrval(v), stm->m.size); ++ } ++ } ++ } ++} ++ ++#define vdata(p, t) ((t*)(p->v.data)) ++ ++void ++sial_exememlocal(value_t *vp, stmember_t* stm, value_t *v) ++{ ++ /* expression should be a ctype_t*/ ++ if(!is_ctype(vp->type.type)) { ++ ++ sial_error("Invalid type for '.' 
expression"); ++ } ++ /* get that value_t from the application memory */ ++ if(is_ctype(stm->type.type) && !stm->type.idxlst) { ++ ++ void *data=sial_alloc(stm->m.size); ++ ++ memmove(data, vdata(vp, char)+stm->m.offset, stm->m.size); ++ if(vp->mem) v->mem=vp->mem+stm->m.offset; ++ v->v.data=data; ++ ++ } ++ /* bit field gymnastic */ ++ else if(stm->m.nbits) { ++ ++ ull value=0; ++ ++ memmove(vdata(vp, char)+stm->m.offset, ((char*)&value)+(sizeof(value)-stm->m.size), stm->m.size); ++ get_bit_value(value, stm->m.nbits, stm->m.fbit, stm->m.size, v); ++ ++ } ++ /* check if this is an array, if so then create a reference to it */ ++ else if(stm->type.idxlst) { ++ ++ ull mempos=vp->mem+stm->m.offset; ++ if(sial_defbsize()==8) v->v.ull=mempos; ++ else v->v.ul=mempos; ++ v->mem=mempos; ++ ++ } else { ++ ++ switch(TYPE_SIZE(&stm->type)) { ++ case 1: ++ memmove(&v->v.uc, vdata(vp, char)+stm->m.offset, 1); ++ break; ++ case 2: ++ memmove(&v->v.us, vdata(vp, char)+stm->m.offset, 2); ++ break; ++ case 4: ++ memmove(&v->v.ul, vdata(vp, char)+stm->m.offset, 4); ++ break; ++ case 8: ++ memmove(&v->v.ull, vdata(vp, char)+stm->m.offset, 8); ++ break; ++ default: ++ sial_error("Oops exemem2[%d]", TYPE_SIZE(&stm->type)); ++ break; ++ } ++ if(vp->mem) v->mem=vp->mem+stm->m.offset; ++ } ++} ++ ++value_t * ++sial_exemem(mem *m) ++{ ++value_t *v=sial_newval(); ++value_t *vp=NODE_EXE(m->expr); ++stmember_t*stm; ++srcpos_t p; ++ ++ sial_curpos(&m->p, &p); ++ ++ if(vp->type.type == V_REF) { ++ ++ if(vp->type.ref > 1) { ++ ++ sial_error("Too many levels of indirection for access to [%s]", m->name); ++ ++ } ++ } ++ ++ /* get the member information and attach it */ ++ stm=m->stm=(stmember_t*)sial_member(m->name, &vp->type); ++ if(!stm) { ++ ++ sial_freeval(v); ++ sial_freeval(vp); ++ sial_error("Invalid member name specified : %s", m->name); ++ ++ } ++ ++ /* get a copy of the type of thise member and put it in v */ ++ sial_duptype(&v->type, &stm->type); ++ ++ /* indirect i.e. (struct*)->member *most* be relative to the ++ system image. This is a restriction of this language */ ++ if(m->dir==INDIRECT) { ++ ++ ull mempos; ++ ++ if(vp->type.type != V_REF || !is_ctype(vp->type.rtype)) { ++ ++ sial_error("Invalid type for '->' expression"); ++ } ++ ++ m->local=0; ++ m->mem=sial_defbsize()==8?vp->v.ull:vp->v.ul; ++ mempos=m->mem+stm->m.offset; ++ ++ /* get that value_t from the system image */ ++ if(is_ctype(v->type.type) && !stm->type.idxlst) { ++ ++ v->v.data=sial_alloc(stm->m.size); ++ API_GETMEM(mempos, v->v.data, stm->m.size); ++ v->mem=mempos; ++ ++ } ++ /* bit field gymnastic */ ++ else if(stm->m.nbits) { ++ ++ ull value=0; ++ ++ API_GETMEM(m->mem+stm->m.offset, &value, stm->m.size); ++ get_bit_value(value, stm->m.nbits, stm->m.fbit, stm->m.size, v); ++ /* no mempos for bit fields ... */ ++ ++ } ++ /* check if this is an array, if so then create a reference to it */ ++ else if(stm->type.idxlst) { ++ ++ if(sial_defbsize()==8) v->v.ull=mempos; ++ else v->v.ul=mempos; ++ v->mem=mempos; ++ ++ } else { ++ ++ v->mem=mempos; ++ ++ switch(TYPE_SIZE(&stm->type)) { ++ case 1: ++ API_GETMEM(mempos, &v->v.uc, 1); ++ break; ++ case 2: ++ API_GETMEM(mempos, &v->v.us, 2); ++ break; ++ case 4: ++ API_GETMEM(mempos, &v->v.ul, 4); ++ break; ++ case 8: ++ API_GETMEM(mempos, &v->v.ull, 8); ++ break; ++ default: ++ sial_error("Oops exemem[%d]", TYPE_SIZE(&stm->type)); ++ break; ++ } ++ ++ } ++ } ++ /* direct i.e. (struct).member *most* be in referance to a local ++ structure. 
*/ ++ else { ++ ++ m->mem=vp->mem; ++ m->local=vp->v.data; ++ ++ /* extract the value from a local copy */ ++ sial_exememlocal(vp, stm, v); ++ } ++ sial_curpos(&p, 0); ++ sial_freeval(vp); ++ v->setfct=(setfct_t)sial_setmem; ++ v->setval=(value_t*)m; ++ v->set=1; ++ return v; ++} ++ ++void ++sial_freemem(mem *m) ++{ ++ NODE_FREE(m->expr); ++ sial_free(m->name); ++ sial_free(m); ++} ++ ++node_t* ++sial_newmem(int dir, node_t*expr, node_t*name) ++{ ++char *nstr=NODE_NAME(name); ++node_t*n=sial_newnode(); ++mem *m=sial_alloc(sizeof(mem)); ++ ++ /* dicard nam node_t*/ ++ NODE_FREE(name); ++ m->name=nstr; ++ m->dir=dir; ++ m->expr=expr; ++ sial_setpos(&m->p); ++ n->data=m; ++ n->exe=(xfct_t)sial_exemem; ++ n->free=(ffct_t)sial_freemem; ++ return n; ++} +--- crash/extensions/libsial/sialpp-lsed.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sialpp-lsed 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,32 @@ ++s/yyback/sialppback/g ++s/yybgin/sialppbgin/g ++s/yycrank/sialppcrank/g ++s/yyerror/sialpperror/g ++s/yyestate/sialppestate/g ++s/yyextra/sialppextra/g ++s/yyfnd/sialppfnd/g ++s/yyin/sialppin/g ++s/yyinput/sialppinput/g ++s/yyleng/sialppleng/g ++s/yylex/sialpplex/g ++s/yylineno/sialpplineno/g ++s/yylook/sialpplook/g ++s/yylsp/sialpplsp/g ++s/yylstate/sialpplstate/g ++s/yylval/sialpplval/g ++s/yymatch/sialppmatch/g ++s/yymorfg/sialppmorfg/g ++s/yyolsp/sialppolsp/g ++s/yyout/sialppout/g ++s/yyoutput/sialppoutput/g ++s/yyprevious/sialppprevious/g ++s/yysbuf/sialppsbuf/g ++s/yysptr/sialppsptr/g ++s/yysvec/sialppsvec/g ++s/yytchar/sialpptchar/g ++s/yytext/sialpptext/g ++s/yytop/sialpptop/g ++s/yyunput/sialppunput/g ++s/yyvstop/sialppvstop/g ++s/yywrap/sialppwrap/g ++s/yydebug/sialdebug/g +--- crash/extensions/libsial/sial-lsed.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial-lsed 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,32 @@ ++s/yyback/sialback/g ++s/yybgin/sialbgin/g ++s/yycrank/sialcrank/g ++s/yyerror/sialerror/g ++s/yyestate/sialestate/g ++s/yyextra/sialextra/g ++s/yyfnd/sialfnd/g ++s/yyin/sialin/g ++s/yyinput/sialinput/g ++s/yyleng/sialleng/g ++s/yylex/siallex/g ++s/yylineno/siallineno/g ++s/yylook/siallook/g ++s/yylsp/siallsp/g ++s/yylstate/siallstate/g ++s/yylval/siallval/g ++s/yymatch/sialmatch/g ++s/yymorfg/sialmorfg/g ++s/yyolsp/sialolsp/g ++s/yyout/sialout/g ++s/yyoutput/sialoutput/g ++s/yyprevious/sialprevious/g ++s/yysbuf/sialsbuf/g ++s/yysptr/sialsptr/g ++s/yysvec/sialsvec/g ++s/yytchar/sialtchar/g ++s/yytext/sialtext/g ++s/yytop/sialtop/g ++s/yyunput/sialunput/g ++s/yyvstop/sialvstop/g ++s/yywrap/sialwrap/g ++s/yydebug/sialdebug/g +--- crash/extensions/libsial/sial_type.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_type.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1172 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include "sial.h" ++#include "sial.tab.h" ++#include ++#include ++/* ++ This file contains functions that deals with type and type ++ casting operators. 
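++
++ Base type information is kept as a bit mask in type_t.typattr: a
++ declaration such as "unsigned long" is represented, for instance, as
++ B_LONG|B_UNSIGNED, and sial_getbtypename() below turns such a mask
++ back into a printable name.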
++*/ ++#define B_SIZE_MASK 0x0007f0 ++#define B_SIGN_MASK 0x00f000 ++#define B_STOR_MASK 0x1f0000 ++#define B_CHAR 0x000010 ++#define B_SHORT 0x000020 ++#define B_INT 0x000040 ++#define B_LONG 0x000080 ++#define B_LONGLONG 0x000100 ++#define B_FLOAT 0x000200 ++#define B_CONST 0x000400 ++#define B_SIGNED 0x001000 ++#define B_UNSIGNED 0x002000 ++#define B_STATIC 0x010000 ++#define B_REGISTER 0x020000 ++#define B_VOLATILE 0x040000 ++#define B_TYPEDEF 0x080000 ++#define B_EXTERN 0x100000 ++#define B_VOID 0x800000 ++#define B_USPEC 0x000001 /* user specified sign */ ++#define B_ENUM 0x000002 /* btype is from a enum */ ++ ++#define is_size(i) ((i)&B_SIZE_MASK) ++#define is_sign(i) ((i)&B_SIGN_MASK) ++#define is_stor(i) ((i)&B_STOR_MASK) ++#define issigned(v) (v->type.typattr & B_SIGNED) ++#define vsize(v) (is_size(v->type.typattr)) ++ ++static struct { ++ int btype; ++ int key; ++ char *name; ++} blut[] = { ++ { B_VOID, VOID , "void"}, ++ { B_TYPEDEF, TDEF , "tdef"}, ++ { B_EXTERN, EXTERN , "extern"}, ++ { B_STATIC, STATIC , "static"}, ++ { B_VOLATILE, VOLATILE , "volatile"}, ++ { B_CONST, CONST , "const"}, ++ { B_REGISTER, REGISTER , "register"}, ++ { B_UNSIGNED, UNSIGNED , "unsigned"}, ++ { B_SIGNED, SIGNED , "signed"}, ++ { B_CHAR, CHAR, "char" }, ++ { B_SHORT, SHORT , "short"}, ++ { B_INT, INT , "int"}, ++ { B_LONG, LONG , "long"}, ++ { B_LONGLONG, DOUBLE , "long long"}, ++ { B_FLOAT, FLOAT , "float"}, ++}; ++ ++type_t * ++sial_newtype() ++{ ++ return sial_calloc(sizeof(type_t)); ++} ++ ++void ++sial_freetype(type_t* t) ++{ ++ if(t->idxlst) sial_free(t->idxlst); ++ sial_free(t); ++} ++ ++/* this function is called by the parser to merge the ++ storage information (being hold in a basetype) into the real type_t*/ ++type_t* ++sial_addstorage(type_t*t1, type_t*t2) ++{ ++ t1->typattr |= is_stor(t2->typattr); ++ sial_freetype(t2); ++ return t1; ++} ++ ++char * ++sial_ctypename(int type) ++{ ++ switch(type) { ++ ++ case V_TYPEDEF: return "typedef"; ++ case V_STRUCT: return "struct"; ++ case V_UNION: return "union"; ++ case V_ENUM: return "enum"; ++ default: return "???"; ++ } ++} ++ ++int sial_isstatic(int atr) { return atr & B_STATIC; } ++int sial_isenum(int atr) { return atr & B_ENUM; } ++int sial_isconst(int atr) { return atr & B_CONST; } ++int sial_issigned(int atr) { return atr & B_SIGNED; } ++int sial_istdef(int atr) { return atr & B_TYPEDEF; } ++int sial_isxtern(int atr) { return atr & B_EXTERN; } ++int sial_isvoid(int atr) { return atr & B_VOID; } ++int sial_isstor(int atr) { return is_stor(atr); } ++int sial_is_struct(int ctype) { return ctype==V_STRUCT; } ++int sial_is_enum(int ctype) { return ctype==V_ENUM; } ++int sial_is_union(int ctype) { return ctype==V_UNION; } ++int sial_is_typedef(int ctype) { return ctype==V_TYPEDEF; } ++ ++/* type seting */ ++int sial_type_gettype(type_t*t) { return t->type; } ++void sial_type_settype(type_t*t, int type) { t->type=type; } ++void sial_type_setsize(type_t*t, int size) { t->size=size; } ++int sial_type_getsize(type_t*t) { return t->size; } ++void sial_type_setidx(type_t*t, ull idx) { t->idx=idx; } ++ull sial_type_getidx(type_t*t) { return t->idx; } ++void sial_type_setidxlst(type_t*t, int* idxlst) { t->idxlst=idxlst; } ++void sial_type_setref(type_t*t, int ref, int type) { t->ref=ref; t->rtype=type; } ++void sial_type_setfct(type_t*t, int val) { t->fct=val; } ++void sial_type_mkunion(type_t*t) { t->type=V_UNION; } ++void sial_type_mkenum(type_t*t) { t->type=V_ENUM; } ++void sial_type_mkstruct(type_t*t) { t->type=V_STRUCT; } ++void 
sial_type_mktypedef(type_t*t) { t->type=V_TYPEDEF; } ++ ++static int defbtype=B_LONG|B_SIGNED; ++static int defbidx=B_SL; ++static int defbsize=4; ++static int defbsign=B_SIGNED; ++int sial_defbsize() { return defbsize; } ++ ++char * ++sial_getbtypename(int typattr) ++{ ++int i; ++char *name=sial_alloc(200); ++ ++ name[0]='\0'; ++ for(i=0;itype.type==V_REF) { ++ ++ return TYPE_SIZE(&v->type)==4 ? (ull)(v->v.ul) : v->v.ull; ++ ++ } else switch(v->type.idx) { ++ ++ case B_SC: return (ull)(v->v.sc); ++ case B_UC: return (ull)(v->v.uc); ++ case B_SS: return (ull)(v->v.ss); ++ case B_US: return (ull)(v->v.us); ++ case B_SL: return (ull)(v->v.sl); ++ case B_UL: return (ull)(v->v.ul); ++ case B_SLL: return (ull)(v->v.sll); ++ case B_ULL: return (ull)(v->v.ull); ++ default: sial_error("Oops univ()[%d]", TYPE_SIZE(&v->type)); break; ++ } ++ return 0; ++} ++ ++void ++sial_duptype(type_t*t, type_t*ts) ++{ ++ memmove(t, ts, sizeof(type_t)); ++ if(ts->idxlst) { ++ ++ t->idxlst=sial_calloc(sizeof(int)*(MAXIDX+1)); ++ memmove(t->idxlst, ts->idxlst, sizeof(int)*(MAXIDX+1)); ++ } ++} ++ ++#define asarray(v) (v->arr!=v->arr->next) ++ ++/* ++ Duplicate a value_t. ++ On duplication we do verification of the value_ts involved. ++ this is to make it possible to pass array to subfunctions ++ and to override specific value_ts that also have arrays attached ++ to them. ++*/ ++void ++sial_dupval(value_t *v, value_t *vs) ++{ ++int isvoid=(v->type.typattr & B_VOID); ++ ++ /* if both have an attached array ... fail */ ++ if(asarray(v) && asarray(vs)) { ++ ++ sial_error("Can't override array"); ++ ++ } ++ /* when we are attaching a new array to the destination value_t ++ we need to add the destination reference count to the source */ ++ if(asarray(v)) { ++ ++ array_t*a=v->arr; ++ ++ /* preserve the array accross the freedata and memmove */ ++ v->arr=0; ++ sial_freedata(v); ++ ++ /* copy the new value_t over it */ ++ memmove(v, vs, sizeof(value_t)); ++ ++ /* and restore the array_t*/ ++ v->arr=a; ++ ++ } else { ++ ++ sial_refarray(vs, 1); ++ sial_freedata(v); ++ memmove(v, vs, sizeof(value_t)); ++ } ++ ++ sial_duptype(&v->type, &vs->type); ++ sial_dupdata(v, vs); ++ ++ /* conserve the void atribute across asignements */ ++ v->type.typattr |= isvoid; ++} ++ ++/* ++ clone a value_t. ++*/ ++value_t * ++sial_cloneval(value_t *v) ++{ ++value_t *nv=sial_alloc(sizeof(value_t)); ++ ++ memmove(nv, v, sizeof(value_t)); ++ sial_refarray(v, 1); ++ sial_dupdata(nv, v); ++ return nv; ++} ++ ++static signed long long ++twoscomp(ull val, int nbits) ++{ ++ return val | (0xffffffffffffffffll << nbits); ++ // XXX return (val-1)^0xffffffffll; ++} ++ ++/* ++ Get a bit field value_t from system image or live memory. ++ We do all operations with a ull untill the end. ++ Then we check for the basetype size and sign and convert ++ apropriatly. 
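++
++   For illustration (the member name and bit layout here are
++   hypothetical): a member declared as "unsigned int flag:3" starting
++   at bit offset 5 in a 4-byte word would be fetched as
++   get_bit_value(raw, 3, 5, 4, v); the raw word is shifted right by 5,
++   masked with (1<<3)-1, and sign-extended only if the target base
++   type is signed.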
++*/ ++void ++get_bit_value(ull val, int nbits, int boff, int size, value_t *v) ++{ ++ ull mask; ++ int dosign=0; ++ int vnbits=size*8; ++ ++ ++ val = API_GET_UINT64(&val); ++ ++ /* first get the value_t */ ++ if (nbits >= 32) { ++ int upper_bits = nbits - 32; ++ mask = ((1 << upper_bits) - 1); ++ mask = (mask << 32) | 0xffffffff; ++ } ++ else { ++ mask = ((1 << nbits) - 1); ++ } ++ val = val >> boff; ++ val &= mask; ++ ++ if(issigned(v)) { ++ ++ /* get the sign bit */ ++ if(val >> (nbits-1)) dosign=1; ++ ++ } ++ switch(vsize(v)) { ++ ++ case B_CHAR: { ++ if(dosign) { ++ v->v.sc=(signed char)twoscomp(val, nbits); ++ } ++ else { ++ v->v.uc=val; ++ } ++ } ++ break; ++ case B_SHORT: { ++ if(dosign) { ++ v->v.ss=(signed short)twoscomp(val, nbits); ++ } ++ else { ++ v->v.us=val; ++ } ++ } ++ break; ++ case B_LONG: ++ ++ if(sial_defbsize()==8) goto ll; ++ ++ case B_INT: { ++ if(dosign) { ++ v->v.sl=(signed long)twoscomp(val, nbits); ++ } ++ else { ++ v->v.ul=val; ++ } ++ } ++ break; ++ case B_LONGLONG: { ++ll: ++ if(dosign) { ++ v->v.sll=(signed long long)twoscomp(val, nbits); ++ } ++ else { ++ v->v.ull=val; ++ } ++ } ++ break; ++ default: ++ sial_error("Oops get_bit_value_t..."); ++ break; ++ } ++ ++} ++/* ++ Set a bit field value_t. dvalue_t is the destination value_t as read ++ from either the system image of live memory. ++ */ ++ull ++set_bit_value_t(ull dvalue, ull value, int nbits, int boff) ++{ ++ ull mask; ++ ++ if (nbits >= 32) { ++ int upper_bits = nbits - 32; ++ mask = ((1 << upper_bits) - 1); ++ mask = (mask << 32) | 0xffffffff; ++ } ++ else { ++ mask = ((1 << nbits) - 1); ++ } ++ /* strip out the current value_t */ ++ dvalue &= ~(mask << boff); ++ ++ /* put in the new one */ ++ dvalue |= (value << boff); ++ return dvalue; ++} ++ ++/* this function is called when we have determined the systems ++ default int size (64 bit vs 32 bits) */ ++void ++sial_setdefbtype(int size, int sign) ++{ ++int idx=B_INT; ++ ++ switch(size) { ++ ++ case 1: defbtype=B_CHAR; idx=B_UC; break; ++ case 2: defbtype=B_SHORT;idx=B_US; break; ++ case 4: defbtype=B_INT; idx=B_UL; break; ++ case 8: defbtype=B_LONGLONG; idx=B_ULL; break; ++ ++ } ++ if(sign) defbsign = B_SIGNED; ++ else defbsign = B_UNSIGNED; ++ defbtype |= defbsign; ++ defbsize=size; ++ defbidx=idx; ++} ++ ++static int ++getbtype(int token) ++{ ++int i; ++ ++ for(i=0;itype.type=V_BASE; ++ v->setfct=sial_setfct; ++ v->type.idx=idx; ++ v->mem=0; ++ switch(idx) { ++ ++ case B_UC: case B_SC: ++ v->type.size=1; ++ v->v.uc=i; ++ break; ++ case B_US: case B_SS: ++ v->type.size=2; ++ v->v.us=i; ++ break; ++ case B_UL: case B_SL: ++ v->type.size=4; ++ v->v.ul=i; ++ break; ++ case B_ULL: case B_SLL: ++ v->type.size=8; ++ v->v.ull=i; ++ break; ++ default: sial_error("Oops defbtypesize!"); break; ++ } ++ return v; ++} ++ ++value_t * ++sial_defbtype(value_t *v, ull i) ++{ ++ v->type.typattr=defbtype; ++ return sial_defbtypesize(v, i, defbidx); ++} ++ ++value_t * ++sial_makebtype(ull i) ++{ ++value_t *v=sial_calloc(sizeof(value_t)); ++ ++ sial_defbtype(v, i); ++ sial_setarray(&v->arr); ++ TAG(v); ++ return v; ++} ++ ++value_t * ++sial_newval() ++{ ++value_t *v=sial_makebtype(0); ++ ++ return v; ++} ++ ++/* take the current basetypes and generate a uniq index */ ++static void ++settypidx(type_t*t) ++{ ++int v1, v2, v3, size; ++ ++ if(t->typattr & B_CHAR) { ++ size=1; ++ v1=B_SC; v2=B_UC; ++ v3=(defbsign==B_SIGNED?B_SC:B_UC); ++ } else if(t->typattr & B_SHORT) { ++ size=2; ++ v1=B_SS; v2=B_US; v3=B_SS; ++ } else if(t->typattr & B_LONG) { ++ if(sial_defbsize()==4) { 
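++			/* 32-bit default base size: a plain 'long' maps to
++			   the 4-byte B_SL/B_UL slots; otherwise it is
++			   handled as a long long (the 'll' case below). */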
++ size=4; ++ v1=B_SL; v2=B_UL; v3=B_SL; ++ } else goto ll; ++ } else if(t->typattr & B_INT) { ++go: ++ size=4; ++ v1=B_SL; v2=B_UL; v3=B_SL; ++ } else if(t->typattr & B_LONGLONG) { ++ll: ++ size=8; ++ v1=B_SLL; v2=B_ULL; v3=B_SLL; ++ } ++ else goto go; ++ ++ if(t->typattr & B_SIGNED) t->idx=v1; ++ else if(t->typattr & B_UNSIGNED) t->idx=v2; ++ else t->idx=v3; ++ t->size=size; ++} ++ ++/* take the current basetypes and generate a uniq index */ ++int ++sial_idxtoattr(int idx) ++{ ++int i; ++static struct { ++ ++ int idx; ++ int attr; ++ ++} atoidx[] = { ++ ++ {B_SC, B_SIGNED | B_CHAR}, ++ {B_UC, B_UNSIGNED| B_CHAR}, ++ {B_SS, B_SIGNED | B_SHORT}, ++ {B_US, B_UNSIGNED| B_SHORT}, ++ {B_SL, B_SIGNED | B_LONG}, ++ {B_UL, B_UNSIGNED| B_LONG}, ++ {B_SLL, B_SIGNED | B_LONGLONG}, ++ {B_ULL, B_UNSIGNED| B_LONGLONG}, ++}; ++ ++ for(i=0; i < sizeof(atoidx)/sizeof(atoidx[0]); i++) { ++ ++ if(atoidx[i].idx==idx) return atoidx[i].attr; ++ } ++ sial_error("Oops sial_idxtoattr!"); ++ return 0; ++} ++ ++void ++sial_mkvsigned(value_t*v) ++{ ++ v->type.typattr &= ~B_SIGN_MASK; ++ v->type.typattr |= B_SIGNED; ++ settypidx(&v->type); ++} ++ ++/* if there's no sign set the default */ ++void ++sial_chksign(type_t*t) ++{ ++ if(sial_isvoid(t->typattr)) return; ++ if(!is_sign(t->typattr)) { ++ ++ /* char is compile time dependant */ ++ if(t->idx==B_SC || t->idx==B_UC) t->typattr |= defbsign; ++ /* all other sizes are signed by default */ ++ else t->typattr |= B_SIGNED; ++ } ++ settypidx(t); ++} ++ ++/* if ther's no size specification, make it an INT */ ++void ++sial_chksize(type_t*t) ++{ ++ if(!sial_isvoid(t->typattr) && !is_size(t->typattr)) sial_addbtype(t, INT); ++} ++ ++/* create a new base type element */ ++type_t* ++sial_newbtype(int token) ++{ ++int btype; ++type_t*t=sial_newtype(); ++ ++ if(!token) btype=defbtype; ++ else { ++ ++ btype=getbtype(token); ++ if(is_sign(btype)) btype |= B_USPEC; ++ } ++ t->type=V_BASE; ++ t->typattr=btype; ++ settypidx(t); ++ TAG(t); ++ return t; ++} ++ ++/* set the default sign on a type if user did'nt specify one and not int */ ++#define set_base_sign(a) if(!(base & (B_USPEC|B_INT))) base = (base ^ is_sign(base)) | a ++ ++/* ++ char short int long longlong ++char XXX XXX XXX XXX XXX ++short XXX XXX OOO XXX XXX ++int XXX OOO XXX OOO OOO ++long XXX XXX OOO OOO XXX ++longlong XXX XXX OOO XXX XXX ++ ++ the parser let's you specify any of the B_ type. It's here that we ++ have to check things out ++ ++*/ ++type_t* ++sial_addbtype(type_t*t, int newtok) ++{ ++int btype=getbtype(newtok); ++int base=t->typattr; ++ ++ /* size specification. Check for 'long long' any other ++ combinaison of size is invalid as is 'long long long' */ ++ if(is_size(btype)) { ++ ++ int ibase=base; ++ ++ switch(btype) { ++ ++ case B_LONG: { ++ ++ ++ if(!(base & (B_CHAR|B_SHORT))) { ++ ++ set_base_sign(B_UNSIGNED); ++ ++ if(base & B_LONG || sial_defbsize()==8) { ++ ++ ibase &= ~B_LONGLONG; ++ base |= B_LONGLONG; ++ base &= ~B_LONG; ++ ++ } else { ++ ++ base |= B_LONG; ++ } ++ } ++ break; ++ } ++ case B_INT: { ++ ++ /* ++ * This is a bit of a hack to circumvent the ++ * problem that "long int" or "long long int" ++ * is a valid statement in C. 
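++					 * When a size keyword was already
++					 * seen (B_LONG or B_LONGLONG is set),
++					 * the extra "int" is simply absorbed:
++					 * ibase is cleared below so the
++					 * size-combination warning is not
++					 * emitted.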
++ */ ++ if(!(base & (B_INT|B_CHAR|B_LONG|B_LONGLONG))) { ++ ++ set_base_sign(B_SIGNED); ++ base |= B_INT; ++ } ++ if (base & (B_LONG|B_LONGLONG)) ++ ibase = 0; ++ break; ++ } ++ case B_SHORT: { ++ ++ if(!(base & (B_SHORT|B_CHAR|B_LONG|B_LONGLONG))) { ++ ++ base |= B_SHORT; ++ set_base_sign(B_UNSIGNED); ++ } ++ ++ } ++ case B_CHAR: { ++ ++ if(!(base & (B_CHAR|B_SHORT|B_INT|B_LONG|B_LONGLONG))) { ++ ++ base |= B_CHAR; ++ set_base_sign(defbsign); ++ } ++ ++ } ++ } ++ ++ if(ibase == base) { ++ ++ sial_warning("Invalid combinaison of sizes"); ++ ++ } ++ ++ } else if(is_sign(btype)) { ++ ++ if(base & B_USPEC) { ++ ++ if(is_sign(btype) == is_sign(base)) ++ ++ sial_warning("duplicate type specifier"); ++ ++ else ++ ++ sial_error("invalid combination of type specifiers"); ++ } ++ /* always keep last found signed specification */ ++ base ^= is_sign(base); ++ base |= btype; ++ base |= B_USPEC; ++ ++ } else if(is_stor(btype)) { ++ ++ if(is_stor(base)) { ++ ++ sial_warning("Suplemental storage class ignore"); ++ ++ } ++ else base |= btype; ++ } ++ t->typattr=base; ++ settypidx(t); ++ return t; ++} ++ ++/* this function gets called back from the API when the user need to parse ++ a type declaration. Like when a typedef dwarf returns a type string */ ++ ++void ++sial_pushref(type_t*t, int ref) ++{ ++ if(t->type==V_REF) { ++ ++ t->ref += ref; ++ ++ } else { ++ ++ t->ref=ref; ++ ++ if(ref) { ++ ++ t->rtype=t->type; ++ t->type=V_REF; ++ } ++ } ++} ++void ++sial_popref(type_t*t, int ref) ++{ ++ ++ if(!t->ref) return; ++ ++ t->ref-=ref; ++ ++ if(!t->ref) { ++ ++ t->type=t->rtype; ++ } ++} ++ ++typedef struct { ++ int battr; ++ char *str; ++} bstr; ++static bstr btypstr[] = { ++ {CHAR, "char"}, ++ {SHORT, "short"}, ++ {INT, "int"}, ++ {LONG, "long"}, ++ {DOUBLE, "double"}, ++ {SIGNED, "signed"}, ++ {UNSIGNED, "unsigned"}, ++ {STATIC, "static"}, ++ {REGISTER, "register"}, ++ {VOLATILE, "volatile"}, ++ {VOID, "void"}, ++}; ++int ++sial_parsetype(char *str, type_t*t, int ref) ++{ ++char *p; ++char *tok, *pend; ++int ctype=0, i, first, found; ++type_t*bt=0; ++ ++ /* if it's a simple unamed ctype return 0 */ ++ if(!strcmp(str, "struct")) { t->type=V_STRUCT; return 0; } ++ if(!strcmp(str, "enum")) { t->type=V_ENUM; return 0; } ++ if(!strcmp(str, "union")) { t->type=V_UNION; return 0; } ++ ++ p=sial_strdup(str); ++ ++ /* get he level of reference */ ++ for(pend=p+strlen(p)-1; pend>=p; pend--) { ++ ++ if(*pend==' ' || *pend == '\t') continue; ++ if(*pend == '*' ) ref ++; ++ else break; ++ ++ } ++ *++pend='\0'; ++ ++again: ++ tok=strtok(p," "); ++ if(!strcmp(tok, "struct")) { ++ ++ ctype=V_STRUCT; ++ ++ } else if(!strcmp(tok, "union")) { ++ ++ ctype=V_UNION; ++ ++ } else if(!strcmp(tok, "enum")) { ++ sial_free(p); ++ p=(char*)sial_alloc(strlen("unsigned int") + 1); ++ /* force enum type into unigned int type for now */ ++ strcpy(p, "unsigned int"); ++ goto again; ++ ++ } ++ if(ctype) { ++ ++ char *name=strtok(NULL, " \t"); ++ bt=sial_getctype(ctype, name, 1); ++ ++ /* we accept unknow struct reference if it's a ref to it */ ++ /* the user will probably cast it to something else anyway... 
*/ ++ if(!bt) { ++ ++ if(ref) { ++ ++ bt=(type_t*)sial_getvoidstruct(ctype); ++ ++ } else { ++ ++ sial_error("Unknown Struct/Union/Enum %s", name); ++ ++ } ++ } ++ ++ sial_duptype(t, bt); ++ sial_freetype(bt); ++ sial_pushref(t, ref); ++ sial_free(p); ++ return 1; ++ } ++ ++ /* this must be a basetype_t*/ ++ first=1; ++ do { ++ found=0; ++ for(i=0;inext->v->type); ++ sial_freesvs(v); ++ return type; ++} ++ ++typedef struct cast { ++ ++ type_t*t; ++ node_t*n; ++ srcpos_t pos; ++ ++} cast; ++ ++/* make sure we do the proper casting */ ++void ++sial_transval(int s1, int s2, value_t *v, int issigned) ++{ ++vu_t u; ++ ++ if(s1==s2) return; ++ ++ if(issigned) { ++ ++ switch(s1) { ++ case 1: ++ switch(s2) { ++ case 2: ++ u.us=v->v.sc; ++ break; ++ case 4: ++ u.ul=v->v.sc; ++ break; ++ case 8: ++ u.ull=v->v.sc; ++ break; ++ } ++ break; ++ case 2: ++ switch(s2) { ++ case 1: ++ u.uc=v->v.ss; ++ break; ++ case 4: ++ u.ul=v->v.ss; ++ break; ++ case 8: ++ u.ull=v->v.ss; ++ break; ++ } ++ break; ++ case 4: ++ switch(s2) { ++ case 2: ++ u.us=v->v.sl; ++ break; ++ case 1: ++ u.uc=v->v.sl; ++ break; ++ case 8: ++ u.ull=v->v.sl; ++ break; ++ } ++ break; ++ case 8: ++ switch(s2) { ++ case 2: ++ u.us=v->v.sll; ++ break; ++ case 4: ++ u.ul=v->v.sll; ++ break; ++ case 1: ++ u.uc=v->v.sll; ++ break; ++ } ++ break; ++ } ++ ++ } else { ++ ++ switch(s1) { ++ case 1: ++ switch(s2) { ++ case 2: ++ u.us=v->v.uc; ++ break; ++ case 4: ++ u.ul=v->v.uc; ++ break; ++ case 8: ++ u.ull=v->v.uc; ++ break; ++ } ++ break; ++ case 2: ++ switch(s2) { ++ case 1: ++ u.uc=v->v.us; ++ break; ++ case 4: ++ u.ul=v->v.us; ++ break; ++ case 8: ++ u.ull=v->v.us; ++ break; ++ } ++ break; ++ case 4: ++ switch(s2) { ++ case 2: ++ u.us=v->v.ul; ++ break; ++ case 1: ++ u.uc=v->v.ul; ++ break; ++ case 8: ++ u.ull=v->v.ul; ++ break; ++ } ++ break; ++ case 8: ++ switch(s2) { ++ case 2: ++ u.us=v->v.ull; ++ break; ++ case 4: ++ u.ul=v->v.ull; ++ break; ++ case 1: ++ u.uc=v->v.ull; ++ break; ++ } ++ break; ++ } ++ } ++ memmove(&v->v, &u, sizeof(u)); ++ if(v->type.type!=V_REF) v->type.size=s2; ++} ++ ++value_t * ++sial_execast(cast *c) ++{ ++/* we execute the expression node_t*/ ++value_t *v=NODE_EXE(c->n); ++ ++ /* ... and validate the type cast */ ++ if(v->type.type != V_REF && v->type.type != V_BASE) { ++ ++ sial_rerror(&c->pos, "Invalid typecast"); ++ ++ } ++ else { ++ ++ int vsize=TYPE_SIZE(&v->type); ++ int issigned=sial_issigned(v->type.typattr); ++ ++ /* Now, just copy the cast type over the current type_t*/ ++ sial_duptype(&v->type, c->t); ++ ++ /* Take into account the size of the two objects */ ++ sial_transval(vsize, TYPE_SIZE(c->t), v, issigned); ++ } ++ return v; ++} ++ ++void ++sial_freecast(cast *c) ++{ ++ NODE_FREE(c->n); ++ sial_freetype(c->t); ++ sial_free(c); ++} ++ ++node_t* ++sial_typecast(type_t*type, node_t*expr) ++{ ++ if(type->type==V_STRING) { ++ ++ sial_error("Cannot cast to a 'string'"); ++ return 0; ++ ++ } else { ++ ++ node_t*n=sial_newnode(); ++ cast *c=sial_alloc(sizeof(cast)); ++ ++ c->t=type; ++ c->n=expr; ++ n->exe=(xfct_t)sial_execast; ++ n->free=(ffct_t)sial_freecast; ++ n->data=c; ++ sial_setpos(&c->pos); ++ return n; ++ } ++} ++ ++/* ++ Validate type conversions on function calls and assignments. 
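++
++   Accepted combinations, based on the checks below: base-to-base
++   (with size/sign promotion), pointer-to-pointer when either side is
++   a void pointer, when both sides have the same level of indirection
++   and point to the same (or same-named) struct/union, or when both
++   point to base types of equal size, struct/union/enum of the same
++   type, string-to-string, enum<->base conversions, and assigning a
++   zero-valued integer (NULL) to a pointer.  Anything else stops with
++   an "Invalid type conversion" error.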
++*/ ++void ++sial_chkandconvert(value_t *vto, value_t *vfrm) ++{ ++type_t*tto=&vto->type; ++type_t*tfrm=&vfrm->type; ++ ++ if(tto->type == tfrm->type) { ++ ++ if(tto->type == V_BASE) { ++ ++ int attr=tto->typattr; ++ int idx=tto->idx; ++ ++ sial_transval(tfrm->size, tto->size, vfrm, sial_issigned(vfrm->type.typattr)); ++ sial_dupval(vto, vfrm); ++ tto->typattr=attr; ++ tto->idx=idx; ++ return; ++ ++ } else if(tto->type == V_REF) { ++ ++ if(sial_isvoid(tto->typattr) || sial_isvoid(tfrm->typattr)) goto dupit; ++ ++ if(tto->ref == tfrm->ref && tto->rtype == tfrm->rtype) { ++ ++ if(is_ctype(tto->rtype)) { ++ ++ if(tto->idx == tfrm->idx || sial_samectypename(tto->rtype, tto->idx, tfrm->idx)) ++ goto dupit; ++ ++ } else if(tto->size == tfrm->size) { ++ ++ int attr=tto->typattr; ++ sial_dupval(vto, vfrm); ++ tto->typattr=attr; ++ return; ++ } ++ } ++ } ++ /* Allow assignments between enums of the same type */ ++ else if(is_ctype(tto->type) || tto->type == V_ENUM) { ++ ++ /* same structure type_t*/ ++ if(tto->idx == tfrm->idx || sial_samectypename(tto->type, tto->idx, tfrm->idx)) ++ goto dupit; ++ } ++ else if(tto->type == V_STRING) goto dupit; ++ ++ } ++ else if((tto->type == V_ENUM && tfrm->type == V_BASE) || ++ (tto->type == V_BASE && tfrm->type == V_ENUM)) { ++ /* convert type from or to enum */ ++ int attr=tto->typattr; ++ int idx=tto->idx; ++ ++ sial_transval(tfrm->size, tto->size, vfrm, sial_issigned(vfrm->type.typattr)); ++ sial_dupval(vto, vfrm); ++ tto->typattr=attr; ++ tto->idx=idx; ++ return; ++ } ++ // support NULL assignment to pointer ++ else if(tto->type == V_REF && tfrm->type == V_BASE && !sial_getval(vfrm)) return; ++ sial_error("Invalid type conversion"); ++ ++dupit: ++ sial_dupval(vto, vfrm); ++} ++ +--- crash/extensions/libsial/sialpp.l.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sialpp.l 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,85 @@ ++%{ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++ ++#define YY_NO_UNPUT ++%} ++ ++%{ ++#include ++ ++#ifdef linux ++#define YY_INPUT(buf,result,max_size) \ ++{ \ ++ int c = sial_input(); \ ++ result = (c == EOF) ? YY_NULL : (buf[0] = c, 1); \ ++} ++#endif ++ ++#include "sial.h" ++#include "sialpp.tab.h" ++#if linux ++#define yylval sialpplval ++#endif ++ ++#define retok(t) return(t) ++int nomacs=0; ++extern int sial_chkmacvar(char *); ++extern node_t *sial_newchar(void); ++%} ++ ++ABC [a-zA-Z_] ++ABCN [a-zA-Z0-9_] ++N [0-9] ++X [0-9a-fA-F] ++ ++%% ++ ++[ \t\n]+ { ; } ++ ++"defined" { retok(P_DEFINED); } ++"&&" { retok(P_BAND); } ++"||" { retok(P_BOR); } ++"<" { retok(P_LT); } ++"<=" { retok(P_LE); } ++"==" { retok(P_EQ); } ++">=" { retok(P_GE); } ++">" { retok(P_GT); } ++"!=" { retok(P_NE); } ++"|" { retok(P_OR); } ++"!" { retok(P_NOT); } ++"^" { retok(P_XOR); } ++">>" { retok(P_SHR); } ++"<<" { retok(P_SHL); } ++"+" { retok(P_ADD); } ++"-" { retok(P_SUB); } ++"/" { retok(P_DIV); } ++"%" { retok(P_MOD); } ++"*" { retok(P_MUL); } ++ ++(("0x"+){X}+[lL]*|{N}+[lL]*) { yylval.n = sial_newnum(yytext); retok(P_NUMBER); } ++ ++{ABC}{ABCN}* { ++ if(strlen(yytext) > MAX_SYMNAMELEN) { ++ ++ sial_error("Symbol name too long"); ++ } ++ if(nomacs || !sial_chkmacvar(yytext)) { ++ ++ yylval.n = sial_newvnode(yytext); ++ retok(P_VAR); ++ } ++ } ++ ++\'.\' { yylval.n = sial_makenum(B_SC, yytext[1]); retok(P_NUMBER); } ++\'\\.\' { yylval.n = sial_makenum(B_SC, sial_getseq(yytext[2])); retok(P_NUMBER); } ++ ++ ++. 
{ retok(yylval.i = yytext[0]); } ++ ++%% ++#undef input ++#undef unput ++#define input() sial_input() ++#define unput(c) sial_unput(c) +--- crash/extensions/libsial/sial.l.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial.l 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,206 @@ ++%{ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++%} ++ ++%{ ++#include ++ ++#ifdef linux ++#define YY_INPUT(buf,result,max_size) \ ++{ \ ++ int c = sial_input(); \ ++ result = (c == EOF) ? YY_NULL : (buf[0] = c, 1); \ ++} ++#endif ++ ++#define yylval siallval ++#include "sial.h" ++#define YY_NO_UNPUT ++#include "sial.tab.h" ++ ++#define retok(t) return(t) ++int needvar=0, instruct=0; ++node_t *lastv; ++static char *lastvar=0; ++char *sial_lastvar(void) { return lastvar; } ++extern void sial_skip_directive(void); ++extern void sial_define(void); ++extern void sial_include(void); ++extern void sial_undefine(void); ++extern char sial_newchar(void); ++extern int sial_chkmacvar(char *); ++%} ++ ++ABC [a-zA-Z_] ++ABCN [a-zA-Z0-9_] ++N [0-9] ++X [0-9a-fA-F] ++W [ \t\n] ++P #[ \t]* ++OP [(] ++CP [)] ++ ++%s var1 ++%s var2 ++%s var3 ++%s var4 ++%% ++ ++{W} { ; } ++ ++"..." { retok(VARARGS); } ++"&&" { retok(BAND); } ++"||" { retok(BOR); } ++"<" { retok(LT); } ++"<=" { retok(LE); } ++"==" { retok(EQ); } ++">=" { retok(GE); } ++">" { retok(GT); } ++"!=" { retok(NE); } ++ ++"&=" { retok(ANDME); } ++"|" { retok(OR); } ++"|=" { retok(ORME); } ++"!" { retok(NOT); } ++"^" { retok(XOR); } ++"&" { retok(AND); } ++"^=" { retok(XORME); } ++">>" { retok(SHR); } ++"<<=" { retok(SHLME); } ++">>=" { retok(SHRME); } ++"<<" { retok(SHL); } ++"++" { retok(INCR); } ++"+" { retok(ADD); } ++"--" { retok(DECR); } ++"-" { retok(SUB); } ++"+=" { retok(ADDME); } ++"-=" { retok(SUBME); } ++"*=" { retok(MULME); } ++"/=" { retok(DIVME); } ++"/" { retok(DIV); } ++"%=" { retok(MODME); } ++"%" { retok(MOD); } ++"=" { retok(ASSIGN); } ++"->" { retok(INDIRECT); } ++"." 
{ retok(DIRECT); } ++"{" { needvar=0; retok('{'); } ++ ++\*+ { ++ yylval.i=strlen(yytext); ++ return PTR; ++ } ++ ++(("0x"+){X}+[lL]*|{N}+[lL]*) { yylval.n = sial_newnum(yytext); retok(NUMBER); } ++ ++{P}ident { sial_skip_directive(); } ++{P}pragma { sial_skip_directive(); } ++{P}define { ++ /* preprocessor command */ ++ /* either a simple constant or a macro */ ++ sial_define(); ++ } ++{P}include { ++ ++ /* file inclusion */ ++ sial_include(); ++ } ++{P}undef { ++ sial_undefine(); ++ } ++while { retok(WHILE); } ++for { retok(FOR); } ++do { retok(DO); } ++if { retok(IF); } ++else { retok(ELSE); } ++break { retok(BREAK); } ++continue { retok(CONTINUE); } ++in { retok(IN); } ++return { retok(RETURN); } ++ ++__char__ { retok(yylval.i=CHAR); } ++__short__ { retok(yylval.i=SHORT); } ++__int__ { retok(yylval.i=INT); } ++__float__ { retok(yylval.i=FLOAT); } ++__double__ { retok(yylval.i=DOUBLE); } ++__register__ { retok(yylval.i=REGISTER); } ++__volatile__ { retok(yylval.i=VOLATILE); } ++__void__ { retok(yylval.i=VOID); } ++__unsigned__ { retok(yylval.i=UNSIGNED); } ++__signed__ { retok(yylval.i=SIGNED); } ++__long__ { retok(yylval.i=LONG); } ++__const__ { retok(yylval.i=CONST); } ++__static__ { retok(yylval.i=STATIC); } ++__extern__ { retok(yylval.i=EXTERN); } ++ ++char { retok(yylval.i=CHAR); } ++short { retok(yylval.i=SHORT); } ++int { retok(yylval.i=INT); } ++float { retok(yylval.i=FLOAT); } ++double { retok(yylval.i=DOUBLE); } ++register { retok(yylval.i=REGISTER); } ++volatile { retok(yylval.i=VOLATILE); } ++void { retok(yylval.i=VOID); } ++unsigned { retok(yylval.i=UNSIGNED); } ++signed { retok(yylval.i=SIGNED); } ++long { retok(yylval.i=LONG); } ++const { retok(yylval.i=CONST); } ++static { retok(yylval.i=STATIC); } ++extern { retok(yylval.i=EXTERN); } ++ ++string { retok(yylval.i=STRTYPE); } ++__inline { ; } ++switch { retok(SWITCH); } ++case { retok(CASE); } ++default { retok(DEFAULT); } ++enum { retok(yylval.i=ENUM); } ++union { retok(yylval.i=UNION);} ++struct { retok(yylval.i=STRUCT); } ++typedef { retok(yylval.i=TDEF); } ++sizeof { retok(SIZEOF); } ++print { retok(PRINT); } ++printo { retok(PRINTO); } ++printd { retok(PRINTD); } ++printx { retok(PRINTX); } ++take_array { retok(TAKE_ARR); } ++ ++__var__ { BEGIN(var1); } ++{W}*{OP}{W}* { BEGIN(var2); } ++{ABC}{ABCN}* { BEGIN(var3); goto forcevar; } ++{W}*{CP}{W}* { BEGIN(INITIAL); } ++ ++ ++{ABC}{ABCN}* { ++ if((!needvar) && (yylval.t=sial_getctype(V_TYPEDEF, yytext, 1))) ++ { ++ /* hack to remember last tdef name */ ++ if(lastvar) sial_free(lastvar); ++ lastvar=sial_alloc(strlen(yytext)+1); ++ strcpy(lastvar, yytext); ++ needvar++; ++ retok(TYPEDEF); ++ } ++forcevar: ++ needvar=0; ++ if(strlen(yytext) > MAX_SYMNAMELEN) { ++ ++ sial_error("Symbol name too long"); ++ } ++ if(!sial_chkmacvar(yytext)) { ++ yylval.n = sial_newvnode(yytext); ++ retok(VAR); ++ } ++ } ++ ++\" { yylval.n = sial_newstr(); retok(STRING); } ++\'.\' { yylval.n = sial_makenum(B_SC, yytext[1]); retok(NUMBER); } ++\'\\.\' { yylval.n = sial_makenum(B_SC, sial_getseq(yytext[2])); retok(NUMBER); } ++ ++. { retok(yylval.i = yytext[0]); } ++ ++%% ++#undef input ++#undef unput ++#define input() sial_input() ++#define unput(c) sial_unput(c) +--- crash/extensions/libsial/sial_var.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_var.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1320 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. 
++ */ ++#include ++#include ++#include ++#include "sial.h" ++ ++/* ++ Get an existing variable from the current set. ++*/ ++ ++/* variable lists for the different scopes */ ++typedef struct { ++ int type; ++ var_t*svs; ++} svlist; ++ ++typedef struct glo { ++ struct glo *next; ++ var_t*vv; ++} glo; ++ ++/* ++ Free indexes specifications. ++*/ ++void ++sial_freeidx(idx_t *idx) ++{ ++int i; ++ ++ for(i=0;inidx;i++) { ++ ++ if(idx->idxs[i]) NODE_FREE(idx->idxs[i]); ++ } ++ sial_free(idx); ++} ++ ++/* ++ Free a variable declaration structure. ++*/ ++void ++sial_freedvar(dvar_t*dv) ++{ ++ if(!dv) return; ++ if(--dv->refcount) return; ++ if(dv->name) sial_free(dv->name); ++ if(dv->idx) sial_freeidx(dv->idx); ++ if(dv->init) NODE_FREE(dv->init); ++ if(dv->fargs) sial_freesvs(dv->fargs); ++ sial_free(dv); ++} ++ ++void ++sial_setarray(array_t**arpp) ++{ ++array_t*arp=*arpp; ++ ++ if(!arp) { ++ ++ arp=sial_calloc(sizeof(array_t)); ++ TAG(arp); ++ arp->next=arp->prev=arp; ++ arp->ref=1; ++ *arpp=arp; ++ } ++} ++ ++/* ++ this is the main variable declaration function. ++ We support the global scope attribute that make the declared ++ variable accessible to all function from all scripts. ++ ++ By default the scope of a variable either the statement block ++ where it was declared (or first used): ++ { ++ int var; ++ ... ++ } ++ Then it's scope is the block itself. ++ ++ Or the file, if it was declared outside of a function. ++ ++ Storage is by default 'automatic' and can be made permanent ++ by using the 'static' keywork in the declaration. ++ 'Volatile' and 'register' storage classes are supported but ++ have no effect. ++*/ ++var_t* ++sial_vardecl(dvar_t*dv, type_t*t) ++{ ++var_t*vlist=sial_newvlist(); ++var_t*var; ++ ++ /* type *and* dv can have ref counts. First comes from typedef parsing ++ second comes from the declaration itself */ ++ dv->ref += t->ref; ++ ++ /* add one level of ref for arrays */ ++ if(dv->idx) dv->ref++; ++ ++ /* reset ref level for tests below */ ++ sial_popref(t, t->ref); ++ ++ TAG(vlist); ++ ++ if(!t->type) { ++ ++ int sto=sial_isstor(t->typattr); ++ ++ sial_freetype(t); ++ t=sial_newbtype(0); ++ t->typattr |= sto; ++ } ++ else if(t->type==V_BASE && !dv->ref) { ++ ++ sial_chksign(t); ++ sial_chksize(t); ++ } ++ ++ /* is this a new typedef declaration ? */ ++ /* typedef is considered just like any other storage class */ ++ if(sial_istdef(t->typattr)) { ++ ++ sial_tdef_decl(dv, t); ++ return 0; ++ } ++ ++ while(dv) { ++ ++ /* disalow var names that match against already defined vars */ ++ if(dv->name[0]) { ++ type_t *t=sial_getctype(V_TYPEDEF, dv->name, 1); ++ if(t) { ++ ++ sial_freetype(t); ++ sial_warning("Variable '%s' already defined as typedef.\n"); ++ } ++ } ++ ++ /* ++ some sanity checks here that apply to both var and struct ++ declarations ++ */ ++ if(is_ctype(t->type) && !dv->ref) { ++ ++ if(dv->name[0]) { ++ ++ if(!instruct) { ++ ++ if(!sial_isxtern(t->typattr)) { ++ ++ sial_freesvs(vlist); ++ sial_error("struct/union instances not supported, please use pointers"); ++ } ++ ++ } else if(sial_ispartial(t)) { ++ ++ sial_freesvs(vlist); ++ sial_error("Reference to incomplete type"); ++ } ++ } ++ } ++ if(dv->nbits) { ++ ++ if(t->type != V_BASE) { ++ ++ sial_freesvs(vlist); ++ sial_error("Bit fields can only be of integer type"); ++ ++ } ++ if(dv->idx) { ++ ++ sial_freesvs(vlist); ++ sial_error("An array of bits ? 
Come on..."); ++ } ++ } ++ ++ var=sial_newvar(dv->name); ++ ++ t->fct=dv->fct; ++ sial_duptype(&var->v->type, t); ++ sial_pushref(&var->v->type, dv->ref); ++ ++ var->dv=dv; ++ ++ TAG(var); ++ ++ if(t->type == V_STRING) { ++ ++ sial_setstrval(var->v, ""); ++ ++ } ++ ++ sial_setpos(&dv->pos); ++ ++ sial_enqueue(vlist, var); ++ ++ dv=dv->next; ++ } ++ sial_free(t); ++ TAG(vlist); ++ return vlist; ++} ++ ++dvar_t* ++sial_newdvar(node_t*v) ++{ ++dvar_t*dv; ++ ++ dv=sial_alloc(sizeof(dvar_t)); ++ memset(dv, 0, sizeof(dvar_t)); ++ if(v) { ++ dv->name=NODE_NAME(v); ++ NODE_FREE(v); ++ ++ } else { ++ ++ dv->name=sial_alloc(1); ++ dv->name[0]='\0'; ++ } ++ dv->refcount=1; ++ sial_setpos(&dv->pos); ++ return dv; ++} ++ ++dvar_t* ++sial_dvarini(dvar_t*dv, node_t*init) ++{ ++ dv->init=init; ++ return dv; ++} ++ ++dvar_t* ++sial_dvarptr(int ref, dvar_t*dv) ++{ ++ dv->ref+=ref; ++ return dv; ++} ++ ++dvar_t* ++sial_dvaridx(dvar_t*dv, node_t*n) ++{ ++ if(!dv->idx) { ++ ++ dv->idx=sial_alloc(sizeof(idx_t)); ++ dv->idx->nidx=0; ++ } ++ dv->idx->idxs[dv->idx->nidx++]=n; ++ return dv; ++} ++ ++dvar_t* ++sial_dvarfld(dvar_t*dv, node_t*n) ++{ ++ ++ if(n) { ++ ++ value_t *va=sial_exenode(n); ++ ++ /* get the value_t for the bits */ ++ if(!va) dv->nbits=0; ++ else { ++ dv->nbits=unival(va); ++ sial_freeval(va); ++ } ++ NODE_FREE(n); ++ ++ } else dv->nbits=0; ++ ++ dv->bitfield=1; ++ return dv; ++} ++ ++dvar_t* ++sial_dvarfct(dvar_t*dv, var_t*fargs) ++{ ++ dv->fct=1; ++ dv->fargs=fargs; ++ return dv; ++} ++ ++dvar_t* ++sial_linkdvar(dvar_t*dvl, dvar_t*dv) ++{ ++dvar_t*v; ++ ++ /* need to keep declaration order for variable initialization */ ++ if(dv) { ++ ++ for(v=dvl; v->next; v=v->next); ++ dv->next=0; ++ v->next=dv; ++ } ++ return dvl; ++} ++ ++idx_t * ++sial_newidx(node_t*n) ++{ ++idx_t *idx; ++ ++ if(!instruct) { ++ ++ sial_error("Array supported only in struct/union declarations"); ++ } ++ idx=sial_alloc(sizeof(idx_t)); ++ idx->nidx=1; ++ idx->idxs[0]=n; ++ return idx; ++} ++ ++idx_t * ++sial_addidx(idx_t *idx, node_t*n) ++{ ++ if(idx->nidx==MAXIDX) { ++ ++ sial_error("Maximum number of dimension is %d", MAXIDX); ++ } ++ idx->idxs[idx->nidx++]=n; ++ return idx; ++} ++ ++static svlist svs[S_MAXDEEP]; ++static glo *globs=0; ++int svlev=0; ++ ++void ++sial_refarray(value_t *v, int inc) ++{ ++array_t*ap, *na; ++ ++ if(!v->arr) return; ++ v->arr->ref+=inc; ++ if(v->arr->ref == 0) { ++ ++ /* free all array element. 
*/ ++ for(ap=v->arr->next; ap!=v->arr; ap=na) { ++ ++ na=ap->next; ++ sial_freeval(ap->idx); ++ sial_freeval(ap->val); ++ sial_free(ap); ++ } ++ sial_free(v->arr); ++ v->arr=0; ++ ++ } else { ++ ++ /* do the same to all sub arrays */ ++ for(ap=v->arr->next; ap!=v->arr; ap=na) { ++ ++ na=ap->next; ++ sial_refarray(ap->val, inc); ++ } ++ } ++ ++} ++ ++void ++sial_freedata(value_t *v) ++{ ++ ++ if(is_ctype(v->type.type) || v->type.type == V_STRING) { ++ ++ if(v->v.data) sial_free(v->v.data); ++ v->v.data=0; ++ ++ } ++ sial_refarray(v, -1); ++} ++ ++void ++sial_dupdata(value_t *v, value_t *vs) ++{ ++ ++ if(is_ctype(vs->type.type) || vs->type.type == V_STRING) { ++ ++ v->v.data=sial_alloc(vs->type.size); ++ memmove(v->v.data, vs->v.data, vs->type.size); ++ } ++} ++ ++void ++sial_freeval(value_t *v) ++{ ++ if(!v) return; ++ sial_freedata(v); ++ sial_free(v); ++} ++ ++ ++void ++sial_freevar(var_t*v) ++{ ++ ++ if(v->name) sial_free(v->name); ++ sial_freeval(v->v); ++ sial_freedvar(v->dv); ++ sial_free(v); ++} ++ ++void ++sial_enqueue(var_t*vl, var_t*v) ++{ ++ v->prev=vl->prev; ++ v->next=vl; ++ vl->prev->next=v; ++ vl->prev=v; ++} ++ ++void ++sial_dequeue(var_t*v) ++{ ++ v->prev->next=v->next; ++ v->next->prev=v->prev; ++ v->next=v->prev=v; ++} ++ ++/* ++ This function is called to validate variable declaration. ++ No array decalration for variables (this can only be checked in ++ sial_stat_decl() and sial_file_decl() usingthe idx field ofthe var struct. ++ Same comment for nbits. Only in struct declarations. ++*/ ++void ++sial_validate_vars(var_t*svs) ++{ ++var_t*v, *next; ++ ++ if(!svs) return; ++ ++ for(v=svs->next; v!=svs; v=next) { ++ ++ next=v->next; ++ ++ /* just remove extern variables */ ++ if(sial_isxtern(v->v->type.typattr)) { ++ ++ sial_dequeue(v); ++ sial_freevar(v); ++ ++ } else { ++ ++ if(v->dv->idx) { ++ ++ sial_freesvs(svs); ++ sial_error("Array instanciations not supported."); ++ ++ } ++ if(v->dv->nbits) { ++ ++ sial_freesvs(svs); ++ sial_error("Syntax error. Bit field unexpected."); ++ } ++ } ++ } ++} ++ ++var_t* ++sial_inlist(char *name, var_t*vl) ++{ ++var_t*vp; ++ ++ if(vl) { ++ ++ for(vp=vl->next; vp!=vl; vp=vp->next) { ++ ++ if(!strcmp(name, vp->name)) { ++ ++ return vp; ++ ++ } ++ ++ } ++ } ++ return 0; ++} ++ ++static var_t*apiglobs; ++ ++void ++sial_setapiglobs(void) ++{ ++ apiglobs=sial_newvlist(); ++ sial_add_globals(apiglobs); ++} ++ ++static var_t* ++sial_inglobs(char *name) ++{ ++var_t*vp; ++glo *g; ++ ++ for(g=globs; g; g=g->next) { ++ ++ if((vp=sial_inlist(name, g->vv))) return vp; ++ } ++ return 0; ++} ++ ++ ++void ++sial_chkglobsforvardups(var_t*vl) ++{ ++var_t*v; ++ ++ if(!vl) return; ++ ++ for(v=vl->next; v != vl; v=v->next) { ++ ++ var_t*vg; ++ ++ if(v->name[0] && (vg=sial_inglobs(v->name))) { ++ ++ /* if this is a prototype declaration then skip it */ ++ if(v->dv && v->dv->fct) continue; ++ ++ sial_rerror(&v->dv->pos, "Duplicate declaration of variable '%s', defined at %s:%d" ++ , v->name, vg->dv->pos.file, vg->dv->pos.line); ++ } ++ } ++} ++ ++/* ++ This function scans a list of variable and looks for those that have not been initialized yet. ++ Globals, statics and autos all get initialized through here. 
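++
++   Each pending initializer expression (dv->init) is executed with
++   sial_exenode(), converted to the variable's type with
++   sial_chkandconvert(), and the variable is then flagged as
++   initialized; a failed initializer only produces a warning.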
++*/ ++static void ++sial_inivars(var_t*sv) ++{ ++var_t*v; ++ ++ if(!sv) return; ++ ++ for(v=sv->next; v!=sv; v=v->next) { ++ ++ /* check if we need to initialize it */ ++ if(!v->ini && v->dv && v->dv->init) { ++ ++ value_t *val; ++ srcpos_t pos; ++ ++ sial_curpos(&v->dv->pos, &pos); ++ ++ if((val=sial_exenode(v->dv->init))) { ++ ++ sial_chkandconvert(v->v, val); ++ sial_freeval(val); ++ v->ini=1; ++ ++ } else { ++ ++ sial_rwarning(&v->dv->pos, "Error initializing '%s'", v->name); ++ } ++ sial_curpos(&pos, 0); ++ } ++ } ++} ++ ++/* return the last set of globals */ ++var_t* ++sial_getcurgvar() ++{ ++ if(!globs) return 0; ++ return globs->vv; ++} ++ ++void * ++sial_add_globals(var_t*vv) ++{ ++glo *ng=sial_alloc(sizeof(glo)); ++ ++ sial_inivars(vv); ++ ng->vv=vv; ++ ng->next=globs; ++ sial_chkglobsforvardups(vv); ++ globs=ng; ++ return ng; ++} ++ ++void ++sial_rm_globals(void *vg) ++{ ++glo *g=(glo*)vg; ++ ++ if(globs) { ++ ++ if(globs==g) globs=g->next; ++ else { ++ ++ glo *gp; ++ ++ for(gp=globs; gp; gp=gp->next) { ++ ++ if(gp->next==g) { ++ ++ gp->next=g->next; ++ ++ } ++ ++ } ++ } ++ sial_free(g); ++ } ++} ++ ++ ++ ++/* ++ This is where we implement the variable scoping. ++*/ ++var_t* ++sial_getvarbyname(char *name, int silent, int local) ++{ ++var_t*vp; ++int i, aidx=0; ++ull apiv; ++ ++ for(i=svlev-1; i>=0; i--) { ++ ++ if((vp=sial_inlist(name, svs[i].svs))) { ++ ++ return vp; ++ } ++ if(svs[i].type==S_AUTO && !aidx) aidx=i; ++ ++ /* when we get to the function we're finished */ ++ if(svs[i].type==S_FILE) break; ++ } ++ ++ /* have'nt found any variable named like this one */ ++ /* first check the globals */ ++ if(!(vp=sial_inglobs(name))) { ++ ++ int off=0; ++ ++ /* check the API for a corresponding symbol */ ++ /* Jump over possible leading "IMG_" prefix */ ++ if(!strncmp(name, "IMG_", 4)) off=4; ++ if(!local && API_GETVAL(name+off, &apiv)) { ++ ++ vp=sial_newvar(name); ++ vp->ini=1; ++ ++ sial_defbtype(vp->v, apiv); ++ vp->v->mem=apiv; ++ ++ /* put this on the global list */ ++ sial_enqueue(apiglobs, vp); ++ } ++ else { ++ ++ if(silent) return 0; ++ sial_error("Unknown variable [%s]", name); ++ } ++ } ++ return vp; ++} ++ ++value_t * ++sial_exists(value_t *vname) ++{ ++char *name=sial_getptr(vname, char); ++ ++ return sial_defbtype(sial_newval(), (sial_getvarbyname(name, 1, 0) || sial_funcexists(name))); ++} ++ ++/* get a new empty vlist */ ++var_t* ++sial_newvlist() ++{ ++var_t*p=sial_newvar(""); ++ TAG(p); ++ TAG(p->name); ++ return p; ++} ++ ++/* this is called when we duplicate a list of automatic variables */ ++var_t* ++sial_dupvlist(var_t*vl) ++{ ++var_t*nv=(var_t*)sial_newvlist(); /* new root */ ++var_t*vp; ++ ++ for(vp=vl->next; vp !=vl; vp=vp->next) { ++ ++ var_t*v=sial_newvar(vp->name); /* new var_t*/ ++ ++ v->dv=vp->dv; ++ v->dv->refcount++; ++ v->ini=vp->ini; ++ sial_dupval(v->v, vp->v); ++ ++ /* we start with a new array for automatic variable */ ++ sial_refarray(v->v, -1); ++ v->v->arr=0; ++ sial_setarray(&v->v->arr); ++ ++ /* can't check ctypes for initialisation */ ++ if(is_ctype(v->v->type.type)) v->ini=1; ++ sial_enqueue(nv, v); ++ ++ } ++ return nv; ++} ++ ++void ++sial_addtolist(var_t*vl, var_t*v) ++{ ++ if(!v->name[0] || !sial_inlist(v->name, vl)) { ++ ++ sial_enqueue(vl, v); ++ ++ } else { ++ ++ /* if this is a prototype declaration then skip it */ ++ if(v->dv && v->dv->fct) return; ++ ++ sial_error("Duplicate declaration of variable %s", v->name); ++ } ++} ++ ++static void ++sial_chkforvardups(var_t*vl) ++{ ++var_t*v; ++ ++ if(!vl) return; ++ ++ for(v=vl->next; 
v!=vl; v=v->next) { ++ ++ var_t*v2=v->next; ++ ++ for(v2=v->next; v2!=vl; v2=v2->next) { ++ ++ if(v2->name[0] && !strcmp(v->name, v2->name)) { ++ ++ sial_rerror(&v2->dv->pos, "Duplicate declaration of variable '%s'", v->name); ++ ++ } ++ } ++ } ++} ++ ++static int takeproto=0; ++void sial_settakeproto(int v) { takeproto=v; } ++ ++ ++/* ++ This function scans a new list of declared variables ++ searching for static variables. ++*/ ++void ++sial_addnewsvs(var_t*avl, var_t*svl, var_t*nvl) ++{ ++var_t*v; ++ ++ if(nvl) { ++ ++ for(v=nvl->next; v!=nvl; ) { ++ ++ var_t*next; ++ ++ /* save next before sial_enqueue() trashes it ... */ ++ next=v->next; ++ ++ /* if this is a external variable or prototype function declaration ++ skip it */ ++ if((!takeproto && v->dv->fct && !v->dv->ref) || sial_isxtern(v->v->type.typattr)) { ++ ++ v=next; ++ continue; ++ } ++ ++ if(sial_isstatic(v->v->type.typattr)) { ++ ++ sial_addtolist(svl, v); ++ ++ } else { ++ ++ sial_addtolist(avl, v); ++ } ++ /* with each new variables check for duplicate declarations */ ++ sial_chkforvardups(avl); ++ sial_chkforvardups(svl); ++ ++ v=next; ++ } ++ /* discard nvl's root */ ++ sial_freevar(nvl); ++ } ++} ++ ++int ++sial_addsvs(int type, var_t*sv) ++{ ++int curlev=svlev; ++ ++ if(svlev==S_MAXDEEP) { ++ ++ sial_error("Svars stack overflow"); ++ ++ } else { ++ ++ svs[svlev].type=type; ++ svs[svlev].svs=sv; ++ svlev++; ++ ++ /* perform automatic initializations */ ++ sial_inivars(sv); ++ ++ /* if S_FILE then we are entering a function so start a newset of ++ stack variables */ ++ if(type == S_FILE ) { ++ ++ (void)sial_addsvs(S_AUTO, (var_t*)sial_newvlist()); ++ ++ } ++ } ++ return curlev; ++} ++ ++void ++sial_add_statics(var_t*var) ++{ ++int i; ++ ++ for(i=svlev-1;i>=0;i--) { ++ ++ if(svs[i].type==S_FILE ) { ++ ++ if(svs[i].svs) ++ sial_enqueue(svs[i].svs, var); ++ else ++ svs[i].svs=var; ++ return; ++ ++ } ++ } ++ sial_rwarning(&var->dv->pos, "No static context for var %s.", var->name); ++} ++ ++void sial_freesvs(var_t*v) ++{ ++var_t*vp; ++ ++ for(vp=v->next; vp != v; ) { ++ ++ var_t*vn=vp->next; ++ ++ sial_freevar(vp); ++ ++ vp=vn; ++ } ++ sial_freevar(v); ++} ++ ++int ++sial_getsvlev() { return svlev; } ++ ++/* reset the current level of execution and free up any automatic ++ variables. */ ++void ++sial_setsvlev(int newlev) ++{ ++int lev; ++ ++ for(lev=svlev-1; lev>=newlev; lev--) { ++ ++ if(svs[lev].type==S_AUTO) { ++ ++ sial_freesvs(svs[lev].svs); ++ ++ } ++ ++ } ++ svlev=newlev; ++} ++ ++/* ++ called by the 'var in array' bool expression. ++*/ ++int ++sial_lookuparray(node_t*vnode, node_t*arrnode) ++{ ++value_t *varr=NODE_EXE(arrnode); ++array_t*ap, *apr=varr->arr; ++value_t *val; ++int b=0; ++ ++ val=NODE_EXE(vnode); ++ ++ if(apr) { ++ ++ for(ap=apr->next; ap != apr; ap=ap->next) { ++ ++ if(VAL_TYPE(ap->idx) == VAL_TYPE(val)) { ++ ++ switch(VAL_TYPE(val)) { ++ case V_STRING: b=(!strcmp(ap->idx->v.data, val->v.data)); break; ++ case V_BASE: b=(unival(ap->idx)==unival(val)); break; ++ case V_REF: ++ if(sial_defbsize()==4) ++ b=(ap->idx->v.ul==val->v.ul); ++ else ++ b=(ap->idx->v.ull==val->v.ull); ++ break; ++ default: ++ sial_rerror(&vnode->pos, "Invalid indexing type %d", VAL_TYPE(val)); ++ } ++ if(b) break; ++ } ++ ++ } ++ } ++ sial_freeval(val); ++ sial_freeval(varr); ++ return b; ++} ++ ++/* ++ The actual for(i in array) core... 
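++
++   A sketch of the corresponding script construct (loop and variable
++   names are illustrative):
++
++       for(i in arr) {
++           ... loop body: 'i' takes each index stored in 'arr' ...
++       }
++
++   sial_walkarray() assigns each stored index to the loop variable
++   and invokes the supplied callback for the loop body.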
++*/ ++void ++sial_walkarray(node_t*varnode, node_t*arrnode, void (*cb)(void *), void *data) ++{ ++value_t *v; ++value_t *av; ++array_t*ap, *apr; ++ ++ sial_setini(varnode); ++ v=NODE_EXE(varnode); ++ ++ av=NODE_EXE(arrnode); ++ ++ if(av->arr) { ++ ++ for(apr=av->arr, ap=apr->next; ap != apr; ap=ap->next) { ++ ++ /* we set the value_t of the variable */ ++ sial_setval(v,ap->idx); ++ ++ (cb)(data); ++ ++ } ++ } ++ sial_freeval(v); ++ sial_freeval(av); ++} ++ ++/* scan the current array for a specific index and return value_t ++ XXX should use some hashing tables here for speed and scalability */ ++array_t* ++sial_getarrval(array_t**app, value_t *idx) ++{ ++array_t*ap, *apr; ++ ++ /* sial_setarray(app); AAA comment out */ ++ apr=*app; ++ ++ for(ap=apr->next; ap != apr; ap=ap->next) { ++ ++ if(ap->idx->type.type == idx->type.type) { ++ ++ int b=0; ++ ++ switch(idx->type.type) { ++ case V_STRING: b=(!strcmp(ap->idx->v.data, idx->v.data)); ++ break; ++ case V_BASE: b=(unival(ap->idx)==unival(idx)); ++ break; ++ case V_REF: ++ if(sial_defbsize()==4) ++ b=(ap->idx->v.ul==idx->v.ul); ++ else ++ b=(ap->idx->v.ull==idx->v.ull); ++ break; ++ default: ++ sial_error("Invalid index type %d", idx->type.type); ++ } ++ ++ if(b) { ++ ++ return ap; ++ ++ } ++ } ++ } ++ ++ /* we have not found this index, create one */ ++ ap=(array_t*)sial_calloc(sizeof(array_t)); ++ ap->idx=sial_makebtype(0); ++ sial_dupval(ap->idx, idx); ++ ++ /* just give it a int value_t of 0 for now */ ++ ap->val=sial_makebtype(0); ++ ++ /* we must set the same refenrence number as the ++ upper level array_t*/ ++ ap->val->arr->ref=apr->ref; ++ ++ /* link it in */ ++ ap->prev=apr->prev; ++ ap->next=apr; ++ apr->prev->next=ap; ++ apr->prev=ap; ++ ap->ref=0; ++ return ap; ++} ++ ++value_t * ++sial_intindex(value_t *a, int idx) ++{ ++value_t *v=sial_makebtype(idx); ++array_t*ap=sial_getarrval(&a->arr, v); ++ ++ sial_dupval(v, ap->val); ++ return v; ++} ++ ++value_t * ++sial_strindex(value_t *a, char *idx) ++{ ++value_t *v=sial_makestr(idx); ++array_t*ap=sial_getarrval(&a->arr, v); ++ ++ sial_dupval(v, ap->val); ++ return v; ++} ++ ++ ++void ++sial_setarrbval(array_t*a, int val) ++{ ++ sial_defbtype(a->val, (ull)val); ++} ++ ++array_t* ++sial_addarrelem(array_t**arr, value_t *idx, value_t *val) ++{ ++array_t*na; ++ ++ na=sial_getarrval(arr, idx); ++ ++ /* copy new val over */ ++ sial_freeval(na->val); ++ na->val=val; ++ ++ return na; ++} ++ ++/* insert a variable at the end of the list */ ++static void ++sial_varinsert(var_t*v) ++{ ++int i; ++ ++ for(i=svlev-1;i>=0;i--) { ++ ++ if(svs[i].type==S_AUTO) { ++ ++ sial_enqueue(svs[i].svs, v); ++ break; ++ } ++ } ++} ++ ++/* Dupicate and add a set of variables. Used to setup a function execution. ++ The new veriables are the actual parameters of the function so we mark them ++ As being initialized. 
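++
++   In other words, when a script function is called, each actual
++   parameter is added to the callee's automatic-variable list via
++   sial_varinsert() with its 'ini' flag already set, so using a
++   parameter never triggers the "used before being initialized" error.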
++*/ ++void ++sial_add_auto(var_t*nv) ++{ ++ nv->ini=1; ++ sial_varinsert(nv); ++} ++ ++void ++sial_valindex(value_t *var, value_t *idx, value_t *ret) ++{ ++ if(is_ctype(idx->type.type)) { ++ ++ sial_error("Invalid indexing type"); ++ ++ } else { ++ ++ array_t*a; ++ ++ a=sial_getarrval(&var->arr, idx); ++ ++ /* this is the first level of indexing through a variable */ ++ sial_dupval(ret, a->val); ++ ret->set=1; ++ ret->setval=a->val; ++ } ++} ++ ++void ++sial_addvalarray(value_t*v, value_t*idx, value_t*val) ++{ ++ sial_addarrelem(&v->arr, idx, val); ++ sial_freeval(idx); ++} ++ ++static void ++prtval(value_t*v) ++{ ++value_t*fmt=sial_makestr("%?"); ++ ++ sial_printf(fmt, v, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); ++ sial_freeval(fmt); ++} ++ ++static void ++prlevel(char *name, value_t*root, int level) ++{ ++ARRAY_S *arr; ++ ++ for(arr=root->arr->next; arr != root->arr; arr=arr->next) { ++ ++ printf("%*s%s[", level*3, "", name); ++ prtval(arr->idx); ++ printf("]="); ++ prtval(arr->val); ++ printf("\n"); ++ prlevel(name, arr->val, level+1); ++ } ++} ++ ++/* sial_prarr builtin */ ++value_t* ++sial_prarr(value_t*vname, value_t*root) ++{ ++char *name=sial_getptr(vname, char); ++ printf("%s=", name); ++ prtval(root); ++ printf("\n"); ++ prlevel(name, root, 1); ++ return sial_makebtype(0); ++} ++ ++var_t* ++sial_newvar(char *name) ++{ ++var_t*v=sial_calloc(sizeof(var_t)); ++char *myname=sial_alloc(strlen(name)+1); ++ ++ TAG(myname); ++ strcpy(myname,name); ++ v->name=myname; ++ v->v=sial_makebtype(0); ++ v->v->setval=v->v; ++ v->next=v->prev=v; ++ return v; ++} ++ ++ ++typedef struct { ++ node_t*n; ++ char name[1]; ++} vnode_t ; ++ ++static int insizeof=0; ++void sial_setinsizeof(int v) { insizeof=v;} ++ ++value_t * ++sial_exevar(void *arg) ++{ ++vnode_t *vn = arg; ++value_t *nv; ++var_t*curv; ++srcpos_t pos; ++ ++ sial_curpos(&vn->n->pos, &pos); ++ ++ if(!(curv=sial_getvarbyname(vn->name, 0, 0))) { ++ ++ sial_error("Oops! Var ref1.[%s]", vn->name); ++ ++ } ++ if(!curv->ini && !insizeof) { ++ ++ sial_error("Variable [%s] used before being initialized", curv->name); ++ ++ } ++ ++ nv=sial_newval(); ++ sial_dupval(nv,curv->v); ++ nv->set=1; ++ nv->setval=curv->v; ++ nv->setfct=sial_setfct; ++ ++ sial_curpos(&pos, 0); ++ ++ return nv; ++} ++ ++/* make sure a variable is flaged as being inited */ ++void ++sial_setini(node_t*n) ++{ ++ if((void*)n->exe == (void*)sial_exevar) { ++ ++ var_t*v=sial_getvarbyname(((vnode_t*)(n->data))->name, 0, 0); ++ v->ini=1; ++ } ++} ++ ++ ++/* get the name of a function through a variable */ ++char * ++sial_vartofunc(node_t*name) ++{ ++char *vname=NODE_NAME(name); ++value_t *val; ++ ++ /* if the nore is a general expression, then vname is 0 */ ++ if(!vname) { ++ ++ val=sial_exenode(name); ++ ++ } else { ++ ++ var_t*v; ++ ++ v=sial_getvarbyname(vname, 1, 1); ++ if(!v) return vname; ++ val=v->v; ++ } ++ ++ switch(val->type.type) ++ { ++ case V_STRING: ++ { ++ char *p=sial_alloc(val->type.size+1); ++ /* return the value_t of that string variable */ ++ strcpy(p, val->v.data); ++ sial_free(vname); ++ return p; ++ } ++ default: ++ /* return the name of the variable itself */ ++ sial_error("Invalid type for function pointer, expected 'string'."); ++ return vname; ++ } ++} ++ ++char * ++sial_namevar(vnode_t*vn) ++{ ++char *p; ++ ++ p=sial_strdup(vn->name); ++ TAG(p); ++ return p; ++} ++ ++static void ++sial_freevnode(vnode_t*vn) ++{ ++ sial_free(vn); ++} ++ ++/* ++ create or return existing variable node. 
++*/ ++node_t* ++sial_newvnode(char *name) ++{ ++node_t*n=sial_newnode(); ++vnode_t*vn=sial_alloc(sizeof(vnode_t)+strlen(name)+1); ++ ++ TAG(vn); ++ ++ strcpy(vn->name, name); ++ n->exe=(xfct_t)sial_exevar; ++ n->free=(ffct_t)sial_freevnode; ++ n->name=(nfct_t)sial_namevar; ++ n->data=vn; ++ vn->n=n; ++ ++ sial_setpos(&n->pos); ++ ++ return n; ++} ++ ++#define TO (*to) ++#define FRM (*frm) ++ ++void ++sial_cparrelems(array_t**to, array_t**frm) ++{ ++array_t*ap; ++ ++ if(FRM) { ++ ++ sial_setarray(to); ++ for(ap=FRM->next; ap!=FRM; ap=ap->next) { ++ ++ array_t*na=sial_calloc(sizeof(array_t)); ++ ++ /* copy value_ts */ ++ sial_dupval(na->idx, ap->idx); ++ sial_dupval(na->val, ap->val); ++ ++ /* link it in */ ++ na->prev=TO->prev; ++ na->next=TO; ++ TO->prev->next=na; ++ TO->prev=na; ++ na->ref=1; ++ ++ /* copy that branch */ ++ sial_cparrelems(&na->val->arr, &ap->val->arr); ++ } ++ } ++} ++ +--- crash/extensions/libsial/sial_api.h.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_api.h 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,267 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++ ++/* minor and major version number */ ++#define S_MAJOR 3 ++#define S_MINOR 0 ++ ++#define MAX_SYMNAMELEN 100 ++#define MAXIDX 20 ++ ++/* abi values */ ++#define ABI_MIPS 1 ++#define ABI_INTEL_X86 2 ++#define ABI_INTEL_IA 3 ++#define ABI_S390 4 ++#define ABI_S390X 5 ++#define ABI_PPC64 6 ++ ++/* types of variables */ ++#define V_BASE 1 ++#define V_STRING 2 ++#define V_REF 3 ++#define V_ENUM 4 ++#define V_UNION 5 ++#define V_STRUCT 6 ++#define V_TYPEDEF 7 ++#define V_ARRAY 8 ++ ++#define ENUM_S struct enum_s ++#define DEF_S struct def_s ++#define MEMBER_S struct member_s ++#define TYPE_S struct type_s ++#define VALUE_S struct value_s ++#define ARRAY_S struct array_s ++#define NODE_S struct node_s ++#define IDX_S struct idx_s ++#define VAR_S struct var_s ++ ++ENUM_S; ++DEF_S; ++MEMBER_S; ++TYPE_S; ++VALUE_S; ++ARRAY_S; ++NODE_S; ++IDX_S; ++VAR_S; ++ ++#if linux ++#include ++typedef uint64_t ull; ++typedef uint32_t ul; ++#else ++typedef unsigned long long ull; ++typedef unsigned long ul; ++#endif ++ ++/* THe API function calls numbers */ ++typedef struct { ++ ++ int (*getmem)(ull, void *, int); /* write to system image */ ++ int (*putmem)(ull, void *, int); /* read from system image */ ++ char* (*member)(char *, ull, TYPE_S * /* get type and positional information ... */ ++ , MEMBER_S *, ull *lidx); /* ... about the member of a structure */ ++ int (*getctype)(int ctype, char * /* get struct/union type information */ ++ , TYPE_S*); ++ char* (*getrtype)(ull, TYPE_S *); /* get complex type information */ ++ int (*alignment)(ull); /* get alignment value for a type */ ++ int (*getval)(char *, ull *); /* get the value of a system variable */ ++ ENUM_S* (*getenum)(char *name); /* get the list of symbols for an enum type */ ++ DEF_S* (*getdefs)(void); /* get the list of compiler pre-defined macros */ ++ uint8_t (*get_uint8)(void*); ++ uint16_t (*get_uint16)(void*); ++ uint32_t (*get_uint32)(void*); ++ uint64_t (*get_uint64)(void*); ++ char* (*findsym)(char*); ++} apiops; ++ ++/* ++ Builtin API defines.... ++*/ ++/* call this function to install a new builtin ++ ++ proto is the function prototype ex: ++ struct proc* mybuiltin(int flag, char *str); ++ ++ "mybuiltin" will be the sial name for the function. ++ "fp" is the pointer to the builtin function code. 
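++
++   For illustration only (all names below are hypothetical), a
++   loadable extension could register one builtin through the dso
++   entry points declared further down; the trailing null entry is an
++   assumption about how the table is terminated:
++
++       VALUE_S *mybuiltin(VALUE_S *vflag, VALUE_S *vstr)
++       {
++       int flag = sial_getval(vflag);
++       char *str = sial_getptr(vstr, char);
++
++           if(flag) sial_msg("%s\n", str);
++           return sial_makebtype(flag);
++       }
++
++       BT_SPEC_TABLE = {
++           { "int mybuiltin(int flag, char *str)", (bf_t*)mybuiltin },
++           { 0, 0 }
++       };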
++ ++*/ ++typedef VALUE_S* bf_t(VALUE_S*, ...); ++typedef struct btspec { ++ char *proto; ++ bf_t *fp; ++} btspec_t; ++ ++/* dso entry points */ ++#define BT_SPEC_TABLE btspec_t bttlb[] ++#define BT_SPEC_SYM "bttlb" ++#define BT_INIDSO_FUNC int btinit ++#define BT_INIDSO_SYM "btinit" ++#define BT_ENDDSO_FUNC void btend ++#define BT_ENDDSO_SYM "btend" ++ ++/* maximum number of parameters that can be passed to a builtin */ ++#define BT_MAXARGS 20 ++ ++extern apiops *sial_ops; ++#define API_GETMEM(i, p, n) ((sial_ops->getmem)((i), (p), (n))) ++#define API_PUTMEM(i, p, n) ((sial_ops->putmem)((i), (p), (n))) ++#define API_MEMBER(n, i, tm, m, l) ((sial_ops->member)((n), (i), (tm), (m), (l))) ++#define API_GETCTYPE(i, n, t) ((sial_ops->getctype)((i), (n), (t))) ++#define API_GETRTYPE(i, t) ((sial_ops->getrtype)((i), (t))) ++#define API_ALIGNMENT(i) ((sial_ops->alignment)((i))) ++#define API_GETVAL(n, v) ((sial_ops->getval)((n), (v))) ++#define API_GETENUM(n) ((sial_ops->getenum)(n)) ++#define API_GETDEFS() ((sial_ops->getdefs)()) ++#define API_GET_UINT8(ptr) ((sial_ops->get_uint8)(ptr)) ++#define API_GET_UINT16(ptr) ((sial_ops->get_uint16)(ptr)) ++#define API_GET_UINT32(ptr) ((sial_ops->get_uint32)(ptr)) ++#define API_GET_UINT64(ptr) ((sial_ops->get_uint64)(ptr)) ++#define API_FINDSYM(p) ((sial_ops->findsym)(p)) ++ ++#if linux ++# if __LP64__ ++# define sial_getptr(v, t) ((t*)sial_getval(v)) ++# else ++# define sial_getptr(v, t) ((t*)(ul)sial_getval(v)) ++# endif ++#else ++# if (_MIPS_SZLONG == 64) ++# define sial_getptr(v, t) ((t*)sial_getval(v)) ++# else ++# define sial_getptr(v, t) ((t*)(ul)sial_getval(v)) ++# endif ++#endif ++ ++/* startup function */ ++int sial_open(void); /* initialize a session with sial */ ++void sial_apiset(apiops *, int, int, int);/* define the API for a connection */ ++void sial_setofile(void *); /* sial should output messages to this file */ ++void *sial_getofile(void); /* where is sial currently outputing */ ++void sial_setmpath(char *p); /* set the search path for sial scripts */ ++void sial_setipath(char *p); /* set the search path for sial include files */ ++VAR_S *sial_builtin(char *proto, bf_t);/* install a builtin function */ ++int sial_cmd(char *name, char **argv, int argc); /* execute a command w/ args */ ++ ++/* load/unload of script files and directories */ ++ull sial_load(char *); /* load/parse a file */ ++ull sial_unload(char *); /* load/parse a file */ ++void sial_loadall(void); /* load all files found in set path */ ++ ++/* variables associated functions */ ++VAR_S *sial_newvar(char *); /* create a new static/auto variable */ ++void *sial_add_globals(VAR_S*); /* add a set of variable to the globals context */ ++VAR_S *sial_newvlist(void); /* create a root for a list of variables */ ++ ++int sial_tryexe(char *, char**, int);/* try to execute a function */ ++int sial_parsetype(char*, TYPE_S *, int);/* parse a typedef line */ ++ull sial_exefunc(char *, VALUE_S **);/* to execute a function defined in sial */ ++ ++/* help related function */ ++void sial_showallhelp(void); /* display help info for all commands */ ++int sial_showhelp(char *); /* display help info for a single command */ ++ ++/* allocation related function */ ++void *sial_alloc(int); /* allocate some memory */ ++void *sial_calloc(int); /* allocate some 0 filed memory */ ++void sial_free(void*); /* free it */ ++char *sial_strdup(char*); /* equivalent of strdup() returns sial_free'able char */ ++void *sial_dupblock(void *p); /* duplicate the contain of a block of allocated memory */ ++void 
*sial_realloc(void *p, int size); /* reallocate a block */ ++void sial_maketemp(void *p); /* put a block on the temp list */ ++void sial_freetemp(void); /* free the temp list */ ++VALUE_S *sial_makebtype(ull); /* create a default base type value (int) */ ++ ++/* handle values */ ++VALUE_S *sial_newval(void); /* get a new placeholder for a value */ ++void sial_freeval(VALUE_S *); /* free a value* and associated structs */ ++VALUE_S *sial_makestr(char *); /* create a string value */ ++ull sial_getval(VALUE_S*); /* transform a random value to a ull */ ++VALUE_S *sial_cloneval(VALUE_S *); /* make a clone of a value */ ++ ++/* array related */ ++/* add a new array element to a value */ ++void sial_addvalarray(VALUE_S*v, VALUE_S*idx, VALUE_S*val); ++/* return the value associated with a int index */ ++VALUE_S *sial_intindex(VALUE_S *, int); ++/* return the value associated with a 'string' index */ ++VALUE_S *sial_strindex(VALUE_S *, char *); ++/* set the value of an array element */ ++void sial_setarrbval(ARRAY_S*, int); ++/* get the array element coresponding to index */ ++ARRAY_S *sial_getarrval(ARRAY_S**, VALUE_S*); ++/* get the initiale array for a variable */ ++ARRAY_S *sial_addarrelem(ARRAY_S**, VALUE_S*, VALUE_S*); ++ ++/* type manipulation */ ++int sial_is_struct(int); ++int sial_is_enum(int); ++int sial_is_union(int); ++int sial_is_typedef(int); ++int sial_type_gettype(TYPE_S*t); ++int sial_chkfname(char *fname, void *vfd); ++int sial_loadunload(int load, char *name, int silent); ++ ++void sial_type_settype(TYPE_S*t, int type); ++void sial_setcallback(void (*scb)(char *, int)); ++void sial_vilast(void); ++void sial_vi(char *fname, int file); ++void sial_type_setsize(TYPE_S*t, int size); ++int sial_type_getsize(TYPE_S*t); ++void sial_type_setidx(TYPE_S*t, ull idx); ++ull sial_type_getidx(TYPE_S*t); ++void sial_type_setidxlst(TYPE_S*t, int *idxlst); ++void sial_type_setref(TYPE_S*t, int ref, int type); ++void sial_type_setfct(TYPE_S*t, int val); ++void sial_type_mkunion(TYPE_S*t); ++void sial_type_mkenum(TYPE_S*t); ++void sial_type_mkstruct(TYPE_S*t); ++void sial_type_mktypedef(TYPE_S*t); ++TYPE_S*sial_newtype(void); ++void sial_freetype(TYPE_S*t); ++TYPE_S*sial_getctype(int ctype_t, char *name, int silent); ++void sial_type_free(TYPE_S* t); ++void sial_pushref(TYPE_S*t, int ref); ++void sial_duptype(TYPE_S*to, TYPE_S*from); ++int sial_defbsize(void); ++TYPE_S*sial_newbtype(int token); ++void sial_setdbg(unsigned int lvl); ++unsigned int sial_getdbg(void); ++void sial_setname(char *name); ++char *sial_getname(void); ++void sial_setclass(char *class); ++char **sial_getclass(void); ++ ++/* struct member functions */ ++void sial_member_soffset(MEMBER_S*m, int offset); ++void sial_member_ssize(MEMBER_S*m, int size); ++void sial_member_sfbit(MEMBER_S*m, int fbit); ++void sial_member_snbits(MEMBER_S*m, int nbits); ++void sial_member_sname(MEMBER_S*m, char *name); ++ ++/* enums */ ++ENUM_S* sial_add_enum(ENUM_S* e, char* name, int val); ++/* defines */ ++DEF_S* sial_add_def(DEF_S* d, char *name, char *val); ++ ++/* error handling */ ++/* display error w/ file/line coordinates */ ++/* does not return */ ++void sial_error(char *, ...); ++/* display warning w/ file/line coordinates */ ++void sial_warning(char *, ...); ++/* display a message and continue */ ++void sial_msg(char *, ...); ++/* display a debug message */ ++#define DBG_TYPE 0x00000001 ++#define DBG_STRUCT 0x00000002 ++#define DBG_NAME 0x10000000 // ++#define DBG_ALL 0x0fffffff ++void sial_dbg(int class, int level, char *, ...); ++void 
sial_dbg_named(int class, char *name, int level, char *, ...); ++ ++/* parsers debug flags */ ++extern int sialdebug, sialppdebug; +--- crash/extensions/libsial/README.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/README 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1024 @@ ++ ++ SIAL : Simple Image Access Language ++ =================================== ++ ++ Sial is a C interpreter that permits easy access to the symbol ++and type information stored in a executable image like a coredump or live ++memory interfaces (e.g. /dev/kmem, /dev/mem). It support a FULL C ++syntax and the same variable and function scope and type. Symbols and ++type information in the image become standard variables and types in ++the sial scripts's context. ++ ++ This README focuses on the differences between sial and a C compiler ++so, for C syntax information, please refer to a C reference manual. I ++also explain the mechanisms of the API that allow sial to be inserted ++into any debugging tool that deals with objects and can furnish symbol ++and type information to sial through the API. The more specific lcrash ++sial implementation is described and a howto on creating commands is ++also given here. ++ ++Preprocessor commands ++--------------------- ++ ++ All preprocessor commands I knew of are supported. #define, ++ #undef, #ifdef, #if, #ifndef, #else, #elif, #endif and #include. ++ ++ This one is ignored: #ident #pragma ++ ++ Sial has a builtin secondary parser for preprocessor expression ++ evaluation. ++ ++Symbols ++------- ++ ++ The symbols from the system image and their associated value ++ are available from within sial. Since, most of the time, no ++ type information is associated with the symbols, a reference to ++ a symbol return the actual address of the symbol within the ++ system image. So you might say that sial adds a level of ++ reference for symbol information. Let's say there is a (int) ++ symbol called "nproc" that contains the number of processes ++ currently running on the system. To get the value of nproc from ++ sial one would have to write something like this: ++ ++ void ++ showprocs() ++ { ++ int i; ++ int np; ++ ++ np=*(int*)nproc; ++ ++ for(i=0;ip_next) ++ ++ do something... ++ } ++ } ++ ++Variable Initialization ++-------------------- ++ ++ Variable assignment at the time of declaration is supported. ++ Also, the function __init() will be executed, if it is defined, ++ right after the macro file as been compiled. ++ ++ Using an uinitialized variable will generate a run time error. ++ ++Variable types ++-------------- ++ ++ All types made available from the API can be used. ++ These are the types already defined in the executable image. ++ ++ Floating point types are not supported at this time. I have no ++ plan to support it. ++ ++ Declaration of arrays is not supported. To access a array from ++ the image, use a pointer. ++ ++ Unions and structures type declarations are supported and can be ++ used to create additional types that become available within ++ the same macro file. ++ ++ Typedef are supported. ++ ++ Function pointers are not supported (use 'string' type instead, ++ see "Operators" below) ++ ++ Sial defines a 'string' types to handle ansi C strings within ++ the interpreter. This string type also support some of the ++ operator (+, ==, =, !=, etc... see below) ++ ++Variable Scope ++-------------- ++ ++ All symbols available in the system image become global ++ variable in the sial context. 
++ ++ Variable declared within sial can be given one of 3 different ++ scopes like in normal C. ++ ++ GLOBAL: A variable that is declared outside a ++ function that is not marked as static. this variable if ++ available to all macros that have been load into the ++ interpreter. ++ ++ Ex: file1: ++ ++ int global; int func() { } ++ ++ file2: ++ ++ func2() ++ { ++ int i; ++ ++ i=global; ++ } ++ ++ NB: since sial currently validates variable existence only at ++ run time there is no need to declare a 'global' as an 'extern' ++ variable. At run time, if none of the currently loaded macros ++ define a global variable called 'global' then 'i=global' will ++ fail with a 'unknown variable' error. ++ ++ FILE: A Variable that is declared outside any ++ functions that is tagged with the static keyword. This ++ variables is available to all functions defined in the ++ same macro file. ++ ++ Ex: ++ ++ file1: ++ ++ static int maxproc=100; ++ sraruc int array; ++ ++ __init() ++ { ++ int i; ++ ++ for(i=0;i<10;i++) array[i]=-1; ++ ++ } ++ ++ void func1() ++ { ++ int i; ++ ++ for(i=0;i, <=, >=. ++ ++ examples: ++ ++ s = "hello" + "World"; ++ ++ if("hello" == "world" ) { ... } ++ ++ The 'in' operator is supported on dynamic arrays. ++ ++ if(i in array) { ... } ++ str=i in array ? "yes" : "no"; ++ ++ Function callbacks ++ ------------------ ++ ++ Function calls through function pointers is not possible ++ currently. Instead, use a 'string' type to achieve the same ++ result. When sial is about to perform a call, it will look at ++ the type of the variable used to name the function. If the type ++ is 'string' it will use the value that string and call that ++ function instead. ++ ++ func0(int i, int j) ++ { ++ printf("i=%d j=%d\n", i,j); ++ } ++ func1(string func) ++ { ++ func(1,2); ++ } ++ main() ++ { ++ func1("func0"); ++ } ++ ++ In the above example, func1() ends up calling func0() not ++ func. This can be used as a callback mechanism, specially ++ useful for creating generating function that call s linked list ++ of objects and calls a variable function for each object. Like ++ a function that walks tasks or procs. ++ ++ The sizeof() operator is evaluated at execution time. So you ++ can supply a type, a variable, or any valid expression and the ++ appropriate size will be returned. The expression *will* be ++ executed, so be careful. ++ ++Statements ++---------- ++ ++ All C statements except 'goto' are supported. ++ ++ The 'for(i in array)' is supported on dynamic arrays. ++ ++Dynamic arrays ++------------------------------- ++ ++ When indexing through a non pointer variable you end up ++ creating a dynamic array. ++ ++ Example: ++ ++ int func() ++ { ++ char *cp, c; ++ int array, i; ++ ++ cp=(char *)symbol; ++ c=cp[10]; ++ ++ array[0]="one string"; ++ array[12]="second string"; ++ ++ for(i in array) { ++ ++ printf("array[%d]=%s\n", i, array[i]); ++ } ++ ++ } ++ ++ In the 'c=cp[10]' statement, sial goes to the system image to ++ get one 'char' at the address symbol+10. ++ ++ In the second case, 'array' is not a pointer, it's a 'int'. So ++ sial threats all indexing through it as dynamic. 
++ ++ Additionally, sial supports multi levels of dynamic index, ++ which makes possible to create random trees of indexed values: ++ ++ int func() ++ { ++ int array, i, j; ++ ++ array[10]="array10"; ++ array[10][3]="array10,3"; ++ array[20]="array20"; ++ array[20][99]="array20,99"; ++ ++ for(i in array) { ++ ++ printf("array[%d]=%s\n", i, array[i]); ++ ++ for(j in array[i]) { ++ ++ printf("array[%d][%d]=%s\n", i, j, array[i][j]); ++ ++ } ++ ++ } ++ } ++ ++ I think it is a good thing to have, since system image access ++ and analysis require frequent lists search. So for example, ++ with dynamic arrays, one can walk the proc list taking note of ++ the proc*, then walking a user thread list taking note of the ++ thread* and gathering some metrics for each of these threads. ++ In order to get to these metrics at some later point in the ++ execution, something like this could be used: ++ ++ func() ++ { ++ proc *p; ++ ++ for(p in procs) { ++ ++ thread *t; ++ ++ for(t in procs[p]) { ++ ++ int rss, size; ++ ++ /* we use index 0 for rss and 1 for size */ ++ printf("proc %p, thread %p, rss:size = %d:%d\n" ++ , p, t, procs[p][t][0], procs[p][t][1]); ++ } ++ } ++ } ++ ++ Arrays are always passed by reference. On creation the ++ reference count is set to one. So this array will exist ++ untill the variable it's assigned to dies. ++ ++ Arrays can be created by dso's. See the DSo section below for more ++ information and examples of this. ++ ++ ++Sial API ++-------- ++ ++ Sial can be integrated into any tool that needs to access ++ symbol and type information from some object. Currently it is ++ integrated in lcrash and icrash (tools that access Linux and ++ Irix kernel images respectively), but it should be possible to ++ use it, for example, in dbx or gdb. The API gives a simple ++ interface through which the host application sends symbol and ++ type (including member) information and gives access to the ++ image itself so that sial can read random blocks of data from ++ the image. ++ ++ >> sial_builtin(bt *bt) ++ ++ Install some set of builtin function. See below ++ (builtin api). ++ ++ ++ >> sial_chkfname(char *fname, int silent); ++ ++ Will check for the exsistance of a function in sial. ++ Typically used to check xtra entry points before the ++ application registers a new command (see sial_setcallback). ++ ++ >> sial_open(): ++ ++ The first function that should be called is sial_open(). ++ sial_open() will return a value of 1 if everything is ok or 0 ++ in case of some problem. This call initializes internal date ++ for the sial package. ++ ++ >> sial_setapi(apiops* ops, int nbytes): ++ ++ This function will setup the callbacks that sial will use ++ to get information from the application. ++ ++ See 'callback interface' below. ++ ++ >> sial_load(char *name); ++ ++ To have sial load and compile a macro or a set of macro ++ use sial_load(). Parameter name gives the name of the ++ file to compile. If name points to a directory instead, ++ then all the files in this directory will be load. So ++ an application would call sial_load() when it first ++ starts up specifying some well known files or ++ directories to load. For example $HOME/.xxx and ++ /etc/xxx would be loaded, ~/.xxx containing user ++ defined macros, and /etc/xxx containing system macros. ++ ++ >> sial_unload(char *funcname) ++ ++ To unload the a macro file use this function. ++ "funcname" is the name of any global function in the ++ file you want to unload. 
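++
++	Taken together, the entry points above are enough to embed
++	sial in a host tool.  The fragment below is only a sketch of
++	that sequence: the callback table is left mostly empty, and
++	the integer arguments passed to sial_apiset() (ABI, default
++	type size, signedness) are assumptions based on sial_api.h,
++	not a documented contract.
++
++		#include <stdio.h>
++		#include <string.h>
++		#include "sial_api.h"
++
++		int
++		host_sial_init(void)
++		{
++		apiops ops;
++
++			if(!sial_open()) return 0;	/* bring up libsial */
++
++			memset(&ops, 0, sizeof(ops));
++			/* point getmem, putmem, member, getctype, getval,
++			   etc... at the host's own accessors (see "The
++			   callback interface" below) */
++			sial_apiset(&ops, ABI_INTEL_X86, 8, 0);
++
++			sial_setofile(stdout);		/* command output goes here */
++			sial_setmpath("/usr/share/sial:~/.sial");
++			sial_load("/usr/share/sial");	/* load a macro directory */
++
++			return 1;
++		}
++
++	Once this has run, the host's command loop can hand control to
++	a macro with something like sial_cmd("foo", argv, argc).
++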
++ ++ >> void sial_setcallback(void (*scb)(char *)); ++ ++ To be called prior to any load calls. ++ After each loads, sial will call this function ++ back with the name of each functions compiled. ++ Typicly, the application will then perform checks ++ and potencially install a new command for this ++ function. ++ ++ ex: ++ void ++ reg_callback(char *name) ++ { ++ char fname[MAX_SYMNAMELEN+sizeof("_usage")+1]; ++ _command_t cmds[2]; ++ ++ snprintf(fname, sizeof(fname), "%s_help", name); ++ if(!sial_chkfname(fname, 0)) return; ++ snprintf(fname, sizeof(fname), "%s_usage", name); ++ if(!sial_chkfname(fname, 0)) return; ++ ++ cmds[0].cmd=strdup(name); ++ cmds[0].real_cmd=0; ++ cmds[0].cmdfunc=run_callback; ++ cmds[0].cmdparse=parse_callback; ++ cmds[0].cmdusage=usage_callback; ++ cmds[0].cmdhelp=help_callback; ++ cmds[1].cmd=0; ++ unregister_cmd(cmds[0].cmd); ++ (void)register_cmds(cmds); ++ } ++ ++ >> sial_setipath(char *path) ++ ++ When sial processes a #include directive it will use ++ the specified path as a search path. ++ The usual PATH format is supported ex: ++ "/etc/include:/usr/include". ++ ++ >> sial_setmpath(char *path) ++ ++ When sial_load() is called with a relative path name or ++ just the name of a file, it will use a search PATH to ++ locate it. The path parameter to sial_set,path() sets ++ this path. The usual PATH format is supported ex: ++ "/etc/xxx:/usr/lib/xxx". ++ ++ >> sial_setofile(FILE *ofile) ++ ++ All output of sial commands will be send to file ofile. ++ ++ >> sial_cmd(char *cmd, char **argv, int nargs) ++ ++ This is the way to execute a sial command that as been ++ loaded. 'cmd' is the name of function to call. 'argv' ++ are the argument to this function. 'nargs' is the ++ number of argument in array 'argv'. ++ ++ Sial_cmd() will process argv and make the corresponding ++ values available to the function by creating global ++ variables that the function can test and use. ++ ++ >> sial_showallhelp() ++ ++ This command will send a complete list of the commands ++ long with the usage and help for each one of them. This ++ function should be called when the user request ++ something like 'help all'. ++ ++ >> sial_showhelp(char *func) ++ ++ This will display the help information for a particular ++ function loaded in sial. ++ ++The callback interface ++---------------------- ++ ++ Everytime sial needs some piece of information, it will call ++ the application back for it. THe sial_apiset() function is used ++ to install this callback interface into sial. Here is the list ++ of callback functions: ++ ++ typedef unsigned long long ull; ++ ++ Sial_apiset() passes this structure to sial: ++ ++ typedef struct { ++ ++ int (*getmem)(ull, void *, int); ++ int (*putmem)(ull, void *, int); ++ int (*member)(char *, ull, type * , member *); ++ int (*getctype)(int ctype, char * , type*); ++ char* (*getrtype)(ull, type *); ++ int (*alignment)(ull); ++ int (*getval)(char *, ull *); ++ enum_t* (*getenum)(char *name); ++ def_t* (*getdefs)(); ++ uint8_t (*get_uint8)(void*); ++ uint16_t (*get_uint16)(void*); ++ uint32_t (*get_uint32)(void*); ++ uint64_t (*get_uint64)(void*); ++ } apiops; ++ ++ ++ The apiops* struct defines the following member and function pointers: ++ ++ -getmem(ull addr, void *buffer, int nbytes) ++ ++ Read nbytes from image at virtual address addr (32 or ++ 64 bit) to buffer. ++ ++ -putmem(ull addr, void *buffer, int nbytes) ++ ++ Write nbytes from buffer to image at virtual address ++ addr (32 or 64 bit). 
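++
++		For instance, assuming the host tool exposes its own image
++		accessors (read_image() and write_image() below are purely
++		hypothetical names), these two callbacks can be thin
++		wrappers.  A non-zero return is taken here to mean success:
++
++		extern int read_image(ull addr, void *buf, int nbytes);
++		extern int write_image(ull addr, void *buf, int nbytes);
++
++		static int
++		my_getmem(ull addr, void *buffer, int nbytes)
++		{
++			/* copy nbytes of image memory at addr into buffer */
++			return read_image(addr, buffer, nbytes) ? 1 : 0;
++		}
++
++		static int
++		my_putmem(ull addr, void *buffer, int nbytes)
++		{
++			/* copy nbytes from buffer back to the image at addr */
++			return write_image(addr, buffer, nbytes) ? 1 : 0;
++		}
++
++		These are the functions a host would plug into the getmem
++		and putmem slots of the apiops structure.
++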
++ ++ -member(char *name, ull pidx, type *tm, member *m); ++ ++ Get information on a structure member called name. ++ Pidx is a unique type index for the parent structure. ++ The getctype() function should fill in this index in ++ it's type*. The dwarf model uses unique indexes (die ++ offsets) that can be used here. 'tm' will hold ++ information on the type of the member. 'm' will hold ++ information on the member specific stuff (bit sizes, ++ bit offset etc.). ++ ++ Use the sial_member_...() functions to setup m. ++ Use the sial_type_...() functions to setup t. ++ ++ -getctype(int ctype, char *name, type *tout) ++ ++ Get type information for a complex type. Ctype ++ specifies that name is a type of type struct/union or ++ enum. tout contain the returned type information. ++ ++ -getrtype(ull idx, type *t) ++ ++ Gets the type string linked to a typedef. For example, ++ the gettdeftype() would return ++ "unsigned long long". This enables sial to drill down a ++ typedef (a typedef can be build from a typedef ++ itself) in order to perform proper type validation for ++ assignment or function parameters or return values. ++ ++ -getval(char *sname, ull *value) ++ ++ Returns the value of symbol "sname" from the image. The ++ value is returned in 'value'. On any image this is ++ address of the symbol within the image itself, not the ++ value of the symbol itself. See explanation of this ++ above. ++ ++ -getenum(char *name); ++ ++ Return a list of enum values. ++ Sial will make these available as symbol for the duration ++ of the compile. ++ ++ -getdefs() ++ ++ Return a list of #defines to be active througout the ++ sial session. ++ ++ -get_uint8/16/32/64() ++ ++ Return converted unsigned integers. As parameters are passed pointers ++ to unsigned int values in dump representation. The return values are ++ the corresponding unsigned int values in the representation of the ++ host architecture, where sial is running. ++ ++The builtin API ++--------------- ++ ++ Sometime it is necessary to create a C function that will ++ handle some piece of the work, that a macro cannot do. Sial's ++ builtin function are implemented this way. Generic function ++ like 'printf' or 'getstr' can get some parameter input from the ++ macros and do something (printf) or they get some information, ++ map it to a sial value and return it to a macro (getstr). ++ ++ ++ Sial can load new functiosn from DSOs. If the extension of ++ a file name is ".so" then sial opens it and gets a list ++ of function specs from it. Unload of that file will ++ uninstall these functions. ++ ++ The API between the dso and sial is quite simple at this time. ++ It has not been exercised as must as it would need to, so it ++ might get more flexible and thus complex in the future. ++ ++ Here are two examples of simple extensions. ++ ++ This is an example of a simple extension. An equivalent ++ os the "hello world" C program, but this one gets 2 parameters ++ , one int and one string and returns the received int. 
++ ++ #include "sial_api.h" ++ ++ value * ++ helloworld(value *vi, value *vs) ++ { ++ int i=sial_getval(vi); ++ char *s=(char*)sial_getval(vs); ++ ++ sial_msg("Hello to the world![%d] s=[%s]\n", i, s); ++ return sial_makebtype(1); ++ } ++ ++ BT_SPEC_TABLE = { ++ { "int hello(int i, string s)", helloworld}, ++ { 0, 0} ++ }; ++ ++ static char *buf; ++ ++ BT_INIDSO_FUNC() ++ { ++ sial_msg("Hello world being initialized\n"); ++ buf=sial_alloc(1000); ++ return 1; ++ } ++ ++ BT_ENDDSO_FUNC() ++ { ++ sial_msg("Hello world being shutdown\n"); ++ sial_free(buf); ++ } ++ ++ The BT_SPEC_TABLE is scanned. It's a simple table ++ with 2 entries per functions and terminated with ++ a NULL prototype. ++ ++ The DSO initializer function is called. ++ If it returns 0 then installtion is terminates. ++ If it returns 1 we proceed forward. ++ ++ The prototype is compiled and a syntax error ++ will send the error message to the application ++ output file (stdout usually). ++ ++ When the prototype as compiled with no errors ++ the function is installed and ready to be used from ++ sial macros. ++ ++ Type checking is performed by sial at ++ execution time on both, the function parameters ++ andthe function return. ++ ++ DSO's can also receive, create and manipulate dynamic arrays. ++ Here is an example of this: ++ ++ #include "sial_api.h" ++ ++ #ifdef ARRAY_STATIC ++ static value *v; ++ #endif ++ ++ value * ++ mkarray(value* vi) ++ { ++ int i=sial_getval(vi); ++ #ifndef ARRAY_STATIC ++ value *v=sial_makebtype(0); ++ #endif ++ ++ sial_msg("Received value [%d]\n", i); ++ /* build an array indexed w/ int w/ 2 string values */ ++ sial_addvalarray(v, sial_makebtype(0) ++ , sial_makestr("Value of index 0")); ++ sial_addvalarray(v, sial_makebtype(2) ++ , sial_makestr("Value of index 2")); ++ #if ARRAY_STATIC ++ /* ++ For a static array use : ++ Then the array will persist until you Free it. ++ */ ++ sial_refarray(v, 1); ++ #endif ++ return v; ++ } ++ ++ value * ++ showstrarray(value* va) ++ { ++ value *v1=sial_strindex(va, "foo"); ++ value *v2=sial_strindex(va, "goo"); ++ ++ printf("array[1]=%d\n", sial_getval(v1)); ++ printf("array[2]=%d\n", sial_getval(v2)); ++ sial_addvalarray(va, sial_makestr("gaa"), sial_makebtype(3)); ++ sial_addvalarray(va, sial_makestr("doo"), sial_makebtype(4)); ++ sial_freeval(v1); ++ sial_freeval(v2); ++ return sial_makebtype(0); ++ } ++ ++ value * ++ showintarray(value* va) ++ { ++ value *v1=sial_intindex(va, 1); ++ value *v2=sial_intindex(va, 2); ++ ++ printf("array[1]=%d\n", sial_getval(v1)); ++ printf("array[2]=%d\n", sial_getval(v2)); ++ sial_freeval(v1); ++ sial_freeval(v2); ++ return sial_makebtype(0); ++ } ++ ++ BT_SPEC_TABLE = { ++ { "int mkarray(int i)", mkarray}, ++ { "void showintarray(int i)",showintarray}, ++ { "void showstrarray(int i)",showstrarray}, ++ { 0, 0} ++ }; ++ ++ static char *buf; ++ ++ BT_INIDSO_FUNC() ++ { ++ sial_msg("mkarray initialized\n"); ++ #ifdef ARRAY_STATIC ++ /* we will need a static value to attach the ++ array too */ ++ v=sial_makebtype(0); ++ #endif ++ return 1; ++ } ++ ++ BT_ENDDSO_FUNC() ++ { ++ sial_msg("mkarray being shutdown\n"); ++ #ifdef ARRAY_STATIC ++ sial_freeval(v); ++ /* freing the value decrements the reference ++ count by one. 
So, if none of the calling ++ macros copied the value to a static ++ sial variable, it will free the array */ ++ #endif ++ } ++ ++Macro Construction ++------------------ ++ ++ When sial as been integrated into an application and a basic ++ set of builtin command as been created, it is time to start ++ creating the macro themselves. Some basic rules and conventions ++ apply to macro construction that make the coding and ++ documenting steps of macro definition easy. ++ ++ I will use the function foo as an example. Function foo is ++ defined in file /usr/tmp/sial/foo. Function foo is a user ++ callable function, meaning that it can be executed by the ++ sial_cmd() function. The command input section of the ++ application can thus call sial_cmd("foo", char *argv, int ++ nargs) to execute the foo macro. ++ ++ ------------ file foo ------------- ++ ++ foo_opt(){ return "ab:c"; } ++ ++ foo_usage(){ return "[-a] [-b barg] [-c] addr [addr [addr...]]"; } ++ ++ foo_help(){ return "This is an example function"; } ++ ++ static int ++ doproc(proc_t *p) ++ { ++ printf("p=0x%p\n", p); ++ } ++ ++ int foo() ++ { ++ int all, i; ++ string barg; ++ ++ if(exists(aflag)) all=1; ++ else all=0; ++ ++ if(exists("bflag")) bval=barg; ++ ++ for(i in argv) { ++ ++ proc_t *p; ++ ++ p=(proc_t*)atoi(argv[i], 16); ++ ++ doproc(p); ++ ++ } ++ } ++ ++ ------------ end of file foo -------------- ++ ++ The application calls sial_load() to load foo. Sial calls ++ back the application with the names of all fucntions declared ++ in that file. The aplication can then register commands for ++ the user to type according to this list of functions. ++ In this case 'foo'. ++ ++ The application then uses sial_cmd() to run a specific ++ command 'foo'. ++ ++ Before executing the command, sial checks if a foo_opt() ++ function exists and if so, calls it. This function returns the ++ proper getopt() argument specification string. If this function ++ does not exists then all arguments are passed down to the foo() ++ function directly. ++ ++ If the arguments supplied by the user do not follow the proper ++ syntax then the function foo_usage() will be called, if it ++ exists. If the foo_usage() function does not exists, a generic ++ error message is generated by sial. ++ ++ If the command 'help foo' is issued, the application should be ++ calling sial_exefunc("help_foo", 0) whish will return a VALUE_S ++ for the help for foo. Or whatever foo_help() returns. ++ ++ Each option, their associated value and addition arguments are ++ made available to the foo funtion by creating the following ++ global variables before the actual call. ++ ++ Each option, if specified, will trigger the existence of flag ++ variable. In the foo() case, this means that variables aflag, ++ bflag and cflag can possibly exist. The function ++ exists("variable name") can then be used to test for this ++ option presence. ++ ++ If an option has an associated value (getopt's ':' is specified ++ on the string foo_opt() returns) this value is made available ++ as a string type variable called Xarg, where X is the option ++ letter. In the case of foo() variable 'string barg' would exist ++ if the -b option was supplied by the user. ++ ++ The rest of the arguments supplied by the user are made ++ available in an array of 'string' called argv. argv[0] is ++ set to the name of the function 'foo' and argc is a global ++ that defines how many argv their are. ++ ++Builtin functions ++================= ++ ++ Here is a description of the current set of builtin functions. 
++ ++ unsigned long long ++ atoi(string value [, int base]) ++ ++ Convert a string value to a long long. Base is the base ++ that should be used to process the string e.g. 8, 10 or ++ 16. If not specified, then the standard numeric format ++ will be scnanned for ex: ++ ++ 0x[0-9a-fA-F]+ : hexadecimal ++ 0[0-7]+ : octal ++ [1-9]+[0-9]* : decimal ++ ++ This function is used when converting command line ++ arguments to pointers. ++ ++ Example: ++ ++ void ++ mycommand() ++ { ++ int i; ++ ++ for(i=1;i> mycommand 0xa80000004ab14578 ++ ++ int exists(string name) ++ ++ Checks for the existance of a variable. Returns 1 if ++ the variables does exist and 0 otherwise. This function ++ is mostly used to test if some options were specified ++ on when the macro was executed from command line. ++ ++ It can also be used to test for image variable. ++ ++ example: ++ ++ void ++ mycommand() ++ { ++ if(exists("aflag")) { ++ ++ // user has specified -a option ++ } ++ } ++ ++ void exit() ++ ++ Terminate macro excution now. ++ ++ int getchar() ++ ++ Get a single character from tty. ++ ++ string gets() ++ ++ Get a line of input from tty. ++ ++ string getstr(void *) ++ ++ Gets a null terminated string from the image at the ++ address specified. Sial will read a series of 16 byte ++ values from the image untill it find the \0 character. ++ Up to 4000 bytes will be read this way. ++ ++ string getnstr(void *, int n) ++ ++ Gets n characters from the image at the specified ++ address and returns the corresponding string. ++ ++ string itoa(unsigned long long) ++ ++ Convert a unsigned long long to a decimal string. ++ ++ void printf(char *fmt, ...); ++ ++ Send a formatted message to the screen or output file. ++ For proper allignment of output on 32 and 64 bit systems ++ one can use the %> sequence along with the %p format. ++ ++ On a 32 bit system %p will print a 8 character hexadecimal ++ value and on a 64 bit system it will print a 16 character ++ value. So, to get proper alignment on both type of systems ++ use the %> format which will print nothing on a 64 bit system ++ but will print 8 times the following character on a 32 bit ++ system. ++ ++ example: ++ ++ struct proc *p; ++ ++ printf("Proc %> uid pid\n"); ++ printf("0x%p %8d %8d\n" ++ , p, p->p_uid,p_p_pid); ++ ++ int sial_depend(string file) ++ ++ Loads a macro or directory of macros called ++ 'file'. Contrary to sial_load() it will not ++ give any error messages. Returns 1 on success ++ 0 otherwise. ++ ++ int sial_load(string file) ++ ++ Loads and compiles a sial macro file. ++ returns 1 if successful or 0 otherwise. ++ ++ void sial_unload(string file) ++ ++ Unload's a sial macro file ++ ++ string sprintf(string format, ...) ++ ++ Creates a string from the result of a sprintf. ++ Example: ++ ++ void ++ mycommand() ++ { ++ ++ string msg; ++ ++ msg=sprintf("i=%d\n", i); ++ } ++ The result will be truncated to maxbytes if it would be ++ longer. ++ ++ int strlen(string s) ++ ++ Return the length of string s. ++ ++ string substr(string s, int start, int len) ++ ++ Creates a string from the substring starting a charcater ++ 'start' of 's' for 'len' characters. ++ example: ++ ++ s=substr("this is the original", 6, 2); ++ ++ So 's' will become "is". 
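++
++	As a closing example, here is a small macro that ties a few of
++	these builtins together.  The symbol name "saved_command_line"
++	is only a placeholder for illustration; any char* symbol of the
++	image would do, and the macro assumes a -v option was declared
++	in the command's _opt() string.
++
++	void
++	showcmdline()
++	{
++	char *cp;
++	string s;
++
++		if(exists("vflag"))
++			printf("verbose mode\n");
++
++		/* a symbol reference yields its address in the image,
++		   so dereference once to get the char* value */
++		cp = *(char **)saved_command_line;
++
++		/* pull the string out of the image and display it */
++		s = getstr(cp);
++		printf("command line [%d chars] : %s\n", strlen(s), s);
++	}
++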
++ ++ ++ -------------------------------------------------------- ++ ++Questions/Comments ++Luc Chouinard, lucchouina@yahoo.com +--- crash/extensions/libsial/README.sial.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/README.sial 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,97 @@ ++ ++ This file (README.sial) gives some information specific to the crash(1) ++ integration of sial. ++ ++ Please refer to the README file generic libsial informations. ++ ++ An example script can be found as ../scripts/sial.c ++ ++ PATHS and ENVIRONMENT ++ ===================== ++ ++ The default location to look for macros and include files are ++ /usr/share/sial/.sial and ~/..sial ++ ++ The default 'root' for #include location are thus: ++ ++ /usr/share/sial/crash/include and ~/.sial/include. ++ ++ There are two environment variables that control these locations. ++ ++ SIAL_IPATH : path to use for include files. ++ ex: setenv SIAL_IPATH /usr/include:$(ROOT)/usr/include:~/.lcrash/include ++ ++ SIAL_MPATH : path to use for finding macro files. ++ ex: setenv SIAL_MPATH /usr/tmp/macs:~/.sial ++ ++ #define's ++ ===================== ++ ++ The current independent #define's are: ++ ++ Name Value/format ++ ==== ===== ++ linux 1 ++ __linux 1 ++ __linux__ 1 ++ unix 1 ++ __unix 1 ++ __unix 1 ++ LINUX_RELEASE 0x%08x ++ LINUX_2_2_16 (LINUX_RELEASE==0x020210) ++ LINUX_2_2_17 (LINUX_RELEASE==0x020211) ++ LINUX_2_4_0 (LINUX_RELEASE==0x020400) ++ LINUX_2_2_X (((LINUX_RELEASE) & 0xffff00) == 0x020200) ++ LINUX_2_4_X (((LINUX_RELEASE) & 0xffff00) == 0x020400) ++ ++ For i386 images/cores only. ++ ++ Name Value ++ ==== ==== ++ i386 1 ++ __i386 1 ++ __i386__ 1 ++ ++ For ia64 images/cores only. ++ ++ Name Value ++ ==== ===== ++ ia64 1 ++ __ia64 1 ++ __ia64__ 1 ++ __LP64__ 1 ++ _LONGLONG 1 ++ __LONG_MAX__ 9223372036854775807L ++ ++ If you feel there should be more standard defined pushed ++ there, let me know. ++ ++ ++ Loading/Unloading ++ ===================== ++ ++ crash defines two new commands for loading and unloading sial ++ macros called "sload" and "sunload", respectively. ++ ++ Using "sload" should be enough, since sial automaticly "sunload's" ++ the previous copy after successfully compiling the new one. ++ ++ DSO's must be unload'ed before you can reload them. ++ ++ Editing ++ ===================== ++ ++ To facilitate macro editing, crash makes a "edit" command available. ++ edit will get you directly in the macro file that ++ defines function "funcname" at the line where "funcname" starts. ++ ++ edit -f /somedir/somefile, will start editing a new/old file. ++ ++ edit -l will get you position on the "l"ast compile or runtime error. ++ ++ Macro integration into the crash framework ++ ============================================= ++ ++ Refer to the README file on writing a user level command. ++ Also look at the 'sial.c' example in the scripts directory ++ +--- crash/extensions/libsial/sial_num.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_num.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,233 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include ++#include ++#include "sial.h" ++ ++typedef struct { ++ int type; ++ ull val; ++} num; ++ ++/* ++ Numeric constants. 
++*/ ++ ++static value_t* ++sial_exenum(num *n) ++{ ++value_t *v=sial_newval(); ++ ++ v->type.type=V_BASE; ++ v->type.idx=n->type; ++ if(n->type==B_SLL) { ++ ++ll: ++ v->v.sll=n->val; ++ v->type.size=8; ++ ++ }else if(n->type==B_SC) { ++ ++ v->v.sc=n->val; ++ v->type.size=1; ++ ++ } else { ++ ++ if(sial_defbsize()==4) { ++ ++ v->v.sl=n->val; ++ v->type.size=4; ++ ++ } else { ++ ++ v->type.idx=B_SLL; ++ goto ll; ++ } ++ } ++ v->type.typattr=sial_idxtoattr(v->type.idx); ++ v->set=0; ++ return v; ++} ++ ++void ++sial_freenumnode(num *n) ++{ ++ sial_free(n); ++} ++ ++node_t* ++sial_makenum(int type, ull val) ++{ ++node_t*n=sial_newnode(); ++num *nu=sial_alloc(sizeof(num)); ++ ++ TAG(nu); ++ ++ nu->type=type; ++ nu->val=val; ++ n->exe=(xfct_t)sial_exenum; ++ n->free=(ffct_t)sial_freenumnode; ++ n->data=nu; ++ ++ sial_setpos(&n->pos); ++ return n; ++} ++ ++/* ++ Execution of the sizeof() operator. ++ This sould be done at compile time, but I have not setup ++ a 'type only' execution path for the nodes. ++ Runtime is good enough to cover mos cases. ++*/ ++#define SN_TYPE 1 ++#define SN_EXPR 2 ++ ++typedef struct { ++ int type; ++ void *p; ++ srcpos_t pos; ++} snode_t; ++ ++static value_t * ++sial_exesnode(snode_t*sn) ++{ ++srcpos_t pos; ++type_t*t; ++value_t *v=sial_newval(); ++value_t *v2=0; ++int size; ++ ++ sial_curpos(&sn->pos, &pos); ++ if(sn->type == SN_TYPE) { ++ ++ t=(type_t*)(sn->p); ++ ++ } else { ++ ++ sial_setinsizeof(1); ++ v2=NODE_EXE((node_t*)(sn->p)); ++ t=&v2->type; ++ sial_setinsizeof(0); ++ } ++ ++ switch(t->type) { ++ ++ case V_REF: ++ ++ if(t->idxlst) { ++ ++ int i; ++ for(size=t->size,i=0;t->idxlst[i];i++) size *= t->idxlst[i]; ++ ++ } else size=sial_defbsize(); ++ ++ break; ++ case V_STRUCT: case V_UNION: ++ ++ if(sial_ispartial(t)) { ++ ++ sial_error("Invalid type specified"); ++ } ++ size=t->size; ++ ++ break; ++ case V_BASE: case V_STRING: ++ size=t->size; ++ break; ++ ++ default: size=0; ++ } ++ ++ sial_defbtype(v, (ull)size); ++ ++ sial_curpos(&pos, 0); ++ ++ if(v2) sial_freeval(v2); ++ ++ return v; ++ ++} ++ ++static void ++sial_freesnode(snode_t*sn) ++{ ++ if(sn->type == SN_TYPE) sial_free(sn->p); ++ else NODE_FREE(sn->p); ++ sial_free(sn); ++} ++ ++node_t* ++sial_sizeof(void *p, int type) ++{ ++node_t*n=sial_newnode(); ++snode_t*sn=sial_alloc(sizeof(snode_t)); ++ ++ n->exe=(xfct_t)sial_exesnode; ++ n->free=(ffct_t)sial_freesnode; ++ n->data=sn; ++ sn->type=type; ++ sn->p=p; ++ sial_setpos(&sn->pos); ++ return n; ++} ++ ++node_t* ++sial_newnum(char *buf) ++{ ++int type; ++unsigned long long val; ++ ++ type=B_SL; ++ ++ /* get the value_t of this constant. Could be hex, octal or dec. */ ++ if(buf[0]=='0') { ++ ++ if(buf[1]=='x') { ++ ++ if(!sscanf(buf, "%llx", &val)) goto error; ++ ++ } else { ++ ++ if(!sscanf(buf,"%llo", &val)) goto error; ++ } ++ ++ } else { ++ ++ if(!sscanf(buf,"%lld", &val)) goto error; ++ ++ } ++ ++ if(val & 0xffffffff00000000ll) type=B_SLL; ++ ++ /* threat the long and long long atributes */ ++ { ++ int l=strlen(buf); ++ ++ if(l>1) { ++ ++ if(buf[l-1]=='l' || buf[l-1]=='L') { ++ ++ if(l>2) { ++ ++ if(sial_defbsize()==8 || buf[l-2]=='l' || buf[l-2]=='L') { ++ ++ type=B_SLL; ++ ++ } ++ else type=B_SL; ++ ++ } ++ ++ } ++ } ++ } ++ { ++ node_t*n=sial_makenum(type, val); ++ TAG(n->data); ++ return n; ++ } ++error: ++ sial_error("Oops! 
NUMBER"); ++ return 0; ++} +--- crash/extensions/libsial/sial_alloc.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_alloc.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,430 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++ ++#define MEMDEBUG 1 ++/* ++*/ ++#include "sial.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef __GNUC__ ++# if __LP64__ ++# define NKPC 16 ++# else ++# define NKPC 4 ++# endif ++#else ++// must be the SGI Mips compiler. ++# if (_MIPS_SZLONG == 64) ++# define NKPC 16 ++# else ++# define NKPC 4 ++# endif ++#endif ++#define PAGESIZE (NKPC*1024) ++ ++ ++/* ++ Jump defines ++*/ ++#define MAXJMPS (S_MAXDEEP*3) ++int njmps=0; ++ ++typedef struct blklist { ++ ++ struct blklist *next; /* root based doubly chained */ ++ struct blklist *prev; ++ int size; /* size of the allocation */ ++ int istmp; /* was flaged as temp ? */ ++ int level; /* coresponding level */ ++ void *caller; /* __return_address of caller */ ++ void *freer; /* __return_address of freer */ ++ ++} blist; ++ ++#define SIZEBL (((sizeof(blist)+8)/8)*8) ++ ++void pbl(void *p) ++{ ++blist *bl=(blist*)(((char*)p)-SIZEBL); ++ sial_msg("struct blklist *%p {", bl); ++ sial_msg(" next=%p", bl->next); ++ sial_msg(" prev=%p", bl->prev); ++ sial_msg(" size=%d", bl->size); ++ sial_msg(" istmp=%d", bl->istmp); ++ sial_msg(" level=%d", bl->level); ++ sial_msg(" caller=%p", bl->caller); ++ sial_msg(" freer=%p", bl->freer); ++} ++ ++static blist temp={ &temp, &temp, 0, 0, 0, 0, 0 }; ++ ++value_t* ++sial_findsym(value_t *vadr) ++{ ++ char *addr=sial_getptr(vadr, char); ++ char *p = API_FINDSYM(addr); ++ ++ if(p) { ++ return sial_setstrval(sial_newval(), p); ++ } else { ++ return sial_setstrval(sial_newval(),""); ++ } ++} ++ ++value_t* ++sial_showaddr(value_t *vadr) ++{ ++void *addr=sial_getptr(vadr, void); ++blist *bl; ++int n=0; ++ ++ for(bl=temp.next; bl != &temp; bl=bl->next) { ++ ++ if(bl->caller==addr) { ++ ++ if(!(n%8)) sial_msg("\n"); ++ sial_msg("0x%08x ", ((char *)bl) + SIZEBL); ++ n++; ++ } ++ } ++ return sial_makebtype(0); ++} ++ ++static int memdebug=0; ++ ++/* these two functions must *not* receive any values */ ++value_t* sial_memdebugon() { memdebug=1; return sial_makebtype(0); } ++value_t* sial_memdebugoff() { memdebug=0; return sial_makebtype(0); } ++int sial_ismemdebug() { return memdebug; } ++ ++value_t* ++sial_showtemp() ++{ ++blist *bl; ++int i, totsiz, totbl; ++static int ncallers=0; ++static void *callers[1000]; ++static int count[1000]; ++static int sizes[1000]; ++static int dir=0; ++ ++ if(!dir) { ++ ++ memset(callers, 0, sizeof(void*)*1000); ++ memset(count, 0, sizeof(int)*1000); ++ memset(sizes, 0, sizeof(int)*1000); ++ ncallers=0; ++ } ++ ++ if(dir==1) dir=0; ++ else dir=1; ++ ++ for(bl=temp.next; bl != &temp; bl=bl->next) { ++ ++ int i; ++ ++ for(i=0;icaller) { ++ if(dir) { count[i]++; sizes[i]+=bl->size; } ++ else { count[i]--; sizes[i]-=bl->size; } ++ break; ++ } ++ ++ if(i==ncallers) { ++ callers[ncallers]=bl->caller; ++ count[ncallers]=1; ++ sizes[ncallers]=bl->size; ++ ncallers++; ++ } ++ ++ } ++ totbl=totsiz=0; ++ for(i=0;icaller=retaddr; ++} ++ ++#define PAGEMASK 0xfffffffffffff000ll ++#define MAGIC 0xdeadbabe ++void * ++sial_alloc(int size) ++{ ++char *m; ++blist *bl; ++ ++#ifdef MEMDEBUG ++unsigned long p, pp; ++int npages; ++#endif ++ ++ size=size+SIZEBL; ++ ++#if MEMDEBUG ++ ++ if(memdebug) { ++ ++ npages=((size+PAGESIZE+4)/PAGESIZE)+2; ++ p=(unsigned long)malloc(npages*PAGESIZE); ++ 
p=(p+PAGESIZE)&PAGEMASK; ++ pp=p+((npages-2)*PAGESIZE); ++ p=pp-size; ++ p = p ^ (p & 0x0fll); ++ *((int*)(p-4))=MAGIC; ++ mprotect((void*)pp, PAGESIZE, PROT_READ); ++ m=(char*)p; ++ ++ } else { ++ ++ m=malloc(size); ++ } ++ ++#else ++ ++ m=malloc(size); ++ ++#endif ++ ++ ++ bl=(blist*)m; ++ bl->size=size; ++ bl->level=njmps; ++ bl->prev=bl->next=bl; ++ bl->istmp=0; ++ TAG(m+SIZEBL); ++ return m+SIZEBL; ++} ++ ++void ++sial_maketemp(void *p) ++{ ++blist *bl; ++ ++ if(!p) return; ++ ++ bl=(blist*)(((char*)p)-SIZEBL); ++ bl->prev=&temp; ++ bl->next=temp.next; ++ bl->istmp=1; ++ temp.next->prev=bl; ++ temp.next=bl; ++} ++ ++void * ++sial_calloc(int size) ++{ ++char *p=sial_alloc(size); ++ ++ TAG(p); ++ memset(p, 0, size); ++ return p; ++} ++ ++static void ++sial_free_bl(blist *bl, void *ra) ++{ ++ bl->freer=ra; ++ bl->prev->next=bl->next; ++ bl->next->prev=bl->prev; ++ ++#ifdef MEMDEBUG ++ ++ if(memdebug) { ++ ++ /* help out dbx/gdb when they're watching the allocated area ++ by writing over it */ ++ { ++ int i, ni=bl->size/sizeof(void*); ++ char *p=(char*)bl; ++ unsigned long up; ++ ++ for(i=0;inext; ++ sial_free_bl(bl, __return_address); ++ bl=next; ++ } ++} ++ ++int ++sial_istemp(void *p) ++{ ++ return ((blist*)(((char*)p)-SIZEBL))->istmp; ++} ++ ++char * ++sial_strdup(char *s) ++{ ++char *ns=sial_alloc(strlen(s)+1); ++ ++ strcpy(ns, s); ++ TAG(ns); ++ return ns; ++} ++ ++void * ++sial_dupblock(void *p) ++{ ++void *p2; ++int size=((blist*)(((char*)p)-SIZEBL))->size-SIZEBL; ++ ++ if(!p) return 0; ++ ++ p2=sial_alloc(size); ++ memcpy(p2, p, size); ++ return p2; ++} ++ ++/* cheap realloc. we drop the original ++ This function is only used ones in configmon(1) code */ ++void * ++sial_realloc(void *p, int size) ++{ ++int cursize=((blist*)(((char*)p)-SIZEBL))->size-SIZEBL; ++void *p2; ++ ++ p2=sial_calloc(size); ++ memcpy(p2, p, cursize 1) { ++ ++ jmp_buf *env; ++ ++ while(njmps && jmps[--njmps].type!=type); ++ if(jmps[njmps].val) *(jmps[njmps].val)=val; ++ env=jmps[njmps].env; ++ ++ /* reset the variable level too... */ ++ sial_setsvlev(jmps[njmps].svlev); ++ ++ longjmp(*env, 1); ++ /* NOT REACHED */ ++ ++ } else sial_parseback(); /* we use the same code for initializing ++ static and automatic variables. In the case of statiuc variables ++ is the initizer expression throws an error then there's no J_EXIT ++ jump context and njmps is null. It's treated as a parsing error */ ++} ++ ++void ++sial_popjmp(int type) ++{ ++ if(!njmps) { ++ ++ sial_error("Pop underflow!"); ++ } ++ njmps--; ++ if(jmps[njmps].type != type) { ++ ++ sial_error("Wrong pop! %d vs %d", jmps[njmps].type, type); ++ } ++ sial_setsvlev(jmps[njmps].svlev); ++} ++ +--- crash/extensions/libsial/sial.h.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial.h 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,465 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. 
++ */ ++#include "sial_api.h" ++typedef unsigned long long caddr; ++ ++#define SRCPOS_S struct srcpos_s ++#define DVAR_S struct dvar_s ++#define CASELIST_S struct caselist_s ++#define CASEVAL_S struct caseval_s ++#define STMEMBER_S struct stmember_s ++#define STINFO_S struct stinfo_s ++ ++SRCPOS_S; ++DVAR_S; ++CASELIST_S; ++CASEVAL_S; ++STMEMBER_S; ++STINFO_S; ++ ++ ++/************* source position tracking ************/ ++typedef SRCPOS_S { ++ char *file; ++ int line; ++ int col; ++} srcpos_t; ++ ++/* member information */ ++typedef MEMBER_S { ++ ++ char *name; ++ int offset; /* offset from top of structure */ ++ int size; /* size in bytes of the member or of the bit array */ ++ int fbit; /* fist bit (-1) is not a bit field */ ++ int nbits; /* number of bits for this member */ ++ int value; /* for a enum member, the corresponding value_t */ ++ ++} member_t; ++ ++/* list to hold enum constant information */ ++typedef ENUM_S { ++ ++ struct enum_s *next; ++ char *name; ++ int value; ++ ++} enum_t; ++ ++/* list of macro symbols and there corresponding value_ts */ ++typedef DEF_S { ++ struct def_s * next; ++ char *name; ++ char *val; ++ ++} def_t; ++ ++/* type_t information past back and forth */ ++typedef TYPE_S { ++ int type; /* type_t of type_t */ ++ ull idx; /* index to basetype_t or ctype_t */ ++ int size; /* size of this item */ ++ /* ... next fields are use internally */ ++ int typattr; /* base type_t qualifiers */ ++ int ref; /* level of reference */ ++ int fct; /* 1 if function pointer */ ++ int *idxlst; /* points to list of indexes if array */ ++ ull rtype; /* type_t a reference refers too */ ++} type_t; ++ ++/* scope/storage of variables */ ++#define S_FILE 1 /* persistant file scope */ ++#define S_STAT 2 /* persistant statement scope */ ++#define S_AUTO 3 /* stack (default) */ ++#define S_GLOB 4 /* add to the global variables */ ++ ++typedef union vu_s { ++ unsigned char uc; ++ signed char sc; ++ unsigned short us; ++ signed short ss; ++ unsigned int ul; ++ signed int sl; ++ unsigned long long ull; ++ signed long long sll; ++ void *data; ++} vu_t; ++ ++/************* value_t **************/ ++typedef VALUE_S { ++ type_t type; ++ int set; /* is this is a Lvalue_t then set is 1 */ ++ VALUE_S *setval;/* value_t to set back to */ ++ void (*setfct)(struct value_s*, struct value_s*); ++ /* the function that will set the value */ ++ ARRAY_S *arr; /* array associated with value */ ++ vu_t v; ++ ull mem; ++} value_t; ++ ++/************** array linked lists *****************/ ++typedef ARRAY_S { ++ ++ ARRAY_S *next; /* to support a linked list of array elements */ ++ ARRAY_S *prev; ++ int ref; /* reference count on this array */ ++ VALUE_S *idx; /* arrays can be indexed using any type of variables */ ++ VALUE_S *val; /* arrays element values */ ++ ++} array_t; ++ ++/************* node_t *************/ ++typedef NODE_S { ++ VALUE_S* (*exe)(void*); /* execute it */ ++ void (*free)(void*); /* free it up */ ++ char* (*name)(void*); /* get a name */ ++ void *data; /* opaque data */ ++ NODE_S* next; ++ SRCPOS_S pos; ++} node_t; ++ ++typedef IDX_S { ++ ++ int nidx; ++ NODE_S *idxs[MAXIDX]; ++ ++} idx_t; ++ ++/*************** variable list ****************/ ++typedef VAR_S { ++ ++ char *name; ++ VAR_S *next; ++ VAR_S *prev; ++ VALUE_S *v; ++ int ini; ++ DVAR_S *dv; ++ ++} var_t; ++ ++/* V_BASE subtype */ ++#define B_SC 0 /* signed char */ ++#define B_UC 1 /* unsignec char */ ++#define B_SS 2 /* signed short */ ++#define B_US 3 /* unsigned short */ ++#define B_SL 4 /* signed long */ ++#define B_UL 5 
/* unsigned long */ ++#define B_SLL 6 /* signed long long */ ++#define B_ULL 7 /* unsigned long long */ ++ ++#define is_ctype(t) ((t)==V_UNION || (t)==V_STRUCT) ++#define VAL_TYPE(v) (v->type.type) ++#define TYPE_SIZE(t) ((t)->type==V_REF?sial_defbsize():(t)->size) ++ ++/* type_ts of jumps */ ++#define J_CONTINUE 1 ++#define J_BREAK 2 ++#define J_RETURN 3 ++#define J_EXIT 4 ++ ++#define sial_setval(v, v2) if((v)->set) ((v)->setfct)((v)->setval, (v2)) ++ ++/************* case *************/ ++typedef CASEVAL_S { ++ ++ int isdef; ++ ull val; ++ CASEVAL_S *next; ++ SRCPOS_S pos; ++ ++} caseval_t; ++ ++typedef CASELIST_S { ++ ++ CASEVAL_S *vals; ++ NODE_S *stmt; ++ CASELIST_S *next; ++ SRCPOS_S pos; ++ ++} caselist_t; ++ ++/*************** struct member info ****************/ ++typedef STMEMBER_S { ++ ++ TYPE_S type; /* corresponding type_t */ ++ MEMBER_S m; /* member information */ ++ ++ STMEMBER_S *next; ++ ++} stmember_t; ++ ++typedef DVAR_S { ++ ++ char *name; ++ int refcount; ++ int ref; ++ int fct; ++ int bitfield; ++ int nbits; ++ IDX_S *idx; ++ NODE_S *init; ++ VAR_S *fargs; ++ SRCPOS_S pos; ++ DVAR_S *next; ++ ++} dvar_t; ++ ++typedef STINFO_S { ++ char *name; /* structure name */ ++ ull idx; /* key for search */ ++ int all; /* local : partial or complete declaration ? */ ++ TYPE_S ctype; /* associated type */ ++ TYPE_S rtype; /* real type_t when typedef */ ++ STMEMBER_S *stm; /* linked list of members */ ++ ENUM_S *enums; /* enums names and values */ ++ STINFO_S *next; /* next struct on the list */ ++ ++} stinfo_t; ++ ++stinfo_t *sial_getstbyindex(ull idx, int type_t); ++ ++typedef value_t* (*xfct_t)(void *); ++typedef char* (*nfct_t)(void *); ++typedef void (*ffct_t)(void *); ++typedef void (*setfct_t)(value_t*, value_t*); ++ ++#ifdef DEBUG ++#define NODE_EXE(n) (printf("(%s):[%d]\n",__FILE__, __LINE__), (n)->exe((n)->data)) */ ++#else ++#define NODE_EXE(n) ((n)->exe((n)->data)) ++#endif ++#define NODE_NAME(n) ((n)->name?((n)->name((n)->data)):0) ++#define NODE_FREE(n) (sial_freenode(n)) ++ ++#ifdef __GNUC__ ++#define __return_address (void*)(__builtin_return_address(0)) ++#else ++// must be the SGI Mips compiler. 
++#endif ++#if 1 ++#define TAG(p) sial_caller(p, __return_address) ++#else ++#define TAG(p) ; ++#endif ++ ++node_t *sial_sibling(node_t*, node_t*); ++node_t *sial_newnode(void); ++node_t *sial_newvnode(char *); ++node_t *sial_newstr(void); ++node_t *sial_newnum(char *); ++node_t *sial_newop(int op, int nagrs, ...); ++node_t *sial_newptrto(int, node_t*); ++node_t *sial_newmult(node_t*, node_t*, int); ++node_t *sial_newstat(int op, int nargs, ...); ++node_t *sial_stat_decl(node_t*, var_t*); ++node_t *sial_addstat(node_t*, node_t*); ++node_t *sial_type_cast(type_t*, node_t*); ++node_t *sial_newmem(int, node_t*, node_t*); ++node_t *sial_newcall(node_t*, node_t*); ++node_t *sial_newindex(node_t*, node_t*); ++node_t *sial_newadrof(node_t*); ++node_t *sial_newcase(node_t*, node_t*); ++node_t *sial_addcase(node_t*, node_t*); ++node_t *sial_caseval(int, node_t*); ++node_t *sial_addcaseval(node_t*, node_t*); ++node_t *sial_sizeof(void *p, int type_t); ++node_t *sial_tdeftovar(type_t *td); ++node_t *sial_getppnode(void); ++node_t *sial_allocstr(char *buf); ++node_t *sial_makenum(int type_t, ull val); ++node_t *sial_macexists(node_t *var_t); ++node_t *sial_newptype(var_t *v); ++node_t *sial_newpval(node_t *vn, int fmt); ++node_t *sial_strconcat(node_t *, node_t *); ++node_t *sial_typecast(type_t*type, node_t*expr); ++ ++dvar_t *sial_newdvar(node_t *v); ++dvar_t *sial_linkdvar(dvar_t *dvl, dvar_t *dv); ++dvar_t *sial_dvarini(dvar_t *dv, node_t *init); ++dvar_t *sial_dvaridx(dvar_t *dv, node_t *n); ++dvar_t *sial_dvarfld(dvar_t *dv, node_t *n); ++dvar_t *sial_dvarptr(int ref, dvar_t *dv); ++dvar_t *sial_dvarfct(dvar_t *dv, var_t *fargs); ++ ++void sial_pushjmp(int type_t, void *env, void *val); ++void sial_popjmp(int type_t); ++void *sial_getcurfile(void); ++void sial_walkarray(node_t *varnode_t, node_t *arrnode_t, void(*cb)(void *), void *data); ++void get_bit_value(ull val, int nbits, int boff, int size, value_t *v); ++void sial_enqueue(var_t *vl, var_t *v); ++void sial_freenode(node_t *n); ++void sial_validate_vars(var_t *svs); ++void sial_freesvs(var_t *svs); ++void *sial_setexcept(void); ++void sial_tdef_decl(dvar_t *dv, type_t *t); ++void sial_refarray(value_t *v, int inc); ++void *sial_curmac(void); ++void sial_setfct(value_t *v1, value_t *v2); ++void sial_exevi(char *fname, int line); ++void sial_unput(char); ++void sial_dupval(value_t *v, value_t *vs); ++void sial_parseback(void); ++void sial_curpos(srcpos_t *p, srcpos_t *s); ++void sial_rmexcept(void *osa); ++void sial_chksign(type_t*t); ++void sial_chksize(type_t*t); ++void sial_setpos(srcpos_t *p); ++void sial_rerror(srcpos_t *p, char *fmt, ...); ++void sial_rwarning(srcpos_t *p, char *fmt, ...); ++void sial_chkandconvert(value_t *vto, value_t *vfrm); ++void sial_warning(char *fmt, ...); ++void sial_format(int tabs, char *str); ++void sial_freevar(var_t*v); ++void sial_rmbuiltin(var_t*v); ++void sial_rm_globals(void *vg); ++void sial_addnewsvs(var_t*avl, var_t*svl, var_t*nvl); ++void sial_dojmp(int type, void *val); ++void sial_pushbuf(char *buf, char *fname, void(*f)(void*), void *d, void *m); ++void sial_rsteofoneol(void); ++void sial_settakeproto(int v); ++void sial_popallin(void); ++void sial_tagst(void); ++void sial_flushtdefs(void); ++void sial_setsvlev(int newlev); ++void sial_flushmacs(void *tag); ++void sial_add_auto(var_t*nv); ++void *sial_chkbuiltin(char *name); ++void sial_freedata(value_t *v); ++void sial_dupdata(value_t *v, value_t *vs); ++void sial_setarray(array_t**arpp); ++void sial_rawinput(int on); ++void 
sial_setini(node_t*n); ++void sial_valindex(value_t *var, value_t *idx, value_t *ret); ++void sial_free_siblings(node_t*ni); ++void sial_mkvsigned(value_t*v); ++void sial_transval(int s1, int s2, value_t *v, int issigned); ++void sial_popref(type_t*t, int ref); ++void sial_getmem(ull kp, void *p, int n); ++void sial_baseop(int op, value_t *v1, value_t *v2, value_t *result); ++void sial_setinsizeof(int v); ++void sial_freeidx(idx_t *idx); ++void sial_freedvar(dvar_t*dv); ++void sial_pushenums(enum_t *et); ++void sial_addfunc_ctype(int idx); ++void sial_setapiglobs(void); ++void sial_setbuiltins(void); ++void sial_setdefbtype(int size, int sign); ++void get_bit_value(ull val, int nbits, int boff, int size, value_t *v); ++void *sial_findfile(char *name, int unlink); ++void sial_newmac(char *mname, char *buf, int np, char **p, int silent); ++void *sial_getcurfile(void); ++void *sial_getcurfile(void); ++void sial_startctype(int type, node_t*namen); ++void sial_addtolist(var_t*vl, var_t*v); ++void sial_arch_swapvals(void* vp, void *sp); ++void sial_fillst(stinfo_t *st); ++void sial_exememlocal(value_t *vp, stmember_t* stm, value_t *v); ++void sial_do_deref(int n, value_t *v, value_t *ref); ++void sial_addneg(char *name); ++ ++stmember_t*sial_member(char *mname, type_t*tp); ++ ++ull set_bit_value_t(ull dvalue_t, ull value_t, int nbits, int boff); ++ull unival(value_t *); ++ul sial_bool(value_t *); ++ ++value_t *sial_docall(node_t *, node_t *, void *); ++value_t *sial_docast(void); ++value_t *sial_newval(void); ++value_t *sial_exebfunc(char *, value_t **); ++value_t *sial_exevar(void *); ++value_t *sial_exenode(node_t *); ++value_t *sial_setstrval(value_t *, char *); ++value_t *sial_defbtype(value_t *, ull); ++value_t *sial_defbtypesize(value_t *, ull, int); ++value_t *sial_sprintf(value_t *, ...); ++value_t *sial_printf(value_t *, ...); ++value_t *sial_exists(value_t *vname); ++value_t *sial_exit(int v); ++value_t *sial_bload(value_t *name); ++value_t *sial_bdepend(value_t *name); ++value_t *sial_bunload(value_t *vfname); ++value_t *sial_showtemp(void); ++value_t *sial_showaddr(value_t *vadr); ++value_t *sial_findsym(value_t *vadr); ++value_t *sial_memdebugon(void); ++value_t *sial_memdebugoff(void); ++value_t *sial_ismember(value_t*vp, value_t*vm); ++ ++value_t *sial_prarr(value_t*name, value_t*root); ++value_t *sial_getstr(value_t*vm); ++ ++var_t *sial_vardecl(dvar_t *dv, type_t *t); ++var_t *sial_inlist(char *name, var_t *vl); ++var_t *sial_dupvlist(var_t *vl); ++var_t *sial_getcurgvar(void); ++var_t *sial_getvarbyname(char *name, int silent, int local); ++var_t *sial_getsgrp_avs(node_t *n); ++var_t *sial_getsgrp_svs(node_t *n); ++var_t *sial_parsexpr(char *); ++ ++int sial_file_decl(var_t *svs); ++int sial_newfunc(var_t *fvar, node_t* body); ++int sial_line(int inc); ++int sial_samectypename(int type_t, ull idx1, ull idx2); ++int sial_issigned(int attr); ++int sial_isstatic(int atr); ++int sial_isjuststatic(int attr); ++int sial_isconst(int atr); ++int sial_issigned(int atr); ++int sial_istdef(int atr); ++int sial_isxtern(int atr); ++int sial_isvoid(int atr); ++int sial_isstor(int atr); ++int sial_ispartial(type_t*t); ++int sial_input(void); ++int sial_addsvs(int type, var_t*sv); ++int sial_pushfile(char *name); ++int sial_chkfname(char *fname, void *fd); ++int sial_lookuparray(node_t*vnode, node_t*arrnode); ++int sial_runcmd(char *fname, var_t*args); ++int sial_getseq(int c); ++int sial_newfile(char *name, int silent); ++int sial_deletefile(char *name); ++int sial_getsvlev(void); ++int 
sial_idxtoattr(int idx); ++int sial_docase(ull val, caselist_t*cl); ++int siallex(void); ++int sialpplex(void); ++int sial_ismemdebug(void); ++int sial_isenum(int atr); ++int sial_funcexists(char *name); ++int sial_isnew(void* p); ++int sial_isneg(char *name); ++ ++char *sial_vartofunc(node_t *name); ++char *sial_gettdefname(ull idx); ++char *sial_ctypename(int type_t); ++char *sial_filempath(char *fname); ++char *sial_fileipath(char *fname); ++char *sial_getline(void); ++char *sial_cursorp(void); ++char *sial_getbtypename(int typattr); ++char *sial_filename(void); ++char *sial_curp(char *); ++ ++type_t *sial_newcast(var_t *v); ++type_t *sial_newctype(int ctype_t, node_t *n); ++type_t *sial_addbtype(type_t *t, int newtok); ++type_t *sial_ctype_decl(int ctype_t, node_t *n, var_t *list); ++type_t *sial_enum_decl(int ctype_t, node_t *n, dvar_t *dvl); ++type_t *sial_addstorage(type_t *t1, type_t *t2); ++type_t *sial_getvoidstruct(int ctype); ++ ++extern int lineno, needvar, instruct, nomacs; ++node_t *lastv; ++ ++#define NULLNODE ((node_t*)0) ++ ++/* configuration variables */ ++#define S_MAXSTRLEN 1024 /* lengh of a STRING variable value_t */ ++#define S_MAXDEEP 500 /* maximum stacking of calls */ ++#define S_MAXFILES 200 /* maximum number of macro files */ ++ ++#define S_VARARG "__VARARG" /* name of the special var for ... */ +--- crash/extensions/libsial/sial_stat.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_stat.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,435 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include "sial.h" ++#include "sial.tab.h" ++#include ++#include ++ ++#define MAXPARMS 10 ++ ++typedef struct stat { ++ ++ int stype; ++ int np; ++ struct stat *next; ++ srcpos_t pos; ++ node_t*n; ++ node_t*parms[MAXPARMS]; ++ var_t*svs; /* if statement block then these are the auto and static ++ wars for it */ ++ var_t*avs; ++ ++} stat; ++ ++#define SETVS value_t *v1=0,*v2=0,*v3=0,*v4=0 ++#define FV1 sial_freeval(v1),v1=0 ++#define FV2 sial_freeval(v2),v2=0 ++#define FV3 sial_freeval(v3),v3=0 ++#define FV4 sial_freeval(v4),v4=0 ++#define UNSETVS FV1,FV2,FV3,FV4 ++ ++#define P1 (s->parms[0]) ++#define P2 (s->parms[1]) ++#define P3 (s->parms[2]) ++#define P4 (s->parms[3]) ++ ++#define V1 (v1?v1:(v1=NODE_EXE(P1))) ++#define V2 (v2?v2:(v2=NODE_EXE(P2))) ++#define V3 (v3?v3:(v3=NODE_EXE(P3))) ++#define V4 (v4?v4:(v4=NODE_EXE(P4))) ++ ++#define L1 (unival(V1)) ++#define L2 (unival(V2)) ++#define L3 (unival(V3)) ++#define L4 (unival(V4)) ++ ++#define S1 (V1->v.data) ++#define S2 (V2->v.data) ++#define S3 (V3->v.data) ++#define S4 (V4->v.data) ++ ++/* this is used to execute staement lists e.g. 
i=1,j=3; */ ++static value_t* ++sial_exeplist(node_t*n) ++{ ++value_t *val=0; ++ ++ if(n) { ++ ++ do { ++ ++ if(val) sial_freeval(val), val=0; ++ val=NODE_EXE(n); ++ n=n->next; ++ ++ } while(n); ++ } ++ return val; ++} ++ ++static int ++sial_dofor(stat *s) ++{ ++jmp_buf brkenv; ++jmp_buf cntenv; ++SETVS; ++ ++ if(!setjmp(brkenv)) { ++ ++ sial_pushjmp(J_BREAK, &brkenv, 0); ++ ++ v1=sial_exeplist(P1); ++ FV1; ++ ++ while(!P2 || sial_bool(V2)) { ++ ++ FV2; ++ ++ if(!setjmp(cntenv)) { ++ ++ sial_pushjmp(J_CONTINUE, &cntenv, 0); ++ V4; ++ FV4; ++ sial_popjmp(J_CONTINUE); ++ ++ } ++ ++ UNSETVS; /* make sure we re-execute everything each time */ ++ v3=sial_exeplist(P3); ++ FV3; ++ } ++ sial_popjmp(J_BREAK); ++ ++ } ++ UNSETVS; ++ return 1; ++} ++ ++static int ++sial_dowhile(stat *s) ++{ ++jmp_buf brkenv; ++jmp_buf cntenv; ++SETVS; ++ ++ if(!setjmp(brkenv)) { ++ ++ sial_pushjmp(J_BREAK, &brkenv, 0); ++ ++ while(sial_bool(V1)) { ++ ++ FV1; ++ ++ if(!setjmp(cntenv)) { ++ ++ sial_pushjmp(J_CONTINUE, &cntenv, 0); ++ V2; ++ FV2; ++ sial_popjmp(J_CONTINUE); ++ ++ } ++ ++ UNSETVS; /* make sure we re-execute everything each time */ ++ } ++ FV1; ++ sial_popjmp(J_BREAK); ++ ++ } ++ ++ return 1; ++} ++ ++static int ++sial_dodo(stat *s) ++{ ++jmp_buf brkenv; ++jmp_buf cntenv; ++SETVS; ++ ++ if(!setjmp(brkenv)) { ++ ++ sial_pushjmp(J_BREAK, &brkenv, 0); ++ ++ do { ++ ++ FV2; ++ if(!setjmp(cntenv)) { ++ ++ sial_pushjmp(J_CONTINUE, &cntenv, 0); ++ V1; ++ FV1; ++ sial_popjmp(J_CONTINUE); ++ ++ } ++ ++ UNSETVS; /* make sure we re-execute everything each time */ ++ ++ } while (sial_bool(V2)); ++ FV2; ++ ++ sial_popjmp(J_BREAK); ++ ++ } ++ ++ UNSETVS; ++ return 1; ++} ++ ++static int ++sial_doif(stat *s) ++{ ++SETVS; ++ul b; ++ ++ b=sial_bool(V1); ++ FV1; ++ ++ if(s->np==3) { ++ ++ if (b) ++ V2; ++ else ++ V3; ++ ++ } else { ++ ++ if (b) ++ V2; ++ ++ } ++ ++ UNSETVS; ++ return 1; ++} ++ ++static int ++sial_doswitch(stat *s) ++{ ++jmp_buf brkenv; ++ull cval; ++SETVS; ++ ++ if(!setjmp(brkenv)) { ++ ++ sial_pushjmp(J_BREAK, &brkenv, 0); ++ cval=unival(V1); ++ FV1; ++ sial_docase(cval, P2->data); ++ sial_popjmp(J_BREAK); ++ ++ } ++ ++ UNSETVS; ++ return 1; ++} ++ ++static void ++sial_exein(stat *s) ++{ ++jmp_buf cntenv; ++SETVS; ++ ++ if(!setjmp(cntenv)) { ++ ++ sial_pushjmp(J_CONTINUE, &cntenv, 0); ++ V3; ++ sial_popjmp(J_CONTINUE); ++ ++ } ++ UNSETVS; ++} ++ ++static int ++sial_doin(stat *s) ++{ ++jmp_buf brkenv; ++ if(!setjmp(brkenv)) { ++ ++ sial_pushjmp(J_BREAK, &brkenv, 0); ++ sial_walkarray(P1, P2, (void (*)(void *))sial_exein, s); ++ sial_popjmp(J_BREAK); ++ } ++ return 1; ++} ++ ++/* this is where all of the flow control takes place */ ++ ++static value_t* ++sial_exestat(stat *s) ++{ ++srcpos_t p; ++value_t *val=0; ++ ++ do { ++ ++ /* dump the val while looping */ ++ if(val) sial_freeval(val); ++ val=0; ++ ++ sial_curpos(&s->pos, &p); ++ ++ ++ switch(s->stype) { ++ ++ case FOR : sial_dofor(s); break; ++ case WHILE: sial_dowhile(s); break; ++ case IN: sial_doin(s); break; ++ case IF: sial_doif(s); break; ++ case DO: sial_dodo(s); break; ++ case SWITCH: sial_doswitch(s); break; ++ case DOBLK: ++ { ++ int lev; ++ ++ /* add any static variables to the current context */ ++ lev=sial_addsvs(S_STAT, s->svs); ++ sial_addsvs(S_AUTO, sial_dupvlist(s->avs)); ++ ++ /* with the block statics inserted exeute the inside stmts */ ++ if(s->next) val=sial_exestat(s->next); ++ ++ /* remove any static variables to the current context */ ++ if(s->svs) sial_setsvlev(lev); ++ ++ sial_curpos(&p, 0); ++ ++ return val; ++ } ++ ++ case 
BREAK: sial_dojmp(J_BREAK, 0); break; ++ case CONTINUE: sial_dojmp(J_CONTINUE, 0); break; ++ case RETURN: { ++ ++ ++ if(s->parms[0]) { ++ ++ val=(s->parms[0]->exe)(s->parms[0]->data); ++ } ++ else val=sial_newval(); ++ ++ sial_curpos(&p, 0); ++ sial_dojmp(J_RETURN, val); ++ } ++ break; ++ case PATTERN: ++ ++ val=sial_exeplist(s->parms[0]); ++ ++ } ++ ++ sial_curpos(&p, 0); ++ ++ } while((s=s->next)); ++ ++ /* we most return a type val no mather what it is */ ++ /* that's just the way it is...Somethings will never change...*/ ++ if(!val) val=sial_newval(); ++ ++ return val; ++} ++ ++void ++sial_freestat(stat *s) ++{ ++int i; ++ ++ if(s->next) sial_freenode(s->next->n); ++ ++ for(i=0;inp && s->parms[i];i++) { ++ ++ NODE_FREE(s->parms[i]); ++ ++ } ++ sial_free(s); ++} ++ ++void ++sial_freestat_static(stat *s) ++{ ++ ++ if(s->next) sial_freenode(s->next->n); ++ ++ /* free associated static var list */ ++ sial_freesvs(s->svs); ++ sial_freesvs(s->avs); ++ sial_free(s); ++} ++ ++var_t*sial_getsgrp_avs(node_t*n) { return ((stat *)n->data)->avs; } ++var_t*sial_getsgrp_svs(node_t*n) { return ((stat *)n->data)->svs; } ++ ++/* add a set of static variable to a statement */ ++node_t* ++sial_stat_decl(node_t*n, var_t*svs) ++{ ++node_t*nn; ++stat *s; ++ ++ sial_validate_vars(svs); ++ ++ nn=sial_newnode(); ++ s=sial_alloc(sizeof(stat)); ++ ++ /* add statics and autos to this statement */ ++ s->svs=sial_newvlist(); ++ s->avs=sial_newvlist(); ++ sial_addnewsvs(s->avs, s->svs, svs); ++ ++ if(n) s->next=(stat*)(n->data); ++ else s->next=0; ++ s->stype=DOBLK; ++ s->n=nn; ++ nn->exe=(xfct_t)sial_exestat; ++ nn->free=(ffct_t)sial_freestat_static; ++ nn->data=s; ++ sial_setpos(&s->pos); ++ ++ return nn; ++} ++ ++node_t* ++sial_newstat(int type, int nargs, ...) ++{ ++va_list ap; ++node_t*n=sial_newnode(); ++stat *s=sial_alloc(sizeof(stat)); ++int i; ++ ++ s->stype=type; ++ ++ va_start(ap, nargs); ++ ++ for(i=0;iparms[i]=va_arg(ap, node_t*); ++ } ++ ++ s->np=i; ++ s->n=n; ++ s->next=0; ++ n->exe=(xfct_t)sial_exestat; ++ n->free=(ffct_t)sial_freestat; ++ n->data=s; ++ ++ sial_setpos(&s->pos); ++ ++ va_end(ap); ++ return n; ++} ++ ++node_t* ++sial_addstat(node_t*list, node_t*s) ++{ ++ if(!s && list) return list; ++ if(s && !list) return s; ++ else { ++ stat *sp=(stat*)(list->data); ++ ++ while(sp->next) sp=sp->next; ++ sp->next=(stat*)(s->data); ++ return list; ++ ++ } ++} ++ +--- crash/extensions/libsial/sial_util.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_util.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,922 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. 
++ */ ++#include "sial.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static FILE *ofile=0; ++static int cols=25; ++static char *bold_on, *bold_off; ++ ++ ++static void ++sial_getwinsize(void) ++{ ++struct winsize w; ++ ++ if (ioctl (fileno(ofile), TIOCGWINSZ, &w) == 0) ++ { ++ cols=w.ws_col; ++ } ++ else /* use ENV */ ++ { ++ char *ewidth; ++ if ((ewidth = getenv ("COLUMNS"))) ++ cols = atoi (ewidth); ++ /* use what's in terminfo */ ++ if (cols <= 0) ++ cols = tigetnum ("co"); ++ } ++ if(cols <= 10) cols=10; ++ if(cols > 80) cols=80; ++} ++ ++void ++sial_setofile(void * f) ++{ ++int out; ++int ret; ++char *term; ++ ++ ofile=(FILE *)f; ++ ++ bold_on=""; ++ bold_off=""; ++ cols=80; ++ ++ out=fileno(ofile); ++ if(isatty(out)) ++ { ++ ++ if(!(term = getenv ("TERM"))) term="dumb"; ++ if(setupterm(term, out, &ret)!=ERR) ++ { ++ bold_on=tigetstr("bold"); ++ if(!bold_on) bold_on=""; ++ bold_off=tigetstr("sgr0"); ++ if(!bold_off) bold_off=""; ++ } ++ sial_getwinsize(); ++ } ++} ++ ++void * ++sial_getofile(void) ++{ ++ return ofile; ++} ++ ++/* ++ Output a line of text to the screen with line wrap ++ and escape sequence. ++*/ ++#define ESC '<' ++#define ESC2 '>' ++ ++static int ++sial_tabs(int tabs, char *t, int lf) ++{ ++int i; ++ ++ if(lf) fprintf(ofile, "\n"); ++ for(i=0;i cols) { ++ ++ char *p3=p+(cols-n-1); ++ ++ char c=*p3; ++ char c2=*(p3+1); ++ ++ *p3='-'; ++ *(p3+1)='\0'; ++ ++ fprintf(ofile, "%s", p); ++ *p3=c; ++ *(p3+1)=c2; ++ n=sial_tabs(tabs, t, 0); ++ ++ } else if(n + (p2-p) >= cols) { ++ ++ n=sial_tabs(tabs, t, 1); ++ ++ } else { ++ ++ fprintf(ofile, " "); ++ n++; ++ } ++ ++ } else if(*p=='\n') { ++ ++ n=sial_tabs(tabs, t, 1); ++ ++ } else { ++ ++ fprintf(ofile, "%c", *p); ++ n++; ++ } ++ } ++ ++} ++ ++void ++sial_msg(char *fmt, ...) ++{ ++va_list ap; ++ va_start(ap, fmt); ++ vfprintf(ofile, fmt, ap); ++ va_end(ap); ++} ++ ++void ++sial_freenode(node_t *n) ++{ ++ n->free(n->data); ++ sial_free(n); ++} ++ ++int lineno=1, lastline=1; ++int col=1; ++static char *filename=0; ++static char *lastfile=0; ++ ++void ++sial_setlastfile(char *fname, int line) ++{ ++ if(!fname) return; ++ if(lastfile) sial_free(lastfile); ++ lastfile=sial_strdup(fname); ++ lastline=line; ++} ++ ++void ++sial_rstpos(void) ++{ ++ lineno=1; ++ col=1; ++ /* do not free filename */ ++ filename=0; ++} ++ ++void ++sial_setpos(srcpos_t *p) ++{ ++ p->line=lineno; ++ p->col=col; ++ p->file=filename; ++} ++ ++/* set the current position */ ++void ++sial_curpos(srcpos_t *p, srcpos_t *s) ++{ ++ if(s) { ++ s->line=lineno; ++ s->col=col; ++ s->file=filename; ++ } ++ lineno=p->line; ++ col=p->col; ++ filename=p->file; ++} ++ ++int ++sial_line(int inc){ return lineno+=inc; } ++ ++int ++sial_col(int inc) { return col+=inc; } ++ ++char * ++sial_filename(void) { return filename; } ++ ++/* ++ This function scans a printf() fmt string and transaletes the %p ++ to %08x or %016x depending on the pointer size of the object image. ++ We also substiture %> for 8 spaces if the pointer size is 4 bytes, this ++ permits easy allignment of output on either 32 or 64 bit images. ++ ++ ex: ++ ++ Proc %> pid ppid ++ %p %3d %3d ++ ++ In this case the %> alligns the pid with it's corresponding value_t ++ in the next line of output. ++ ++ We also process the '?' format which will be set to match the ++ corresponding value_t type. ++ ++ Also, format versus argument type validation is performed. 
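   As a rough, illustrative sketch of the rules above (not an excerpt from
   the code, and the exact padding is approximate; task and pid are
   hypothetical script variables): a script line such as

	printf("Task %>     pid\n%p %5d\n", task, pid);

   has its format rewritten to use "%016llx" on a 64-bit image and "%08x"
   on a 32-bit one, with "%>" expanding to eight extra pad characters only
   in the 64-bit case so the "pid" header stays lined up with its value in
   either layout; a "%?" in the same string would pick %d or %u (with "ll"
   added for 8-byte values), %s for strings, or the pointer rule, depending
   on the type of the matching argument.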
++ ++*/ ++ ++/* ++ Printf formats have the form : ++ %3$-*3$.*4$lld ++ %20x ++ %08x ++ %-08.8f ++*/ ++/* these are the buildin blocks for a regex matching formats */ ++#define F_POSP "([0-9]+\\$)*" ++#define F_FLGS "([-'+ #0]*)" ++#define F_WARG "(\\*([0-9]+\\$)*){0,1}" ++#define F_WIDTH "([0-9]*)" ++#define F_PREC "((\\.(\\*([0-9]+\\$)*)*([0-9]*))*)" ++#define F_SIZE "([hlL]*)" ++#define F_FMT "([diouxXfeEgGcCsSpn?>]{1})" ++#define FMTREG F_POSP""F_FLGS""F_WARG""F_WIDTH""F_PREC""F_SIZE""F_FMT ++#define M_POSP 1 ++#define M_FLAGS 2 ++#define M_WIDTHARG 3 ++#define M_WIDTDIGITS 4 ++#define M_WIDTH 5 ++#define M_PRECARG 8 ++#define M_PRECDIGITS 9 ++#define M_PREC 10 ++#define M_SIZE 11 ++#define M_FMT 12 ++#define NMATCH 16 ++static int addit[]={M_FLAGS,M_WIDTHARG,M_WIDTH,M_PRECARG,M_PREC,M_SIZE}; ++ ++#define ptrto(idx) (matches[idx].rm_so==matches[idx].rm_eo?0:(pi+matches[idx].rm_so)) ++#define matchlen(idx) (matches[(idx)].rm_eo-matches[(idx)].rm_so) ++ ++void sial_error(char *fmt, ...); ++ ++static int ++chkforint(char *p, value_t **vals, int *curarg) ++{ ++int pos=-1; ++ ++ if(!p) return -1; ++ ++ /* a single star ? */ ++ if(isdigit(p[1])) { ++ ++ if(sscanf(p+1, "%d", &pos)!=1) { ++ ++ return pos; ++ } ++ pos--; ++ ++ } else { ++ ++ pos=*curarg; ++ *curarg=(*curarg)+1; ++ ++ } ++ ++ if(pos < BT_MAXARGS && vals[pos] && vals[pos]->type.type == V_BASE) return pos; ++ sial_error("Expected 'integer' type for arg%d", pos+1); ++ return -1; ++} ++ ++#define pushval(val, s, sig) ( \ ++ sig ? \ ++ ( \ ++ (s==8) ? \ ++ (val)->v.sll \ ++ : ( \ ++ (s==4) ? \ ++ (val)->v.sl \ ++ : ( \ ++ (s==2) ? \ ++ (val)->v.ss \ ++ :( \ ++ (s==1) ? \ ++ (val)->v.sc \ ++ :( \ ++ sial_error("Oops pushval"),1 \ ++ ) \ ++ ) \ ++ ) \ ++ ) \ ++ ) : ( \ ++ (s==8) ? \ ++ (val)->v.ull \ ++ : ( \ ++ (s==4) ? \ ++ (val)->v.ul \ ++ : ( \ ++ (s==2) ? \ ++ (val)->v.us \ ++ :( \ ++ (s==1) ? \ ++ (val)->v.uc \ ++ :( \ ++ sial_error("Oops pushval"),1 \ ++ ) \ ++ ) \ ++ ) \ ++ ) \ ++ ) \ ++ ) ++ ++ ++static char * ++add_fmt(int len, char *s, char *onefmt, int ppos, int wpos, int posarg, value_t **vals) ++{ ++int size=(vals[posarg]->type.type == V_REF ? sial_defbsize(): vals[posarg]->type.size); ++int sign=(vals[posarg]->type.type == V_REF ? 
0 : sial_issigned(vals[posarg]->type.typattr)); ++ ++ if(vals[posarg]->type.type == V_STRING) { ++ ++ if(wpos>=0 && ppos<0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[wpos]) ++ , vals[posarg]->v.data); ++ else if(wpos<0 && ppos>=0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[ppos]) ++ , vals[posarg]->v.data); ++ else if(wpos>=0 && ppos>=0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[wpos]) ++ , (int)sial_getval(vals[ppos]) ++ , vals[posarg]->v.data); ++ else s+=snprintf(s, len, onefmt ++ , vals[posarg]->v.data); ++ ++ } else { ++#if defined(__s390x__) || defined(__s390__) ++ if(wpos>=0 && ppos<0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[wpos]) ++ , (unsigned long)pushval(vals[posarg], size, sign)); ++ else if(wpos<0 && ppos>=0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[ppos]) ++ , (unsigned long)pushval(vals[posarg], size, sign)); ++ else if(wpos>=0 && ppos>=0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[wpos]) ++ , (int)sial_getval(vals[ppos]) ++ , (unsigned long) pushval(vals[posarg], size, sign)); ++ else s+=snprintf(s, len, onefmt ++ , (unsigned long) pushval(vals[posarg], size, sign)); ++#else ++ if(wpos>=0 && ppos<0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[wpos]) ++ , pushval(vals[posarg], size, sign)); ++ else if(wpos<0 && ppos>=0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[ppos]) ++ , pushval(vals[posarg], size, sign)); ++ else if(wpos>=0 && ppos>=0) ++ s+=snprintf(s, len, onefmt ++ , (int)sial_getval(vals[wpos]) ++ , (int)sial_getval(vals[ppos]) ++ , pushval(vals[posarg], size, sign)); ++ else s+=snprintf(s, len, onefmt ++ , pushval(vals[posarg], size, sign)); ++#endif ++ } ++ return s; ++} ++ ++static char * ++sial_ptr(char *fmt, value_t **vals) ++{ ++ /* We need to ensure that we dont overflow our string buffer. Although its unlikely we will overflow it with ++ just numbers, strings will easliy overflow. So, lets check for strings and see how long they are. ++ */ ++int len=0; ++char *nfmt=NULL,*ni=NULL; ++char *onefmt=NULL, *onei=NULL; ++char *p=fmt; ++char last=' '; ++int curarg=0; ++#define NBYTES (len-(nfmt-ni)) ++ ++int i = 0; ++ ++ while(vals[i] != NULL) { ++ if(vals[i]->type.type == V_STRING) ++ len+=vals[i]->type.size; ++ i++; ++ } ++ /* We add a fudge factor of 100, which should cover all the number arguments */ ++ len+=strlen(fmt) + 100; ++ nfmt=sial_alloc(len); ++ ni=nfmt; ++ onefmt=sial_alloc(len); ++ onei=onefmt; ++ ++ ++ ++ while(*p) { ++ ++ if(*p=='%') { ++ ++ static regex_t preg; ++ static int done=0; ++ regmatch_t matches[NMATCH]; ++ ++ if(!done) { ++ ++ regcomp(&preg, FMTREG, REG_EXTENDED); ++ done=1; ++ } ++ ++ /* build a new format translation */ ++ onefmt=onei; ++ *onefmt++=*p++; ++ ++ /* if the returned pointer is (char*)-1 or NULL then something is wrong */ ++ if(!regexec(&preg, p, NMATCH, matches, 0)) { ++ ++ int i, n=matches[0].rm_eo-1; ++ int posarg, wpos, ppos; ++ char *pi=p; /* save p for ptrto() macro */ ++ ++ /* check that the width and precision field args point ++ to a int value_t. If they were used */ ++ wpos=chkforint(ptrto(M_WIDTHARG), vals, &curarg); ++ ppos=chkforint(ptrto(M_PRECARG), vals, &curarg); ++ ++ /* argument position was specfified ? 
*/ ++ if(ptrto(M_POSP)) { ++ ++ /* we work from 0-n, printf works from 1-n */ ++ if(sscanf(ptrto(M_POSP), "%d", &posarg)==1) posarg--; ++ ++ if(posarg >= BT_MAXARGS || !vals[posarg]) { ++ sial_error("Invalid arg position specified [%d]", posarg+1); ++ } ++ ++ } else posarg=curarg++; ++ ++ /* jump over the format spec in the original */ ++ p+=n; ++#if 0 ++for(i=0;i=0 ){ ++ ++ *onefmt++='*'; ++ ++ } else goto def; ++ ++ break; ++ case M_PRECARG: ++ ++ if(ppos >=0 ){ ++ ++ *onefmt++='.'; ++ *onefmt++='*'; ++ ++ } else goto def; ++ ++ break; ++ case M_PREC: ++ if(ptrto(addit[i])) *onefmt++='.'; ++ goto def; ++ default: ++def: ++ if(ptrto(addit[i])) { ++ strcpy(onefmt, ptrto(addit[i])); ++ onefmt+=matchlen(addit[i]); ++ } ++ } ++ } ++ ++ if(*p=='p') { ++ ++ref: ++ /* if user overrides anything don't do nothing */ ++ if(ptrto(M_FLAGS)||ptrto(M_WIDTH)||ptrto(M_WIDTHARG)||ptrto(M_PREC)||ptrto(M_PRECARG)||ptrto(M_SIZE)) { ++ *onefmt++='p'; ++ ++ } else { ++ if(sial_defbsize()==8) { ++ ++ strcpy(onefmt, "016llx"); ++ onefmt+=6; ++ ++ } else { ++ ++ strcpy(onefmt, "08x"); ++ onefmt+=3; ++ } ++ } ++ *onefmt='\0'; ++ p++; ++ nfmt=add_fmt(NBYTES, nfmt, onei, ppos, wpos, posarg, vals); ++ ++ } else if(*p=='>') { ++ ++ nfmt--; ++ if(sial_defbsize()==8) { ++ ++ int i; ++ ++ for(i=0;i<8;i++) *nfmt++=last; ++ } ++ p++; ++ curarg--; ++ ++ } else if(*p=='?') { ++ ++ /* put the proper format for the user */ ++ if(!vals[posarg]) { ++ ++ sial_error("Expected additional argument %d\n", posarg+1); ++ ++ } else switch(vals[posarg]->type.type) { ++ ++ case V_BASE: case V_ENUM: ++ { ++ if(!ptrto(M_SIZE)) { ++ ++ if(vals[posarg]->type.size==8) { ++ ++ *onefmt++='l'; ++ *onefmt++='l'; ++ } ++ } ++ if(sial_issigned(vals[posarg]->type.typattr)) { ++ ++ *onefmt++='d'; ++ ++ }else{ ++ ++ *onefmt++='u'; ++ } ++ } ++ break; ++ case V_REF: ++ { ++ *p='p'; ++ goto ref; ++ } ++ case V_STRING: ++ { ++ *onefmt++='s'; ++ } ++ break; ++ } ++ p++; ++ *onefmt='\0'; ++ nfmt=add_fmt(NBYTES, nfmt, onei, ppos, wpos, posarg, vals); ++ ++ } else { ++ ++ /* check that format and value_t agree */ ++ /* can't do a lot more then check for strings vs anything_else */ ++ ++ if(!vals[posarg]) { ++ ++ sial_error("Expected additional argument %d\n", posarg+1); ++ ++ ++ } else if(*p=='s') { ++ ++ if(vals[posarg]->type.type != V_STRING) { ++ ++ sial_error("Expected type 'string' as arg%d", posarg+1); ++ } ++ ++ } else if(vals[posarg]->type.type == V_STRING) { ++ ++ sial_error("Incompatible type 'string' in arg%d", posarg+1); ++ ++ } ++ *onefmt++=*p++; ++ *onefmt='\0'; ++ nfmt=add_fmt(NBYTES, nfmt, onei, ppos, wpos, posarg, vals); ++ } ++ ++ } else { ++ ++ sial_warning("Malformed format specifier!"); ++ ++ } ++ ++ } else { ++ ++ last=*p; ++ if(nfmt-ni > len) sial_error("format tranlation overflow!"); ++ *nfmt++=*p++; ++ ++ } ++ } ++ sial_free(onei); ++ *nfmt='\0'; ++ return ni; ++} ++ ++value_t* sial_printf(value_t *vfmt, ...) 
++{ ++char *fmt = sial_getptr(vfmt, char); ++va_list ap; ++value_t *vals[BT_MAXARGS]; ++int i; ++ ++ va_start(ap, vfmt); ++ for(i=0;i 9) ++ sial_msg("Invalid debug level value.\n"); ++ else ++ dbglvl=lvl; ++} ++char *sial_getname(void) ++{ ++ return dbg_name; ++} ++ ++void sial_setname(char *name) ++{ ++ if(dbg_name) sial_free(dbg_name); ++ dbg_name=sial_strdup(name); ++} ++ ++#define MAXCLASSES 10 ++static struct { ++ char *name; ++ int class; ++} classes [MAXCLASSES] = { ++ { "type", DBG_TYPE }, ++ { "struct", DBG_STRUCT }, ++ { 0 }, ++}; ++ ++char **sial_getclass(void) ++{ ++int i,j; ++static char *ptrs[MAXCLASSES+1]; ++ ++ for(i=j=0;classes[i].name;i++) { ++ if(clist&classes[i].class) ptrs[j++]=classes[i].name; ++ } ++ ptrs[i]=0; ++ return ptrs; ++} ++ ++void sial_setclass(char *cl) ++{ ++int i,j; ++ ++ for(i=0;classes[i].name;i++) { ++ if(!strcmp(classes[i].name,cl)) { ++ clist |= classes[i].class; ++ return; ++ } ++ } ++ sial_msg("Invalid class '%s' specified.\n", cl); ++} ++ ++static void ++sial_dbg_all(int class, char *name, int lvl, char *fmt, va_list ap) ++{ ++ if(lvl<=dbglvl && (clist & class) && (!dbg_name || !strcmp(name, dbg_name))) { ++ fprintf(ofile, "dbg(%d) : ", lvl); ++ vfprintf(ofile, fmt, ap); ++ } ++} ++ ++void ++sial_dbg(int class, int lvl, char *fmt, ...) ++{ ++va_list ap; ++ va_start(ap, fmt); ++ sial_dbg_all(class, 0, lvl, fmt, ap); ++ va_end(ap); ++} ++ ++void ++sial_dbg_named(int class, char *name, int lvl, char *fmt, ...) ++{ ++va_list ap; ++ va_start(ap, fmt); ++ sial_dbg_all(class, name, lvl, fmt, ap); ++ va_end(ap); ++} ++/******************************************************************/ ++ ++void ++sial_rerror(srcpos_t *p, char *fmt, ...) ++{ ++va_list ap; ++ ++ sial_setlastfile(p->file, p->line); ++ va_start(ap, fmt); ++ fprintf(ofile, "%s : line %d : Error: ", p->file, p->line); ++ vfprintf(ofile, fmt, ap); ++ fprintf(ofile, "\n"); ++ va_end(ap); ++ sial_exit(1); ++} ++ ++void ++sial_warning(char *fmt, ...) ++{ ++va_list ap; ++ ++ sial_setlastfile(filename, sial_line(0)); ++ va_start(ap, fmt); ++ fprintf(ofile, "%s : line %d : Warning: ", filename, lineno); ++ vfprintf(ofile, fmt, ap); ++ fprintf(ofile, "\n"); ++ va_end(ap); ++} ++ ++void ++sial_rwarning(srcpos_t *p, char *fmt, ...) 
++{ ++va_list ap; ++ ++ sial_setlastfile(p->file, p->line); ++ va_start(ap, fmt); ++ fprintf(ofile, "%s : line %d : Warning: ", p->file, p->line); ++ vfprintf(ofile, fmt, ap); ++ fprintf(ofile, "\n"); ++ va_end(ap); ++} ++ ++void ++sial_vilast() ++{ ++ if(lastfile) { ++ ++ sial_exevi(lastfile, lastline); ++ ++ } else { ++ ++ sial_msg("No last error record available"); ++ } ++} ++ ++void ++sial_getcomment(void) ++{ ++ while(1) { ++ ++ unsigned char c; ++ ++ while((c=sial_input())!='*' && c!=255) ++ ++ if(c==255) goto bad; ++ ++ if((c=sial_input())=='/') return; ++ else if(c==255) { ++bad: ++ sial_error("Unterminated comment!"); ++ } ++ } ++} ++ ++/* on assignment this function is called to set the new value */ ++void ++sial_setfct(value_t *v1, value_t *v2) ++{ ++ /* duplicate type and data, safeguarding array info */ ++ sial_dupval(v1, v2); ++ ++ /* value_t v1 is still setable */ ++ v1->set=1; ++ v1->setval=v1; ++} ++ ++node_t * ++sial_sibling(node_t *n, node_t *m) ++{ ++node_t *p; ++ ++ if(m) { ++ ++ for(p=n;p->next;p=p->next); ++ p->next=m; ++ m->next=0; ++ } ++ return n; ++} ++ +--- crash/extensions/libsial/sial.y.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial.y 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,436 @@ ++%{ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include "sial.h" ++#include ++#include ++#include ++#include ++// to help resolve type name versus var name ambiguity... ++#define VARON needvar=1; ++#define VAROFF needvar=0; ++static int sial_toctype(int); ++int sialerror(char *); ++%} ++ ++%union { ++ node_t *n; ++ char *s; ++ int i; ++ type_t *t; ++ dvar_t *d; ++ var_t *v; ++} ++ ++%token STATIC DOBLK WHILE RETURN TDEF EXTERN VARARGS ++%token CHAR SHORT FLOAT DOUBLE VOID INT UNSIGNED LONG SIGNED VOLATILE REGISTER STRTYPE CONST ++%token BREAK CONTINUE DO FOR FUNC ++%token IF PATTERN BASETYPE ++%token STRUCT ENUM UNION ++%token SWITCH CASE DEFAULT ++%token ELSE CEXPR ++%token VAR NUMBER STRING ++%token TYPEDEF ++%token '(' ')' ',' ';' '{' '}' ++ ++%type termlist term opt_term opt_termlist ++%type stmt stmtlist expstmt stmtgroup ++%type var opt_var c_string ++%type for if while switch case caselist caseconstlist caseconst ++ ++%type dvar dvarlist dvarini ++ ++%type one_var_decl var_decl_list var_decl farglist decl_list ++ ++%type type ctype rctype btype_list tdef typecast ++%type storage_list string type_decl ++%type ctype_decl ++%type btype storage ctype_tok print ++ ++%right ASSIGN ADDME SUBME MULME DIVME MODME ANDME XORME ++%right ORME SHLME SHRME ++%right '?' 
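/*
 * Illustrative only, not part of the upstream grammar: the rules below are
 * meant to accept C-like scripts such as this hypothetical fragment
 * (count, foo, head and p are made-up names):
 *
 *	static int count(struct foo *head)
 *	{
 *		struct foo *p;
 *		int n;
 *
 *		n = 0;
 *		for (p in head) n++;
 *		return n;
 *	}
 *
 * where "for (var in term)" is the sial-specific IN loop and the usual
 * C declarations, operators and statement grouping apply.
 */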
++%left IN ++%left BOR ++%left BAND ++%left OR ++%left XOR ++%left AND ++%left EQ NE ++%left GE GT LE LT ++%left SHL SHR ++%left ADD SUB ++%left MUL DIV MOD ++%left PRINT PRINTO PRINTD PRINTX TAKE_ARR ++%right ADROF PTRTO PTR UMINUS SIZEOF TYPECAST POSTINCR PREINCR POSTDECR PREDECR INCR DECR FLIP NOT ++%left ARRAY CALL INDIRECT DIRECT ++ ++%% ++ ++file: ++ /* empty */ ++ | fileobj ++ | file fileobj ++ ; ++ ++fileobj: ++ function ++ | var_decl ';' { sial_file_decl($1); } ++ | ctype_decl ';' { ; } ++ ; ++ ++function: ++ one_var_decl stmtgroup ++ { sial_newfunc($1, $2); } ++ ; ++ ++ ++for: ++ FOR '(' opt_termlist ';' opt_term ';' opt_termlist ')' expstmt ++ { $$ = sial_newstat(FOR, 4, $3, $5, $7, $9); } ++ | FOR '(' var IN term ')' expstmt ++ { $$ = sial_newstat(IN, 3, $3, $5, $7); } ++ ; ++ ++if: ++ IF '(' {VARON} term {VAROFF} ')' { $$ = $4; } ++ ; ++ ++switch : ++ SWITCH '(' {VARON} term {VAROFF} ')' '{' caselist '}' ++ ++ { $$ = sial_newstat(SWITCH, 2, $4, $8); } ++ ; ++ ++caselist: ++ case ++ | caselist case { $$ = sial_addcase($1, $2); } ++ ; ++ ++case : ++ caseconstlist stmtlist { $$ = sial_newcase($1, $2); } ++ ; ++ ++caseconst: ++ CASE term ':' { $$ = sial_caseval(0, $2); } ++ | DEFAULT ':' { $$ = sial_caseval(1, 0); } ++ ; ++ ++caseconstlist: ++ caseconst ++ | caseconstlist caseconst { $$ = sial_addcaseval($1, $2); } ++ ; ++ ++opt_term: ++ /* empty */ { $$ = 0; } ++ | term ++ ; ++ ++termlist: ++ term ++ | termlist ',' term { $$ = sial_sibling($1, $3); } ++ ; ++ ++opt_termlist: ++ /* empty */ { $$ = 0; } ++ | termlist ++ ; ++ ++stmt: ++ termlist ';' { $$ = sial_newstat(PATTERN, 1, $1); } ++ | while expstmt { $$ = sial_newstat(WHILE, 2, $1, $2); } ++ | switch ++ | for ++ | if expstmt ELSE expstmt { $$ = sial_newstat(IF, 3, $1, $2, $4); } ++ | if expstmt { $$ = sial_newstat(IF, 2, $1, $2); } ++ | DO expstmt WHILE '(' term ')' ';' ++ { $$ = sial_newstat(DO, 2, $2, $5); } ++ | RETURN term ';' { $$ = sial_newstat(RETURN, 1, $2); } ++ | RETURN ';' { $$ = sial_newstat(RETURN, 1, NULLNODE); } ++ | BREAK ';' { $$ = sial_newstat(BREAK, 0); } ++ | CONTINUE ';' { $$ = sial_newstat(CONTINUE, 0); } ++ | ';' { $$ = 0; } ++ ; ++ ++stmtlist: ++ /* empty */ { $$ = 0; } ++ | stmt ++ | stmtgroup ++ | stmtlist stmt { $$ = sial_addstat($1, $2); } ++ | stmtlist stmtgroup { $$ = sial_addstat($1, $2); } ++ ; ++ ++stmtgroup: ++ '{' decl_list stmtlist '}' { $$ = sial_stat_decl($3, $2); } ++ | '{' stmtlist '}' { $$ = sial_stat_decl($2, 0); } ++ ; ++ ++expstmt: ++ stmt ++ | stmtgroup ++ ; ++ ++term: ++ ++ term '?' term ':' term %prec '?' 
++ { $$ = sial_newop(CEXPR, 3, $1, $3, $5); } ++ | term BOR term { $$ = sial_newop(BOR, 2, $1, $3); } ++ | term BAND term { $$ = sial_newop(BAND, 2, $1, $3); } ++ | NOT term { $$ = sial_newop(NOT, 1, $2); } ++ | term ASSIGN term { $$ = sial_newop(ASSIGN, 2, $1, $3); } ++ | term EQ term { $$ = sial_newop(EQ, 2, $1, $3); } ++ | term GE term { $$ = sial_newop(GE, 2, $1, $3); } ++ | term GT term { $$ = sial_newop(GT, 2, $1, $3); } ++ | term LE term { $$ = sial_newop(LE, 2, $1, $3); } ++ | term LT term { $$ = sial_newop(LT, 2, $1, $3); } ++ | term IN term { $$ = sial_newop(IN, 2, $1, $3); } ++ | term NE term { $$ = sial_newop(NE, 2, $1, $3); } ++ | '(' term ')' { $$ = $2; } ++ | term ANDME term { $$ = sial_newop(ANDME, 2, $1, $3); } ++ | PTR term %prec PTRTO { $$ = sial_newptrto($1, $2); } ++ | AND term %prec ADROF { $$ = sial_newadrof($2); } ++ | term OR term { $$ = sial_newop(OR, 2, $1, $3); } ++ | term ORME term { $$ = sial_newop(ORME, 2, $1, $3); } ++ | term XOR term { $$ = sial_newop(XOR, 2, $1, $3); } ++ | term XORME term { $$ = sial_newop(XORME, 2, $1, $3); } ++ | term SHR term { $$ = sial_newop(SHR, 2, $1, $3); } ++ | term SHRME term { $$ = sial_newop(SHRME, 2, $1, $3); } ++ | term SHL term { $$ = sial_newop(SHL, 2, $1, $3); } ++ | term SHLME term { $$ = sial_newop(SHLME, 2, $1, $3); } ++ | term ADDME term { $$ = sial_newop(ADDME, 2, $1, $3); } ++ | term SUBME term { $$ = sial_newop(SUBME, 2, $1, $3); } ++ | term MULME term { $$ = sial_newop(MULME, 2, $1, $3); } ++ | term DIV term { $$ = sial_newop(DIV, 2, $1, $3); } ++ | term DIVME term { $$ = sial_newop(DIVME, 2, $1, $3); } ++ | term MODME term { $$ = sial_newop(MODME, 2, $1, $3); } ++ | term MOD term { $$ = sial_newop(MOD, 2, $1, $3); } ++ | term SUB term { $$ = sial_newop(SUB, 2, $1, $3); } ++ | term ADD term { $$ = sial_newop(ADD, 2, $1, $3); } ++ | term PTR term %prec MUL { $$ = sial_newmult($1, $3, $2); } ++ | term AND term { $$ = sial_newop(AND, 2, $1, $3); } ++ | SUB term %prec UMINUS { $$ = sial_newop(UMINUS, 1, $2); } ++ | '~' term %prec FLIP { $$ = sial_newop(FLIP, 1, $2); } ++ | '+' term %prec UMINUS { $$ = $2; } ++ | term '(' ')' %prec CALL { $$ = sial_newcall($1, NULLNODE); } ++ | term '(' termlist ')' %prec CALL { $$ = sial_newcall($1, $3); } ++ | DECR term { $$ = sial_newop(PREDECR, 1, $2); } ++ | INCR term { $$ = sial_newop(PREINCR, 1, $2); } ++ | term DECR { $$ = sial_newop(POSTDECR, 1, $1); } ++ | term INCR { $$ = sial_newop(POSTINCR, 1, $1); } ++ | term INDIRECT var { $$ = sial_newmem(INDIRECT, $1, $3); } ++ | term INDIRECT tdef { $$ = sial_newmem(INDIRECT, $1, sial_tdeftovar($3)); } // resolve ambiguity ++ | term DIRECT var { $$ = sial_newmem(DIRECT, $1, $3); } ++ | term DIRECT tdef { $$ = sial_newmem(DIRECT, $1, sial_tdeftovar($3)); } // resolve ambiguity ++ | term '[' term ']' %prec ARRAY ++ { $$ = sial_newindex($1, $3); } ++ | NUMBER ++ | c_string ++ | typecast term %prec TYPECAST { $$ = sial_typecast($1, $2); } ++ | SIZEOF '(' var_decl ')' ++ { $$ = sial_sizeof(sial_newcast($3), 1); } ++ | SIZEOF term { $$ = sial_sizeof($2, 2); } ++ | print '(' var_decl ')' %prec SIZEOF ++ { $$ = sial_newptype($3); } ++ | print term %prec SIZEOF { $$ = sial_newpval($2, $1); } ++ | TAKE_ARR '(' term ',' term ')' { $$ = $3; /* sial_newtakearr($3, $5); */ } ++ | var ++ ; ++ ++print: ++ PRINT ++ | PRINTX ++ | PRINTO ++ | PRINTD ++ ; ++ ++typecast: ++ '(' var_decl ')' { $$ = sial_newcast($2); } ++ ; ++ ++var_decl_list: ++ var_decl ';' ++ | var_decl_list var_decl ';' { sial_addnewsvs($1, $1, $2); $$=$1; } ++ ; ++ ++decl_list: ++ 
ctype_decl ';' { $$ = 0; } ++ | var_decl ';' { $$ = $1; } ++ | decl_list var_decl ';' { $$=$1; if($1 && $2) sial_addnewsvs($1, $1, $2); } ++ | decl_list ctype_decl ';' { $$ = $1; } ++ ; ++ ++ ++var_decl: ++ type_decl dvarlist { needvar=0; $$ = sial_vardecl($2, $1); } ++ ; ++ ++one_var_decl: ++ type_decl dvar { needvar=0; $$ = sial_vardecl($2, $1); } ++ ; ++ ++type_decl: ++ type { $$=$1; needvar++; } ++ | storage_list { $$=$1; needvar++; } ++ | type storage_list { $$=sial_addstorage($1, $2); needvar++; } ++ | storage_list type { $$=sial_addstorage($2, $1); needvar++; } ++ | type_decl PTR { $$=$1; sial_pushref($1, $2);; needvar++; } ++ | type_decl storage_list { $$=sial_addstorage($1, $2); needvar++; } ++ ; ++ ++type: ++ ctype ++ | tdef ++ | btype_list ++ | string ++ | ctype_decl ++ ; ++ ++ctype_decl: ++ ctype_tok var '{' {sial_startctype(sial_toctype($1),$2);instruct++;} var_decl_list '}' ++ { instruct--; $$ = sial_ctype_decl(sial_toctype($1), $2, $5); } ++ | ctype_tok tdef '{' {sial_startctype(sial_toctype($1),lastv=sial_tdeftovar($2));instruct++;} var_decl_list '}' ++ { instruct--; $$ = sial_ctype_decl(sial_toctype($1), lastv, $5); } ++ | ctype_tok var '{' dvarlist '}' ++ { $$ = sial_enum_decl(sial_toctype($1), $2, $4); } ++ | ctype_tok tdef '{' dvarlist '}' ++ { $$ = sial_enum_decl(sial_toctype($1), sial_tdeftovar($2), $4); } ++ ; ++ ++ctype: ++ rctype { $$ = $1; } ++ | ctype_tok '{' {instruct++;} var_decl_list '}' ++ { instruct--; $$ = sial_ctype_decl(sial_toctype($1), 0, $4); } ++ | ctype_tok '{' dvarlist '}' ++ { $$ = sial_enum_decl(sial_toctype($1), 0, $3); } ++ ; ++ ++farglist: ++ /* empty */ { $$ = 0; } ++ | one_var_decl { $$ = $1; } ++ | farglist ',' one_var_decl { ++ if(!$1) sial_error("Syntax error"); ++ if($3) sial_addnewsvs($1, $1, $3); $$=$1; ++ } ++ | farglist ',' VARARGS { ++ if(!$1) sial_error("Syntax error"); ++ sial_addtolist($1, sial_newvar(S_VARARG)); $$=$1; ++ } ++ ; ++ ++ ++string: ++ STRTYPE { ++ type_t *t; ++ t=sial_newtype(); ++ t->type=V_STRING; ++ t->typattr=0; ++ $$ = t; ++ } ++ ; ++ ++rctype: ++ ctype_tok var { $$ = sial_newctype(sial_toctype($1), $2); } ++ | ctype_tok tdef { $$ = sial_newctype(sial_toctype($1), sial_tdeftovar($2)); } ++ ; ++ ++ctype_tok: ++ STRUCT ++ | ENUM ++ | UNION ++ ; ++ ++btype_list: ++ btype { $$ = sial_newbtype($1); } ++ | btype_list btype { $$ = sial_addbtype($1, $2); } ++ ; ++ ++c_string: ++ STRING { $$ = $1; } ++ | c_string STRING { $$ = sial_strconcat($1, $2); } ++ ; ++ ++btype: ++ LONG ++ | CHAR ++ | INT ++ | SHORT ++ | UNSIGNED ++ | SIGNED ++ | DOUBLE ++ | FLOAT ++ | VOID ++ ; ++ ++storage_list: ++ storage { $$ = sial_newbtype($1); } ++ | storage_list storage { sial_error("Only one storage class can be speficied"); } ++ ; ++ ++storage: ++ STATIC ++ | VOLATILE ++ | REGISTER ++ | TDEF ++ | EXTERN ++ | CONST ++ ; ++ ++dvarlist: ++ dvarini { $$ = $1; } ++ | dvarlist ',' dvarini { $$ = sial_linkdvar($1, $3); } ++ ; ++ ++dvarini: ++ dvar { $$ = $1; } ++ | dvar ASSIGN term { $$ = sial_dvarini($1, $3); } ++ ; ++ ++dvar: ++ opt_var { $$ = sial_newdvar($1); needvar=0; } ++ | ':' term { $$ = sial_dvarfld(sial_newdvar(0), $2); } ++ | dvar ':' term { $$ = sial_dvarfld($1, $3); } ++ | dvar '[' opt_term ']' { $$ = sial_dvaridx($1, $3); } ++ | PTR dvar { $$ = sial_dvarptr($1, $2); } ++ | dvar '(' ')' { $$ = sial_dvarfct($1, 0); } ++ | dvar '(' farglist ')' { $$ = sial_dvarfct($1, $3); } ++ | '(' dvar ')' { $$ = $2; } ++ ; ++ ++opt_var: ++ /* empty */ { $$ = 0; } ++ | var { $$ = $1; } ++ ; ++ ++var: ++ VAR { $$ = $1; } ++ ; ++ ++tdef: ++ TYPEDEF 
{ $$ = $1; } ++ ; ++ ++while: ++ WHILE '(' {VARON} term {VAROFF} ')' { $$ = $4; } ++ ; ++ ++%% ++ ++static int ++sial_toctype(int tok) ++{ ++ switch(tok) { ++ case STRUCT: return V_STRUCT; ++ case ENUM: return V_ENUM; ++ case UNION: return V_UNION; ++ default: sial_error("Oops sial_toctype!"); return 0; ++ } ++} ++ ++/* ++ This file gets included into the yacc specs. ++ So the "sial.h" is already included ++*/ ++ ++int sialerror(char *p) { sial_error(p); return 0; } ++ +--- crash/extensions/libsial/sial_define.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_define.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,519 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include ++#include ++#include "sial.h" ++/* ++ This set of functions handle #define for simple constant or macros. ++ We read from the current parser input strem untill end of line. ++ ++ The big thing is that we need to do some parsing to get the deinf names ++ and parameters. Also at the time of the macro instanciation, we need to parse ++ the parameters again. That leads to a more complex package... ++*/ ++ ++#define MAXP 20 ++typedef struct mac_s { ++ ++ char *name; /* this macro name */ ++ int np; /* number of parameters */ ++ int issub; /* subs have to be threated differently */ ++ char **p; /* parameters names */ ++ char *buf; /* text for the macro itself */ ++ struct mac_s *next; /* next on the list */ ++ srcpos_t pos; ++ ++} mac_t; ++ ++typedef struct { ++ mac_t *m; ++ char **subs; ++} smac_t; ++ ++static mac_t* macs=0; ++ ++/* we have to add a space at the end of the value ++ Again, this is to prevent a EOF on the parsing stream */ ++def_t* ++sial_add_def(def_t*d, char*name, char*val) ++{ ++def_t*nd=sial_alloc(sizeof(def_t)); ++char *buf=sial_alloc(strlen(val)+2); ++ ++ strcpy(buf, val); ++ strcat(buf, " "); ++ sial_free(val); ++ nd->name=name; ++ nd->val=buf; ++ nd->next=d; ++ return nd; ++} ++ ++/* search for a macro is the current list */ ++mac_t * ++sial_getmac(char *name, int takeof) ++{ ++mac_t *m; ++mac_t *prev=0; ++mac_t *sial_getcurmac(void); ++ ++ if(takeof || !(m=sial_getcurmac())) m=macs; ++ ++ for(; m; m=m->next) { ++ ++ if( !strcmp(m->name, name) ) { ++ ++ if(takeof) { ++ ++ if(!prev) macs=m->next; ++ else prev->next=m->next; ++ ++ } ++ return m; ++ } ++ prev=m; ++ } ++ return 0; ++} ++ ++node_t* ++sial_macexists(node_t*var) ++{ ++char *name=NODE_NAME(var); ++int val; ++ ++ if(sial_getmac(name, 0)) val=1; ++ else val=0; ++ return sial_makenum(B_UL, val); ++} ++static void ++sial_freemac(mac_t*m) ++{ ++int i; ++ ++ for(i=0;inp;i++) sial_free(m->p[i]); ++ if(m->np) sial_free(m->p); ++ sial_free(m); ++} ++ ++/* ++ These are called at 2 different points. ++ One call at the very begining. One call for each file. ++*/ ++void* sial_curmac(void) { return macs; } ++ ++void ++sial_flushmacs(void *vtag) ++{ ++mac_t *m, *next; ++mac_t *tag=(mac_t *)vtag; ++ ++ for(m=macs; m!=tag; m=next) { ++ ++ next=m->next; ++ sial_freemac(m); ++ } ++ macs=m; ++} ++ ++/* this function is called to register a new macro. ++ The text associated w/ the macro is still on the parser stream. ++ Untill eol. ++*/ ++void ++sial_newmac(char *mname, char *buf, int np, char **p, int silent) ++{ ++char *p2; ++mac_t *m; ++ ++ { ++ char *p=buf+strlen(buf)-1; ++ ++ /* eliminate trailing blanks */ ++ while(*p && (*p==' ' || *p=='\t')) p--; ++ *(p+1)='\0'; ++ ++ /* eliminate leading blanks */ ++ p=buf; ++ while(*p && (*p==' ' || *p=='\t')) p++; ++ ++ /* copy and append a space. 
This is to prevent unloading of the ++ macro before the sial_chkvarmac() call as been performed */ ++ p2=sial_alloc(strlen(p)+2); ++ strcpy(p2, p); ++ sial_free(buf); ++ p2[strlen(p2)+1]='\0'; ++ p2[strlen(p2)]=' '; ++ buf=p2; ++ } ++ ++ if((m=sial_getmac(mname, 1)) && strcmp(m->buf, buf)) { ++ ++ /* when processing the compile options, be silent. */ ++ if(!silent) { ++ ++ sial_warning("Macro redefinition '%s' with different value_t\n" ++ "value_t=[%s]\n" ++ "Previous value_t at %s:%d=[%s]\n" ++ , mname, buf, m->pos.file, m->pos.line, m->buf); ++ } ++ ++ } ++ m=(mac_t*)sial_alloc(sizeof(mac_t)); ++ m->name=sial_strdup(mname); ++ m->np=np; ++ m->p=p; ++ m->buf=buf; ++ m->next=macs; ++ m->issub=0; ++ sial_setpos(&m->pos); ++ macs=m; ++} ++ ++/* this function is called by the enum declaration function and ++ when a enum type is extracted from the image to push a set ++ of define's onto the stack, that correspond to each identifier ++ in the enum. ++*/ ++void ++sial_pushenums(enum_t *et) ++{ ++ while(et) { ++ ++ char *buf=sial_alloc(40); ++ ++ sprintf(buf, "%d", et->value); ++ sial_newmac(et->name, buf, 0, 0, 0); ++ et=et->next; ++ } ++} ++ ++static void ++sial_skipcomment(void) ++{ ++int c; ++ ++ while((c=sial_input())) { ++ ++ if(c=='*') { ++ ++ int c2; ++ ++ if((c2=sial_input())=='/') return; ++ sial_unput(c2); ++ } ++ } ++} ++ ++static void ++sial_skipstr(void) ++{ ++int c; ++ ++ while((c=sial_input())) { ++ ++ if(c=='\\') sial_input(); ++ else if(c=='"') return; ++ } ++} ++ ++ ++/* skip over strings and comment to a specific chracter */ ++static void ++sial_skipto(int x) ++{ ++int c; ++ ++ while((c=sial_input())) { ++ ++ if(c==x) return; ++ ++ switch(c) { ++ ++ case '\\': ++ sial_input(); ++ break; ++ ++ case '"': ++ sial_skipstr(); ++ break; ++ ++ case '/': { ++ ++ int c2; ++ ++ if((c2=sial_input())=='*') { ++ ++ sial_skipcomment(); ++ ++ } else sial_unput(c2); ++ } ++ break; ++ ++ case '(': ++ ++ sial_skipto(')'); ++ break; ++ ++ case ')': ++ sial_error("Missing parameters to macro"); ++ break; ++ } ++ ++ } ++ ++ sial_error("Expected '%c'", x); ++} ++ ++ ++/* ++ This function gets called when the buffer for a macro as been fully ++ parsed. We need to take the associated parameter substitution macros ++ of of the stack and deallocate associated data. ++*/ ++static void ++sial_popmac(void *vsm) ++{ ++smac_t *sm=(smac_t *)vsm; ++int i; ++ ++ for(i=0;im->np;i++) { ++ ++ mac_t *m=sial_getmac(sm->m->p[i], 1); ++ ++ if(!m) sial_error("Oops macro pop!"); ++ sial_free(m->buf); ++ sial_free(m->name); ++ sial_free(m); ++ } ++ sial_free(sm->subs); ++ sial_free(sm); ++} ++ ++/* ++ ++ need to get the actual parameters from the parser stream. ++ This can be simple variable or complex multiple line expressions ++ with strings and commants imbedded in them... ++ ++*/ ++static int ++sial_pushmac(mac_t *m) ++{ ++int i; ++char **subs=sial_alloc(sizeof(char*)*m->np); ++smac_t *sm; ++int siallex(void); ++ ++ /* the next token should be a '(' */ ++ if(siallex() != '(') { ++ ++ sial_error("Expected '(' after '%s'", m->name); ++ ++ } ++ ++ /* get the parameters */ ++ for(i=0;inp;i++) { ++ ++ char *p=sial_cursorp(); ++ int nc; ++ ++ if(inp-1) sial_skipto(','); ++ else sial_skipto(')'); ++ ++ nc=sial_cursorp()-p-1; ++ subs[i]=sial_alloc(nc+2); ++ strncpy(subs[i], p, nc); ++ subs[i][nc]=' '; ++ subs[i][nc+1]='\0'; ++ } ++ ++ /* take care of the macro() case. ex: IS_R10000()i.e. 
no parms */ ++ if(!m->np) ++ sial_skipto(')'); ++ ++ sm=sial_alloc(sizeof(smac_t)); ++ ++ sm->m=m; ++ sm->subs=subs; ++ ++ /* we push the associated buffer on the stream */ ++ sial_pushbuf(m->buf, 0, sial_popmac, sm, 0); ++ ++ /* we push the subs onto the macro stack */ ++ for(i=0;inp;i++) { ++ ++ mac_t *pm=sial_alloc(sizeof(mac_t)); ++ ++ pm->name=sial_alloc(strlen(m->p[i])+1); ++ strcpy(pm->name, m->p[i]); ++ pm->np=0; ++ pm->p=0; ++ pm->buf=subs[i]; ++ pm->next=macs; ++ pm->issub=1; ++ macs=pm; ++ } ++ return 1; ++ ++} ++ ++ ++/* ++ This one is called from the lexer to check if a 'var' is to be substituted for ++ a macro ++*/ ++int ++sial_chkmacvar(char *mname) ++{ ++mac_t *m; ++ ++ if((m=sial_getmac(mname, 0))) { ++ ++ ++ /* simple constant ? */ ++ if(!m->p) { ++ ++ sial_pushbuf(m->buf, 0, 0, 0, m->issub ? m->next : 0); ++ ++ } else { ++ return sial_pushmac(m); ++ } ++ return 1; ++ ++ } ++ return 0; ++ ++} ++ ++/* ++ Skip an unsupported preprocessor directive. ++*/ ++void ++sial_skip_directive(void) ++{ ++ sial_free(sial_getline()); ++} ++ ++void ++sial_undefine(void) ++{ ++int c; ++int i=0; ++char mname[MAX_SYMNAMELEN+1]; ++mac_t *m; ++ ++ /* skip all white spaces */ ++ while((c=sial_input()) == ' ' || c == '\t') if(c=='\n' || !c) { ++ ++ sial_error("Macro name expected"); ++ } ++ ++ mname[i++]=c; ++ ++ /* get the constant or macro name */ ++ while((c=sial_input()) != ' ' && c != '\t') { ++ ++ if(c=='\n' || !c) break; ++ if(i==MAX_SYMNAMELEN) break; ++ mname[i++]=c; ++ } ++ mname[i]='\0'; ++ if((m=sial_getmac(mname, 1))) sial_freemac(m); ++ else sial_addneg(mname); ++} ++ ++/* ++ This one is called from the lexer after #define as been detected ++*/ ++void ++sial_define(void) ++{ ++int c; ++int i=0; ++char mname[MAX_SYMNAMELEN+1]; ++ ++ /* skip all white spaces */ ++ while((c=sial_input()) == ' ' || c == '\t') if(c=='\n' || !c) goto serror; ++ ++ mname[i++]=c; ++ ++ /* get the constant or macro name */ ++ while((c=sial_input()) != ' ' && c != '\t' && c != '(') { ++ ++ if(c=='\n' || !c) break; ++ ++ if(i==MAX_SYMNAMELEN) break; ++ ++ mname[i++]=c; ++ } ++ mname[i]='\0'; ++ ++ /* does this macro have paraneters */ ++ /* If so, '(' will be right after name of macro. No spaces. */ ++ if(c=='(') { ++ ++ int np, nc, done; ++ char **pnames; ++ char curname[MAX_SYMNAMELEN+1]; ++ ++ np=nc=done=0; ++ pnames=(char **)sial_alloc(sizeof(char*)*MAXP); ++ ++ while(!done) { ++ ++ c=sial_input(); ++ ++ switch(c) { ++ case '\n': case 0: ++ goto serror; ++ ++ /* continuation */ ++ case '\\': ++ if(sial_input()!='\n') goto serror; ++ break; ++ ++ case ',': ++ if(!nc) goto serror; ++last: ++ curname[nc]='\0'; ++ pnames[np]=sial_alloc(strlen(curname)+1); ++ strcpy(pnames[np], curname); ++ nc=0; ++ np++; ++ break; ++ ++ case ')': ++ done=1; ++ if(nc) goto last; ++ break; ++ ++ case ' ': ++ case '\t': ++ break; ++ ++ default: ++ curname[nc++]=c; ++ break; ++ } ++ } ++ sial_newmac(mname, sial_getline(), np, pnames, 0); ++ return; ++ ++ } else if(c == '\n') { ++ ++ /* if nothing speciied then set to "1" */ ++ sial_newmac(mname, sial_strdup("1"), 0, 0, 0); ++ ++ } else { ++ ++ sial_newmac(mname, sial_getline(), 0, 0, 0); ++ } ++ ++ return; ++ ++serror: ++ ++ sial_error("Syntax error on macro definition"); ++} +--- crash/extensions/libsial/sial_builtin.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_builtin.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,434 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include "sial.h" ++ ++/* information necessary for a builtin function */ ++typedef struct builtin { ++ ++ var_t*v; /* resulting variable declaration after parsing */ ++ bf_t *fp; /* pointer to actual function */ ++ char *proto; /* associated prototype_t*/ ++ struct builtin *next; /* to chain them */ ++ ++} builtin; ++ ++#define BT_EINVAL 1 /* Something is wrong and it's not ... */ ++value_t* ++sial_exit(int v) ++{ ++ /* Were we compiling ? */ ++ sial_parseback(); ++ ++ /* we were running... exit () */ ++ sial_dojmp(J_EXIT, &v); ++ ++ /* NOT REACHED */ ++ return 0; ++} ++ ++value_t* ++sial_bexit(value_t *vv) ++{ ++int v=sial_getval(vv); ++ ++ /* we're not going back to the he caller so free ++ the input value_t */ ++ sial_freeval(vv); ++ sial_exit(v); ++ /* NOT REACHED */ ++ return 0; ++} ++ ++#define MAXBYTES 4000 ++#define INCREMENT 16 ++value_t * ++sial_getstr(value_t *vmadr) ++{ ++ull madr=sial_getval(vmadr); ++char *buf=sial_alloc(MAXBYTES+1); ++char *p=buf; ++value_t *v; ++ ++ /* sial as already verified that this is a V_REF */ ++ /* since this is reading from a unkown size pool ++ we have to do an exponential reduction on the number of bytes ++ read ... */ ++ buf[0]=0; ++ while(1) { ++ ++ int i; ++ ++ if(!API_GETMEM(madr, p, INCREMENT)) break; ++ ++ /* have we found the '\0' yet ? */ ++ for(i=0;i= MAXBYTES) { ++ buf[MAXBYTES]='\0'; ++ break; ++ } ++ ++ } ++ v=sial_setstrval(sial_newval(), buf); ++ sial_free(buf); ++ return v; ++} ++ ++value_t * ++sial_substr(value_t *vp, value_t *vi, value_t *vj) ++{ ++char *p=sial_getptr(vp, char); ++ul i=sial_getval(vi); ++int l=strlen(p); ++int j=(vj?sial_getval(vj):(l-i+1)); ++char *s; ++value_t *v; ++ ++ if((i+j-1)>l || !i) { ++ ++ sial_error("Valid positions are [1..%d]\n", l); ++ ++ } ++ ++ s=sial_alloc(j+1); ++ strncpy(s, p+i-1, j); ++ s[j]='\0'; ++ v=sial_setstrval(sial_newval(), s); ++ sial_free(s); ++ return v; ++} ++ ++value_t * ++sial_getnstr(value_t* vmadr, value_t* vl) ++{ ++ull madr=sial_getval(vmadr); ++ul l=sial_getval(vl); ++char *buf=sial_alloc(l+1); ++value_t *v; ++ ++ if(!API_GETMEM(madr, buf, l)) buf[0]='\0'; ++ else buf[l]='\0'; ++ v=sial_setstrval(sial_newval(), buf); ++ sial_free(buf); ++ return v; ++} ++ ++value_t * ++sial_atoi(value_t *vs, value_t* vbase) ++{ ++char *s=sial_getptr(vs, char); ++int base=vbase ? 
sial_getval(vbase) : 0; ++ ++ strtoull(s, 0, (int) base); ++ return sial_defbtypesize(sial_newval(), strtoull(s, 0, base), B_ULL); ++} ++ ++value_t * ++sial_itoa(value_t* vi) ++{ ++ull i=sial_getval(vi); ++char p[40]; ++ ++ sprintf(p, "%llu", (unsigned long long)i); ++ return sial_setstrval(sial_newval(), p); ++} ++ ++value_t * ++sial_strlen(value_t *vs) ++{ ++char *s=sial_getptr(vs, char); ++ull l; ++ if(!s) l=0; ++ else l=strlen(s); ++ ++ return sial_defbtype(sial_newval(), l); ++} ++ ++value_t * ++sial_getchar(void) ++{ ++char c; ++struct termio tio, stio; ++int in=fileno(stdin); ++ ++ if(ioctl(in, TCGETA, &tio)) c=255; ++ else { ++ stio=tio; ++ tio.c_lflag &= ~(ICANON | ECHO); ++ tio.c_iflag &= ~(ICRNL | INLCR); ++ tio.c_cc[VMIN] = 1; ++ tio.c_cc[VTIME] = 0; ++ ioctl(in, TCSETA, &tio); ++ c=getc(stdin); ++ ioctl(in, TCSETA, &stio); ++ } ++ return sial_defbtype(sial_newval(), (ull)c); ++} ++ ++value_t * ++sial_gets(void) ++{ ++char p[1024]; ++ ++ if(!fgets(p, sizeof(p)-1, stdin)) p[0]='\0'; ++ else p[strlen(p)-1]='\0'; ++ return sial_setstrval(sial_newval(), p); ++} ++ ++static builtin *bfuncs=0; ++ ++/* ++ Check for the existance of a bt function ++*/ ++void * ++sial_chkbuiltin(char *name) ++{ ++builtin *bf; ++ ++ for(bf=bfuncs; bf; bf=bf->next) { ++ ++ if(!strcmp(name, bf->v->name)) { ++ ++ return bf; ++ } ++ } ++ return 0; ++} ++ ++/* ++ Remove a builtin. ++ This is done when we 'unload' a *.so file. ++*/ ++void ++sial_rmbuiltin(var_t*v) ++{ ++builtin *bf; ++builtin *last=0; ++ ++ for(bf=bfuncs; bf; bf=bf->next) { ++ ++ if(!strcmp(v->name, bf->v->name)) { ++ ++ if(!last) bfuncs=bf->next; ++ else { ++ ++ last->next=bf->next; ++ } ++ sial_free(bf->proto); ++ sial_free(bf); ++ } ++ last=bf; ++ } ++} ++ ++/* ++ Install a new builtin function. ++*/ ++var_t* ++sial_builtin(char *proto, bf_t* fp) ++{ ++var_t*v; ++ ++ /* parse the prototype_t*/ ++ if((v=sial_parsexpr(proto))) { ++ ++ builtin *bt; ++ int nargs=0; ++ ++ /* check name */ ++ if(!v->name || !v->name[0]) { ++ ++ sial_freevar(v); ++ sial_msg("Syntax error: no function name specified [%s]\n", proto); ++ return 0; ++ } ++ ++ /* check for function with same name */ ++ if(sial_chkfname(v->name, 0)) { ++ ++ sial_freevar(v); ++ sial_msg("Function already defined [%s]\n", proto); ++ return 0; ++ } ++ ++ if(v->dv->fargs) { ++ ++ var_t*vn=v->dv->fargs->next; ++ ++ while(vn!=v->dv->fargs) { ++ ++ nargs++; ++ vn=vn->next; ++ } ++ } ++ /* check number of args */ ++ if(nargs > BT_MAXARGS) { ++ ++ sial_freevar(v); ++ sial_msg("Too many parameters to function (max=%d) [%s]\n", BT_MAXARGS, proto); ++ return 0; ++ } ++ ++ ++ bt=sial_alloc(sizeof(builtin)); ++ bt->proto=sial_strdup(proto); ++ bt->fp=fp; ++ bt->v=v; ++ bt->next=0; ++ ++ /* install it */ ++ if(!bfuncs) bfuncs=bt; ++ else { ++ builtin *btp; ++ ++ for(btp=bfuncs; ; btp=btp->next) if(!btp->next) break; ++ btp->next=bt; ++ } ++ return v; ++ } ++ ++ sial_msg("Builtin [%s] not loaded.", proto); ++ ++ return 0; ++} ++ ++#define bcast(f) ((bf_t*)f) ++ ++static btspec_t sialbfuncs[] = { ++ { "unsigned long long atoi(string, ...)",bcast(sial_atoi)}, ++ { "int exists(string)", bcast(sial_exists)}, ++ { "void exit(int)", bcast(sial_bexit)}, ++ { "int getchar()", bcast(sial_getchar)}, ++ { "string gets()", bcast(sial_gets)}, ++ { "string getstr(char *)", bcast(sial_getstr)}, ++ { "string getnstr(char *, int)", bcast(sial_getnstr)}, ++ { "string itoa(int)", bcast(sial_itoa)}, ++ { "void printf(string, ...)", bcast(sial_printf)}, ++ { "void showtemp()", bcast(sial_showtemp)}, ++ { "void 
showaddr(char *)", bcast(sial_showaddr)}, ++ { "void memdebugon()", bcast(sial_memdebugon)}, ++ { "void memdebugoff()", bcast(sial_memdebugoff)}, ++ { "int sial_load(string)", bcast(sial_bload)}, ++ { "int sial_unload(string)", bcast(sial_bunload)}, ++ { "int depend(string)", bcast(sial_bdepend)}, ++ { "int strlen(string)", bcast(sial_strlen)}, ++ { "string sprintf(string, ...)", bcast(sial_sprintf)}, ++ { "string substr(string, int, ...)", bcast(sial_substr)}, ++ { "void prarr(string name, int i)", bcast(sial_prarr)}, ++ { "int member(void*, string name)", bcast(sial_ismember)}, ++ { "string findsym(string)", bcast(sial_findsym)}, ++}; ++ ++ ++/* ++ Install the sial builtins. ++*/ ++void ++sial_setbuiltins() ++{ ++int i; ++ ++ for(i=0;iv->dv->fargs) { ++ ++ var_t*vv=bf->v->dv->fargs->next; ++ ++ while(vv != bf->v->dv->fargs) { ++ ++ if(vv->name && !strcmp(vv->name, S_VARARG)) { ++ while(nargsv); ++ sial_chkandconvert(lvals[nargs], vals[nargs]); ++ } ++ nargs++; ++ vv=vv->next; ++ } ++ } ++ ++ /* check parameters number */ ++ if(iv->dv->pos, "Too few parameters to '%s'", bf->proto); ++ ++ } else if(i>nargs){ ++ ++ sial_rerror(&bf->v->dv->pos, "Too many parameters to '%s'", bf->proto); ++ ++ } ++ ++ if(vals) { ++ /* the actual call */ ++ v=(bf->fp) ( ++ lvals[0], lvals[1], ++ lvals[2], lvals[3], ++ lvals[4], lvals[5], ++ lvals[6], lvals[7], ++ lvals[8], lvals[9], ++ lvals[10], lvals[11], ++ lvals[12], lvals[13], ++ lvals[14], lvals[15], ++ lvals[16], lvals[17], ++ lvals[18], lvals[19] ++ ); ++ } else { ++ ++ v=(bf->fp) ((value_t*)0); ++ } ++ ++ while(i) { ++ ++ --i; ++ sial_freeval(vals[i]); ++ sial_freeval(lvals[i]); ++ } ++ ++ /* make a copy of the return value_t info */ ++ vr=sial_cloneval(bf->v->v); ++ sial_chkandconvert(vr, v); ++ sial_freeval(v); ++ ++ return vr; ++ } ++ ++ sial_error("Oops. sial_exebfunc()"); ++ return 0; ++} +--- crash/extensions/libsial/sialpp.y.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sialpp.y 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,88 @@ ++%{ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++/* ++ This is the grammar for the preprocessor expression evaluation. ++*/ ++#include "sial.h" ++#include "sial.tab.h" ++#include ++#include ++#include ++static node_t *last_term; ++%} ++ ++%union { ++ node_t *n; ++ int i; ++} ++ ++%token P_VAR P_NUMBER ++%token P_DEFINED ++ ++%type term ++ ++%right '?' ++%left P_BOR ++%left P_BAND ++%left P_OR ++%left P_XOR ++%left P_AND ++%left P_EQ P_NE ++%left P_GE P_GT P_LE P_LT ++%left P_SHL P_SHR ++%left P_ADD P_SUB ++%left P_MUL P_DIV P_MOD ++%right P_UMINUS P_FLIP P_NOT ++ ++%% ++ ++term: ++ ++ term '?' term ':' term %prec '?' 
++ { $$ = sial_newop(CEXPR, 3, $1, $3, $5); last_term = $$; } ++ | term P_BOR term { $$ = sial_newop(BOR, 2, $1, $3); last_term = $$; } ++ | term P_BAND term { $$ = sial_newop(BAND, 2, $1, $3); last_term = $$; } ++ | P_NOT term { $$ = sial_newop(NOT, 1, $2); last_term = $$; } ++ | term P_EQ term { $$ = sial_newop(EQ, 2, $1, $3); last_term = $$; } ++ | term P_GE term { $$ = sial_newop(GE, 2, $1, $3); last_term = $$; } ++ | term P_GT term { $$ = sial_newop(GT, 2, $1, $3); last_term = $$; } ++ | term P_LE term { $$ = sial_newop(LE, 2, $1, $3); last_term = $$; } ++ | term P_LT term { $$ = sial_newop(LT, 2, $1, $3); last_term = $$; } ++ | term P_NE term { $$ = sial_newop(NE, 2, $1, $3); last_term = $$; } ++ | '(' term ')' { $$ = $2; last_term == $$; } ++ | term P_OR term { $$ = sial_newop(OR, 2, $1, $3); last_term = $$; } ++ | term P_XOR term { $$ = sial_newop(XOR, 2, $1, $3); last_term = $$; } ++ | term P_SHR term { $$ = sial_newop(SHR, 2, $1, $3); last_term = $$; } ++ | term P_SHL term { $$ = sial_newop(SHL, 2, $1, $3); last_term = $$; } ++ | term P_DIV term { $$ = sial_newop(DIV, 2, $1, $3); last_term = $$; } ++ | term P_MOD term { $$ = sial_newop(MOD, 2, $1, $3); last_term = $$; } ++ | term P_SUB term { $$ = sial_newop(SUB, 2, $1, $3); last_term = $$; } ++ | term P_ADD term { $$ = sial_newop(ADD, 2, $1, $3); last_term = $$; } ++ | term P_MUL term { $$ = sial_newop(MUL, 2, $1, $3); last_term = $$; } ++ | term '&' term %prec P_AND { $$ = sial_newop(AND, 2, $1, $3); last_term = $$; } ++ | P_SUB term %prec P_UMINUS { $$ = sial_newop(UMINUS, 1, $2); last_term = $$; } ++ | '~' term %prec P_FLIP { $$ = sial_newop(FLIP, 1, $2); last_term = $$; } ++ | '+' term %prec P_UMINUS { $$ = $2; last_term = $$; } ++ | P_DEFINED '(' {nomacs++;} P_VAR ')' ++ { nomacs=0; $$ = sial_macexists($4); last_term = $$; } ++ | P_NUMBER { last_term = $$; } ++ | P_VAR { $$ = sial_makenum(B_UL, 0); last_term = $$; } ++ ; ++ ++%% ++ ++node_t * ++sial_getppnode() ++{ ++ return last_term; ++} ++ ++int ++sialpperror(char *s) ++{ ++ sial_error(s); ++ return 1; ++} ++ +--- crash/extensions/libsial/sial_node.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_node.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,69 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++/* ++ These function are use to allocate a new node. ++ It's a layer between the type specific functions and the parser. ++*/ ++#include "sial.h" ++#include ++ ++/* ++ Allocate a new node structure ++*/ ++node_t* ++sial_newnode() ++{ ++node_t*n; ++ ++ n = (node_t*) sial_calloc(sizeof(node_t)); ++ TAG(n); ++ return n; ++} ++ ++void ++sial_free_siblings(node_t*ni) ++{ ++ while(ni) { ++ ++ node_t*next=ni->next; ++ ++ NODE_FREE(ni); ++ ++ ni=next; ++ } ++} ++ ++/* ++ This function is called du ring compile time ++ to exevaluate constant expression, like sizeof() and ++ array sizes and enum constant. 
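   For example (illustrative, the names are made up): in a script declaring

	int buf[MAXTASKS * 2];
	enum { A = 1, B = A + 4 };

   the expressions "MAXTASKS * 2" and "A + 4" are folded through this path
   while the file is being compiled, before any statement runs; a null
   return tells the caller the expression could not be evaluated.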
++*/ ++value_t * ++sial_exenode(node_t*n) ++{ ++value_t *v; ++int *exval; ++jmp_buf exitjmp; ++void *sa; ++srcpos_t p; ++ ++ sial_curpos(&n->pos, &p); ++ sa=sial_setexcept(); ++ ++ if(!setjmp(exitjmp)) { ++ ++ sial_pushjmp(J_EXIT, &exitjmp, &exval); ++ v=NODE_EXE(n); ++ sial_rmexcept(sa); ++ sial_popjmp(J_EXIT); ++ ++ } else { ++ ++ sial_rmexcept(sa); ++ return 0; ++ ++ } ++ sial_curpos(&p, 0); ++ return v; ++} +--- crash/extensions/libsial/sial_api.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_api.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1516 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include "sial.h" ++#include "sial.tab.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* here we do some caching of the information. This can have a speedup effect ++ since it limits the number of accesses we do the dwarf (or whatever type) db that ++ drives the type and symbols information ++ */ ++ ++static stinfo_t slist={"root"}; ++ ++/* running key to new structures */ ++static ull nextidx=0, abitype=ABI_MIPS; ++#define LOCALTYPESBASE 0x8000000000000000ll ++static ull sial_nextidx(void) { return LOCALTYPESBASE+nextidx++; } ++ ++/* this set of function is used to cleanup tdefs after their use. ++ Trailing tdefs can be combersome. Trailing struct/union/enum get new idx ++ each time and are not a trouble */ ++static stinfo_t*tag=0; ++void ++sial_tagst(void) ++{ ++ tag=slist.next; ++} ++ ++void ++sial_flushtdefs(void) ++{ ++stinfo_t*st=slist.next; ++stinfo_t*last=&slist; ++ ++ while(st != tag) { ++ ++ stinfo_t*next=st->next; ++ ++ if(st->ctype.type==V_TYPEDEF && st->idx & LOCALTYPESBASE) { ++ ++ sial_free(st->name); ++ sial_free(st); ++ last->next=next; ++ ++ } else last=st; ++ ++ st=next; ++ ++ } ++ tag=0; ++} ++ ++static stinfo_t* ++sial_getst(char *name, int type) ++{ ++stinfo_t*tst; ++ ++ for(tst=slist.next; tst; tst=tst->next) { ++ ++ if(tst->ctype.type == type && tst->name && ! strcmp(tst->name, name)) { ++ ++ return tst; ++ } ++ } ++ return 0; ++} ++ ++#if 0 ++Not used yet. ++static void ++sial_rmst(stinfo_t*rst) ++{ ++stinfo_t*st=slist.next; ++stinfo_t*last=&slist; ++ ++ while(st) { ++ ++ if(st==rst) { ++ ++ last->next=st->next; ++ sial_free(st->name); ++ sial_free(st); ++ ++ return; ++ ++ } ++ ++ last=st; ++ st=st->next; ++ } ++} ++#endif ++ ++stinfo_t* ++sial_getstbyindex(ull idx, int type) ++{ ++stinfo_t*tst; ++ ++ for(tst=slist.next; tst; tst=tst->next) { ++ ++ if(tst->ctype.type == type && tst->idx == idx) { ++ ++ return tst; ++ } ++ } ++ return 0; ++} ++ ++static void ++sial_addst(stinfo_t*st) ++{ ++stinfo_t*tst; ++ ++ tst=slist.next; ++ slist.next=st; ++ st->next=tst; ++} ++ ++typedef struct neg_s { ++ struct neg_s *next; ++ char *name; ++} neg_t; ++ ++static neg_t *nlist=0; ++ ++void ++sial_addneg(char *name) ++{ ++neg_t *neg; ++ ++ neg=sial_alloc(sizeof *neg); ++ neg->name=sial_strdup(name); ++ neg->next=nlist; ++ nlist=neg; ++} ++ ++int ++sial_isneg(char *name) ++{ ++neg_t *neg; ++ ++ for(neg=nlist; neg; neg=neg->next) ++ if(!strcmp(neg->name, name)) return 1; ++ return 0; ++} ++ ++/* ++ This function is called by sial_vardecl() when the typedef storage class ++ as been specified. In which case we need to create new typedefs not variables. 
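   For instance (illustrative): a script line such as

	typedef struct task_struct *task_p;

   ends up here rather than in the variable path, and an array form like

	typedef int pid_tab[16];

   is quietly rewritten as "typedef int *pid_tab;", since the code below
   drops the array index and bumps the reference level instead.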
++*/ ++void ++sial_tdef_decl(dvar_t*dv, type_t*t) ++{ ++ while(dv) { ++ ++ dvar_t*next; ++ ++ stinfo_t*st=sial_calloc(sizeof(stinfo_t)); ++ ++ if(dv->nbits) sial_error("No bits fields for typedefs"); ++ if(dv->idx) { ++ ++ /* we change a 'typedef type var[n];' into a 'typedef type_t*var;' */ ++ sial_freeidx(dv->idx); ++ dv->idx=0; ++ dv->ref++; ++ } ++#if 0 ++At this time we do not give any error messages or warnings. ++If a type is redefined within a single file that will means ++problem for the user put this is not a full blown C compiler. ++ ++ { ++ type_t*t=sial_newtype(); ++ ++ if(API_GETCTYPE(V_TYPEDEF, dv->name, t)) { ++ ++ sial_warning("Typedef %s already defined in image, redefinition ignored", ++ dv->name); ++ } ++ sial_freetype(t); ++ } ++#endif ++ t->typattr &= ~sial_istdef(t->typattr); ++ sial_duptype(&st->rtype, t); ++ sial_pushref(&st->rtype, dv->ref); ++ st->name=dv->name; ++ dv->name=0; ++ st->idx=sial_nextidx(); ++ st->ctype.type=V_TYPEDEF; ++ ++ sial_addst(st); ++ ++ next=dv->next; ++ dv->next=0; ++ sial_freedvar(dv); ++ dv=next; ++ } ++} ++ ++int ++sial_ispartial(type_t*t) ++{ ++stinfo_t*st=sial_getstbyindex(t->idx, t->type); ++ ++ if(!st) { ++ ++ sial_error("Oops sial_ispartial"); ++ } ++ return !st->all; ++} ++ ++char * ++sial_gettdefname(ull idx) ++{ ++stinfo_t*tst=sial_getstbyindex(idx, V_TYPEDEF); ++ ++ if(tst) return tst->name; ++ else return 0; ++} ++ ++static int init=0; ++static void ++sial_chkinit(void) ++{ ++ if(!init) { ++ ++ sial_error("Sial Package not initialized"); ++ ++ } ++} ++ ++void ++sial_getmem(ull kp, void *p, int n) ++{ ++ sial_chkinit(); ++ if(!API_GETMEM(kp, p, n)) { ++ ++ sial_error("Error on read from 0x%llx for %d", kp, n); ++ ++ } ++} ++ ++void ++sial_putmem(ull kp, char *p, int n) ++{ ++ sial_chkinit(); ++ if(!API_PUTMEM(kp, p,n)) { ++ ++ sial_error("Error on write at 0x%llx for %d", kp, n); ++ ++ } ++} ++ ++void ++sial_partialctype(int type, char *name) ++{ ++stinfo_t*st; ++ ++ /* check first if we have a partial of that type ++ already in progress (after a forward declaration) */ ++ if((st=sial_getst(name, type))) { ++ ++ /* if it's complete we need to start a new one */ ++ if(!st->all) return; ++ ++ } ++ st=sial_calloc(sizeof(stinfo_t)); ++ st->name=sial_strdup(name); ++ st->ctype.type=type; ++ st->all=0; ++ st->ctype.idx=st->idx=sial_nextidx(); ++ sial_addst(st); ++} ++ ++void ++sial_startctype_named(int type, char *name) ++{ ++stinfo_t*st; ++ ++ /* if no partial yet start one */ ++ if(!(st=sial_getst(name, type)) || st->all) ++ sial_partialctype(type, name); ++} ++ ++void ++sial_startctype(int type, node_t*namen) ++{ ++ sial_startctype_named(type, NODE_NAME(namen)); ++} ++ ++int ++sial_samectypename(int type, ull idx1, ull idx2) ++{ ++stinfo_t*st1, *st2; ++ ++ if((st1=sial_getstbyindex(idx1, type)) && ++ (st2=sial_getstbyindex(idx2, type))) { ++ ++ // check names ++ if(!strcmp(st1->name, st2->name)) return 1; ++ ++ // check all members and sizes in order ++ // unamed ctypes can end up here too... 
++ if(st1->stm) { ++ stmember_t *m1=st1->stm, *m2=st2->stm; ++ while(m1 && m2) { ++ if(strcmp(m1->m.name, m2->m.name)) break; ++ if(m1->m.offset != m2->m.offset ) break; ++ if(m1->m.size != m2->m.size ) break; ++ m1=m1->next; ++ m2=m2->next; ++ } ++ if(!m1 && !m2) return 1; ++ } ++ else if(st1->enums) { ++ ++ enum_t *e1=st1->enums, *e2=st2->enums; ++ while(e1 && e2) { ++ if(strcmp(e1->name, e2->name)) break; ++ if(e1->value != e2->value ) break; ++ e1=e1->next; ++ e2=e2->next; ++ } ++ if(!e1 && !e2) return 1; ++ } ++ ++ } ++ return 0; ++} ++ ++#define VOIDIDX 0xbabebabell ++type_t* ++sial_getvoidstruct(int ctype) ++{ ++type_t*bt=sial_newtype(); ++ ++ bt->type=ctype; ++ bt->idx=VOIDIDX; ++ bt->size=0; ++ bt->ref=0; ++ return bt; ++} ++ ++void sial_fillst(stinfo_t *st); ++ ++/* Just in case this is an unnamed structure member then we need ++ to add it to the slist ourselves using the index. sial_getctype() would ++ not found it. ++*/ ++static void ++sial_memstinfo(stmember_t *stm, char *pname) ++{ ++int type=stm->type.ref?stm->type.rtype:stm->type.type; ++ ++ if(is_ctype(type)) { ++ ++ if(!sial_getstbyindex(stm->type.idx, type)) { ++ ++ stinfo_t*st=sial_calloc(sizeof(stinfo_t)); ++ ++ sial_duptype(&st->ctype, &stm->type); ++ st->ctype.type=type; ++ // dereference level is attached (wrongly) to type... ++ // zap it ++ st->ctype.ref=0; ++ st->idx=st->ctype.idx; ++ st->name=sial_strdup(pname); ++ sial_addst(st); ++ } ++ } ++} ++ ++void ++sial_fillst(stinfo_t *st) ++{ ++char *mname=0; ++ull idx=st->ctype.idx, lidx=0; ++stmember_t *stm=sial_calloc(sizeof(stmember_t)), **last=&st->stm; ++char *pname; ++ ++ sial_dbg_named(DBG_STRUCT, st->name, 2, "Fill St started [local=%d].\n", (idx & LOCALTYPESBASE) ? 1 : 0); ++ /* bail out if this is local type */ ++ if(idx & LOCALTYPESBASE) return; ++ ++ if(st->stm) sial_error("Oops sial_fillst!"); ++ ++ while((pname=API_MEMBER(mname, idx, &stm->type, &stm->m, &lidx))) { ++ ++ sial_dbg_named(DBG_STRUCT, st->name, 2, "member '%s'\n", pname); ++ sial_memstinfo(stm, pname); ++ stm->next=0; ++ *last=stm; ++ last=&stm->next; ++ mname=""; ++ stm=sial_calloc(sizeof(stmember_t)); ++ if(pname[0]) sial_free(pname); ++ } ++ st->all=1; ++ sial_free(stm); ++} ++ ++type_t* ++sial_getctype(int ctype, char *name, int silent) ++{ ++stinfo_t *st; ++type_t *t=sial_newtype(); ++ ++ sial_chkinit(); ++ sial_dbg_named(DBG_TYPE, name, 2, "getctype [%d] [%s] [s=%d]\n", ctype, name, silent); ++ if(!(st=sial_getst(name, ctype))) { ++ ++ sial_dbg_named(DBG_TYPE, name, 2, "getctype [%s] not found in cache\n", name); ++ if(silent && sial_isneg(name)) return 0; ++ ++ st=sial_calloc(sizeof(stinfo_t)); ++ if(!API_GETCTYPE(ctype, name, &st->ctype)) { ++ ++ sial_dbg_named(DBG_TYPE, name, 2, "[%s] not found in image\n", name); ++ sial_free(st); ++ sial_freetype(t); ++ // add any tdef to the neg list ++ if(ctype == V_TYPEDEF) sial_addneg(name); ++ if(silent) return 0; ++ /* we fill a partial structure for this one ++ assuming it will be defined later. 
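The partial ("forward") type created here is what lets declarations compile before the full definition has been seen: mutually referencing structures, self-referencing lists, and completely opaque handles. A small illustration in plain C of the shapes this mechanism has to support (all names are made up):

/* Cross-reference: each struct needs the other's, as yet partial, type. */
struct job;                                     /* forward declaration   */
struct worker { struct job *current; };
struct job    { struct worker *owner; struct job *next; /* self-ref */ };

/* Opaque handle: the pointed-to struct is never defined in this unit. */
typedef struct pasid_opaque *aspasid_t;

int main(void)
{
        struct worker w = { 0 };
        aspasid_t h = 0;
        return (w.current || h) ? 1 : 0;
}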
This is to permit cross ++ referencing of structures, self referencing of structure, and ++ undefined structure (opaque structures) irix: see types.c : ++ __pasid_opaque ++ */ ++ sial_dbg_named(DBG_TYPE, name, 2, "[%s] creating partial type\n", name); ++ sial_partialctype(ctype, name); ++ return sial_getctype(ctype, name, silent); ++ } ++ sial_dbg_named(DBG_TYPE, name, 2, "getctype [%s] found in image\n", name); ++ st->name=sial_alloc(strlen(name)+1); ++ strcpy(st->name, name); ++ st->stm=0; ++ st->idx=st->ctype.idx; ++ st->all=1; ++ sial_addst(st); ++ /* ++ if this is a typedef then drill down to the real type ++ and make sure it is in the cache. That's what we return ++ ++ Bug cure: this would fail: ++ ++ struct sv { ++ int i; ++ }; ++ struct foo { ++ sv_t ms_sv; ++ }; ++ ++ Because the rtype index returned by API_GETRTYPE() is the die offset ++ in the image. If We already have redefine the real type locally the ++ call to sial_getctype() will not have a matching index later when we ++ don't find the index in the type cache. ++ ++ So we track the real index with ridx. This also ensures that ++ redefining a struct locally and using a typetef from the image will actualy ++ end up pointing to the local struct and not the image struct. ++ */ ++ if(ctype == V_TYPEDEF) { ++ ++ char *tname; ++ int itype; ++ ++ tname=API_GETRTYPE(st->idx, t); ++ ++ if(t->type==V_REF) itype=t->rtype; ++ else itype=t->type; ++ ++ /* if it's a named struct, enum or union then make sure we have it in the cache */ ++ if(is_ctype(itype) && tname && tname[0] && ++ (strcmp(tname,"struct ") != 0 ++ && strcmp(tname,"union ") != 0 ++ && strcmp(tname,"enum ") != 0)) { ++ ++ sial_freetype(t); ++ t=sial_getctype(itype, tname, silent); ++ ++ /* in IRIX we have a typedef struct __pasid_opaque* aspasid_t; ++ w/ no struct __pasid_opaque defined. The aspasid_t ends ++ up being used as a "named" void *. So we force a void * here */ ++ /* XXX: This should at least generate a warning */ ++ if(!t) { ++ sial_warning("voidstruct created (%s)\n", tname); ++ t=sial_getvoidstruct(itype); ++ } ++ } else if (is_ctype(itype) || itype == V_ENUM) { ++ ++ /* for unnamed structs, unions and enums create an entry */ ++ stinfo_t*st=sial_calloc(sizeof(stinfo_t)); ++ ++ sial_duptype(&st->ctype, t); ++ st->idx=t->idx; ++ st->name=sial_strdup(""); ++ sial_fillst(st); ++ sial_addst(st); ++ } ++ sial_duptype(&st->rtype, t); ++ ++ } else if(is_ctype(ctype)) { ++ ++ /* get all member info now ! */ ++ sial_fillst(st); ++ } ++ } ++ else sial_dbg_named(DBG_TYPE, name, 2, "getctype [%s] found in cache\n", name); ++ ++ if(ctype == V_ENUM || (ctype == V_TYPEDEF && st->rtype.type == V_ENUM)) { ++ st->enums=API_GETENUM(name); ++ sial_pushenums(st->enums); ++ } ++ if(ctype==V_TYPEDEF) sial_duptype(t, &st->rtype); ++ else sial_duptype(t, &st->ctype); ++ ++ return t; ++} ++ ++type_t* ++sial_newctype(int ctype, node_t*n) ++{ ++type_t*t; ++char *name; ++ ++ t=sial_getctype(ctype, name=NODE_NAME(n), 0); ++ NODE_FREE(n); ++ sial_free(name); ++ return t; ++} ++ ++/* ++ We don't use the type to point back to get the typedef name. ++ The type is now the real type not the type for the typedef. ++ So we keep a running sting of the last name variable name ++ the parser found and use that. 
++ 5/23/00 ++*/ ++node_t* ++sial_tdeftovar(type_t*td) ++{ ++char *sial_lastvar(void); ++char *name=sial_lastvar(); ++ ++ sial_free(td); ++ return sial_newvnode(name); ++} ++ ++/* ++ Check to see if a cached member info is available ++*/ ++static stmember_t* ++sial_getm(char *name, type_t*tp, stinfo_t**sti) ++{ ++ull idx=tp->idx; ++stinfo_t*st; ++stmember_t*stm; ++ ++ for(st=slist.next; st; st=st->next) { ++ ++ if(st->idx == idx) { ++ ++ *sti=st; ++ ++ if(!st->stm) sial_fillst(st); ++ ++ for(stm=st->stm; stm; stm=stm->next) { ++ ++ ++ if(!strcmp(stm->m.name, name)) { ++ ++ return stm; ++ ++ } ++ } ++ } ++ } ++ return 0; ++} ++ ++value_t * ++sial_ismember(value_t*vp, value_t*vm) ++{ ++char *name=sial_getptr(vm, char); ++int ret=0; ++stinfo_t*st; ++ ++ if(sial_getm(name, &vp->type, &st)) ret=1; ++ ++ return sial_defbtype(sial_newval(), ret); ++} ++ ++/* XXX this entire stuff could very well be machine specific ... */ ++static int ++sial_getalign(type_t*t) ++{ ++ /* this is a custome type deal w/ it */ ++ if(t->type == V_BASE) { ++ ++ int n; ++ ++ /* Intel 386 ABI says that double values align on 4 bytes */ ++ if(abitype==ABI_INTEL_X86) n=((t->size>4)?4:t->size); ++ else n=t->size; ++ return n*8; ++ } ++ if(t->type == V_REF) { ++ /* ++ * This is an array but if there are additional references ++ * (>1) it is an array of pointers. In that case the pointer ++ * alignment has to be used. ++ */ ++ if(t->idxlst && t->ref == 1) { ++ int ret; ++ ++ sial_popref(t, 1); ++ ret=sial_getalign(t); ++ sial_pushref(t, 1); ++ return ret; ++ } ++ return sial_defbsize()*8; ++ } ++ /* alignment of a struct/union is on the largest of it's member or ++ largest allignment of sub structures */ ++ if(is_ctype(t->type)) { ++ ++ stinfo_t*st; ++ stmember_t*sm; ++ int maxallign=0; ++ ++ /* if this is a image type then let the api tell us */ ++ if(!(t->idx & LOCALTYPESBASE)) { ++ ++ return API_ALIGNMENT(t->idx)*8; ++ ++ } ++ ++ if(!(st=sial_getstbyindex(t->idx, t->type))) { ++ ++ sial_error("Oops sial_getalign"); ++ } ++ ++ for(sm=st->stm; sm; sm=sm->next) { ++ ++ int a=sial_getalign(&sm->type); ++ ++ if(a > maxallign) maxallign=a; ++ ++ } ++ ++ return maxallign; ++ ++ } ++ /* other types shoudl not be part of a ctype declaration ... */ ++ sial_error("Oops sial_getalign2!"); ++ return 0; ++} ++ ++static stinfo_t* ++sial_chkctype(int ctype, char *name) ++{ ++stinfo_t*sti; ++ ++ if(name) { ++ ++ /* we should already have a partial structure on the stack */ ++ sti=sial_getst(name, ctype); ++ ++#if 0 ++At this time I choose not to give any warning. ++Structure redefinition is a normal part of include files... ++ ++ /* We give a warning message for redefined types */ ++ { ++ type_t*t=sial_newtype(); ++ ++ if(API_GETCTYPE(ctype, name, t)) { ++ ++ sial_warning("%s %s redefinition", sial_ctypename(ctype), name); ++ } ++ sial_freetype(t); ++ } ++#endif ++ ++ if(sti->all) { ++ ++ sial_error("Oops sial_ctype_decl"); ++ } ++ ++ sial_free(name); ++ ++ } else { ++ ++ sti=sial_alloc(sizeof(stinfo_t)); ++ sti->name=0; ++ sti->idx=sial_nextidx(); ++ sial_addst(sti); ++ } ++ return sti; ++} ++ ++/* ++ This function is used to create new enum types. ++ The syntax for enum is: ++ enum ident { ++ ident [= int], ++ [ident [= int] ] ... ++ }; ++ So we check for an assign value and is it exists then ++ we reset the counter to it. ++ This is the way the mips compiler does it. Which migt be ++ the right way or not, although I fail to see why it's done ++ that way. 
++ ++ So enum foo { ++ a, ++ b, ++ c=0, ++ d ++ }; ++ ++ Wil yield values : ++ ++ a=0 ++ b=1 ++ c=0 ++ c=1 ++*/ ++enum_t* ++sial_add_enum(enum_t*ep, char *name, int val) ++{ ++enum_t *epi, *nep=sial_alloc(sizeof(enum_t)); ++ ++ nep->name=name; ++ nep->value=val; ++ nep->next=0; ++ if(!ep) return nep; ++ epi=ep; ++ while(ep->next) ep=ep->next; ++ ep->next=nep; ++ return epi; ++} ++ ++type_t* ++sial_enum_decl(int ctype, node_t*n, dvar_t*dvl) ++{ ++dvar_t*dv=dvl, *next; ++int counter=0; ++stinfo_t*sti; ++enum_t *ep=0; ++char *name=n?NODE_NAME(n):0; ++type_t *t; ++ ++ if(n) sial_startctype(ctype, n); ++ sti=sial_chkctype(ctype, name); ++ ++ while(dv) { ++ ++ int val; ++ ++ /* evaluate an assignment ? */ ++ if(dv->init) { ++ ++ value_t *v=sial_exenode(dv->init); ++ ++ if(!v) { ++ ++ sial_rerror(&dv->pos, "Syntax error in enum expression"); ++ ++ } else if(v->type.type != V_BASE) { ++ ++ sial_rerror(&dv->pos, "Integer expression needed"); ++ } ++ ++ val=sial_getval(v); ++ counter=val+1; ++ sial_freeval(v); ++ ++ } else { ++ ++ val=counter++; ++ } ++ ++ ep=sial_add_enum(ep, dv->name, val); ++ ++ next=dv->next; ++ dv->next=0; ++ dv->name=0; ++ sial_freedvar(dv); ++ dv=next; ++ } ++ sti->enums=ep; ++ ++ /* now we push the values in the defines */ ++ sial_pushenums(sti->enums); ++ ++ /* we return a simple basetype_t*/ ++ /* after stahing the idx in rtype */ ++ t=sial_newbtype(INT); ++ t->rtype=sti->idx; ++ t->typattr |= sial_isenum(-1); ++ ++ return t; ++ ++} ++ ++/* ++ The next functions are used to produce a new type ++ and make it available throught the local cache. ++ This enables custom type definitions on top of the ++ ctypes defined in the object symbol tables. ++ ++ There is one function per suported architechture. ++ ++*/ ++/* macro for alignment to a log2 boundary */ ++#define Alignto(v, a) (((v) + (a) -1) & ~((a)-1)) ++/* ++ The algorith complies with the SysV mips ABI ++*/ ++type_t* ++sial_ctype_decl(int ctype, node_t*n, var_t*list) ++{ ++type_t*t; ++stinfo_t*sti; ++stmember_t **mpp; ++var_t*v; ++int bits_left, bit_alignment; ++int maxbytes, alignment, nextbit; ++char *name=n?NODE_NAME(n):0; ++ ++ if(list->next==list) { ++ ++ sial_error("Empty struct/union/enum declaration"); ++ } ++ ++ t=sial_newbtype(0); ++ sti=sial_chkctype(ctype, name); ++ t->type=sti->ctype.type=ctype; ++ t->idx=sti->ctype.idx=sti->idx; ++ sti->stm=0; ++ mpp=&sti->stm; ++ ++#if LDEBUG ++printf("\n%s %s\n", ctype==V_STRUCT?"Structure":"Union", name ? name : ""); ++#endif ++ ++ /* these are the running position in the structure/union */ ++ nextbit=0; /* next bit open for business */ ++ alignment=0; /* keeps track of the structure alignment ++ Mips ABI says align to bigest alignment of ++ all members of the struct/union. Also ++ unamed bit fields do not participate here. 
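The layout pass in sial_ctype_decl() follows the SysV-style rules spelled out here: every member is placed at the next multiple of its own alignment, bit fields pack into units of their declared base type, and the finished structure is padded out to its strictest member. As a concrete reference point, this is what those rules give for two small structures on a typical x86/x86-64 SysV ABI (an independent, compilable check, not code from the patch):

#include <assert.h>
#include <stddef.h>

struct layout_demo {
        char  c;        /* offset 0                                    */
        int   i;        /* offset 4: bumped to the int's alignment     */
        short s;        /* offset 8                                    */
};                      /* padded to 12 so array elements stay aligned */

struct bits_demo {
        unsigned a : 3; /* both fields share one 4-byte unit */
        unsigned b : 5;
};                      /* size 4, alignment 4 */

int main(void)
{
        static_assert(offsetof(struct layout_demo, i) == 4, "i at 4");
        static_assert(offsetof(struct layout_demo, s) == 8, "s at 8");
        static_assert(sizeof(struct layout_demo) == 12, "padded size");
        static_assert(sizeof(struct bits_demo) == 4, "one int unit");
        return 0;
}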
*/ ++ maxbytes=0; /* tracking of the maximum member size for union */ ++ ++ for(v=list->next; v!=list; v=v->next) { ++ ++ stmember_t*stm=sial_calloc(sizeof(stmember_t)); ++ dvar_t*dv=v->dv; ++ int nbits; ++ ++ stm->m.name=sial_strdup(v->name); ++ sial_duptype(&stm->type, &v->v->type); ++ ++ /* if this member is a bit filed simply use that */ ++ if(dv->bitfield) { ++ ++ nbits=dv->nbits; ++ ++ /* aligment is the size of the declared base type size */ ++ bit_alignment=v->v->type.size*8; ++ ++ if(nbits > bit_alignment) { ++ ++ sial_error("Too many bits for specified type"); ++ } ++ ++ /* For unamed bit field align to smallest entity */ ++ /* except for 0 bit bit fields */ ++ if(!dv->name[0] && nbits) { ++ ++ bit_alignment=((nbits+7)/8)*8; ++ ++ } ++ ++ /* We compute the number of bits left in this entity */ ++ bits_left = bit_alignment - (nextbit%bit_alignment); ++ ++ /* 0 bits means, jump to next alignement unit anyway ++ if not already on such a boundary */ ++ if(!nbits && (bits_left != bit_alignment)) nbits=bits_left; ++ ++ /* Not enough space ? */ ++ if(nbits > bits_left) { ++ ++ /* jump to next start of entity */ ++ nextbit += bits_left; ++ ++ } ++ ++ /* update member information */ ++ stm->m.offset=(nextbit/bit_alignment)*v->v->type.size; ++ stm->m.fbit=nextbit % bit_alignment; ++ stm->m.nbits=nbits; ++ stm->m.size=v->v->type.size; ++#if LDEBUG ++ printf(" [%s] Bit member offset=%d, fbit=%d, nbits=%d\n", stm->m.name, stm->m.offset, stm->m.fbit, stm->m.nbits); ++#endif ++ /* an unamed bit field does not participate in the alignment value */ ++ if(!dv->name[0]) { ++ ++ bit_alignment=0; ++ ++ /* reset size so that it does not have affect in sial_getalign() */ ++ stm->type.size=1; ++ } ++ ++ } else { ++ ++ int nidx=1; ++ ++ if(dv->idx) { ++ ++ int i; ++ ++ /* flag it */ ++ stm->type.idxlst=sial_calloc(sizeof(int)*(dv->idx->nidx+1)); ++ ++ /* multiply all the [n][m][o]'s */ ++ for(i=0;iidx->nidx;i++) { ++ ++ value_t *vidx; ++ ull idxv; ++ ++ vidx=sial_exenode(dv->idx->idxs[i]); ++ if(!vidx) { ++ ++ sial_error("Error while evaluating array size"); ++ } ++ if(vidx->type.type != V_BASE) { ++ ++ sial_freeval(vidx); ++ sial_error("Invalid index type"); ++ ++ } ++ ++ idxv=sial_getval(vidx); ++ sial_freeval(vidx); ++ ++ stm->type.idxlst[i]=idxv; ++ ++ nidx *= idxv; ++ } ++ ++ ++ } ++ ++ /* the number of bits on which this item aligns itself */ ++ bit_alignment=sial_getalign(&stm->type); ++ ++ /* jump to this boundary */ ++ nextbit = Alignto(nextbit,bit_alignment); ++ ++ ++ if(stm->type.ref - (dv->idx?1:0)) { ++ ++ nbits=nidx*sial_defbsize()*8; ++ ++ } else { ++ ++ nbits=nidx*stm->type.size*8; ++ } ++ ++ if(abitype==ABI_INTEL_X86) { ++ ++ int pos=nextbit/8; ++ ++ pos = (pos & 0xfffffffc) + 3 - (pos & 0x2); ++ stm->m.offset=pos; ++ ++ } else { ++ ++ stm->m.offset=nextbit/8; ++ } ++ stm->m.nbits=0; ++ stm->m.size=nbits/8; ++#if LDEBUG ++printf(" [%s] Mmember offset=%d, size=%d size1=%d nidx=%d\n", stm->m.name, stm->m.offset, stm->m.size, stm->type.size, nidx); ++#endif ++ ++ } ++ ++ if(ctype==V_STRUCT) nextbit+=nbits; ++ /* Union members overlap */ ++ else nextbit=0; ++ ++ /* keep track of the maximum alignment */ ++ if(bit_alignment>alignment) alignment=bit_alignment; ++ ++ /* keep track of maximum size for unions */ ++ if(stm->m.size > maxbytes) maxbytes=stm->m.size; ++ ++ stm->next=0; ++ *mpp=stm; ++ mpp=&stm->next; ++ } ++ ++ /* pad the final structure according to it's most stricly aligned member */ ++ if(nextbit) nextbit = Alignto(nextbit, alignment); ++ else nextbit=Alignto(maxbytes*8, 
alignment); /* --> it's the case for a union */ ++ ++ t->size=sti->ctype.size=nextbit/8; ++ ++#if LDEBUG ++printf("Final size = %d\n", t->size); ++#endif ++ ++ sti->all=1; ++ sial_addfunc_ctype(sti->idx); ++ return t; ++} ++ ++/* ++ member access and caching. ++ If the member name is empty then the caller wants us ++ to populate the entire engregate. The apimember() should ++ support a getfirst() (member name == "") and getnext() ++ (member name != "") for this perpose. ++ */ ++stmember_t* ++sial_member(char *mname, type_t*tp) ++{ ++stinfo_t *sti; ++stmember_t *stm; ++ ++ if(!is_ctype(tp->type) && ! (tp->type==V_REF && is_ctype(tp->rtype))) { ++ ++ sial_error("Expression for member '%s' is not a struct/union", mname); ++ ++ ++ } ++ ++ if(tp->idx == VOIDIDX) { ++ ++ sial_error("Reference to member (%s) from unknown structure type", mname); ++ } ++ ++ if(!(stm=sial_getm(mname, tp, &sti))) { ++ ++ sial_error("Unknown member name [%s]", mname); ++ } ++ return stm; ++} ++ ++int ++sial_open() ++{ ++ sial_setofile(stdout); ++ /* push an empty level for parsing allocation */ ++ sial_pushjmp(0, 0, 0); ++ sial_setapiglobs(); ++ init=1; ++ sial_setbuiltins(); ++ return 1; ++} ++ ++/* here is a set of api function that do nothing */ ++static int apigetmem(ull iaddr, void *p, int nbytes) { return 1; } ++static int apiputmem(ull iaddr, void *p, int nbytes) { return 1; } ++static char* apimember(char *mname, ull pidx, type_t*tm, member_t *m, ull *lidx) { return 0; } ++static int apigetctype(int ctype, char *name, type_t*tout) { return 0; } ++static char * apigetrtype(ull idx, type_t*t) { return ""; } ++static int apialignment(ull idx) { return 0; } ++static int apigetval(char *name, ull *val) { return 0; } ++static enum_t* apigetenum(char *name) { return 0; } ++static def_t *apigetdefs(void) { return 0; } ++static char* apifindsym(char *p) { return 0; } ++ ++static apiops nullops= { ++ apigetmem, apiputmem, apimember, apigetctype, apigetrtype, apialignment, ++ apigetval, apigetenum, apigetdefs, 0, 0, 0, 0, apifindsym ++}; ++ ++apiops *sial_ops=&nullops;; ++ ++void ++sial_apiset(apiops *o, int abi, int nbpw, int sign) ++{ ++def_t *dt; ++ ++ sial_ops=o?o:&nullops; ++ sial_setdefbtype(nbpw, sign); ++ /* get the pre defines and push them. */ ++ dt=API_GETDEFS(); ++ while(dt) { ++ ++ sial_newmac(dt->name, dt->val, 0, 0, 1); ++ dt=dt->next; ++ } ++ /* add the sial define */ ++ sial_newmac(sial_strdup("sial"), sial_strdup("1"), 0, 0, 1); ++} ++ ++/* ++ Get and set path function. ++ ipath is include file search path. 
++ mpath is macro search path ++*/ ++static char *mpath=""; ++static char *ipath=""; ++void sial_setmpath(char *p) { mpath=p; } ++void sial_setipath(char *p) { ipath=p; } ++char *sial_getmpath(void) { return mpath; } ++char *sial_getipath(void) { return ipath; } ++ ++static char *curp=0; ++char *sial_curp(char *p) { char *op=curp; p?(curp=p):(op=curp); return op; } ++ ++static char* ++sial_cattry(char *first, char *second) ++{ ++struct stat stats; ++char *buf=sial_alloc(strlen(first)+strlen(second)+2); ++ ++ strcpy(buf, first); ++ strcat(buf, "/"); ++ strcat(buf, second); ++ if(!stat(buf, &stats)) return buf; ++ sial_free(buf); ++ return 0; ++} ++ ++char * ++sial_filepath(char *fname, char *path) ++{ ++ struct stat buf; ++ /* valid file path, return immediatly */ ++ if(stat(fname,&buf) == 0) { ++ /* must return a free'able name */ ++ char *name=sial_strdup(fname); ++ TAG(name); ++ return name; ++ ++ } else if(fname[0]=='~') { ++ ++ if(strlen(fname)>1) { ++ ++ char *rname, *start; ++ struct passwd *pwd; ++ ++ if(fname[1]=='/') { ++ ++ /* current user name */ ++ pwd=getpwuid(getuid()); ++ ++ if(!pwd) { ++ sial_msg("Who are you : uid=%d \n?", getuid()); ++ return 0; ++ } ++ ++ start=fname+1; ++ ++ } else { ++ ++ char *p, s; ++ ++ for(p=fname+1;*p;p++) if(*p=='/') break; ++ s=*p; ++ *p='\0'; ++ ++ /* other user */ ++ pwd=getpwnam(fname+1); ++ if(!pwd) { ++ ++ sial_msg("Who is this : %s ?\n", fname+1); ++ return 0; ++ } ++ if(s) *p=s; ++ start=p; ++ } ++ rname=sial_alloc(strlen(start+1)+strlen(pwd->pw_dir)+2); ++ strcpy(rname, pwd->pw_dir); ++ strcat(rname, start); ++ return rname; ++ } ++ ++ } else { ++ ++ char *p=sial_strdup(path); ++ char *tok, *curp; ++ ++ /* we check if the file is found relatively to the current ++ position. I.e. the position of the running script */ ++ if((curp=sial_curp(0)) && (curp=sial_cattry(curp, fname))) { ++ ++ sial_free(p); ++ return curp; ++ } ++ ++ tok=strtok(p, ":"); ++ while(tok) { ++ ++ if((curp=sial_cattry(tok, fname))) { ++ ++ sial_free(p); ++ return curp; ++ } ++ tok=strtok(NULL, ":"); ++ ++ } ++ sial_free(p); ++ } ++ return 0; ++} ++ ++char* ++sial_filempath(char *fname) ++{ ++ return sial_filepath(fname, mpath); ++} ++ ++char * ++sial_fileipath(char *fname) ++{ ++ return sial_filepath(fname, ipath); ++} ++ ++/* load a file or a set of file */ ++int ++sial_loadunload(int load, char *name, int silent) ++{ ++DIR *dirp; ++int ret=1; ++char *fname=sial_filempath(name); ++ ++ if(!fname) { ++ ++ if(!silent) sial_msg("File not found : %s\n", name); ++ return 0; ++ } ++ ++ if((dirp=opendir(fname))) { ++ ++ struct dirent *dp; ++ char *buf; ++ ++ while ((dp = readdir(dirp)) != NULL) { ++ ++ if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, "..")) ++ continue; ++ ++ buf=sial_alloc(strlen(fname)+dp->d_reclen+2); ++ sprintf(buf, "%s/%s", fname, dp->d_name); ++ if(load) { ++ ret &= sial_newfile(buf, silent); ++ }else{ ++ sial_deletefile(buf); ++ } ++ sial_free(buf); ++ } ++ closedir(dirp); ++ } ++ else { ++ ++ if(load) { ++ ret=sial_newfile(fname, silent); ++ }else{ ++ sial_deletefile(fname); ++ } ++ } ++ sial_free(fname); ++ return ret; ++} ++ ++/* ++ Load conditionaly. ++ If it's already load, return. 
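sial_filepath() above resolves a name in a fixed order: an already-valid path wins, then '~' expansion, then the directory of the currently running script, then each colon-separated component of the search path (the macro path for sial_filempath(), the include path for sial_fileipath()). A host embedding the library would therefore wire things up roughly as below; the directory names are invented and this is a usage sketch, not code from the patch:

/* Assumed host-side setup. */
sial_setmpath("/usr/share/sial/crash:/home/me/.sial");  /* macro scripts  */
sial_setipath("/usr/share/sial/include");               /* #include files */

/* "ps" now resolves against the running script's directory first,
   then /usr/share/sial/crash/ps, then /home/me/.sial/ps. */
char *resolved = sial_filempath("ps");
if (resolved) {
        /* ... load or open it ... */
        sial_free(resolved);    /* the code above returns a free'able name */
}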
++*/ ++ull ++sial_depend(char *name) ++{ ++char *fname=sial_filempath(name); ++int ret=1 ; ++void *fp; ++ ++ if(!fname) ret=0; ++ else if(!(fp=sial_findfile(fname,0)) || sial_isnew(fp)) { ++ ++ ret=sial_loadunload(1, name, 1); ++ sial_free(fname); ++ } ++ return ret; ++} ++ ++value_t * ++sial_bdepend(value_t *vname) ++{ ++ return sial_makebtype(sial_depend(sial_getptr(vname, char))); ++} ++ ++ull ++sial_load(char *fname) ++{ ++ return sial_loadunload(1, fname, 0); ++} ++ ++value_t* ++sial_bload(value_t *vfname) ++{ ++char *fname=sial_getptr(vfname, char); ++value_t *v; ++ ++ v=sial_makebtype(sial_load(fname)); ++ return v; ++} ++ ++ull ++sial_unload(char *fname) ++{ ++ return sial_loadunload(0, fname, 0); ++} ++ ++value_t* ++sial_bunload(value_t *vfname) ++{ ++char *fname=sial_getptr(vfname, char); ++ ++ return sial_defbtype(sial_newval(), sial_unload(fname)); ++} ++ ++void ++sial_loadall() ++{ ++char *path=sial_strdup(sial_getmpath()); ++char *p, *pn; ++ ++ p=pn=path; ++ while(*pn) { ++ ++ if(*pn == ':') { ++ ++ *pn++='\0'; ++ sial_loadunload(1, p, 1); ++ p=pn; ++ ++ } else pn++; ++ } ++ if(p!=pn) sial_loadunload(1, p, 1); ++ /* sial_free(path); */ ++} ++ ++static void ++add_flag(var_t*flags, int c) ++{ ++char s[20]; ++var_t *v; ++ ++ sprintf(s, "%cflag", c); ++ v=sial_newvar(s); ++ sial_defbtype(v->v, (ull)0); ++ v->ini=1; ++ sial_enqueue(flags, v); ++} ++ ++int ++sial_cmd(char *fname, char **argv, int argc) ++{ ++value_t *idx, *val; ++ ++ sial_chkinit(); ++ ++ if(sial_chkfname(fname, 0)) { ++ ++ var_t*flags, *args, *narg; ++ char *opts, *newn=sial_alloc(strlen(fname)+sizeof("_usage")+1); ++ int c, i; ++ extern char *optarg; ++ extern int optind; ++ int dou; ++ char *f=sial_strdup("Xflag"); ++ ++ flags=(var_t*)sial_newvlist(); ++ ++ /* build a complete list of option variables */ ++ for(c='a';c<='z';c++) add_flag(flags, c); ++ for(c='A';c<='Z';c++) add_flag(flags, c); ++ ++ /* check if there is a getopt string associated with this command */ ++ /* there needs to be a fname_opt() and a fname_usage() function */ ++ sprintf(newn, "%s_opt", fname); ++ ++ if(sial_chkfname(newn, 0)) opts=(char*)(unsigned long)sial_exefunc(newn, 0); ++ else opts=""; ++ ++ sprintf(newn, "%s_usage", fname); ++ dou=sial_chkfname(newn, 0); ++ ++ /* build a set of variable from the given list of arguments */ ++ /* each options generate a conrresponding flag ex: -X sets Xflag to one ++ end the corresponding argument of a ":" option is in ex. 
Xarg ++ each additional arguments is keaped in the array args[] */ ++ ++ if(opts[0]) { ++ ++#ifdef linux ++ optind=0; ++#else ++ getoptreset(); ++#endif ++ while ((c = getopt(argc, argv, opts)) != -1) { ++ ++ var_t*flag, *opt; ++ char *a=sial_strdup("Xarg");; ++ ++ if(c==':') { ++ ++ sial_warning("Missing argument(s)"); ++ if(dou) sial_exefunc(newn, 0); ++ sial_free(a); ++ goto out; ++ ++ } else if(c=='?') { ++ ++ if(dou) { ++ ++ char *u=(char*)(unsigned long)sial_exefunc(newn, 0); ++ ++ if(u) sial_msg("usage: %s %s\n", fname, u); ++ } ++ sial_free(a); ++ goto out; ++ } ++ ++ ++ /* set the Xflag variable to 1 */ ++ f[0]=c; ++ flag=sial_inlist(f, flags); ++ sial_defbtype(flag->v, (ull)1); ++ flag->ini=1; ++ ++ /* create the Xarg variable */ ++ if(optarg && optarg[0]) { ++ ++ char *p=sial_alloc(strlen(optarg)+1); ++ ++ a[0]=c; ++ strcpy(p, optarg); ++ opt=(var_t*)sial_newvar(a); ++ sial_setstrval(opt->v, p); ++ opt->ini=1; ++ sial_enqueue(flags, opt); ++ } ++ sial_free(a); ++ } ++ sial_free(f); ++ } ++ else optind=1; ++ ++ /* put every other args into the argv[] array_t*/ ++ args=(var_t*)sial_newvar("argv"); ++ args->ini=1; ++ ++ /* create a argv[0] with the name of the command */ ++ { ++ ++ val=sial_makestr(fname); ++ idx=sial_makebtype(0); ++ ++ /* create the value's value */ ++ sial_addarrelem(&args->v->arr, idx, val); ++ sial_freeval(idx); ++ } ++ ++ for ( i=1; optind < argc; optind++, i++) { ++ ++ val=sial_makestr(argv[optind]); ++ idx=sial_makebtype(i); ++ ++ /* create the value's value */ ++ sial_addarrelem(&args->v->arr, idx, val); ++ sial_freeval(idx); ++ } ++ ++ narg=(var_t*)sial_newvar("argc"); ++ sial_defbtype(narg->v, i); ++ narg->ini=1; ++ ++ sial_enqueue(flags, narg); ++ ++ /* add the args variable to the flags queue */ ++ sial_enqueue(flags, args); ++ ++ /* now execute */ ++ sial_runcmd(fname, flags); ++ ++out: ++ /* free all arguments variables Xflag Xarg and argv[] */ ++ sial_freesvs(flags); ++ ++ sial_free(newn); ++ return 0; ++ } ++ return 1; ++} ++ +--- crash/extensions/libsial/sial_print.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_print.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,398 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++#include ++#include ++#include ++#include "sial.h" ++/* ++ This set of function are used to print value_ts. 
++*/ ++ ++/* utility that returns a string of '*' for a reference */ ++static ++char *sial_getref(int lev) ++{ ++static char *ptrs="*******"; ++ ++ return ptrs+strlen(ptrs)-lev; ++} ++ ++static char * ++sial_getidx(type_t *t, char*buf, int len) ++{ ++int pos=0; ++ ++ buf[0]='\0'; ++ if(t->idxlst) { ++ ++ int i; ++ ++ for(i=0; t->idxlst[i] && pos < len; i++) { ++ ++ pos += snprintf(buf+pos, len-pos, "[%d]", t->idxlst[i]); ++ } ++ } ++ return buf; ++} ++ ++#define INDENT 4 /* print indent at beginning of new line */ ++#define SPACER 16 /* space for type string */ ++#define NAMESPACE 16 /* space used for member/var names */ ++#define NBUNDLE 4 /* when printing arrays print this much before \n */ ++ ++static void ++sial_indent(int level, int indent) ++{ ++ if(!indent) return; ++ sial_msg("%*s", level*INDENT, ""); ++} ++ ++static void sial_ptype2(type_t*t, value_t*v, int level, int indent, char *name, int ref, int justv); ++ ++/* ++ Print a struct/union type or value ++*/ ++static void ++sial_print_ctype(type_t *t, value_t *v, int level, int indent, char *name, int ref, int justv) ++{ ++stinfo_t *st=sial_getstbyindex(t->idx, t->type); ++stmember_t *m; ++char buf[100]; ++ ++ if(!st) sial_error("Oops sial_print_ctype!"); ++ ++ if(!st->all) { ++ ++ sial_fillst(st); ++ if(!st->all) sial_error("Reference to a incomplete type"); ++ } ++ ++ sial_indent(level, indent); ++ ++ if(!justv) { ++ snprintf(buf, sizeof(buf)-1, "%s %s", sial_ctypename(t->type), st->name?st->name:""); ++ sial_msg("%-*s ", SPACER, buf); ++ ++ /* is this is a pointer, bail out */ ++ } ++ if(ref) return; ++ ++ if(v && !justv) sial_msg(" = "); ++ ++ sial_msg("{\n"); ++ ++ for(m=st->stm; m; m=m->next) { ++ ++ value_t *vm=0; ++ ++ sial_indent(level+1, 1); ++ if(v) { ++ vm=sial_newval(); ++ sial_duptype(&vm->type, &m->type); ++ sial_exememlocal(v, m, vm); ++ sial_ptype2(&vm->type, vm, level+1, 0, m->m.name, 0, 0); ++ ++ } else sial_ptype2(&m->type, vm, level+1, 0, m->m.name, 0, 0); ++ sial_msg(";\n"); ++ if(vm) sial_freeval(vm); ++ } ++ ++ sial_indent(level, 1); ++ sial_msg("}"); ++ if(name) sial_msg(" %s", name); ++ ++} ++ ++static void ++sial_prbval(value_t *v) ++{ ++ if(sial_issigned(v->type.typattr)) sial_msg("%8lld", sial_getval(v)); ++ else sial_msg("%8llu", sial_getval(v)); ++} ++ ++static int ++sial_prtstr(value_t *v, int justv) ++{ ++value_t *vs; ++char *s, *p; ++ ++ if(sial_defbsize()==8) v->v.ull=v->mem; ++ else v->v.ul=v->mem; ++ vs=sial_getstr(v); ++ s=sial_getptr(vs, char); ++ for(p=s; *p; p++) if(!isprint(*p)) return 0; ++ if(p==s) { sial_freeval(vs); return 0; } ++ if(!justv) sial_msg("= "); ++ sial_msg("\"%s\"", s); ++ sial_freeval(vs); ++ return 1; ++} ++ ++static void ++sial_prtarray(type_t*t, ull mem, int level, int idx) ++{ ++int i; ++int j, size=1; ++ ++ for(j=idx+1; t->idxlst[j]; j++) size *= t->idxlst[j]; ++ size *= t->type==V_REF ? 
sial_defbsize() : t->size; ++ ++ /* start printing */ ++ sial_msg("{"); ++ sial_msg("\n"); ++ sial_indent(level+1, 1); ++ ++ for(i=0; iidxlst[idx]; i++, mem += size) { ++ ++ if(t->idxlst[idx+1]) { ++ ++ sial_msg("[%d] = ", i); ++ sial_prtarray(t, mem, level+1, idx+1); ++ ++ } else { ++ ++ /* time to deref and print final type */ ++ value_t *v=sial_newval(), *vr=sial_newval(); ++ int *pi=t->idxlst; ++ ++ t->idxlst=0; ++ ++ sial_duptype(&vr->type, t); ++ sial_pushref(&vr->type, 1); ++ if(sial_defbsize()==8) vr->v.ull=mem; ++ else vr->v.ul=(ul)mem; ++ sial_do_deref(1, v, vr); ++ if(is_ctype(v->type.type) || !(i%NBUNDLE)) sial_msg("[%2d] ", i); ++ sial_ptype2(&v->type, v, level+1, 0, 0, 0, 1); ++ sial_msg(", "); ++ /* anything else then struct/unions, print in buddles */ ++ if(!is_ctype(v->type.type) && !((i+1)%NBUNDLE)) { ++ ++ sial_msg("\n"); ++ sial_indent(level+1, 1); ++ } ++ sial_freeval(v); ++ sial_freeval(vr); ++ t->idxlst=pi; ++ } ++ } ++ sial_msg("\n"); ++ sial_indent(level, 1); ++ sial_msg("}"); ++} ++ ++/* ++ Print a type. ++ Typical output of the 'whatis' command. ++*/ ++static ++void sial_ptype2(type_t*t, value_t*v, int level, int indent, char *name, int ref, int justv) ++{ ++int type=t->type; ++ ++ sial_indent(level, indent); ++ switch(type) { ++ ++ case V_STRUCT: case V_UNION: ++ ++ /* make sure we have all the member info */ ++ sial_print_ctype(t, v, level, 0, name, ref, justv); ++ break; ++ ++ ++ case V_TYPEDEF: ++ /* no typedef should get here */ ++ sial_warning("Typedef in print!"); ++ break; ++ ++ case V_ENUM: ++ /* no enum should get here */ ++ sial_warning("ENUM in print!"); ++ break; ++ ++ case V_REF: ++ { ++ int refi=t->ref, ref=refi; ++ ++ /* decrement ref if this was declared as a array */ ++ if(t->idxlst) ref--; ++ ++ /* print the referenced type */ ++ sial_popref(t, t->ref); ++ sial_ptype2(t, 0, level, 0, 0, 1, justv); ++ sial_pushref(t, refi); ++ ++ if(!justv) { ++ ++ char buf[100], buf2[100]; ++ int pos=0, len=sizeof(buf); ++ ++ buf[0]='\0'; ++ if(t->fct) buf[pos++]='('; ++ if(pos < len) ++ pos += snprintf(buf+pos, len-pos, "%s%s", sial_getref(ref), name?name:""); ++ if(pos < len) ++ pos += snprintf(buf+pos, len-pos, "%s", sial_getidx(t, buf2, sizeof(buf2))); ++ if(pos < len && t->fct) ++ pos += snprintf(buf+pos, len-pos, "%s", ")()"); ++ ++ sial_msg("%*s ", NAMESPACE, buf); ++ } ++ ++ /* arrays are ref with boundaries... 
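The V_REF branch below only prints a dereferenced pointer as a string when it is a single-level char pointer and sial_prtstr() (above) finds every byte up to the NUL printable. That heuristic reduces to this self-contained predicate; looks_like_string() is an illustrative name, not a function from the patch:

#include <ctype.h>
#include <stdio.h>

/* Return 1 only for a non-empty, fully printable C string. */
static int looks_like_string(const char *s)
{
        if (!s || !*s)
                return 0;
        for (; *s; s++)
                if (!isprint((unsigned char)*s))
                        return 0;
        return 1;
}

int main(void)
{
        printf("%d %d\n", looks_like_string("swapper"),
                          looks_like_string("\001\002"));   /* prints: 1 0 */
        return 0;
}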
*/ ++ if(t->idxlst && v) { ++ ++ if(t->idxlst[1] || t->rtype!=V_BASE || t->size!=1 || !sial_prtstr(v, justv)) ++ { ++ if(!justv) sial_msg("= "); ++ sial_popref(t, 1); ++ sial_prtarray(t, v->mem, level, 0); ++ sial_pushref(t, 1); ++ } ++ ++ } else if(v) { ++ ++ if(!justv) sial_msg("= "); ++ if(!sial_getval(v)) sial_msg("(nil)"); ++ else { ++ if(sial_defbsize()==8) sial_msg("0x%016llx", sial_getval(v)); ++ else sial_msg("0x%08x", sial_getval(v)); ++ } ++ if(t->ref==1 && t->rtype==V_BASE && t->size==1) { ++ ++ (void)sial_prtstr(v, justv); ++ } ++ } ++ } ++ break; ++ ++ case V_BASE: ++ { ++ if(sial_isenum(t->typattr)) { ++ ++ stinfo_t *st=sial_getstbyindex(t->rtype, V_ENUM); ++ if(!justv) { ++ char buf[200]; ++ snprintf(buf, sizeof(buf), "enum %s", st->name?st->name:""); ++ sial_msg("%-*s ", SPACER, buf); ++ sial_msg("%*s ", NAMESPACE, (name&&v)?name:""); ++ } ++ if(v) { ++ ++ enum_t *e=st->enums; ++ ++ sial_msg("= "); ++ sial_prbval(v); ++ while(e) { ++ ++ if(e->value==sial_getval(v)) { ++ sial_msg(" [%s]", e->name); ++ break; ++ } ++ e=e->next; ++ } ++ if(!e) sial_msg(" [???]"); ++ ++ }else{ ++ ++ enum_t *e=st->enums; ++ int count=0; ++ ++ sial_msg(" {"); ++ while(e) { ++ ++ if(!(count%4)) { ++ sial_msg("\n"); ++ sial_indent(level+1, 1); ++ } ++ count ++; ++ sial_msg("%s = %d, ", e->name, e->value); ++ e=e->next; ++ ++ } ++ sial_msg("\n"); ++ sial_indent(level, 1); ++ sial_msg("%-*s ", SPACER, "}"); ++ if(ref) return; ++ sial_msg("%*s ", NAMESPACE, name?name:""); ++ } ++ ++ } else { ++ ++ if(!justv) { ++ sial_msg("%-*s " , SPACER , sial_getbtypename(t->typattr)); ++ if(ref) return; ++ sial_msg("%s%*s ", sial_getref(t->ref), NAMESPACE, name?name:""); ++ } ++ if(v) { ++ ++ if(!justv) sial_msg("= "); ++ sial_prbval(v); ++ } ++ } ++ } ++ break; ++ case V_STRING: ++ if(!justv) { ++ sial_msg("%-*s " , SPACER , "string"); ++ sial_msg("%*s ", NAMESPACE, name?name:""); ++ } ++ if(v) { ++ ++ if(!justv) sial_msg("= "); ++ sial_msg("\"%s\"", v->v.data); ++ } ++ break; ++ } ++ if(indent) sial_msg("\n"); ++} ++ ++static value_t* ++sial_ptype(value_t*v) ++{ ++ sial_ptype2(&v->type, 0, 0, 1, 0, 0, 0); ++ sial_msg("\n"); ++ return 0; ++} ++ ++node_t* ++sial_newptype(var_t*v) ++{ ++node_t*n=sial_newnode(); ++ ++ n->data=v->next->v; ++ v->next->v=0; /* save value against freeing */ ++ sial_freevar(v->next); ++ sial_freevar(v); ++ n->exe=(xfct_t)sial_ptype; ++ n->free=(ffct_t)sial_freeval; ++ n->name=0; ++ sial_setpos(&n->pos); ++ return n; ++} ++ ++static value_t * ++sial_pval(node_t*n) ++{ ++value_t *v=NODE_EXE(n); ++char *name=NODE_NAME(n); ++ ++ sial_ptype2(&v->type, v, 0, 1, name, 0, 0); ++ sial_free(name); ++ sial_freeval(v); ++ return 0; ++} ++ ++node_t* ++sial_newpval(node_t*vn, int fmt) ++{ ++node_t*n=sial_newnode(); ++ ++ n->data=vn; ++ n->exe=(xfct_t)sial_pval; ++ n->free=(ffct_t)sial_freenode; ++ n->name=0; ++ sial_setpos(&n->pos); ++ return n; ++} +--- crash/extensions/libsial/mkbaseop.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/mkbaseop.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,169 @@ ++/* ++ * Copyright 2000 Silicon Graphics, Inc. All rights reserved. ++ */ ++ ++#include ++#include ++#include "sial.h" ++#include "sial.tab.h" ++/* ++ This utility generates a operator function table for the base type. ++ Each combinaison of operand type and operation needs a dedicated ++ function. 
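Each (left type, right type, operator) triple below gets its own tiny generated function, and their addresses are collected in the three-dimensional opfuncs[][][] table that sial_op.c indexes at run time. Hand-expanding the printf templates for the signed-long/unsigned-long case gives roughly the following; this is an illustrative expansion, using the sc/uc/.../ull union member names from typtbl[]:

/* Arithmetic: the result takes the "larger" operand's field and type (v2 here). */
static void
op_ADD_sl_ul(value_t *v1, value_t *v2, value_t *ret)
{
        ret->v.ul = v1->v.sl + v2->v.ul;
        ret->type.type = v2->type.type;
        ret->type.idx  = v2->type.idx;
        ret->type.size = v2->type.size;
}

/* Comparison: the result is always an unsigned long boolean. */
static void
op_EQ_sl_ul(value_t *v1, value_t *v2, value_t *ret)
{
        ret->v.ul = (v1->v.sl == v2->v.ul);
        ret->type.type = V_BASE;
        ret->type.idx  = B_UL;
        ret->type.size = 4;
}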
We use a table defined here in to generate an indirect table ++ that if indexed (from within sial_op.c) using : ++ ++ value_t * (func)(value_t *v1, value_t *v2) = table[type1][type2][op]; ++*/ ++static struct opss { ++ char *str; ++ char *acro; ++ int code; ++} opstbl[] = { ++ { "+", "ADD", ADD }, ++ { "-", "SUB", SUB }, ++ { "/", "DIV", DIV }, ++ { "*", "MUL", MUL }, ++ { "^", "XOR", XOR }, ++ { "%", "MOD", MOD }, ++ { "|", "OR", OR }, ++ { "&", "AND", AND }, ++ { "<<", "SHL", SHL }, ++ { ">>", "SHR", SHR }, ++ { "==", "EQ", EQ }, /* most be first bool */ ++ { ">", "GT", GT }, ++ { "<", "LT", LT }, ++ { ">=", "GE", GE }, ++ { "<=", "LE", LE }, ++ { "!=", "NE", NE }, ++}; ++ ++static char *typtbl[] = { "sc", "uc", "ss", "us", "sl", "ul", "sll", "ull" }; ++ ++#define NOPS (sizeof(opstbl)/sizeof(opstbl[0])) ++#define NTYPS (sizeof(typtbl)/sizeof(typtbl[0])) ++ ++int ++main() ++{ ++int i,j,k; ++ ++ printf("\ ++#include \"sial.h\"\n\ ++#include \"sial.tab.h\"\n\ ++/**************************************************************\n\ ++ This file is generated by a program.\n\ ++ Check out and modify libsial/mkbaseop.c instead !\n\ ++***************************************************************/\n"); ++ ++ ++ /* create all the functions for all combinaison */ ++ for(i=0;iv.%s = v1->v.%s %s v2->v.%s;\n" ++" ret->type.type=%s->type.type;\n" ++" ret->type.idx=%s->type.idx;\n" ++" ret->type.size=%s->type.size;\n" ++"}\n", ++ opstbl[k].acro, ++ typtbl[i], ++ typtbl[j], ++ j>=i?typtbl[j]:typtbl[i], ++ typtbl[i], ++ opstbl[k].str, ++ typtbl[j], ++ j>=i?"v2":"v1", ++ j>=i?"v2":"v1", ++ j>=i?"v2":"v1"); ++ ++ } else { ++ ++ printf("" ++"static void \n" ++"op_%s_%s_%s(value_t *v1,value_t *v2,value_t *ret)\n" ++"{\n" ++" ret->v.%s = ( v1->v.%s %s v2->v.%s );\n" ++" ret->type.type=V_BASE;\n" ++" ret->type.idx=B_UL;\n" ++" ret->type.size=4;\n" ++"}\n", ++ opstbl[k].acro, ++ typtbl[i], ++ typtbl[j], ++ "ul", ++ typtbl[i], ++ opstbl[k].str, ++ typtbl[j]); ++ } ++ ++ } ++ ++ } ++ ++ } ++ ++ /* create the array from within which the runtime functions ++ will indexed to get a function pointer */ ++ ++ printf("void (*opfuncs[%d][%d][%d])()={\n", NTYPS, NTYPS, NOPS); ++ ++ for(i=0;itype.idx][v2->type.idx][i])(v1,v2,ret);\n\ ++}\n", NOPS, NOPS); ++ exit(0); ++} +--- crash/extensions/libsial/sial_case.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/libsial/sial_case.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,125 @@ ++/* ++ * Copyright 2001 Silicon Graphics, Inc. All rights reserved. ++ */ ++/* ++ Set of functions to handle the case construct. 
++*/ ++#include "sial.h" ++ ++void ++sial_freecaseval(caseval_t*cv) ++{ ++ sial_free(cv); ++} ++ ++node_t* ++sial_caseval(int isdef, node_t*val) ++{ ++caseval_t*cv=sial_alloc(sizeof(caseval_t)); ++node_t*n=sial_newnode(); ++value_t *v; ++ ++ cv->isdef=isdef; ++ if(val) { ++ ++ v=NODE_EXE(val); ++ cv->val=unival(v); ++ sial_freeval(v); ++ NODE_FREE(val); ++ ++ } else cv->val=0; ++ ++ sial_setpos(&cv->pos); ++ ++ cv->next=0; ++ n->data=cv; ++ return n; ++} ++ ++node_t* ++sial_addcaseval(node_t*n, node_t*n2) ++{ ++caseval_t*cv=(caseval_t*)n->data; ++caseval_t*ncv=(caseval_t*)n2->data; ++ ++ sial_free(n); ++ ncv->next=cv; ++ return n2; ++} ++ ++void ++sial_freecase(void *vcl) ++{ ++caselist_t*cl=(caselist_t*)vcl; ++ ++ NODE_FREE(cl->stmt); ++ sial_free(cl); ++} ++ ++node_t* ++sial_newcase(node_t*nc, node_t* n) ++{ ++caseval_t*cv=(caseval_t*)nc->data; ++caselist_t*cl=sial_alloc(sizeof(caselist_t)); ++node_t*nn=sial_newnode(); ++ ++ ++ nn->data=cl; ++ nn->free=(ffct_t)sial_freecase; ++ ++ cl->vals=cv; ++ sial_free(nc); ++ ++ cl->stmt=n; ++ cl->next=0; ++ ++ sial_setpos(&cl->pos); ++ ++ return nn; ++} ++ ++node_t* ++sial_addcase(node_t*n, node_t*n2) ++{ ++caselist_t*lcl; ++caselist_t*ncl=(caselist_t*)n2->data; ++caselist_t*cl=(caselist_t*)n->data; ++ ++ for(lcl=cl; lcl->next; lcl=lcl->next); ++ ++ /* we need to add case in the order they are listed */ ++ lcl->next=ncl; ++ sial_free(n2); ++ ncl->next=0; ++ ++ sial_setpos(&ncl->pos); ++ ++ return n; ++} ++ ++int ++sial_docase(ull val, caselist_t*cl) ++{ ++caselist_t*defclp=0, *clp; ++ ++ ++ for(clp=cl;clp;clp=clp->next) { ++ ++ caseval_t*cvp; ++ ++ for(cvp=clp->vals; cvp; cvp=cvp->next) { ++ ++ if(cvp->val==val) goto out; ++ else if(cvp->isdef) defclp=clp; ++ } ++ } ++out: ++ if(clp || (clp=defclp)) { ++ ++ for(;clp;clp=clp->next) { ++ ++ if(clp->stmt) NODE_EXE(clp->stmt); ++ } ++ } ++ return 1; ++} +--- crash/extensions/echo.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/echo.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,114 @@ ++/* echo.c - simple example of a crash extension ++ * ++ * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. ++ * Copyright (C) 2002, 2003, 2004, 2005 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include "defs.h" /* From the crash source top-level directory */ ++ ++int _init(void); ++int _fini(void); ++ ++void cmd_echo(void); /* Declare the commands and their help data. */ ++char *help_echo[]; ++ ++static struct command_table_entry command_table[] = { ++ { "echo", cmd_echo, help_echo, 0 }, /* One or more commands, */ ++ { NULL } /* terminated by NULL, */ ++}; ++ ++ ++int ++_init(void) /* Register the command set. */ ++{ ++ register_extension(command_table); ++ return 1; ++} ++ ++/* ++ * The _fini() function is called if the shared object is unloaded. ++ * If desired, perform any cleanups here. 
++ */ ++int ++_fini(void) ++{ ++ return 1; ++} ++ ++ ++/* ++ * Arguments are passed to the command functions in the global args[argcnt] ++ * array. See getopt(3) for info on dash arguments. Check out defs.h and ++ * other crash commands for usage of the myriad of utility routines available ++ * to accomplish what your task. ++ */ ++void ++cmd_echo(void) ++{ ++ int c; ++ ++ while ((c = getopt(argcnt, args, "")) != EOF) { ++ switch(c) ++ { ++ default: ++ argerrs++; ++ break; ++ } ++ } ++ ++ if (argerrs) ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ ++ while (args[optind]) ++ fprintf(fp, "%s ", args[optind++]); ++ ++ fprintf(fp, "\n"); ++} ++ ++/* ++ * The optional help data is simply an array of strings in a defined format. ++ * For example, the "help echo" command will use the help_echo[] string ++ * array below to create a help page that looks like this: ++ * ++ * NAME ++ * echo - echoes back its arguments ++ * ++ * SYNOPSIS ++ * echo arg ... ++ * ++ * DESCRIPTION ++ * This command simply echoes back its arguments. ++ * ++ * EXAMPLE ++ * Echo back all command arguments: ++ * ++ * crash> echo hello, world ++ * hello, world ++ * ++ */ ++ ++char *help_echo[] = { ++ "echo", /* command name */ ++ "echoes back its arguments", /* short description */ ++ "arg ...", /* argument synopsis, or " " if none */ ++ ++ " This command simply echoes back its arguments.", ++ "\nEXAMPLE", ++ " Echo back all command arguments:\n", ++ " crash> echo hello, world", ++ " hello, world", ++ NULL ++}; ++ ++ +--- crash/extensions/sial.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/sial.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1021 @@ ++/* ++ * $Id: crash.patch,v 1.17 2008/01/23 20:55:40 crash Exp $ ++ * ++ * This file is part of lcrash, an analysis tool for Linux memory dumps. ++ * ++ * Created by Silicon Graphics, Inc. ++ * Contributions by IBM, and others ++ * ++ * Copyright (C) 1999 - 2005 Silicon Graphics, Inc. All rights reserved. ++ * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. See the file COPYING for more ++ * information. ++ */ ++ ++#include "gdb-6.1/gdb/defs.h" ++#include "target.h" ++#include "symtab.h" ++#include "gdbtypes.h" ++#include "gdbcore.h" ++#include "frame.h" ++#include "value.h" ++#include "symfile.h" ++#include "objfiles.h" ++#include "gdbcmd.h" ++#include "call-cmds.h" ++#include "gdb_regex.h" ++#include "expression.h" ++#include "language.h" ++#include "demangle.h" ++#include "inferior.h" ++#include "linespec.h" ++#include "source.h" ++#include "filenames.h" /* for FILENAME_CMP */ ++#include "objc-lang.h" ++ ++#include "hashtab.h" ++ ++#include "gdb_obstack.h" ++#include "block.h" ++#include "dictionary.h" ++ ++#include ++#include ++#include "gdb_string.h" ++#include "gdb_stat.h" ++#include ++#include "cp-abi.h" ++ ++#include ++#include ++#include ++#include ++ ++///////////////////////////////////////////////////////////////////////// ++// some stuff from crash's defs.h, file which cannot be included here. ++// Hate to do this but this is a quick port. ++// If anyone cares to work on the include and defs structure to make ++// this work cleanly... 
++// ++/* ++ * Global data (global_data.c) ++ */ ++extern char *args[]; ++extern int argcnt; ++extern int argerrs; ++#define SYNOPSIS (0x1) ++#define COMPLETE_HELP (0x2) ++#define PIPE_TO_LESS (0x4) ++#define KVADDR (0x1) ++#define QUIET (0x4) ++ ++typedef void (*cmd_func_t)(void); ++ ++struct command_table_entry { /* one for each command in menu */ ++ char *name; ++ cmd_func_t func; ++ char **help_data; ++ ulong flags; ++}; ++extern FILE *fp; ++extern char *crash_global_cmd(); ++ ++// ++///////////////////////////////////////////////////////////////////////// ++/* ++ This is the glue between the sial interpreter and crash. ++*/ ++ ++static int ++apigetmem(ull iaddr, void *p, int nbytes) ++{ ++ return readmem(iaddr, KVADDR, p, nbytes, NULL, QUIET); ++} ++ ++// Since crash is target dependant (build for the ++static uint8_t apigetuint8(void* ptr) ++{ ++uint8_t val; ++ if(!readmem((unsigned long)ptr, KVADDR, (char*)&val, sizeof val, NULL, QUIET)) return (uint8_t)-1; ++ return val; ++} ++ ++static uint16_t apigetuint16(void* ptr) ++{ ++uint16_t val; ++ if(!readmem((unsigned long)ptr, KVADDR, (char*)&val, sizeof val, NULL, QUIET)) return (uint16_t)-1; ++ return val; ++} ++ ++static uint32_t apigetuint32(void* ptr) ++{ ++uint32_t val; ++ if(!readmem((unsigned long)ptr, KVADDR, (char*)&val, sizeof val, NULL, QUIET)) return (uint32_t)-1; ++ return val; ++} ++ ++static uint64_t apigetuint64(void* ptr) ++{ ++uint64_t val; ++ if(!readmem((unsigned long)ptr, KVADDR, (char*)&val, sizeof val, NULL, QUIET)) return (uint64_t)-1; ++ return val; ++} ++ ++static int ++apiputmem(ull iaddr, void *p, int nbytes) ++{ ++ return 1; ++} ++ ++/* extract a complex type (struct, union and enum) */ ++static int ++apigetctype(int ctype, char *name, TYPE_S *tout) ++{ ++ struct symbol *sym; ++ struct type *type; ++ int v=0; ++ ++ sial_dbg_named(DBG_TYPE, name, 2, "Looking for type %d name [%s] in struct domain...", ctype, name); ++ sym = lookup_symbol(name, 0, STRUCT_DOMAIN, 0, (struct symtab **) NULL); ++ if(!sym) { ++ sial_dbg_named(DBG_TYPE, name, 2, "Not found.\nLooking for type %d name [%s] in var domain...", ctype, name); ++ sym = lookup_symbol(name, 0, VAR_DOMAIN, 0, (struct symtab **) NULL); ++ if(sym) { ++ sial_dbg_named(DBG_TYPE, name, 2, "found class=%d\n", sym->aclass); ++ if(sym->aclass == LOC_TYPEDEF) v=1; ++ } ++ } ++ ++ if (sym) { ++ type=sym->type; ++ if(sial_is_typedef(ctype) && v) goto match; ++ switch(TYPE_CODE(type)) { ++ case TYPE_CODE_TYPEDEF: case TYPE_CODE_INT: ++ if(sial_is_typedef(ctype)) goto match; break; ++ case TYPE_CODE_ENUM: if(sial_is_enum(ctype)) goto match; break; ++ case TYPE_CODE_STRUCT: if(sial_is_struct(ctype)) goto match; break; ++ case TYPE_CODE_UNION: if(sial_is_union(ctype)) goto match; break; ++ } ++ sial_dbg_named(DBG_TYPE, name, 2, "Found but no match.\n"); ++ } ++ else sial_dbg_named(DBG_TYPE, name, 2, "Not Found.\n"); ++ ++ return 0; ++ ++match: ++ sial_dbg_named(DBG_TYPE, name, 2, "Found.\n"); ++ /* populate */ ++ sial_type_settype(tout, ctype); ++ sial_type_setsize(tout, TYPE_LENGTH(type)); ++ sial_type_setidx(tout, (ull)(unsigned long)type); ++ sial_pushref(tout, 0); ++ return 1; ++} ++ ++/* set idx value to actual array indexes from specified size */ ++static void ++sial_setupidx(TYPE_S*t, int ref, int nidx, int *idxlst) ++{ ++ /* put the idxlst in index size format */ ++ if(nidx) { ++ ++ int i; ++ ++ for(i=0;itype; ++ } ++ } ++ ++ switch(TYPE_CODE(type)) { ++ ++ /* typedef inserts a level of reference to the 1'dactual type */ ++ case TYPE_CODE_PTR: ++ ++ ref++; ++ 
type=TYPE_TARGET_TYPE(type); ++ /* this could be a void*, in which case the drill down stops here */ ++ if(!type) { ++ ++ /* make it a char* */ ++ sial_parsetype("char", t, ref); ++ return 0; ++ ++ } ++ break; ++ ++ /* handle pointer to functions */ ++ case TYPE_CODE_FUNC: ++ ++ fctflg=1; ++ type=TYPE_TARGET_TYPE(type); ++ break; ++ ++ /* Is this an array ? if so then just skip this type info and ++ we only need information on the elements themselves */ ++ case TYPE_CODE_ARRAY: ++ if(!idxlst) idxlst=sial_calloc(sizeof(int)*(MAXIDX+1)); ++ if(nidx >= MAXIDX) sial_error("Too many indexes! max=%d\n", MAXIDX); ++ if (TYPE_LENGTH (type) > 0 && TYPE_LENGTH (TYPE_TARGET_TYPE (type)) > 0) ++ { ++ idxlst[nidx++]=TYPE_LENGTH (type) / TYPE_LENGTH (check_typedef(TYPE_TARGET_TYPE (type))); ++ } ++ type=TYPE_TARGET_TYPE(type); ++ break; ++ ++ /* typedef points to a typedef itself */ ++ case TYPE_CODE_TYPEDEF: ++ type=TYPE_TARGET_TYPE(type); ++ break; ++ ++ case TYPE_CODE_INT: ++ ++ sial_parsetype(tstr=TYPE_NAME(type), t, 0); ++ type=0; ++ break; ++ ++ case TYPE_CODE_UNION: ++ sial_type_mkunion(t); ++ goto label; ++ ++ case TYPE_CODE_ENUM: ++ sial_type_mkenum(t); ++ goto label; ++ ++ case TYPE_CODE_STRUCT: ++ { ++ sial_type_mkstruct(t); ++ ++label: ++ sial_type_setsize(t, TYPE_LENGTH(type)); ++ sial_type_setidx(t, (ull)(unsigned long)type); ++ tstr=TYPE_TAG_NAME(type); ++ type=0; ++ } ++ break; ++ ++ /* we don;t have all the info about it */ ++ case TYPE_CODE_VOID: ++ sial_parsetype("int", t, 0); ++ type=0; ++ break; ++ ++ ++ default: ++ sial_error("Oops drilldowntype"); ++ break; ++ } ++ ++ ++ } ++ sial_setupidx(t, ref, nidx, idxlst); ++ if(fctflg) sial_type_setfct(t, 1); ++ sial_pushref(t, ref+(nidx?1:0)); ++ if(tstr) return sial_strdup(tstr); ++ return sial_strdup(""); ++} ++ ++static char * ++apigetrtype(ull idx, TYPE_S *t) ++{ ++ return drilldowntype((struct type*)(unsigned long)(idx), t); ++} ++ ++/* ++ Return the name of a symbol at an address (if any) ++*/ ++static char* ++apifindsym(char *p) ++{ ++ return NULL; ++} ++ ++ ++/* ++ Get the type, size and position information for a member of a structure. ++*/ ++static char* ++apimember(char *mname, ull tnum, TYPE_S *tm, MEMBER_S *m, ull *lnum) ++{ ++struct type *type=(struct type*)(unsigned long)tnum; ++int midx; ++#define LASTNUM (*lnum) ++ ++ /* if we're being asked the next member in a getfirst/getnext sequence */ ++ if(mname && !mname[0] && LASTNUM) { ++ ++ midx = LASTNUM; ++ ++ } else { ++ ++ if (TYPE_CODE(type) == TYPE_CODE_TYPEDEF) { ++ return 0; ++ } ++ if ((TYPE_CODE(type) != TYPE_CODE_STRUCT) && (TYPE_CODE(type) != TYPE_CODE_UNION)) { ++ return 0; ++ } ++ midx=0; ++ } ++ while(midx < TYPE_NFIELDS(type)) { ++ ++ if (!mname || !mname[0] || !strcmp(mname, TYPE_FIELD_NAME(type, midx))) { ++ ++ check_typedef(TYPE_FIELD_TYPE(type, midx)); ++ sial_member_soffset(m, TYPE_FIELD_BITPOS(type, midx)/8); ++ sial_member_ssize(m, TYPE_FIELD_TYPE(type, midx)->length); ++ sial_member_snbits(m, TYPE_FIELD_BITSIZE(type, midx)); ++ sial_member_sfbit(m, TYPE_FIELD_BITSIZE(type, midx)); ++ sial_member_sname(m, TYPE_FIELD_NAME(type, midx)); ++ LASTNUM=midx+1; ++ return drilldowntype(TYPE_FIELD_TYPE(type, midx), tm); ++ } ++ midx++; ++ } ++ return 0; ++} ++ ++/* ++ This function gets the proper allignment value for a type. 
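apialignment() below implements the usual C rule: scalars, enums and pointers align to their own size, and an aggregate aligns to the strictest of its members, recursively. For the common SysV x86-64 ABI the rule can be spot-checked with C11 _Alignof; this is an independent illustration with made-up structure names, not code from the patch:

#include <assert.h>
#include <stdalign.h>

struct inner { char c; double d; };           /* alignment = alignof(double) */
struct outer { char tag; struct inner in; };  /* inherits the member's value */

int main(void)
{
        static_assert(alignof(struct inner) == alignof(double), "max member");
        static_assert(alignof(struct outer) == alignof(double), "recursive");
        return 0;
}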
++*/ ++static int ++apialignment(ull idx) ++{ ++struct type *type=(struct type *)(unsigned long)idx; ++ ++ while(1) ++ { ++ switch(TYPE_CODE(type)) { ++ ++ case TYPE_CODE_ARRAY: case TYPE_CODE_TYPEDEF: ++ type=TYPE_TARGET_TYPE(type); ++ break; ++ ++ case TYPE_CODE_STRUCT: ++ case TYPE_CODE_UNION: ++ { ++ int max=0, cur; ++ int midx=0; ++ ++ while(midx < TYPE_NFIELDS(type)) { ++ cur=apialignment((ull)(unsigned long)TYPE_FIELD_TYPE(type, midx)); ++ if(cur > max) max=cur; ++ midx++; ++ } ++ return max; ++ } ++ ++ ++ case TYPE_CODE_PTR: ++ case TYPE_CODE_ENUM: ++ case TYPE_CODE_INT: ++ ++ return TYPE_LENGTH (type); ++ ++ default: ++ ++ sial_error("Oops apialignment"); ++ } ++ } ++} ++ ++/* get the value of a symbol */ ++static int ++apigetval(char *name, ull *val) ++{ ++ if (symbol_exists(name)) { ++ *val=symbol_value(name); ++ return 1; ++ } ++ return 0; ++} ++ ++/* ++ Get the list of enum symbols. ++*/ ++ENUM_S* ++apigetenum(char *name) ++{ ++ struct symbol *sym; ++ ++ sym = lookup_symbol(name, 0, STRUCT_DOMAIN, 0, (struct symtab **) NULL); ++ if (sym && TYPE_CODE(sym->type)==TYPE_CODE_ENUM) { ++ ENUM_S *et=0; ++ struct type *type=sym->type; ++ int n=0; ++ while(n < TYPE_NFIELDS (type)) { ++ et=sial_add_enum(et, sial_strdup(TYPE_FIELD_NAME(type, n)), TYPE_FIELD_BITPOS(type, n)); ++ n++; ++ } ++ return et; ++ } ++ return 0; ++} ++ ++/* ++ Return the list of preprocessor defines. ++ For Irix we have to get the die for a startup.c file. ++ Of dwarf type DW_TAG_compile_unit. ++ the DW_AT_producer will contain the compile line. ++ ++ We then need to parse that line to get all the -Dname[=value] ++*/ ++DEF_S * ++apigetdefs(void) ++{ ++DEF_S *dt=0; ++int i; ++static struct linuxdefs_s { ++ ++ char *name; ++ char *value; ++ ++} linuxdefs[] = { ++ ++ {"crash", "1"}, ++ {"linux", "1"}, ++ {"__linux", "1"}, ++ {"__linux__", "1"}, ++ {"unix", "1"}, ++ {"__unix", "1"}, ++ {"__unix__", "1"}, ++ // helper macros ++ {"LINUX_2_2_16", "(LINUX_RELEASE==0x020210)"}, ++ {"LINUX_2_2_17", "(LINUX_RELEASE==0x020211)"}, ++ {"LINUX_2_4_0", "(LINUX_RELEASE==0x020400)"}, ++ {"LINUX_2_2_X", "(((LINUX_RELEASE) & 0xffff00) == 0x020200)"}, ++ {"LINUX_2_4_X", "(((LINUX_RELEASE) & 0xffff00) == 0x020400)"}, ++ {"LINUX_2_6_X", "(((LINUX_RELEASE) & 0xffff00) == 0x020600)"}, ++#ifdef i386 ++ {"i386", "1"}, ++ {"__i386", "1"}, ++ {"__i386__", "1"}, ++#endif ++#ifdef s390 ++ {"s390", "1"}, ++ {"__s390", "1"}, ++ {"__s390__", "1"}, ++#endif ++#ifdef s390x ++ {"s390x", "1"}, ++ {"__s390x", "1"}, ++ {"__s390x__", "1"}, ++#endif ++#ifdef __ia64__ ++ {"ia64", "1"}, ++ {"__ia64", "1"}, ++ {"__ia64__", "1"}, ++ {"__LP64__", "1"}, ++ {"_LONGLONG", "1"}, ++ {"__LONG_MAX__", "9223372036854775807L"}, ++#endif ++#ifdef ppc64 ++ {"ppc64", "1"}, ++ {"__ppc64", "1"}, ++ {"__ppc64__", "1"}, ++#endif ++ }; ++ ++static char *untdef[] = { ++ "clock", ++ "mode", ++ "pid", ++ "uid", ++ "xtime", ++ "init_task", ++ "size", ++ "type", ++ "level", ++ 0 ++}; ++ ++#if 0 ++How to extract basic set of -D flags from the kernel image ++ ++ prod=sial_strdup(kl_getproducer()); ++ for(p=prod; *p; p++) { ++ ++ if(*p=='-' && *(p+1)=='D') { ++ ++ char *def=p+2; ++ ++ while(*p && *p != '=' && *p != ' ') p++; ++ ++ if(!*p || *p == ' ') { ++ ++ *p='\0'; ++ dt=sial_add_def(dt, sial_strdup(def), sial_strdup("1")); ++ ++ } else { ++ ++ char *val=p+1; ++ ++ *p++='\0'; ++ while(*p && *p != ' ') p++; ++ *p='\0'; ++ ++ dt=sial_add_def(dt, sial_strdup(def), sial_strdup(val)); ++ } ++ } ++ } ++#endif ++ ++ /* remove some tdef with very usual identifier. 
++ could also be cases where the kernel defined a type and variable with same name e.g. xtime. ++ the same can be accomplished in source using #undef or forcing the evaluation of ++ a indentifier as a variable name ex: __var(xtime). ++ ++ I tried to make the grammar as unambiqguous as I could. ++ ++ If this becomes to much of a problem I might diable usage of all image typedefs usage in sial! ++ */ ++ { ++ char **tdefname=untdef; ++ while(*tdefname) sial_addneg(*tdefname++);; ++ ++ } ++ ++ /* insert constant defines from list above */ ++ for(i=0;i\n" ++ , S_MAJOR, S_MINOR); ++} ++ ++static void ++run_callback(void) ++{ ++extern char *crash_global_cmd(); ++FILE *ofp = NULL; ++ ++ if (fp) { ++ ofp = sial_getofile(); ++ sial_setofile(fp); ++ } ++ ++ sial_cmd(crash_global_cmd(), args, argcnt); ++ ++ if (ofp) ++ sial_setofile(ofp); ++} ++ ++ ++void ++edit_cmd(void) ++{ ++int c, file=0; ++ while ((c = getopt(argcnt, args, "lf")) != EOF) { ++ switch(c) ++ { ++ case 'l': ++ sial_vilast(); ++ return; ++ break; ++ case 'f': ++ file++; ++ break; ++ default: ++ argerrs++; ++ break; ++ } ++ } ++ ++ if (argerrs) ++ cmd_usage(crash_global_cmd(), SYNOPSIS); ++ ++ else if(args[optind]) { ++ while(args[optind]) { ++ sial_vi(args[optind++], file); ++ } ++ } ++ else cmd_usage(crash_global_cmd(), SYNOPSIS); ++} ++ ++char *edit_help[]={ ++ "edit", ++ "Start a $EDITOR session of a sial function or file", ++ "<-f fileName>|", ++ "This command can be use during a tight development cycle", ++ "where frequent editing->run->editing sequences are executed.", ++ "To edit a known sial macro file use the -f option. To edit the file", ++ "at the location of a known function's declaration omit the -f option.", ++ "Use a single -l option to be brought to the last compile error location.", ++ "", ++ "EXAMPLES:", ++ " %s> edit -f ps", ++ " %s> edit ps", ++ " %s> edit ps_opt", ++ " %s> edit -l", ++ NULL ++}; ++ ++ ++// these control debug mode when parsing (pre-processor and compile) ++int sialdebug=0, sialppdebug=0; ++ ++void ++load_cmd(void) ++{ ++ if(argcnt< 2) cmd_usage(crash_global_cmd(), SYNOPSIS); ++ else { ++ sial_setofile(fp); ++ sial_loadunload(1, args[1], 0); ++ } ++} ++ ++char *load_help[]={ ++ "load", ++ "Load a sial file", ++ "|", ++ " Load a file or a directory. In the case of a directory", ++ " all files in that directory will be loaded.", ++ NULL ++ ++}; ++ ++void ++unload_cmd(void) ++{ ++ if(argcnt < 2) cmd_usage(crash_global_cmd(), SYNOPSIS); ++ else sial_loadunload(0, args[1], 0); ++} ++ ++char *unload_help[]={ ++ "unload", ++ "Unload a sial file", ++ "|", ++ " Unload a file or a directory. In the case of a directory", ++ " all files in that directory will be unloaded.", ++ NULL ++}; ++ ++void ++sdebug_cmd(void) ++{ ++ if(argcnt < 2) sial_msg("Current sial debug level is %d\n", sial_getdbg()); ++ else sial_setdbg(atoi(args[1])); ++} ++ ++char *sdebug_help[]={ ++ "sdebug", ++ "Print or set sial debug level", ++ "", ++ " Set the debug of sial. Without any parameter, shows the current debug level.", ++ NULL ++}; ++ ++void ++sname_cmd(void) ++{ ++ if(argcnt < 2) { ++ if(sial_getname()) sial_msg("Current sial name match is '%s'\n", sial_getname()); ++ else sial_msg("No name match specified yet.\n"); ++ } else sial_setname(args[1]); ++} ++ ++char *sname_help[]={ ++ "sname", ++ "Print or set sial name match.", ++ "", ++ " Set sial name string for matches. Debug messages that are object oriented", ++ " will only be displayed if the object name (struct, type, ...) 
matches this", ++ " value.", ++ NULL ++}; ++ ++void ++sclass_cmd(void) ++{ ++ if(argcnt < 2) { ++ char **classes=sial_getclass(); ++ sial_msg("Current sial classes are :"); ++ while(*classes) sial_msg("'%s' ", *classes++); ++ sial_msg("\n"); ++ ++ } ++ else { ++ int i; ++ for(i=1; i[, ]", ++ " Set sial debug classes. Only debug messages that are in the specified classes", ++ " will be displayed.", ++ NULL ++}; ++ ++#define NCMDS 100 ++static struct command_table_entry command_table[NCMDS] = { ++ ++ {"edit", edit_cmd, edit_help}, ++ {"load", load_cmd, load_help}, ++ {"unload", unload_cmd, unload_help}, ++ {"sdebug", sdebug_cmd, sdebug_help}, ++ {"sname", sname_cmd, sname_help}, ++ {"sclass", sclass_cmd, sclass_help}, ++ {(char *)0 } ++}; ++ ++static void ++add_sial_cmd(char *name, void (*cmd)(void), char **help, int flags) ++{ ++struct command_table_entry *cp; ++struct command_table_entry *crash_cmd_table(); ++ ++ // check for a clash with native commands ++ for (cp = crash_cmd_table(); cp->name; cp++) { ++ if (!strcmp(cp->name, name)) { ++ sial_msg("Sial command name '%s' conflicts with native crash command.\n", name); ++ return; ++ } ++ } ++ ++ // make sure we have enough space for the new command ++ if(!command_table[NCMDS-2].name) { ++ for (cp = command_table; cp->name; cp++); ++ cp->name=sial_strdup(name); ++ cp->func=cmd; ++ cp->help_data=help; ++ cp->flags=flags; ++ } ++} ++ ++static void ++rm_sial_cmd(char *name) ++{ ++struct command_table_entry *cp, *end; ++ ++ for (cp = command_table; cp->name; cp++) { ++ if (!strcmp(cp->name, name)) { ++ sial_free(cp->name); ++ memmove(cp, cp+1, sizeof *cp *(NCMDS-(cp-command_table)-1)); ++ break; ++ } ++ } ++} ++ ++/* ++ This function is called for every new function ++ generated by a load command. This enables us to ++ register new commands. ++ ++ We check here is the functions: ++ ++ fname_help() ++ fname_opt() ++ and ++ fname_usage() ++ ++ exist, and if so then we have a new command. ++ Then we associated (register) a function with ++ the standard sial callbacks. ++*/ ++void ++reg_callback(char *name, int load) ++{ ++char fname[MAX_SYMNAMELEN+sizeof("_usage")+1]; ++char *help_str, *opt_str; ++char **help=malloc(sizeof *help * 5); ++ ++ if(!help) return; ++ snprintf(fname, sizeof(fname), "%s_help", name); ++ if(sial_chkfname(fname, 0)) { ++ help_str=sial_strdup((char*)(unsigned long)sial_exefunc(fname, 0)); ++ snprintf(fname, sizeof(fname), "%s_usage", name); ++ if(sial_chkfname(fname, 0)) { ++ if(load) { ++ opt_str=sial_strdup((char*)(unsigned long)sial_exefunc(fname, 0)); ++ help[0]=sial_strdup(name); ++ help[1]=""; ++ help[2]=sial_strdup(opt_str); ++ help[3]=sial_strdup(help_str); ++ help[4]=0; ++ add_sial_cmd(name, run_callback, help, 0); ++ return; ++ } ++ else rm_sial_cmd(name); ++ } ++ sial_free(help_str); ++ } ++ free(help); ++ return; ++} ++ ++/* ++ * The _fini() function is called if the shared object is unloaded. ++ * If desired, perform any cleanups here. ++ */ ++void _fini() ++{ ++ // need to unload any files we have loaded ++ ++} ++ ++VALUE_S *curtask(VALUE_S *v, ...) ++{ ++unsigned long get_curtask(); ++ return sial_makebtype((ull)get_curtask()); ++} ++ ++_init() /* Register the command set. 
*/ ++{ ++#define LCDIR "/usr/share/sial/crash" ++#define LCIDIR "include" ++#define LCUDIR ".sial" ++ ++ ++ if(sial_open() >= 0) { ++ ++ char *path, *ipath; ++ char *homepath=0; ++ char *home=getenv("HOME"); ++ ++ /* set api, default size, and default sign for types */ ++#ifdef i386 ++#define SIAL_ABI ABI_INTEL_X86 ++#else ++#ifdef __ia64__ ++#define SIAL_ABI ABI_INTEL_IA ++#else ++#ifdef __x86_64__ ++#define SIAL_ABI ABI_INTEL_IA ++#else ++#ifdef __s390__ ++#define SIAL_ABI ABI_S390 ++#else ++#ifdef __s390x__ ++#define SIAL_ABI ABI_S390X ++#else ++#ifdef PPC64 ++#define SIAL_ABI ABI_PPC64 ++#else ++#error sial: Unkown ABI ++#endif ++#endif ++#endif ++#endif ++#endif ++#endif ++ sial_apiset(&icops, SIAL_ABI, sizeof(long), 0); ++ ++ sial_version(); ++ ++ /* set the macro search path */ ++ if(!(path=getenv("SIAL_MPATH"))) { ++ ++ if(home) { ++ ++ path=sial_alloc(strlen(home)+sizeof(LCUDIR)+sizeof(LCDIR)+4); ++ homepath=sial_alloc(strlen(home)+sizeof(LCUDIR)+2); ++ ++ /* build a path for call to sial_load() */ ++ strcpy(homepath, home); ++ strcat(homepath, "/"); ++ strcat(homepath, LCUDIR); ++ ++ /* built the official path */ ++ strcpy(path, LCDIR); ++ strcat(path, ":"); ++ strcat(path, home); ++ strcat(path, "/"); ++ strcat(path, LCUDIR); ++ } ++ else path=LCDIR; ++ } ++ sial_setmpath(path); ++ ++ fprintf(fp, "\tLoading sial commands from %s .... ", ++ path); ++ ++ /* include path */ ++ if(!(ipath=getenv("SIAL_IPATH"))) { ++ ++ if(home) { ++ ++ ipath=sial_alloc(strlen(home)+sizeof(LCDIR)+sizeof(LCUDIR)+(sizeof(LCIDIR)*2)+(sizeof("/usr/include")+2)+6); ++ ++ /* built the official path */ ++ strcpy(ipath, LCDIR); ++ strcat(ipath, "/"LCIDIR":"); ++ strcat(ipath, home); ++ strcat(ipath, "/"); ++ strcat(ipath, LCUDIR); ++ strcat(ipath, "/"LCIDIR); ++ strcat(ipath, ":/usr/include"); ++ } ++ else ipath=LCDIR"/"LCIDIR; ++ } ++ sial_setipath(ipath); ++ ++ /* set the new function callback */ ++ sial_setcallback(reg_callback); ++ ++ /* load the default macros */ ++ sial_loadall(); ++ ++ /* load some sial specific commands */ ++ register_extension(command_table); ++ ++ /* some builtins */ ++ sial_builtin("int curtask()", curtask); ++ ++ fprintf(fp, "Done.\n"); ++ } ++ return 1; ++} +--- crash/extensions/dminfo.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/dminfo.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1534 @@ ++/* dminfo.c - crash extension module for device-mapper analysis ++ * ++ * Copyright (C) 2005 NEC Corporation ++ * Copyright (C) 2005 Red Hat, Inc. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include "defs.h" /* From the crash source top-level directory */ ++ ++int _init(void); ++int _fini(void); ++ ++/* ++ * Indices of size-offset array (Used by GET_xxx macros) ++ * ++ * DM__ ++ */ ++enum { ++ DM_hash_cell_name_list = 0, ++ DM_hash_cell_name, ++ DM_hash_cell_md, ++ ++ DM_mapped_device_disk, ++ DM_mapped_device_map, ++ ++ DM_gendisk_major, ++ DM_gendisk_first_minor, ++ DM_gendisk_disk_name, ++ ++ DM_dm_table_num_targets, ++ DM_dm_table_targets, ++ DM_dm_table_devices, ++ ++ DM_dm_target_type, ++ DM_dm_target_begin, ++ DM_dm_target_len, ++ DM_dm_target_private, ++ ++ DM_dm_dev_count, ++ DM_dm_dev_bdev, ++ DM_dm_dev_name, ++ ++ DM_dm_io_md, ++ DM_dm_io_bio, ++ ++ DM_target_type_name, ++ ++ DM_target_io_io, ++ ++ DM_block_device_bd_disk, ++ ++ DM_bio_bi_private, ++ ++ DM_bio_list_head, ++ ++ DM_linear_c_dev, ++ DM_linear_c_start, ++ ++ DM_multipath_hw_handler, ++ DM_multipath_nr_priority_groups, ++ DM_multipath_priority_groups, ++ DM_multipath_nr_valid_paths, ++ DM_multipath_current_pg, ++ DM_multipath_queue_if_no_path, ++ DM_multipath_queue_size, ++ ++ DM_hw_handler_type, ++ DM_hw_handler_type_name, ++ ++ DM_priority_group_ps, ++ DM_priority_group_pg_num, ++ DM_priority_group_bypassed, ++ DM_priority_group_nr_pgpaths, ++ DM_priority_group_pgpaths, ++ ++ DM_path_selector_type, ++ DM_path_selector_type_name, ++ ++ DM_pgpath_fail_count, ++ DM_pgpath_path, ++ ++ DM_path_dev, ++ DM_path_is_active, ++ ++ DM_mirror_set_rh, ++ DM_mirror_set_reads, ++ DM_mirror_set_writes, ++ DM_mirror_set_in_sync, ++ DM_mirror_set_nr_mirrors, ++ DM_mirror_set_mirror, ++ ++ DM_region_hash_log, ++ DM_region_hash_quiesced_regions, ++ DM_region_hash_recovered_regions, ++ ++ DM_dirty_log_type, ++ DM_dirty_log_type_name, ++ ++ DM_mirror_error_count, ++ DM_mirror_dev, ++ DM_mirror_offset, ++ ++ DM_crypt_config_dev, ++ DM_crypt_config_iv_mode, ++ DM_crypt_config_tfm, ++ DM_crypt_config_key_size, ++ DM_crypt_config_key, ++ ++ DM_crypto_tfm_crt_u, ++ DM_crypto_tfm___crt_alg, ++ ++ DM_crypto_alg_cra_name, ++ ++ DM_cipher_tfm_cit_mode, ++ ++ DM_stripe_c_stripes, ++ DM_stripe_c_chunk_mask, ++ DM_stripe_c_stripe, ++ ++ DM_stripe_dev, ++ ++ DM_dm_snapshot_origin, ++ DM_dm_snapshot_cow, ++ DM_dm_snapshot_chunk_size, ++ DM_dm_snapshot_valid, ++ DM_dm_snapshot_type, ++ ++ NR_DMINFO_MEMBER_TABLE_ENTRY ++}; ++ ++/* Size-offset array for structure's member */ ++static struct dminfo_member_entry { ++ unsigned long offset; ++ unsigned long size; ++} mbr_ary[NR_DMINFO_MEMBER_TABLE_ENTRY]; ++ ++/* ++ * Macros to retrieve data of given structure's member ++ * ++ * Macros except for the MSG assume 'struct s' is at 'addr' ++ */ ++#define MSG(msg, s, m) msg ": " s "." m ++ ++/* Initialize the size-offset array */ ++#define INIT_MBR_TABLE(s, m) \ ++ do { \ ++ if (!mbr_ary[DM_##s##_##m].size) { \ ++ mbr_ary[DM_##s##_##m].offset = MEMBER_OFFSET("struct " #s, #m); \ ++ mbr_ary[DM_##s##_##m].size = MEMBER_SIZE("struct " #s, #m); \ ++ } \ ++ } while (0) ++ ++/* ++ * Store the data of member m in ret. ++ * Initialize the size-offset array for the member m if needed. ++ */ ++#define GET_VALUE(addr, s, m, ret) \ ++ do { \ ++ INIT_MBR_TABLE(s, m); \ ++ if (sizeof(ret) < mbr_ary[DM_##s##_##m].size) \ ++ fprintf(fp, "%s\n", \ ++ MSG("ERROR: GET_VALUE size_check", #s, #m)); \ ++ readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &ret, \ ++ mbr_ary[DM_##s##_##m].size, MSG("GET_VALUE", #s, #m), \ ++ FAULT_ON_ERROR);\ ++ } while (0) ++ ++/* ++ * Store the address of member m in ret. 
++ * Initialize the size-offset array for the member m if needed. ++ */ ++#define GET_ADDR(addr, s, m, ret) \ ++ do { \ ++ INIT_MBR_TABLE(s, m); \ ++ ret = addr + mbr_ary[DM_##s##_##m].offset; \ ++ } while (0) ++ ++/* ++ * Store the string data of member m in ret. ++ * Initialize the size-offset array for the member m if needed. ++ */ ++#define GET_STR(addr, s, m, ret, len) \ ++ do { \ ++ INIT_MBR_TABLE(s, m); \ ++ if (!read_string(addr + mbr_ary[DM_##s##_##m].offset, ret, len - 1)) \ ++ fprintf(fp, "%s\n", MSG("ERROR: GET_STR", #s, #m)); \ ++ } while (0) ++ ++/* ++ * Store the string data pointed by member m in ret. ++ * Initialize the size-offset array for the member m if needed. ++ */ ++#define GET_PTR_STR(addr, s, m, ret, len) \ ++ do { \ ++ unsigned long tmp; \ ++ INIT_MBR_TABLE(s, m); \ ++ readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &tmp, \ ++ mbr_ary[DM_##s##_##m].size, MSG("GET_PTR_STR", #s, #m),\ ++ FAULT_ON_ERROR);\ ++ if (!read_string(tmp, ret, len - 1)) \ ++ fprintf(fp, "%s\n", MSG("ERROR: GET_PTR_STR", #s, #m));\ ++ } while (0) ++ ++/* ++ * Utility function/macro to walk the list ++ */ ++static unsigned long ++get_next_from_list_head(unsigned long addr) ++{ ++ unsigned long ret; ++ ++ readmem(addr + OFFSET(list_head_next), KVADDR, &ret, sizeof(void *), ++ MSG("get_next_from_list_head", "list_head", "next"), ++ FAULT_ON_ERROR); ++ ++ return ret; ++} ++ ++#define list_for_each(next, head, last) \ ++ for (next = get_next_from_list_head(head), last = 0UL; \ ++ next && next != head && next != last; \ ++ last = next, next = get_next_from_list_head(next)) ++ ++/* ++ * device-mapper target analyzer ++ * ++ * device-mapper has various target driver: linear, mirror, multipath, etc. ++ * Information specific to target is stored in its own way. ++ * Target-specific analyzer is provided for each target driver for this reason. 
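(Illustrative sketch, outside the patch.) The comment above describes a small registry: each target driver contributes a dminfo_target_analyzer with ready/show_table/show_status/show_queue callbacks, registration pushes it onto a singly linked list headed by analyzers_head, and dminfo later looks the analyzer up by the dm target's type name before dispatching. A reduced, stand-alone model of that lookup-and-dispatch pattern; the struct and function names are shortened and the sample address is arbitrary:

    #include <stdio.h>
    #include <string.h>

    struct analyzer {
        struct analyzer *next;
        const char *target_name;
        void (*show_table)(unsigned long target);
    };

    static struct analyzer head;                  /* plays the role of analyzers_head */

    static void register_analyzer(struct analyzer *a)
    {
        a->next = head.next;                      /* push onto the singly linked list */
        head.next = a;
    }

    static struct analyzer *find_analyzer(const char *name)
    {
        struct analyzer *a;
        for (a = head.next; a; a = a->next)
            if (!strcmp(a->target_name, name))
                return a;
        return NULL;                              /* unknown target type */
    }

    static void show_linear(unsigned long t) { printf("linear target at %lx\n", t); }
    static struct analyzer linear = { NULL, "linear", show_linear };

    int main(void)
    {
        struct analyzer *a;

        register_analyzer(&linear);
        a = find_analyzer("linear");              /* keyed by the dm target type name */
        if (a)
            a->show_table(0xc4866280UL);          /* arbitrary sample address */
        return 0;
    }

The same shape is what _init() sets up at the end of this file: one registration call per supported target type, and unknown types simply fall through with no detail output.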
++ */ ++static struct dminfo_target_analyzer { ++ struct dminfo_target_analyzer *next; ++ char *target_name; ++ int (*ready) (void); /* returns true if analyzer is available */ ++ void (*show_table) (unsigned long); /* display table info */ ++ void (*show_status) (unsigned long); /* display status info */ ++ void (*show_queue) (unsigned long); /* display queued I/O info */ ++} analyzers_head; ++ ++static void ++dminfo_register_target_analyzer(struct dminfo_target_analyzer *ta) ++{ ++ ta->next = analyzers_head.next; ++ analyzers_head.next = ta; ++} ++ ++static struct ++dminfo_target_analyzer *find_target_analyzer(char *target_type) ++{ ++ struct dminfo_target_analyzer *ta; ++ ++ for (ta = analyzers_head.next; ta; ta = ta->next) ++ if (!strcmp(ta->target_name, target_type)) ++ return ta; ++ ++ return NULL; ++} ++ ++/* ++ * zero target ++ */ ++static int ++zero_ready(void) ++{ ++ return 1; ++} ++ ++static void ++zero_show_table(unsigned long target) ++{ ++ unsigned long long start, len; ++ ++ /* Get target information */ ++ GET_VALUE(target, dm_target, begin, start); ++ GET_VALUE(target, dm_target, len, len); ++ ++ fprintf(fp, " begin:%llu len:%llu", start, len); ++} ++ ++static void ++zero_show_status(unsigned long target) ++{ ++ /* zero target has no status */ ++ fprintf(fp, " No status info"); ++} ++ ++static void ++zero_show_queue(unsigned long target) ++{ ++ /* zero target has no queue */ ++ fprintf(fp, " No queue info"); ++} ++ ++static struct dminfo_target_analyzer zero_analyzer = { ++ .target_name = "zero", ++ .ready = zero_ready, ++ .show_table = zero_show_table, ++ .show_status = zero_show_status, ++ .show_queue = zero_show_queue ++}; ++ ++/* ++ * error target ++ */ ++static int ++error_ready(void) ++{ ++ return 1; ++} ++ ++static void ++error_show_table(unsigned long target) ++{ ++ unsigned long long start, len; ++ ++ /* Get target information */ ++ GET_VALUE(target, dm_target, begin, start); ++ GET_VALUE(target, dm_target, len, len); ++ ++ fprintf(fp, " begin:%llu len:%llu", start, len); ++} ++ ++static void ++error_show_status(unsigned long target) ++{ ++ /* error target has no status */ ++ fprintf(fp, " No status info"); ++} ++ ++static void ++error_show_queue(unsigned long target) ++{ ++ /* error target has no queue */ ++ fprintf(fp, " No queue info"); ++} ++ ++static struct dminfo_target_analyzer error_analyzer = { ++ .target_name = "error", ++ .ready = error_ready, ++ .show_table = error_show_table, ++ .show_status = error_show_status, ++ .show_queue = error_show_queue ++}; ++ ++/* ++ * linear target ++ */ ++static int ++linear_ready(void) ++{ ++ static int debuginfo = 0; ++ ++ if (debuginfo) ++ return 1; ++ ++ if (STRUCT_EXISTS("struct linear_c")) { ++ debuginfo = 1; ++ return 1; ++ } else ++ fprintf(fp, "No such struct info: linear_c"); ++ ++ return 0; ++} ++ ++static void ++linear_show_table(unsigned long target) ++{ ++ unsigned long lc, dm_dev; ++ unsigned long long start, len, offset; ++ char devt[BUFSIZE]; ++ ++ /* Get target information */ ++ GET_VALUE(target, dm_target, begin, start); ++ GET_VALUE(target, dm_target, len, len); ++ GET_VALUE(target, dm_target, private, lc); ++ GET_VALUE(lc, linear_c, dev, dm_dev); ++ GET_STR(dm_dev, dm_dev, name, devt, BUFSIZE); ++ GET_VALUE(lc, linear_c, start, offset); ++ ++ fprintf(fp, " begin:%llu len:%llu dev:%s offset:%llu", ++ start, len, devt, offset); ++} ++ ++static void ++linear_show_status(unsigned long target) ++{ ++ /* linear target has no status */ ++ fprintf(fp, " No status info"); ++} ++ ++static void 
++linear_show_queue(unsigned long target) ++{ ++ /* linear target has no I/O queue */ ++ fprintf(fp, " No queue info"); ++} ++ ++static struct dminfo_target_analyzer linear_analyzer = { ++ .target_name = "linear", ++ .ready = linear_ready, ++ .show_table = linear_show_table, ++ .show_status = linear_show_status, ++ .show_queue = linear_show_queue ++}; ++ ++/* ++ * mirror target ++ */ ++static int ++mirror_ready(void) ++{ ++ static int debuginfo = 0; ++ ++ if (debuginfo) ++ return 1; ++ ++ if (STRUCT_EXISTS("struct mirror_set")) { ++ debuginfo = 1; ++ return 1; ++ } else ++ fprintf(fp, "No such struct info: mirror_set"); ++ ++ return 0; ++} ++ ++static void ++mirror_show_table(unsigned long target) ++{ ++ unsigned int i, nr_mir; ++ unsigned long ms, rh, log, log_type, mir_size, mir_head, mir, dm_dev; ++ unsigned long long offset; ++ char buf[BUFSIZE]; ++ ++ /* Get the address of struct mirror_set */ ++ GET_VALUE(target, dm_target, private, ms); ++ ++ /* Get the log-type name of the mirror_set */ ++ GET_ADDR(ms, mirror_set, rh, rh); ++ GET_VALUE(rh, region_hash, log, log); ++ GET_VALUE(log, dirty_log, type, log_type); ++ GET_PTR_STR(log_type, dirty_log_type, name, buf, BUFSIZE); ++ fprintf(fp, " log:%s", buf); ++ ++ /* ++ * Display information for each mirror disks. ++ * ++ * mir_head = mirror_set.mirror. ++ * This is the head of struct mirror array. ++ */ ++ fprintf(fp, " dev:"); ++ mir_size = STRUCT_SIZE("struct mirror"); ++ GET_ADDR(ms, mirror_set, mirror, mir_head); ++ GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir); ++ for (i = 0; i < nr_mir; i++) { ++ mir = mir_head + mir_size * i; /* Get next mirror */ ++ ++ /* Get the devt of the mirror disk */ ++ GET_VALUE(mir, mirror, dev, dm_dev); ++ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); ++ ++ /* Get the offset of the mirror disk */ ++ GET_VALUE(mir, mirror, offset, offset); ++ ++ fprintf(fp, "%s(%llu)%s", buf, offset, ++ i == nr_mir - 1 ? "" : ","); ++ } ++ if (i != nr_mir) ++ fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir); ++} ++ ++static void ++mirror_show_status(unsigned long target) ++{ ++ unsigned int i, nr_mir, synced, nr_error; ++ unsigned long ms, mir_size, mir_head, mir, dm_dev; ++ char buf[BUFSIZE]; ++ ++ /* Get the address of struct mirror_set */ ++ GET_VALUE(target, dm_target, private, ms); ++ ++ /* Get the status info of the mirror_set */ ++ GET_VALUE(ms, mirror_set, in_sync, synced); ++ fprintf(fp, " in_sync:%d", synced); ++ ++ /* ++ * Display information for each mirror disks. ++ * ++ * mir_head = mirror_set.mirror. ++ * This is the head of struct mirror array. ++ */ ++ fprintf(fp, " dev:"); ++ mir_size = STRUCT_SIZE("struct mirror"); ++ GET_ADDR(ms, mirror_set, mirror, mir_head); ++ GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir); ++ for (i = 0; i < nr_mir; i++) { ++ mir = mir_head + mir_size * i; /* Get next mirror */ ++ ++ /* Get the devt of the mirror disk */ ++ GET_VALUE(mir, mirror, dev, dm_dev); ++ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); ++ ++ /* Get the offset of the mirror disk */ ++ GET_VALUE(mir, mirror, error_count, nr_error); ++ ++ fprintf(fp, "%s(%c,%d)%s", buf, nr_error ? 'D' : 'A', nr_error, ++ i == nr_mir - 1 ? 
"" : ","); ++ } ++ if (i != nr_mir) ++ fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir); ++} ++ ++static void ++mirror_show_queue(unsigned long target) ++{ ++ unsigned long ms, rlist, wlist, rhead, whead; ++ unsigned long rh, quis_head, rcov_head, quis_next, rcov_next; ++ ++ /* Get the address of struct mirror_set */ ++ GET_VALUE(target, dm_target, private, ms); ++ ++ /* Get the address of queued I/O lists in struct mirror_set */ ++ GET_ADDR(ms, mirror_set, reads, rlist); ++ GET_ADDR(ms, mirror_set, writes, wlist); ++ ++ /* Get the head of queued I/O lists */ ++ GET_VALUE(rlist, bio_list, head, rhead); ++ GET_VALUE(wlist, bio_list, head, whead); ++ fprintf(fp, " %s", rhead ? "reads" : "(reads)"); ++ fprintf(fp, " %s", whead ? "writes" : "(writes)"); ++ ++ /* Get the address of the struct region_hash */ ++ GET_ADDR(ms, mirror_set, rh, rh); ++ ++ /* Get the address of recover region lists in struct region_hash */ ++ GET_ADDR(rh, region_hash, quiesced_regions, quis_head); ++ GET_ADDR(rh, region_hash, recovered_regions, rcov_head); ++ ++ /* Get the head of recover region lists */ ++ quis_next = get_next_from_list_head(quis_head); ++ rcov_next = get_next_from_list_head(rcov_head); ++ ++ fprintf(fp, " %s", quis_next != quis_head ? "quiesced" : "(quiesced)"); ++ fprintf(fp, " %s", rcov_next != rcov_head ? "recovered" : "(recovered)"); ++} ++ ++static struct dminfo_target_analyzer mirror_analyzer = { ++ .target_name = "mirror", ++ .ready = mirror_ready, ++ .show_table = mirror_show_table, ++ .show_status = mirror_show_status, ++ .show_queue = mirror_show_queue ++}; ++ ++/* ++ * multipath target ++ */ ++static int ++multipath_ready(void) ++{ ++ static int debuginfo = 0; ++ ++ if (debuginfo) ++ return 1; ++ ++ if (STRUCT_EXISTS("struct multipath")) { ++ debuginfo = 1; ++ return 1; ++ } else ++ fprintf(fp, "No such struct info: multipath"); ++ ++ return 0; ++} ++ ++static void ++multipath_show_table(unsigned long target) ++{ ++ int i, j; ++ unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths; ++ unsigned long mp, hwh, hwh_type, ps, ps_type, path, dm_dev; ++ unsigned long pg_head, pg_next, pg_last; ++ unsigned long path_head, path_next, path_last; ++ char name[BUFSIZE]; ++ ++ /* Get the address of struct multipath */ ++ GET_VALUE(target, dm_target, private, mp); ++ ++ /* Get features information */ ++ GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path); ++ ++ /* Get the hardware-handler information */ ++ GET_ADDR(mp, multipath, hw_handler, hwh); ++ GET_VALUE(hwh, hw_handler, type, hwh_type); ++ if (hwh_type) ++ GET_PTR_STR(hwh_type, hw_handler_type, name, name, BUFSIZE); ++ else ++ strcpy(name, "none"); ++ ++ /* Get the number of priority groups */ ++ GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs); ++ ++ fprintf(fp, " queue_if_no_path:%d hwh:%s nr_pgs:%d\n", ++ queue_if_no_path, name, nr_pgs); ++ ++ /* Display information for each priority group */ ++ fprintf(fp, " %-2s %-13s %-8s %s", ++ "PG", "PATH_SELECTOR", "NR_PATHS", "PATHS"); ++ GET_ADDR(mp, multipath, priority_groups, pg_head); ++ i = 0; ++ list_for_each (pg_next, pg_head, pg_last) { ++ /* pg_next == struct priority_group */ ++ ++ /* Get the index of the priority group */ ++ GET_VALUE(pg_next, priority_group, pg_num, pg_id); ++ ++ /* Get the name of path selector */ ++ GET_ADDR(pg_next, priority_group, ps, ps); ++ GET_VALUE(ps, path_selector, type, ps_type); ++ GET_PTR_STR(ps_type, path_selector_type, name, name, BUFSIZE); ++ ++ /* Get the number of paths in the priority group */ ++ GET_VALUE(pg_next, 
priority_group, nr_pgpaths, nr_paths); ++ ++ fprintf(fp, "\n %-2d %-13s %-8d ", pg_id, name, nr_paths); ++ ++ /* Display information for each path */ ++ GET_ADDR(pg_next, priority_group, pgpaths, path_head); ++ j = 0; ++ list_for_each (path_next, path_head, path_last) { ++ /* path_next == struct pgpath */ ++ ++ /* Get the devt of the pgpath */ ++ GET_ADDR(path_next, pgpath, path, path); ++ GET_VALUE(path, path, dev, dm_dev); ++ GET_STR(dm_dev, dm_dev, name, name, BUFSIZE); ++ ++ fprintf(fp, " %s", name); ++ j++; ++ } ++ if (j != nr_paths) ++ fprintf(fp, " ERROR: paths are less than nr_paths:%d", ++ nr_paths); ++ i++; ++ } ++ if (i != nr_pgs) ++ fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs); ++} ++ ++static void ++multipath_show_status(unsigned long target) ++{ ++ int i, j; ++ unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths; ++ unsigned int bypassed_pg, path_active, nr_fails; ++ unsigned long mp, hwh, hwh_type, cur_pg, path, dm_dev; ++ unsigned long pg_head, pg_next, pg_last; ++ unsigned long path_head, path_next, path_last; ++ char buf[BUFSIZE], path_status; ++ ++ /* Get the address of struct multipath */ ++ GET_VALUE(target, dm_target, private, mp); ++ ++ /* Get features information */ ++ GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path); ++ ++ /* Get the hardware-handler information */ ++ GET_ADDR(mp, multipath, hw_handler, hwh); ++ GET_VALUE(hwh, hw_handler, type, hwh_type); ++ if (hwh_type) ++ GET_PTR_STR(hwh_type, hw_handler_type, name, buf, BUFSIZE); ++ else ++ strcpy(buf, "none"); ++ ++ /* Get the number of priority groups */ ++ GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs); ++ ++ fprintf(fp, " queue_if_no_path:%d hwh:%s nr_pgs:%d\n", ++ queue_if_no_path, buf, nr_pgs); ++ ++ /* Display information for each priority group */ ++ fprintf(fp, " %-2s %-9s %-8s %s", ++ "PG", "PG_STATUS", "NR_PATHS", "PATHS"); ++ GET_ADDR(mp, multipath, priority_groups, pg_head); ++ i = 0; ++ list_for_each (pg_next, pg_head, pg_last) { ++ /* pg_next == struct priority_group */ ++ ++ /* Get the index of the priority group */ ++ GET_VALUE(pg_next, priority_group, pg_num, pg_id); ++ ++ /* Get the status of the priority group */ ++ GET_VALUE(pg_next, priority_group, bypassed, bypassed_pg); ++ if (bypassed_pg) ++ strcpy(buf, "disabled"); ++ else { ++ GET_VALUE(mp, multipath, current_pg, cur_pg); ++ if (pg_next == cur_pg) ++ strcpy(buf, "active"); ++ else ++ strcpy(buf, "enabled"); ++ } ++ ++ /* Get the number of paths in the priority group */ ++ GET_VALUE(pg_next, priority_group, nr_pgpaths, nr_paths); ++ ++ fprintf(fp, "\n %-2d %-9s %-8d ", pg_id, buf, nr_paths); ++ ++ /* Display information for each path */ ++ GET_ADDR(pg_next, priority_group, pgpaths, path_head); ++ j = 0; ++ list_for_each (path_next, path_head, path_last) { ++ /* path_next == struct pgpath */ ++ ++ /* Get the devt of the pgpath */ ++ GET_ADDR(path_next, pgpath, path, path); ++ GET_VALUE(path, path, dev, dm_dev); ++ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); ++ ++ /* Get the status of the path */ ++ GET_VALUE(path, path, is_active, path_active); ++ GET_VALUE(path_next, pgpath, fail_count, nr_fails); ++ path_status = path_active ? 
'A' : 'F'; ++ ++ fprintf(fp, " %s(%c,%u)", buf, path_status, nr_fails); ++ j++; ++ } ++ if (j != nr_paths) ++ fprintf(fp, " ERROR: paths are less than nr_paths:%d", ++ nr_paths); ++ i++; ++ } ++ if (i != nr_pgs) ++ fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs); ++} ++ ++static void ++multipath_show_queue(unsigned long target) ++{ ++ unsigned int queue_size; ++ unsigned long mp; ++ ++ /* Get the address of struct multipath */ ++ GET_VALUE(target, dm_target, private, mp); ++ ++ /* Get the size of queued I/Os in this 'target' */ ++ GET_VALUE(mp, multipath, queue_size, queue_size); ++ ++ fprintf(fp, " queue_size:%d", queue_size); ++} ++ ++static struct dminfo_target_analyzer multipath_analyzer = { ++ .target_name = "multipath", ++ .ready = multipath_ready, ++ .show_table = multipath_show_table, ++ .show_status = multipath_show_status, ++ .show_queue = multipath_show_queue ++}; ++ ++/* ++ * crypt target ++ */ ++static int ++crypt_ready(void) ++{ ++ static int debuginfo = 0; ++ ++ if (debuginfo) ++ return 1; ++ ++ if (STRUCT_EXISTS("struct crypt_config")) { ++ debuginfo = 1; ++ return 1; ++ } else ++ fprintf(fp, "No such struct info: crypt_config"); ++ ++ return 0; ++} ++ ++#define DMINFO_CRYPTO_TFM_MODE_ECB 0x00000001 ++#define DMINFO_CRYPTO_TFM_MODE_CBC 0x00000002 ++ ++static void ++crypt_show_table(unsigned long target) ++{ ++ int i, cit_mode, key_size; ++ unsigned long cc, tfm, crt_alg, cipher, iv_mode, dm_dev; ++ char buf[BUFSIZE], *chainmode; ++ ++ /* Get the address of struct crypt_config */ ++ GET_VALUE(target, dm_target, private, cc); ++ ++ /* Get the cipher name of the crypt_tfm */ ++ GET_VALUE(cc, crypt_config, tfm, tfm); ++ GET_VALUE(tfm, crypto_tfm, __crt_alg, crt_alg); ++ GET_STR(crt_alg, crypto_alg, cra_name, buf, BUFSIZE); ++ fprintf(fp, " type:%s", buf); ++ ++ /* Get the cit_mode of the crypt_tfm */ ++ GET_ADDR(tfm, crypto_tfm, crt_u, cipher); ++ GET_VALUE(cipher, cipher_tfm, cit_mode, cit_mode); ++ ++ if (MEMBER_EXISTS("struct crypt_config", "iv_mode")) { ++ if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC) ++ chainmode = "cbc"; ++ else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) ++ chainmode = "ecb"; ++ else ++ chainmode = "unknown"; ++ ++ /* Get the iv_mode of the crypt_config */ ++ GET_VALUE(cc, crypt_config, iv_mode, iv_mode); ++ if (iv_mode) { ++ GET_PTR_STR(cc, crypt_config, iv_mode, buf, BUFSIZE); ++ fprintf(fp, "-%s-%s", chainmode, buf); ++ } else ++ fprintf(fp, "-%s", chainmode); ++ ++ } else { ++ /* Compatibility mode for old dm-crypt cipher strings */ ++ if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC) ++ chainmode = "plain"; ++ else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) ++ chainmode = "ecb"; ++ else ++ chainmode = "unknown"; ++ ++ fprintf(fp, "-%s", chainmode); ++ } ++ ++ /* Get the devt of the crypt_config */ ++ GET_VALUE(cc, crypt_config, dev, dm_dev); ++ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); ++ fprintf(fp, " dev:%s", buf); ++ ++ /* ++ * Get the key of the crypt_config. 
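(Small illustration, not part of the patch.) crypt_show_table() above rebuilds a dm-crypt cipher spec of the form cipher-chainmode[-ivmode] (for example aes-cbc-plain) and then prints the raw key one byte at a time as two hex digits. A tiny stand-alone model of that output formatting, using a made-up 4-byte key and a sample device name:

    #include <stdio.h>

    int main(void)
    {
        const char *cipher = "aes", *chainmode = "cbc", *iv_mode = "plain";
        unsigned char key[] = { 0xde, 0xad, 0xbe, 0xef };   /* made-up key bytes */
        int i, key_size = (int)sizeof(key);

        printf(" type:%s-%s-%s dev:8:16 key:", cipher, chainmode, iv_mode);
        for (i = 0; i < key_size; i++)
            printf("%02x", key[i]);               /* one byte -> two hex digits */
        printf("\n");     /* -> " type:aes-cbc-plain dev:8:16 key:deadbeef" */
        return 0;
    }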
++ */ ++ GET_VALUE(cc, crypt_config, key_size, key_size); ++ GET_STR(cc, crypt_config, key, buf, MIN(key_size + 1, BUFSIZE)); ++ fprintf(fp, " key:"); ++ for (i = 0; i < key_size; i++) ++ fprintf(fp, "%02x", (unsigned char)buf[i]); ++} ++ ++static void ++crypt_show_status(unsigned long target) ++{ ++ /* crypt target has no status */ ++ fprintf(fp, " No status info"); ++} ++ ++static void ++crypt_show_queue(unsigned long target) ++{ ++ /* crypt target has no queue */ ++ fprintf(fp, " No queue info"); ++} ++ ++static struct dminfo_target_analyzer crypt_analyzer = { ++ .target_name = "crypt", ++ .ready = crypt_ready, ++ .show_table = crypt_show_table, ++ .show_status = crypt_show_status, ++ .show_queue = crypt_show_queue ++}; ++ ++/* ++ * stripe target ++ */ ++static int ++stripe_ready(void) ++{ ++ static int debuginfo = 0; ++ ++ if (debuginfo) ++ return 1; ++ ++ if (STRUCT_EXISTS("struct stripe_c")) { ++ debuginfo = 1; ++ return 1; ++ } else ++ fprintf(fp, "No such struct info: stripe_c"); ++ ++ return 0; ++} ++ ++static void ++stripe_show_table(unsigned long target) ++{ ++ unsigned int i, n_stripe; ++ unsigned long sc, stripe_size, s, head, dm_dev; ++ unsigned long long mask; ++ char buf[BUFSIZE]; ++ ++ /* Get the address of struct stripe_c */ ++ GET_VALUE(target, dm_target, private, sc); ++ ++ /* Get the chunk_size of the stripe_c */ ++ GET_VALUE(sc, stripe_c, chunk_mask, mask); ++ fprintf(fp, " chunk_size:%llu", mask + 1); ++ ++ /* ++ * Display the information of each stripe disks. ++ * ++ * head = stripe_c.stripe. ++ * This is the head of struct stripe array. ++ */ ++ stripe_size = STRUCT_SIZE("struct stripe"); ++ GET_ADDR(sc, stripe_c, stripe, head); ++ GET_VALUE(sc, stripe_c, stripes, n_stripe); ++ fprintf(fp, " dev:"); ++ for (i = 0; i < n_stripe; i++) { ++ s = head + stripe_size * i; /* Get next stripe */ ++ ++ /* Get the devt of the stripe disk */ ++ GET_VALUE(s, stripe, dev, dm_dev); ++ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); ++ ++ fprintf(fp, "%s%s", buf, i == n_stripe - 1 ? 
"" : ","); ++ } ++ if (i != n_stripe) ++ fprintf(fp, " ERROR: dev are less than n_stripe:%d", n_stripe); ++} ++ ++static void ++stripe_show_status(unsigned long target) ++{ ++ /* stripe target has no status */ ++ fprintf(fp, " No status info"); ++} ++ ++static void ++stripe_show_queue(unsigned long target) ++{ ++ /* stripe target has no queue */ ++ fprintf(fp, " No queue info"); ++} ++ ++static struct dminfo_target_analyzer stripe_analyzer = { ++ .target_name = "striped", ++ .ready = stripe_ready, ++ .show_table = stripe_show_table, ++ .show_status = stripe_show_status, ++ .show_queue = stripe_show_queue ++}; ++ ++/* ++ * snapshot target ++ */ ++static int ++snapshot_ready(void) ++{ ++ static int debuginfo = 0; ++ ++ if (debuginfo) ++ return 1; ++ ++ if (STRUCT_EXISTS("struct dm_snapshot")) { ++ debuginfo = 1; ++ return 1; ++ } else ++ fprintf(fp, "No such struct info: dm_snapshot"); ++ ++ return 0; ++} ++ ++static void ++snapshot_show_table(unsigned long target) ++{ ++ unsigned long snap, orig_dev, cow_dev; ++ unsigned long long chunk_size; ++ char orig_name[BUFSIZE], cow_name[BUFSIZE], type; ++ ++ /* Get the address of struct dm_snapshot */ ++ GET_VALUE(target, dm_target, private, snap); ++ ++ /* Get snapshot parameters of the dm_snapshot */ ++ GET_VALUE(snap, dm_snapshot, origin, orig_dev); ++ GET_STR(orig_dev, dm_dev, name, orig_name, BUFSIZE); ++ GET_VALUE(snap, dm_snapshot, cow, cow_dev); ++ GET_STR(cow_dev, dm_dev, name, cow_name, BUFSIZE); ++ GET_VALUE(snap, dm_snapshot, type, type); ++ GET_VALUE(snap, dm_snapshot, chunk_size, chunk_size); ++ ++ fprintf(fp, " orig:%s cow:%s type:%c chunk_size:%llu", ++ orig_name, cow_name, type, chunk_size); ++} ++ ++static void ++snapshot_show_status(unsigned long target) ++{ ++ int valid; ++ unsigned long snap; ++ ++ /* Get the address of struct dm_snapshot */ ++ GET_VALUE(target, dm_target, private, snap); ++ ++ /* Get snapshot parameters of the dm_snapshot */ ++ GET_VALUE(snap, dm_snapshot, valid, valid); ++ ++ fprintf(fp, " vaild:%d", valid); ++} ++ ++static void ++snapshot_show_queue(unsigned long target) ++{ ++ fprintf(fp, " No queue info"); ++} ++ ++static struct dminfo_target_analyzer snapshot_analyzer = { ++ .target_name = "snapshot", ++ .ready = snapshot_ready, ++ .show_table = snapshot_show_table, ++ .show_status = snapshot_show_status, ++ .show_queue = snapshot_show_queue ++}; ++ ++/* ++ * snapshot-origin target ++ */ ++static int ++origin_ready(void) ++{ ++ return 1; ++} ++ ++static void ++origin_show_table(unsigned long target) ++{ ++ unsigned long dm_dev; ++ char buf[BUFSIZE]; ++ ++ /* Get the name of the struct dm_dev */ ++ GET_VALUE(target, dm_target, private, dm_dev); ++ GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); ++ ++ fprintf(fp, " orig_dev:%s", buf); ++} ++ ++static void ++origin_show_status(unsigned long target) ++{ ++ /* snapshot-origin target has no status */ ++ fprintf(fp, " No status info"); ++} ++ ++static void ++origin_show_queue(unsigned long target) ++{ ++ /* snapshot-origin target has no queue */ ++ fprintf(fp, " No queue info"); ++} ++ ++static struct dminfo_target_analyzer snapshot_origin_analyzer = { ++ .target_name = "snapshot-origin", ++ .ready = origin_ready, ++ .show_table = origin_show_table, ++ .show_status = origin_show_status, ++ .show_queue = origin_show_queue ++}; ++ ++/* ++ * Core part of dminfo ++ */ ++#define DMINFO_LIST 0 ++#define DMINFO_DEPS 1 ++#define DMINFO_TABLE 2 ++#define DMINFO_STATUS 3 ++#define DMINFO_QUEUE 4 ++ ++static int ++dm_core_ready(void) ++{ ++ static int debuginfo = 0; ++ ++ if 
(debuginfo) ++ return 1; ++ ++ if (STRUCT_EXISTS("struct hash_cell")) { ++ debuginfo = 1; ++ return 1; ++ } else ++ fprintf(fp, "No such struct info: hash_cell\n"); ++ ++ return 0; ++} ++ ++/* Display dependency information of the 'table' */ ++static void ++dminfo_show_deps(unsigned long table) ++{ ++ int major, minor, count; ++ unsigned long head, next, last, dev, bdev; ++ char buf[BUFSIZE]; ++ ++ /* head = dm_table.devices */ ++ GET_ADDR(table, dm_table, devices, head); ++ ++ fprintf(fp, " %-3s %-3s %-16s %-5s %s\n", ++ "MAJ", "MIN", "GENDISK", "COUNT", "DEVNAME"); ++ ++ list_for_each (next, head, last) { ++ /* Get dependency information. (next == struct *dm_dev) */ ++ GET_VALUE(next, dm_dev, count, count); ++ GET_VALUE(next, dm_dev, bdev, bdev); ++ GET_VALUE(bdev, block_device, bd_disk, dev); ++ GET_VALUE(dev, gendisk, major, major); ++ GET_VALUE(dev, gendisk, first_minor, minor); ++ GET_STR(dev, gendisk, disk_name, buf, BUFSIZE); ++ ++ fprintf(fp, " %-3d %-3d %-16lx %-5d %s\n", ++ major, minor, dev, count, buf); ++ } ++} ++ ++/* ++ * Display target specific information in the 'table', if the target ++ * analyzer is registered and available. ++ */ ++static void ++dminfo_show_details(unsigned long table, unsigned int num_targets, int info_type) ++{ ++ unsigned int i; ++ unsigned long head, target_size, target, target_type; ++ struct dminfo_target_analyzer *ta; ++ char buf[BUFSIZE]; ++ ++ /* ++ * head = dm_table.targets. ++ * This is the head of struct dm_target array. ++ */ ++ GET_VALUE(table, dm_table, targets, head); ++ target_size = STRUCT_SIZE("struct dm_target"); ++ ++ fprintf(fp, " %-16s %-11s %s\n", ++ "TARGET", "TARGET_TYPE", "PRIVATE_DATA"); ++ ++ for (i = 0; i < num_targets; i++, fprintf(fp, "\n")) { ++ target = head + target_size * i; /* Get next target */ ++ ++ /* Get target information */ ++ GET_VALUE(target, dm_target, type, target_type); ++ GET_PTR_STR(target_type, target_type, name, buf, BUFSIZE); ++ ++ fprintf(fp, " %-16lx %-11s", target, buf); ++ ++ if (!(ta = find_target_analyzer(buf)) || !ta->ready ++ || !ta->ready()) ++ continue; ++ ++ switch (info_type) { ++ case DMINFO_TABLE: ++ if (ta->show_table) ++ ta->show_table(target); ++ break; ++ case DMINFO_STATUS: ++ if (ta->show_status) ++ ta->show_status(target); ++ break; ++ case DMINFO_QUEUE: ++ if (ta->show_queue) ++ ta->show_queue(target); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ if (i != num_targets) ++ fprintf(fp, " ERROR: targets are less than num_targets:%d", ++ num_targets); ++} ++ ++/* ++ * Display lists (and detail information if specified) of existing ++ * dm devices. 
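(Illustrative sketch, outside the patch.) dminfo_show_list(), defined next, visits every bucket of the kernel's _name_buckets[] hash and walks each bucket with the list_for_each() helper declared earlier in this file, which stops either when the walk wraps back to the head or when the next pointer stops advancing, a guard against a corrupt, self-referencing node. A user-space adaptation of that walk over ordinary pointers; the extension itself fetches each next pointer out of the dump through readmem():

    #include <stdio.h>

    struct list_head { struct list_head *next; };

    /* user-space stand-in for get_next_from_list_head(), which in the
     * extension reads the pointer from the dump with readmem() */
    static struct list_head *get_next(struct list_head *p) { return p->next; }

    /* same termination logic as the extension's list_for_each(): stop when
     * the walk wraps back to the head, or when "next" stops advancing */
    #define list_for_each(next, head, last) \
        for (next = get_next(head), last = NULL; \
             next && next != (head) && next != last; \
             last = next, next = get_next(next))

    int main(void)
    {
        struct list_head head, a, b, c, *pos, *last;
        int n = 0;

        head.next = &a; a.next = &b; b.next = &c; c.next = &head;  /* circular list */

        list_for_each(pos, &head, last)
            printf("entry %d at %p\n", ++n, (void *)pos);          /* visits a, b, c */
        return 0;
    }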
++ */ ++static void ++dminfo_show_list(int additional_info) ++{ ++ int i, major, minor, array_len; ++ unsigned int num_targets; ++ unsigned long _name_buckets, head, next, last, md, dev, table; ++ char buf[BUFSIZE]; ++ ++ _name_buckets = symbol_value("_name_buckets"); ++ array_len = get_array_length("_name_buckets", NULL, 0); ++ ++ if (additional_info == DMINFO_LIST) ++ fprintf(fp, "%-3s %-3s %-16s %-16s %-7s %s\n", ++ "MAJ", "MIN", "MAP_DEV", "DM_TABLE", ++ "TARGETS", "MAPNAME"); ++ ++ for (i = 0; i < array_len; i++) { ++ /* head = _name_buckets[i] */ ++ head = _name_buckets + (i * SIZE(list_head)); ++ ++ list_for_each (next, head, last) { /* next == hash_cell */ ++ /* Get device and table information */ ++ GET_PTR_STR(next, hash_cell, name, buf, BUFSIZE); ++ GET_VALUE(next, hash_cell, md, md); ++ GET_VALUE(md, mapped_device, disk, dev); ++ GET_VALUE(dev, gendisk, major, major); ++ GET_VALUE(dev, gendisk, first_minor, minor); ++ GET_VALUE(md, mapped_device, map, table); ++ GET_VALUE(table, dm_table, num_targets, num_targets); ++ ++ if (additional_info != DMINFO_LIST) ++ fprintf(fp, "%-3s %-3s %-16s %-16s %-7s %s\n", ++ "MAJ", "MIN", "MAP_DEV", "DM_TABLE", ++ "TARGETS", "MAPNAME"); ++ ++ fprintf(fp, "%-3d %-3d %-16lx %-16lx %-7d %s\n", ++ major, minor, md, table, num_targets, buf); ++ ++ switch(additional_info) { ++ case DMINFO_DEPS: ++ dminfo_show_deps(table); ++ break; ++ case DMINFO_TABLE: ++ case DMINFO_STATUS: ++ case DMINFO_QUEUE: ++ dminfo_show_details(table, num_targets, ++ additional_info); ++ break; ++ default: ++ break; ++ } ++ ++ if (additional_info != DMINFO_LIST) ++ fprintf(fp, "\n"); ++ } ++ } ++} ++ ++/* ++ * Display the original bio information for the 'bio'. ++ * If the 'bio' is for dm devices, the original bio information is pointed ++ * by bio.bi_private as struct target_io. 
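(Reduced illustration, not part of the patch.) The comment above gives the pointer chain that dminfo -b follows for a cloned bio: bio.bi_private points to a struct target_io, whose io member points to the struct dm_io holding the original bio and the owning mapped_device. A user-space model of that chain, with the structures cut down to only the fields used here; the real layouts are taken from the kernel's debuginfo by the GET_VALUE macros:

    #include <stdio.h>

    /* reduced models of the kernel structures: only the fields used here */
    struct bio           { void *bi_private; };
    struct mapped_device { int placeholder; };
    struct dm_io         { struct mapped_device *md; struct bio *bio; };
    struct target_io     { struct dm_io *io; };

    int main(void)
    {
        struct mapped_device md;
        struct bio original, clone;
        struct dm_io io      = { &md, &original };
        struct target_io tio = { &io };

        clone.bi_private = &tio;     /* what dm core stores in the cloned bio */

        /* the dereference chain "dminfo -b <clone>" performs via readmem() */
        struct target_io *t = clone.bi_private;
        printf("original bio %p on mapped_device %p\n",
               (void *)t->io->bio, (void *)t->io->md);
        return 0;
    }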
++ */ ++static void ++dminfo_show_bio(unsigned long bio) ++{ ++ int major, minor; ++ unsigned long target_io, dm_io, dm_bio, md, dev; ++ char buf[BUFSIZE]; ++ ++ /* Get original bio and device information */ ++ GET_VALUE(bio, bio, bi_private, target_io); ++ GET_VALUE(target_io, target_io, io, dm_io); ++ GET_VALUE(dm_io, dm_io, bio, dm_bio); ++ GET_VALUE(dm_io, dm_io, md, md); ++ GET_VALUE(md, mapped_device, disk, dev); ++ GET_VALUE(dev, gendisk, major, major); ++ GET_VALUE(dev, gendisk, first_minor, minor); ++ GET_STR(dev, gendisk, disk_name, buf, BUFSIZE); ++ ++ fprintf(fp, "%-16s %-3s %-3s %-16s %s\n", ++ "DM_BIO_ADDRESS", "MAJ", "MIN", "MAP_DEV", "DEVNAME"); ++ fprintf(fp, "%-16lx %-3d %-3d %-16lx %s\n", ++ dm_bio, major, minor, md, buf); ++} ++ ++static void ++cmd_dminfo(void) ++{ ++ int c, additional_info = DMINFO_LIST; ++ unsigned long bio; ++ ++ if (!dm_core_ready()) ++ return; ++ ++ /* Parse command line option */ ++ while ((c = getopt(argcnt, args, "b:dlqst")) != EOF) { ++ switch(c) ++ { ++ case 'b': ++ bio = stol(optarg, FAULT_ON_ERROR, NULL); ++ dminfo_show_bio(bio); ++ return; ++ case 'd': ++ additional_info = DMINFO_DEPS; ++ break; ++ case 'l': ++ additional_info = DMINFO_LIST; ++ break; ++ case 'q': ++ additional_info = DMINFO_QUEUE; ++ break; ++ case 's': ++ additional_info = DMINFO_STATUS; ++ break; ++ case 't': ++ additional_info = DMINFO_TABLE; ++ break; ++ default: ++ argerrs++; ++ break; ++ } ++ } ++ ++ if (argerrs) ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ ++ dminfo_show_list(additional_info); ++} ++ ++/* ++ * dminfo help ++ */ ++static char *help_dminfo[] = { ++ "dminfo", /* command name */ ++ "device mapper (dm) information", /* short description */ ++ "[-b bio | -d | -l | -q | -s | -t]", /* argument synopsis */ ++ " This command displays information about device-mapper mapped ", ++ " devices (dm devices).", ++ " If no argument is entered, displays lists of existing dm devices.", ++ " It's same as -l option.", ++ "", ++ " -b bio displays the information of the dm device which the bio", ++ " is submitted in. If the bio isn't for dm devices,", ++ " results will be error.", ++ " -d displays dependency information for existing dm devices.", ++ " -l displays lists of existing dm devices.", ++ " -q displays queued I/O information for each target of", ++ " existing dm devices.", ++ " -s displays status information for each target of existing", ++ " dm devices.", ++ " -t displays table information for each target of existing", ++ " dm devices.", ++ "", ++ "EXAMPLE", ++ " Display lists of dm devices. \"MAP_DEV\" is the address of the", ++ " struct mapped_device. \"DM_TABLE\" is the address of the struct", ++ " dm_table. \"TARGETS\" is the number of targets which are in", ++ " the struct dm_table.", ++ "", ++ " %s> dminfo", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 8 c4866c80 c4866280 1 vg0-snap0", ++ " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", ++ " 253 0 c4840380 c4841880 1 mp0", ++ " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", ++ " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", ++ " 253 4 d441e280 c919ed80 1 dummy1", ++ " 253 3 f5dc4280 cba81d80 1 dummy0", ++ " 253 2 f7c53180 c4866180 1 vg0-lv0", ++ " 253 1 f746d280 f746cd80 1 mp0p1", ++ "", ++ " Display the dm device information which the bio is submitted in.", ++ " The bio (ceacee80) is a clone of the bio (ceacee00) which is", ++ " submitted in the dm-3 (dummy0). 
And the bio (ceacee00) is a clone", ++ " of the bio (ceaced80) which is submitted in the dm-4 (dummy1), too.", ++ " The bio (ceaced80) is the original bio.", ++ "", ++ " %s> dminfo -b ceacee80", ++ " DM_BIO_ADDRESS MAJ MIN MAP_DEV DEVNAME", ++ " ceacee00 253 3 f5dc4280 dm-3", ++ " crash> dminfo -b ceacee00", ++ " DM_BIO_ADDRESS MAJ MIN MAP_DEV DEVNAME", ++ " ceaced80 253 4 d441e280 dm-4", ++ " crash> dminfo -b ceaced80", ++ " dminfo: invalid kernel virtual address: 64 type: \"GET_VALUE: dm_io.bio\"", ++ "", ++ " Display dependency information for each target.", ++ " The vg0-snap0 depends on thd dm-6 (vg0-lv0-real) and the dm-7", ++ " (vg0-snap0-cow)", ++ "", ++ " %s> dminfo -d", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 8 c4866c80 c4866280 1 vg0-snap0", ++ " MAJ MIN GENDISK COUNT DEVNAME", ++ " 253 7 c4866980 1 dm-7", ++ " 253 6 f6a04280 1 dm-6", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", ++ " MAJ MIN GENDISK COUNT DEVNAME", ++ " 8 0 f7f24c80 1 sda", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", ++ " MAJ MIN GENDISK COUNT DEVNAME", ++ " 8 0 f7f24c80 1 sda", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 2 f7c53180 c4866180 1 vg0-lv0", ++ " MAJ MIN GENDISK COUNT DEVNAME", ++ " 253 6 f6a04280 1 dm-6", ++ "", ++ " Display queued I/O information for each target.", ++ " The information is displayed under the \"PRIVATE_DATA\" column.", ++ "", ++ " %s> dminfo -q", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8961080 mirror (reads) (writes) (quiesced) (recovered)", ++ "", ++ " --------------------------------------------------------------", ++ " \"reads/writes\" are members of the struct mirror_set, and", ++ " \"quiesced/recovered\" are members of the struct region_hash.", ++ " If the list is empty, the member is bracketed by \"()\".", ++ " --------------------------------------------------------------", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 0 c4840380 c4841880 1 mp0", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8802080 multipath queue_size:0", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 1 f746d280 f746cd80 1 mp0p1", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8821080 linear No queue info", ++ "", ++ " Display status information for each target.", ++ " The information is displayed under the \"PRIVATE_DATA\" column.", ++ "", ++ " %s> dminfo -s", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 0 c4840380 c4841880 1 mp0", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8802080 multipath queue_if_no_path:0 hwh:none nr_pgs:1", ++ " PG PG_STATUS NR_PATHS PATHS", ++ " 1 active 2 8:16(A,0) 8:32(A,0)", ++ "", ++ " --------------------------------------------------------------", ++ " Format of \"PATHS\": :(,)", ++ " Status: A:active, F:faulty", ++ " Fail_count: the value of the struct pgpath.fail_count", ++ " --------------------------------------------------------------", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8961080 mirror in_sync:1 dev:8:16(A,0),8:32(A,0)", ++ "", ++ " --------------------------------------------------------------", ++ " Format of \"dev\": :(,)", ++ " Status: A:active, D:degraded", ++ " Error_count: the value of the struct mirror.error_count", ++ " 
--------------------------------------------------------------", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 1 f746d280 f746cd80 1 mp0p1", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8821080 linear No status info", ++ "", ++ " Display table information for each target.", ++ " The information is displayed under the \"PRIVATE_DATA\" column.", ++ "", ++ " %s> dminfo -t", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 8 c4866c80 c4866280 1 vg0-snap0", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f89b4080 snapshot orig:253:6 cow:253:7 type:P chunk_size:16", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f890f080 linear begin:0 len:204800 dev:8:5 offset:384", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 0 c4840380 c4841880 1 mp0", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8802080 multipath queue_if_no_path:0 hwh:none nr_pgs:1", ++ " PG PATH_SELECTOR NR_PATHS PATHS", ++ " 1 round-robin 2 8:16 8:32", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8961080 mirror log:core dev:8:16(0),8:32(0)", ++ "", ++ " --------------------------------------------------------------", ++ " Format of \"dev\": :()", ++ " Offset: the value of the struct mirror.offset", ++ " --------------------------------------------------------------", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f899d080 linear begin:0 len:8192 dev:8:5 offset:205184", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 2 f7c53180 c4866180 1 vg0-lv0", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8bbc080 snapshot-origin orig_dev:253:6", ++ "", ++ " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", ++ " 253 1 f746d280 f746cd80 1 mp0p1", ++ " TARGET TARGET_TYPE PRIVATE_DATA", ++ " f8821080 linear begin:0 len:2040192 dev:253:0 offset:63", ++ NULL ++}; ++ ++/* ++ * Registering command extension ++ */ ++ ++static struct command_table_entry command_table[] = { ++ {"dminfo", cmd_dminfo, help_dminfo, 0}, ++ {NULL, NULL, NULL, 0}, ++}; ++ ++int _init(void) ++{ ++ register_extension(command_table); ++ ++ dminfo_register_target_analyzer(&zero_analyzer); ++ dminfo_register_target_analyzer(&error_analyzer); ++ dminfo_register_target_analyzer(&linear_analyzer); ++ dminfo_register_target_analyzer(&mirror_analyzer); ++ dminfo_register_target_analyzer(&multipath_analyzer); ++ dminfo_register_target_analyzer(&crypt_analyzer); ++ dminfo_register_target_analyzer(&stripe_analyzer); ++ dminfo_register_target_analyzer(&snapshot_analyzer); ++ dminfo_register_target_analyzer(&snapshot_origin_analyzer); ++ ++ return 0; ++} ++ ++int _fini(void) ++{ ++ return 0; ++} +--- crash/extensions/sial.mk.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/extensions/sial.mk 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,17 @@ ++# ++ifeq ($(TARGET), PPC64) ++ TARGET_FLAGS = -D$(TARGET) -m64 ++else ++ TARGET_FLAGS = -D$(TARGET) ++endif ++ ++all: sial.so ++ ++lib-sial: ++ cd libsial && make ++ ++sial.so: ../defs.h sial.c lib-sial ++ gcc -g -I.. 
-Ilibsial -I../gdb-6.1/bfd -I../gdb-6.1/include -I../gdb-6.1/gdb -I../gdb-6.1/gdb/config -nostartfiles -shared -rdynamic -o sial.so sial.c -fPIC $(TARGET_FLAGS) -Llibsial -lsial ++ ++clean: ++ cd libsial && make clean +--- crash/tools.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/tools.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* tools.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -18,12 +18,12 @@ + #include "defs.h" + #include + +-static int calculate(char *, ulong *, ulonglong *, ulong); + static void print_number(struct number_option *, int, int); + static long alloc_hq_entry(void); + struct hq_entry; + static void dealloc_hq_entry(struct hq_entry *); + static void show_options(void); ++static void dump_struct_members(struct list_data *, int, ulong); + + /* + * General purpose error reporting routine. Type INFO prints the message +@@ -63,6 +63,8 @@ + + if ((new_line = (buf[0] == '\n'))) + shift_string_left(buf, 1); ++ else if (pc->flags & PLEASE_WAIT) ++ new_line = TRUE; + + if (pc->stdpipe) { + fprintf(pc->stdpipe, "%s%s: %s%s", +@@ -1770,6 +1772,42 @@ + pc->flags & HASH ? "on" : "off"); + return; + ++ } else if (STREQ(args[optind], "unwind")) { ++ if (args[optind+1]) { ++ optind++; ++ if (STREQ(args[optind], "on")) { ++ if ((kt->flags & DWARF_UNWIND_CAPABLE) || ++ !runtime) { ++ kt->flags |= DWARF_UNWIND; ++ kt->flags &= ~NO_DWARF_UNWIND; ++ } ++ } else if (STREQ(args[optind], "off")) { ++ kt->flags &= ~DWARF_UNWIND; ++ if (!runtime) ++ kt->flags |= NO_DWARF_UNWIND; ++ } else if (IS_A_NUMBER(args[optind])) { ++ value = stol(args[optind], ++ FAULT_ON_ERROR, NULL); ++ if (value) { ++ if ((kt->flags & DWARF_UNWIND_CAPABLE) || ++ !runtime) { ++ kt->flags |= DWARF_UNWIND; ++ kt->flags &= ~NO_DWARF_UNWIND; ++ } ++ } else { ++ kt->flags &= ~DWARF_UNWIND; ++ if (!runtime) ++ kt->flags |= NO_DWARF_UNWIND; ++ } ++ } else ++ goto invalid_set_command; ++ } ++ ++ if (runtime) ++ fprintf(fp, "unwind: %s\n", ++ kt->flags & DWARF_UNWIND ? "on" : "off"); ++ return; ++ + } else if (STREQ(args[optind], "refresh")) { + if (args[optind+1]) { + optind++; +@@ -1806,7 +1844,14 @@ + pc->flags |= SCROLL; + else if (STREQ(args[optind], "off")) + pc->flags &= ~SCROLL; +- else if (IS_A_NUMBER(args[optind])) { ++ else if (STREQ(args[optind], "more")) ++ pc->scroll_command = SCROLL_MORE; ++ else if (STREQ(args[optind], "less")) ++ pc->scroll_command = SCROLL_LESS; ++ else if (STREQ(args[optind], "CRASHPAGER")) { ++ if (CRASHPAGER_valid()) ++ pc->scroll_command = SCROLL_CRASHPAGER; ++ } else if (IS_A_NUMBER(args[optind])) { + value = stol(args[optind], + FAULT_ON_ERROR, NULL); + if (value) +@@ -1817,9 +1862,25 @@ + goto invalid_set_command; + } + +- if (runtime) +- fprintf(fp, "scroll: %s\n", +- pc->flags & SCROLL ? "on" : "off"); ++ if (runtime) { ++ fprintf(fp, "scroll: %s ", ++ pc->flags & SCROLL ? 
"on" : "off"); ++ switch (pc->scroll_command) ++ { ++ case SCROLL_LESS: ++ fprintf(fp, "(/usr/bin/less)\n"); ++ break; ++ case SCROLL_MORE: ++ fprintf(fp, "(/bin/more)\n"); ++ break; ++ case SCROLL_NONE: ++ fprintf(fp, "(none)\n"); ++ break; ++ case SCROLL_CRASHPAGER: ++ fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); ++ break; ++ } ++ } + + return; + +@@ -2004,6 +2065,10 @@ + pc->flags &= ~(DUMPFILE_TYPES); + if (is_netdump(args[optind], NETDUMP_LOCAL)) + pc->flags |= NETDUMP; ++ else if (is_kdump(args[optind], KDUMP_LOCAL)) ++ pc->flags |= KDUMP; ++ else if (is_xendump(args[optind])) ++ pc->flags |= XENDUMP; + else if (is_diskdump(args[optind])) + pc->flags |= DISKDUMP; + else if (is_lkcd_compressed_dump(args[optind])) +@@ -2054,6 +2119,31 @@ + pc->flags |= DATADEBUG; + return; + ++ } else if (STREQ(args[optind], "zero_excluded")) { ++ ++ if (args[optind+1]) { ++ optind++; ++ if (STREQ(args[optind], "on")) ++ *diskdump_flags |= ZERO_EXCLUDED; ++ else if (STREQ(args[optind], "off")) ++ *diskdump_flags &= ~ZERO_EXCLUDED; ++ else if (IS_A_NUMBER(args[optind])) { ++ value = stol(args[optind], ++ FAULT_ON_ERROR, NULL); ++ if (value) ++ *diskdump_flags |= ZERO_EXCLUDED; ++ else ++ *diskdump_flags &= ~ZERO_EXCLUDED; ++ } else ++ goto invalid_set_command; ++ } ++ ++ if (runtime) ++ fprintf(fp, "zero_excluded: %s\n", ++ *diskdump_flags & ZERO_EXCLUDED ? ++ "on" : "off"); ++ return; ++ + } else if (runtime) { + ulong pid, task; + +@@ -2106,7 +2196,23 @@ + static void + show_options(void) + { +- fprintf(fp, " scroll: %s\n", pc->flags & SCROLL ? "on" : "off"); ++ fprintf(fp, " scroll: %s ", ++ pc->flags & SCROLL ? "on" : "off"); ++ switch (pc->scroll_command) ++ { ++ case SCROLL_LESS: ++ fprintf(fp, "(/usr/bin/less)\n"); ++ break; ++ case SCROLL_MORE: ++ fprintf(fp, "(/bin/more)\n"); ++ break; ++ case SCROLL_NONE: ++ fprintf(fp, "(none)\n"); ++ break; ++ case SCROLL_CRASHPAGER: ++ fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); ++ break; ++ } + fprintf(fp, " radix: %d (%s)\n", pc->output_radix, + pc->output_radix == 10 ? "decimal" : + pc->output_radix == 16 ? "hexadecimal" : "unknown"); +@@ -2121,6 +2227,8 @@ + fprintf(fp, " edit: %s\n", pc->editing_mode); + fprintf(fp, " namelist: %s\n", pc->namelist); + fprintf(fp, " dumpfile: %s\n", pc->dumpfile); ++ fprintf(fp, " unwind: %s\n", kt->flags & DWARF_UNWIND ? "on" : "off"); ++ fprintf(fp, " zero_excluded: %s\n", *diskdump_flags & ZERO_EXCLUDED ? "on" : "off"); + } + + +@@ -2336,6 +2444,7 @@ + char *element2; + struct syment *sp; + ++ opcode = 0; + value1 = value2 = 0; + ll_value1 = ll_value2 = 0; + +@@ -2550,7 +2659,7 @@ + * its real value. The allowable multipliers are k, K, m, M, g and G, for + * kilobytes, megabytes and gigabytes. + */ +-static int ++int + calculate(char *s, ulong *value, ulonglong *llvalue, ulong flags) + { + ulong factor, bias; +@@ -2832,7 +2941,9 @@ + break; + + case 's': +- ld->structname = optarg; ++ if (ld->structname_args++ == 0) ++ hq_open(); ++ hq_enter((ulong)optarg); + break; + + case 'o': +@@ -2871,6 +2982,12 @@ + cmd_usage(pc->curcmd, SYNOPSIS); + } + ++ if (ld->structname_args) { ++ ld->structname = (char **)GETBUF(sizeof(char *) * ld->structname_args); ++ retrieve_list((ulong *)ld->structname, ld->structname_args); ++ hq_close(); ++ } ++ + while (args[optind]) { + if (strstr(args[optind], ".") && + arg_to_datatype(args[optind], sm, RETURN_ON_ERROR) > 1) { +@@ -2896,11 +3013,25 @@ + } + + /* +- * If it's not a symbol nor a number, bail out. 
++ * If it's not a symbol nor a number, bail out if it ++ * cannot be evaluated as a start address. + */ +- if (!IS_A_NUMBER(args[optind])) ++ if (!IS_A_NUMBER(args[optind])) { ++ if (can_eval(args[optind])) { ++ value = eval(args[optind], FAULT_ON_ERROR, NULL); ++ if (IS_KVADDR(value)) { ++ if (ld->flags & LIST_START_ENTERED) ++ error(FATAL, ++ "list start already entered\n"); ++ ld->start = value; ++ ld->flags |= LIST_START_ENTERED; ++ goto next_arg; ++ } ++ } ++ + error(FATAL, "invalid argument: %s\n", + args[optind]); ++ } + + /* + * If the start is known, it's got to be an offset. +@@ -2941,7 +3072,8 @@ + ld->member_offset = value; + ld->flags |= LIST_OFFSET_ENTERED; + goto next_arg; +- } else if (!IS_A_NUMBER(args[optind+1]) && ++ } else if ((!IS_A_NUMBER(args[optind+1]) && ++ !can_eval(args[optind+1])) && + !strstr(args[optind+1], ".")) + error(FATAL, "symbol not found: %s\n", + args[optind+1]); +@@ -3002,8 +3134,12 @@ + hq_open(); + c = do_list(ld); + hq_close(); ++ ++ if (ld->structname_args) ++ FREEBUF(ld->structname); + } + ++ + /* + * Does the work for cmd_list() and any other function that requires the + * contents of a linked list. See cmd_list description above for details. +@@ -3013,7 +3149,7 @@ + { + ulong next, last, first; + ulong searchfor, readflag; +- int count, others; ++ int i, count, others; + + if (CRASHDEBUG(1)) { + others = 0; +@@ -3038,7 +3174,11 @@ + console("list_head_offset: %ld\n", ld->list_head_offset); + console(" end: %lx\n", ld->end); + console(" searchfor: %lx\n", ld->searchfor); +- console(" structname: %s\n", ld->structname); ++ console(" structname_args: %lx\n", ld->structname_args); ++ if (!ld->structname_args) ++ console(" structname: (unused)\n"); ++ for (i = 0; i < ld->structname_args; i++) ++ console(" structname[%d]: %s\n", i, ld->structname[i]); + console(" header: %s\n", ld->header); + } + +@@ -3065,20 +3205,21 @@ + fprintf(fp, "%lx\n", next - ld->list_head_offset); + + if (ld->structname) { +- switch (count_chars(ld->structname, '.')) +- { +- case 0: +- dump_struct(ld->structname, +- next - ld->list_head_offset, 0); +- break; +- case 1: +- dump_struct_member(ld->structname, +- next - ld->list_head_offset, 0); +- break; +- default: +- error(FATAL, +- "invalid structure reference: %s\n", +- ld->structname); ++ for (i = 0; i < ld->structname_args; i++) { ++ switch (count_chars(ld->structname[i], '.')) ++ { ++ case 0: ++ dump_struct(ld->structname[i], ++ next - ld->list_head_offset, 0); ++ break; ++ case 1: ++ dump_struct_members(ld, i, next); ++ break; ++ default: ++ error(FATAL, ++ "invalid structure reference: %s\n", ++ ld->structname[i]); ++ } + } + } + } +@@ -3148,6 +3289,42 @@ + } + + /* ++ * Issue a dump_struct_member() call for one or more structure ++ * members. 
Multiple members are passed in a comma-separated
++ * list using the format:
++ *
++ * struct.member1,member2,member3
++ */
++void
++dump_struct_members(struct list_data *ld, int idx, ulong next)
++{
++ int i, argc;
++ char *p1, *p2;
++ char *structname, *members;
++ char *arglist[MAXARGS];
++
++ structname = GETBUF(strlen(ld->structname[idx])+1);
++ members = GETBUF(strlen(ld->structname[idx])+1);
++
++ strcpy(structname, ld->structname[idx]);
++ p1 = strstr(structname, ".") + 1;
++
++ p2 = strstr(ld->structname[idx], ".") + 1;
++ strcpy(members, p2);
++ replace_string(members, ",", ' ');
++ argc = parse_line(members, arglist);
++
++ for (i = 0; i < argc; i++) {
++ *p1 = NULLCHAR;
++ strcat(structname, arglist[i]);
++ dump_struct_member(structname, next - ld->list_head_offset, 0);
++ }
++
++ FREEBUF(structname);
++ FREEBUF(members);
++}
++
++/*
+ * The next set of functions are a general purpose hashing tool used to
+ * identify duplicate entries in a set of passed-in data, and if found,
+ * to fail the entry attempt. When a command wishes to verify a list
+@@ -3552,6 +3729,52 @@
+ return(-1);
+ }
+
++/*
++ * For a given value, check to see if a hash queue entry exists. If an
++ * entry is found, return TRUE; for all other possibilities return FALSE.
++ */
++int
++hq_entry_exists(ulong value)
++{
++ struct hash_table *ht;
++ struct hq_entry *list_entry;
++ long hqi;
++
++ if (!(pc->flags & HASH))
++ return FALSE;
++
++ ht = &hash_table;
++
++ if (ht->flags & (HASH_QUEUE_NONE))
++ return FALSE;
++
++ if (!(ht->flags & HASH_QUEUE_OPEN))
++ return FALSE;
++
++ hqi = HQ_INDEX(value);
++ list_entry = ht->memptr + ht->queue_heads[hqi].next;
++
++ while (TRUE) {
++ if (list_entry->value == value)
++ return TRUE;
++
++ if (list_entry->next >= ht->count) {
++ error(INFO, corrupt_hq,
++ list_entry->value,
++ list_entry->next,
++ list_entry->order);
++ ht->flags |= HASH_QUEUE_NONE;
++ return FALSE;
++ }
++
++ if (list_entry->next == 0)
++ break;
++
++ list_entry = ht->memptr + list_entry->next;
++ }
++
++ return FALSE;
++}
+
+ /*
+ * K&R power function for integers
+@@ -4210,6 +4433,9 @@
+ {
+ ulonglong total, days, hours, minutes, seconds;
+
++ if (CRASHDEBUG(2))
++ error(INFO, "convert_time: %lld (%llx)\n", count, count);
++
+ total = (count)/(ulonglong)machdep->hz;
+
+ days = total / SEC_DAYS;
+@@ -4297,15 +4523,140 @@
+ return STREQ(MACHINE_TYPE, type);
+ }
+
++int
++machine_type_mismatch(char *file, char *e_machine, char *alt, ulong query)
++{
++ if (machine_type(e_machine) || machine_type(alt))
++ return FALSE;
++
++ if (query == KDUMP_LOCAL) /* already printed by NETDUMP_LOCAL */
++ return TRUE;
++
++ error(WARNING, "machine type mismatch:\n");
++
++ fprintf(fp, " crash utility: %s\n", MACHINE_TYPE);
++ fprintf(fp, " %s: %s%s%s\n\n", file, e_machine,
++ alt ? " or " : "", alt ? alt : "");
++
++ return TRUE;
++}
+ void
+ command_not_supported()
+ {
+- error(FATAL, "command not supported on this architecture\n");
++ error(FATAL, "command not supported on this architecture or kernel\n");
+ }
+
+ void
+ option_not_supported(int c)
+ {
+- error(FATAL, "-%c option not supported on this architecture\n",
++ error(FATAL, "-%c option not supported on this architecture or kernel\n",
+ (char)c);
+ }
++
++void
++please_wait(char *s)
++{
++ if ((pc->flags & SILENT) || !(pc->flags & TTY) ||
++ !DUMPFILE() || (pc->flags & RUNTIME))
++ return;
++
++ pc->flags |= PLEASE_WAIT;
++
++ fprintf(fp, "\rplease wait... 
(%s)", s); ++ fflush(fp); ++} ++ ++void ++please_wait_done(void) ++{ ++ if ((pc->flags & SILENT) || !(pc->flags & TTY) || ++ !DUMPFILE() || (pc->flags & RUNTIME)) ++ return; ++ ++ pc->flags &= ~PLEASE_WAIT; ++ ++ fprintf(fp, "\r \r"); ++ fflush(fp); ++} ++ ++/* ++ * Compare two pathnames. ++ */ ++int ++pathcmp(char *p1, char *p2) ++{ ++ char c1, c2; ++ ++ do { ++ if ((c1 = *p1++) == '/') ++ while (*p1 == '/') { p1++; } ++ if ((c2 = *p2++) == '/') ++ while (*p2 == '/') { p2++; } ++ if (c1 == '\0') ++ return ((c2 == '/') && (*p2 == '\0')) ? 0 : c1 - c2; ++ } while (c1 == c2); ++ ++ return ((c2 == '\0') && (c1 == '/') && (*p1 == '\0')) ? 0 : c1 - c2; ++} ++ ++#include ++ ++/* ++ * Check the byte-order of an ELF file vs. the host byte order. ++ */ ++int ++endian_mismatch(char *file, char dumpfile_endian, ulong query) ++{ ++ char *endian; ++ ++ switch (dumpfile_endian) ++ { ++ case ELFDATA2LSB: ++ if (__BYTE_ORDER == __LITTLE_ENDIAN) ++ return FALSE; ++ endian = "big-endian"; ++ break; ++ case ELFDATA2MSB: ++ if (__BYTE_ORDER == __BIG_ENDIAN) ++ return FALSE; ++ endian = "little-endian"; ++ break; ++ default: ++ endian = "unknown"; ++ break; ++ } ++ ++ if (query == KDUMP_LOCAL) /* already printed by NETDUMP_LOCAL */ ++ return TRUE; ++ ++ error(WARNING, "endian mismatch:\n"); ++ ++ fprintf(fp, " crash utility: %s\n", ++ (__BYTE_ORDER == __LITTLE_ENDIAN) ? ++ "little-endian" : "big-endian"); ++ fprintf(fp, " %s: %s\n\n", file, endian); ++ ++ return TRUE; ++} ++ ++uint16_t ++swap16(uint16_t val, int swap) ++{ ++ if (swap) ++ return (((val & 0x00ff) << 8) | ++ ((val & 0xff00) >> 8)); ++ else ++ return val; ++} ++ ++uint32_t ++swap32(uint32_t val, int swap) ++{ ++ if (swap) ++ return (((val & 0x000000ffU) << 24) | ++ ((val & 0x0000ff00U) << 8) | ++ ((val & 0x00ff0000U) >> 8) | ++ ((val & 0xff000000U) >> 24)); ++ else ++ return val; ++} +--- crash/xen_hyper_command.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/xen_hyper_command.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1856 @@ +/* + * xen_hyper_command.c @@ -44081,8 +63743,14707 @@ +} + +#endif ---- crash/xen_hyper_global_data.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/xen_hyper_global_data.c 2007-08-23 17:02:54.000000000 -0400 +--- crash/main.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/main.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* main.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -16,6 +16,7 @@ + */ + + #include "defs.h" ++#include "xen_hyper_defs.h" + #include + #include + +@@ -23,23 +24,38 @@ + static int is_external_command(void); + static int is_builtin_command(void); + static int is_input_file(void); ++static void check_xen_hyper(void); + + static struct option long_options[] = { +- {"memory_module", 1, 0, 0}, +- {"memory_device", 1, 0, 0}, ++ {"memory_module", required_argument, 0, 0}, ++ {"memory_device", required_argument, 0, 0}, + {"no_kallsyms", 0, 0, 0}, + {"no_modules", 0, 0, 0}, + {"no_namelist_gzip", 0, 0, 0}, +- {"help", 0, 0, 0}, ++ {"help", optional_argument, 0, 'h'}, + {"data_debug", 0, 0, 0}, + {"no_data_debug", 0, 0, 0}, + {"no_crashrc", 0, 0, 0}, + {"no_kmem_cache", 0, 0, 0}, ++ {"kmem_cache_delay", 0, 0, 0}, + {"readnow", 0, 0, 0}, + {"smp", 0, 0, 0}, +- {"machdep", 1, 0, 0}, ++ {"machdep", required_argument, 0, 0}, + {"version", 0, 0, 0}, + {"buildinfo", 0, 0, 0}, ++ {"shadow_page_tables", 0, 0, 0}, ++ {"cpus", required_argument, 0, 0}, ++ {"no_ikconfig", 0, 0, 0}, ++ {"hyper", 0, 0, 0}, ++ {"p2m_mfn", required_argument, 0, 0}, ++ {"zero_excluded", 0, 0, 0}, ++ {"no_panic", 0, 0, 0}, ++ {"more", 0, 0, 0}, ++ {"less", 0, 0, 0}, ++ {"CRASHPAGER", 0, 0, 0}, ++ {"no_scroll", 0, 0, 0}, ++ {"reloc", required_argument, 0, 0}, ++ {"active", 0, 0, 0}, + {0, 0, 0, 0} + }; + +@@ -55,7 +71,7 @@ + */ + opterr = 0; + optind = 0; +- while((c = getopt_long(argc, argv, "LgH:h:e:i:sSvc:d:tf", ++ while((c = getopt_long(argc, argv, "Lkgh::e:i:sSvc:d:tfp:m:", + long_options, &option_index)) != -1) { + switch (c) + { +@@ -64,52 +80,55 @@ + "memory_module")) + pc->memory_module = optarg; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "memory_device")) + pc->memory_device = optarg; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "no_kallsyms")) + kt->flags |= NO_KALLSYMS; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "no_modules")) + kt->flags |= NO_MODULE_ACCESS; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, ++ "no_ikconfig")) ++ kt->flags |= NO_IKCONFIG; ++ ++ else if (STREQ(long_options[option_index].name, + "no_namelist_gzip")) + pc->flags |= NAMELIST_NO_GZIP; + +- if (STREQ(long_options[option_index].name, "help")) { +- program_usage(LONG_FORM); +- clean_exit(0); +- } +- +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "data_debug")) + pc->flags |= DATADEBUG; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "no_data_debug")) + pc->flags &= ~DATADEBUG; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "no_kmem_cache")) + vt->flags |= KMEM_CACHE_UNAVAIL; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, ++ "kmem_cache_delay")) ++ vt->flags |= KMEM_CACHE_DELAY; ++ ++ else if (STREQ(long_options[option_index].name, + "readnow")) + pc->flags |= READNOW; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "smp")) + kt->flags |= SMP; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "machdep")) + 
machdep->cmdline_arg = optarg; + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "version")) { + pc->flags |= VERSION_QUERY; + display_version(); +@@ -117,12 +136,72 @@ + clean_exit(0); + } + +- if (STREQ(long_options[option_index].name, ++ else if (STREQ(long_options[option_index].name, + "buildinfo")) { + dump_build_data(); + clean_exit(0); + } + ++ else if (STREQ(long_options[option_index].name, ++ "shadow_page_tables")) ++ kt->xen_flags |= SHADOW_PAGE_TABLES; ++ ++ else if (STREQ(long_options[option_index].name, "cpus")) ++ kt->cpus_override = optarg; ++ ++ else if (STREQ(long_options[option_index].name, "hyper")) ++ pc->flags |= XEN_HYPER; ++ ++ else if (STREQ(long_options[option_index].name, "p2m_mfn")) ++ xen_kdump_p2m_mfn(optarg); ++ ++ else if (STREQ(long_options[option_index].name, "zero_excluded")) ++ *diskdump_flags |= ZERO_EXCLUDED; ++ ++ else if (STREQ(long_options[option_index].name, "no_panic")) ++ tt->flags |= PANIC_TASK_NOT_FOUND; ++ ++ else if (STREQ(long_options[option_index].name, "more")) { ++ if ((pc->scroll_command != SCROLL_NONE) && ++ file_exists("/bin/more", NULL)) ++ pc->scroll_command = SCROLL_MORE; ++ } ++ ++ else if (STREQ(long_options[option_index].name, "less")) { ++ if ((pc->scroll_command != SCROLL_NONE) && ++ file_exists("/usr/bin/less", NULL)) ++ pc->scroll_command = SCROLL_LESS; ++ } ++ ++ else if (STREQ(long_options[option_index].name, "CRASHPAGER")) { ++ if ((pc->scroll_command != SCROLL_NONE) && ++ CRASHPAGER_valid()) ++ pc->scroll_command = SCROLL_CRASHPAGER; ++ } ++ ++ else if (STREQ(long_options[option_index].name, "no_scroll")) ++ pc->flags &= ~SCROLL; ++ ++ else if (STREQ(long_options[option_index].name, "no_crashrc")) ++ pc->flags |= NOCRASHRC; ++ ++ else if (STREQ(long_options[option_index].name, "active")) ++ tt->flags |= ACTIVE_ONLY; ++ ++ else if (STREQ(long_options[option_index].name, "reloc")) { ++ if (!calculate(optarg, &kt->relocate, NULL, 0)) { ++ error(INFO, "invalid --reloc argument: %s\n", ++ optarg); ++ program_usage(SHORT_FORM); ++ } ++ kt->flags |= RELOC_SET; ++ } ++ ++ else { ++ error(INFO, "internal error: option %s unhandled\n", ++ long_options[option_index].name); ++ program_usage(SHORT_FORM); ++ } + break; + + case 'f': +@@ -133,14 +212,25 @@ + pc->flags |= KERNEL_DEBUG_QUERY; + break; + +- case 'H': +- cmd_usage(optarg, COMPLETE_HELP); +- clean_exit(0); +- + case 'h': +- cmd_usage(optarg, COMPLETE_HELP|PIPE_TO_LESS); ++ /* note: long_getopt's handling of optional arguments is weak. ++ * To it, an optional argument must be part of the same argument ++ * as the flag itself (eg. --help=commands or -hcommands). ++ * We want to accept "--help commands" or "-h commands". ++ * So we must do that part ourselves. ++ */ ++ if (optarg != NULL) ++ cmd_usage(optarg, COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP); ++ else if (argv[optind] != NULL && argv[optind][0] != '-') ++ cmd_usage(argv[optind++], COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP); ++ else ++ program_usage(LONG_FORM); + clean_exit(0); + ++ case 'k': ++ pc->flags |= KERNTYPES; ++ break; ++ + case 'e': + if (STREQ(optarg, "vi")) + pc->editing_mode = "vi"; +@@ -168,7 +258,7 @@ + case 's': + pc->flags |= SILENT; + pc->flags &= ~SCROLL; +- pc->scroll_command = SCROLL_NONE; ++// pc->scroll_command = SCROLL_NONE; (why?) 
+ break; + + case 'L': +@@ -193,14 +283,18 @@ + set_vas_debug(pc->debug); + break; + ++ case 'p': ++ force_page_size(optarg); ++ break; ++ ++ case 'm': ++ machdep->cmdline_arg = optarg; ++ break; ++ + default: +- if (STREQ(argv[optind-1], "-h")) +- program_usage(LONG_FORM); +- else { +- error(INFO, "invalid option: %s\n", +- argv[optind-1]); +- program_usage(SHORT_FORM); +- } ++ error(INFO, "invalid option: %s\n", ++ argv[optind-1]); ++ program_usage(SHORT_FORM); + } + } + opterr = 1; +@@ -229,7 +323,7 @@ + } else if (!is_readable(argv[optind])) + program_usage(SHORT_FORM); + +- if (is_elf_file(argv[optind])) { ++ if (is_kernel(argv[optind])) { + if (pc->namelist || pc->server_namelist) { + if (!select_namelist(argv[optind])) { + error(INFO, +@@ -261,8 +355,36 @@ + } + pc->flags |= NETDUMP; + pc->dumpfile = argv[optind]; +- pc->readmem = read_netdump; +- pc->writemem = write_netdump; ++ ++ if (is_sadump_xen()) { ++ pc->readmem = read_kdump; ++ pc->writemem = write_kdump; ++ } else { ++ pc->readmem = read_netdump; ++ pc->writemem = write_netdump; ++ } ++ ++ } else if (is_kdump(argv[optind], KDUMP_LOCAL)) { ++ if (pc->flags & MEMORY_SOURCES) { ++ error(INFO, ++ "too many dumpfile arguments\n"); ++ program_usage(SHORT_FORM); ++ } ++ pc->flags |= KDUMP; ++ pc->dumpfile = argv[optind]; ++ pc->readmem = read_kdump; ++ pc->writemem = write_kdump; ++ ++ } else if (is_xendump(argv[optind])) { ++ if (pc->flags & MEMORY_SOURCES) { ++ error(INFO, ++ "too many dumpfile arguments\n"); ++ program_usage(SHORT_FORM); ++ } ++ pc->flags |= XENDUMP; ++ pc->dumpfile = argv[optind]; ++ pc->readmem = read_xendump; ++ pc->writemem = write_xendump; + + } else if (is_diskdump(argv[optind])) { + if (pc->flags & MEMORY_SOURCES) { +@@ -322,6 +444,8 @@ + optind++; + } + ++ check_xen_hyper(); ++ + if (setjmp(pc->main_loop_env)) + clean_exit(1); + +@@ -332,11 +456,10 @@ + buf_init(); + cmdline_init(); + mem_init(); ++ hq_init(); + machdep_init(PRE_SYMTAB); + symtab_init(); + machdep_init(PRE_GDB); +- kernel_init(PRE_GDB); +- verify_version(); + datatype_init(); + + /* +@@ -361,17 +484,28 @@ + { + if (!(pc->flags & GDB_INIT)) { + gdb_session_init(); +- kernel_init(POST_GDB); +- machdep_init(POST_GDB); +- vm_init(); +- hq_init(); +- module_init(); +- help_init(); +- task_init(); +- vfs_init(); +- net_init(); +- dev_init(); +- machdep_init(POST_INIT); ++ if (XEN_HYPER_MODE()) { ++#ifdef XEN_HYPERVISOR_ARCH ++ machdep_init(POST_GDB); ++ xen_hyper_init(); ++ machdep_init(POST_INIT); ++#else ++ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); ++#endif ++ } else { ++ read_in_kernel_config(IKCFG_INIT); ++ kernel_init(); ++ machdep_init(POST_GDB); ++ vm_init(); ++ machdep_init(POST_VM); ++ module_init(); ++ help_init(); ++ task_init(); ++ vfs_init(); ++ net_init(); ++ dev_init(); ++ machdep_init(POST_INIT); ++ } + } else + SIGACTION(SIGINT, restart, &pc->sigaction, NULL); + +@@ -379,8 +513,17 @@ + * Display system statistics and current context. 
+ */ + if (!(pc->flags & SILENT) && !(pc->flags & RUNTIME)) { +- display_sys_stats(); +- show_context(CURRENT_CONTEXT()); ++ if (XEN_HYPER_MODE()) { ++#ifdef XEN_HYPERVISOR_ARCH ++ xen_hyper_display_sys_stats(); ++ xen_hyper_show_vcpu_context(XEN_HYPER_VCPU_LAST_CONTEXT()); ++#else ++ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); ++#endif ++ } else { ++ display_sys_stats(); ++ show_context(CURRENT_CONTEXT()); ++ } + fprintf(fp, "\n"); + } + +@@ -426,8 +569,17 @@ + + if ((ct = get_command_table_entry(args[0]))) { + if (ct->flags & REFRESH_TASK_TABLE) { +- tt->refresh_task_table(); +- sort_context_array(); ++ if (XEN_HYPER_MODE()) { ++#ifdef XEN_HYPERVISOR_ARCH ++ xen_hyper_refresh_domain_context_space(); ++ xen_hyper_refresh_vcpu_context_space(); ++#else ++ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); ++#endif ++ } else { ++ tt->refresh_task_table(); ++ sort_context_array(); ++ } + } + if (!STREQ(pc->curcmd, pc->program_name)) + pc->lastcmd = pc->curcmd; +@@ -459,6 +611,9 @@ + + pc->curcmd = pc->program_name; + error(INFO, "command not found: %s\n", args[0]); ++ ++ if (pc->curcmd_flags & REPEAT) ++ pc->curcmd_flags &= ~REPEAT; + } + + +@@ -471,7 +626,7 @@ + struct command_table_entry *cp; + struct extension_table *ext; + +- for (cp = &base_command_table[0]; cp->name; cp++) { ++ for (cp = pc->cmd_table; cp->name; cp++) { + if (STREQ(cp->name, name)) + return cp; + } +@@ -591,6 +746,8 @@ + int i; + char *p1; + char buf[BUFSIZE]; ++ char homerc[BUFSIZE]; ++ char localrc[BUFSIZE]; + FILE *afp; + char *program; + +@@ -625,7 +782,8 @@ + machdep->verify_paddr = generic_verify_paddr; + pc->redhat_debug_loc = DEFAULT_REDHAT_DEBUG_LOCATION; + pc->cmdgencur = 0; +- pc->cmdgenspec = ~pc->cmdgencur; ++ pc->cmd_table = linux_command_table; ++ kt->BUG_bytes = -1; + + /* + * Get gdb version before initializing it since this might be one +@@ -637,7 +795,10 @@ + * Set up the default scrolling behavior for terminal output. 
+ */ + if (isatty(fileno(stdout))) { +- if (file_exists("/usr/bin/less", NULL)) { ++ if (CRASHPAGER_valid()) { ++ pc->flags |= SCROLL; ++ pc->scroll_command = SCROLL_CRASHPAGER; ++ } else if (file_exists("/usr/bin/less", NULL)) { + pc->flags |= SCROLL; + pc->scroll_command = SCROLL_LESS; + } else if (file_exists("/bin/more", NULL)) { +@@ -685,11 +846,11 @@ + pc->home = "(unknown)"; + } else + strcpy(pc->home, p1); +- sprintf(buf, "%s/.%src", pc->home, pc->program_name); +- if (!(pc->flags & NOCRASHRC) && file_exists(buf, NULL)) { +- if ((afp = fopen(buf, "r")) == NULL) ++ sprintf(homerc, "%s/.%src", pc->home, pc->program_name); ++ if (!(pc->flags & NOCRASHRC) && file_exists(homerc, NULL)) { ++ if ((afp = fopen(homerc, "r")) == NULL) + error(INFO, "cannot open %s: %s\n", +- buf, strerror(errno)); ++ homerc, strerror(errno)); + else { + while (fgets(buf, BUFSIZE, afp)) + resolve_rc_cmd(buf, ALIAS_RCHOME); +@@ -698,11 +859,12 @@ + } + } + +- sprintf(buf, ".%src", pc->program_name); +- if (!(pc->flags & NOCRASHRC) && file_exists(buf, NULL)) { +- if ((afp = fopen(buf, "r")) == NULL) ++ sprintf(localrc, ".%src", pc->program_name); ++ if (!same_file(homerc, localrc) && ++ !(pc->flags & NOCRASHRC) && file_exists(localrc, NULL)) { ++ if ((afp = fopen(localrc, "r")) == NULL) + error(INFO, "cannot open %s: %s\n", +- buf, strerror(errno)); ++ localrc, strerror(errno)); + else { + while (fgets(buf, BUFSIZE, afp)) + resolve_rc_cmd(buf, ALIAS_RCLOCAL); +@@ -712,6 +874,8 @@ + + if (STREQ(pc->editing_mode, "no_mode")) + pc->editing_mode = "vi"; ++ ++ machdep_init(SETUP_ENV); + } + + +@@ -840,13 +1004,22 @@ + if (pc->flags & REM_S390D) + sprintf(&buf[strlen(buf)], + "%sREM_S390D", others++ ? "|" : ""); +- if (pc->flags & NETDUMP) ++ if (pc->flags & NETDUMP) + sprintf(&buf[strlen(buf)], + "%sNETDUMP", others++ ? "|" : ""); ++ if (pc->flags & XENDUMP) ++ sprintf(&buf[strlen(buf)], ++ "%sXENDUMP", others++ ? "|" : ""); ++ if (pc->flags & KDUMP) ++ sprintf(&buf[strlen(buf)], ++ "%sKDUMP", others++ ? "|" : ""); ++ if (pc->flags & SYSRQ) ++ sprintf(&buf[strlen(buf)], ++ "%sSYSRQ", others++ ? "|" : ""); + if (pc->flags & REM_NETDUMP) + sprintf(&buf[strlen(buf)], + "%sREM_NETDUMP", others++ ? "|" : ""); +- if (pc->flags & DISKDUMP) ++ if (pc->flags & DISKDUMP) + sprintf(&buf[strlen(buf)], + "%sDISKDUMP", others++ ? "|" : ""); + if (pc->flags & SYSMAP) +@@ -855,21 +1028,36 @@ + if (pc->flags & SYSMAP_ARG) + sprintf(&buf[strlen(buf)], + "%sSYSMAP_ARG", others++ ? "|" : ""); +- if (pc->flags & DATADEBUG) ++ if (pc->flags & DATADEBUG) + sprintf(&buf[strlen(buf)], + "%sDATADEBUG", others++ ? "|" : ""); +- if (pc->flags & FINDKERNEL) ++ if (pc->flags & FINDKERNEL) + sprintf(&buf[strlen(buf)], + "%sFINDKERNEL", others++ ? "|" : ""); +- if (pc->flags & VERSION_QUERY) ++ if (pc->flags & VERSION_QUERY) + sprintf(&buf[strlen(buf)], + "%sVERSION_QUERY", others++ ? "|" : ""); +- if (pc->flags & READNOW) ++ if (pc->flags & READNOW) + sprintf(&buf[strlen(buf)], + "%sREADNOW", others++ ? "|" : ""); +- if (pc->flags & NOCRASHRC) ++ if (pc->flags & NOCRASHRC) + sprintf(&buf[strlen(buf)], + "%sNOCRASHRC", others++ ? "|" : ""); ++ if (pc->flags & INIT_IFILE) ++ sprintf(&buf[strlen(buf)], ++ "%sINIT_IFILE", others++ ? "|" : ""); ++ if (pc->flags & XEN_HYPER) ++ sprintf(&buf[strlen(buf)], ++ "%sXEN_HYPER", others++ ? "|" : ""); ++ if (pc->flags & XEN_CORE) ++ sprintf(&buf[strlen(buf)], ++ "%sXEN_CORE", others++ ? "|" : ""); ++ if (pc->flags & PLEASE_WAIT) ++ sprintf(&buf[strlen(buf)], ++ "%sPLEASE_WAIT", others++ ? 
"|" : ""); ++ if (pc->flags & IFILE_ERROR) ++ sprintf(&buf[strlen(buf)], ++ "%sIFILE_ERROR", others++ ? "|" : ""); + + if (pc->flags) + strcat(buf, ")"); +@@ -933,10 +1121,36 @@ + fprintf(fp, " ifile_pipe: %lx\n", (ulong)pc->ifile_pipe); + fprintf(fp, " ifile_ofile: %lx\n", (ulong)pc->ifile_ofile); + fprintf(fp, " input_file: %s\n", pc->input_file); +- fprintf(fp, " scroll_command: %s\n", +- pc->scroll_command == SCROLL_NONE ? "(none)" : +- pc->scroll_command == SCROLL_LESS ? +- "/usr/bin/less" : "/bin/more"); ++ fprintf(fp, "ifile_in_progress: %lx (", pc->ifile_in_progress); ++ others = 0; ++ if (pc->ifile_in_progress & RCHOME_IFILE) ++ fprintf(fp, "%sRCHOME_IFILE", others++ ? "|" : ""); ++ if (pc->ifile_in_progress & RCLOCAL_IFILE) ++ fprintf(fp, "%sRCLOCAL_IFILE", others++ ? "|" : ""); ++ if (pc->ifile_in_progress & CMDLINE_IFILE) ++ fprintf(fp, "%sCMDLINE_IFILE", others++ ? "|" : ""); ++ if (pc->ifile_in_progress & RUNTIME_IFILE) ++ fprintf(fp, "%sRUNTIME_IFILE", others++ ? "|" : ""); ++ fprintf(fp, ")\n"); ++ fprintf(fp, " ifile_offset: %lld\n", (ulonglong)pc->ifile_offset); ++ fprintf(fp, "runtime_ifile_cmd: %s\n", pc->runtime_ifile_cmd ? ++ pc->runtime_ifile_cmd : "(unused)"); ++ fprintf(fp, " scroll_command: "); ++ switch (pc->scroll_command) ++ { ++ case SCROLL_NONE: ++ fprintf(fp, "SCROLL_NONE\n"); ++ break; ++ case SCROLL_LESS: ++ fprintf(fp, "SCROLL_LESS\n"); ++ break; ++ case SCROLL_MORE: ++ fprintf(fp, "SCROLL_MORE\n"); ++ break; ++ case SCROLL_CRASHPAGER: ++ fprintf(fp, "SCROLL_CRASHPAGER (%s)\n", getenv("CRASHPAGER")); ++ break; ++ } + + buf[0] = NULLCHAR; + fprintf(fp, " redirect: %lx ", pc->redirect); +@@ -1008,6 +1222,8 @@ + fprintf(fp, " tmp_fp: %lx\n", (ulong)pc->tmp_fp); + fprintf(fp, " tmpfile2: %lx\n", (ulong)pc->tmpfile2); + ++ fprintf(fp, " cmd_table: %s\n", XEN_HYPER_MODE() ? ++ "xen_hyper_command_table" : "linux_command_table"); + fprintf(fp, " curcmd: %s\n", pc->curcmd); + fprintf(fp, " lastcmd: %s\n", pc->lastcmd); + fprintf(fp, " cur_gdb_cmd: %d %s\n", pc->cur_gdb_cmd, +@@ -1016,7 +1232,30 @@ + gdb_command_string(pc->last_gdb_cmd, buf, FALSE)); + fprintf(fp, " cur_req: %lx\n", (ulong)pc->cur_req); + fprintf(fp, " cmdgencur: %ld\n", pc->cmdgencur); +- fprintf(fp, " cmdgenspec: %ld\n", pc->cmdgenspec); ++ fprintf(fp, " curcmd_flags: %lx (", pc->curcmd_flags); ++ others = 0; ++ if (pc->curcmd_flags & XEN_MACHINE_ADDR) ++ fprintf(fp, "%sXEN_MACHINE_ADDR", others ? "|" : ""); ++ if (pc->curcmd_flags & REPEAT) ++ fprintf(fp, "%sREPEAT", others ? "|" : ""); ++ if (pc->curcmd_flags & IDLE_TASK_SHOWN) ++ fprintf(fp, "%sIDLE_TASK_SHOWN", others ? "|" : ""); ++ if (pc->curcmd_flags & TASK_SPECIFIED) ++ fprintf(fp, "%sTASK_SPECIFIED", others ? "|" : ""); ++ if (pc->curcmd_flags & MEMTYPE_UVADDR) ++ fprintf(fp, "%sMEMTYPE_UVADDR", others ? "|" : ""); ++ if (pc->curcmd_flags & MEMTYPE_FILEADDR) ++ fprintf(fp, "%sMEMTYPE_FILEADDR", others ? "|" : ""); ++ if (pc->curcmd_flags & HEADER_PRINTED) ++ fprintf(fp, "%sHEADER_PRINTED", others ? "|" : ""); ++ if (pc->curcmd_flags & BAD_INSTRUCTION) ++ fprintf(fp, "%sBAD_INSTRUCTION", others ? "|" : ""); ++ if (pc->curcmd_flags & UD2A_INSTRUCTION) ++ fprintf(fp, "%sUD2A_INSTRUCTION", others ? "|" : ""); ++ if (pc->curcmd_flags & IRQ_IN_USE) ++ fprintf(fp, "%sIRQ_IN_USE", others ? 
"|" : ""); ++ fprintf(fp, ")\n"); ++ fprintf(fp, " curcmd_private: %llx\n", pc->curcmd_private); + fprintf(fp, " sigint_cnt: %d\n", pc->sigint_cnt); + fprintf(fp, " sigaction: %lx\n", (ulong)&pc->sigaction); + fprintf(fp, " gdb_sigaction: %lx\n", (ulong)&pc->gdb_sigaction); +@@ -1051,8 +1290,16 @@ + fprintf(fp, " readmem: read_daemon()\n"); + else if (pc->readmem == read_netdump) + fprintf(fp, " readmem: read_netdump()\n"); ++ else if (pc->readmem == read_xendump) ++ fprintf(fp, " readmem: read_xendump()\n"); ++ else if (pc->readmem == read_kdump) ++ fprintf(fp, " readmem: read_kdump()\n"); + else if (pc->readmem == read_memory_device) + fprintf(fp, " readmem: read_memory_device()\n"); ++ else if (pc->readmem == read_xendump_hyper) ++ fprintf(fp, " readmem: read_xendump_hyper()\n"); ++ else if (pc->readmem == read_diskdump) ++ fprintf(fp, " readmem: read_diskdump()\n"); + else + fprintf(fp, " readmem: %lx\n", (ulong)pc->readmem); + if (pc->writemem == write_dev_mem) +@@ -1065,8 +1312,14 @@ + fprintf(fp, " writemem: write_daemon()\n"); + else if (pc->writemem == write_netdump) + fprintf(fp, " writemem: write_netdump()\n"); ++ else if (pc->writemem == write_xendump) ++ fprintf(fp, " writemem: write_xendump()\n"); ++ else if (pc->writemem == write_kdump) ++ fprintf(fp, " writemem: write_kdump()\n"); + else if (pc->writemem == write_memory_device) + fprintf(fp, " writemem: write_memory_device()\n"); ++ else if (pc->writemem == write_diskdump) ++ fprintf(fp, " writemem: write_diskdump()\n"); + else + fprintf(fp, " writemem: %lx\n", (ulong)pc->writemem); + +@@ -1100,3 +1353,28 @@ + + exit(status); + } ++ ++/* ++ * Check whether this session is for xen hypervisor analysis. ++ */ ++static void ++check_xen_hyper(void) ++{ ++ if (!pc->namelist) ++ return; ++ ++ if (!XEN_HYPER_MODE()) { ++ if (STRNEQ(basename(pc->namelist), "xen-syms")) ++ pc->flags |= XEN_HYPER; ++ else ++ return; ++ } ++ ++#ifdef XEN_HYPERVISOR_ARCH ++ pc->cmd_table = xen_hyper_command_table; ++ if (pc->flags & XENDUMP) ++ pc->readmem = read_xendump_hyper; ++#else ++ error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); ++#endif ++} +--- crash/s390dbf.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/s390dbf.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1340 @@ ++/* ++ * s390 debug feature command for crash ++ * ++ * Copyright (C) IBM Corp. 
2006 ++ * Author(s): Michael Holzheu ++ */ ++ ++#if defined(S390) || defined(S390X) ++ ++#include "defs.h" ++#include ++#include ++ ++/* ++ * Compat layer to integrate lcrash commands into crash ++ * Maps lcrash API to crash functions ++ */ ++ ++#define KL_NBPW sizeof(long) ++#define KL_ERRORFP stderr ++#define MAX_ARGS 128 ++#define MAX_CMDLINE 256 ++ ++#define C_FALSE 0x00000001 /* Command takes no arguments */ ++#define C_TRUE 0x00000002 /* Command requires arguments */ ++#define C_ALL 0x00000004 /* All elements */ ++#define C_PERM 0x00000008 /* Allocate perminant blocks */ ++#define C_TEMP 0x00000000 /* For completeness */ ++#define C_FULL 0x00000010 /* Full output */ ++#define C_LIST 0x00000020 /* List items */ ++#define C_NEXT 0x00000040 /* Follow links */ ++#define C_WRITE 0x00000080 /* Write output to file */ ++#define C_NO_OPCHECK 0x00000100 /* Don't reject bad cmd line options */ ++#define C_ITER 0x00000200 /* set iteration threshold */ ++ ++#define C_LFLG_SHFT 12 ++ ++#define KL_ARCH_S390 0 ++#define KL_ARCH_S390X 1 ++#ifdef __s390x__ ++#define KL_ARCH KL_ARCH_S390X ++#define FMTPTR "l" ++#define KL_PTRSZ 8 ++#else ++#define KL_ARCH KL_ARCH_S390 ++#define FMTPTR "ll" ++#define KL_PTRSZ 4 ++#endif ++ ++typedef unsigned long uaddr_t; ++typedef unsigned long kaddr_t; ++ ++typedef struct _syment { ++ char *s_name; ++ kaddr_t s_addr; ++} syment_t; ++ ++typedef struct option_s { ++ struct option_s *op_next; ++ char op_char; ++ char *op_arg; ++} option_t; ++ ++typedef struct command_s { ++ int flags; ++ char cmdstr[MAX_CMDLINE]; ++ char *command; ++ char *cmdline; ++ option_t *options; ++ int nargs; ++ char *args[MAX_ARGS]; ++ char *pipe_cmd; ++ FILE *ofp; ++ FILE *efp; ++} command_t; ++ ++static inline syment_t* kl_lkup_symaddr(kaddr_t addr) ++{ ++ static syment_t sym; ++ struct syment *crash_sym; ++ ++ crash_sym = value_search(addr, &sym.s_addr); ++ if (!crash_sym) ++ return NULL; ++ sym.s_name = crash_sym->name; ++ return &sym; ++} ++ ++static inline syment_t* kl_lkup_symname(char* name) ++{ ++ static syment_t sym; ++ sym.s_addr = symbol_value(name); ++ sym.s_name = NULL; ++ if(!sym.s_addr) ++ return NULL; ++ else ++ return &sym; ++} ++ ++static inline void GET_BLOCK(kaddr_t addr, int size, void* ptr) ++{ ++ readmem(addr, KVADDR,ptr,size,"GET_BLOCK",FAULT_ON_ERROR); ++} ++ ++static inline kaddr_t KL_VREAD_PTR(kaddr_t addr) ++{ ++ unsigned long ptr; ++ readmem(addr, KVADDR,&ptr,sizeof(ptr),"GET_BLOCK",FAULT_ON_ERROR); ++ return (kaddr_t)ptr; ++} ++ ++static inline uint32_t KL_GET_UINT32(void* ptr) ++{ ++ return *((uint32_t*)ptr); ++} ++ ++static inline uint64_t KL_GET_UINT64(void* ptr) ++{ ++ return *((uint64_t*)ptr); ++} ++ ++static inline kaddr_t KL_GET_PTR(void* ptr) ++{ ++ return *((kaddr_t*)ptr); ++} ++ ++static inline void* K_PTR(void* addr, char* struct_name, char* member_name) ++{ ++ return addr+MEMBER_OFFSET(struct_name,member_name); ++} ++ ++static inline uint32_t KL_UINT(void* ptr, char* struct_name, char* member_name) ++{ ++ return (uint32_t) ULONG(ptr+MEMBER_OFFSET(struct_name,member_name)); ++} ++ ++static inline uint32_t KL_VREAD_UINT32(kaddr_t addr) ++{ ++ uint32_t rc; ++ readmem(addr, KVADDR,&rc,sizeof(rc),"KL_VREAD_UINT32",FAULT_ON_ERROR); ++ return rc; ++} ++ ++static inline uint32_t KL_INT(void* ptr, char* struct_name, char* member_name) ++{ ++ return UINT(ptr+MEMBER_OFFSET(struct_name,member_name)); ++} ++ ++static inline int set_cmd_flags(command_t *cmd, int flags, char *extraops) ++{ ++ return 0; ++} ++ ++static inline void kl_s390tod_to_timeval(uint64_t 
todval, struct timeval *xtime) ++{ ++ todval -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); ++ ++ todval >>= 12; ++ xtime->tv_sec = todval / 1000000; ++ xtime->tv_usec = todval % 1000000; ++} ++ ++static inline int kl_struct_len(char* struct_name) ++{ ++ return STRUCT_SIZE(struct_name); ++} ++ ++static inline kaddr_t kl_funcaddr(kaddr_t addr) ++{ ++ struct syment *crash_sym; ++ ++ crash_sym = value_search(addr, &addr); ++ if (!crash_sym) ++ return -1; ++ else ++ return crash_sym->value; ++} ++ ++#define CMD_USAGE(cmd, s) \ ++ fprintf(cmd->ofp, "Usage: %s %s\n", cmd->command, s); \ ++ fprintf(cmd->ofp, "Enter \"help %s\" for details.\n",cmd->command); ++ ++/* ++ * s390 debug feature implementation ++ */ ++ ++#ifdef DBF_DYNAMIC_VIEWS /* views defined in shared libs */ ++#include ++#endif ++ ++/* Local flags ++ */ ++ ++#define LOAD_FLAG (1 << C_LFLG_SHFT) ++#define VIEWS_FLAG (2 << C_LFLG_SHFT) ++ ++#ifndef MIN ++#define MIN(a,b) (((a)<(b))?(a):(b)) ++#endif ++ ++/* Stuff which has to match with include/asm-s390/debug.h */ ++ ++#define DBF_VERSION_V1 1 ++#define DBF_VERSION_V2 2 ++#define PAGE_SIZE 4096 ++#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ ++#define DEBUG_MAX_PROCF_LEN 16 /* max length for a proc file name */ ++#define DEBUG_SPRINTF_MAX_ARGS 10 ++ ++/* define debug-structures for lcrash */ ++#define DEBUG_DATA(entry) (char*)(entry + 1) ++ ++typedef struct debug_view_s debug_view_t; ++ ++/* struct to hold contents of struct __debug_entry from dump ++ */ ++typedef struct debug_entry_s{ ++ union { ++ struct { ++ unsigned long long clock:52; ++ unsigned long long exception:1; ++ unsigned long long level:3; ++ unsigned long long cpuid:8; ++ } fields; ++ ++ unsigned long long stck; ++ } id; ++ kaddr_t caller; /* changed from void* to kaddr_t */ ++} __attribute__((packed)) debug_entry_t; ++/* typedef struct __debug_entry debug_entry_t; */ ++ ++ ++static unsigned int dbf_version; ++ ++/* struct is used to manage contents of structs debug_info from dump ++ * in lcrash ++ */ ++typedef struct debug_info_s { ++ struct debug_info_s *next; ++ struct debug_info_s *prev; ++ kaddr_t next_dbi; /* store next ptr of struct in dump */ ++ kaddr_t prev_dbi; /* store prev ptr of struct in dump */ ++ int level; ++ int nr_areas; ++ int page_order; ++ int buf_size; ++ int entry_size; ++ void **areas; /* contents of debug areas from dump */ ++ int active_area; ++ int *active_entry; /* change to uint32_t ? 
*/ ++ debug_view_t *views[DEBUG_MAX_VIEWS]; ++ char name[DEBUG_MAX_PROCF_LEN]; ++ kaddr_t addr; ++ int pages_per_area_v2; ++ void ***areas_v2; ++} debug_info_t; ++ ++ ++/* functions to generate dbf output ++ */ ++typedef int (debug_header_proc_t) (debug_info_t* id, debug_view_t* view, ++ int area, debug_entry_t* entry, ++ char* out_buf); ++typedef int (debug_format_proc_t) (debug_info_t* id, debug_view_t* view, ++ char* out_buf, const char* in_buf); ++typedef int (debug_prolog_proc_t) (debug_info_t* id, debug_view_t* view, ++ char* out_buf); ++ ++struct debug_view_s { ++ char name[DEBUG_MAX_PROCF_LEN]; ++ debug_prolog_proc_t* prolog_proc; ++ debug_header_proc_t* header_proc; ++ debug_format_proc_t* format_proc; ++ void* private_data; ++}; ++ ++#define LCRASH_DB_VIEWS 1000 ++ ++static debug_info_t *debug_area_first = NULL; ++static debug_info_t *debug_area_last = NULL; ++static debug_view_t *debug_views[LCRASH_DB_VIEWS]; ++static int initialized = 0; ++static iconv_t ebcdic_ascii_conv = 0; ++ ++void s390dbf_usage(command_t * cmd); ++static int add_lcrash_debug_view(debug_view_t *); ++static int dbe_size = 0; ++ ++static void ++EBCASC(char *inout, size_t len) ++{ ++ iconv(ebcdic_ascii_conv, &inout, &len, &inout, &len); ++} ++ ++/* ++ * prints header for debug entry ++ */ ++static int ++dflt_header_fn(debug_info_t * id, debug_view_t *view, ++ int area, debug_entry_t * entry, char *out_buf) ++{ ++ struct timeval time_val; ++ unsigned long long time; ++ char *except_str; ++ kaddr_t caller; ++ int rc = 0; ++ char *caller_name; ++ int offset; ++ char caller_buf[30]; ++ unsigned int level; ++ syment_t *caller_sym; ++ debug_entry_t lentry; /* store byte swapped values of entry */ ++ ++ lentry.id.stck = KL_GET_UINT64(&entry->id); ++ lentry.caller = KL_GET_PTR(&entry->caller); ++ level = lentry.id.fields.level; ++ time = lentry.id.stck; ++ ++ kl_s390tod_to_timeval(time, &time_val); ++ ++ if (lentry.id.fields.exception) ++ except_str = "*"; ++ else ++ except_str = "-"; ++ caller = lentry.caller; ++ if(KL_ARCH == KL_ARCH_S390){ ++ caller &= 0x7fffffff; ++ } ++ caller_sym = kl_lkup_symaddr(caller); ++ if(caller_sym){ ++ caller_name = caller_sym->s_name; ++ offset = caller - kl_funcaddr(caller); ++ } ++ else { ++ sprintf(caller_buf, "%llx", (unsigned long long)caller); ++ caller_name = caller_buf; ++ offset = 0; ++ } ++ ++ if(KL_ARCH == KL_ARCH_S390X){ ++ rc += sprintf(out_buf, ++ "%02i %011lu:%06lu %1u %1s %02i <%20s+%04i> ", ++ area, time_val.tv_sec, time_val.tv_usec, level, ++ except_str, entry->id.fields.cpuid, caller_name, ++ offset); ++ } else { ++ rc += sprintf(out_buf, ++ "%02i %011lu:%06lu %1u %1s %02i <%-20s+%04i> ", ++ area, time_val.tv_sec, time_val.tv_usec, level, ++ except_str, lentry.id.fields.cpuid, caller_name, ++ offset); ++ } ++ return rc; ++} ++ ++/* ++ * prints debug header in raw format ++ */ ++static int ++raw_header_fn(debug_info_t * id, debug_view_t *view, ++ int area, debug_entry_t * entry, char *out_buf) ++{ ++ int rc; ++ ++ rc = sizeof(debug_entry_t); ++ if (out_buf == NULL) ++ goto out; ++ memcpy(out_buf,entry,sizeof(debug_entry_t)); ++ out: ++ return rc; ++} ++ ++/* ++ * prints debug data in raw format ++ */ ++static int ++raw_format_fn(debug_info_t * id, debug_view_t *view, ++ char *out_buf, const char *in_buf) ++{ ++ int rc; ++ ++ rc = id->buf_size; ++ if (out_buf == NULL || in_buf == NULL) ++ goto out; ++ memcpy(out_buf, in_buf, id->buf_size); ++ out: ++ return rc; ++} ++ ++/* ++ * prints debug data in hex/ascii format ++ */ ++static int ++hex_ascii_format_fn(debug_info_t 
* id, debug_view_t *view,
++ char *out_buf, const char *in_buf)
++{
++ int i, rc = 0;
++
++ if (out_buf == NULL || in_buf == NULL) {
++ rc = id->buf_size * 4 + 3;
++ goto out;
++ }
++ for (i = 0; i < id->buf_size; i++) {
++ rc += sprintf(out_buf + rc, "%02x ",
++ ((unsigned char *) in_buf)[i]);
++ }
++ rc += sprintf(out_buf + rc, "| ");
++ for (i = 0; i < id->buf_size; i++) {
++ unsigned char c = in_buf[i];
++ if (!isprint(c))
++ rc += sprintf(out_buf + rc, ".");
++ else
++ rc += sprintf(out_buf + rc, "%c", c);
++ }
++ rc += sprintf(out_buf + rc, "\n");
++ out:
++ return rc;
++}
++
++/*
++ * prints debug data in sprintf format
++ */
++static int
++sprintf_format_fn(debug_info_t * id, debug_view_t *view,
++ char *out_buf, const char *in_buf)
++{
++#define _BUFSIZE 1024
++ char buf[_BUFSIZE];
++ int i, k, rc = 0, num_longs = 0, num_used_args = 0, num_strings = 0;
++ /* use kaddr_t to store long values of 32bit and 64bit archs here */
++ kaddr_t inbuf_cpy[DEBUG_SPRINTF_MAX_ARGS];
++ /* store ptrs to strings to be deallocated at end of this function */
++ uaddr_t to_dealloc[DEBUG_SPRINTF_MAX_ARGS];
++ kaddr_t addr;
++
++ memset(buf, 0, sizeof(buf));
++ memset(inbuf_cpy, 0, sizeof(inbuf_cpy));
++ memset(to_dealloc, 0, sizeof(to_dealloc));
++
++ if (out_buf == NULL || in_buf == NULL) {
++ rc = id->buf_size * 4 + 3;
++ goto out;
++ }
++
++ /* get the format string into buf */
++ addr = KL_GET_PTR((void*)in_buf);
++ GET_BLOCK(addr, _BUFSIZE, buf);
++
++ k = 0;
++ for (i = 0; buf[i] && (buf[i] != '\n'); i++) {
++ if (buf[i] != '%')
++ continue;
++ if (k == DEBUG_SPRINTF_MAX_ARGS) {
++ fprintf(KL_ERRORFP,
++ "\nToo many parameters in sprintf view (%i)\n"
++ ,k + 1);
++ fprintf(KL_ERRORFP, "Format String: %s)\n", buf);
++ break;
++ }
++ /* for sprintf we have only unsigned long values ... */
++ if (buf[i+1] != 's'){
++ /* we use KL_GET_PTR here to read ulong value */
++ addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW));
++ inbuf_cpy[k] = addr;
++ } else { /* ... 
or ptrs to strings in debug areas */ ++ inbuf_cpy[k] = (uaddr_t) malloc(_BUFSIZE); ++ to_dealloc[num_strings++] = inbuf_cpy[k]; ++ addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW)); ++ GET_BLOCK(addr, _BUFSIZE, ++ (void*)(uaddr_t)(inbuf_cpy[k])); ++ } ++ k++; ++ } ++ ++ /* count of longs fit into one entry */ ++ num_longs = id->buf_size / KL_NBPW; /* sizeof(long); */ ++ if(num_longs < 1) /* bufsize of entry too small */ ++ goto out; ++ if(num_longs == 1) { /* no args, just print the format string */ ++ rc = sprintf(out_buf + rc, "%s", buf); ++ goto out; ++ } ++ ++ /* number of arguments used for sprintf (without the format string) */ ++ num_used_args = MIN(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1)); ++ ++ rc = sprintf(out_buf + rc, buf, (uaddr_t)(inbuf_cpy[0]), ++ (uaddr_t)(inbuf_cpy[1]), (uaddr_t)(inbuf_cpy[2]), ++ (uaddr_t)(inbuf_cpy[3]), (uaddr_t)(inbuf_cpy[4]), ++ (uaddr_t)(inbuf_cpy[5]), (uaddr_t)(inbuf_cpy[6]), ++ (uaddr_t)(inbuf_cpy[7]), (uaddr_t)(inbuf_cpy[8]), ++ (uaddr_t)(inbuf_cpy[9])); ++ out: ++ while (num_strings--){ ++ free((char*)(to_dealloc[num_strings])); ++ } ++ return rc; ++} ++ ++ ++/*********************************** ++ * functions for debug-views ++ ***********************************/ ++ ++/* ++ * prints out actual debug level ++ */ ++static int ++prolog_level_fn(debug_info_t * id, ++ debug_view_t *view, char *out_buf) ++{ ++ int rc = 0; ++ ++ if (out_buf == NULL) { ++ rc = 2; ++ goto out; ++ } ++ rc = sprintf(out_buf, "%i\n", id->level); ++ out: ++ return rc; ++} ++ ++/* ++ * prints out actual pages_per_area ++ */ ++static int ++prolog_pages_fn(debug_info_t * id, ++ debug_view_t *view, char *out_buf) ++{ ++ int rc = 0; ++ ++ if (out_buf == NULL) { ++ rc = 2; ++ goto out; ++ } ++ rc = sprintf(out_buf, "%i\n", id->pages_per_area_v2); ++ out: ++ return rc; ++} ++ ++/* ++ * prints out prolog ++ */ ++static int ++prolog_fn(debug_info_t * id, ++ debug_view_t *view, char *out_buf) ++{ ++ int rc = 0; ++ ++ rc = sprintf(out_buf, "AREA TIME LEVEL EXCEPTION CP CALLING FUNCTION" ++ " + OFFSET DATA\n===================================" ++ "=======================================\n"); ++ return rc; ++} ++ ++/* ++ * prints debug data in hex format ++ */ ++static int ++hex_format_fn(debug_info_t * id, debug_view_t *view, ++ char *out_buf, const char *in_buf) ++{ ++ int i, rc = 0; ++ ++ for (i = 0; i < id->buf_size; i++) { ++ rc += sprintf(out_buf + rc, "%02x ", ++ ((unsigned char *) in_buf)[i]); ++ } ++ rc += sprintf(out_buf + rc, "\n"); ++ return rc; ++} ++ ++/* ++ * prints debug data in ascii format ++ */ ++static int ++ascii_format_fn(debug_info_t * id, debug_view_t *view, ++ char *out_buf, const char *in_buf) ++{ ++ int i, rc = 0; ++ ++ if (out_buf == NULL || in_buf == NULL) { ++ rc = id->buf_size + 1; ++ goto out; ++ } ++ for (i = 0; i < id->buf_size; i++) { ++ unsigned char c = in_buf[i]; ++ if (!isprint(c)) ++ rc += sprintf(out_buf + rc, "."); ++ else ++ rc += sprintf(out_buf + rc, "%c", c); ++ } ++ rc += sprintf(out_buf + rc, "\n"); ++ out: ++ return rc; ++} ++ ++/* ++ * prints debug data in ebcdic format ++ */ ++static int ++ebcdic_format_fn(debug_info_t * id, debug_view_t *view, ++ char *out_buf, const char *in_buf) ++{ ++ int i, rc = 0; ++ ++ if (out_buf == NULL || in_buf == NULL) { ++ rc = id->buf_size + 1; ++ goto out; ++ } ++ for (i = 0; i < id->buf_size; i++) { ++ char c = in_buf[i]; ++ EBCASC(&c, 1); ++ if (!isprint(c)) ++ rc += sprintf(out_buf + rc, "."); ++ else ++ rc += sprintf(out_buf + rc, "%c", c); ++ } ++ rc += sprintf(out_buf + rc, "\n"); ++ out: ++ 
return rc; ++} ++ ++debug_view_t ascii_view = { ++ "ascii", ++ &prolog_fn, ++ &dflt_header_fn, ++ &ascii_format_fn, ++}; ++ ++debug_view_t ebcdic_view = { ++ "ebcdic", ++ &prolog_fn, ++ &dflt_header_fn, ++ &ebcdic_format_fn, ++}; ++ ++debug_view_t hex_view = { ++ "hex", ++ &prolog_fn, ++ &dflt_header_fn, ++ &hex_format_fn, ++}; ++ ++debug_view_t level_view = { ++ "level", ++ &prolog_level_fn, ++ NULL, ++ NULL, ++}; ++ ++debug_view_t pages_view = { ++ "pages", ++ &prolog_pages_fn, ++ NULL, ++ NULL, ++}; ++ ++debug_view_t raw_view = { ++ "raw", ++ NULL, ++ &raw_header_fn, ++ &raw_format_fn, ++}; ++ ++debug_view_t hex_ascii_view = { ++ "hex_ascii", ++ &prolog_fn, ++ &dflt_header_fn, ++ &hex_ascii_format_fn, ++}; ++ ++debug_view_t sprintf_view = { ++ "sprintf", ++ &prolog_fn, ++ &dflt_header_fn, ++ &sprintf_format_fn, ++}; ++ ++ ++static debug_entry_t * ++debug_find_oldest_entry(debug_entry_t *entries, int num, int entry_size) ++{ ++ debug_entry_t *result, *current; ++ int i; ++ uint64_t clock1, clock2; ++ ++ result = entries; ++ current = entries; ++ for (i=0; i < num; i++) { ++ if (current->id.stck == 0) ++ break; ++ clock1 = current->id.fields.clock; ++ clock2 = result->id.fields.clock; ++ clock1 = KL_GET_UINT64(&clock1); ++ clock2 = KL_GET_UINT64(&clock2); ++ if (clock1 < clock2) ++ result = current; ++ current = (debug_entry_t *) ((char *) current + entry_size); ++ } ++ return result; ++} ++ ++ ++/* ++ * debug_format_output: ++ * - calls prolog, header and format functions of view to format output ++ */ ++static int ++debug_format_output_v1(debug_info_t * debug_area, debug_view_t *view, ++ FILE * ofp) ++{ ++ int i, j, len; ++ int nr_of_entries; ++ debug_entry_t *act_entry, *last_entry; ++ char *act_entry_data; ++ char buf[2048]; ++ ++ /* print prolog */ ++ if (view->prolog_proc) { ++ len = view->prolog_proc(debug_area, view, buf); ++ fwrite(buf,len, 1, ofp); ++ memset(buf, 0, 2048); ++ } ++ /* print debug records */ ++ if (!(view->format_proc) && !(view->header_proc)) ++ goto out; ++ if(debug_area->entry_size <= 0){ ++ fprintf(ofp, "Invalid entry_size: %i\n",debug_area->entry_size); ++ goto out; ++ } ++ nr_of_entries = (PAGE_SIZE << debug_area->page_order) / debug_area->entry_size; ++ for (i = 0; i < debug_area->nr_areas; i++) { ++ act_entry = debug_find_oldest_entry(debug_area->areas[i], ++ nr_of_entries, ++ debug_area->entry_size); ++ last_entry = (debug_entry_t *) ((char *) debug_area->areas[i] + ++ (PAGE_SIZE << debug_area->page_order) - ++ debug_area->entry_size); ++ for (j = 0; j < nr_of_entries; j++) { ++ act_entry_data = (char*)act_entry + dbe_size; ++ if (act_entry->id.stck == 0) ++ break; /* empty entry */ ++ if (view->header_proc) { ++ len = view->header_proc(debug_area, view, i, ++ act_entry, buf); ++ fwrite(buf,len, 1, ofp); ++ memset(buf, 0, 2048); ++ } ++ if (view->format_proc) { ++ len = view->format_proc(debug_area, view, ++ buf, act_entry_data); ++ fwrite(buf,len, 1, ofp); ++ memset(buf, 0, 2048); ++ } ++ act_entry = ++ (debug_entry_t *) (((char *) act_entry) + ++ debug_area->entry_size); ++ if (act_entry > last_entry) ++ act_entry = debug_area->areas[i]; ++ } ++ } ++ out: ++ return 1; ++} ++ ++/* ++ * debug_format_output_v2: ++ * - calls prolog, header and format functions of view to format output ++ */ ++static int ++debug_format_output_v2(debug_info_t * debug_area, ++ debug_view_t *view, FILE * ofp) ++{ ++ int i, j, k, len; ++ debug_entry_t *act_entry; ++ char *act_entry_data; ++ char buf[2048]; ++ ++ /* print prolog */ ++ if (view->prolog_proc) { ++ len = 
view->prolog_proc(debug_area, view, buf); ++ fwrite(buf,len, 1, ofp); ++ memset(buf, 0, 2048); ++ } ++ /* print debug records */ ++ if (!(view->format_proc) && !(view->header_proc)) ++ goto out; ++ if(debug_area->entry_size <= 0){ ++ fprintf(ofp, "Invalid entry_size: %i\n",debug_area->entry_size); ++ goto out; ++ } ++ for (i = 0; i < debug_area->nr_areas; i++) { ++ int nr_entries_per_page = PAGE_SIZE/debug_area->entry_size; ++ for (j = 0; j < debug_area->pages_per_area_v2; j++) { ++ act_entry = debug_area->areas_v2[i][j]; ++ for (k = 0; k < nr_entries_per_page; k++) { ++ act_entry_data = (char*)act_entry + dbe_size; ++ if (act_entry->id.stck == 0) ++ break; /* empty entry */ ++ if (view->header_proc) { ++ len = view->header_proc(debug_area, ++ view, i, act_entry, buf); ++ fwrite(buf,len, 1, ofp); ++ memset(buf, 0, 2048); ++ } ++ if (view->format_proc) { ++ len = view->format_proc(debug_area, ++ view, buf, act_entry_data); ++ fwrite(buf,len, 1, ofp); ++ memset(buf, 0, 2048); ++ } ++ act_entry = (debug_entry_t *) (((char *) ++ act_entry) + debug_area->entry_size); ++ } ++ } ++ } ++out: ++ return 1; ++} ++ ++static debug_info_t * ++find_debug_area(const char *area_name) ++{ ++ debug_info_t* act_debug_info = debug_area_first; ++ while(act_debug_info != NULL){ ++ if (strcmp(act_debug_info->name, area_name) == 0) ++ return act_debug_info; ++ act_debug_info = act_debug_info->next; ++ } ++ return NULL; ++} ++ ++static void ++dbf_init(void) ++{ ++ if (!initialized) { ++ if(dbf_version >= DBF_VERSION_V2) ++ add_lcrash_debug_view(&pages_view); ++ add_lcrash_debug_view(&ascii_view); ++ add_lcrash_debug_view(&level_view); ++ add_lcrash_debug_view(&ebcdic_view); ++ add_lcrash_debug_view(&hex_view); ++ add_lcrash_debug_view(&hex_ascii_view); ++ add_lcrash_debug_view(&sprintf_view); ++ add_lcrash_debug_view(&raw_view); ++ ebcdic_ascii_conv = iconv_open("ISO-8859-1", "EBCDIC-US"); ++ initialized = 1; ++ } ++} ++ ++static debug_view_t* ++get_debug_view(kaddr_t addr) ++{ ++ void* k_debug_view; ++ int k_debug_view_size; ++ debug_view_t* rc; ++ ++ rc = (debug_view_t*)malloc(sizeof(debug_view_t)); ++ memset(rc, 0, sizeof(debug_view_t)); ++ ++ k_debug_view_size = kl_struct_len("debug_view"); ++ k_debug_view = malloc(k_debug_view_size); ++ GET_BLOCK(addr, k_debug_view_size, k_debug_view); ++ strncpy(rc->name,K_PTR(k_debug_view,"debug_view","name"), ++ DEBUG_MAX_PROCF_LEN); ++ ++ free(k_debug_view); ++ return rc; ++} ++ ++static void ++free_debug_view(debug_view_t* view) ++{ ++ if(view) ++ free(view); ++} ++ ++static void ++debug_get_areas_v1(debug_info_t* db_info, void* k_dbi) ++{ ++ kaddr_t mem_pos; ++ kaddr_t dbe_addr; ++ int area_size, i; ++ ++ /* get areas */ ++ /* place to hold ptrs to debug areas in lcrash */ ++ area_size = PAGE_SIZE << db_info->page_order; ++ db_info->areas = (void**)malloc(db_info->nr_areas * sizeof(void *)); ++ memset(db_info->areas, 0, db_info->nr_areas * sizeof(void *)); ++ mem_pos = (kaddr_t) KL_UINT(k_dbi,"debug_info","areas"); ++ for (i = 0; i < db_info->nr_areas; i++) { ++ dbe_addr = KL_VREAD_PTR(mem_pos); ++ db_info->areas[i] = (debug_entry_t *) malloc(area_size); ++ /* read raw data for debug area */ ++ GET_BLOCK(dbe_addr, area_size, db_info->areas[i]); ++ mem_pos += KL_NBPW; ++ } ++} ++ ++static void ++debug_get_areas_v2(debug_info_t* db_info, void* k_dbi) ++{ ++ kaddr_t area_ptr; ++ kaddr_t page_array_ptr; ++ kaddr_t page_ptr; ++ int i,j; ++ db_info->areas_v2=(void***)malloc(db_info->nr_areas * sizeof(void **)); ++ area_ptr = (kaddr_t) KL_UINT(k_dbi,"debug_info","areas"); ++ 
for (i = 0; i < db_info->nr_areas; i++) { ++ db_info->areas_v2[i] = (void**)malloc(db_info->pages_per_area_v2 ++ * sizeof(void*)); ++ page_array_ptr = KL_VREAD_PTR(area_ptr); ++ for(j=0; j < db_info->pages_per_area_v2; j++) { ++ page_ptr = KL_VREAD_PTR(page_array_ptr); ++ db_info->areas_v2[i][j] = (void*)malloc(PAGE_SIZE); ++ /* read raw data for debug area */ ++ GET_BLOCK(page_ptr, PAGE_SIZE, db_info->areas_v2[i][j]); ++ page_array_ptr += KL_NBPW; ++ } ++ area_ptr += KL_NBPW; ++ } ++} ++ ++static debug_info_t* ++get_debug_info(kaddr_t addr,int get_areas) ++{ ++ void *k_dbi; ++ kaddr_t mem_pos; ++ kaddr_t view_addr; ++ debug_info_t* db_info; ++ int i; ++ int dbi_size; ++ ++ /* get sizes of kernel structures */ ++ if(!(dbi_size = kl_struct_len("debug_info"))){ ++ fprintf (KL_ERRORFP, ++ "Could not determine sizeof(struct debug_info)\n"); ++ return(NULL); ++ } ++ if(!(dbe_size = kl_struct_len("__debug_entry"))){ ++ fprintf(KL_ERRORFP, ++ "Could not determine sizeof(struct __debug_entry)\n"); ++ return(NULL); ++ } ++ ++ /* get kernel debug_info structure */ ++ k_dbi = malloc(dbi_size); ++ GET_BLOCK(addr, dbi_size, k_dbi); ++ ++ db_info = (debug_info_t*)malloc(sizeof(debug_info_t)); ++ memset(db_info, 0, sizeof(debug_info_t)); ++ ++ /* copy members */ ++ db_info->level = KL_INT(k_dbi,"debug_info","level"); ++ db_info->nr_areas = KL_INT(k_dbi,"debug_info","nr_areas"); ++ db_info->pages_per_area_v2= KL_INT(k_dbi,"debug_info","pages_per_area"); ++ db_info->page_order = KL_INT(k_dbi,"debug_info","page_order"); ++ db_info->buf_size = KL_INT(k_dbi,"debug_info","buf_size"); ++ db_info->entry_size = KL_INT(k_dbi,"debug_info","entry_size"); ++ db_info->next_dbi = KL_UINT(k_dbi,"debug_info","next"); ++ db_info->prev_dbi = KL_UINT(k_dbi,"debug_info","prev"); ++ db_info->addr = addr; ++ strncpy(db_info->name,K_PTR(k_dbi,"debug_info","name"), ++ DEBUG_MAX_PROCF_LEN); ++ ++ ++ if(get_areas){ ++ if(dbf_version == DBF_VERSION_V1) ++ debug_get_areas_v1(db_info,k_dbi); ++ else ++ debug_get_areas_v2(db_info,k_dbi); ++ } else { ++ db_info->areas = NULL; ++ } ++ ++ /* get views */ ++ mem_pos = (uaddr_t) K_PTR(k_dbi,"debug_info","views"); ++ memset(&db_info->views, 0, DEBUG_MAX_VIEWS * sizeof(void*)); ++ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { ++ view_addr = KL_GET_PTR((void*)(uaddr_t)mem_pos); ++ if(view_addr == 0){ ++ break; ++ } else { ++ db_info->views[i] = get_debug_view(view_addr); ++ } ++ mem_pos += KL_NBPW; ++ } ++ free(k_dbi); ++ return db_info; ++} ++ ++static void ++free_debug_info_v1(debug_info_t * db_info) ++{ ++ int i; ++ if(db_info->areas){ ++ for (i = 0; i < db_info->nr_areas; i++) { ++ free(db_info->areas[i]); ++ } ++ } ++ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { ++ free_debug_view(db_info->views[i]); ++ } ++ free(db_info->areas); ++ free(db_info); ++} ++ ++static void ++free_debug_info_v2(debug_info_t * db_info) ++{ ++ int i,j; ++ if(db_info->areas) { ++ for (i = 0; i < db_info->nr_areas; i++) { ++ for(j = 0; j < db_info->pages_per_area_v2; j++) { ++ free(db_info->areas_v2[i][j]); ++ } ++ free(db_info->areas[i]); ++ } ++ free(db_info->areas); ++ db_info->areas = NULL; ++ } ++ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { ++ free_debug_view(db_info->views[i]); ++ } ++ free(db_info); ++} ++ ++static int ++get_debug_areas(void) ++{ ++ kaddr_t act_debug_area; ++ syment_t *debug_sym; ++ debug_info_t *act_debug_area_cpy; ++ ++ if(!(debug_sym = kl_lkup_symname("debug_area_first"))){ ++ printf("Did not find debug_areas"); ++ return -1; ++ } ++ act_debug_area = KL_VREAD_PTR(debug_sym->s_addr); ++ 
while(act_debug_area != 0){ ++ act_debug_area_cpy = get_debug_info(act_debug_area,0); ++ act_debug_area = act_debug_area_cpy->next_dbi; ++ if(debug_area_first == NULL){ ++ debug_area_first = act_debug_area_cpy; ++ } else { ++ debug_area_last->next = act_debug_area_cpy; ++ } ++ debug_area_last = act_debug_area_cpy; ++ } ++ return 0; ++} ++ ++static void ++free_debug_areas(void) ++{ ++ debug_info_t* next; ++ debug_info_t* act_debug_info = debug_area_first; ++ ++ while(act_debug_info != NULL){ ++ next = act_debug_info->next; ++ if(dbf_version == DBF_VERSION_V1) ++ free_debug_info_v1(act_debug_info); ++ else ++ free_debug_info_v2(act_debug_info); ++ act_debug_info = next; ++ } ++ ++ debug_area_first = NULL; ++ debug_area_last = NULL; ++} ++ ++static debug_view_t * ++find_lcrash_debug_view(const char *name) ++{ ++ int i; ++ for (i = 0; (i < LCRASH_DB_VIEWS) && (debug_views[i] != NULL); i++) { ++ if (strcmp(debug_views[i]->name, name) == 0) ++ return debug_views[i]; ++ } ++ return NULL; ++} ++ ++static void ++print_lcrash_debug_views(FILE * ofp) ++{ ++ int i; ++ fprintf(ofp, "REGISTERED VIEWS\n"); ++ fprintf(ofp, "=====================\n"); ++ for (i = 0; i < LCRASH_DB_VIEWS; i++) { ++ if (debug_views[i] == NULL) { ++ return; ++ } ++ fprintf(ofp, " - %s\n", debug_views[i]->name); ++ } ++} ++ ++static int ++add_lcrash_debug_view(debug_view_t *view) ++{ ++ int i; ++ for (i = 0; i < LCRASH_DB_VIEWS; i++) { ++ if (debug_views[i] == NULL) { ++ debug_views[i] = view; ++ return 0; ++ } ++ if (strcmp(debug_views[i]->name, view->name) == 0) ++ return -1; ++ } ++ return -1; ++} ++ ++static int ++list_one_view(char *area_name, char *view_name, command_t * cmd) ++{ ++ debug_info_t *db_info; ++ debug_view_t *db_view; ++ ++ if ((db_info = find_debug_area(area_name)) == NULL) { ++ fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name); ++ return -1; ++ } ++ ++ db_info = get_debug_info(db_info->addr,1); ++ ++ if ((db_view = find_lcrash_debug_view(view_name)) == NULL) { ++ fprintf(cmd->efp, "View '%s' not registered!\n", view_name); ++ return -1; ++ } ++ if(dbf_version == DBF_VERSION_V1){ ++ debug_format_output_v1(db_info, db_view, cmd->ofp); ++ free_debug_info_v1(db_info); ++ } else { ++ debug_format_output_v2(db_info, db_view, cmd->ofp); ++ free_debug_info_v2(db_info); ++ } ++ return 0; ++} ++ ++static int ++list_areas(FILE * ofp) ++{ ++ debug_info_t* act_debug_info = debug_area_first; ++ fprintf(ofp, "Debug Logs:\n"); ++ fprintf(ofp, "==================\n"); ++ while(act_debug_info != NULL){ ++ fprintf(ofp, " - %s\n", act_debug_info->name); ++ act_debug_info = act_debug_info->next; ++ } ++ return 0; ++} ++ ++static int ++list_one_area(const char *area_name, command_t * cmd) ++{ ++ debug_info_t *db_info; ++ int i; ++ if ((db_info = find_debug_area(area_name)) == NULL) { ++ fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name); ++ return -1; ++ } ++ fprintf(cmd->ofp, "INSTALLED VIEWS FOR '%s':\n", area_name); ++ fprintf(cmd->ofp, "================================================" ++ "==============================\n"); ++ for (i = 0; i < DEBUG_MAX_VIEWS; i++) { ++ if (db_info->views[i] != NULL) { ++ fprintf(cmd->ofp, " - %s ", db_info->views[i]->name); ++ if (find_lcrash_debug_view(db_info->views[i]->name)) ++ fprintf(cmd->ofp, "(available)\n"); ++ else ++ fprintf(cmd->ofp, "(not available)\n"); ++ } ++ } ++ fprintf(cmd->ofp, "=================================================" ++ "=============================\n"); ++ return 0; ++} ++ ++#ifdef DBF_DYNAMIC_VIEWS ++static int ++load_debug_view(const 
char *path, command_t * cmd) ++{ ++ void *library; ++ const char *error; ++ debug_view_t *(*view_init_func) (void); ++ ++ library = dlopen(path, RTLD_LAZY); ++ if (library == NULL) { ++ fprintf(cmd->efp, "Could not open %s: %s\n", path, dlerror()); ++ return (1); ++ } ++ ++ dlerror(); ++ ++ view_init_func = dlsym(library, "debug_view_init"); ++ error = dlerror(); ++ ++ if (error) { ++ fprintf(stderr, "could not find debug_view_init(): %s\n", ++ error); ++ exit(1); ++ } ++ ++ add_lcrash_debug_view((*view_init_func) ()); ++ ++ fprintf(cmd->ofp, "view %s loaded\n", path); ++ fflush(stdout); ++ return 0; ++} ++#endif ++ ++/* ++ * s390dbf_cmd() -- Run the 's390dbf' command. ++ */ ++static int ++s390dbf_cmd(command_t * cmd) ++{ ++ syment_t *dbf_version_sym; ++ int rc = 0; ++ ++ /* check version */ ++ ++ if(!(dbf_version_sym = kl_lkup_symname("debug_feature_version"))){ ++ fprintf(KL_ERRORFP, ++ "Could not determine debug_feature_version\n"); ++ return -1; ++ } ++ ++ dbf_version = KL_VREAD_UINT32(dbf_version_sym->s_addr); ++ ++ if ((dbf_version != DBF_VERSION_V1) && (dbf_version != DBF_VERSION_V2)){ ++ fprintf(cmd->efp,"lcrash does not support the" ++ " debug feature version of the dump kernel:\n"); ++ fprintf(cmd->efp,"DUMP: %i SUPPORTED: %i and %i\n", ++ dbf_version, DBF_VERSION_V1, DBF_VERSION_V2); ++ return -1; ++ } ++ ++ dbf_init(); ++ ++ if (cmd->flags & C_ALL) { ++ return (0); ++ } ++#ifdef DBF_DYNAMIC_VIEWS ++ if (cmd->flags & LOAD_FLAG) { ++ printf("loading: %s\n", cmd->args[0]); ++ return (load_debug_view(cmd->args[0], cmd)); ++ } ++#endif ++ if (cmd->flags & VIEWS_FLAG) { ++ print_lcrash_debug_views(cmd->ofp); ++ return (0); ++ } ++ if (cmd->nargs > 2) { ++ s390dbf_usage(cmd); ++ return (1); ++ } ++ ++ if(get_debug_areas() == -1) ++ return -1; ++ ++ switch (cmd->nargs) { ++ case 0: ++ rc = list_areas(cmd->ofp); ++ break; ++ case 1: ++ rc = list_one_area(cmd->args[0], cmd); ++ break; ++ case 2: ++ rc = list_one_view(cmd->args[0], cmd->args[1], cmd); ++ break; ++ } ++ ++ free_debug_areas(); ++ ++ return rc; ++} ++ ++#define _S390DBF_USAGE " [-v] [debug log] [debug view]" ++ ++/* ++ * s390dbf_usage() -- Print the usage string for the 's390dbf' command. ++ */ ++void ++s390dbf_usage(command_t * cmd) ++{ ++ CMD_USAGE(cmd, _S390DBF_USAGE); ++} ++ ++/* ++ * s390 debug feature command for crash ++ */ ++ ++char *help_s390dbf[] = { ++ "s390dbf", ++ "s390dbf prints out debug feature logs", ++ "[-v] [debug_log] [debug_log view]", ++ "", ++ "Display Debug logs:", ++ " + If called without parameters, all active debug logs are listed.", ++ " + If called with '-v', all debug views which are available to", ++ " 'crash' are listed", ++ " + If called with the name of a debug log, all debug-views for which", ++ " the debug-log has registered are listed. 
It is possible thatsome", ++ " of the debug views are not available to 'crash'.", ++ " + If called with the name of a debug-log and an available viewname,", ++ " the specified view is printed.", ++ NULL ++}; ++ ++void cmd_s390dbf() ++{ ++ int i,c; ++ ++ command_t cmd = { ++ .ofp = stdout, ++ .efp = stderr, ++ .cmdstr = "s390dbf", ++ .command = "s390dbf", ++ }; ++ ++ cmd.nargs=argcnt - 1; ++ for (i=1; i < argcnt; i++) ++ cmd.args[i-1] = args[i]; ++ ++ while ((c = getopt(argcnt, args, "v")) != EOF) { ++ switch(c) { ++ case 'v': ++ cmd.flags |= VIEWS_FLAG; ++ break; ++ default: ++ s390dbf_usage(&cmd); ++ return; ++ } ++ } ++ s390dbf_cmd(&cmd); ++} ++ ++#endif ++ +--- crash/s390.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/s390.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,9 +1,9 @@ + /* s390.c - core analysis suite + * + * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. +- * Copyright (C) 2005 Michael Holzheu, IBM Corporation ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2005, 2006 Michael Holzheu, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -21,17 +21,6 @@ + #define S390_WORD_SIZE 4 + #define S390_ADDR_MASK 0x7fffffff + +-#define S390_PAGE_SHIFT 12 +-#define S390_PAGE_SIZE (1UL << S390_PAGE_SHIFT) +-#define S390_PAGE_MASK (~(S390_PAGE_SIZE-1)) +- +-#define S390_PGDIR_SHIFT 20 +-#define S390_PGDIR_SIZE (1UL << S390_PGDIR_SHIFT) +-#define S390_PGDIR_MASK (~(S390_PGDIR_SIZE-1)) +- +-#define S390_PTRS_PER_PGD 2048 +-#define S390_PTRS_PER_PTE 256 +- + #define S390_PMD_BASE_MASK (~((1UL<<6)-1)) + #define S390_PT_BASE_MASK S390_PMD_BASE_MASK + #define S390_PAGE_BASE_MASK (~((1UL<<12)-1)) +@@ -44,26 +33,10 @@ + #define S390_PAGE_INVALID 0x400 /* HW invalid */ + #define S390_PAGE_INVALID_MASK 0x601ULL /* for linux 2.6 */ + #define S390_PAGE_INVALID_NONE 0x401ULL /* for linux 2.6 */ +-#define S390_PAGE_TABLE_LEN 0xf /* only full page-tables */ +-#define S390_PAGE_TABLE_INV 0x20 /* invalid page-table */ + + #define S390_PTE_INVALID_MASK 0x80000900 + #define S390_PTE_INVALID(x) ((x) & S390_PTE_INVALID_MASK) + +-#define S390_PMD_INVALID_MASK 0x80000000 +-#define S390_PMD_INVALID(x) ((x) & S390_PMD_INVALID_MASK) +- +-/* pgd/pmd/pte query macros */ +-#define s390_pmd_none(x) ((x) & S390_PAGE_TABLE_INV) +-#define s390_pmd_bad(x) (((x) & (~S390_PMD_BASE_MASK & \ +- ~S390_PAGE_TABLE_INV)) != \ +- S390_PAGE_TABLE_LEN) +- +-#define s390_pte_none(x) (((x) & (S390_PAGE_INVALID | S390_RO_S390 | \ +- S390_PAGE_PRESENT)) == \ +- S390_PAGE_INVALID) +- +- + #define ASYNC_STACK_SIZE STACKSIZE() // can be 4096 or 8192 + #define KERNEL_STACK_SIZE STACKSIZE() // can be 4096 or 8192 + +@@ -73,8 +46,6 @@ + * declarations of static functions + */ + static void s390_print_lowcore(char*, struct bt_info*,int); +-static unsigned long s390_pgd_offset(unsigned long, unsigned long); +-static unsigned long s390_pte_offset(unsigned long, unsigned long); + static int s390_kvtop(struct task_context *, ulong, physaddr_t *, int); + static int s390_uvtop(struct task_context *, ulong, physaddr_t *, int); + static int s390_vtop(unsigned long, ulong, physaddr_t*, int); +@@ -86,7 +57,6 @@ + static ulong s390_processor_speed(void); + static int 
s390_eframe_search(struct bt_info *); + static void s390_back_trace_cmd(struct bt_info *); +-static void s390_back_trace(struct gnu_request *, struct bt_info *); + static void s390_dump_irq(int); + static void s390_get_stack_frame(struct bt_info *, ulong *, ulong *); + static int s390_dis_filter(ulong, char *); +@@ -158,7 +128,8 @@ + machdep->nr_irqs = 0; /* TBD */ + machdep->vmalloc_start = s390_vmalloc_start; + machdep->dump_irq = s390_dump_irq; +- machdep->hz = HZ; ++ if (!machdep->hz) ++ machdep->hz = HZ; + break; + + case POST_INIT: +@@ -178,8 +149,6 @@ + fprintf(fp, " flags: %lx (", machdep->flags); + if (machdep->flags & KSYMS_START) + fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); +- if (machdep->flags & SYSRQ) +- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); + fprintf(fp, ")\n"); + + fprintf(fp, " kvbase: %lx\n", machdep->kvbase); +@@ -230,19 +199,6 @@ + } + + /* +- * Check if address is in the vmalloc area +- */ +-int +-s390_IS_VMALLOC_ADDR(ulong addr) +-{ +- static unsigned long high_memory = 0; +- if(!high_memory){ +- high_memory = s390_vmalloc_start(); +- } +- return (addr > high_memory); +-} +- +-/* + * Check if address is in context's address space + */ + static int +@@ -293,7 +249,7 @@ + /* + * Check if page is mapped + */ +-inline int ++static inline int + s390_pte_present(unsigned long x) + { + if(THIS_KERNEL_VERSION >= LINUX(2,6,0)) { +@@ -307,60 +263,87 @@ + /* + * page table traversal functions + */ +-static unsigned long +-s390_pgd_offset(unsigned long pgd_base, unsigned long vaddr) +-{ +- unsigned long pgd_off, pmd_base; +- +- pgd_off = ((vaddr >> S390_PGDIR_SHIFT) & (S390_PTRS_PER_PGD - 1)) +- * S390_WORD_SIZE; +- readmem(pgd_base + pgd_off, PHYSADDR, &pmd_base,sizeof(long), +- "pgd_base",FAULT_ON_ERROR); +- return pmd_base; +-} + +-unsigned long s390_pte_offset(unsigned long pte_base, unsigned long vaddr) ++/* Segment table traversal function */ ++static ulong _kl_sg_table_deref_s390(ulong vaddr, ulong table, int len) + { +- unsigned pte_off, pte_val; ++ ulong offset, entry; + +- pte_off = ((vaddr >> S390_PAGE_SHIFT) & (S390_PTRS_PER_PTE - 1)) +- * S390_WORD_SIZE; +- readmem(pte_base + pte_off, PHYSADDR, &pte_val, sizeof(long), +- "pte_val",FAULT_ON_ERROR); +- return pte_val; ++ offset = ((vaddr >> 20) & 0x7ffUL) * 4; ++ if (offset >= (len + 1)*64) ++ /* Offset is over the table limit. */ ++ return 0; ++ readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", ++ FAULT_ON_ERROR); ++ ++ /* ++ * Check if the segment table entry could be read and doesn't have ++ * any of the reserved bits set. ++ */ ++ if (entry & 0x80000000UL) ++ return 0; ++ /* Check if the segment table entry has the invalid bit set. */ ++ if (entry & 0x40UL) ++ return 0; ++ /* Segment table entry is valid and well formed. */ ++ return entry; ++} ++ ++/* Page table traversal function */ ++static ulong _kl_pg_table_deref_s390(ulong vaddr, ulong table, int len) ++{ ++ ulong offset, entry; ++ ++ offset = ((vaddr >> 12) & 0xffUL) * 4; ++ if (offset >= (len + 1)*64) ++ /* Offset is over the table limit. */ ++ return 0; ++ readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", ++ FAULT_ON_ERROR); ++ /* ++ * Check if the page table entry could be read and doesn't have ++ * any of the reserved bits set. ++ */ ++ if (entry & 0x80000900UL) ++ return 0; ++ /* Check if the page table entry has the invalid bit set. */ ++ if (entry & 0x400UL) ++ return 0; ++ /* Page table entry is valid and well formed. 
*/ ++ return entry; + } + +-/* +- * Generic vtop function for user and kernel addresses +- */ ++/* lookup virtual address in page tables */ + static int +-s390_vtop(unsigned long pgd_base, ulong kvaddr, physaddr_t *paddr, int verbose) ++s390_vtop(unsigned long table, ulong vaddr, physaddr_t *phys_addr, int verbose) + { +- unsigned pte_base, pte_val; ++ ulong entry, paddr; ++ int len; + +- /* get the pgd entry */ +- pte_base = s390_pgd_offset(pgd_base,kvaddr); +- if(S390_PMD_INVALID(pte_base) || +- s390_pmd_bad(pte_base) || +- s390_pmd_none(pte_base)) { +- *paddr = 0; +- return FALSE; +- } +- /* get the pte */ +- pte_base = pte_base & S390_PT_BASE_MASK; +- pte_val = s390_pte_offset(pte_base,kvaddr); +- if(S390_PTE_INVALID(pte_val) || +- s390_pte_none(pte_val)){ +- *paddr = 0; ++ /* ++ * Get the segment table entry. ++ * We assume that the segment table length field in the asce ++ * is set to the maximum value of 127 (which translates to ++ * a segment table with 2048 entries) and that the addressing ++ * mode is 31 bit. ++ */ ++ entry = _kl_sg_table_deref_s390(vaddr, table, 127); ++ if (!entry) + return FALSE; +- } +- if(!s390_pte_present(pte_val)){ +- /* swapped out */ +- *paddr = pte_val; ++ table = entry & 0x7ffffc00UL; ++ len = entry & 0xfUL; ++ ++ /* Get the page table entry */ ++ entry = _kl_pg_table_deref_s390(vaddr, table, len); ++ if (!entry) + return FALSE; +- } +- *paddr = (pte_val & S390_PAGE_BASE_MASK) | +- (kvaddr & (~(S390_PAGE_MASK))); ++ ++ /* Isolate the page origin from the page table entry. */ ++ paddr = entry & 0x7ffff000UL; ++ ++ /* Add the page offset and return the final value. */ ++ *phys_addr = paddr + (vaddr & 0xfffUL); ++ + return TRUE; + } + +@@ -483,7 +466,7 @@ + return FALSE; + } + fprintf(fp,"PTE PHYSICAL FLAGS\n"); +- fprintf(fp,"%08x %08x",pte, pte & S390_PAGE_BASE_MASK); ++ fprintf(fp,"%08lx %08lx",pte, pte & S390_PAGE_BASE_MASK); + fprintf(fp," ("); + if(pte & S390_PAGE_INVALID) + fprintf(fp,"INVALID "); +@@ -510,7 +493,7 @@ + /* + * returns cpu number of task + */ +-int ++static int + s390_cpu_of_task(unsigned long task) + { + int cpu; +@@ -551,12 +534,13 @@ + return FALSE; + } else { + /* Linux 2.6 */ +- unsigned long runqueue_addr, runqueue_offset, per_cpu_offset; ++ unsigned long runqueue_addr, runqueue_offset; + unsigned long cpu_offset, per_cpu_offset_addr, running_task; +- char runqueue[4096]; ++ char *runqueue; + int cpu; + + cpu = s390_cpu_of_task(task); ++ runqueue = GETBUF(SIZE(runqueue)); + + runqueue_offset=symbol_value("per_cpu__runqueues"); + per_cpu_offset_addr=symbol_value("__per_cpu_offset"); +@@ -564,10 +548,10 @@ + &cpu_offset, sizeof(long),"per_cpu_offset", + FAULT_ON_ERROR); + runqueue_addr=runqueue_offset + cpu_offset; +- readmem(runqueue_addr,KVADDR,&runqueue,sizeof(runqueue), ++ readmem(runqueue_addr,KVADDR,runqueue,SIZE(runqueue), + "runqueue", FAULT_ON_ERROR); +- running_task = *((unsigned long*)&runqueue[MEMBER_OFFSET( +- "runqueue", "curr")]); ++ running_task = ULONG(runqueue + OFFSET(runqueue_curr)); ++ FREEBUF(runqueue); + if(running_task == task) + return TRUE; + else +@@ -700,7 +684,7 @@ + } else if(skip_first_frame){ + skip_first_frame=0; + } else { +- fprintf(fp," #%i [%08x] ",i,backchain); ++ fprintf(fp," #%i [%08lx] ",i,backchain); + fprintf(fp,"%s at %x\n", closest_symbol(r14), r14); + if (bt->flags & BT_LINE_NUMBERS) + s390_dump_line_number(r14); +@@ -716,13 +700,15 @@ + frame_size = stack_base - old_backchain + + KERNEL_STACK_SIZE; + } else { +- frame_size = backchain - old_backchain; ++ frame_size = MIN((backchain 
- old_backchain), ++ (stack_base - old_backchain + ++ KERNEL_STACK_SIZE)); + } + for(j=0; j< frame_size; j+=4){ + if(j % 16 == 0){ +- fprintf(fp,"\n%08x: ",old_backchain+j); ++ fprintf(fp,"\n%08lx: ",old_backchain+j); + } +- fprintf(fp," %08x",ULONG(&stack[old_backchain - ++ fprintf(fp," %08lx",ULONG(&stack[old_backchain - + stack_base + j])); + } + fprintf(fp,"\n\n"); +@@ -771,10 +757,10 @@ + return; + } + fprintf(fp," LOWCORE INFO:\n"); +- fprintf(fp," -psw : %#010x %#010x\n", tmp[0], ++ fprintf(fp," -psw : %#010lx %#010lx\n", tmp[0], + tmp[1]); + if(show_symbols){ +- fprintf(fp," -function : %s at %x\n", ++ fprintf(fp," -function : %s at %lx\n", + closest_symbol(tmp[1] & S390_ADDR_MASK), + tmp[1] & S390_ADDR_MASK); + if (bt->flags & BT_LINE_NUMBERS) +@@ -783,12 +769,12 @@ + ptr = lc + MEMBER_OFFSET("_lowcore","cpu_timer_save_area"); + tmp[0]=UINT(ptr); + tmp[1]=UINT(ptr + S390_WORD_SIZE); +- fprintf(fp," -cpu timer: %#010x %#010x\n", tmp[0],tmp[1]); ++ fprintf(fp," -cpu timer: %#010lx %#010lx\n", tmp[0],tmp[1]); + + ptr = lc + MEMBER_OFFSET("_lowcore","clock_comp_save_area"); + tmp[0]=UINT(ptr); + tmp[1]=UINT(ptr + S390_WORD_SIZE); +- fprintf(fp," -clock cmp: %#010x %#010x\n", tmp[0], tmp[1]); ++ fprintf(fp," -clock cmp: %#010lx %#010lx\n", tmp[0], tmp[1]); + + fprintf(fp," -general registers:\n"); + ptr = lc + MEMBER_OFFSET("_lowcore","gpregs_save_area"); +@@ -796,25 +782,25 @@ + tmp[1]=ULONG(ptr + S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0],tmp[1],tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); + tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0],tmp[1],tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); + tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0],tmp[1],tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE); + tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + + fprintf(fp," -access registers:\n"); +@@ -823,25 +809,25 @@ + tmp[1]=ULONG(ptr + S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); + tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); + tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE); + tmp[1]=ULONG(ptr + 
13* S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + + fprintf(fp," -control registers:\n"); +@@ -850,26 +836,26 @@ + tmp[1]=ULONG(ptr + S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); + tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + + tmp[0]=ULONG(ptr); + tmp[1]=ULONG(ptr + S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); + tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + + ptr = lc + MEMBER_OFFSET("_lowcore","floating_pt_save_area"); +@@ -878,8 +864,8 @@ + tmp[1]=ULONG(ptr + 2 * S390_WORD_SIZE); + tmp[2]=ULONG(ptr + 4 * S390_WORD_SIZE); + tmp[3]=ULONG(ptr + 6 * S390_WORD_SIZE); +- fprintf(fp," %#018llx %#018llx\n", tmp[0], tmp[1]); +- fprintf(fp," %#018llx %#018llx\n", tmp[2], tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0], tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2], tmp[3]); + } + + /* +--- crash/unwind_x86_64.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/unwind_x86_64.h 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,92 @@ ++#define CONFIG_64BIT 1 ++#define NULL ((void *)0) ++ ++typedef unsigned long size_t; ++typedef unsigned char u8; ++typedef signed short s16; ++typedef unsigned short u16; ++typedef signed int s32; ++typedef unsigned int u32; ++typedef unsigned long long u64; ++ ++struct pt_regs { ++ unsigned long r15; ++ unsigned long r14; ++ unsigned long r13; ++ unsigned long r12; ++ unsigned long rbp; ++ unsigned long rbx; ++/* arguments: non interrupts/non tracing syscalls only save upto here*/ ++ unsigned long r11; ++ unsigned long r10; ++ unsigned long r9; ++ unsigned long r8; ++ unsigned long rax; ++ unsigned long rcx; ++ unsigned long rdx; ++ unsigned long rsi; ++ unsigned long rdi; ++ unsigned long orig_rax; ++/* end of arguments */ ++/* cpu exception frame or undefined */ ++ unsigned long rip; ++ unsigned long cs; ++ unsigned long eflags; ++ unsigned long rsp; ++ unsigned long ss; ++/* top of stack page */ ++}; ++ ++struct unwind_frame_info ++{ ++ struct pt_regs regs; ++}; ++ ++extern int unwind(struct unwind_frame_info *); ++extern void init_unwind_table(void); ++extern void free_unwind_table(void); ++ ++#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) ++#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) ++#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) ++#define get_unaligned(ptr) (*(ptr)) ++//#define __get_user(x,ptr) 
__get_user_nocheck((x),(ptr),sizeof(*(ptr))) ++#define THREAD_ORDER 1 ++#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) ++ ++#define UNW_PC(frame) (frame)->regs.rip ++#define UNW_SP(frame) (frame)->regs.rsp ++#ifdef CONFIG_FRAME_POINTER ++ #define UNW_FP(frame) (frame)->regs.rbp ++ #define FRAME_RETADDR_OFFSET 8 ++ #define FRAME_LINK_OFFSET 0 ++ #define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1)) ++ #define STACK_TOP(tsk) ((tsk)->thread.rsp0) ++#endif ++ ++ ++#define EXTRA_INFO(f) { BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) % FIELD_SIZEOF(struct unwind_frame_info, f)) + offsetof(struct unwind_frame_info, f)/ FIELD_SIZEOF(struct unwind_frame_info, f), FIELD_SIZEOF(struct unwind_frame_info, f) } ++ ++#define PTREGS_INFO(f) EXTRA_INFO(regs.f) ++ ++#define UNW_REGISTER_INFO \ ++ PTREGS_INFO(rax),\ ++ PTREGS_INFO(rdx),\ ++ PTREGS_INFO(rcx),\ ++ PTREGS_INFO(rbx), \ ++ PTREGS_INFO(rsi), \ ++ PTREGS_INFO(rdi), \ ++ PTREGS_INFO(rbp), \ ++ PTREGS_INFO(rsp), \ ++ PTREGS_INFO(r8), \ ++ PTREGS_INFO(r9), \ ++ PTREGS_INFO(r10),\ ++ PTREGS_INFO(r11), \ ++ PTREGS_INFO(r12), \ ++ PTREGS_INFO(r13), \ ++ PTREGS_INFO(r14), \ ++ PTREGS_INFO(r15), \ ++ PTREGS_INFO(rip) ++ +--- crash/crash.8.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/crash.8 2008-01-04 09:42:08.000000000 -0500 +@@ -5,7 +5,7 @@ + .TH CRASH 8 + .SH NAME + crash \- Analyze Linux crash data or a live system +-.SH SYNAPSIS ++.SH SYNOPSIS + .B crash + [ + .B -h +@@ -42,9 +42,13 @@ + is a tool for interactively analyzing the state of the Linux system + while it is running, or after a kernel crash has occurred and a + core dump has been created by the Red Hat +-.I netdump +-facility. It is loosely based on the SVR4 UNIX crash +-command, but has been signficantly enhanced ++.I netdump, ++.I diskdump, ++.I kdump, ++or ++.I xendump ++facilities. It is loosely based on the SVR4 UNIX crash ++command, but has been significantly enhanced + by completely merging it with the + .I gdb + debugger. The marriage of the two effectively combines the +@@ -207,15 +211,15 @@ + .I dis + disassembles memory, either entire kernel functions, from a + location for a specified number of instructions, or from the start of a +-fuction up to a specified memory location. ++function up to a specified memory location. + .TP + .I eval + evalues an expression or numeric type and displays the result +-in hexidecimal, decimal, octal and binary. ++in hexadecimal, decimal, octal and binary. + .TP + .I exit + causes +-.I crash ++.B crash + to exit. + .TP + .I extend +@@ -230,7 +234,7 @@ + in the system. + .TP + .I fuser +-displays the tasks using the specifed file or socket. ++displays the tasks using the specified file or socket. + .TP + .I gdb + passes its argument to the underlying +@@ -274,7 +278,7 @@ + display various network related data. + .TP + .I p +-passes its argumnts to the ++passes its arguments to the + .I gdb + "print" command for evaluation and display. + .TP +@@ -361,11 +365,85 @@ + .I wr + modifies the contents of memory. When writing to memory on + a live system, this command should obviously be used with great care. ++.SH FILES ++.TP ++.I .crashrc ++Initialization commands. The file can be located in the user's ++.B HOME ++directory and/or the current directory. Commands found in the ++.I .crashrc ++file in the ++.B HOME ++directory are executed before those in the current directory's ++.I .crashrc ++file. ++.SH ENVIRONMENT ++.TP ++.B EDITOR ++Command input is read using ++.BR readline(3). 
++If ++.B EDITOR ++is set to ++.I emacs ++or ++.I vi ++then suitable keybindings are used. If ++.B EDITOR ++is not set, then ++.I vi ++is used. This can be overridden by ++.B set vi ++or ++.B set emacs ++commands located in a ++.IR .crashrc ++file, or by entering ++.B -e emacs ++on the ++.B crash ++command line. ++.TP ++.B CRASHPAGER ++If ++.B CRASHPAGER ++is set, its value is used as the name of the program to which command output will be sent. ++If not, then command output is sent to ++.B /usr/bin/less -E -X ++by default. ++.SH NOTES ++.PP ++If ++.B crash ++does not work, look for a newer version: kernel evolution frequently makes ++.B crash ++updates necessary. ++.PP ++The command ++.B set scroll off ++will cause output to be sent directly to ++the terminal rather than through a paging program. This is useful, ++for example, if you are running ++.B crash ++in a window of ++.BR emacs . + .SH AUTHOR + Dave Anderson wrote +-.B Crash ++.B crash + .TP + Jay Fenlason wrote this man page. + .SH "SEE ALSO" +-netdump(8) +-gdb(1) ++.PP ++The ++.I help ++command within ++.B crash ++provides more complete and accurate documentation than this man page. ++.PP ++.I http://people.redhat.com/anderson ++- the home page of the ++.B crash ++utility. ++.PP ++.BR netdump (8), ++.BR gdb (1) +--- crash/lkcd_common.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_common.c 2008-01-04 09:42:08.000000000 -0500 +@@ -3,8 +3,8 @@ + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. + * Copyright (C) 2002 Silicon Graphics, Inc. + * Copyright (C) 2002 Free Software Foundation, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2007 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2007 Red Hat, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -53,6 +53,8 @@ + + struct lkcd_environment lkcd_environment = { 0 }; + struct lkcd_environment *lkcd = &lkcd_environment; ++static int uncompress_errloc; ++static int uncompress_recover(unsigned char *, ulong, unsigned char *, ulong); + + ulonglong + fix_lkcd_address(ulonglong addr) +@@ -62,7 +64,7 @@ + + for (i = 0; i < lkcd->fix_addr_num; i++) { + if ( (addr >=lkcd->fix_addr[i].task) && +- (addr <= lkcd->fix_addr[i].task + STACKSIZE())){ ++ (addr < lkcd->fix_addr[i].task + STACKSIZE())){ + + offset = addr - lkcd->fix_addr[i].task; + addr = lkcd->fix_addr[i].saddr + offset; +@@ -208,6 +210,7 @@ + + case LKCD_DUMP_V8: + case LKCD_DUMP_V9: ++ case LKCD_DUMP_V10: + lkcd->version = LKCD_DUMP_V8; + return TRUE; + +@@ -623,6 +626,10 @@ + { + static int i = 0; + ++ if (pc->flags & SILENT) { ++ return; ++ } ++ + switch (++i%4) { + case 0: + lkcd_print("|\b"); +@@ -667,6 +674,8 @@ + { + uint64_t zone, page; + int ii, ret; ++ int max_zones; ++ struct physmem_zone *zones; + + zone = paddr & lkcd->zone_mask; + +@@ -693,19 +702,21 @@ + lkcd->num_zones++; + } + ++retry: + /* find the zone */ + for (ii=0; ii < lkcd->num_zones; ii++) { + if (lkcd->zones[ii].start == zone) { + if (lkcd->zones[ii].pages[page].offset != 0) { + if (lkcd->zones[ii].pages[page].offset != off) { +- error(INFO, "conflicting page: zone %lld, " ++ if (CRASHDEBUG(1)) ++ error(INFO, "LKCD: conflicting page: zone %lld, " + "page %lld: %lld, %lld != %lld\n", + (unsigned long long)zone, + (unsigned long long)page, + (unsigned long long)paddr, + (unsigned long long)off, + (unsigned long long)lkcd->zones[ii].pages[page].offset); +- abort(); ++ return -1; + } + ret = 0; + } else { +@@ -734,8 +745,20 @@ + ret = 1; + lkcd->num_zones++; + } else { +- lkcd_print("fixme, need to add more zones (ZONE_ALLOC)\n"); +- exit(1); ++ /* need to expand zone */ ++ max_zones = lkcd->max_zones * 2; ++ zones = malloc(max_zones * sizeof(struct physmem_zone)); ++ if (!zones) { ++ return -1; /* This should be fatal */ ++ } ++ BZERO(zones, max_zones * sizeof(struct physmem_zone)); ++ memcpy(zones, lkcd->zones, ++ lkcd->max_zones * sizeof(struct physmem_zone)); ++ free(lkcd->zones); ++ ++ lkcd->zones = zones; ++ lkcd->max_zones = max_zones; ++ goto retry; + } + } + +@@ -765,11 +788,32 @@ + } + + ++#ifdef IA64 ++ ++int ++lkcd_get_kernel_start(ulong *addr) ++{ ++ if (!addr) ++ return 0; ++ ++ switch (lkcd->version) ++ { ++ case LKCD_DUMP_V8: ++ case LKCD_DUMP_V9: ++ return lkcd_get_kernel_start_v8(addr); ++ ++ default: ++ return 0; ++ } ++} ++ ++#endif ++ + + int + lkcd_lseek(physaddr_t paddr) + { +- long i; ++ long i = 0; + int err; + int eof; + void *dp; +@@ -814,7 +858,7 @@ + lseek(lkcd->fd, lkcd->page_offset_max, SEEK_SET); + eof = FALSE; + while (!eof) { +- if( (i%2048) == 0) { ++ if( (i++%2048) == 0) { + lkcd_speedo(); + } + +@@ -1164,40 +1208,103 @@ + return 1; + } + ++/* Returns the bit offset if it's able to correct, or negative if not */ ++static int ++uncompress_recover(unsigned char *dest, ulong destlen, ++ unsigned char *source, ulong sourcelen) ++{ ++ int byte, bit; ++ ulong retlen = destlen; ++ int good_decomp = 0, good_rv = -1; ++ ++ /* Generate all single bit errors */ ++ if (sourcelen > 16384) { ++ lkcd_print("uncompress_recover: sourcelen %ld too long\n", ++ sourcelen); ++ return(-1); ++ } ++ for (byte = 0; byte < sourcelen; byte++) { ++ for (bit = 0; bit < 8; bit++) { ++ source[byte] ^= (1 << 
bit); ++ ++ if (uncompress(dest, &retlen, source, sourcelen) == Z_OK && ++ retlen == destlen) { ++ good_decomp++; ++ lkcd_print("good for flipping byte %d bit %d\n", ++ byte, bit); ++ good_rv = bit + byte * 8; ++ } ++ ++ /* Put it back */ ++ source[byte] ^= (1 << bit); ++ } ++ } ++ if (good_decomp == 0) { ++ lkcd_print("Could not correct gzip errors.\n"); ++ return -2; ++ } else if (good_decomp > 1) { ++ lkcd_print("Too many valid gzip decompressions: %d.\n", good_decomp); ++ return -3; ++ } else { ++ source[good_rv >> 8] ^= 1 << (good_rv % 8); ++ uncompress(dest, &retlen, source, sourcelen); ++ source[good_rv >> 8] ^= 1 << (good_rv % 8); ++ return good_rv; ++ } ++} ++ ++ + /* + * Uncompress a gzip'd buffer. ++ * ++ * Returns FALSE on error. If set, then ++ * a non-negative value of uncompress_errloc indicates the location of ++ * a single-bit error, and the data may be used. + */ + static int + lkcd_uncompress_gzip(unsigned char *dest, ulong destlen, + unsigned char *source, ulong sourcelen) + { + ulong retlen = destlen; ++ int rc = FALSE; + + switch (uncompress(dest, &retlen, source, sourcelen)) + { + case Z_OK: + if (retlen == destlen) +- return TRUE; ++ rc = TRUE; ++ break; + + lkcd_print("uncompress: returned length not page size: %ld\n", + retlen); +- return FALSE; ++ rc = FALSE; ++ break; + + case Z_MEM_ERROR: + lkcd_print("uncompress: Z_MEM_ERROR (not enough memory)\n"); +- return FALSE; ++ rc = FALSE; ++ break; + + case Z_BUF_ERROR: + lkcd_print("uncompress: " + "Z_BUF_ERROR (not enough room in output buffer)\n"); +- return FALSE; ++ rc = FALSE; ++ break; + + case Z_DATA_ERROR: + lkcd_print("uncompress: Z_DATA_ERROR (input data corrupted)\n"); +- return FALSE; ++ rc = FALSE; ++ break; ++ default: ++ rc = FALSE; ++ break; + } + +- return FALSE; ++ if (rc == FALSE) { ++ uncompress_errloc = ++ uncompress_recover(dest, destlen, source, sourcelen); ++ } ++ return rc; + } + + +@@ -1252,8 +1359,9 @@ + dp_flags = lkcd->get_dp_flags(); + dp_address = lkcd->get_dp_address(); + +- if (dp_flags & LKCD_DUMP_END) ++ if (dp_flags & LKCD_DUMP_END) { + return LKCD_DUMPFILE_END; ++ } + + if ((lkcd->flags & LKCD_VALID) && (page > lkcd->total_pages)) + lkcd->total_pages = page; +@@ -1315,3 +1423,15 @@ + } + } + ++int ++get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp) ++{ ++ switch (lkcd->version) { ++ case LKCD_DUMP_V8: ++ case LKCD_DUMP_V9: ++ return get_lkcd_regs_for_cpu_v8(bt, eip, esp); ++ default: ++ return -1; ++ } ++} ++ +--- crash/ppc.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/ppc.c 2008-01-04 09:42:08.000000000 -0500 +@@ -51,6 +51,9 @@ + void + ppc_init(int when) + { ++ uint cpu_features; ++ ulong cur_cpu_spec; ++ + switch (when) + { + case PRE_SYMTAB: +@@ -135,9 +138,23 @@ + "irq_desc", NULL, 0); + else + machdep->nr_irqs = 0; +- machdep->hz = HZ; +- if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) +- machdep->hz = 1000; ++ if (!machdep->hz) { ++ machdep->hz = HZ; ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) ++ machdep->hz = 1000; ++ } ++ if (symbol_exists("cur_cpu_spec")) { ++ get_symbol_data("cur_cpu_spec", sizeof(void *), &cur_cpu_spec); ++ readmem(cur_cpu_spec + MEMBER_OFFSET("cpu_spec", "cpu_user_features"), ++ KVADDR, &cpu_features, sizeof(uint), "cpu user features", ++ FAULT_ON_ERROR); ++ if (cpu_features & CPU_BOOKE) ++ machdep->flags |= CPU_BOOKE; ++ } ++ else ++ machdep->flags |= CPU_BOOKE; ++ machdep->section_size_bits = _SECTION_SIZE_BITS; ++ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; + break; + + case POST_INIT: +@@ -154,8 +171,6 @@ + fprintf(fp, " flags: 
%lx (", machdep->flags); + if (machdep->flags & KSYMS_START) + fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); +- if (machdep->flags & SYSRQ) +- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); + fprintf(fp, ")\n"); + + fprintf(fp, " kvbase: %lx\n", machdep->kvbase); +@@ -205,6 +220,9 @@ + fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); + fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); + fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); ++ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); ++ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); ++ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); + fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); + } + +@@ -280,7 +298,11 @@ + + page_middle = (ulong *)pgd_pte; + +- page_table = page_middle + (BTOP(vaddr) & (PTRS_PER_PTE - 1)); ++ if (machdep->flags & CPU_BOOKE) ++ page_table = page_middle + (BTOP(vaddr) & (PTRS_PER_PTE - 1)); ++ else ++ page_table = (ulong *)(((pgd_pte & (ulong)machdep->pagemask) + machdep->kvbase) + ++ ((ulong)BTOP(vaddr) & (PTRS_PER_PTE-1))); + + if (verbose) + fprintf(fp, " PMD: %lx => %lx\n",(ulong)page_middle, +@@ -364,7 +386,11 @@ + + page_middle = (ulong *)pgd_pte; + +- page_table = page_middle + (BTOP(kvaddr) & (PTRS_PER_PTE-1)); ++ if (machdep->flags & CPU_BOOKE) ++ page_table = page_middle + (BTOP(kvaddr) & (PTRS_PER_PTE - 1)); ++ else ++ page_table = (ulong *)(((pgd_pte & (ulong)machdep->pagemask) + machdep->kvbase) + ++ ((ulong)BTOP(kvaddr) & (PTRS_PER_PTE-1))); + + if (verbose) + fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, +--- crash/README.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/README 2008-01-17 15:17:18.000000000 -0500 +@@ -69,7 +69,7 @@ + After the kernel is re-compiled, the uncompressed "vmlinux" kernel + that is created in the top-level kernel build directory must be saved. + +- To build this utility, simply uncompress the tar file, enter the crash-4.0 ++ To build this utility, simply uncompress the tar file, enter the crash-4.0-5.0 + subdirectory, and type "make". The initial build will take several minutes + because the gdb module must be configured and and built. Alternatively, the + crash source RPM file may be installed and built, and the resultant crash +@@ -89,11 +89,14 @@ + + $ crash + +- crash 4.0 +- Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. +- Copyright (C) 2004, 2005 IBM Corporation +- Copyright (C) 1999-2005 Hewlett-Packard Co +- Copyright (C) 1999, 2002 Silicon Graphics, Inc. ++ crash 4.0-5.0 ++ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. ++ Copyright (C) 2004, 2005, 2006 IBM Corporation ++ Copyright (C) 1999-2006 Hewlett-Packard Co ++ Copyright (C) 2005, 2006 Fujitsu Limited ++ Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. ++ Copyright (C) 2005 NEC Corporation ++ Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. + Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. + This program is free software, covered by the GNU General Public License, + and you are welcome to change it and/or distribute copies of it under +@@ -111,7 +114,7 @@ + KERNEL: /boot/vmlinux + DUMPFILE: /dev/mem + CPUS: 1 +- DATE: Wed Jul 13 13:26:00 2005 ++ DATE: Thu Jan 17 15:17:18 2008 + UPTIME: 10 days, 22:55:18 + LOAD AVERAGE: 0.08, 0.03, 0.01 + TASKS: 42 +@@ -139,7 +142,7 @@ + exit log rd task + extend mach repeat timer + +- crash version: 4.0 gdb version: 6.1 ++ crash version: 4.0-5.0 gdb version: 6.1 + For help on any command above, enter "help ". 
+ For help on input options, enter "help input". + For help on output options, enter "help output". +@@ -152,11 +155,14 @@ + + $ crash vmlinux vmcore + +- crash 4.0 +- Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. +- Copyright (C) 2004, 2005 IBM Corporation +- Copyright (C) 1999-2005 Hewlett-Packard Co +- Copyright (C) 1999, 2002 Silicon Graphics, Inc. ++ crash 4.0-5.0 ++ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. ++ Copyright (C) 2004, 2005, 2006 IBM Corporation ++ Copyright (C) 1999-2006 Hewlett-Packard Co ++ Copyright (C) 2005, 2006 Fujitsu Limited ++ Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. ++ Copyright (C) 2005 NEC Corporation ++ Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. + Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. + This program is free software, covered by the GNU General Public License, + and you are welcome to change it and/or distribute copies of it under +@@ -196,11 +202,14 @@ + + $ crash vmlinux.17 lcore.cr.17 + +- crash 4.0 +- Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. +- Copyright (C) 2004, 2005 IBM Corporation +- Copyright (C) 1999-2005 Hewlett-Packard Co +- Copyright (C) 1999, 2002 Silicon Graphics, Inc. ++ crash 4.0-5.0 ++ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. ++ Copyright (C) 2004, 2005, 2006 IBM Corporation ++ Copyright (C) 1999-2006 Hewlett-Packard Co ++ Copyright (C) 2005, 2006 Fujitsu Limited ++ Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. ++ Copyright (C) 2005 NEC Corporation ++ Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. + Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. + This program is free software, covered by the GNU General Public License, + and you are welcome to change it and/or distribute copies of it under +--- crash/gdb-6.1/gdb/symtab.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/gdb-6.1/gdb/symtab.c 2008-01-04 09:42:08.000000000 -0500 +@@ -4,7 +4,7 @@ + 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 + Free Software Foundation, Inc. + Portions Copyright (C) 2001, 2002 Mission Critical Linux, Inc. +- Copyright (c) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ Copyright (c) 2002, 2003, 2004, 2005, 2007 Red Hat, Inc. All rights reserved. + + This file is part of GDB. + +@@ -4523,14 +4523,54 @@ + struct symbol *sym; + struct expression *expr; + struct cleanup *old_chain; +- ++ int i; ++ int allsect = 0; ++ char *secname; ++ char buf[80]; ++ + gdb_current_load_module = lm = (struct load_module *)req->addr; + + req->name = lm->mod_namelist; + gdb_delete_symbol_file(req); + +- sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist, +- lm->mod_text_start); ++ for (i = 0 ; i < lm->mod_sections; i++) { ++ if (STREQ(lm->mod_section_data[i].name, ".text") && ++ (lm->mod_section_data[i].flags & SEC_FOUND)) ++ allsect = 1; ++ } ++ ++ if (!allsect) { ++ sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist, ++ lm->mod_text_start ? 
lm->mod_text_start : lm->mod_base); ++ if (lm->mod_data_start) { ++ sprintf(buf, " -s .data 0x%lx", lm->mod_data_start); ++ strcat(req->buf, buf); ++ } ++ if (lm->mod_bss_start) { ++ sprintf(buf, " -s .bss 0x%lx", lm->mod_bss_start); ++ strcat(req->buf, buf); ++ } ++ if (lm->mod_rodata_start) { ++ sprintf(buf, " -s .rodata 0x%lx", lm->mod_rodata_start); ++ strcat(req->buf, buf); ++ } ++ } else { ++ sprintf(req->buf, "add-symbol-file %s 0x%lx", lm->mod_namelist, ++ lm->mod_text_start); ++ for (i = 0; i < lm->mod_sections; i++) { ++ secname = lm->mod_section_data[i].name; ++ if ((lm->mod_section_data[i].flags & SEC_FOUND) && ++ !STREQ(secname, ".text")) { ++ sprintf(buf, " -s %s 0x%lx", secname, ++ lm->mod_section_data[i].offset + lm->mod_base); ++ strcat(req->buf, buf); ++ } ++ } ++ } ++ ++ if (gdb_CRASHDEBUG(1)) { ++ fprintf_filtered(gdb_stdout, "gdb_add_symbol_file: %s\n", req->buf); ++ } + + execute_command(req->buf, FALSE); + +--- crash/gdb-6.1/gdb/symfile.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/gdb-6.1/gdb/symfile.c 2008-01-04 09:42:08.000000000 -0500 +@@ -3,7 +3,7 @@ + Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. + Portions Copyright (C) 2001, 2002 Mission Critical Linux, Inc. +- Copyright (c) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. + + Contributed by Cygnus Support, using pieces from other GDB modules. + +@@ -1678,7 +1678,11 @@ + to load the program. */ + sect_opts[section_index].name = ".text"; + sect_opts[section_index].value = arg; ++#ifdef CRASH_MERGE ++ if (++section_index >= num_sect_opts) ++#else + if (++section_index > num_sect_opts) ++#endif + { + num_sect_opts *= 2; + sect_opts = ((struct sect_opt *) +@@ -1714,7 +1718,11 @@ + { + sect_opts[section_index].value = arg; + expecting_sec_addr = 0; ++#ifdef CRASH_MERGE ++ if (++section_index >= num_sect_opts) ++#else + if (++section_index > num_sect_opts) ++#endif + { + num_sect_opts *= 2; + sect_opts = ((struct sect_opt *) +@@ -3510,6 +3518,13 @@ + bfd_byte * + symfile_relocate_debug_section (bfd *abfd, asection *sectp, bfd_byte *buf) + { ++#ifdef CRASH_MERGE ++ /* Executable files have all the relocations already resolved. ++ * Handle files linked with --emit-relocs. ++ * http://sources.redhat.com/ml/gdb/2006-08/msg00137.html */ ++ if ((abfd->flags & EXEC_P) != 0) ++ return NULL; ++#endif + /* We're only interested in debugging sections with relocation + information. */ + if ((sectp->flags & SEC_RELOC) == 0) +--- crash/gdb-6.1/gdb/ppc-linux-tdep.c.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/gdb-6.1/gdb/ppc-linux-tdep.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1116 @@ ++/* Target-dependent code for GDB, the GNU debugger. ++ ++ Copyright 1986, 1987, 1989, 1991, 1992, 1993, 1994, 1995, 1996, ++ 1997, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. ++ Copyright (c) 2004, 2005 Red Hat, Inc. All rights reserved. ++ ++ This file is part of GDB. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 2 of the License, or ++ (at your option) any later version. ++ ++ This program is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ GNU General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with this program; if not, write to the Free Software ++ Foundation, Inc., 59 Temple Place - Suite 330, ++ Boston, MA 02111-1307, USA. */ ++ ++#include "defs.h" ++#include "frame.h" ++#include "inferior.h" ++#include "symtab.h" ++#include "target.h" ++#include "gdbcore.h" ++#include "gdbcmd.h" ++#include "symfile.h" ++#include "objfiles.h" ++#include "regcache.h" ++#include "value.h" ++#include "osabi.h" ++ ++#include "solib-svr4.h" ++#include "ppc-tdep.h" ++ ++/* The following instructions are used in the signal trampoline code ++ on GNU/Linux PPC. The kernel used to use magic syscalls 0x6666 and ++ 0x7777 but now uses the sigreturn syscalls. We check for both. */ ++#define INSTR_LI_R0_0x6666 0x38006666 ++#define INSTR_LI_R0_0x7777 0x38007777 ++#define INSTR_LI_R0_NR_sigreturn 0x38000077 ++#define INSTR_LI_R0_NR_rt_sigreturn 0x380000AC ++ ++#define INSTR_SC 0x44000002 ++ ++/* Since the *-tdep.c files are platform independent (i.e, they may be ++ used to build cross platform debuggers), we can't include system ++ headers. Therefore, details concerning the sigcontext structure ++ must be painstakingly rerecorded. What's worse, if these details ++ ever change in the header files, they'll have to be changed here ++ as well. */ ++ ++/* __SIGNAL_FRAMESIZE from */ ++#define PPC_LINUX_SIGNAL_FRAMESIZE 64 ++ ++/* From , offsetof(struct sigcontext_struct, regs) == 0x1c */ ++#define PPC_LINUX_REGS_PTR_OFFSET (PPC_LINUX_SIGNAL_FRAMESIZE + 0x1c) ++ ++/* From , ++ offsetof(struct sigcontext_struct, handler) == 0x14 */ ++#define PPC_LINUX_HANDLER_PTR_OFFSET (PPC_LINUX_SIGNAL_FRAMESIZE + 0x14) ++ ++/* From , values for PT_NIP, PT_R1, and PT_LNK */ ++#define PPC_LINUX_PT_R0 0 ++#define PPC_LINUX_PT_R1 1 ++#define PPC_LINUX_PT_R2 2 ++#define PPC_LINUX_PT_R3 3 ++#define PPC_LINUX_PT_R4 4 ++#define PPC_LINUX_PT_R5 5 ++#define PPC_LINUX_PT_R6 6 ++#define PPC_LINUX_PT_R7 7 ++#define PPC_LINUX_PT_R8 8 ++#define PPC_LINUX_PT_R9 9 ++#define PPC_LINUX_PT_R10 10 ++#define PPC_LINUX_PT_R11 11 ++#define PPC_LINUX_PT_R12 12 ++#define PPC_LINUX_PT_R13 13 ++#define PPC_LINUX_PT_R14 14 ++#define PPC_LINUX_PT_R15 15 ++#define PPC_LINUX_PT_R16 16 ++#define PPC_LINUX_PT_R17 17 ++#define PPC_LINUX_PT_R18 18 ++#define PPC_LINUX_PT_R19 19 ++#define PPC_LINUX_PT_R20 20 ++#define PPC_LINUX_PT_R21 21 ++#define PPC_LINUX_PT_R22 22 ++#define PPC_LINUX_PT_R23 23 ++#define PPC_LINUX_PT_R24 24 ++#define PPC_LINUX_PT_R25 25 ++#define PPC_LINUX_PT_R26 26 ++#define PPC_LINUX_PT_R27 27 ++#define PPC_LINUX_PT_R28 28 ++#define PPC_LINUX_PT_R29 29 ++#define PPC_LINUX_PT_R30 30 ++#define PPC_LINUX_PT_R31 31 ++#define PPC_LINUX_PT_NIP 32 ++#define PPC_LINUX_PT_MSR 33 ++#define PPC_LINUX_PT_CTR 35 ++#define PPC_LINUX_PT_LNK 36 ++#define PPC_LINUX_PT_XER 37 ++#define PPC_LINUX_PT_CCR 38 ++#define PPC_LINUX_PT_MQ 39 ++#define PPC_LINUX_PT_FPR0 48 /* each FP reg occupies 2 slots in this space */ ++#define PPC_LINUX_PT_FPR31 (PPC_LINUX_PT_FPR0 + 2*31) ++#define PPC_LINUX_PT_FPSCR (PPC_LINUX_PT_FPR0 + 2*32 + 1) ++ ++static int ppc_linux_at_sigtramp_return_path (CORE_ADDR pc); ++ ++/* Determine if pc is in a signal trampoline... ++ ++ Ha! That's not what this does at all. wait_for_inferior in ++ infrun.c calls PC_IN_SIGTRAMP in order to detect entry into a ++ signal trampoline just after delivery of a signal. But on ++ GNU/Linux, signal trampolines are used for the return path only. 
++ The kernel sets things up so that the signal handler is called ++ directly. ++ ++ If we use in_sigtramp2() in place of in_sigtramp() (see below) ++ we'll (often) end up with stop_pc in the trampoline and prev_pc in ++ the (now exited) handler. The code there will cause a temporary ++ breakpoint to be set on prev_pc which is not very likely to get hit ++ again. ++ ++ If this is confusing, think of it this way... the code in ++ wait_for_inferior() needs to be able to detect entry into a signal ++ trampoline just after a signal is delivered, not after the handler ++ has been run. ++ ++ So, we define in_sigtramp() below to return 1 if the following is ++ true: ++ ++ 1) The previous frame is a real signal trampoline. ++ ++ - and - ++ ++ 2) pc is at the first or second instruction of the corresponding ++ handler. ++ ++ Why the second instruction? It seems that wait_for_inferior() ++ never sees the first instruction when single stepping. When a ++ signal is delivered while stepping, the next instruction that ++ would've been stepped over isn't, instead a signal is delivered and ++ the first instruction of the handler is stepped over instead. That ++ puts us on the second instruction. (I added the test for the ++ first instruction long after the fact, just in case the observed ++ behavior is ever fixed.) ++ ++ PC_IN_SIGTRAMP is called from blockframe.c as well in order to set ++ the frame's type (if a SIGTRAMP_FRAME). Because of our strange ++ definition of in_sigtramp below, we can't rely on the frame's type ++ getting set correctly from within blockframe.c. This is why we ++ take pains to set it in init_extra_frame_info(). ++ ++ NOTE: cagney/2002-11-10: I suspect the real problem here is that ++ the get_prev_frame() only initializes the frame's type after the ++ call to INIT_FRAME_INFO. get_prev_frame() should be fixed, this ++ code shouldn't be working its way around a bug :-(. */ ++ ++int ++ppc_linux_in_sigtramp (CORE_ADDR pc, char *func_name) ++{ ++ CORE_ADDR lr; ++ CORE_ADDR sp; ++ CORE_ADDR tramp_sp; ++ char buf[4]; ++ CORE_ADDR handler; ++ ++ lr = read_register (gdbarch_tdep (current_gdbarch)->ppc_lr_regnum); ++ if (!ppc_linux_at_sigtramp_return_path (lr)) ++ return 0; ++ ++ sp = read_register (SP_REGNUM); ++ ++ if (target_read_memory (sp, buf, sizeof (buf)) != 0) ++ return 0; ++ ++ tramp_sp = extract_unsigned_integer (buf, 4); ++ ++ if (target_read_memory (tramp_sp + PPC_LINUX_HANDLER_PTR_OFFSET, buf, ++ sizeof (buf)) != 0) ++ return 0; ++ ++ handler = extract_unsigned_integer (buf, 4); ++ ++ return (pc == handler || pc == handler + 4); ++} ++ ++static int ++insn_is_sigreturn (unsigned long pcinsn) ++{ ++ switch(pcinsn) ++ { ++ case INSTR_LI_R0_0x6666: ++ case INSTR_LI_R0_0x7777: ++ case INSTR_LI_R0_NR_sigreturn: ++ case INSTR_LI_R0_NR_rt_sigreturn: ++ return 1; ++ default: ++ return 0; ++ } ++} ++ ++/* ++ * The signal handler trampoline is on the stack and consists of exactly ++ * two instructions. The easiest and most accurate way of determining ++ * whether the pc is in one of these trampolines is by inspecting the ++ * instructions. It'd be faster though if we could find a way to do this ++ * via some simple address comparisons. 
++ */ ++static int ++ppc_linux_at_sigtramp_return_path (CORE_ADDR pc) ++{ ++ char buf[12]; ++ unsigned long pcinsn; ++ if (target_read_memory (pc - 4, buf, sizeof (buf)) != 0) ++ return 0; ++ ++ /* extract the instruction at the pc */ ++ pcinsn = extract_unsigned_integer (buf + 4, 4); ++ ++ return ( ++ (insn_is_sigreturn (pcinsn) ++ && extract_unsigned_integer (buf + 8, 4) == INSTR_SC) ++ || ++ (pcinsn == INSTR_SC ++ && insn_is_sigreturn (extract_unsigned_integer (buf, 4)))); ++} ++ ++static CORE_ADDR ++ppc_linux_skip_trampoline_code (CORE_ADDR pc) ++{ ++ char buf[4]; ++ struct obj_section *sect; ++ struct objfile *objfile; ++ unsigned long insn; ++ CORE_ADDR plt_start = 0; ++ CORE_ADDR symtab = 0; ++ CORE_ADDR strtab = 0; ++ int num_slots = -1; ++ int reloc_index = -1; ++ CORE_ADDR plt_table; ++ CORE_ADDR reloc; ++ CORE_ADDR sym; ++ long symidx; ++ char symname[1024]; ++ struct minimal_symbol *msymbol; ++ ++ /* Find the section pc is in; return if not in .plt */ ++ sect = find_pc_section (pc); ++ if (!sect || strcmp (sect->the_bfd_section->name, ".plt") != 0) ++ return 0; ++ ++ objfile = sect->objfile; ++ ++ /* Pick up the instruction at pc. It had better be of the ++ form ++ li r11, IDX ++ ++ where IDX is an index into the plt_table. */ ++ ++ if (target_read_memory (pc, buf, 4) != 0) ++ return 0; ++ insn = extract_unsigned_integer (buf, 4); ++ ++ if ((insn & 0xffff0000) != 0x39600000 /* li r11, VAL */ ) ++ return 0; ++ ++ reloc_index = (insn << 16) >> 16; ++ ++ /* Find the objfile that pc is in and obtain the information ++ necessary for finding the symbol name. */ ++ for (sect = objfile->sections; sect < objfile->sections_end; ++sect) ++ { ++ const char *secname = sect->the_bfd_section->name; ++ if (strcmp (secname, ".plt") == 0) ++ plt_start = sect->addr; ++ else if (strcmp (secname, ".rela.plt") == 0) ++ num_slots = ((int) sect->endaddr - (int) sect->addr) / 12; ++ else if (strcmp (secname, ".dynsym") == 0) ++ symtab = sect->addr; ++ else if (strcmp (secname, ".dynstr") == 0) ++ strtab = sect->addr; ++ } ++ ++ /* Make sure we have all the information we need. */ ++ if (plt_start == 0 || num_slots == -1 || symtab == 0 || strtab == 0) ++ return 0; ++ ++ /* Compute the value of the plt table */ ++ plt_table = plt_start + 72 + 8 * num_slots; ++ ++ /* Get address of the relocation entry (Elf32_Rela) */ ++ if (target_read_memory (plt_table + reloc_index, buf, 4) != 0) ++ return 0; ++ reloc = extract_unsigned_integer (buf, 4); ++ ++ sect = find_pc_section (reloc); ++ if (!sect) ++ return 0; ++ ++ if (strcmp (sect->the_bfd_section->name, ".text") == 0) ++ return reloc; ++ ++ /* Now get the r_info field which is the relocation type and symbol ++ index. */ ++ if (target_read_memory (reloc + 4, buf, 4) != 0) ++ return 0; ++ symidx = extract_unsigned_integer (buf, 4); ++ ++ /* Shift out the relocation type leaving just the symbol index */ ++ /* symidx = ELF32_R_SYM(symidx); */ ++ symidx = symidx >> 8; ++ ++ /* compute the address of the symbol */ ++ sym = symtab + symidx * 4; ++ ++ /* Fetch the string table index */ ++ if (target_read_memory (sym, buf, 4) != 0) ++ return 0; ++ symidx = extract_unsigned_integer (buf, 4); ++ ++ /* Fetch the string; we don't know how long it is. Is it possible ++ that the following will fail because we're trying to fetch too ++ much? 
*/ ++ if (target_read_memory (strtab + symidx, symname, sizeof (symname)) != 0) ++ return 0; ++ ++ /* This might not work right if we have multiple symbols with the ++ same name; the only way to really get it right is to perform ++ the same sort of lookup as the dynamic linker. */ ++ msymbol = lookup_minimal_symbol_text (symname, NULL); ++ if (!msymbol) ++ return 0; ++ ++ return SYMBOL_VALUE_ADDRESS (msymbol); ++} ++ ++/* The rs6000 version of FRAME_SAVED_PC will almost work for us. The ++ signal handler details are different, so we'll handle those here ++ and call the rs6000 version to do the rest. */ ++CORE_ADDR ++ppc_linux_frame_saved_pc (struct frame_info *fi) ++{ ++ if ((get_frame_type (fi) == SIGTRAMP_FRAME)) ++ { ++ CORE_ADDR regs_addr = ++ read_memory_integer (get_frame_base (fi) ++ + PPC_LINUX_REGS_PTR_OFFSET, 4); ++ /* return the NIP in the regs array */ ++ return read_memory_integer (regs_addr + 4 * PPC_LINUX_PT_NIP, 4); ++ } ++ else if (get_next_frame (fi) ++ && (get_frame_type (get_next_frame (fi)) == SIGTRAMP_FRAME)) ++ { ++ CORE_ADDR regs_addr = ++ read_memory_integer (get_frame_base (get_next_frame (fi)) ++ + PPC_LINUX_REGS_PTR_OFFSET, 4); ++ /* return LNK in the regs array */ ++ return read_memory_integer (regs_addr + 4 * PPC_LINUX_PT_LNK, 4); ++ } ++ else ++ return rs6000_frame_saved_pc (fi); ++} ++ ++void ++ppc_linux_init_extra_frame_info (int fromleaf, struct frame_info *fi) ++{ ++ rs6000_init_extra_frame_info (fromleaf, fi); ++ ++ if (get_next_frame (fi) != 0) ++ { ++ /* We're called from get_prev_frame_info; check to see if ++ this is a signal frame by looking to see if the pc points ++ at trampoline code */ ++ if (ppc_linux_at_sigtramp_return_path (get_frame_pc (fi))) ++ deprecated_set_frame_type (fi, SIGTRAMP_FRAME); ++ else ++ /* FIXME: cagney/2002-11-10: Is this double bogus? What ++ happens if the frame has previously been marked as a dummy? 
*/ ++ deprecated_set_frame_type (fi, NORMAL_FRAME); ++ } ++} ++ ++int ++ppc_linux_frameless_function_invocation (struct frame_info *fi) ++{ ++ /* We'll find the wrong thing if we let ++ rs6000_frameless_function_invocation () search for a signal trampoline */ ++ if (ppc_linux_at_sigtramp_return_path (get_frame_pc (fi))) ++ return 0; ++ else ++ return rs6000_frameless_function_invocation (fi); ++} ++ ++void ++ppc_linux_frame_init_saved_regs (struct frame_info *fi) ++{ ++ if ((get_frame_type (fi) == SIGTRAMP_FRAME)) ++ { ++ CORE_ADDR regs_addr; ++ int i; ++ if (deprecated_get_frame_saved_regs (fi)) ++ return; ++ ++ frame_saved_regs_zalloc (fi); ++ ++ regs_addr = ++ read_memory_integer (get_frame_base (fi) ++ + PPC_LINUX_REGS_PTR_OFFSET, 4); ++ deprecated_get_frame_saved_regs (fi)[PC_REGNUM] = regs_addr + 4 * PPC_LINUX_PT_NIP; ++ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_ps_regnum] = ++ regs_addr + 4 * PPC_LINUX_PT_MSR; ++ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_cr_regnum] = ++ regs_addr + 4 * PPC_LINUX_PT_CCR; ++ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_lr_regnum] = ++ regs_addr + 4 * PPC_LINUX_PT_LNK; ++ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_ctr_regnum] = ++ regs_addr + 4 * PPC_LINUX_PT_CTR; ++ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_xer_regnum] = ++ regs_addr + 4 * PPC_LINUX_PT_XER; ++ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_mq_regnum] = ++ regs_addr + 4 * PPC_LINUX_PT_MQ; ++ for (i = 0; i < 32; i++) ++ deprecated_get_frame_saved_regs (fi)[gdbarch_tdep (current_gdbarch)->ppc_gp0_regnum + i] = ++ regs_addr + 4 * PPC_LINUX_PT_R0 + 4 * i; ++ for (i = 0; i < 32; i++) ++ deprecated_get_frame_saved_regs (fi)[FP0_REGNUM + i] = regs_addr + 4 * PPC_LINUX_PT_FPR0 + 8 * i; ++ } ++ else ++ rs6000_frame_init_saved_regs (fi); ++} ++ ++CORE_ADDR ++ppc_linux_frame_chain (struct frame_info *thisframe) ++{ ++ /* Kernel properly constructs the frame chain for the handler */ ++ if ((get_frame_type (thisframe) == SIGTRAMP_FRAME)) ++ return read_memory_integer (get_frame_base (thisframe), 4); ++ else ++ return rs6000_frame_chain (thisframe); ++} ++ ++/* ppc_linux_memory_remove_breakpoints attempts to remove a breakpoint ++ in much the same fashion as memory_remove_breakpoint in mem-break.c, ++ but is careful not to write back the previous contents if the code ++ in question has changed in between inserting the breakpoint and ++ removing it. ++ ++ Here is the problem that we're trying to solve... ++ ++ Once upon a time, before introducing this function to remove ++ breakpoints from the inferior, setting a breakpoint on a shared ++ library function prior to running the program would not work ++ properly. In order to understand the problem, it is first ++ necessary to understand a little bit about dynamic linking on ++ this platform. ++ ++ A call to a shared library function is accomplished via a bl ++ (branch-and-link) instruction whose branch target is an entry ++ in the procedure linkage table (PLT). The PLT in the object ++ file is uninitialized. To gdb, prior to running the program, the ++ entries in the PLT are all zeros. ++ ++ Once the program starts running, the shared libraries are loaded ++ and the procedure linkage table is initialized, but the entries in ++ the table are not (necessarily) resolved. Once a function is ++ actually called, the code in the PLT is hit and the function is ++ resolved. 
In order to better illustrate this, an example is in ++ order; the following example is from the gdb testsuite. ++ ++ We start the program shmain. ++ ++ [kev@arroyo testsuite]$ ../gdb gdb.base/shmain ++ [...] ++ ++ We place two breakpoints, one on shr1 and the other on main. ++ ++ (gdb) b shr1 ++ Breakpoint 1 at 0x100409d4 ++ (gdb) b main ++ Breakpoint 2 at 0x100006a0: file gdb.base/shmain.c, line 44. ++ ++ Examine the instruction (and the immediatly following instruction) ++ upon which the breakpoint was placed. Note that the PLT entry ++ for shr1 contains zeros. ++ ++ (gdb) x/2i 0x100409d4 ++ 0x100409d4 : .long 0x0 ++ 0x100409d8 : .long 0x0 ++ ++ Now run 'til main. ++ ++ (gdb) r ++ Starting program: gdb.base/shmain ++ Breakpoint 1 at 0xffaf790: file gdb.base/shr1.c, line 19. ++ ++ Breakpoint 2, main () ++ at gdb.base/shmain.c:44 ++ 44 g = 1; ++ ++ Examine the PLT again. Note that the loading of the shared ++ library has initialized the PLT to code which loads a constant ++ (which I think is an index into the GOT) into r11 and then ++ branchs a short distance to the code which actually does the ++ resolving. ++ ++ (gdb) x/2i 0x100409d4 ++ 0x100409d4 : li r11,4 ++ 0x100409d8 : b 0x10040984 ++ (gdb) c ++ Continuing. ++ ++ Breakpoint 1, shr1 (x=1) ++ at gdb.base/shr1.c:19 ++ 19 l = 1; ++ ++ Now we've hit the breakpoint at shr1. (The breakpoint was ++ reset from the PLT entry to the actual shr1 function after the ++ shared library was loaded.) Note that the PLT entry has been ++ resolved to contain a branch that takes us directly to shr1. ++ (The real one, not the PLT entry.) ++ ++ (gdb) x/2i 0x100409d4 ++ 0x100409d4 : b 0xffaf76c ++ 0x100409d8 : b 0x10040984 ++ ++ The thing to note here is that the PLT entry for shr1 has been ++ changed twice. ++ ++ Now the problem should be obvious. GDB places a breakpoint (a ++ trap instruction) on the zero value of the PLT entry for shr1. ++ Later on, after the shared library had been loaded and the PLT ++ initialized, GDB gets a signal indicating this fact and attempts ++ (as it always does when it stops) to remove all the breakpoints. ++ ++ The breakpoint removal was causing the former contents (a zero ++ word) to be written back to the now initialized PLT entry thus ++ destroying a portion of the initialization that had occurred only a ++ short time ago. When execution continued, the zero word would be ++ executed as an instruction an an illegal instruction trap was ++ generated instead. (0 is not a legal instruction.) ++ ++ The fix for this problem was fairly straightforward. The function ++ memory_remove_breakpoint from mem-break.c was copied to this file, ++ modified slightly, and renamed to ppc_linux_memory_remove_breakpoint. ++ In tm-linux.h, MEMORY_REMOVE_BREAKPOINT is defined to call this new ++ function. ++ ++ The differences between ppc_linux_memory_remove_breakpoint () and ++ memory_remove_breakpoint () are minor. All that the former does ++ that the latter does not is check to make sure that the breakpoint ++ location actually contains a breakpoint (trap instruction) prior ++ to attempting to write back the old contents. If it does contain ++ a trap instruction, we allow the old contents to be written back. ++ Otherwise, we silently do nothing. ++ ++ The big question is whether memory_remove_breakpoint () should be ++ changed to have the same functionality. The downside is that more ++ traffic is generated for remote targets since we'll have an extra ++ fetch of a memory word each time a breakpoint is removed. 
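To make the fix concrete, here is a minimal standalone sketch, separate from the patch, of the check-before-restore idea: the cached bytes go back only if the breakpoint trap is still present. The memory helpers are simplified stand-ins for the real target accessors, the 4-byte pattern is just a PowerPC trap encoding used for the demo, and the "resolved" word is the li r11,4 value from the walkthrough above.

#include <stdio.h>
#include <string.h>

#define BPLEN 4
static unsigned char target_mem[BPLEN];                  /* fake target memory */
static const unsigned char trap[BPLEN] = { 0x7f, 0xe0, 0x00, 0x08 };

static int read_mem(unsigned char *buf)        { memcpy(buf, target_mem, BPLEN); return 0; }
static int write_mem(const unsigned char *buf) { memcpy(target_mem, buf, BPLEN); return 0; }

/* Restore the cached bytes only if the trap is still in place. */
static int careful_remove_breakpoint(const unsigned char *contents_cache)
{
    unsigned char current[BPLEN];
    int val = read_mem(current);

    if (val == 0 && memcmp(trap, current, BPLEN) == 0)
        val = write_mem(contents_cache);
    return val;                           /* otherwise: silently do nothing */
}

int main(void)
{
    unsigned char saved[BPLEN]    = { 0, 0, 0, 0 };          /* old (zero) PLT word */
    unsigned char resolved[BPLEN] = { 0x39, 0x60, 0x00, 0x04 }; /* "li r11,4" */

    memcpy(target_mem, trap, BPLEN);      /* breakpoint inserted on the PLT entry */
    memcpy(target_mem, resolved, BPLEN);  /* dynamic linker rewrote it meanwhile */

    careful_remove_breakpoint(saved);     /* must not write the zeros back */
    printf("PLT entry now: %02x %02x %02x %02x\n",
           target_mem[0], target_mem[1], target_mem[2], target_mem[3]);
    return 0;
}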
++ ++ For the time being, we'll leave this self-modifying-code-friendly ++ version in ppc-linux-tdep.c, but it ought to be migrated somewhere ++ else in the event that some other platform has similar needs with ++ regard to removing breakpoints in some potentially self modifying ++ code. */ ++int ++ppc_linux_memory_remove_breakpoint (CORE_ADDR addr, char *contents_cache) ++{ ++ const unsigned char *bp; ++ int val; ++ int bplen; ++ char old_contents[BREAKPOINT_MAX]; ++ ++ /* Determine appropriate breakpoint contents and size for this address. */ ++ bp = BREAKPOINT_FROM_PC (&addr, &bplen); ++ if (bp == NULL) ++ error ("Software breakpoints not implemented for this target."); ++ ++ val = target_read_memory (addr, old_contents, bplen); ++ ++ /* If our breakpoint is no longer at the address, this means that the ++ program modified the code on us, so it is wrong to put back the ++ old value */ ++ if (val == 0 && memcmp (bp, old_contents, bplen) == 0) ++ val = target_write_memory (addr, contents_cache, bplen); ++ ++ return val; ++} ++ ++/* For historic reasons, PPC 32 GNU/Linux follows PowerOpen rather ++ than the 32 bit SYSV R4 ABI structure return convention - all ++ structures, no matter their size, are put in memory. Vectors, ++ which were added later, do get returned in a register though. */ ++ ++static enum return_value_convention ++ppc_linux_return_value (struct gdbarch *gdbarch, struct type *valtype, ++ struct regcache *regcache, void *readbuf, ++ const void *writebuf) ++{ ++ if ((TYPE_CODE (valtype) == TYPE_CODE_STRUCT ++ || TYPE_CODE (valtype) == TYPE_CODE_UNION) ++ && !((TYPE_LENGTH (valtype) == 16 || TYPE_LENGTH (valtype) == 8) ++ && TYPE_VECTOR (valtype))) ++ return RETURN_VALUE_STRUCT_CONVENTION; ++ else ++ return ppc_sysv_abi_return_value (gdbarch, valtype, regcache, readbuf, ++ writebuf); ++} ++ ++/* Fetch (and possibly build) an appropriate link_map_offsets ++ structure for GNU/Linux PPC targets using the struct offsets ++ defined in link.h (but without actual reference to that file). ++ ++ This makes it possible to access GNU/Linux PPC shared libraries ++ from a GDB that was not built on an GNU/Linux PPC host (for cross ++ debugging). */ ++ ++struct link_map_offsets * ++ppc_linux_svr4_fetch_link_map_offsets (void) ++{ ++ static struct link_map_offsets lmo; ++ static struct link_map_offsets *lmp = NULL; ++ ++ if (lmp == NULL) ++ { ++ lmp = &lmo; ++ ++ lmo.r_debug_size = 8; /* The actual size is 20 bytes, but ++ this is all we need. */ ++ lmo.r_map_offset = 4; ++ lmo.r_map_size = 4; ++ ++ lmo.link_map_size = 20; /* The actual size is 560 bytes, but ++ this is all we need. */ ++ lmo.l_addr_offset = 0; ++ lmo.l_addr_size = 4; ++ ++ lmo.l_name_offset = 4; ++ lmo.l_name_size = 4; ++ ++ lmo.l_next_offset = 12; ++ lmo.l_next_size = 4; ++ ++ lmo.l_prev_offset = 16; ++ lmo.l_prev_size = 4; ++ } ++ ++ return lmp; ++} ++ ++ ++/* Macros for matching instructions. Note that, since all the ++ operands are masked off before they're or-ed into the instruction, ++ you can use -1 to make masks. 
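A quick illustration of the masking idea, before the macros themselves: since each operand is ANDed with its field mask before being ORed into the word, passing -1 for a field produces an all-ones mask covering exactly that field. The sketch below is separate from the patch and uses a lightly adapted copy of the d-form encoder (unsigned constants added so the shifts are well defined).

#include <stdio.h>

#define insn_d(opcd, rts, ra, d)        \
  ((((opcd) & 0x3fu) << 26)             \
   | (((rts) & 0x1fu) << 21)            \
   | (((ra) & 0x1fu) << 16)             \
   | ((d) & 0xffffu))

int main(void)
{
    /* addis r12, r2, <anything>: match opcode/rt/ra, ignore the offset. */
    unsigned int data = insn_d(15, 12, 2, 0);
    unsigned int mask = insn_d(-1, -1, -1, 0);      /* -1 fills each field */
    unsigned int candidate = insn_d(15, 12, 2, 0x1234);

    printf("mask 0x%08x data 0x%08x\n", mask, data);
    printf("candidate 0x%08x matches: %s\n", candidate,
           ((candidate & mask) == data) ? "yes" : "no");
    return 0;
}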
*/ ++ ++#define insn_d(opcd, rts, ra, d) \ ++ ((((opcd) & 0x3f) << 26) \ ++ | (((rts) & 0x1f) << 21) \ ++ | (((ra) & 0x1f) << 16) \ ++ | ((d) & 0xffff)) ++ ++#define insn_ds(opcd, rts, ra, d, xo) \ ++ ((((opcd) & 0x3f) << 26) \ ++ | (((rts) & 0x1f) << 21) \ ++ | (((ra) & 0x1f) << 16) \ ++ | ((d) & 0xfffc) \ ++ | ((xo) & 0x3)) ++ ++#define insn_xfx(opcd, rts, spr, xo) \ ++ ((((opcd) & 0x3f) << 26) \ ++ | (((rts) & 0x1f) << 21) \ ++ | (((spr) & 0x1f) << 16) \ ++ | (((spr) & 0x3e0) << 6) \ ++ | (((xo) & 0x3ff) << 1)) ++ ++/* Read a PPC instruction from memory. PPC instructions are always ++ big-endian, no matter what endianness the program is running in, so ++ we can't use read_memory_integer or one of its friends here. */ ++static unsigned int ++read_insn (CORE_ADDR pc) ++{ ++ unsigned char buf[4]; ++ ++ read_memory (pc, buf, 4); ++ return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; ++} ++ ++ ++/* An instruction to match. */ ++struct insn_pattern ++{ ++ unsigned int mask; /* mask the insn with this... */ ++ unsigned int data; /* ...and see if it matches this. */ ++ int optional; /* If non-zero, this insn may be absent. */ ++}; ++ ++/* Return non-zero if the instructions at PC match the series ++ described in PATTERN, or zero otherwise. PATTERN is an array of ++ 'struct insn_pattern' objects, terminated by an entry whose mask is ++ zero. ++ ++ When the match is successful, fill INSN[i] with what PATTERN[i] ++ matched. If PATTERN[i] is optional, and the instruction wasn't ++ present, set INSN[i] to 0 (which is not a valid PPC instruction). ++ INSN should have as many elements as PATTERN. Note that, if ++ PATTERN contains optional instructions which aren't present in ++ memory, then INSN will have holes, so INSN[i] isn't necessarily the ++ i'th instruction in memory. */ ++static int ++insns_match_pattern (CORE_ADDR pc, ++ struct insn_pattern *pattern, ++ unsigned int *insn) ++{ ++ int i; ++ ++ for (i = 0; pattern[i].mask; i++) ++ { ++ insn[i] = read_insn (pc); ++ if ((insn[i] & pattern[i].mask) == pattern[i].data) ++ pc += 4; ++ else if (pattern[i].optional) ++ insn[i] = 0; ++ else ++ return 0; ++ } ++ ++ return 1; ++} ++ ++ ++/* Return the 'd' field of the d-form instruction INSN, properly ++ sign-extended. */ ++static CORE_ADDR ++insn_d_field (unsigned int insn) ++{ ++ return ((((CORE_ADDR) insn & 0xffff) ^ 0x8000) - 0x8000); ++} ++ ++ ++/* Return the 'ds' field of the ds-form instruction INSN, with the two ++ zero bits concatenated at the right, and properly ++ sign-extended. */ ++static CORE_ADDR ++insn_ds_field (unsigned int insn) ++{ ++ return ((((CORE_ADDR) insn & 0xfffc) ^ 0x8000) - 0x8000); ++} ++ ++ ++/* If DESC is the address of a 64-bit PowerPC GNU/Linux function ++ descriptor, return the descriptor's entry point. */ ++static CORE_ADDR ++ppc64_desc_entry_point (CORE_ADDR desc) ++{ ++ /* The first word of the descriptor is the entry point. */ ++ return (CORE_ADDR) read_memory_unsigned_integer (desc, 8); ++} ++ ++ ++/* Pattern for the standard linkage function. These are built by ++ build_plt_stub in elf64-ppc.c, whose GLINK argument is always ++ zero. 
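For readers new to this style of matching, here is a self-contained toy version of the mask/data/optional walk that insns_match_pattern() performs, run over a hardcoded array standing in for target memory. It is illustration only, not part of the patch, and the 32-bit values are invented; the control flow is the point.

#include <stdio.h>

struct insn_pattern {
    unsigned int mask;      /* mask the insn with this... */
    unsigned int data;      /* ...and see if it matches this */
    int optional;           /* non-zero: this insn may be absent */
};

static const unsigned int memory[] = { 0xAAAA0007, 0xBBBB0000, 0xCCCC0001 };

static int insns_match(const unsigned int *mem, int nmem,
                       const struct insn_pattern *pat, unsigned int *insn)
{
    int i, pc = 0;

    for (i = 0; pat[i].mask; i++) {
        if (pc >= nmem)
            return 0;
        insn[i] = mem[pc];
        if ((insn[i] & pat[i].mask) == pat[i].data)
            pc++;                       /* consumed one instruction */
        else if (pat[i].optional)
            insn[i] = 0;                /* optional insn absent: leave a hole */
        else
            return 0;
    }
    return 1;
}

int main(void)
{
    /* Expect AAAA????, then an *optional* DDDD????, then BBBB????, CCCC????. */
    struct insn_pattern pat[] = {
        { 0xffff0000, 0xAAAA0000, 0 },
        { 0xffff0000, 0xDDDD0000, 1 },   /* not present in "memory" */
        { 0xffff0000, 0xBBBB0000, 0 },
        { 0xffff0000, 0xCCCC0000, 0 },
        { 0, 0, 0 }
    };
    unsigned int insn[4];

    printf("match: %d  (insn[1] hole = 0x%x)\n",
           insns_match(memory, 3, pat, insn), insn[1]);
    return 0;
}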
*/ ++static struct insn_pattern ppc64_standard_linkage[] = ++ { ++ /* addis r12, r2, */ ++ { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 }, ++ ++ /* std r2, 40(r1) */ ++ { -1, insn_ds (62, 2, 1, 40, 0), 0 }, ++ ++ /* ld r11, (r12) */ ++ { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 }, ++ ++ /* addis r12, r12, 1 */ ++ { insn_d (-1, -1, -1, -1), insn_d (15, 12, 2, 1), 1 }, ++ ++ /* ld r2, (r12) */ ++ { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 }, ++ ++ /* addis r12, r12, 1 */ ++ { insn_d (-1, -1, -1, -1), insn_d (15, 12, 2, 1), 1 }, ++ ++ /* mtctr r11 */ ++ { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), ++ 0 }, ++ ++ /* ld r11, (r12) */ ++ { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 }, ++ ++ /* bctr */ ++ { -1, 0x4e800420, 0 }, ++ ++ { 0, 0, 0 } ++ }; ++#define PPC64_STANDARD_LINKAGE_LEN \ ++ (sizeof (ppc64_standard_linkage) / sizeof (ppc64_standard_linkage[0])) ++ ++ ++/* Recognize a 64-bit PowerPC GNU/Linux linkage function --- what GDB ++ calls a "solib trampoline". */ ++static int ++ppc64_in_solib_call_trampoline (CORE_ADDR pc, char *name) ++{ ++ /* Detecting solib call trampolines on PPC64 GNU/Linux is a pain. ++ ++ It's not specifically solib call trampolines that are the issue. ++ Any call from one function to another function that uses a ++ different TOC requires a trampoline, to save the caller's TOC ++ pointer and then load the callee's TOC. An executable or shared ++ library may have more than one TOC, so even intra-object calls ++ may require a trampoline. Since executable and shared libraries ++ will all have their own distinct TOCs, every inter-object call is ++ also an inter-TOC call, and requires a trampoline --- so "solib ++ call trampolines" are just a special case. ++ ++ The 64-bit PowerPC GNU/Linux ABI calls these call trampolines ++ "linkage functions". Since they need to be near the functions ++ that call them, they all appear in .text, not in any special ++ section. The .plt section just contains an array of function ++ descriptors, from which the linkage functions load the callee's ++ entry point, TOC value, and environment pointer. So ++ in_plt_section is useless. The linkage functions don't have any ++ special linker symbols to name them, either. ++ ++ The only way I can see to recognize them is to actually look at ++ their code. They're generated by ppc_build_one_stub and some ++ other functions in bfd/elf64-ppc.c, so that should show us all ++ the instruction sequences we need to recognize. */ ++ unsigned int insn[PPC64_STANDARD_LINKAGE_LEN]; ++ ++ return insns_match_pattern (pc, ppc64_standard_linkage, insn); ++} ++ ++ ++/* When the dynamic linker is doing lazy symbol resolution, the first ++ call to a function in another object will go like this: ++ ++ - The user's function calls the linkage function: ++ ++ 100007c4: 4b ff fc d5 bl 10000498 ++ 100007c8: e8 41 00 28 ld r2,40(r1) ++ ++ - The linkage function loads the entry point (and other stuff) from ++ the function descriptor in the PLT, and jumps to it: ++ ++ 10000498: 3d 82 00 00 addis r12,r2,0 ++ 1000049c: f8 41 00 28 std r2,40(r1) ++ 100004a0: e9 6c 80 98 ld r11,-32616(r12) ++ 100004a4: e8 4c 80 a0 ld r2,-32608(r12) ++ 100004a8: 7d 69 03 a6 mtctr r11 ++ 100004ac: e9 6c 80 a8 ld r11,-32600(r12) ++ 100004b0: 4e 80 04 20 bctr ++ ++ - But since this is the first time that PLT entry has been used, it ++ sends control to its glink entry. 
That loads the number of the ++ PLT entry and jumps to the common glink0 code: ++ ++ 10000c98: 38 00 00 00 li r0,0 ++ 10000c9c: 4b ff ff dc b 10000c78 ++ ++ - The common glink0 code then transfers control to the dynamic ++ linker's fixup code: ++ ++ 10000c78: e8 41 00 28 ld r2,40(r1) ++ 10000c7c: 3d 82 00 00 addis r12,r2,0 ++ 10000c80: e9 6c 80 80 ld r11,-32640(r12) ++ 10000c84: e8 4c 80 88 ld r2,-32632(r12) ++ 10000c88: 7d 69 03 a6 mtctr r11 ++ 10000c8c: e9 6c 80 90 ld r11,-32624(r12) ++ 10000c90: 4e 80 04 20 bctr ++ ++ Eventually, this code will figure out how to skip all of this, ++ including the dynamic linker. At the moment, we just get through ++ the linkage function. */ ++ ++/* If the current thread is about to execute a series of instructions ++ at PC matching the ppc64_standard_linkage pattern, and INSN is the result ++ from that pattern match, return the code address to which the ++ standard linkage function will send them. (This doesn't deal with ++ dynamic linker lazy symbol resolution stubs.) */ ++static CORE_ADDR ++ppc64_standard_linkage_target (CORE_ADDR pc, unsigned int *insn) ++{ ++ struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch); ++ ++ /* The address of the function descriptor this linkage function ++ references. */ ++ CORE_ADDR desc ++ = ((CORE_ADDR) read_register (tdep->ppc_gp0_regnum + 2) ++ + (insn_d_field (insn[0]) << 16) ++ + insn_ds_field (insn[2])); ++ ++ /* The first word of the descriptor is the entry point. Return that. */ ++ return ppc64_desc_entry_point (desc); ++} ++ ++ ++/* Given that we've begun executing a call trampoline at PC, return ++ the entry point of the function the trampoline will go to. */ ++static CORE_ADDR ++ppc64_skip_trampoline_code (CORE_ADDR pc) ++{ ++ unsigned int ppc64_standard_linkage_insn[PPC64_STANDARD_LINKAGE_LEN]; ++ ++ if (insns_match_pattern (pc, ppc64_standard_linkage, ++ ppc64_standard_linkage_insn)) ++ return ppc64_standard_linkage_target (pc, ppc64_standard_linkage_insn); ++ else ++ return 0; ++} ++ ++ ++/* Support for CONVERT_FROM_FUNC_PTR_ADDR (ARCH, ADDR, TARG) on PPC64 ++ GNU/Linux. ++ ++ Usually a function pointer's representation is simply the address ++ of the function. On GNU/Linux on the 64-bit PowerPC however, a ++ function pointer is represented by a pointer to a TOC entry. This ++ TOC entry contains three words, the first word is the address of ++ the function, the second word is the TOC pointer (r2), and the ++ third word is the static chain value. Throughout GDB it is ++ currently assumed that a function pointer contains the address of ++ the function, which is not easy to fix. In addition, the ++ conversion of a function address to a function pointer would ++ require allocation of a TOC entry in the inferior's memory space, ++ with all its drawbacks. To be able to call C++ virtual methods in ++ the inferior (which are called via function pointers), ++ find_function_addr uses this function to get the function address ++ from a function pointer. */ ++ ++/* If ADDR points at what is clearly a function descriptor, transform ++ it into the address of the corresponding function. Be ++ conservative, otherwize GDB will do the transformation on any ++ random addresses such as occures when there is no symbol table. */ ++ ++static CORE_ADDR ++ppc64_linux_convert_from_func_ptr_addr (struct gdbarch *gdbarch, ++ CORE_ADDR addr, ++ struct target_ops *targ) ++{ ++ struct section_table *s = target_section_by_addr (targ, addr); ++ ++ /* Check if ADDR points to a function descriptor. 
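The descriptor being talked about can be pictured as three doublewords: entry point, TOC value, static chain. The hedged standalone sketch below, separate from the patch, uses an invented struct and values to show why converting a PPC64 function pointer amounts to reading the first doubleword at the descriptor's address.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Invented picture of a 64-bit ELF function descriptor. */
struct func_descriptor {
    uint64_t entry_point;     /* address of the code */
    uint64_t toc;             /* r2 value the callee expects */
    uint64_t static_chain;
};

/* Stand-in for "read 8 bytes of target memory at ADDR". */
static uint64_t read_u64(const void *addr)
{
    uint64_t v;
    memcpy(&v, addr, sizeof v);
    return v;
}

int main(void)
{
    struct func_descriptor opd_entry = {
        .entry_point  = 0x10000498,    /* made-up addresses */
        .toc          = 0x10090000,
        .static_chain = 0,
    };

    /* A "function pointer" on this ABI is the descriptor's address,
       so the callable code address is its first doubleword. */
    printf("descriptor %p -> entry 0x%llx\n", (void *)&opd_entry,
           (unsigned long long)read_u64(&opd_entry));
    return 0;
}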
*/ ++ if (s && strcmp (s->the_bfd_section->name, ".opd") == 0) ++ return get_target_memory_unsigned (targ, addr, 8); ++ ++ return addr; ++} ++ ++#ifdef CRASH_MERGE ++enum { ++ PPC_ELF_NGREG = 48, ++ PPC_ELF_NFPREG = 33, ++ PPC_ELF_NVRREG = 33 ++}; ++ ++enum { ++ ELF_GREGSET_SIZE = (PPC_ELF_NGREG * 4), ++ ELF_FPREGSET_SIZE = (PPC_ELF_NFPREG * 8) ++}; ++#else ++enum { ++ ELF_NGREG = 48, ++ ELF_NFPREG = 33, ++ ELF_NVRREG = 33 ++}; ++ ++enum { ++ ELF_GREGSET_SIZE = (ELF_NGREG * 4), ++ ELF_FPREGSET_SIZE = (ELF_NFPREG * 8) ++}; ++#endif ++ ++void ++ppc_linux_supply_gregset (char *buf) ++{ ++ int regi; ++ struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch); ++ ++ for (regi = 0; regi < 32; regi++) ++ supply_register (regi, buf + 4 * regi); ++ ++ supply_register (PC_REGNUM, buf + 4 * PPC_LINUX_PT_NIP); ++ supply_register (tdep->ppc_lr_regnum, buf + 4 * PPC_LINUX_PT_LNK); ++ supply_register (tdep->ppc_cr_regnum, buf + 4 * PPC_LINUX_PT_CCR); ++ supply_register (tdep->ppc_xer_regnum, buf + 4 * PPC_LINUX_PT_XER); ++ supply_register (tdep->ppc_ctr_regnum, buf + 4 * PPC_LINUX_PT_CTR); ++ if (tdep->ppc_mq_regnum != -1) ++ supply_register (tdep->ppc_mq_regnum, buf + 4 * PPC_LINUX_PT_MQ); ++ supply_register (tdep->ppc_ps_regnum, buf + 4 * PPC_LINUX_PT_MSR); ++} ++ ++void ++ppc_linux_supply_fpregset (char *buf) ++{ ++ int regi; ++ struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch); ++ ++ for (regi = 0; regi < 32; regi++) ++ supply_register (FP0_REGNUM + regi, buf + 8 * regi); ++ ++ /* The FPSCR is stored in the low order word of the last doubleword in the ++ fpregset. */ ++ supply_register (tdep->ppc_fpscr_regnum, buf + 8 * 32 + 4); ++} ++ ++/* ++ Use a local version of this function to get the correct types for regsets. ++*/ ++ ++static void ++fetch_core_registers (char *core_reg_sect, ++ unsigned core_reg_size, ++ int which, ++ CORE_ADDR reg_addr) ++{ ++ if (which == 0) ++ { ++ if (core_reg_size == ELF_GREGSET_SIZE) ++ ppc_linux_supply_gregset (core_reg_sect); ++ else ++ warning ("wrong size gregset struct in core file"); ++ } ++ else if (which == 2) ++ { ++ if (core_reg_size == ELF_FPREGSET_SIZE) ++ ppc_linux_supply_fpregset (core_reg_sect); ++ else ++ warning ("wrong size fpregset struct in core file"); ++ } ++} ++ ++/* Register that we are able to handle ELF file formats using standard ++ procfs "regset" structures. */ ++ ++static struct core_fns ppc_linux_regset_core_fns = ++{ ++ bfd_target_elf_flavour, /* core_flavour */ ++ default_check_format, /* check_format */ ++ default_core_sniffer, /* core_sniffer */ ++ fetch_core_registers, /* core_read_registers */ ++ NULL /* next */ ++}; ++ ++static void ++ppc_linux_init_abi (struct gdbarch_info info, ++ struct gdbarch *gdbarch) ++{ ++ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); ++ ++ if (tdep->wordsize == 4) ++ { ++ /* Until November 2001, gcc did not comply with the 32 bit SysV ++ R4 ABI requirement that structures less than or equal to 8 ++ bytes should be returned in registers. Instead GCC was using ++ the the AIX/PowerOpen ABI - everything returned in memory ++ (well ignoring vectors that is). When this was corrected, it ++ wasn't fixed for GNU/Linux native platform. Use the ++ PowerOpen struct convention. */ ++ set_gdbarch_return_value (gdbarch, ppc_linux_return_value); ++ ++ /* Note: kevinb/2002-04-12: See note in rs6000_gdbarch_init regarding ++ *_push_arguments(). The same remarks hold for the methods below. 
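To restate the returned-in-memory rule in runnable form: every struct or union goes back in memory unless it is an 8- or 16-byte vector. The toy classifier below, separate from the patch, mirrors the condition used by ppc_linux_return_value() earlier in this file; the type descriptor is invented and is not gdb's struct type.

#include <stdio.h>

enum return_convention { RETURN_IN_REGISTERS, RETURN_STRUCT_IN_MEMORY };

struct toy_type {
    int is_struct_or_union;
    int is_vector;
    unsigned length;          /* sizeof the type, in bytes */
};

static enum return_convention classify(const struct toy_type *t)
{
    if (t->is_struct_or_union &&
        !((t->length == 16 || t->length == 8) && t->is_vector))
        return RETURN_STRUCT_IN_MEMORY;
    return RETURN_IN_REGISTERS;        /* defer to the normal SysV rules */
}

int main(void)
{
    struct toy_type small_struct = { 1, 0, 8 };    /* 8-byte plain struct */
    struct toy_type altivec_vec  = { 1, 1, 16 };   /* 16-byte vector */

    printf("8-byte struct : %s\n", classify(&small_struct) ==
           RETURN_STRUCT_IN_MEMORY ? "memory" : "registers");
    printf("16-byte vector: %s\n", classify(&altivec_vec) ==
           RETURN_STRUCT_IN_MEMORY ? "memory" : "registers");
    return 0;
}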
*/
++  set_gdbarch_deprecated_frameless_function_invocation (gdbarch, ppc_linux_frameless_function_invocation);
++  set_gdbarch_deprecated_frame_chain (gdbarch, ppc_linux_frame_chain);
++  set_gdbarch_deprecated_frame_saved_pc (gdbarch, ppc_linux_frame_saved_pc);
++
++  set_gdbarch_deprecated_frame_init_saved_regs (gdbarch,
++                                     ppc_linux_frame_init_saved_regs);
++  set_gdbarch_deprecated_init_extra_frame_info (gdbarch,
++                                     ppc_linux_init_extra_frame_info);
++
++  set_gdbarch_memory_remove_breakpoint (gdbarch,
++                                        ppc_linux_memory_remove_breakpoint);
++  /* Shared library handling. */
++  set_gdbarch_in_solib_call_trampoline (gdbarch, in_plt_section);
++  set_gdbarch_skip_trampoline_code (gdbarch,
++                                    ppc_linux_skip_trampoline_code);
++  set_solib_svr4_fetch_link_map_offsets
++    (gdbarch, ppc_linux_svr4_fetch_link_map_offsets);
++    }
++
++  if (tdep->wordsize == 8)
++    {
++      /* Handle PPC64 GNU/Linux function pointers (which are really
++         function descriptors). */
++      set_gdbarch_convert_from_func_ptr_addr
++        (gdbarch, ppc64_linux_convert_from_func_ptr_addr);
++
++      set_gdbarch_in_solib_call_trampoline
++        (gdbarch, ppc64_in_solib_call_trampoline);
++      set_gdbarch_skip_trampoline_code (gdbarch, ppc64_skip_trampoline_code);
++
++      /* PPC64 malloc's entry-point is called ".malloc". */
++      set_gdbarch_name_of_malloc (gdbarch, ".malloc");
++    }
++}
++
++void
++_initialize_ppc_linux_tdep (void)
++{
++  /* Register for all sub-families of the POWER/PowerPC: 32-bit and
++     64-bit PowerPC, and the older rs6k. */
++  gdbarch_register_osabi (bfd_arch_powerpc, bfd_mach_ppc, GDB_OSABI_LINUX,
++                         ppc_linux_init_abi);
++  gdbarch_register_osabi (bfd_arch_powerpc, bfd_mach_ppc64, GDB_OSABI_LINUX,
++                         ppc_linux_init_abi);
++  gdbarch_register_osabi (bfd_arch_rs6000, bfd_mach_rs6k, GDB_OSABI_LINUX,
++                         ppc_linux_init_abi);
++  add_core_fns (&ppc_linux_regset_core_fns);
++}
+--- crash/lkcd_x86_trace.c.orig	2008-01-17 15:17:20.000000000 -0500
++++ crash/lkcd_x86_trace.c	2008-01-04 09:42:08.000000000 -0500
+@@ -5,8 +5,8 @@
+ /*
+  * lkcd_x86_trace.c
+  *
+- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
+- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
+ * + * Adapted as noted from the following LKCD files: + * +@@ -21,6 +21,9 @@ + + #include "lkcd_x86_trace.h" + ++#undef XEN_HYPER_MODE ++static int XEN_HYPER_MODE(void) { return (pc->flags & XEN_HYPER) != 0; } ++ + static void *kl_alloc_block(int, int); + static void kl_free_block(void *); + static void GET_BLOCK(kaddr_t, unsigned, void *); +@@ -47,12 +50,13 @@ + static int setup_trace_rec(kaddr_t, kaddr_t, int, trace_t *); + static int valid_ra(kaddr_t); + static int valid_ra_function(kaddr_t, char *); ++static int eframe_incr(kaddr_t, char *); + static int find_trace(kaddr_t, kaddr_t, kaddr_t, kaddr_t, trace_t *, int); + static void dump_stack_frame(trace_t *, sframe_t *, FILE *); + static void print_trace(trace_t *, int, FILE *); +-struct pt_regs; +-static int eframe_type(struct pt_regs *); +-static void print_eframe(FILE *, struct pt_regs *); ++static int eframe_type(uaddr_t *); ++char *funcname_display(char *); ++static void print_eframe(FILE *, uaddr_t *); + static void trace_banner(FILE *); + static void print_kaddr(kaddr_t, FILE *, int); + int do_text_list(kaddr_t, int, FILE *); +@@ -505,7 +509,7 @@ + { "receive_chars", NULL, + COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 48 }, + { "default_idle", NULL, +- COMPILER_VERSION_START, GCC(3,3,2), 0, -4, 0 }, ++ COMPILER_VERSION_START, GCC(2,96,0), 0, -4, 0 }, + { NULL, NULL, 0, 0, 0, 0, 0 }, + }; + +@@ -1117,8 +1121,9 @@ + return(0); + } + +-#include ++#ifndef REDHAT + #include ++#endif + #define KERNEL_EFRAME 0 + #define USER_EFRAME 1 + #define KERNEL_EFRAME_SZ 13 /* no ss and esp */ +@@ -1141,31 +1146,34 @@ + * Check if the exception frame is of kernel or user type + * Is checking only DS and CS values sufficient ? + */ +-int eframe_type(struct pt_regs *regs) ++ ++int eframe_type(uaddr_t *int_eframe) + { +- if (((regs->xcs & 0xffff) == __KERNEL_CS) && +- ((regs->xds & 0xffff) == __KERNEL_DS)) ++ ushort xcs, xds; ++ ++ xcs = (ushort)(int_eframe[INT_EFRAME_CS] & 0xffff); ++ xds = (ushort)(int_eframe[INT_EFRAME_DS] & 0xffff); ++ ++ if ((xcs == __KERNEL_CS) && (xds == __KERNEL_DS)) + return KERNEL_EFRAME; + #ifdef REDHAT +- else if (((regs->xcs & 0xffff) == 0x60) && +- ((regs->xds & 0xffff) == 0x68)) ++ else if ((xcs == 0x60) && (xds == 0x68)) ++ return KERNEL_EFRAME; ++ else if ((xcs == 0x60) && (xds == 0x7b)) ++ return KERNEL_EFRAME; ++ else if (XEN() && (xcs == 0x61) && (xds == 0x7b)) + return KERNEL_EFRAME; +- else if (((regs->xcs & 0xffff) == 0x60) && +- ((regs->xds & 0xffff) == 0x7b)) +- return KERNEL_EFRAME; + #endif +- else if (((regs->xcs & 0xffff) == __USER_CS) && +- ((regs->xds & 0xffff) == __USER_DS)) ++ else if ((xcs == __USER_CS) && (xds == __USER_DS)) + return USER_EFRAME; + #ifdef REDHAT +- else if (((regs->xcs & 0xffff) == 0x73) && +- ((regs->xds & 0xffff) == 0x7b)) ++ else if ((xcs == 0x73) && (xds == 0x7b)) + return USER_EFRAME; + #endif + return -1; + } + +-void print_eframe(FILE *ofp, struct pt_regs *regs) ++void print_eframe(FILE *ofp, uaddr_t *regs) + { + int type = eframe_type(regs); + +@@ -1206,6 +1214,93 @@ + } \ + } + #endif ++ ++/* ++ * Determine how much to increment the stack pointer to find the ++ * exception frame associated with a generic "error_code" or "nmi" ++ * exception. ++ * ++ * The incoming addr is that of the call to the generic error_code ++ * or nmi exception handler function. Until later 2.6 kernels, the next ++ * instruction had always been an "addl $8,%esp". However, with later ++ * 2.6 kernels, that esp adjustment is no long valid, and there will be ++ * an immediate "jmp" instruction. 
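A standalone sketch of that decision follows; it is illustration only, not part of the patch. The real code asks crash's disassembler via get_instr_info(), whereas this sketch peeks at a single opcode byte in a fake text buffer and assumes a 5-byte call rel32, which is enough to show the 4-versus-12 choice.

#include <stdio.h>

static const unsigned char text[] = {
    0xe8, 0x10, 0x00, 0x00, 0x00,   /* call  <generic handler> (5 bytes) */
    0xe9, 0x40, 0x00, 0x00, 0x00,   /* jmp   <return path>               */
};

static int eframe_incr(unsigned long call_addr)
{
    unsigned long next = call_addr + 5;          /* skip the call itself */
    unsigned char opcode = text[next];

    /* Later 2.6 kernels: an immediate jmp follows, so only the return
       address (4 bytes) sits between us and the exception frame.
       Older kernels: an "addl $8,%esp" follows, so skip 12 bytes. */
    if (opcode == 0xe9 || opcode == 0xeb)        /* jmp rel32 / rel8 */
        return 4;
    return 12;
}

int main(void)
{
    printf("stack adjustment: %d bytes\n", eframe_incr(0));
    return 0;
}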
Returns 4 or 12, whichever is appropriate. ++ * Cache the value the first time, and allow for future changes or additions. ++ */ ++ ++#define NMI_ADJ (0) ++#define ERROR_CODE_ADJ (1) ++#define EFRAME_ADJUSTS (ERROR_CODE_ADJ+1) ++ ++static int eframe_adjust[EFRAME_ADJUSTS] = { 0 }; ++ ++static int ++eframe_incr(kaddr_t addr, char *funcname) ++{ ++ instr_rec_t irp; ++ kaddr_t next; ++ int size, adj, val; ++ ++ if (STRNEQ(funcname, "nmi")) { ++ adj = NMI_ADJ; ++ val = eframe_adjust[NMI_ADJ]; ++ } else if (strstr(funcname, "error_code")) { ++ adj = ERROR_CODE_ADJ; ++ val = eframe_adjust[ERROR_CODE_ADJ]; ++ } else { ++ adj = -1; ++ val = 0; ++ error(INFO, ++ "unexpected exception frame marker: %lx (%s)\n", ++ addr, funcname); ++ } ++ ++ if (val) { ++ console("eframe_incr(%lx, %s): eframe_adjust[%d]: %d\n", ++ addr, funcname, adj, val); ++ return val; ++ } ++ ++ console("eframe_incr(%lx, %s): TBD:\n", addr, funcname); ++ ++ bzero(&irp, sizeof(irp)); ++ irp.aflag = 1; ++ irp.dflag = 1; ++ if (!(size = get_instr_info(addr, &irp))) { ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n", ++ addr, funcname, addr); ++ return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 4 : 12); ++ } ++ console(" addr: %lx size: %d opcode: 0x%x insn: \"%s\"\n", ++ addr, size, irp.opcode, irp.opcodep->name); ++ ++ next = addr + size; ++ bzero(&irp, sizeof(irp)); ++ irp.aflag = 1; ++ irp.dflag = 1; ++ if (!(size = get_instr_info(next, &irp))) { ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n", ++ addr, funcname, next); ++ return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 4 : 12); ++ } ++ console(" next: %lx size: %d opcode: 0x%x insn: \"%s\"\n", ++ next, size, irp.opcode, irp.opcodep->name); ++ ++ if (STREQ(irp.opcodep->name, "jmp")) ++ val = 4; ++ else ++ val = 12; ++ ++ if (adj >= 0) ++ eframe_adjust[adj] = val; ++ ++ return val; ++} ++ + /* + * find_trace() + * +@@ -1253,6 +1348,7 @@ + int flag; + int interrupted_system_call = FALSE; + struct bt_info *bt = trace->bt; ++ uaddr_t *pt; + #endif + sbp = trace->stack[curstkidx].ptr; + sbase = trace->stack[curstkidx].addr; +@@ -1322,7 +1418,17 @@ + } + } + asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); ++ + #ifdef REDHAT ++ if (XEN_HYPER_MODE()) { ++ func_name = kl_funcname(pc); ++ if (STREQ(func_name, "idle_loop") || STREQ(func_name, "hypercall") ++ || STREQ(func_name, "handle_exception")) { ++ UPDATE_FRAME(func_name, pc, 0, sp, bp, asp, 0, 0, bp - sp, 0); ++ return(trace->nframes); ++ } ++ } ++ + ra = GET_STACK_ULONG(bp + 4); + /* + * HACK: The get_framesize() function can return the proper +@@ -1447,7 +1553,8 @@ + bp = curframe->fp + frame_size; + } + #endif +- if ((func_name = kl_funcname(pc))) { ++ func_name = kl_funcname(pc); ++ if (func_name && !XEN_HYPER_MODE()) { + if (strstr(func_name, "kernel_thread")) { + ra = 0; + bp = saddr - 4; +@@ -1503,25 +1610,26 @@ + return(trace->nframes); + #ifdef REDHAT + } else if (strstr(func_name, "error_code") ++ || STREQ(func_name, "nmi_stack_correct") + || STREQ(func_name, "nmi")) { + #else + } else if (strstr(func_name, "error_code")) { + #endif + /* an exception frame */ +- sp = curframe->fp+12; ++ sp = curframe->fp + eframe_incr(pc, func_name); + + bp = sp + (KERNEL_EFRAME_SZ-1)*4; + asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - + (saddr - sp))); + curframe = alloc_sframe(trace, flags); +- ra = ((struct pt_regs *)asp)->eip; +- frame_type = eframe_type((struct pt_regs*)asp); ++ ra = asp[INT_EFRAME_EIP]; ++ frame_type = eframe_type(asp); + 
UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, + 0, 0, (bp - sp + 4), EX_FRAME); + + /* prepare for next kernel frame, if present */ + if (frame_type == KERNEL_EFRAME) { +- pc = ((struct pt_regs *)asp)->eip; ++ pc = asp[INT_EFRAME_EIP]; + sp = curframe->fp+4; + #ifdef REDHAT + bp = sp + get_framesize(pc, bt); +@@ -1540,20 +1648,20 @@ + sp = curframe->fp + 4; + asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - + (saddr - sp))); +- frame_type = eframe_type((struct pt_regs*)asp); ++ frame_type = eframe_type(asp); + if (frame_type == KERNEL_EFRAME) + bp = curframe->fp+(KERNEL_EFRAME_SZ-1)*4; + else + bp = curframe->fp+(USER_EFRAME_SZ-1)*4; + curframe = alloc_sframe(trace, flags); +- ra = ((struct pt_regs *)asp)->eip; ++ ra = asp[INT_EFRAME_EIP]; + UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, + 0, 0, curframe->fp - curframe->sp+4, EX_FRAME); + + /* prepare for next kernel frame, if present */ + if (frame_type == KERNEL_EFRAME) { + sp = curframe->fp + 4; +- pc = ((struct pt_regs *)asp)->eip; ++ pc = asp[INT_EFRAME_EIP]; + #ifdef REDHAT + bp = sp + get_framesize(pc, bt); + #else +@@ -1571,6 +1679,46 @@ + } + } + } ++ if (func_name && XEN_HYPER_MODE()) { ++ if (STREQ(func_name, "continue_nmi") || ++ STREQ(func_name, "vmx_asm_vmexit_handler") || ++ STREQ(func_name, "deferred_nmi")) { ++ /* Interrupt frame */ ++ sp = curframe->fp + 4; ++ asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - ++ (saddr - sp))); ++ bp = curframe->fp + (12 * 4); ++ curframe = alloc_sframe(trace, flags); ++ ra = *(asp + 9); ++ UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, ++ 0, 0, curframe->fp - curframe->sp+4, 12 * 4); ++ ++ /* contunue next frame */ ++ pc = ra; ++ sp = curframe->fp + 4; ++ bp = sp + get_framesize(pc, bt); ++ func_name = kl_funcname(pc); ++ if (!func_name) ++ return trace->nframes; ++ continue; ++ } ++ } ++ ++ /* ++ * Check for hypervisor_callback from user-space. ++ */ ++ if ((bt->flags & BT_XEN_STOP_THIS_CPU) && bt->tc->mm_struct && ++ STREQ(kl_funcname(curframe->pc), "hypervisor_callback")) { ++ pt = curframe->asp+1; ++ if (eframe_type(pt) == USER_EFRAME) { ++ if (program_context.debug >= 1) /* pc above */ ++ error(INFO, ++ "hypervisor_callback from user space\n"); ++ curframe->asp++; ++ curframe->flag |= EX_FRAME; ++ return(trace->nframes); ++ } ++ } + + /* Make sure our next frame pointer is valid (in the stack). + */ +@@ -1653,7 +1801,7 @@ + #ifdef REDHAT + kaddr_t fp = 0; + kaddr_t last_fp, last_pc, next_fp, next_pc; +- struct pt_regs *pt; ++ uaddr_t *pt; + struct bt_info *bt; + + bt = trace->bt; +@@ -1684,8 +1832,15 @@ + (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ))) + return; + +- print_stack_entry(trace->bt, +- trace->bt->flags & BT_BUMP_FRAME_LEVEL ? ++ if ((frmp->level == 0) && (bt->flags & BT_XEN_STOP_THIS_CPU)) { ++ print_stack_entry(trace->bt, 0, trace->bt->stkptr, ++ symbol_value("stop_this_cpu"), ++ value_symbol(symbol_value("stop_this_cpu")), ++ frmp, ofp); ++ } ++ ++ print_stack_entry(trace->bt, (trace->bt->flags & ++ (BT_BUMP_FRAME_LEVEL|BT_XEN_STOP_THIS_CPU)) ? + frmp->level + 1 : frmp->level, + fp ? 
(ulong)fp : trace->bt->stkptr, + (ulong)frmp->pc, frmp->funcname, frmp, ofp); +@@ -1707,7 +1862,11 @@ + fprintf(ofp, " [0x%x]\n", frmp->pc); + #endif + if (frmp->flag & EX_FRAME) { +- pt = (struct pt_regs *)frmp->asp; ++ pt = frmp->asp; ++ if (CRASHDEBUG(1)) ++ fprintf(ofp, ++ " EXCEPTION FRAME: %lx\n", ++ (unsigned long)frmp->sp); + print_eframe(ofp, pt); + } + #ifdef REDHAT +@@ -1789,6 +1948,114 @@ + if (kt->flags & RA_SEEK) + bt->flags |= BT_SPECULATE; + ++ if (XENDUMP_DUMPFILE() && XEN() && is_task_active(bt->task) && ++ STREQ(kl_funcname(bt->instptr), "stop_this_cpu")) { ++ /* ++ * bt->instptr of "stop_this_cpu" is not a return ++ * address -- replace it with the actual return ++ * address found at the bt->stkptr location. ++ */ ++ if (readmem((ulong)bt->stkptr, KVADDR, &eip, ++ sizeof(ulong), "xendump eip", RETURN_ON_ERROR)) ++ bt->instptr = eip; ++ bt->flags |= BT_XEN_STOP_THIS_CPU; ++ if (CRASHDEBUG(1)) ++ error(INFO, "replacing stop_this_cpu with %s\n", ++ kl_funcname(bt->instptr)); ++ } ++ ++ if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) && ++ is_task_active(bt->task) && ++ !(kt->xen_flags & XEN_SUSPEND) && ++ STREQ(kl_funcname(bt->instptr), "schedule")) { ++ /* ++ * This is an invalid (stale) schedule reference ++ * left in the task->thread. Move down the stack ++ * until the smp_call_function_interrupt return ++ * address is found. ++ */ ++ saddr = bt->stkptr; ++ while (readmem(saddr, KVADDR, &eip, ++ sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { ++ if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) { ++ bt->instptr = eip; ++ bt->stkptr = saddr; ++ bt->flags |= BT_XEN_STOP_THIS_CPU; ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "switch schedule to smp_call_function_interrupt\n"); ++ break; ++ } ++ saddr -= sizeof(void *); ++ if (saddr <= bt->stackbase) ++ break; ++ } ++ } ++ ++ if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) && ++ is_task_active(bt->task) && ++ (kt->xen_flags & XEN_SUSPEND) && ++ STREQ(kl_funcname(bt->instptr), "schedule")) { ++ int framesize = 0; ++ /* ++ * This is an invalid (stale) schedule reference ++ * left in the task->thread. Move down the stack ++ * until the hypercall_page() return address is ++ * found, and fix up its framesize as we go. ++ */ ++ saddr = bt->stacktop; ++ while (readmem(saddr, KVADDR, &eip, ++ sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { ++ ++ if (STREQ(kl_funcname(eip), "xen_idle")) ++ framesize += sizeof(ulong); ++ else if (framesize) ++ framesize += sizeof(ulong); ++ ++ if (STREQ(kl_funcname(eip), "hypercall_page")) { ++ int framesize = 24; ++ bt->instptr = eip; ++ bt->stkptr = saddr; ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "switch schedule to hypercall_page (framesize: %d)\n", ++ framesize); ++ FRAMESIZE_CACHE_ENTER(eip, &framesize); ++ break; ++ } ++ saddr -= sizeof(void *); ++ if (saddr <= bt->stackbase) ++ break; ++ } ++ } ++ ++ if (XENDUMP_DUMPFILE() && XEN() && !is_idle_thread(bt->task) && ++ is_task_active(bt->task) && ++ STREQ(kl_funcname(bt->instptr), "schedule")) { ++ /* ++ * This is an invalid (stale) schedule reference ++ * left in the task->thread. Move down the stack ++ * until the smp_call_function_interrupt return ++ * address is found. 
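The scan being described boils down to walking the saved stack one word at a time and resolving each word to a symbol until the wanted routine appears. The compact sketch below is separate from the patch; the stack contents and the toy resolver standing in for kl_funcname() are invented.

#include <stdio.h>
#include <string.h>

typedef unsigned long ulong;

/* Toy address-to-name resolver standing in for kl_funcname(). */
static const char *funcname(ulong addr)
{
    switch (addr) {
    case 0xc0110000UL: return "schedule";
    case 0xc0115678UL: return "smp_call_function_interrupt";
    default:           return NULL;
    }
}

/* Walk the saved stack downward; return the index of the first word
   that resolves to WANT, or -1 if none does. */
static int find_return_to(const ulong *stack, int nwords, const char *want)
{
    for (int i = nwords - 1; i >= 0; i--) {
        const char *name = funcname(stack[i]);
        if (name && strcmp(name, want) == 0)
            return i;
    }
    return -1;
}

int main(void)
{
    ulong stack[] = { 0x0, 0xdeadbeef, 0xc0115678UL, 0x1234, 0xc0110000UL };
    int idx = find_return_to(stack, 5, "smp_call_function_interrupt");

    if (idx >= 0)
        printf("found at word %d: %s\n", idx, funcname(stack[idx]));
    else
        printf("not found; keep the original instptr\n");
    return 0;
}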
++ */ ++ saddr = bt->stacktop; ++ while (readmem(saddr, KVADDR, &eip, ++ sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { ++ if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) { ++ bt->instptr = eip; ++ bt->stkptr = saddr; ++ bt->flags |= BT_XEN_STOP_THIS_CPU; ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "switch schedule to smp_call_function_interrupt\n"); ++ break; ++ } ++ saddr -= sizeof(void *); ++ if (saddr <= bt->stackbase) ++ break; ++ } ++ } ++ + if (!verify_back_trace(bt) && !recoverable(bt, ofp) && + !BT_REFERENCE_CHECK(bt)) + error(INFO, "cannot resolve stack trace:\n"); +@@ -1797,12 +2064,14 @@ + return(0); + #endif + +- if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) { +- return(1); +- } +- if (kl_get_task_struct(task, 2, tsp)) { +- kl_free_block(tsp); +- return(1); ++ if (!XEN_HYPER_MODE()) { ++ if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) { ++ return(1); ++ } ++ if (kl_get_task_struct(task, 2, tsp)) { ++ kl_free_block(tsp); ++ return(1); ++ } + } + trace = (trace_t *)alloc_trace_rec(C_TEMP); + if (!trace) { +@@ -1874,7 +2143,9 @@ + #endif + print_trace(trace, flags, ofp); + } +- kl_free_block(tsp); ++ if (!XEN_HYPER_MODE()) ++ kl_free_block(tsp); ++ + free_trace_rec(trace); + #ifdef REDHAT + if (KL_ERROR == KLE_PRINT_TRACE_ERROR) { +@@ -1901,13 +2172,15 @@ + errcnt = 0; + KL_ERROR = 0; + +- if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) +- return FALSE; +- +- if (kl_get_task_struct(bt->task, 2, tsp)) { +- kl_free_block(tsp); +- return FALSE; +- } ++ if (!XEN_HYPER_MODE()) { ++ if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) ++ return FALSE; ++ ++ if (kl_get_task_struct(bt->task, 2, tsp)) { ++ kl_free_block(tsp); ++ return FALSE; ++ } ++ } + + trace = (trace_t *)alloc_trace_rec(C_TEMP); + if (!trace) +@@ -1952,7 +2225,9 @@ + } while (frmp != trace->frame); + } + +- kl_free_block(tsp); ++ if (!XEN_HYPER_MODE()) ++ kl_free_block(tsp); ++ + free_trace_rec(trace); + return (errcnt ? FALSE : TRUE); + } +@@ -1982,7 +2257,7 @@ + (sp && (bt->ref->hexval == sp->value))) + bt->ref->cmdflags |= BT_REF_FOUND; + if (frmp->flag & EX_FRAME) { +- type = eframe_type((struct pt_regs *)frmp->asp); ++ type = eframe_type(frmp->asp); + x86_dump_eframe_common(bt, (ulong *)frmp->asp, + (type == KERNEL_EFRAME)); + } +@@ -2192,11 +2467,12 @@ + else + buf[0] = NULLCHAR; + +- if ((sp = eframe_label(funcname, eip))) ++ if ((sp = eframe_label(funcname, eip))) + funcname = sp->name; + + fprintf(ofp, "%s#%d [%8lx] %s%s at %lx\n", +- level < 10 ? " " : "", level, esp, funcname, ++ level < 10 ? " " : "", level, esp, ++ funcname_display(funcname), + strlen(buf) ? buf : "", eip); + + if (bt->flags & BT_LINE_NUMBERS) { +@@ -2236,6 +2512,9 @@ + struct eframe_labels *efp; + struct syment *sp; + ++ if (XEN_HYPER_MODE()) ++ return NULL; /* ODA: need support ? */ ++ + efp = &eframe_labels; + + if (!efp->init) { +@@ -2325,6 +2604,25 @@ + } + + /* ++ * If it makes sense to display a different function/label name ++ * in a stack entry, it can be done here. Unlike eframe_label(), ++ * this routine won't cause the passed-in function name pointer ++ * to be changed -- this is strictly for display purposes only. ++ */ ++char * ++funcname_display(char *funcname) ++{ ++ struct syment *sp; ++ ++ if (STREQ(funcname, "nmi_stack_correct") && ++ (sp = symbol_search("nmi"))) ++ return sp->name; ++ ++ return funcname; ++} ++ ++ ++/* + * Cache 2k starting from the passed-in text address. 
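A minimal sketch of that kind of read-through text cache follows, separate from the patch: keep one cached window, serve hits from it, refill on a miss. The 2048-byte window echoes the size mentioned above; the backing buffer and addresses are invented.

#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 2048

static unsigned char target_text[65536];      /* fake target .text */

static unsigned long cache_base;              /* start of the cached window */
static int cache_valid;
static unsigned char cache[CACHE_SIZE];

static unsigned char read_text_byte(unsigned long addr)
{
    if (!cache_valid || addr < cache_base || addr >= cache_base + CACHE_SIZE) {
        cache_base = addr;                    /* refill the window at ADDR */
        memcpy(cache, &target_text[addr], CACHE_SIZE);
        cache_valid = 1;
        printf("cache refill at %#lx\n", addr);
    }
    return cache[addr - cache_base];
}

int main(void)
{
    target_text[0x1000] = 0x55;               /* pretend instruction bytes */
    target_text[0x1001] = 0x89;

    printf("byte: %#x\n", read_text_byte(0x1000));   /* miss, refill */
    printf("byte: %#x\n", read_text_byte(0x1001));   /* hit */
    printf("byte: %#x\n", read_text_byte(0x1900));   /* miss, new window */
    return 0;
}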
This sits on top + * of the instrbuf 256-byte cache, but we don't want to extend its size + * because we can run off the end of a module segment -- if this routine +@@ -4858,6 +5156,8 @@ + } else { + codeptr++; + } ++ if (STREQ(op->name, "ud2a")) ++ codeptr += kt->BUG_bytes; + } else { + opcode = *codeptr; + op = &op_386[*codeptr]; +--- crash/ia64.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/ia64.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* ia64.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -16,6 +16,8 @@ + */ + #ifdef IA64 + #include "defs.h" ++#include "xen_hyper_defs.h" ++#include + + static int ia64_verify_symbol(const char *, ulong, char); + static int ia64_eframe_search(struct bt_info *); +@@ -25,6 +27,8 @@ + static void try_old_unwind(struct bt_info *); + static void ia64_dump_irq(int); + static ulong ia64_processor_speed(void); ++static int ia64_vtop_4l(ulong, physaddr_t *paddr, ulong *pgd, int, int); ++static int ia64_vtop(ulong, physaddr_t *paddr, ulong *pgd, int, int); + static int ia64_uvtop(struct task_context *, ulong, physaddr_t *, int); + static int ia64_kvtop(struct task_context *, ulong, physaddr_t *, int); + static ulong ia64_get_task_pgd(ulong); +@@ -47,10 +51,12 @@ + static int ia64_verify_paddr(uint64_t); + static int ia64_available_memory(struct efi_memory_desc_t *); + static void ia64_post_init(void); ++static ulong ia64_in_per_cpu_mca_stack(void); + static struct line_number_hook ia64_line_number_hooks[]; + static ulong ia64_get_stackbase(ulong); + static ulong ia64_get_stacktop(ulong); + static void parse_cmdline_arg(void); ++static void ia64_calc_phys_start(void); + + struct unw_frame_info; + static void dump_unw_frame_info(struct unw_frame_info *); +@@ -62,6 +68,17 @@ + static ulong rse_read_reg(struct unw_frame_info *, int, int *); + static void rse_function_params(struct unw_frame_info *, char *); + ++static int ia64_vtop_4l_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int); ++static int ia64_vtop_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int); ++static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *); ++static int ia64_xendump_p2m_create(struct xendump_data *); ++static void ia64_debug_dump_page(FILE *, char *, char *); ++static char *ia64_xendump_load_page(ulong, struct xendump_data *); ++static int ia64_xendump_page_index(ulong, struct xendump_data *); ++static ulong ia64_xendump_panic_task(struct xendump_data *); ++static void ia64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); ++ ++static void ia64_init_hyper(int); + + struct machine_specific ia64_machine_specific = { 0 }; + +@@ -70,8 +87,22 @@ + { + struct syment *sp, *spn; + ++ if (XEN_HYPER_MODE()) { ++ ia64_init_hyper(when); ++ return; ++ } ++ + switch (when) + { ++ case SETUP_ENV: ++#if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) ++ prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); ++#endif ++#if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) ++ prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); ++#endif ++ break; ++ + case 
PRE_SYMTAB: + machdep->verify_symbol = ia64_verify_symbol; + machdep->machspec = &ia64_machine_specific; +@@ -92,17 +123,23 @@ + case 16384: + machdep->stacksize = (power(2, 1) * PAGESIZE()); + break; ++ case 65536: ++ machdep->stacksize = (power(2, 0) * PAGESIZE()); ++ break; + default: + machdep->stacksize = 32*1024; + break; + } + if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc pgd space."); ++ if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc pud space."); + if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc pmd space."); + if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc ptbl space."); + machdep->last_pgd_read = 0; ++ machdep->last_pud_read = 0; + machdep->last_pmd_read = 0; + machdep->last_ptbl_read = 0; + machdep->verify_paddr = ia64_verify_paddr; +@@ -115,14 +152,17 @@ + break; + + case PRE_GDB: ++ + if (pc->flags & KERNEL_DEBUG_QUERY) + return; ++ + /* + * Until the kernel core dump and va_server library code + * do the right thing with respect to the configured page size, + * try to recognize a fatal inequity between the compiled-in + * page size and the page size used by the kernel. + */ ++ + + if ((sp = symbol_search("empty_zero_page")) && + (spn = next_symbol(NULL, sp)) && +@@ -169,10 +209,14 @@ + machdep->machspec->kernel_start + + GIGABYTES((ulong)(4)); + if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) +- machdep->machspec->phys_start = +- DEFAULT_PHYS_START; ++ ia64_calc_phys_start(); + } else + machdep->machspec->vmalloc_start = KERNEL_VMALLOC_BASE; ++ ++ machdep->xen_kdump_p2m_create = ia64_xen_kdump_p2m_create; ++ machdep->xendump_p2m_create = ia64_xendump_p2m_create; ++ machdep->xendump_panic_task = ia64_xendump_panic_task; ++ machdep->get_xendump_regs = ia64_get_xendump_regs; + break; + + case POST_GDB: +@@ -202,7 +246,10 @@ + else if (symbol_exists("_irq_desc")) + ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, + "_irq_desc", NULL, 0); +- machdep->hz = 1024; ++ if (!machdep->hz) ++ machdep->hz = 1024; ++ machdep->section_size_bits = _SECTION_SIZE_BITS; ++ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; + ia64_create_memmap(); + break; + +@@ -228,8 +275,10 @@ + char *arglist[MAXARGS]; + ulong value; + struct machine_specific *ms; ++ int vm_flag; + + ms = &ia64_machine_specific; ++ vm_flag = 0; + + if (!strstr(machdep->cmdline_arg, "=")) { + errflag = 0; +@@ -284,11 +333,37 @@ + continue; + } + } ++ } else if (STRNEQ(arglist[i], "vm=")) { ++ vm_flag++; ++ p = arglist[i] + strlen("vm="); ++ if (strlen(p)) { ++ if (STREQ(p, "4l")) { ++ machdep->flags |= VM_4_LEVEL; ++ continue; ++ } ++ } + } + + error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); + } + ++ if (vm_flag) { ++ switch (machdep->flags & (VM_4_LEVEL)) ++ { ++ case VM_4_LEVEL: ++ error(NOTE, "using 4-level pagetable\n"); ++ c++; ++ break; ++ ++ default: ++ error(WARNING, "invalid vm= option\n"); ++ c++; ++ machdep->flags &= ~(VM_4_LEVEL); ++ break; ++ } ++ } ++ ++ + if (c) + fprintf(fp, "\n"); + } +@@ -314,6 +389,58 @@ + return TRUE; + } + ++ ++static ulong ++ia64_in_per_cpu_mca_stack(void) ++{ ++ int plen, i; ++ ulong flag; ++ ulong vaddr, paddr, stackbase, stacktop; ++ ulong *__per_cpu_mca; ++ struct task_context *tc; ++ ++ tc = CURRENT_CONTEXT(); ++ ++ if (STRNEQ(CURRENT_COMM(), "INIT")) ++ flag = INIT; ++ else if (STRNEQ(CURRENT_COMM(), "MCA")) ++ flag = MCA; ++ else ++ return 0; ++ ++ if (!symbol_exists("__per_cpu_mca") || ++ !(plen = 
get_array_length("__per_cpu_mca", NULL, 0)) || ++ (plen < kt->cpus)) ++ return 0; ++ ++ vaddr = SWITCH_STACK_ADDR(CURRENT_TASK()); ++ if (VADDR_REGION(vaddr) != KERNEL_CACHED_REGION) ++ return 0; ++ paddr = ia64_VTOP(vaddr); ++ ++ __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * kt->cpus); ++ ++ if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, ++ sizeof(ulong) * kt->cpus, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) ++ return 0; ++ ++ if (CRASHDEBUG(1)) { ++ for (i = 0; i < kt->cpus; i++) { ++ fprintf(fp, "__per_cpu_mca[%d]: %lx\n", ++ i, __per_cpu_mca[i]); ++ } ++ } ++ ++ stackbase = __per_cpu_mca[tc->processor]; ++ stacktop = stackbase + (STACKSIZE() * 2); ++ FREEBUF(__per_cpu_mca); ++ ++ if ((paddr >= stackbase) && (paddr < stacktop)) ++ return flag; ++ else ++ return 0; ++} ++ + void + ia64_dump_machdep_table(ulong arg) + { +@@ -401,12 +528,14 @@ + fprintf(fp, "%sUNW_R0", others++ ? "|" : ""); + if (machdep->flags & MEM_LIMIT) + fprintf(fp, "%sMEM_LIMIT", others++ ? "|" : ""); +- if (machdep->flags & SYSRQ) +- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); + if (machdep->flags & DEVMEMRD) + fprintf(fp, "%sDEVMEMRD", others++ ? "|" : ""); + if (machdep->flags & INIT) + fprintf(fp, "%sINIT", others++ ? "|" : ""); ++ if (machdep->flags & MCA) ++ fprintf(fp, "%sMCA", others++ ? "|" : ""); ++ if (machdep->flags & VM_4_LEVEL) ++ fprintf(fp, "%sVM_4_LEVEL", others++ ? "|" : ""); + fprintf(fp, ")\n"); + fprintf(fp, " kvbase: %lx\n", machdep->kvbase); + fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); +@@ -445,16 +574,25 @@ + (machdep->verify_paddr == ia64_verify_paddr) ? + "ia64_verify_paddr" : "generic_verify_paddr"); + fprintf(fp, " init_kernel_pgd: NULL\n"); ++ fprintf(fp, "xen_kdump_p2m_create: ia64_xen_kdump_p2m_create()\n"); ++ fprintf(fp, " xendump_p2m_create: ia64_xendump_p2m_create()\n"); ++ fprintf(fp, " xendump_panic_task: ia64_xendump_panic_task()\n"); ++ fprintf(fp, " get_xendump_regs: ia64_get_xendump_regs()\n"); + fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); + fprintf(fp, " line_number_hooks: ia64_line_number_hooks\n"); + fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); ++ fprintf(fp, " last_pud_read: %lx\n", machdep->last_pud_read); + fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); + fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); + fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); ++ fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); + fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); + fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); + fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); + fprintf(fp, " cmdline_arg: %s\n", machdep->cmdline_arg); ++ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); ++ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); ++ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); + fprintf(fp, " machspec: ia64_machine_specific\n"); + fprintf(fp, " cpu_data_address: %lx\n", + machdep->machspec->cpu_data_address); +@@ -565,9 +703,9 @@ + if (CRASHDEBUG(8)) + fprintf(fp, "%016lx %s\n", value, name); + +- if (STREQ(name, "phys_start") && type == 'A') +- if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) +- machdep->machspec->phys_start = value; ++// if (STREQ(name, "phys_start") && type == 'A') ++// if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) ++// machdep->machspec->phys_start = value; + + region = VADDR_REGION(value); + +@@ -665,74 +803,148 @@ + return (machdep->mhz = mhz); + } + 
+- +-/* +- * Translates a user virtual address to its physical address. cmd_vtop() +- * sets the verbose flag so that the pte translation gets displayed; all +- * other callers quietly accept the translation. +- * +- * This routine can also take mapped kernel virtual addresses if the -u flag +- * was passed to cmd_vtop(). If so, it makes the translation using the +- * swapper_pg_dir, making it irrelevant in this processor's case. ++/* Generic abstraction to translate user or kernel virtual ++ * addresses to physical using a 4 level page table. + */ + static int +-ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) ++ia64_vtop_4l(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) + { +- ulong mm; +- ulong *pgd; + ulong *page_dir; ++ ulong *page_upper; + ulong *page_middle; + ulong *page_table; + ulong pgd_pte; ++ ulong pud_pte; + ulong pmd_pte; + ulong pte; + ulong region, offset; + +- if (!tc) +- error(FATAL, "current context invalid\n"); +- +- *paddr = 0; +- region = VADDR_REGION(uvaddr); ++ if (usr) { ++ region = VADDR_REGION(vaddr); ++ offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); ++ offset |= (region << (PAGESHIFT() - 6)); ++ page_dir = pgd + offset; ++ } else { ++ if (!(pgd = (ulong *)vt->kernel_pgd[0])) ++ error(FATAL, "cannot determine kernel pgd pointer\n"); ++ page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); ++ } + +- if (IS_KVADDR(uvaddr)) +- return ia64_kvtop(tc, uvaddr, paddr, verbose); ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); + +- if ((mm = task_mm(tc->task, TRUE))) +- pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); +- else +- readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, +- sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); ++ FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); + ++ if (!(pgd_pte)) ++ return FALSE; ++ ++ offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1); ++ page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; ++ ++ FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE()); ++ pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper)); ++ + if (verbose) +- fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); ++ fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte); ++ ++ if (!(pud_pte)) ++ return FALSE; ++ ++ offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); ++ page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; ++ ++ FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); ++ ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); ++ ++ if (!(pmd_pte)) ++ return FALSE; ++ ++ offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); ++ page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; ++ ++ FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); ++ pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); + +- offset = (uvaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); +- offset |= (region << (PAGESHIFT() - 6)); +- page_dir = pgd + offset; ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); ++ ++ if (!(pte & (_PAGE_P))) { ++ if (usr) ++ *paddr = pte; ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ ia64_translate_pte(pte, 0, 0); ++ } ++ return FALSE; ++ } ++ ++ *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ++ 
ia64_translate_pte(pte, 0, 0); ++ } ++ ++ return TRUE; ++} ++ ++/* Generic abstraction to translate user or kernel virtual ++ * addresses to physical using a 3 level page table. ++ */ ++static int ++ia64_vtop(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) ++{ ++ ulong *page_dir; ++ ulong *page_middle; ++ ulong *page_table; ++ ulong pgd_pte; ++ ulong pmd_pte; ++ ulong pte; ++ ulong region, offset; ++ ++ if (usr) { ++ region = VADDR_REGION(vaddr); ++ offset = (vaddr >> PGDIR_SHIFT_3L) & ((PTRS_PER_PGD >> 3) - 1); ++ offset |= (region << (PAGESHIFT() - 6)); ++ page_dir = pgd + offset; ++ } else { ++ if (!(pgd = (ulong *)vt->kernel_pgd[0])) ++ error(FATAL, "cannot determine kernel pgd pointer\n"); ++ page_dir = pgd + ((vaddr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)); ++ } + ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); ++ + FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); + pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); + +- if (verbose) { ++ if (verbose) + fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); +- } + + if (!(pgd_pte)) +- goto no_upage; ++ return FALSE; + +- offset = (uvaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); ++ offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); + page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; + + FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); + pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); + + if (verbose) +- fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle,pmd_pte); ++ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); + + if (!(pmd_pte)) +- goto no_upage; ++ return FALSE; + +- offset = (uvaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); ++ offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); + page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; + + FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); +@@ -742,15 +954,16 @@ + fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); + + if (!(pte & (_PAGE_P))) { +- *paddr = pte; ++ if (usr) ++ *paddr = pte; + if (pte && verbose) { + fprintf(fp, "\n"); + ia64_translate_pte(pte, 0, 0); + } +- goto no_upage; ++ return FALSE; + } + +- *paddr = (pte & _PFN_MASK) + PAGEOFFSET(uvaddr); ++ *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); + + if (verbose) { + fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); +@@ -758,10 +971,50 @@ + } + + return TRUE; ++} + +-no_upage: + +- return FALSE; ++/* ++ * Translates a user virtual address to its physical address. cmd_vtop() ++ * sets the verbose flag so that the pte translation gets displayed; all ++ * other callers quietly accept the translation. ++ * ++ * This routine can also take mapped kernel virtual addresses if the -u flag ++ * was passed to cmd_vtop(). If so, it makes the translation using the ++ * swapper_pg_dir, making it irrelevant in this processor's case. 
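Stepping back from the ia64 specifics for a moment: each level of these walks carves an index out of the virtual address with a shift and a mask, reads the selected entry, and descends, and the leaf entry's frame number plus the page offset gives the physical address. The generic sketch below is separate from the patch; its 16KB pages and 10-bit levels are demo values, not the ia64 geometry.

#include <stdio.h>
#include <stdint.h>

#define PAGESHIFT    14                        /* 16KB pages (demo value) */
#define LVL_BITS     10                        /* 10 index bits per level */
#define PTRS_PER_LVL 1024                      /* 2^LVL_BITS entries */

static unsigned idx(uint64_t vaddr, int level)
{
    /* level 0 = leaf page table; higher levels sit above it. */
    int shift = PAGESHIFT + LVL_BITS * level;
    return (vaddr >> shift) & (PTRS_PER_LVL - 1);
}

int main(void)
{
    uint64_t vaddr = 0x000000E123456789ULL;

    printf("page offset: 0x%llx\n",
           (unsigned long long)(vaddr & ((1ULL << PAGESHIFT) - 1)));
    for (int level = 2; level >= 0; level--)
        printf("level %d index: %u\n", level, idx(vaddr, level));
    return 0;
}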
++ */ ++static int ++ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) ++{ ++ ulong mm; ++ ulong *pgd; ++ ++ if (!tc) ++ error(FATAL, "current context invalid\n"); ++ ++ *paddr = 0; ++ ++ if (IS_KVADDR(uvaddr)) ++ return ia64_kvtop(tc, uvaddr, paddr, verbose); ++ ++ if ((mm = task_mm(tc->task, TRUE))) ++ pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); ++ else ++ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, ++ sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); ++ ++ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { ++ if (machdep->flags & VM_4_LEVEL) ++ return ia64_vtop_4l_xen_wpt(uvaddr, paddr, pgd, verbose, 1); ++ else ++ return ia64_vtop_xen_wpt(uvaddr, paddr, pgd, verbose, 1); ++ } else { ++ if (machdep->flags & VM_4_LEVEL) ++ return ia64_vtop_4l(uvaddr, paddr, pgd, verbose, 1); ++ else ++ return ia64_vtop(uvaddr, paddr, pgd, verbose, 1); ++ } ++ + } + + +@@ -774,13 +1027,6 @@ + ia64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) + { + ulong *pgd; +- ulong *page_dir; +- ulong *page_middle; +- ulong *page_table; +- ulong pgd_pte; +- ulong pmd_pte; +- ulong pte; +- ulong offset; + + if (!IS_KVADDR(kvaddr)) + return FALSE; +@@ -813,66 +1059,21 @@ + return TRUE; + } + +- pgd = (ulong *)vt->kernel_pgd[0]; +- +- if (verbose) { +- fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); +- } +- +- page_dir = pgd + ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); +- +- FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); +- pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); +- +- if (verbose) { +- fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); +- } +- +- if (!(pgd_pte)) +- goto no_kpage; +- +- offset = (kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +- page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; +- +- FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); +- pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); +- +- if (verbose) +- fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, +- pmd_pte); +- +- if (!(pmd_pte)) +- goto no_kpage; +- +- offset = (kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); +- page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; +- +- FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); +- pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); +- +- if (verbose) +- fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); +- +- if (!(pte & (_PAGE_P))) { +- if (pte && verbose) { +- fprintf(fp, "\n"); +- ia64_translate_pte(pte, 0, 0); +- } +- goto no_kpage; +- } +- +- *paddr = (pte & _PFN_MASK) + PAGEOFFSET(kvaddr); ++ if (!(pgd = (ulong *)vt->kernel_pgd[0])) ++ error(FATAL, "cannot determine kernel pgd pointer\n"); + +- if (verbose) { +- fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); +- ia64_translate_pte(pte, 0, 0); ++ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { ++ if (machdep->flags & VM_4_LEVEL) ++ return ia64_vtop_4l_xen_wpt(kvaddr, paddr, pgd, verbose, 0); ++ else ++ return ia64_vtop_xen_wpt(kvaddr, paddr, pgd, verbose, 0); ++ } else { ++ if (machdep->flags & VM_4_LEVEL) ++ return ia64_vtop_4l(kvaddr, paddr, pgd, verbose, 0); ++ else ++ return ia64_vtop(kvaddr, paddr, pgd, verbose, 0); + } + +- return TRUE; +- +-no_kpage: +- +- return FALSE; + } + + /* +@@ -958,9 +1159,15 @@ + { + ulong ksp; + +- readmem(task + OFFSET(task_struct_thread_ksp), KVADDR, +- &ksp, sizeof(void *), +- "thread_struct ksp", FAULT_ON_ERROR); ++ if (XEN_HYPER_MODE()) { ++ readmem(task + XEN_HYPER_OFFSET(vcpu_thread_ksp), KVADDR, ++ &ksp, sizeof(void *), ++ "vcpu thread ksp", 
FAULT_ON_ERROR); ++ } else { ++ readmem(task + OFFSET(task_struct_thread_ksp), KVADDR, ++ &ksp, sizeof(void *), ++ "thread_struct ksp", FAULT_ON_ERROR); ++ } + + return ksp; + } +@@ -1315,7 +1522,10 @@ + BZERO(&eframe, sizeof(ulong) * NUM_PT_REGS); + + open_tmpfile(); +- dump_struct("pt_regs", addr, RADIX(16)); ++ if (XEN_HYPER_MODE()) ++ dump_struct("cpu_user_regs", addr, RADIX(16)); ++ else ++ dump_struct("pt_regs", addr, RADIX(16)); + rewind(pc->tmpfile); + + fval = 0; +@@ -1571,6 +1781,12 @@ + + fprintf(fp, " EFRAME: %lx\n", addr); + ++ if (bt->flags & BT_INCOMPLETE_USER_EFRAME) { ++ fprintf(fp, ++ " [exception frame incomplete -- check salinfo for complete context]\n"); ++ bt->flags &= ~BT_INCOMPLETE_USER_EFRAME; ++ } ++ + fprintf(fp, " B0: %016lx CR_IIP: %016lx\n", + eframe[P_b0], eframe[P_cr_iip]); + /** +@@ -2099,7 +2315,7 @@ + fprintf(fp, "(unknown)\n"); + fprintf(fp, " HZ: %d\n", machdep->hz); + fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); +- fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); ++// fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); + fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); + fprintf(fp, " KERNEL CACHED REGION: %lx\n", + (ulong)KERNEL_CACHED_REGION << REGION_SHIFT); +@@ -2371,9 +2587,10 @@ + !readmem(ia64_boot_param+ + MEMBER_OFFSET("ia64_boot_param", "efi_memmap"), + KVADDR, &efi_memmap, sizeof(uint64_t), "efi_memmap", +- RETURN_ON_ERROR)) { +- error(WARNING, "cannot read ia64_boot_param: " +- "memory verification will not be performed\n\n"); ++ QUIET|RETURN_ON_ERROR)) { ++ if (!XEN() || CRASHDEBUG(1)) ++ error(WARNING, "cannot read ia64_boot_param: " ++ "memory verification will not be performed\n\n"); + return; + } + +@@ -2391,9 +2608,11 @@ + + if ((ms->mem_limit && (efi_memmap >= ms->mem_limit)) || + !readmem(PTOV(efi_memmap), KVADDR, memmap, +- ms->efi_memmap_size, "efi_mmap contents", RETURN_ON_ERROR)) { +- error(WARNING, "cannot read efi_mmap: " +- "memory verification will not be performed\n"); ++ ms->efi_memmap_size, "efi_mmap contents", ++ QUIET|RETURN_ON_ERROR)) { ++ if (!XEN() || (XEN() && CRASHDEBUG(1))) ++ error(WARNING, "cannot read efi_mmap: " ++ "EFI memory verification will not be performed\n\n"); + free(memmap); + return; + } +@@ -2605,6 +2824,8 @@ + ia64_post_init(void) + { + struct machine_specific *ms; ++ struct gnu_request req; ++ ulong flag; + + ms = &ia64_machine_specific; + +@@ -2677,12 +2898,16 @@ + } + } + +- if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) +- ms->ia64_init_stack_size = get_array_length("ia64_init_stack", +- NULL, 0); ++ if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) { ++ get_symbol_type("ia64_init_stack", NULL, &req); ++ ms->ia64_init_stack_size = req.length; ++ } + + if (DUMPFILE() && ia64_in_init_stack(SWITCH_STACK_ADDR(CURRENT_TASK()))) + machdep->flags |= INIT; ++ ++ if (DUMPFILE() && (flag = ia64_in_per_cpu_mca_stack())) ++ machdep->flags |= flag; + } + + /* +@@ -3326,4 +3551,775 @@ + (vaddr < (ulong)KERNEL_UNCACHED_BASE)); + } + ++/* Generic abstraction to translate user or kernel virtual ++ * addresses to physical using a 4 level page table. 
++ */ ++static int ++ia64_vtop_4l_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) ++{ ++ error(FATAL, "ia64_vtop_4l_xen_wpt: TBD\n"); ++ return FALSE; ++#ifdef TBD ++ ulong *page_dir; ++ ulong *page_upper; ++ ulong *page_middle; ++ ulong *page_table; ++ ulong pgd_pte; ++ ulong pud_pte; ++ ulong pmd_pte; ++ ulong pte; ++ ulong region, offset; ++ ++ ++ if (usr) { ++ region = VADDR_REGION(vaddr); ++ offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); ++ offset |= (region << (PAGESHIFT() - 6)); ++ page_dir = pgd + offset; ++ } else { ++ if (!(pgd = (ulong *)vt->kernel_pgd[0])) ++ error(FATAL, "cannot determine kernel pgd pointer\n"); ++ page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); ++ } ++ ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); ++ ++ FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); ++ ++ if (!(pgd_pte)) ++ return FALSE; ++ ++ offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1); ++ page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; ++ ++ FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE()); ++ pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper)); ++ ++ if (verbose) ++ fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte); ++ ++ if (!(pud_pte)) ++ return FALSE; ++ ++ offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); ++ page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; ++ ++ FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); ++ ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); ++ ++ if (!(pmd_pte)) ++ return FALSE; ++ ++ offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); ++ page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; ++ ++ FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); ++ pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); ++ ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); ++ ++ if (!(pte & (_PAGE_P))) { ++ if (usr) ++ *paddr = pte; ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ ia64_translate_pte(pte, 0, 0); ++ } ++ return FALSE; ++ } ++ ++ *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ++ ia64_translate_pte(pte, 0, 0); ++ } ++ ++ return TRUE; ++#endif ++} ++ ++/* Generic abstraction to translate user or kernel virtual ++ * addresses to physical using a 3 level page table. 
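
The 4-level walk sketched in ia64_vtop_4l_xen_wpt() above (and the non-Xen ia64_vtop_4l() it parallels) works by carving the virtual address into one table index per level plus a page offset. The standalone sketch below shows only that decomposition; the DEMO_* shift and index-size values are assumptions chosen for illustration, not the constants crash derives from the target kernel, and the user-space region folding is omitted.

#include <stdio.h>

#define DEMO_PAGESHIFT   14UL                   /* assumed 16KB pages */
#define DEMO_INDEX_BITS  11UL                   /* assumed entries per level = 2^11 */
#define DEMO_PMD_SHIFT   (DEMO_PAGESHIFT + DEMO_INDEX_BITS)
#define DEMO_PUD_SHIFT   (DEMO_PMD_SHIFT + DEMO_INDEX_BITS)
#define DEMO_PGDIR_SHIFT (DEMO_PUD_SHIFT + DEMO_INDEX_BITS)
#define DEMO_INDEX_MASK  ((1UL << DEMO_INDEX_BITS) - 1)

int main(void)
{
        unsigned long vaddr = 0x6000000000123456UL;     /* arbitrary example address */

        printf("pgd index:   %lx\n", (vaddr >> DEMO_PGDIR_SHIFT) & DEMO_INDEX_MASK);
        printf("pud index:   %lx\n", (vaddr >> DEMO_PUD_SHIFT) & DEMO_INDEX_MASK);
        printf("pmd index:   %lx\n", (vaddr >> DEMO_PMD_SHIFT) & DEMO_INDEX_MASK);
        printf("pte index:   %lx\n", (vaddr >> DEMO_PAGESHIFT) & DEMO_INDEX_MASK);
        printf("page offset: %lx\n", vaddr & ((1UL << DEMO_PAGESHIFT) - 1));
        return 0;
}
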
++ */ ++static int ++ia64_vtop_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) ++{ ++ error(FATAL, "ia64_vtop_xen_wpt: TBD\n"); ++ return FALSE; ++#ifdef TBD ++ ulong *page_dir; ++ ulong *page_middle; ++ ulong *page_table; ++ ulong pgd_pte; ++ ulong pmd_pte; ++ ulong pte; ++ ulong region, offset; ++ ++ ++ if (usr) { ++ region = VADDR_REGION(vaddr); ++ offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); ++ offset |= (region << (PAGESHIFT() - 6)); ++ page_dir = pgd + offset; ++ } else { ++ if (!(pgd = (ulong *)vt->kernel_pgd[0])) ++ error(FATAL, "cannot determine kernel pgd pointer\n"); ++ page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); ++ } ++ ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); ++ ++ FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); ++ ++ if (!(pgd_pte)) ++ return FALSE; ++ ++ offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); ++ page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; ++ ++ FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); ++ ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); ++ ++ if (!(pmd_pte)) ++ return FALSE; ++ ++ offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); ++ page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; ++ ++ FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); ++ pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); ++ ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); ++ ++ if (!(pte & (_PAGE_P))) { ++ if (usr) ++ *paddr = pte; ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ ia64_translate_pte(pte, 0, 0); ++ } ++ return FALSE; ++ } ++ ++ *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ++ ia64_translate_pte(pte, 0, 0); ++ } ++ ++ return TRUE; ++#endif ++} ++ ++#include "netdump.h" ++ ++/* ++ * Determine the relocatable physical address base. ++ */ ++static void ++ia64_calc_phys_start(void) ++{ ++ FILE *iomem; ++ int i, found, errflag; ++ char buf[BUFSIZE]; ++ char *p1; ++ ulong kernel_code_start; ++ struct vmcore_data *vd; ++ Elf64_Phdr *phdr; ++ ulong phys_start, text_start; ++ ++ /* ++ * Default to 64MB. ++ */ ++ machdep->machspec->phys_start = DEFAULT_PHYS_START; ++ ++ text_start = symbol_exists("_text") ? symbol_value("_text") : BADADDR; ++ ++ if (ACTIVE()) { ++ if ((iomem = fopen("/proc/iomem", "r")) == NULL) ++ return; ++ ++ errflag = 1; ++ while (fgets(buf, BUFSIZE, iomem)) { ++ if (strstr(buf, ": Kernel code")) { ++ clean_line(buf); ++ errflag = 0; ++ break; ++ } ++ } ++ fclose(iomem); ++ ++ if (errflag) ++ return; ++ ++ if (!(p1 = strstr(buf, "-"))) ++ return; ++ else ++ *p1 = NULLCHAR; ++ ++ errflag = 0; ++ kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag); ++ if (errflag) ++ return; ++ ++ machdep->machspec->phys_start = kernel_code_start; ++ ++ if (CRASHDEBUG(1)) { ++ if (text_start == BADADDR) ++ fprintf(fp, "_text: (unknown) "); ++ else ++ fprintf(fp, "_text: %lx ", text_start); ++ fprintf(fp, "Kernel code: %lx -> ", kernel_code_start); ++ fprintf(fp, "phys_start: %lx\n\n", ++ machdep->machspec->phys_start); ++ } ++ ++ return; ++ } ++ ++ /* ++ * Get relocation value from whatever dumpfile format is being used. 
++ */ ++ ++ if (DISKDUMP_DUMPFILE()) { ++ if (diskdump_phys_base(&phys_start)) { ++ machdep->machspec->phys_start = phys_start; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "compressed kdump: phys_start: %lx\n", ++ phys_start); ++ } ++ return; ++ } else if (LKCD_DUMPFILE()) { ++ ++ if (lkcd_get_kernel_start(&phys_start)) { ++ machdep->machspec->phys_start = phys_start; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "LKCD dump: phys_start: %lx\n", ++ phys_start); ++ } ++ } ++ ++ if ((vd = get_kdump_vmcore_data())) { ++ /* ++ * There should be at most one region 5 region, and it ++ * should be equal to "_text". If not, take whatever ++ * region 5 address comes first and hope for the best. ++ */ ++ for (i = found = 0; i < vd->num_pt_load_segments; i++) { ++ phdr = vd->load64 + i; ++ if (phdr->p_vaddr == text_start) { ++ machdep->machspec->phys_start = phdr->p_paddr; ++ found++; ++ break; ++ } ++ } ++ ++ for (i = 0; !found && (i < vd->num_pt_load_segments); i++) { ++ phdr = vd->load64 + i; ++ if (VADDR_REGION(phdr->p_vaddr) == KERNEL_VMALLOC_REGION) { ++ machdep->machspec->phys_start = phdr->p_paddr; ++ found++; ++ break; ++ } ++ } ++ ++ if (found && CRASHDEBUG(1)) { ++ if (text_start == BADADDR) ++ fprintf(fp, "_text: (unknown) "); ++ else ++ fprintf(fp, "_text: %lx ", text_start); ++ fprintf(fp, "p_vaddr: %lx p_paddr: %lx\n", ++ phdr->p_vaddr, phdr->p_paddr); ++ } ++ ++ return; ++ } ++} ++ ++/* ++ * From the xen vmcore, create an index of mfns for each page that makes ++ * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. ++ */ ++static int ++ia64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) ++{ ++ /* ++ * Temporarily read physical (machine) addresses from vmcore by ++ * going directly to read_netdump() instead of via read_kdump(). ++ */ ++ pc->readmem = read_netdump; ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "ia64_xen_kdump_p2m_create: p2m_mfn: %lx\n", xkd->p2m_mfn); ++ ++ if ((xkd->p2m_mfn_frame_list = (ulong *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc p2m_frame_list"); ++ ++ if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->p2m_mfn_frame_list, PAGESIZE(), ++ "xen kdump p2m mfn page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump p2m mfn page\n"); ++ ++ xkd->p2m_frames = PAGESIZE()/sizeof(ulong); ++ ++ pc->readmem = read_kdump; ++ ++ return TRUE; ++} ++ ++physaddr_t ++ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo) ++{ ++ ulong pgd_idx, pte_idx; ++ ulong pmd, pte; ++ physaddr_t paddr; ++ ++ /* ++ * Temporarily read physical (machine) addresses from vmcore by ++ * going directly to read_netdump() instead of via read_kdump(). 
++ */ ++ pc->readmem = read_netdump; ++ ++ xkd->accesses += 2; ++ ++ pgd_idx = (pseudo >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1); ++ pmd = xkd->p2m_mfn_frame_list[pgd_idx] & _PFN_MASK; ++ if (!pmd) { ++ paddr = P2M_FAILURE; ++ goto out; ++ } ++ ++ pmd += ((pseudo >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); ++ if (pmd != xkd->last_pmd_read) { ++ if (!readmem(pmd, PHYSADDR, &pte, sizeof(ulong), ++ "ia64_xen_kdump_p2m pmd", RETURN_ON_ERROR)) { ++ xkd->last_pmd_read = BADADDR; ++ xkd->last_mfn_read = BADADDR; ++ paddr = P2M_FAILURE; ++ goto out; ++ } ++ xkd->last_pmd_read = pmd; ++ } else { ++ pte = xkd->last_mfn_read; ++ xkd->cache_hits++; ++ } ++ pte = pte & _PFN_MASK; ++ if (!pte) { ++ paddr = P2M_FAILURE; ++ goto out; ++ } ++ ++ if (pte != xkd->last_mfn_read) { ++ if (!readmem(pte, PHYSADDR, xkd->page, PAGESIZE(), ++ "ia64_xen_kdump_p2m pte page", RETURN_ON_ERROR)) { ++ xkd->last_pmd_read = BADADDR; ++ xkd->last_mfn_read = BADADDR; ++ paddr = P2M_FAILURE; ++ goto out; ++ } ++ xkd->last_mfn_read = pte; ++ } else ++ xkd->cache_hits++; ++ ++ pte_idx = (pseudo >> PAGESHIFT()) & (PTRS_PER_PTE - 1); ++ paddr = *(((ulong *)xkd->page) + pte_idx); ++ if (!(paddr & _PAGE_P)) { ++ paddr = P2M_FAILURE; ++ goto out; ++ } ++ paddr = (paddr & _PFN_MASK) | PAGEOFFSET(pseudo); ++ ++out: ++ pc->readmem = read_kdump; ++ return paddr; ++} ++ ++#include "xendump.h" ++ ++/* ++ * Create an index of mfns for each page that makes up the ++ * kernel's complete phys_to_machine_mapping[max_pfn] array. ++ */ ++static int ++ia64_xendump_p2m_create(struct xendump_data *xd) ++{ ++ if (!symbol_exists("phys_to_machine_mapping")) { ++ xd->flags |= XC_CORE_NO_P2M; ++ return TRUE; ++ } ++ ++ error(FATAL, "ia64_xendump_p2m_create: TBD\n"); ++ ++ /* dummy calls for clean "make [wW]arn" */ ++ ia64_debug_dump_page(NULL, NULL, NULL); ++ ia64_xendump_load_page(0, xd); ++ ia64_xendump_page_index(0, xd); ++ ia64_xendump_panic_task(xd); /* externally called */ ++ ia64_get_xendump_regs(xd, NULL, NULL, NULL); /* externally called */ ++ ++ return FALSE; ++} ++ ++static void ++ia64_debug_dump_page(FILE *ofp, char *page, char *name) ++{ ++ int i; ++ ulong *up; ++ ++ fprintf(ofp, "%s\n", name); ++ ++ up = (ulong *)page; ++ for (i = 0; i < 1024; i++) { ++ fprintf(ofp, "%016lx: %016lx %016lx\n", ++ (ulong)((i * 2) * sizeof(ulong)), ++ *up, *(up+1)); ++ up += 2; ++ } ++} ++ ++/* ++ * Find the page associate with the kvaddr, and read its contents ++ * into the passed-in buffer. ++ */ ++static char * ++ia64_xendump_load_page(ulong kvaddr, struct xendump_data *xd) ++{ ++ error(FATAL, "ia64_xendump_load_page: TBD\n"); ++ ++ return NULL; ++} ++ ++/* ++ * Find the dumpfile page index associated with the kvaddr. 
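
ia64_xen_kdump_p2m() above keeps a one-entry cache (last_pmd_read/last_mfn_read) so that consecutive pseudo-physical lookups landing in the same table page reuse the previous read instead of touching the dumpfile again, with cache_hits counting how often that pays off. Below is a minimal standalone sketch of that pattern; the names, the fake read_frame() backend, and the page size are all hypothetical.

#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 16384                    /* assumed page size */
#define DEMO_BADADDR   (~0UL)

static unsigned long last_frame = DEMO_BADADDR; /* like xkd->last_mfn_read */
static char cached_page[DEMO_PAGE_SIZE];
static unsigned long hits, reads;

/* stand-in for readmem(frame, PHYSADDR, ...) against the dumpfile */
static int read_frame(unsigned long frame, void *buf, size_t len)
{
        memset(buf, (int)(frame & 0xff), len);  /* fake page contents */
        reads++;
        return 1;
}

/* return the cached page for "frame", reading it only on a miss */
static char *get_frame(unsigned long frame)
{
        if (frame == last_frame) {
                hits++;                         /* like xkd->cache_hits++ */
                return cached_page;
        }
        if (!read_frame(frame, cached_page, sizeof(cached_page))) {
                last_frame = DEMO_BADADDR;      /* poison the cache on failure */
                return NULL;
        }
        last_frame = frame;
        return cached_page;
}

int main(void)
{
        unsigned long frames[] = { 5, 5, 5, 9, 9, 5 };
        size_t i;

        for (i = 0; i < sizeof(frames)/sizeof(frames[0]); i++)
                get_frame(frames[i]);

        printf("dumpfile reads: %lu, cache hits: %lu\n", reads, hits);
        return 0;
}
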
++ */ ++static int ++ia64_xendump_page_index(ulong kvaddr, struct xendump_data *xd) ++{ ++ error(FATAL, "ia64_xendump_page_index: TBD\n"); ++ ++ return 0; ++} ++ ++static ulong ++ia64_xendump_panic_task(struct xendump_data *xd) ++{ ++ if (CRASHDEBUG(1)) ++ error(INFO, "ia64_xendump_panic_task: TBD\n"); ++ ++ return NO_TASK; ++} ++ ++static void ++ia64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp) ++{ ++ machdep->get_stack_frame(bt, rip, rsp); ++ ++ if (is_task_active(bt->task) && ++ !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS)) && ++ STREQ(closest_symbol(*rip), "schedule")) ++ error(INFO, ++ "xendump: switch_stack possibly not saved -- try \"bt -t\"\n"); ++} ++ ++/* for XEN Hypervisor analysis */ ++ ++static int ++ia64_is_kvaddr_hyper(ulong addr) ++{ ++ return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); ++} ++ ++static int ++ia64_kvtop_hyper(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) ++{ ++ ulong virt_percpu_start, phys_percpu_start; ++ ulong addr, dirp, entry; ++ ++ if (!IS_KVADDR(kvaddr)) ++ return FALSE; ++ ++ if (PERCPU_VIRT_ADDR(kvaddr)) { ++ virt_percpu_start = symbol_value("__phys_per_cpu_start"); ++ phys_percpu_start = virt_percpu_start - DIRECTMAP_VIRT_START; ++ *paddr = kvaddr - PERCPU_ADDR + phys_percpu_start; ++ return TRUE; ++ } else if (DIRECTMAP_VIRT_ADDR(kvaddr)) { ++ *paddr = kvaddr - DIRECTMAP_VIRT_START; ++ return TRUE; ++ } else if (!FRAME_TABLE_VIRT_ADDR(kvaddr)) { ++ return FALSE; ++ } ++ ++ /* frametable virtual address */ ++ addr = kvaddr - xhmachdep->frame_table; ++ ++ dirp = symbol_value("frametable_pg_dir"); ++ dirp += ((addr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)) * sizeof(ulong); ++ readmem(dirp, KVADDR, &entry, sizeof(ulong), ++ "frametable_pg_dir", FAULT_ON_ERROR); ++ ++ dirp = entry & _PFN_MASK; ++ if (!dirp) ++ return FALSE; ++ dirp += ((addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); ++ readmem(dirp, PHYSADDR, &entry, sizeof(ulong), ++ "frametable pmd", FAULT_ON_ERROR); ++ ++ dirp = entry & _PFN_MASK; ++ if (!dirp) ++ return FALSE; ++ dirp += ((addr >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) * sizeof(ulong); ++ readmem(dirp, PHYSADDR, &entry, sizeof(ulong), ++ "frametable pte", FAULT_ON_ERROR); ++ ++ if (!(entry & _PAGE_P)) ++ return FALSE; ++ ++ *paddr = (entry & _PFN_MASK) + (kvaddr & (PAGESIZE() - 1)); ++ return TRUE; ++} ++ ++static void ++ia64_post_init_hyper(void) ++{ ++ struct machine_specific *ms; ++ ulong frame_table; ++ ++ ms = &ia64_machine_specific; ++ ++ if (symbol_exists("unw_init_frame_info")) { ++ machdep->flags |= NEW_UNWIND; ++ if (MEMBER_EXISTS("unw_frame_info", "pt")) { ++ if (MEMBER_EXISTS("cpu_user_regs", "ar_csd")) { ++ machdep->flags |= NEW_UNW_V3; ++ ms->unwind_init = unwind_init_v3; ++ ms->unwind = unwind_v3; ++ ms->unwind_debug = unwind_debug_v3; ++ ms->dump_unwind_stats = dump_unwind_stats_v3; ++ } else { ++ machdep->flags |= NEW_UNW_V2; ++ ms->unwind_init = unwind_init_v2; ++ ms->unwind = unwind_v2; ++ ms->unwind_debug = unwind_debug_v2; ++ ms->dump_unwind_stats = dump_unwind_stats_v2; ++ } ++ } else { ++ machdep->flags |= NEW_UNW_V1; ++ ms->unwind_init = unwind_init_v1; ++ ms->unwind = unwind_v1; ++ ms->unwind_debug = unwind_debug_v1; ++ ms->dump_unwind_stats = dump_unwind_stats_v1; ++ } ++ } else { ++ machdep->flags |= OLD_UNWIND; ++ ms->unwind_init = ia64_old_unwind_init; ++ ms->unwind = ia64_old_unwind; ++ } ++ ms->unwind_init(); ++ ++ if (symbol_exists("frame_table")) { ++ frame_table = symbol_value("frame_table"); ++ 
readmem(frame_table, KVADDR, &xhmachdep->frame_table, sizeof(ulong), ++ "frame_table virtual address", FAULT_ON_ERROR); ++ } else { ++ error(FATAL, "cannot find frame_table virtual address."); ++ } ++} ++ ++int ++ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt) ++{ ++ int plen, i; ++ ulong paddr, stackbase, stacktop; ++ ulong *__per_cpu_mca; ++ struct xen_hyper_vcpu_context *vcc; ++ ++ vcc = xen_hyper_vcpu_to_vcpu_context(bt->task); ++ if (!vcc) ++ return 0; ++ ++ if (!symbol_exists("__per_cpu_mca") || ++ !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || ++ (plen < xht->pcpus)) ++ return 0; ++ ++ if (!machdep->kvtop(NULL, addr, &paddr, 0)) ++ return 0; ++ ++ __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * xht->pcpus); ++ ++ if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, ++ sizeof(ulong) * xht->pcpus, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) ++ return 0; ++ ++ if (CRASHDEBUG(1)) { ++ for (i = 0; i < xht->pcpus; i++) { ++ fprintf(fp, "__per_cpu_mca[%d]: %lx\n", ++ i, __per_cpu_mca[i]); ++ } ++ } ++ ++ stackbase = __per_cpu_mca[vcc->processor]; ++ stacktop = stackbase + (STACKSIZE() * 2); ++ FREEBUF(__per_cpu_mca); ++ ++ if ((paddr >= stackbase) && (paddr < stacktop)) ++ return 1; ++ else ++ return 0; ++} ++ ++static void ++ia64_init_hyper(int when) ++{ ++ struct syment *sp; ++ ++ switch (when) ++ { ++ case SETUP_ENV: ++#if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) ++ prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); ++#endif ++#if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) ++ prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); ++#endif ++ break; ++ ++ case PRE_SYMTAB: ++ machdep->verify_symbol = ia64_verify_symbol; ++ machdep->machspec = &ia64_machine_specific; ++ if (pc->flags & KERNEL_DEBUG_QUERY) ++ return; ++ machdep->pagesize = memory_page_size(); ++ machdep->pageshift = ffs(machdep->pagesize) - 1; ++ machdep->pageoffset = machdep->pagesize - 1; ++ machdep->pagemask = ~(machdep->pageoffset); ++ switch (machdep->pagesize) ++ { ++ case 4096: ++ machdep->stacksize = (power(2, 3) * PAGESIZE()); ++ break; ++ case 8192: ++ machdep->stacksize = (power(2, 2) * PAGESIZE()); ++ break; ++ case 16384: ++ machdep->stacksize = (power(2, 1) * PAGESIZE()); ++ break; ++ case 65536: ++ machdep->stacksize = (power(2, 0) * PAGESIZE()); ++ break; ++ default: ++ machdep->stacksize = 32*1024; ++ break; ++ } ++ if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc pgd space."); ++ if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc pud space."); ++ if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc pmd space."); ++ if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc ptbl space."); ++ machdep->last_pgd_read = 0; ++ machdep->last_pud_read = 0; ++ machdep->last_pmd_read = 0; ++ machdep->last_ptbl_read = 0; ++ machdep->verify_paddr = ia64_verify_paddr; ++ machdep->ptrs_per_pgd = PTRS_PER_PGD; ++ machdep->machspec->phys_start = UNKNOWN_PHYS_START; ++ /* ODA: if need make hyper version ++ if (machdep->cmdline_arg) ++ parse_cmdline_arg(); */ ++ break; ++ ++ case PRE_GDB: ++ ++ if (pc->flags & KERNEL_DEBUG_QUERY) ++ return; ++ ++ machdep->kvbase = HYPERVISOR_VIRT_START; ++ machdep->identity_map_base = HYPERVISOR_VIRT_START; ++ machdep->is_kvaddr = ia64_is_kvaddr_hyper; ++ machdep->is_uvaddr = generic_is_uvaddr; ++ machdep->eframe_search = ia64_eframe_search; ++ machdep->back_trace = ia64_back_trace_cmd; ++ 
machdep->processor_speed = xen_hyper_ia64_processor_speed; ++ machdep->uvtop = ia64_uvtop; ++ machdep->kvtop = ia64_kvtop_hyper; ++ machdep->get_stack_frame = ia64_get_stack_frame; ++ machdep->get_stackbase = ia64_get_stackbase; ++ machdep->get_stacktop = ia64_get_stacktop; ++ machdep->translate_pte = ia64_translate_pte; ++ machdep->memory_size = xen_hyper_ia64_memory_size; ++ machdep->dis_filter = ia64_dis_filter; ++ machdep->cmd_mach = ia64_cmd_mach; ++ machdep->get_smp_cpus = xen_hyper_ia64_get_smp_cpus; ++ machdep->line_number_hooks = ia64_line_number_hooks; ++ machdep->value_to_symbol = generic_machdep_value_to_symbol; ++ machdep->init_kernel_pgd = NULL; ++ ++ if ((sp = symbol_search("_stext"))) { ++ machdep->machspec->kernel_region = ++ VADDR_REGION(sp->value); ++ machdep->machspec->kernel_start = sp->value; ++ } else { ++// machdep->machspec->kernel_region = KERNEL_CACHED_REGION; ++// machdep->machspec->kernel_start = KERNEL_CACHED_BASE; ++ } ++ ++ /* machdep table for Xen Hypervisor */ ++ xhmachdep->pcpu_init = xen_hyper_ia64_pcpu_init; ++ break; ++ ++ case POST_GDB: ++ STRUCT_SIZE_INIT(switch_stack, "switch_stack"); ++ MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph"); ++ MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0"); ++ MEMBER_OFFSET_INIT(switch_stack_ar_bspstore, ++ "switch_stack", "ar_bspstore"); ++ MEMBER_OFFSET_INIT(switch_stack_ar_pfs, ++ "switch_stack", "ar_pfs"); ++ MEMBER_OFFSET_INIT(switch_stack_ar_rnat, ++ "switch_stack", "ar_rnat"); ++ MEMBER_OFFSET_INIT(switch_stack_pr, ++ "switch_stack", "pr"); ++ ++ XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq, "cpuinfo_ia64", "proc_freq"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_vendor, "cpuinfo_ia64", "vendor"); ++ if (symbol_exists("per_cpu__cpu_info")) { ++ xht->cpu_data_address = symbol_value("per_cpu__cpu_info"); ++ } ++ /* kakuma Can this be calculated? */ ++ if (!machdep->hz) { ++ machdep->hz = XEN_HYPER_HZ; ++ } ++ break; ++ ++ case POST_INIT: ++ ia64_post_init_hyper(); ++ break; ++ } ++} + #endif +--- crash/gdb_interface.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/gdb_interface.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* gdb_interface.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -31,9 +31,6 @@ + { + argc = 1; + +- if (CRASHDEBUG(1)) +- gdb_readnow_warning(); +- + if (pc->flags & SILENT) { + if (pc->flags & READNOW) + argv[argc++] = "--readnow"; +@@ -198,20 +195,28 @@ + retry: + BZERO(req->buf, BUFSIZE); + req->command = GNU_GET_DATATYPE; +- req->name = "task_struct"; ++ req->name = XEN_HYPER_MODE() ? 
"page_info" : "task_struct"; + req->flags = GNU_RETURN_ON_ERROR; + gdb_interface(req); + + if (req->flags & GNU_COMMAND_FAILED) { ++ if (XEN_HYPER_MODE()) ++ no_debugging_data(WARNING); /* just bail out */ ++ + if (!debug_data_pulled_in) { + if (CRASHDEBUG(1)) + error(INFO, +- "gdb_session_init: pulling in debug data by accessing init_mm.mmap\n"); ++ "gdb_session_init: pulling in debug data by accessing init_mm.mmap %s\n", ++ symbol_exists("sysfs_mount") ? ++ "and syfs_mount" : ""); + debug_data_pulled_in = TRUE; + req->command = GNU_PASS_THROUGH; + req->flags = GNU_RETURN_ON_ERROR|GNU_NO_READMEM; + req->name = NULL; +- sprintf(req->buf, "print init_mm.mmap"); ++ if (symbol_exists("sysfs_mount")) ++ sprintf(req->buf, "print sysfs_mount, init_mm.mmap"); ++ else ++ sprintf(req->buf, "print init_mm.mmap"); + gdb_interface(req); + if (!(req->flags & GNU_COMMAND_FAILED)) + goto retry; +@@ -237,11 +242,16 @@ + sprintf(req->buf, "set height 0"); + gdb_interface(req); + ++ req->command = GNU_PASS_THROUGH; ++ req->name = NULL, req->flags = 0; ++ sprintf(req->buf, "set width 0"); ++ gdb_interface(req); ++ + /* + * Patch gdb's symbol values with the correct values from either + * the System.map or non-debug vmlinux, whichever is in effect. + */ +- if ((pc->flags & SYSMAP) || ++ if ((pc->flags & SYSMAP) || (kt->flags & (RELOC_SET|RELOC_FORCE)) || + (pc->namelist_debug && !pc->debuginfo_file)) { + req->command = GNU_PATCH_SYMBOL_VALUES; + req->flags = GNU_RETURN_ON_ERROR; +@@ -556,6 +566,14 @@ + + error_hook = NULL; + ++ if (st->flags & ADD_SYMBOL_FILE) { ++ error(INFO, ++ "%s\n gdb add-symbol-file command failed\n", ++ st->current->mod_namelist); ++ delete_load_module(st->current->mod_base); ++ st->flags &= ~ADD_SYMBOL_FILE; ++ } ++ + if (pc->cur_gdb_cmd) { + pc->last_gdb_cmd = pc->cur_gdb_cmd; + pc->cur_gdb_cmd = 0; +@@ -619,6 +637,7 @@ + "clear", "disable", "enable", "condition", "ignore", "frame", + "select-frame", "f", "up", "down", "catch", "tcatch", "return", + "file", "exec-file", "core-file", "symbol-file", "load", "si", "ni", ++ "shell", + NULL /* must be last */ + }; + +@@ -628,7 +647,7 @@ + }; + + #define RESTRICTED_GDB_COMMAND \ +- "restricted gdb command: %s\n%s\"%s\" may only be used in a .gdbinit file or in a command file.\n%sThe .gdbinit file is read automatically during %s initialization.\n%sOther user-defined command files may be read interactively during\n%s%s runtime by using the gdb \"source\" command." 
++ "restricted gdb command: %s\n%s\"%s\" may only be used in a .gdbinit file or in a command file.\n%sThe .gdbinit file is read automatically during %s initialization.\n%sOther user-defined command files may be read interactively during\n%s%s runtime by using the gdb \"source\" command.\n" + + static int + is_restricted_command(char *cmd, ulong flags) +@@ -722,8 +741,10 @@ + if (pc->cur_req->flags & GNU_NO_READMEM) + return TRUE; + +- if (UNIQUE_COMMAND("dis")) ++ if (pc->curcmd_flags & MEMTYPE_UVADDR) + memtype = UVADDR; ++ else if (pc->curcmd_flags & MEMTYPE_FILEADDR) ++ memtype = FILEADDR; + else if (!IS_KVADDR(addr)) { + if (STREQ(pc->curcmd, "gdb") && + STRNEQ(pc->cur_req->buf, "x/")) { +@@ -740,12 +761,11 @@ + if (CRASHDEBUG(1)) + console("gdb_readmem_callback[%d]: %lx %d\n", + memtype, addr, len); +- +-#ifdef OLDWAY +- return(readmem(addr, KVADDR, buf, len, +- "gdb_readmem_callback", RETURN_ON_ERROR)); +-#endif + ++ if (memtype == FILEADDR) ++ return(readmem(pc->curcmd_private, memtype, buf, len, ++ "gdb_readmem_callback", RETURN_ON_ERROR)); ++ + switch (len) + { + case SIZEOF_8BIT: +--- crash/lkcd_x86_trace.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_x86_trace.h 2008-01-04 09:42:08.000000000 -0500 +@@ -35,6 +35,25 @@ + + typedef uint32_t kaddr_t; + ++extern int INT_EFRAME_SS; ++extern int INT_EFRAME_ESP; ++extern int INT_EFRAME_EFLAGS; ++extern int INT_EFRAME_CS; ++extern int INT_EFRAME_EIP; ++extern int INT_EFRAME_ERR; ++extern int INT_EFRAME_ES; ++extern int INT_EFRAME_DS; ++extern int INT_EFRAME_EAX; ++extern int INT_EFRAME_EBP; ++extern int INT_EFRAME_EDI; ++extern int INT_EFRAME_ESI; ++extern int INT_EFRAME_EDX; ++extern int INT_EFRAME_ECX; ++extern int INT_EFRAME_EBX; ++extern int INT_EFRAME_GS; ++ ++extern ulong int_eframe[]; ++ + #endif /* REDHAT */ + + +--- crash/lkcd_fix_mem.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_fix_mem.h 2008-01-04 09:42:08.000000000 -0500 +@@ -1,3 +1,5 @@ ++/* OBSOLETE */ ++ + #ifdef IA64 + + #define UTSNAME_ENTRY_SZ 65 +--- crash/ppc64.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/ppc64.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* ppc64.c -- core analysis suite + * +- * Copyright (C) 2004, 2005 David Anderson +- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. +- * Copyright (C) 2004 Haren Myneni, IBM Corporation ++ * Copyright (C) 2004, 2005, 2006 David Anderson ++ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc. All rights reserved. 
++ * Copyright (C) 2004, 2006 Haren Myneni, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -47,6 +47,9 @@ + static char * ppc64_check_eframe(struct ppc64_pt_regs *); + static void ppc64_print_eframe(char *, struct ppc64_pt_regs *, + struct bt_info *); ++static void parse_cmdline_arg(void); ++static void ppc64_paca_init(void); ++static void ppc64_clear_machdep_cache(void); + + struct machine_specific ppc64_machine_specific = { { 0 }, 0, 0 }; + +@@ -64,26 +67,53 @@ + machdep->verify_symbol = ppc64_verify_symbol; + if (pc->flags & KERNEL_DEBUG_QUERY) + return; +- machdep->pagesize = memory_page_size(); ++ machdep->stacksize = PPC64_STACK_SIZE; ++ machdep->last_pgd_read = 0; ++ machdep->last_pmd_read = 0; ++ machdep->last_ptbl_read = 0; ++ machdep->machspec->last_level4_read = 0; ++ machdep->verify_paddr = generic_verify_paddr; ++ machdep->ptrs_per_pgd = PTRS_PER_PGD; ++ machdep->flags |= MACHDEP_BT_TEXT; ++ if (machdep->cmdline_arg) ++ parse_cmdline_arg(); ++ machdep->clear_machdep_cache = ppc64_clear_machdep_cache; ++ break; ++ ++ case PRE_GDB: ++ /* ++ * Recently there were changes made to kexec tools ++ * to support 64K page size. With those changes ++ * vmcore file obtained from a kernel which supports ++ * 64K page size cannot be analyzed using crash on a ++ * machine running with kernel supporting 4K page size ++ * ++ * The following modifications are required in crash ++ * tool to be in sync with kexec tools. ++ * ++ * Look if the following symbol exists. If yes then ++ * the dump was taken with a kernel supporting 64k ++ * page size. So change the page size accordingly. ++ * ++ * Also moved the following code block from ++ * PRE_SYMTAB case here. 
++ */ ++ if (symbol_exists("__hash_page_64K")) ++ machdep->pagesize = PPC64_64K_PAGE_SIZE; ++ else ++ machdep->pagesize = memory_page_size(); + machdep->pageshift = ffs(machdep->pagesize) - 1; + machdep->pageoffset = machdep->pagesize - 1; + machdep->pagemask = ~((ulonglong)machdep->pageoffset); +- machdep->stacksize = 4 * machdep->pagesize; + if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc pgd space."); + if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc pmd space."); + if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc ptbl space."); +- machdep->last_pgd_read = 0; +- machdep->last_pmd_read = 0; +- machdep->last_ptbl_read = 0; +- machdep->verify_paddr = generic_verify_paddr; +- machdep->ptrs_per_pgd = PTRS_PER_PGD; +- machdep->flags |= MACHDEP_BT_TEXT; +- break; ++ if ((machdep->machspec->level4 = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc level4 space."); + +- case PRE_GDB: + machdep->kvbase = symbol_value("_stext"); + machdep->identity_map_base = machdep->kvbase; + machdep->is_kvaddr = generic_is_kvaddr; +@@ -109,6 +139,57 @@ + break; + + case POST_GDB: ++ if (!(machdep->flags & (VM_ORIG|VM_4_LEVEL))) { ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,14)) { ++ machdep->flags |= VM_4_LEVEL; ++ } else { ++ machdep->flags |= VM_ORIG; ++ } ++ } ++ if (machdep->flags & VM_ORIG) { ++ /* pre-2.6.14 layout */ ++ free(machdep->machspec->level4); ++ machdep->machspec->level4 = NULL; ++ machdep->ptrs_per_pgd = PTRS_PER_PGD; ++ } else { ++ /* 2.6.14 layout */ ++ struct machine_specific *m = machdep->machspec; ++ if (machdep->pagesize == 65536) { ++ /* 64K pagesize */ ++ m->l1_index_size = PTE_INDEX_SIZE_L4_64K; ++ m->l2_index_size = PMD_INDEX_SIZE_L4_64K; ++ m->l3_index_size = PUD_INDEX_SIZE_L4_64K; ++ m->l4_index_size = PGD_INDEX_SIZE_L4_64K; ++ m->pte_shift = symbol_exists("demote_segment_4k") ? ++ PTE_SHIFT_L4_64K_V2 : PTE_SHIFT_L4_64K_V1; ++ m->l2_masked_bits = PMD_MASKED_BITS_64K; ++ } else { ++ /* 4K pagesize */ ++ m->l1_index_size = PTE_INDEX_SIZE_L4_4K; ++ m->l2_index_size = PMD_INDEX_SIZE_L4_4K; ++ m->l3_index_size = PUD_INDEX_SIZE_L4_4K; ++ m->l4_index_size = PGD_INDEX_SIZE_L4_4K; ++ m->pte_shift = PTE_SHIFT_L4_4K; ++ m->l2_masked_bits = PMD_MASKED_BITS_4K; ++ } ++ ++ /* Compute ptrs per each level */ ++ m->l1_shift = machdep->pageshift; ++ m->ptrs_per_l1 = (1 << m->l1_index_size); ++ m->ptrs_per_l2 = (1 << m->l2_index_size); ++ m->ptrs_per_l3 = (1 << m->l3_index_size); ++ ++ machdep->ptrs_per_pgd = m->ptrs_per_l3; ++ ++ /* Compute shifts */ ++ m->l2_shift = m->l1_shift + m->l1_index_size; ++ m->l3_shift = m->l2_shift + m->l2_index_size; ++ m->l4_shift = m->l3_shift + m->l3_index_size; ++ } ++ ++ machdep->section_size_bits = _SECTION_SIZE_BITS; ++ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; ++ ppc64_paca_init(); + machdep->vmalloc_start = ppc64_vmalloc_start; + MEMBER_OFFSET_INIT(thread_struct_pg_tables, + "thread_struct", "pg_tables"); +@@ -178,9 +259,11 @@ + */ + BZERO(&machdep->machspec->hwintrstack, + NR_CPUS*sizeof(ulong)); +- machdep->hz = HZ; +- if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) +- machdep->hz = 1000; ++ if (!machdep->hz) { ++ machdep->hz = HZ; ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) ++ machdep->hz = 1000; ++ } + /* + * IRQ stacks are introduced in 2.6 and also configurable. 
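
The POST_GDB block above derives the whole 2.6.14-era layout from the per-level index sizes: ptrs-per-level is 1 shifted left by the index size, and each level's shift is the previous shift plus the previous level's index size. The standalone sketch below just replays that arithmetic; the numeric index sizes are assumptions standing in for the PTE/PMD/PUD_INDEX_SIZE_L4_4K constants, not values read from a kernel.

#include <stdio.h>

int main(void)
{
        int pageshift = 12;                     /* assumed 4KB pages */
        int l1_index_size = 9;                  /* assumed PTE_INDEX_SIZE_L4_4K */
        int l2_index_size = 7;                  /* assumed PMD_INDEX_SIZE_L4_4K */
        int l3_index_size = 7;                  /* assumed PUD_INDEX_SIZE_L4_4K */

        int l1_shift = pageshift;
        int l2_shift = l1_shift + l1_index_size;
        int l3_shift = l2_shift + l2_index_size;
        int l4_shift = l3_shift + l3_index_size;

        printf("ptrs_per_l1: %d\n", 1 << l1_index_size);
        printf("ptrs_per_l2: %d\n", 1 << l2_index_size);
        printf("ptrs_per_l3: %d\n", 1 << l3_index_size);
        printf("shifts: l1=%d l2=%d l3=%d l4=%d\n",
                l1_shift, l2_shift, l3_shift, l4_shift);
        return 0;
}
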
+ */ +@@ -223,16 +306,18 @@ + void + ppc64_dump_machdep_table(ulong arg) + { +- int others; ++ int i, c, others; + + others = 0; + fprintf(fp, " flags: %lx (", machdep->flags); + if (machdep->flags & KSYMS_START) + fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); +- if (machdep->flags & SYSRQ) +- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); + if (machdep->flags & MACHDEP_BT_TEXT) + fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); ++ if (machdep->flags & VM_ORIG) ++ fprintf(fp, "%sVM_ORIG", others++ ? "|" : ""); ++ if (machdep->flags & VM_4_LEVEL) ++ fprintf(fp, "%sVM_4_LEVEL", others++ ? "|" : ""); + fprintf(fp, ")\n"); + + fprintf(fp, " kvbase: %lx\n", machdep->kvbase); +@@ -269,15 +354,56 @@ + fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); + fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); + fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); ++ fprintf(fp, " xendump_p2m_create: NULL\n"); ++ fprintf(fp, "xen_kdump_p2m_create: NULL\n"); + fprintf(fp, " line_number_hooks: ppc64_line_number_hooks\n"); + fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); + fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); + fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); ++ fprintf(fp, "clear_machdep_cache: ppc64_clear_machdep_cache()\n"); + fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); + fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); + fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); + fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); ++ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); ++ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); ++ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); + fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); ++ fprintf(fp, " hwintrstack[%d]: ", NR_CPUS); ++ for (c = 0; c < NR_CPUS; c++) { ++ for (others = 0, i = c; i < NR_CPUS; i++) { ++ if (machdep->machspec->hwintrstack[i]) ++ others++; ++ } ++ if (!others) { ++ fprintf(fp, "%s%s", ++ c && ((c % 4) == 0) ? "\n " : "", ++ c ? "(remainder unused)" : "(unused)"); ++ break; ++ } ++ ++ fprintf(fp, "%s%016lx ", ++ ((c % 4) == 0) ? 
"\n " : "", ++ machdep->machspec->hwintrstack[c]); ++ } ++ fprintf(fp, "\n"); ++ fprintf(fp, " hwstackbuf: %lx\n", (ulong)machdep->machspec->hwstackbuf); ++ fprintf(fp, " hwstacksize: %d\n", machdep->machspec->hwstacksize); ++ fprintf(fp, " level4: %lx\n", (ulong)machdep->machspec->level4); ++ fprintf(fp, " last_level4_read: %lx\n", (ulong)machdep->machspec->last_level4_read); ++ fprintf(fp, " l4_index_size: %d\n", machdep->machspec->l4_index_size); ++ fprintf(fp, " l3_index_size: %d\n", machdep->machspec->l3_index_size); ++ fprintf(fp, " l2_index_size: %d\n", machdep->machspec->l2_index_size); ++ fprintf(fp, " l1_index_size: %d\n", machdep->machspec->l1_index_size); ++ fprintf(fp, " ptrs_per_l3: %d\n", machdep->machspec->ptrs_per_l3); ++ fprintf(fp, " ptrs_per_l2: %d\n", machdep->machspec->ptrs_per_l2); ++ fprintf(fp, " ptrs_per_l1: %d\n", machdep->machspec->ptrs_per_l1); ++ fprintf(fp, " l4_shift: %d\n", machdep->machspec->l4_shift); ++ fprintf(fp, " l3_shift: %d\n", machdep->machspec->l3_shift); ++ fprintf(fp, " l2_shift: %d\n", machdep->machspec->l2_shift); ++ fprintf(fp, " l1_shift: %d\n", machdep->machspec->l1_shift); ++ fprintf(fp, " pte_shift: %d\n", machdep->machspec->pte_shift); ++ fprintf(fp, " l2_masked_bits: %x\n", machdep->machspec->l2_masked_bits); + } + + /* +@@ -342,7 +468,7 @@ + if (!(pte & _PAGE_PRESENT)) { + if (pte && verbose) { + fprintf(fp, "\n"); +- ppc64_translate_pte(pte, 0, 0); ++ ppc64_translate_pte(pte, 0, PTE_SHIFT); + } + return FALSE; + } +@@ -354,7 +480,90 @@ + + if (verbose) { + fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); +- ppc64_translate_pte(pte, 0, 0); ++ ppc64_translate_pte(pte, 0, PTE_SHIFT); ++ } ++ ++ return TRUE; ++} ++ ++/* ++ * Virtual to physical memory translation. This function will be called ++ * by both ppc64_kvtop and ppc64_uvtop. 
++ */ ++static int ++ppc64_vtop_level4(ulong vaddr, ulong *level4, physaddr_t *paddr, int verbose) ++{ ++ ulong *level4_dir; ++ ulong *page_dir; ++ ulong *page_middle; ++ ulong *page_table; ++ ulong level4_pte, pgd_pte, pmd_pte; ++ ulong pte; ++ ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)level4); ++ ++ level4_dir = (ulong *)((ulong *)level4 + L4_OFFSET(vaddr)); ++ FILL_L4(PAGEBASE(level4), KVADDR, PAGESIZE()); ++ level4_pte = ULONG(machdep->machspec->level4 + PAGEOFFSET(level4_dir)); ++ if (verbose) ++ fprintf(fp, " L4: %lx => %lx\n", (ulong)level4_dir, level4_pte); ++ if (!level4_pte) ++ return FALSE; ++ ++ /* Sometimes we don't have level3 pagetable entries */ ++ if (machdep->machspec->l3_index_size != 0) { ++ page_dir = (ulong *)((ulong *)level4_pte + PGD_OFFSET_L4(vaddr)); ++ FILL_PGD(PAGEBASE(level4_pte), KVADDR, PAGESIZE()); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); ++ if (!pgd_pte) ++ return FALSE; ++ } else { ++ pgd_pte = level4_pte; ++ } ++ ++ page_middle = (ulong *)((ulong *)pgd_pte + PMD_OFFSET_L4(vaddr)); ++ FILL_PMD(PAGEBASE(pgd_pte), KVADDR, PAGESIZE()); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); ++ ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); ++ ++ if (!(pmd_pte)) ++ return FALSE; ++ ++ page_table = (ulong *)(pmd_pte & ~(machdep->machspec->l2_masked_bits)) ++ + (BTOP(vaddr) & (machdep->machspec->ptrs_per_l1 - 1)); ++ if (verbose) ++ fprintf(fp, " PMD: %lx => %lx\n",(ulong)page_middle, ++ (ulong)page_table); ++ ++ FILL_PTBL(PAGEBASE(pmd_pte), KVADDR, PAGESIZE()); ++ pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); ++ ++ if (verbose) ++ fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); ++ ++ if (!(pte & _PAGE_PRESENT)) { ++ if (pte && verbose) { ++ fprintf(fp, "\n"); ++ ppc64_translate_pte(pte, 0, machdep->machspec->pte_shift); ++ } ++ return FALSE; ++ } ++ ++ if (!pte) ++ return FALSE; ++ ++ *paddr = PAGEBASE(PTOB(pte >> machdep->machspec->pte_shift)) ++ + PAGEOFFSET(vaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ++ ppc64_translate_pte(pte, 0, machdep->machspec->pte_shift); + } + + return TRUE; +@@ -411,7 +620,10 @@ + FAULT_ON_ERROR); + } + +- return ppc64_vtop(vaddr, pgd, paddr, verbose); ++ if (machdep->flags & VM_4_LEVEL) ++ return ppc64_vtop_level4(vaddr, pgd, paddr, verbose); ++ else ++ return ppc64_vtop(vaddr, pgd, paddr, verbose); + } + + /* +@@ -436,7 +648,10 @@ + return TRUE; + } + +- return ppc64_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); ++ if (machdep->flags & VM_4_LEVEL) ++ return ppc64_vtop_level4(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); ++ else ++ return ppc64_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); + } + + /* +@@ -657,7 +872,7 @@ + * If a physaddr pointer is passed in, don't print anything. 
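
ppc64_vtop_level4() above and ppc64_translate_pte() below both recover the physical address the same way: shift the PTE right by the kernel's pte_shift to get the page frame number, scale by the page size, and add back the offset within the page. A standalone sketch of just that step, with made-up constants and values rather than anything taken from a real dump:

#include <stdio.h>

#define DEMO_PAGESHIFT 12UL                     /* assumed 4KB pages */
#define DEMO_PTE_SHIFT 17UL                     /* assumed pte_shift value */

int main(void)
{
        unsigned long pte   = 0x000000012345a0c1UL;     /* made-up PTE */
        unsigned long vaddr = 0x0000000010002468UL;     /* made-up virtual address */
        unsigned long pfn, paddr;

        pfn   = pte >> DEMO_PTE_SHIFT;
        paddr = (pfn << DEMO_PAGESHIFT) | (vaddr & ((1UL << DEMO_PAGESHIFT) - 1));

        printf("pfn: %lx  paddr: %lx\n", pfn, paddr);
        return 0;
}
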
+ */ + static int +-ppc64_translate_pte(ulong pte, void *physaddr, ulonglong unused) ++ppc64_translate_pte(ulong pte, void *physaddr, ulonglong pte_shift) + { + int c, len1, len2, len3, others, page_present; + char buf[BUFSIZE]; +@@ -668,7 +883,7 @@ + char *arglist[MAXARGS]; + ulong paddr; + +- paddr = PTOB(pte >> PTE_SHIFT); ++ paddr = PTOB(pte >> pte_shift); + page_present = (pte & _PAGE_PRESENT); + + if (physaddr) { +@@ -1034,8 +1249,12 @@ + ms->hwstacksize + STACK_FRAME_OVERHEAD; + bt->stackbuf = ms->hwstackbuf; + alter_stackbuf(bt); +- } else +- error(FATAL, "cannot find the stack info"); ++ } else { ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "cannot find the stack info.\n"); ++ } ++ return; ++ } + } + + +@@ -1270,20 +1489,11 @@ + return NULL; + } + +-/* +- * Print exception frame information for ppc64 +- */ + static void +-ppc64_print_eframe(char *efrm_str, struct ppc64_pt_regs *regs, +- struct bt_info *bt) ++ppc64_print_regs(struct ppc64_pt_regs *regs) + { + int i; + +- if (BT_REFERENCE_CHECK(bt)) +- return; +- +- fprintf(fp, " %s [%lx] exception frame:", efrm_str, regs->trap); +- + /* print out the gprs... */ + for(i=0; i<32; i++) { + if(!(i % 3)) +@@ -1315,9 +1525,78 @@ + fprintf(fp, "DAR: %016lx\n", regs->dar); + fprintf(fp, " DSISR: %016lx ", regs->dsisr); + fprintf(fp, " Syscall Result: %016lx\n", regs->result); ++} ++ ++/* ++ * Print the exception frame information ++ */ ++static void ++ppc64_print_eframe(char *efrm_str, struct ppc64_pt_regs *regs, ++ struct bt_info *bt) ++{ ++ if (BT_REFERENCE_CHECK(bt)) ++ return; ++ ++ fprintf(fp, " %s [%lx] exception frame:", efrm_str, regs->trap); ++ ppc64_print_regs(regs); + fprintf(fp, "\n"); + } + ++/* ++ * get SP and IP from the saved ptregs. ++ */ ++static int ++ppc64_kdump_stack_frame(struct bt_info *bt_in, ulong *nip, ulong *ksp) ++{ ++ struct ppc64_pt_regs *pt_regs; ++ unsigned long unip; ++ ++ pt_regs = (struct ppc64_pt_regs *)bt_in->machdep; ++ if (!pt_regs->gpr[1]) { ++ /* ++ * Not collected regs. May be the corresponding CPU not ++ * responded to an IPI. ++ */ ++ fprintf(fp, "%0lx: GPR1 register value (SP) was not saved\n", ++ bt_in->task); ++ return FALSE; ++ } ++ *ksp = pt_regs->gpr[1]; ++ if (IS_KVADDR(*ksp)) { ++ readmem(*ksp+16, KVADDR, &unip, sizeof(ulong), "Regs NIP value", ++ FAULT_ON_ERROR); ++ *nip = unip; ++ } else { ++ if (IN_TASK_VMA(bt_in->task, *ksp)) ++ fprintf(fp, "%0lx: Task is running in user space\n", ++ bt_in->task); ++ else ++ fprintf(fp, "%0lx: Invalid Stack Pointer %0lx\n", ++ bt_in->task, *ksp); ++ *nip = pt_regs->nip; ++ } ++ ++ if (bt_in->flags && ++ ((BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT))) ++ return TRUE; ++ ++ /* ++ * Print the collected regs for the active task ++ */ ++ ppc64_print_regs(pt_regs); ++ if (!IS_KVADDR(*ksp)) ++ return FALSE; ++ ++ fprintf(fp, " NIP [%016lx] %s\n", pt_regs->nip, ++ closest_symbol(pt_regs->nip)); ++ if (unip != pt_regs->link) ++ fprintf(fp, " LR [%016lx] %s\n", pt_regs->link, ++ closest_symbol(pt_regs->link)); ++ ++ fprintf(fp, "\n"); ++ ++ return TRUE; ++} + + /* + * Get the starting point for the active cpus in a diskdump/netdump. +@@ -1335,12 +1614,18 @@ + ulong ur_ksp = 0; + int check_hardirq, check_softirq; + int check_intrstack = TRUE; ++ struct ppc64_pt_regs *pt_regs; ++ ++ /* ++ * For the kdump vmcore, Use SP and IP values that are saved in ptregs. 
++ */ ++ if (pc->flags & KDUMP) ++ return ppc64_kdump_stack_frame(bt_in, nip, ksp); + + bt = &bt_local; + BCOPY(bt_in, bt, sizeof(struct bt_info)); + ms = machdep->machspec; + ur_nip = ur_ksp = 0; +- struct ppc64_pt_regs *pt_regs; + + panic_task = tt->panic_task == bt->task ? TRUE : FALSE; + +@@ -1424,6 +1709,7 @@ + if (STREQ(sym, ".netconsole_netdump") || + STREQ(sym, ".netpoll_start_netdump") || + STREQ(sym, ".start_disk_dump") || ++ STREQ(sym, ".crash_kexec") || + STREQ(sym, ".disk_dump")) { + *nip = *up; + *ksp = bt->stackbase + +@@ -1853,7 +2139,7 @@ + fprintf(fp, "(unknown)\n"); + fprintf(fp, " HZ: %d\n", machdep->hz); + fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); +- fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); ++// fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); + fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); + fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); + fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); +@@ -2000,4 +2286,145 @@ + ppc64_dump_line_number(0); + } + ++/* ++ * Force the VM address-range selection via: ++ * ++ * --machdep vm=orig ++ * --machdep vm=2.6.14 ++ */ ++ ++void ++parse_cmdline_arg(void) ++{ ++ int i, c, errflag; ++ char *p; ++ char buf[BUFSIZE]; ++ char *arglist[MAXARGS]; ++ int lines = 0; ++ ++ if (!strstr(machdep->cmdline_arg, "=")) { ++ error(WARNING, "ignoring --machdep option: %s\n\n", ++ machdep->cmdline_arg); ++ return; ++ } ++ ++ strcpy(buf, machdep->cmdline_arg); ++ ++ for (p = buf; *p; p++) { ++ if (*p == ',') ++ *p = ' '; ++ } ++ ++ c = parse_line(buf, arglist); ++ ++ for (i = 0; i < c; i++) { ++ errflag = 0; ++ ++ if (STRNEQ(arglist[i], "vm=")) { ++ p = arglist[i] + strlen("vm="); ++ if (strlen(p)) { ++ if (STREQ(p, "orig")) { ++ machdep->flags |= VM_ORIG; ++ continue; ++ } else if (STREQ(p, "2.6.14")) { ++ machdep->flags |= VM_4_LEVEL; ++ continue; ++ } ++ } ++ } ++ ++ error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); ++ lines++; ++ } ++ ++ switch (machdep->flags & (VM_ORIG|VM_4_LEVEL)) ++ { ++ case VM_ORIG: ++ error(NOTE, "using original PPC64 VM address ranges\n"); ++ lines++; ++ break; ++ ++ case VM_4_LEVEL: ++ error(NOTE, "using 4-level pagetable PPC64 VM address ranges\n"); ++ lines++; ++ break; ++ ++ case (VM_ORIG|VM_4_LEVEL): ++ error(WARNING, "cannot set both vm=orig and vm=2.6.14\n"); ++ lines++; ++ machdep->flags &= ~(VM_ORIG|VM_4_LEVEL); ++ break; ++ } ++ ++ if (lines) ++ fprintf(fp, "\n"); ++} ++ ++/* ++ * Updating any smp-related items that were possibly bypassed ++ * or improperly initialized in kernel_init(). 
++ */ ++static void ++ppc64_paca_init(void) ++{ ++#define BITS_FOR_LONG sizeof(ulong)*8 ++ int i, cpus, nr_paca; ++ char *cpu_paca_buf; ++ ulong data_offset; ++ ulong cpu_online_map[NR_CPUS/BITS_FOR_LONG]; ++ ++ if (!symbol_exists("paca")) ++ error(FATAL, "PPC64: Could not find 'paca' symbol\n"); ++ ++ if (!symbol_exists("cpu_online_map")) ++ error(FATAL, "PPC64: Could not find 'cpu_online_map' symbol\n"); ++ ++ if (!MEMBER_EXISTS("paca_struct", "data_offset")) ++ return; ++ ++ STRUCT_SIZE_INIT(ppc64_paca, "paca_struct"); ++ data_offset = MEMBER_OFFSET("paca_struct", "data_offset"); ++ ++ cpu_paca_buf = GETBUF(SIZE(ppc64_paca)); ++ ++ if (!(nr_paca = get_array_length("paca", NULL, 0))) ++ nr_paca = NR_CPUS; ++ ++ if (nr_paca > NR_CPUS) { ++ error(WARNING, ++ "PPC64: Number of paca entries (%d) greater than NR_CPUS (%d)\n", ++ nr_paca, NR_CPUS); ++ error(FATAL, "Recompile crash with larger NR_CPUS\n"); ++ } ++ ++ readmem(symbol_value("cpu_online_map"), KVADDR, &cpu_online_map[0], ++ nr_paca/8, "cpu_online_map", FAULT_ON_ERROR); ++ ++ for (i = cpus = 0; i < nr_paca; i++) { ++ div_t val = div(i, BITS_FOR_LONG); ++ /* ++ * CPU online? ++ */ ++ if (!(cpu_online_map[val.quot] & (0x1UL << val.rem))) ++ continue; ++ ++ readmem(symbol_value("paca") + (i * SIZE(ppc64_paca)), ++ KVADDR, cpu_paca_buf, SIZE(ppc64_paca), ++ "paca entry", FAULT_ON_ERROR); ++ ++ kt->__per_cpu_offset[i] = ULONG(cpu_paca_buf + data_offset); ++ kt->flags |= PER_CPU_OFF; ++ cpus++; ++ } ++ kt->cpus = cpus; ++ if (kt->cpus > 1) ++ kt->flags |= SMP; ++} ++ ++void ++ppc64_clear_machdep_cache(void) ++{ ++ if (machdep->machspec->last_level4_read != vt->kernel_pgd[0]) ++ machdep->machspec->last_level4_read = 0; ++} + #endif /* PPC64 */ +--- crash/xendump.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/xendump.h 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,177 @@ ++/* ++ * xendump.h ++ * ++ * Copyright (C) 2006, 2007 David Anderson ++ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved. ++ * ++ * This software may be freely redistributed under the terms of the ++ * GNU General Public License. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ */ ++#include ++#include ++ ++#define XC_SAVE_SIGNATURE "LinuxGuestRecord" ++#define XC_CORE_MAGIC 0xF00FEBED ++#define XC_CORE_MAGIC_HVM 0xF00FEBEE ++ ++/* ++ * From xenctrl.h, but probably not on most host machines. ++ */ ++typedef struct xc_core_header { ++ unsigned int xch_magic; ++ unsigned int xch_nr_vcpus; ++ unsigned int xch_nr_pages; ++ unsigned int xch_ctxt_offset; ++ unsigned int xch_index_offset; ++ unsigned int xch_pages_offset; ++} xc_core_header_t; ++ ++struct pfn_offset_cache { ++ off_t file_offset; ++ ulong pfn; ++ ulong cnt; ++}; ++#define PFN_TO_OFFSET_CACHE_ENTRIES (5000) ++ ++struct elf_index_pfn { ++ ulong index; ++ ulong pfn; ++}; ++#define INDEX_PFN_COUNT (128) ++ ++struct last_batch { ++ ulong index; ++ ulong start; ++ ulong end; ++ ulong accesses; ++ ulong duplicates; ++}; ++ ++struct xendump_data { ++ ulong flags; /* XENDUMP_LOCAL, plus anything else... 
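
ppc64_paca_init() above only reads a paca entry when the corresponding bit of cpu_online_map is set, using word i/BITS_FOR_LONG and bit i%BITS_FOR_LONG of the map. A minimal standalone illustration of that bit test follows; the map contents are made up.

#include <stdio.h>

#define DEMO_BITS_PER_LONG (sizeof(unsigned long) * 8)

static int demo_cpu_online(const unsigned long *map, int cpu)
{
        return (map[cpu / DEMO_BITS_PER_LONG] >> (cpu % DEMO_BITS_PER_LONG)) & 1UL;
}

int main(void)
{
        unsigned long online_map[2] = { 0x5UL, 0x0UL };  /* cpus 0 and 2 online */
        int i;

        for (i = 0; i < 4; i++)
                printf("cpu %d: %s\n", i,
                        demo_cpu_online(online_map, i) ? "online" : "offline");
        return 0;
}
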
*/ ++ int xfd; ++ int pc_next; ++ uint page_size; ++ FILE *ofp; ++ char *page; ++ ulong accesses; ++ ulong cache_hits; ++ ulong redundant; ++ ulong last_pfn; ++ struct pfn_offset_cache *poc; ++ ++ struct xc_core_data { ++ int p2m_frames; ++ ulong *p2m_frame_index_list; ++ struct xc_core_header header; ++ int elf_class; ++ uint64_t format_version; ++ off_t elf_strtab_offset; ++ off_t shared_info_offset; ++ off_t ia64_mapped_regs_offset; ++ struct elf_index_pfn elf_index_pfn[INDEX_PFN_COUNT]; ++ struct last_batch last_batch; ++ Elf32_Ehdr *elf32; ++ Elf64_Ehdr *elf64; ++ } xc_core; ++ ++ struct xc_save_data { ++ ulong nr_pfns; ++ int vmconfig_size; ++ char *vmconfig_buf; ++ ulong *p2m_frame_list; ++ uint pfns_not; ++ off_t pfns_not_offset; ++ off_t vcpu_ctxt_offset; ++ off_t shared_info_page_offset; ++ off_t *batch_offsets; ++ ulong batch_count; ++ ulong *region_pfn_type; ++ ulong ia64_version; ++ ulong *ia64_page_offsets; ++ } xc_save; ++ ++ ulong panic_pc; ++ ulong panic_sp; ++}; ++ ++#define XC_SAVE (XENDUMP_LOCAL << 1) ++#define XC_CORE_ORIG (XENDUMP_LOCAL << 2) ++#define XC_CORE_P2M_CREATE (XENDUMP_LOCAL << 3) ++#define XC_CORE_PFN_CREATE (XENDUMP_LOCAL << 4) ++#define XC_CORE_NO_P2M (XENDUMP_LOCAL << 5) ++#define XC_SAVE_IA64 (XENDUMP_LOCAL << 6) ++#define XC_CORE_64BIT_HOST (XENDUMP_LOCAL << 7) ++#define XC_CORE_ELF (XENDUMP_LOCAL << 8) ++ ++#define MACHINE_BYTE_ORDER() \ ++ (machine_type("X86") || \ ++ machine_type("X86_64") || \ ++ machine_type("IA64") ? __LITTLE_ENDIAN : __BIG_ENDIAN) ++ ++#define BYTE_SWAP_REQUIRED(endian) (endian != MACHINE_BYTE_ORDER()) ++ ++static inline uint32_t ++swab32(uint32_t x) ++{ ++ return (((x & 0x000000ffU) << 24) | ++ ((x & 0x0000ff00U) << 8) | ++ ((x & 0x00ff0000U) >> 8) | ++ ((x & 0xff000000U) >> 24)); ++} ++ ++#define MFN_NOT_FOUND (-1) ++#define PFN_NOT_FOUND (-1) ++ ++#define INVALID_MFN (~0UL) ++ ++/* ++ * ia64 "xm save" format is completely different than the others. ++ */ ++typedef struct xen_domctl_arch_setup { ++ uint64_t flags; /* XEN_DOMAINSETUP_* */ ++/* #ifdef __ia64__ */ ++ uint64_t bp; /* mpaddr of boot param area */ ++ uint64_t maxmem; /* Highest memory address for MDT. */ ++ uint64_t xsi_va; /* Xen shared_info area virtual address. */ ++ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ ++/* #endif */ ++} xen_domctl_arch_setup_t; ++ ++/* ++ * xc_core ELF note, which differs from the standard Elf[32|64]_Nhdr ++ * structure by the additional name field. ++ */ ++struct elfnote { ++ uint32_t namesz; ++ uint32_t descsz; ++ uint32_t type; ++ char name[4]; ++}; ++ ++#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 ++#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 ++#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 ++#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 ++ ++struct xen_dumpcore_elfnote_header_desc { ++ uint64_t xch_magic; ++ uint64_t xch_nr_vcpus; ++ uint64_t xch_nr_pages; ++ uint64_t xch_page_size; ++}; ++ ++#define FORMAT_VERSION_0000000000000001 0x0000000000000001ULL ++ ++struct xen_dumpcore_elfnote_format_version_desc { ++ uint64_t version; ++}; ++ ++struct xen_dumpcore_p2m { ++ uint64_t pfn; ++ uint64_t gmfn; ++}; +--- crash/diskdump.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/diskdump.h 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,10 @@ + /* + * diskdump.h + * +- * Copyright (C) 2004, 2005 David Anderson +- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2004, 2005, 2006 David Anderson ++ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc. All rights reserved. 
++ * Copyright (C) 2005 FUJITSU LIMITED ++ * Copyright (C) 2005 NEC Corporation + * + * This software may be freely redistributed under the terms of the + * GNU General Public License. +@@ -10,7 +12,65 @@ + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +- * +- * Author: David Anderson + */ + ++#include ++ ++#define divideup(x, y) (((x) + ((y) - 1)) / (y)) ++#define round(x, y) (((x) / (y)) * (y)) ++ ++#define DUMP_PARTITION_SIGNATURE "diskdump" ++#define SIG_LEN (sizeof(DUMP_PARTITION_SIGNATURE) - 1) ++#define DISK_DUMP_SIGNATURE "DISKDUMP" ++#define KDUMP_SIGNATURE "KDUMP " ++ ++#define DUMP_HEADER_COMPLETED 0 ++#define DUMP_HEADER_INCOMPLETED 1 ++#define DUMP_HEADER_COMPRESSED 8 ++ ++struct disk_dump_header { ++ char signature[SIG_LEN]; /* = "DISKDUMP" */ ++ int header_version; /* Dump header version */ ++ struct new_utsname utsname; /* copy of system_utsname */ ++ struct timeval timestamp; /* Time stamp */ ++ unsigned int status; /* Above flags */ ++ int block_size; /* Size of a block in byte */ ++ int sub_hdr_size; /* Size of arch dependent ++ header in blocks */ ++ unsigned int bitmap_blocks; /* Size of Memory bitmap in ++ block */ ++ unsigned int max_mapnr; /* = max_mapnr */ ++ unsigned int total_ram_blocks;/* Number of blocks should be ++ written */ ++ unsigned int device_blocks; /* Number of total blocks in ++ * the dump device */ ++ unsigned int written_blocks; /* Number of written blocks */ ++ unsigned int current_cpu; /* CPU# which handles dump */ ++ int nr_cpus; /* Number of CPUs */ ++ struct task_struct *tasks[0]; ++}; ++ ++struct disk_dump_sub_header { ++ long elf_regs; ++}; ++ ++struct kdump_sub_header { ++ unsigned long phys_base; ++ int dump_level; /* header_version 1 and later */ ++}; ++ ++/* page flags */ ++#define DUMP_DH_COMPRESSED 0x1 /* page is compressed */ ++ ++/* descriptor of each page for vmcore */ ++typedef struct page_desc { ++ off_t offset; /* the offset of the page data*/ ++ unsigned int size; /* the size of this dump page */ ++ unsigned int flags; /* flags */ ++ unsigned long long page_flags; /* page flags */ ++} page_desc_t; ++ ++#define DISKDUMP_CACHED_PAGES (16) ++#define PAGE_VALID (0x1) /* flags */ ++#define DISKDUMP_VALID_PAGE(flags) ((flags) & PAGE_VALID) ++ +--- crash/help.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/help.c 2008-01-04 11:54:24.000000000 -0500 +@@ -1,8 +1,8 @@ + /* help.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -19,7 +19,6 @@ + + static void reshuffle_cmdlist(void); + static int sort_command_name(const void *, const void *); +-static void display_help_screen(char *); + static void display_commands(void); + static void display_copying_info(void); + static void display_warranty_info(void); +@@ -106,34 +105,33 @@ + void + program_usage(int form) + { +- int i; +- char **p; +- FILE *less; ++ if (form == SHORT_FORM) { ++ fprintf(fp, program_usage_info[0], pc->program_name); ++ fprintf(fp, "\nEnter \"%s -h\" for details.\n", ++ pc->program_name); ++ clean_exit(1); ++ } else { ++ FILE *scroll; ++ char *scroll_command; ++ char **p; ++ ++ if ((scroll_command = setup_scroll_command()) && ++ (scroll = popen(scroll_command, "w"))) ++ fp = scroll; ++ else ++ scroll = NULL; + +- if (form == LONG_FORM) +- less = popen("/usr/bin/less", "w"); +- else +- less = NULL; +- +- p = program_usage_info; +- +- if (form == LONG_FORM) { +- if (less) +- fp = less; +- for (i = 0; program_usage_info[i]; i++, p++) { +- fprintf(fp, *p, pc->program_name); ++ for (p = program_usage_info; *p; p++) { ++ fprintf(fp, *p, pc->program_name); + fprintf(fp, "\n"); + } +- } else { +- fprintf(fp, *p, pc->program_name); +- fprintf(fp, "\nEnter \"%s -h\" for details.\n", +- pc->program_name); +- } +- fflush(fp); +- if (less) +- pclose(less); ++ fflush(fp); + +- clean_exit(1); ++ if (scroll) ++ pclose(scroll); ++ ++ clean_exit(0); ++ } + } + + +@@ -147,14 +145,16 @@ + struct command_table_entry *cp; + struct extension_table *ext; + +- for (pc->ncmds = 0, cp = &base_command_table[0]; cp->name; cp++) { ++ for (pc->ncmds = 0, cp = pc->cmd_table; cp->name; cp++) { + if (!(cp->flags & HIDDEN_COMMAND)) + pc->ncmds++; + } + + for (ext = extension_table; ext; ext = ext->next) { +- for (cp = ext->command_table; cp->name; cp++) +- pc->ncmds++; ++ for (cp = ext->command_table; cp->name; cp++) { ++ if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) ++ pc->ncmds++; ++ } + } + + if (!pc->cmdlist) { +@@ -188,14 +188,16 @@ + for (i = 0; i < pc->cmdlistsz; i++) + pc->cmdlist[i] = NULL; + +- for (cnt = 0, cp = &base_command_table[0]; cp->name; cp++) { ++ for (cnt = 0, cp = pc->cmd_table; cp->name; cp++) { + if (!(cp->flags & HIDDEN_COMMAND)) + pc->cmdlist[cnt++] = cp->name; + } + + for (ext = extension_table; ext; ext = ext->next) { +- for (cp = ext->command_table; cp->name; cp++) +- pc->cmdlist[cnt++] = cp->name; ++ for (cp = ext->command_table; cp->name; cp++) { ++ if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) ++ pc->cmdlist[cnt++] = cp->name; ++ } + } + + if (cnt > pc->cmdlistsz) +@@ -239,7 +241,7 @@ + oflag = 0; + + while ((c = getopt(argcnt, args, +- "efNDdmM:ngcaBbHhksvVoptTzLxO")) != EOF) { ++ "efNDdmM:ngcaBbHhkKsvVoptTzLxO")) != EOF) { + switch(c) + { + case 'e': +@@ -303,7 +305,11 @@ + return; + + case 'k': +- dump_kernel_table(); ++ dump_kernel_table(!VERBOSE); ++ return; ++ ++ case 'K': ++ dump_kernel_table(VERBOSE); + return; + + case 's': +@@ -349,6 +355,7 @@ + fprintf(fp, " -D - dumpfile memory usage\n"); + fprintf(fp, " -f - filesys table\n"); + fprintf(fp, " -k - kernel_table\n"); ++ fprintf(fp, " -K - kernel_table (verbose)\n"); + fprintf(fp, " -M machine specific\n"); + fprintf(fp, " -m - machdep_table\n"); + fprintf(fp, " -s - symbol table data\n"); +@@ -389,7 +396,7 @@ + if (oflag) + dump_offset_table(args[optind], FALSE); + else +- cmd_usage(args[optind], COMPLETE_HELP); ++ cmd_usage(args[optind], 
COMPLETE_HELP|MUST_HELP); + optind++; + } while (args[optind]); + } +@@ -398,7 +405,7 @@ + * Format and display the help menu. + */ + +-static void ++void + display_help_screen(char *indent) + { + int i, j, rows; +@@ -508,16 +515,16 @@ + " active perform the command(s) on the active thread on each CPU.\n", + " If none of the task-identifying arguments above are entered, the command", + " will be performed on all tasks.\n", +-" command select one or more of the following commands on the tasks", ++" command select one or more of the following commands to be run on the tasks", + " selected, or on all tasks:\n", +-" bt same as the \"bt\" command (optional flags: -r -t -l -e -R -f)", +-" vm same as the \"vm\" command (optional flags: -p -v -m -R)", +-" task same as the \"task\" command (optional flag: -R)", +-" files same as the \"files\" command (optional flag: -R)", +-" net same as the \"net\" command (optional flags: -s -S -R)", +-" set same as the \"set\" command", +-" sig same as the \"sig\" command", +-" vtop same as the \"vtop\" command (optional flags: -c -u -k)\n", ++" bt run the \"bt\" command (optional flags: -r -t -l -e -R -f -o)", ++" vm run the \"vm\" command (optional flags: -p -v -m -R)", ++" task run the \"task\" command (optional flag: -R)", ++" files run the \"files\" command (optional flag: -R)", ++" net run the \"net\" command (optional flags: -s -S -R)", ++" set run the \"set\" command", ++" sig run the \"sig\" command (optional flag: -g)", ++" vtop run the \"vtop\" command (optional flags: -c -u -k)\n", + " flag Pass this optional flag to the command selected.", + " argument Pass this argument to the command selected.", + " ", +@@ -651,6 +658,10 @@ + " argument is entered, the current value of the %s variable is shown. These", + " are the %s variables, acceptable arguments, and purpose:\n", + " scroll on | off controls output scrolling.", ++" scroll less /usr/bin/less as the output scrolling program.", ++" scroll more /bin/more as the output scrolling program.", ++" scroll CRASHPAGER use CRASHPAGER environment variable as the", ++" output scrolling program.", + " radix 10 | 16 sets output radix to 10 or 16.", + " refresh on | off controls internal task list refresh.", + " print_max number set maximum number of array elements to print.", +@@ -665,6 +676,8 @@ + " edit vi | emacs set line editing mode (from .%src file only).", + " namelist filename name of kernel (from .%src file only).", + " dumpfile filename name of core dumpfile (from .%src file only).", ++" zero_excluded on | off controls whether excluded pages from a dumpfile", ++" should return zero-filled memory.", + " ", + " Internal variables may be set in four manners:\n", + " 1. 
entering the set command in $HOME/.%src.", +@@ -694,11 +707,11 @@ + " STATE: TASK_RUNNING (PANIC)\n", + " Turn off output scrolling:\n", + " %s> set scroll off", +-" scroll: off", ++" scroll: off (/usr/bin/less)", + " ", + " Show the current state of %s internal variables:\n", + " %s> set -v", +-" scroll: on", ++" scroll: on (/usr/bin/less)", + " radix: 10 (decimal)", + " refresh: on", + " print_max: 256", +@@ -710,6 +723,7 @@ + " edit: vi", + " namelist: vmlinux", + " dumpfile: vmcore", ++" zero_excluded: off", + " ", + " Show the current context:\n", + " %s> set", +@@ -787,7 +801,7 @@ + char *help_ps[] = { + "ps", + "display process status information", +-"[-k|-u][-s][-p|-c|-t|-l] [pid | taskp | command] ...", ++"[-k|-u][-s][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ...", + " This command displays process status for selected, or all, processes" , + " in the system. If no arguments are entered, the process data is", + " is displayed for all processes. Selected process identifiers can be", +@@ -822,8 +836,9 @@ + " On SMP machines, the active task on each CPU will be highlighted by an", + " angle bracket (\">\") preceding its information.", + " ", +-" Alternatively, information regarding parent-child relationships, or", +-" per-task time usage data may be displayed:", ++" Alternatively, information regarding parent-child relationships,", ++" per-task time usage data, argument/environment data, thread groups,", ++" or resource limits may be displayed:", + " ", + " -p display the parental hierarchy of selected, or all, tasks.", + " -c display the children of selected, or all, tasks.", +@@ -832,6 +847,10 @@ + " -l display the task last_run or timestamp value, whichever applies,", + " of selected, or all, tasks; the list is sorted with the most", + " recently-run task (largest last_run/timestamp) shown first.", ++" -a display the command line arguments and environment strings of", ++" selected, or all, user-mode tasks.", ++" -g display tasks by thread group, of selected, or all, tasks.", ++" -r display resource limits (rlimits) of selected, or all, tasks.", + "\nEXAMPLES", + " Show the process status of all current tasks:\n", + " %s> ps", +@@ -1031,13 +1050,73 @@ + " 381 1 0 c34ddf28 IN 0.2 1316 224 automount", + " 391 1 1 c2777f28 IN 0.2 1316 224 automount", + " ...", ++" ", ++" Display the argument and environment data for the automount task:\n", ++" %s> ps -a automount", ++" PID: 3948 TASK: f722ee30 CPU: 0 COMMAND: \"automount\"", ++" ARG: /usr/sbin/automount --timeout=60 /net program /etc/auto.net", ++" ENV: SELINUX_INIT=YES", ++" CONSOLE=/dev/console", ++" TERM=linux", ++" INIT_VERSION=sysvinit-2.85", ++" PATH=/sbin:/usr/sbin:/bin:/usr/bin", ++" LC_MESSAGES=en_US", ++" RUNLEVEL=3", ++" runlevel=3", ++" PWD=/", ++" LANG=ja_JP.UTF-8", ++" PREVLEVEL=N", ++" previous=N", ++" HOME=/", ++" SHLVL=2", ++" _=/usr/sbin/automount", ++" ", ++" Display the tasks in the thread group containing task c20ab0b0:\n", ++" %s> ps -g c20ab0b0", ++" PID: 6425 TASK: f72f50b0 CPU: 0 COMMAND: \"firefox-bin\"", ++" PID: 6516 TASK: f71bf1b0 CPU: 0 COMMAND: \"firefox-bin\"", ++" PID: 6518 TASK: d394b930 CPU: 0 COMMAND: \"firefox-bin\"", ++" PID: 6520 TASK: c20aa030 CPU: 0 COMMAND: \"firefox-bin\"", ++" PID: 6523 TASK: c20ab0b0 CPU: 0 COMMAND: \"firefox-bin\"", ++" PID: 6614 TASK: f1f181b0 CPU: 0 COMMAND: \"firefox-bin\"", ++" ", ++" Display the tasks in the thread group for each instance of the", ++" program named \"multi-thread\":\n", ++" %s> ps -g multi-thread", ++" PID: 2522 TASK: 1003f0dc7f0 CPU: 1 COMMAND: 
\"multi-thread\"", ++" PID: 2523 TASK: 10037b13030 CPU: 1 COMMAND: \"multi-thread\"", ++" PID: 2524 TASK: 1003e064030 CPU: 1 COMMAND: \"multi-thread\"", ++" PID: 2525 TASK: 1003e13a7f0 CPU: 1 COMMAND: \"multi-thread\"", ++" ", ++" PID: 2526 TASK: 1002f82b7f0 CPU: 1 COMMAND: \"multi-thread\"", ++" PID: 2527 TASK: 1003e1737f0 CPU: 1 COMMAND: \"multi-thread\"", ++" PID: 2528 TASK: 10035b4b7f0 CPU: 1 COMMAND: \"multi-thread\"", ++" PID: 2529 TASK: 1003f0c37f0 CPU: 1 COMMAND: \"multi-thread\"", ++" PID: 2530 TASK: 10035597030 CPU: 1 COMMAND: \"multi-thread\"", ++" PID: 2531 TASK: 100184be7f0 CPU: 1 COMMAND: \"multi-thread\"", ++" ", ++" Display the resource limits of \"bash\" task 13896:\n", ++" %s> ps -r 13896", ++" PID: 13896 TASK: cf402000 CPU: 0 COMMAND: \"bash\"", ++" RLIMIT CURRENT MAXIMUM", ++" CPU (unlimited) (unlimited)", ++" FSIZE (unlimited) (unlimited)", ++" DATA (unlimited) (unlimited)", ++" STACK 10485760 (unlimited)", ++" CORE (unlimited) (unlimited)", ++" RSS (unlimited) (unlimited)", ++" NPROC 4091 4091", ++" NOFILE 1024 1024", ++" MEMLOCK 4096 4096", ++" AS (unlimited) (unlimited)", ++" LOCKS (unlimited) (unlimited)", + NULL + }; + + char *help_rd[] = { + "rd", + "read memory", +-"[-dDsup][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]", ++"[-dDsupxmf][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]", + " This command displays the contents of memory, with the output formatted", + " in several different manners. The starting address may be entered either", + " symbolically or by address. The default output size is the size of a long", +@@ -1046,9 +1125,12 @@ + " -p address argument is a physical address.", + " -u address argument is a user virtual address; only required on", + " processors with common user and kernel virtual address spaces.", ++" -m address argument is a xen host machine address.", ++" -f address argument is a dumpfile offset.", + " -d display output in signed decimal format (default is hexadecimal).", + " -D display output in unsigned decimal format (default is hexadecimal).", + " -s displays output symbolically when appropriate.", ++" -x do not display ASCII translation at end of each line.", + #ifdef NOTDEF + " -o Shows offset value from the starting address.", + #endif +@@ -1064,7 +1146,8 @@ + " 3. -u specifies a user virtual address, but is only necessary on", + " processors with common user and kernel virtual address spaces.", + " symbol symbol of starting address to read.", +-" count number of memory locations to display (default is 1).", ++" count number of memory locations to display (default is 1); if entered,", ++" must be the last argument on the command line.", + "\nEXAMPLES", + " Display the kernel_version string:\n", + " %s> rd kernel_version 4 ", +@@ -1155,7 +1238,7 @@ + "bt", + "backtrace", + #if defined(GDB_6_0) || defined(GDB_6_1) +-"[-a|-r|-t|-l|-e|-E|-f] [-R ref] [ -I ip ] [-S sp] [pid | taskp]", ++"[-a|-r|-t|-T|-l|-e|-E|-f|-o|-O] [-R ref] [ -I ip ] [-S sp] [pid | taskp]", + #else + "[-a|-r|-t|-l|-e|-f|-g] [-R ref] [ -I ip ] [-S sp] [pid | taskp]", + #endif +@@ -1167,14 +1250,26 @@ + " pages of memory containing the task_union structure.", + " -t display all text symbols found from the last known stack location", + " to the top of the stack. (helpful if the back trace fails)", ++" -T display all text symbols found from just above the task_struct or", ++" thread_info to the top of the stack. 
(helpful if the back trace", ++" fails or the -t option starts too high in the process stack).", + " -l show file and line number of each stack trace text location.", + " -e search the stack for possible kernel and user mode exception frames.", +-" -E search the IRQ stacks (x86, x86_64 and PPC64), and the exception", ++" -E search the IRQ stacks (x86, x86_64 and ppc64), and the exception", + " stacks (x86_64) for possible exception frames; all other arguments", + " will be ignored since this is not a context-sensitive operation.", + " -f display all stack data contained in a frame; this option can be", +-" used to determine the arguments passed to each function (x86 only);", +-" on IA64, the argument register contents are dumped.", ++" used to determine the arguments passed to each function; on ia64,", ++" the argument register contents are dumped.", ++" -o x86: use old backtrace method, permissable only on kernels that were", ++" compiled without the -fomit-frame_pointer.", ++" x86_64: use old backtrace method, which dumps potentially stale", ++" kernel text return addresses found on the stack.", ++" -O x86: use old backtrace method by default, permissable only on kernels", ++" that were compiled without the -fomit-frame_pointer; subsequent usage", ++" of this option toggles the backtrace method.", ++" x86_64: use old backtrace method by default; subsequent usage of this", ++" option toggles the backtrace method.", + #if !defined(GDB_6_0) && !defined(GDB_6_1) + " -g use gdb stack trace code. (alpha only)", + #endif +@@ -1189,11 +1284,8 @@ + " Note that all examples below are for x86 only. The output format will differ", + " for other architectures. x86 backtraces from kernels that were compiled", + " with the --fomit-frame-pointer CFLAG occasionally will drop stack frames,", +-" or display a stale frame reference. x86_64 backtraces are only slightly", +-" more intelligent than those generated from kernel oops messages; text return", +-" addresses shown in the back trace may include stale references. When in", +-" doubt as to the accuracy of a backtrace, the -t option may help fill in", +-" the blanks.\n", ++" or display a stale frame reference. When in doubt as to the accuracy of a", ++" backtrace, the -t or -T options may help fill in the blanks.\n", + "EXAMPLES", + " Display the stack trace of the active task(s) when the kernel panicked:\n", + " %s> bt -a", +@@ -1437,14 +1529,22 @@ + " ", + " Below is an example shared object file consisting of just one command, ", + " called \"echo\", which simply echoes back all arguments passed to it.", +-" Note the comments contained within it for further details. To build it,", +-" cut and paste the following output into a file, and call it, for example,", +-" \"extlib.c\". Then compile like so:", ++" Note the comments contained within it for further details. Cut and paste", ++" the following output into a file, and call it, for example, \"echo.c\".", ++" Then compiled in either of two manners. Either manually like so:", ++" ", ++" gcc -nostartfiles -shared -rdynamic -o echo.so echo.c -fPIC -D $(TARGET_CFLAGS)", ++" ", ++" where must be one of the MACHINE_TYPE #define's in defs.h,", ++" and where $(TARGET_CFLAGS) is the same as it is declared in the top-level", ++" Makefile after a build is completed. 
Or alternatively, the \"echo.c\" file", ++" can be copied into the \"extensions\" subdirectory, and compiled automatically", ++" like so:", + " ", +-" gcc -nostartfiles -shared -rdynamic -o extlib.so extlib.c", ++" make extensions", + " ", +-" The extlib.so file may be dynamically linked into %s during runtime, or", +-" during initialization by putting \"extend extlib.so\" into a .%src file", ++" The echo.so file may be dynamically linked into %s during runtime, or", ++" during initialization by putting \"extend echo.so\" into a .%src file", + " located in the current directory, or in the user's $HOME directory.", + " ", + "---------------------------------- cut here ----------------------------------", +@@ -1556,7 +1656,7 @@ + " PROCESSOR SPEED: 1993 Mhz", + " HZ: 100", + " PAGE SIZE: 4096", +-" L1 CACHE SIZE: 32", ++// " L1 CACHE SIZE: 32", + " KERNEL VIRTUAL BASE: c0000000", + " KERNEL VMALLOC BASE: e0800000", + " KERNEL STACK SIZE: 8192", +@@ -1583,7 +1683,8 @@ + " This command displays the timer queue entries, both old- and new-style,", + " in chronological order. In the case of the old-style timers, the", + " timer_table array index is shown; in the case of the new-style timers, ", +-" the timer_list address is shown.", ++" the timer_list address is shown. On later kernels, the timer data is", ++" per-cpu.", + "\nEXAMPLES", + " %s> timer", + " JIFFIES", +@@ -1610,6 +1711,37 @@ + " 372010 c2323f7c c0112d6c ", + " 372138 c2191f10 c0112d6c ", + " 8653052 c1f13f10 c0112d6c ", ++" ", ++" Display the timer queue on a 2-cpu system:\n", ++" %s> timer", ++" TVEC_BASES[0]: c1299be0", ++" JIFFIES", ++" 18256298", ++" EXPIRES TIMER_LIST FUNCTION", ++" 18256406 cd5ddec0 c01232bb ", ++" 18256677 ceea93e0 c011e3cc ", ++" 18256850 ceea7f64 c01232bb ", ++" 18258751 cd1d4f64 c01232bb ", ++" 18258792 cf5782f0 c011e3cc ", ++" 18261266 c03c9f80 c022fad5 ", ++" 18262196 c02dc2e0 c0233329 ", ++" 18270518 ceb8bf1c c01232bb ", ++" 18271327 c03c9120 c0222074 ", ++" 18271327 c03ca580 c0233ace ", ++" 18272532 c02d1e18 c0129946 ", ++" 18276518 c03c9fc0 c022fd40 ", ++" 18332334 ceea9970 c011e3cc ", ++" 18332334 cfb6a840 c011e3cc ", ++" 18665378 cec25ec0 c01232bb ", ++" TVEC_BASES[1]: c12a1be0", ++" JIFFIES", ++" 18256298", ++" EXPIRES TIMER_LIST FUNCTION", ++" 18256493 c02c7d00 c013dad5 ", ++" 18256499 c12a2db8 c0129946 ", ++" 18277900 ceebaec0 c01232bb ", ++" 18283769 cf739f64 c01232bb ", ++" 18331902 cee8af64 c01232bb ", + NULL + }; + +@@ -1905,7 +2037,7 @@ + char *help_irq[] = { + "irq", + "IRQ data", +-"[-d | -b | [index ...]]", ++"[[[index ...] | -u] | -d | -b]", + " This command collaborates the data in an irq_desc_t, along with its", + " associated hw_interrupt_type and irqaction structure data, into a", + " consolidated per-IRQ display. Alternatively, the intel interrupt", +@@ -1913,6 +2045,7 @@ + " If no index value argument(s) nor any options are entered, the IRQ", + " data for all IRQs will be displayed.\n", + " index a valid IRQ index.", ++" -u dump data for in-use IRQs only.", + " -d dump the intel interrupt descriptor table.", + " -b dump bottom half data.", + "\nEXAMPLES", +@@ -2013,7 +2146,7 @@ + char *help_sys[] = { + "sys", + "system data", +-"[-c [name|number]] ", ++"[-c [name|number]] config", + " This command displays system-specific data. If no arguments are entered,\n" + " the same system data shown during %s invocation is shown.\n", + " -c [name|number] If no name or number argument is entered, dump all", +@@ -2023,6 +2156,8 @@ + " that number is displayed. 
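/*
 * Illustrative sketch, not from the upstream patch: the "timer" help text
 * above says entries are displayed in chronological order (per tvec_base on
 * per-cpu kernels).  A standalone sketch of that kind of sort; the structure
 * and function names are invented for the example, not crash's internals.
 */
#include <stdlib.h>

struct sketch_timer {
	unsigned long expires;
	unsigned long timer_list;	/* address of the timer_list */
	unsigned long function;		/* address of the callback */
};

static int sketch_cmp_expires(const void *a, const void *b)
{
	const struct sketch_timer *ta = a, *tb = b;

	if (ta->expires == tb->expires)
		return 0;
	return (ta->expires < tb->expires) ? -1 : 1;
}

/* Sort gathered timers so the soonest-expiring entry prints first. */
static void sketch_sort_timers(struct sketch_timer *timers, size_t count)
{
	qsort(timers, count, sizeof(*timers), sketch_cmp_expires);
}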
If the current output radix", + " has been set to 16, the system call numbers will be ", + " displayed in hexadecimal.", ++" config If the kernel was configured with CONFIG_IKCONFIG, then", ++" dump the in-kernel configuration data.", + " -panic Panic a live system. Requires write permission to", + " /dev/mem. Results in the %s context causing an", + " \"Attempted to kill the idle task!\" panic. (The dump", +@@ -2043,6 +2178,27 @@ + " VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999", + " MACHINE: i686 (500 MHz)", + " MEMORY: 1 GB", ++"\n Dump the system configuration data (if CONFIG_IKCONFIG):\n", ++" %s> sys config", ++" #", ++" # Automatically generated make config: don't edit", ++" # Linux kernel version: 2.6.16", ++" # Mon Apr 10 07:58:06 2006", ++" #", ++" CONFIG_X86_64=y", ++" CONFIG_64BIT=y", ++" CONFIG_X86=y", ++" CONFIG_SEMAPHORE_SLEEPERS=y", ++" CONFIG_MMU=y", ++" CONFIG_RWSEM_GENERIC_SPINLOCK=y", ++" CONFIG_GENERIC_CALIBRATE_DELAY=y", ++" CONFIG_X86_CMPXCHG=y", ++" CONFIG_EARLY_PRINTK=y", ++" CONFIG_GENERIC_ISA_DMA=y", ++" CONFIG_GENERIC_IOMAP=y", ++" CONFIG_ARCH_MAY_HAVE_PC_FDC=y", ++" CONFIG_DMI=y", ++" ...", + "\n Dump the system call table:\n", + " %s> sys -c", + " NUM SYSTEM CALL FILE AND LINE NUMBER", +@@ -2191,13 +2347,18 @@ + char *help_mount[] = { + "mount", + "mounted filesystem data", +-"[-f] [-i] [vfsmount | superblock | devname | dirname | inode]", ++"[-f] [-i] [-n pid|task] [vfsmount|superblock|devname|dirname|inode]", + " This command displays basic information about the currently-mounted", + " filesystems. The per-filesystem dirty inode list or list of open", + " files for the filesystem may also be displayed.\n", + " -f dump dentries and inodes for open files in each filesystem.", + " -i dump all dirty inodes associated with each filesystem.\n", +-" Filesystems may be selected in the following forms:\n", ++" For kernels supporting namespaces, the -n option may be used to", ++" display the mounted filesystems with respect to the namespace of a", ++" specified task:\n", ++" -n pid a process PID.", ++" -n task a hexadecimal task_struct pointer.\n", ++" Specific filesystems may be selected using the following forms:\n", + " vfsmount hexadecimal address of filesystem vfsmount structure.", + " superblock hexadecimal address of filesystem super_block structure.", + " devname device name of filesystem.", +@@ -2721,22 +2882,22 @@ + char *help_sig[] = { + "sig", + "task signal handling", +-"[[-l] | [-s sigset]] | [pid | taskp] ...", ++"[[-l] | [-s sigset]] | [-g] [pid | taskp] ...", + " This command displays signal-handling data of one or more tasks. Multiple", + " task or PID numbers may be entered; if no arguments are entered, the signal", + " handling data of the current context will be displayed. The default display", + " shows:", + " ", +-" 1. Whether the task has an unblocked signal pending.", +-" 2. The contents of the \"signal\" and \"blocked\" sigset_t structures", +-" from the task_struct, both of which are represented as a 64-bit ", +-" hexadecimal value.", +-" 3. A formatted dump of the \"sig\" signal_struct structure referenced by", ++" 1. A formatted dump of the \"sig\" signal_struct structure referenced by", + " the task_struct. For each defined signal, it shows the sigaction", + " structure address, the signal handler, the signal sigset_t mask ", + " (also expressed as a 64-bit hexadecimal value), and the flags.", +-" 4. For each queued signal, if any, its signal number and associated", +-" siginfo structure address.", ++" 2. 
Whether the task has an unblocked signal pending.", ++" 3. The contents of the \"blocked\" and \"signal\" sigset_t structures", ++" from the task_struct/signal_struct, both of which are represented ", ++" as a 64-bit hexadecimal value.", ++" 4. For each queued signal, private and/or shared, if any, its signal", ++" number and associated siginfo structure address.", + " ", + " The -l option lists the signal numbers and their name(s). The -s option", + " translates a 64-bit hexadecimal value representing the contents of a", +@@ -2744,56 +2905,105 @@ + " ", + " pid a process PID.", + " taskp a hexadecimal task_struct pointer.", ++" -g displays signal information for all threads in a task's ", ++" thread group.", + " -l displays the defined signal numbers and names.", + " -s sigset translates a 64-bit hexadecimal value representing a sigset_t", + " into a list of signal names associated with the bits set.", + "\nEXAMPLES", +-" Dump the signal-handling data of PID 614:\n", +-" %s> sig 614", +-" PID: 614 TASK: c6f26000 CPU: 1 COMMAND: \"httpd\"", +-" SIGPENDING: no", +-" SIGNAL: 0000000000000000", +-" BLOCKED: 0000000000000000", +-" SIGNAL_STRUCT: c1913800 COUNT: 1", ++" Dump the signal-handling data of PID 8970:\n", ++" %s> sig 8970", ++" PID: 8970 TASK: f67d8560 CPU: 1 COMMAND: \"procsig\"", ++" SIGNAL_STRUCT: f6018680 COUNT: 1", + " SIG SIGACTION HANDLER MASK FLAGS ", +-" [1] c1913804 8057c98 0000000000000201 0 ", +-" [2] c1913818 8057c8c 0000000000000000 0 ", +-" [3] c191382c SIG_DFL 0000000000000000 0 ", +-" [4] c1913840 8057bd8 0000000000000000 80000000 (SA_RESETHAND)", +-" [5] c1913854 SIG_DFL 0000000000000000 0 ", +-" [6] c1913868 8057bd8 0000000000000000 80000000 (SA_RESETHAND)", +-" [7] c191387c 8057bd8 0000000000000000 80000000 (SA_RESETHAND)", +-" [8] c1913890 SIG_DFL 0000000000000000 0 ", +-" [9] c19138a4 SIG_DFL 0000000000000000 0 ", +-" [10] c19138b8 8057c98 0000000000000201 0 ", +-" [11] c19138cc 8057bd8 0000000000000000 80000000 (SA_RESETHAND)", +-" [12] c19138e0 SIG_DFL 0000000000000000 0 ", +-" [13] c19138f4 SIG_IGN 0000000000000000 0 ", +-" [14] c1913908 SIG_DFL 0000000000000000 0 ", +-" [15] c191391c 8057c8c 0000000000000000 0 ", +-" [16] c1913930 SIG_DFL 0000000000000000 0 ", +-" [17] c1913944 SIG_DFL 0000000000000000 0 ", +-" [18] c1913958 SIG_DFL 0000000000000000 0 ", +-" [19] c191396c SIG_DFL 0000000000000000 0 ", +-" [20] c1913980 SIG_DFL 0000000000000000 0 ", +-" [21] c1913994 SIG_DFL 0000000000000000 0 ", +-" [22] c19139a8 SIG_DFL 0000000000000000 0 ", +-" [23] c19139bc SIG_DFL 0000000000000000 0 ", +-" [24] c19139d0 SIG_DFL 0000000000000000 0 ", +-" [25] c19139e4 SIG_DFL 0000000000000000 0 ", +-" [26] c19139f8 SIG_DFL 0000000000000000 0 ", +-" [27] c1913a0c SIG_DFL 0000000000000000 0 ", +-" [28] c1913a20 SIG_DFL 0000000000000000 0 ", +-" [29] c1913a34 SIG_DFL 0000000000000000 0 ", +-" [30] c1913a48 SIG_DFL 0000000000000000 0 ", +-" [31] c1913a5c SIG_DFL 0000000000000000 0 ", +-" SIGQUEUE: (empty)", ++" [1] f7877684 SIG_DFL 0000000000000000 0 ", ++" [2] f7877698 SIG_DFL 0000000000000000 0 ", ++" ...", ++" [8] f7877710 SIG_DFL 0000000000000000 0 ", ++" [9] f7877724 SIG_DFL 0000000000000000 0 ", ++" [10] f7877738 804867a 0000000000000000 80000000 (SA_RESETHAND)", ++" [11] f787774c SIG_DFL 0000000000000000 0 ", ++" [12] f7877760 804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)", ++" [13] f7877774 SIG_DFL 0000000000000000 0 ", ++" ...", ++" [31] f78778dc SIG_DFL 0000000000000000 0 ", ++" [32] f78778f0 SIG_DFL 0000000000000000 0 ", ++" [33] f7877904 SIG_DFL 
0000000000000000 0 ", ++" [34] f7877918 804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)", ++" [35] f787792c SIG_DFL 0000000000000000 0 ", ++" [36] f7877940 SIG_DFL 0000000000000000 0 ", ++" ...", ++" [58] f7877af8 SIG_DFL 0000000000000000 0 ", ++" [59] f7877b0c SIG_DFL 0000000000000000 0 ", ++" [60] f7877b20 SIG_DFL 0000000000000000 0 ", ++" [61] f7877b34 SIG_DFL 0000000000000000 0 ", ++" [62] f7877b48 SIG_DFL 0000000000000000 0 ", ++" [63] f7877b5c SIG_DFL 0000000000000000 0 ", ++" [64] f7877b70 804867f 0000000000000000 10000004 (SA_SIGINFO|SA_RESTART)", ++" SIGPENDING: no", ++" BLOCKED: 8000000200000800", ++" PRIVATE_PENDING", ++" SIGNAL: 0000000200000800", ++" SIGQUEUE: SIG SIGINFO ", ++" 12 f51b9c84", ++" 34 f51b9594", ++" SHARED_PENDING", ++" SIGNAL: 8000000000000800", ++" SIGQUEUE: SIG SIGINFO ", ++" 12 f51b9188", ++" 64 f51b9d18", ++" 64 f51b9500", ++" ", ++" Dump the signal-handling data for all tasks in the thread group containing", ++" PID 2578:\n", ++" %s> sig -g 2578", ++" PID: 2387 TASK: f617d020 CPU: 0 COMMAND: \"slapd\"", ++" SIGNAL_STRUCT: f7dede00 COUNT: 6", ++" SIG SIGACTION HANDLER MASK FLAGS", ++" [1] c1f60c04 a258a7 0000000000000000 10000000 (SA_RESTART)", ++" [2] c1f60c18 a258a7 0000000000000000 10000000 (SA_RESTART)", ++" [3] c1f60c2c SIG_DFL 0000000000000000 0", ++" [4] c1f60c40 SIG_DFL 0000000000000000 0", ++" [5] c1f60c54 a258a7 0000000000000000 10000000 (SA_RESTART)", ++" [6] c1f60c68 SIG_DFL 0000000000000000 0", ++" [7] c1f60c7c SIG_DFL 0000000000000000 0", ++" [8] c1f60c90 SIG_DFL 0000000000000000 0", ++" [9] c1f60ca4 SIG_DFL 0000000000000000 0", ++" [10] c1f60cb8 a25911 0000000000000000 10000000 (SA_RESTART)", ++" ...", ++" [64] c1f610f0 SIG_DFL 0000000000000000 0", ++" SHARED_PENDING", ++" SIGNAL: 0000000000000000", ++" SIGQUEUE: (empty)", ++" ", ++" PID: 2387 TASK: f617d020 CPU: 0 COMMAND: \"slapd\"", ++" SIGPENDING: no", ++" BLOCKED: 0000000000000000", ++" PRIVATE_PENDING", ++" SIGNAL: 0000000000000000", ++" SIGQUEUE: (empty)", ++" ", ++" PID: 2392 TASK: f6175aa0 CPU: 0 COMMAND: \"slapd\"", ++" SIGPENDING: no", ++" BLOCKED: 0000000000000000", ++" PRIVATE_PENDING", ++" SIGNAL: 0000000000000000", ++" SIGQUEUE: (empty)", ++" ", ++" PID: 2523 TASK: f7cd4aa0 CPU: 1 COMMAND: \"slapd\"", ++" SIGPENDING: no", ++" BLOCKED: 0000000000000000", ++" PRIVATE_PENDING", ++" SIGNAL: 0000000000000000", ++" SIGQUEUE: (empty)", ++" ", ++" ...", + " ", + " Translate the sigset_t mask value, cut-and-pasted from the signal handling", + " data from signals 1 and 10 above:", + " ", +-" %s> sig -s 0000000000000201", +-" SIGHUP SIGUSR1", ++" %s> sig -s 800A000000000201", ++" SIGHUP SIGUSR1 SIGRTMAX-14 SIGRTMAX-12 SIGRTMAX", + " ", + " List the signal numbers and their names:", + " ", +@@ -2829,6 +3039,40 @@ + " [29] SIGIO/SIGPOLL", + " [30] SIGPWR", + " [31] SIGSYS", ++" [32] SIGRTMIN", ++" [33] SIGRTMIN+1", ++" [34] SIGRTMIN+2", ++" [35] SIGRTMIN+3", ++" [36] SIGRTMIN+4", ++" [37] SIGRTMIN+5", ++" [38] SIGRTMIN+6", ++" [39] SIGRTMIN+7", ++" [40] SIGRTMIN+8", ++" [41] SIGRTMIN+9", ++" [42] SIGRTMIN+10", ++" [43] SIGRTMIN+11", ++" [44] SIGRTMIN+12", ++" [45] SIGRTMIN+13", ++" [46] SIGRTMIN+14", ++" [47] SIGRTMIN+15", ++" [48] SIGRTMIN+16", ++" [49] SIGRTMAX-15", ++" [50] SIGRTMAX-14", ++" [51] SIGRTMAX-13", ++" [52] SIGRTMAX-12", ++" [53] SIGRTMAX-11", ++" [54] SIGRTMAX-10", ++" [55] SIGRTMAX-9", ++" [56] SIGRTMAX-8", ++" [57] SIGRTMAX-7", ++" [58] SIGRTMAX-6", ++" [59] SIGRTMAX-5", ++" [60] SIGRTMAX-4", ++" [61] SIGRTMAX-3", ++" [62] SIGRTMAX-2", ++" [63] SIGRTMAX-1", ++" 
[64] SIGRTMAX", ++ + + NULL + }; +@@ -2836,8 +3080,8 @@ + char *help_struct[] = { + "struct", + "structure contents", +-"struct_name[.member] [[-o][-l offset][-r] [address | symbol] [count]]\n" +-" [-c count]", ++"struct_name[.member[,member]][-o][-l offset][-rfu] [address | symbol]\n" ++" [count | -c count]", + " This command displays either a structure definition, or a formatted display", + " of the contents of a structure at a specified address. When no address is", + " specified, the structure definition is shown along with the structure size.", +@@ -2845,7 +3089,8 @@ + " the scope of the data displayed to that particular member; when no address", + " is specified, the member's offset and definition are shown.\n", + " struct_name name of a C-code structure used by the kernel.", +-" .member name of a structure member.", ++" .member name of a structure member; to display multiple members of a", ++" structure, use a comma-separated list of members.", + " -o show member offsets when displaying structure definitions.", + " -l offset if the address argument is a pointer to a list_head structure", + " that is embedded in the target data structure, the offset", +@@ -2854,6 +3099,9 @@ + " 1. in \"structure.member\" format.", + " 2. a number of bytes. ", + " -r raw dump of structure data.", ++" -f address argument is a dumpfile offset.", ++" -u address argument is a user virtual address in the current", ++" context.", + " address hexadecimal address of a structure; if the address points", + " to an embedded list_head structure contained within the", + " target data structure, then the \"-l\" option must be used.", +@@ -2944,6 +3192,21 @@ + " struct mm_struct {", + " [12] pgd_t *pgd;", + " }\n", ++" Display the flags and virtual members of 4 contigous page structures", ++" in the mem_map page structure array:\n", ++" %s> page.flags,virtual c101196c 4", ++" flags = 0x8000,", ++" virtual = 0xc04b0000", ++" ", ++" flags = 0x8000,", ++" virtual = 0xc04b1000", ++" ", ++" flags = 0x8000,", ++" virtual = 0xc04b2000", ++" ", ++" flags = 0x8000,", ++" virtual = 0xc04b3000", ++" ", + " Display the array of tcp_sl_timer structures declared by tcp_slt_array[]:\n", + " %s> struct tcp_sl_timer tcp_slt_array 4", + " struct tcp_sl_timer {", +@@ -3052,8 +3315,8 @@ + char *help_union[] = { + "union", + "union contents", +-"union_name[.member] [[-o][-l offset][-r] [address | symbol] [count]]\n" +-" [-c count]", ++"union_name[.member[,member]] [-o][-l offset][-rfu] [address | symbol]\n" ++" [count | -c count]", + " This command displays either a union definition, or a formatted display", + " of the contents of a union at a specified address. When no address is", + " specified, the union definition is shown along with the union size.", +@@ -3061,7 +3324,8 @@ + " the scope of the data displayed to that particular member; when no address", + " is specified, the member's offset (always 0) and definition are shown.\n", + " union_name name of a C-code union used by the kernel.", +-" .member name of a union member.", ++" .member name of a union member; to display multiple members of a", ++" union, use a comma-separated list of members.", + " -o show member offsets when displaying union definitions.", + " (always 0)", + " -l offset if the address argument is a pointer to a list_head structure", +@@ -3071,6 +3335,9 @@ + " 1. in \"structure.member\" format.", + " 2. a number of bytes. 
", + " -r raw dump of union data.", ++" -f address argument is a dumpfile offset.", ++" -u address argument is a user virtual address in the current", ++" context.", + " address hexadecimal address of a union; if the address points", + " to an embedded list_head structure contained within the", + " target union structure, then the \"-l\" option must be used.", +@@ -3152,7 +3419,7 @@ + char *help_mod[] = { + "mod", + "module information and loading of symbols and debugging data", +-"[ -s module [objfile] | -d module | -S [directory] | -D | -r ] ", ++"[ -s module [objfile] | -d module | -S [directory] | -D | -r | -o ] ", + " With no arguments, this command displays basic information of the currently", + " installed modules, consisting of the module address, name, size, the", + " object file name (if known), and whether the module was compiled with", +@@ -3203,6 +3470,7 @@ + " -r Reinitialize module data. All currently-loaded symbolic", + " and debugging data will be deleted, and the installed", + " module list will be updated (live system only).", ++" -o Load module symbols with old mechanism.", + " ", + " After symbolic and debugging data have been loaded, backtraces and text", + " disassembly will be displayed appropriately. Depending upon the processor", +@@ -3322,9 +3590,10 @@ + char *help__list[] = { + "list", + "linked list", +-"[[-o] offset] [-e end] [-s struct[.member]] [-H] start", ++"[[-o] offset] [-e end] [-s struct[.member[,member]]] [-H] start", + " This command dumps the contents of a linked list. The entries in a linked", +-" are typically data structures that are tied together in one of two formats:", ++" list are typically data structures that are tied together in one of two", ++" formats:", + " ", + " 1. A starting address points to a data structure; that structure contains", + " a member that is a pointer to the next structure, and so on. The list", +@@ -3335,7 +3604,7 @@ + " c. a pointer to the first item pointed to by the start address.", + " d. a pointer to its containing structure.", + " ", +-" 2. Many Linux lists are linked via embedded list_head structures contained ", ++" 2. Most Linux lists are linked via embedded list_head structures contained ", + " within the data structures in the list. The linked list is headed by an", + " external LIST_HEAD, which is simply a list_head structure initialized to", + " point to itself, signifying that the list is empty:", +@@ -3370,15 +3639,17 @@ + " entered.", + " -s struct For each address in list, format and print as this type of", + " structure; use the \"struct.member\" format in order to display", +-" a particular member of the structure.", ++" a particular member of the structure. 
To display multiple", ++" members of a structure, use a comma-separated list of members.", + " ", + " The meaning of the \"start\" argument, which can be expressed either", + " symbolically or in hexadecimal format, depends upon whether the -H option", + " is pre-pended or not:", + " ", + " start The address of the first structure in the list.", +-" -H start The address of the LIST_HEAD structure, typically expressed", +-" symbolically.", ++" -H start The address of the list_head structure, typically expressed", ++" symbolically, but also can be an expression evaluating to the", ++" address of the starting list_head structure.", + "\nEXAMPLES", + " Note that each task_struct is linked to its parent's task_struct via the", + " p_pptr member:", +@@ -3416,31 +3687,66 @@ + " The list of currently-registered file system types are headed up by a", + " struct file_system_type pointer named \"file_systems\", and linked by", + " the \"next\" field in each file_system_type structure. The following", +-" sequence displays the address and name of each registered file system type:", ++" sequence displays the structure address followed by the name and ", ++" fs_flags members of each registered file system type:", + " ", + " %s> p file_systems", +-" file_systems = $2 = (struct file_system_type *) 0xc02ebea0", +-" %s> list file_system_type.next -s file_system_type.name 0xc02ebea0", +-" c02ebea0", +-" name = 0xc0280372 \"proc\", ", +-" c02fd4a0", +-" name = 0xc02bf348 \"sockfs\", ", +-" c02eb544", +-" name = 0xc027c25a \"tmpfs\", ", +-" c02eb52c", +-" name = 0xc027c256 \"shm\", ", +-" c02ebbe0", +-" name = 0xc027e054 \"pipefs\", ", +-" c02ec9c0", +-" name = 0xc0283c13 \"ext2\", ", +-" c02ecaa8", +-" name = 0xc0284567 \"iso9660\", ", +-" c02ecc08", +-" name = 0xc0284cf5 \"nfs\", ", +-" c02edc60", +-" name = 0xc028d832 \"autofs\", ", +-" c02edfa0", +-" name = 0xc028e1e0 \"devpts\"", ++" file_systems = $1 = (struct file_system_type *) 0xc03adc90", ++" %s> list file_system_type.next -s file_system_type.name,fs_flags 0xc03adc90", ++" c03adc90", ++" name = 0xc02c05c8 \"rootfs\",", ++" fs_flags = 0x30,", ++" c03abf94", ++" name = 0xc02c0319 \"bdev\",", ++" fs_flags = 0x10,", ++" c03acb40", ++" name = 0xc02c07c4 \"proc\",", ++" fs_flags = 0x8,", ++" c03e9834", ++" name = 0xc02cfc83 \"sockfs\",", ++" fs_flags = 0x10,", ++" c03ab8e4", ++" name = 0xc02bf512 \"tmpfs\",", ++" fs_flags = 0x20,", ++" c03ab8c8", ++" name = 0xc02c3d6b \"shm\",", ++" fs_flags = 0x20,", ++" c03ac394", ++" name = 0xc02c03cf \"pipefs\",", ++" fs_flags = 0x10,", ++" c03ada74", ++" name = 0xc02c0e6b \"ext2\",", ++" fs_flags = 0x1,", ++" c03adc74", ++" name = 0xc02c0e70 \"ramfs\",", ++" fs_flags = 0x20,", ++" c03ade74", ++" name = 0xc02c0e76 \"hugetlbfs\",", ++" fs_flags = 0x20,", ++" c03adf8c", ++" name = 0xc02c0f84 \"iso9660\",", ++" fs_flags = 0x1,", ++" c03aec14", ++" name = 0xc02c0ffd \"devpts\",", ++" fs_flags = 0x8,", ++" c03e93f4", ++" name = 0xc02cf1b9 \"pcihpfs\",", ++" fs_flags = 0x28,", ++" e0831a14", ++" name = 0xe082f89f \"ext3\",", ++" fs_flags = 0x1,", ++" e0846af4", ++" name = 0xe0841ac6 \"usbdevfs\",", ++" fs_flags = 0x8,", ++" e0846b10", ++" name = 0xe0841acf \"usbfs\",", ++" fs_flags = 0x8,", ++" e0992370", ++" name = 0xe099176c \"autofs\",", ++" fs_flags = 0x0,", ++" e2dcc030", ++" name = 0xe2dc8849 \"nfs\",", ++" fs_flags = 0x48000,", + " ", + " In some kernels, the system run queue is a linked list headed up by the", + " \"runqueue_head\", which is defined like so:", +@@ -3555,7 +3861,7 @@ + char *help_kmem[] = { + "kmem", 
+ "kernel memory", +-"[-f|-F|-p|-c|-C|-i|-s|-S|-v|-n] [-[l|L][a|i]] [slab-name] [[-P] address]", ++"[-f|-F|-p|-c|-C|-i|-s|-S|-v|-V|-n|-z] [-[l|L][a|i]] [slab] [[-P] address]", + " This command displays information about the use of kernel memory.\n", + " -f displays the contents of the system free memory headers.", + " also verifies that the page count equals nr_free_pages.", +@@ -3567,23 +3873,33 @@ + " -i displays general memory usage information", + " -s displays basic kmalloc() slab data.", + " -S displays all kmalloc() slab data, including all slab objects,", +-" and whether each object is in use or is free.", ++" and whether each object is in use or is free. If CONFIG_SLUB,", ++" slab data for each per-cpu slab is displayed, along with the", ++" address of each kmem_cache_node, its count of full and partial", ++" slabs, and a list of all tracked slabs.", + " -v displays the vmlist entries.", ++" -V displays the kernel vm_stat table if it exists, the cumulative", ++" page_states counter values if they exist, and/or the cumulative", ++" vm_event_states counter values if they exist.", + " -n display memory node data (if supported).", ++" -z displays per-zone memory statistics.", + " -la walks through the active_list and verifies nr_active_pages.", + " -li walks through the inactive_list and verifies nr_inactive_pages.", + " -La same as -la, but also dumps each page in the active_list.", + " -Li same as -li, but also dumps each page in the inactive_list.", +-" slab-name when used with -s or -S, limits the command to only the slab cache", +-" of name \"slab-name\". If the slab-name argument is \"list\", then", ++" slab when used with -s or -S, limits the command to only the slab cache", ++" of name \"slab\". If the slab argument is \"list\", then", + " all slab cache names and addresses are listed.", + " -P declares that the following address argument is a physical address.", + " address when used without any flag, the address can be a kernel virtual,", + " or physical address; a search is made through the symbol table,", + " the kmalloc() slab subsystem, the free list, the page_hash_table,", +-" the vmalloc() vmlist subsystem, and the mem_map array. If found", +-" in any of those areas, the information will be dumped in the", +-" same manner as if the flags were used.", ++" the vmalloc() vmlist subsystem, the current set of task_structs", ++" and kernel stacks, and the mem_map array. 
If found in any of", ++" those areas, the information will be dumped in the same manner as", ++" if the location-specific flags were used; if contained within a", ++" curent task_struct or kernel stack, that task's context will be", ++" displayed.", + " address when used with -s or -S, searches the kmalloc() slab subsystem", + " for the slab containing of this virtual address, showing whether", + " it is in use or free.", +@@ -3781,6 +4097,24 @@ + " c2f8ab60 c8095000 - c8097000 8192", + " c2f519e0 c8097000 - c8099000 8192", + " ", ++" Dump the vm_table contents:\n", ++" %s> kmem -V", ++" NR_ANON_PAGES: 38989", ++" NR_FILE_MAPPED: 3106", ++" NR_FILE_PAGES: 169570", ++" NR_SLAB: 32439", ++" NR_PAGETABLE: 1181", ++" NR_FILE_DIRTY: 4633", ++" NR_WRITEBACK: 0", ++" NR_UNSTABLE_NFS: 0", ++" NR_BOUNCE: 0", ++" NUMA_HIT: 63545992", ++" NUMA_MISS: 0", ++" NUMA_FOREIGN: 0", ++" NUMA_INTERLEAVE_HIT: 24002", ++" NUMA_LOCAL: 63545992", ++" NUMA_OTHER: 0", ++" ", + " Determine (and verify) the page cache size:\n", + " %s> kmem -c", + " page_cache_size: 18431 (verified)", +@@ -3979,18 +4313,21 @@ + char *help_dis[] = { + "dis", + "disassemble", +-"[-r][-l][-u] [address | symbol | (expression)] [count]", ++"[-r][-l][-u][-b [num]] [address | symbol | (expression)] [count]", + " This command disassembles source code instructions starting (or ending) at", + " a text address that may be expressed by value, symbol or expression:\n", + " -r (reverse) displays all instructions from the start of the ", + " routine up to and including the designated address.", + " -l displays source code line number data in addition to the ", + " disassembly output.", +-" -u address is a user virtual address; otherwise the address is ", +-" assumed to be a kernel virtual address. If this option is", +-" used, then -r and -l are ignored.", ++" -u address is a user virtual address in the current context;", ++" otherwise the address is assumed to be a kernel virtual address.", ++" If this option is used, then -r and -l are ignored.", ++" -b [num] modify the pre-calculated number of encoded bytes to skip after", ++" a kernel BUG (\"ud2a\") instruction; with no argument, displays", ++" the current number of bytes being skipped. (x86 and x86_64 only)", + " address starting hexadecimal text address.", +-" symbol symbol of starting text address. On PPC64, the symbol", ++" symbol symbol of starting text address. On ppc64, the symbol", + " preceded by '.' 
is used.", + " (expression) expression evaluating to a starting text address.", + " count the number of instructions to be disassembled (default is 1).", +@@ -4419,10 +4756,11 @@ + " Display various network related data:\n", + " -a display the ARP cache.", + " -s display open network socket/sock addresses, their family and type,", +-" and their source and destination addresses and ports.", ++" and for INET and INET6 families, their source and destination", ++" addresses and ports.", + " -S displays open network socket/sock addresses followed by a dump", + " of both structures.", +-" -n addr translates an IP address expressed as a decimal or hexadecimal ", ++" -n addr translates an IPv4 address expressed as a decimal or hexadecimal", + " value into a standard numbers-and-dots notation.", + " -R ref socket or sock address, or file descriptor.", + " pid a process PID.", +@@ -4450,8 +4788,8 @@ + " Display the sockets for PID 2517, using both -s and -S output formats:\n", + " %s> net -s 2517", + " PID: 2517 TASK: c1598000 CPU: 1 COMMAND: \"rlogin\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 3 c57375dc c1ff1850 INET:STREAM 10.1.8.20:1023 10.1.16.62:513", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 3 c57375dc c1ff1850 INET:STREAM 10.1.8.20-1023 10.1.16.62-513", + " ", + " %s> net -S 2517", + " PID: 2517 TASK: c1598000 CPU: 1 COMMAND: \"rlogin\"", +@@ -4497,52 +4835,52 @@ + " From \"foreach\", find all tasks with references to socket c08ea3cc:\n", + " %s> foreach net -s -R c08ea3cc", + " PID: 2184 TASK: c7026000 CPU: 1 COMMAND: \"klines.kss\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 2200 TASK: c670a000 CPU: 1 COMMAND: \"kpanel\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 2201 TASK: c648a000 CPU: 1 COMMAND: \"kbgndwm\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 19294 TASK: c250a000 CPU: 0 COMMAND: \"prefdm\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 2194 TASK: c62dc000 CPU: 1 COMMAND: \"kaudioserver\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 2195 TASK: c6684000 CPU: 1 COMMAND: \"maudio\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 2196 TASK: c6b58000 CPU: 1 COMMAND: \"kwmsound\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 
c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 2197 TASK: c6696000 CPU: 0 COMMAND: \"kfm\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 2199 TASK: c65ec000 CPU: 0 COMMAND: \"krootwm\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 694 TASK: c1942000 CPU: 0 COMMAND: \"prefdm\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 698 TASK: c6a2c000 CPU: 1 COMMAND: \"X\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + " PID: 2159 TASK: c4a5a000 CPU: 1 COMMAND: \"kwm\"", +-" FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT", +-" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0:1026 0.0.0.0:0", ++" FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT", ++" 5 c08ea3cc c50d3c80 INET:STREAM 0.0.0.0-1026 0.0.0.0-0", + " ", + NULL + }; +@@ -4584,21 +4922,22 @@ + void + cmd_usage(char *cmd, int helpflag) + { +- int i; +- int found; +- char **p; ++ char **p, *scroll_command; + struct command_table_entry *cp; + char buf[BUFSIZE]; +- struct alias_data *ad; +- FILE *less; ++ FILE *scroll; ++ int i; + +- if (helpflag & PIPE_TO_LESS) { +- if ((less = popen("/usr/bin/less", "w")) != NULL) +- fp = less; +- helpflag &= ~PIPE_TO_LESS; +- } else +- less = NULL; +- ++ if (helpflag & PIPE_TO_SCROLL) { ++ if ((scroll_command = setup_scroll_command()) && ++ (scroll = popen(scroll_command, "w"))) ++ fp = scroll; ++ else ++ scroll = NULL; ++ } else { ++ scroll_command = NULL; ++ scroll = NULL; ++ } + + if (STREQ(cmd, "copying")) { + display_copying_info(); +@@ -4641,46 +4980,50 @@ + goto done_usage; + } + +- found = FALSE; +-retry: +- if ((cp = get_command_table_entry(cmd))) { +- if ((p = cp->help_data)) +- found = TRUE; +- } ++ /* look up command, possibly through an alias */ ++ for (;;) { ++ struct alias_data *ad; ++ ++ cp = get_command_table_entry(cmd); ++ if (cp != NULL) ++ break; /* found command */ ++ ++ /* try for an alias */ ++ ad = is_alias(cmd); ++ if (ad == NULL) ++ break; /* neither command nor alias */ + +- /* +- * Check for alias names or gdb commands. 
+- */ +- if (!found) { +- if ((ad = is_alias(cmd))) { +- cmd = ad->args[0]; +- goto retry; +- } ++ cmd = ad->args[0]; ++ cp = get_command_table_entry(cmd); ++ } + +- if (helpflag == SYNOPSIS) { +- fprintf(fp, +- "No usage data for the \"%s\" command is available.\n", ++ if (cp == NULL || (p = cp->help_data) == NULL) { ++ if (helpflag & SYNOPSIS) { ++ fprintf(fp, ++ "No usage data for the \"%s\" command" ++ " is available.\n", + cmd); + RESTART(); + } + +- if (STREQ(pc->curcmd, "help")) { +- if (cp) +- fprintf(fp, +- "No help data for the \"%s\" command is available.\n", ++ if (helpflag & MUST_HELP) { ++ if (cp || !(pc->flags & GDB_INIT)) ++ fprintf(fp, ++ "No help data for the \"%s\" command" ++ " is available.\n", + cmd); + else if (!gdb_pass_through(concat_args(buf, 0, FALSE), + NULL, GNU_RETURN_ON_ERROR)) + fprintf(fp, +- "No help data for \"%s\" is available.\n", +- cmd); ++ "No help data for \"%s\" is available.\n", ++ cmd); + } + goto done_usage; + } + + p++; + +- if (helpflag == SYNOPSIS) { ++ if (helpflag & SYNOPSIS) { + p++; + fprintf(fp, "Usage: %s ", cmd); + fprintf(fp, *p, pc->program_name, pc->program_name); +@@ -4711,10 +5054,12 @@ + + done_usage: + +- if (less) { +- fflush(less); +- pclose(less); ++ if (scroll) { ++ fflush(scroll); ++ pclose(scroll); + } ++ if (scroll_command) ++ FREEBUF(scroll_command); + } + + +@@ -4812,7 +5157,9 @@ + "The default output radix for gdb output and certain %s commands is", + "hexadecimal. This can be changed to decimal by entering \"set radix 10\"", + "or the alias \"dec\". It can be reverted back to hexadecimal by entering", +-"\"set radix 16\" or the alias \"hex\".", ++"\"set radix 16\" or the alias \"hex\".\n", ++"To execute an external shell command, precede the command with an \"!\".", ++"To escape to a shell, enter \"!\" alone.", + " ", + NULL + }; +@@ -4854,10 +5201,13 @@ + static + char *version_info[] = { + +-"Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc.", +-"Copyright (C) 2004, 2005 IBM Corporation", +-"Copyright (C) 1999-2005 Hewlett-Packard Co", +-"Copyright (C) 1999, 2002 Silicon Graphics, Inc.", ++"Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc.", ++"Copyright (C) 2004, 2005, 2006 IBM Corporation", ++"Copyright (C) 1999-2006 Hewlett-Packard Co", ++"Copyright (C) 2005, 2006 Fujitsu Limited", ++"Copyright (C) 2006, 2007 VA Linux Systems Japan K.K.", ++"Copyright (C) 2005 NEC Corporation", ++"Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc.", + "Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.", + "This program is free software, covered by the GNU General Public License,", + "and you are welcome to change it and/or distribute copies of it under", +--- crash/gdb-6.1.patch.orig 2008-01-17 15:17:21.000000000 -0500 ++++ crash/gdb-6.1.patch 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,87 @@ ++--- gdb-6.1.orig/bfd/coff-alpha.c +++++ gdb-6.1/bfd/coff-alpha.c ++@@ -1455,7 +1455,7 @@ alpha_relocate_section (output_bfd, info ++ amt = sizeof (struct ecoff_section_tdata); ++ lita_sec_data = ((struct ecoff_section_tdata *) ++ bfd_zalloc (input_bfd, amt)); ++- ecoff_section_data (input_bfd, lita_sec) = lita_sec_data; +++ lita_sec->used_by_bfd = lita_sec_data; ++ } ++ ++ if (lita_sec_data->gp != 0) ++--- gdb-6.1.orig/sim/ppc/debug.c +++++ gdb-6.1/sim/ppc/debug.c ++@@ -28,6 +28,7 @@ ++ #ifdef HAVE_STDLIB_H ++ #include ++ #endif +++#include ++ ++ int ppc_trace[nr_trace_options]; ++ ++--- gdb-6.1.orig/gdb/remote.c +++++ gdb-6.1/gdb/remote.c ++@@ -3445,7 +3445,7 @@ remote_store_registers (int 
regnum) ++ { ++ int i; ++ regs = alloca (rs->sizeof_g_packet); ++- memset (regs, rs->sizeof_g_packet, 0); +++ memset (regs, 0, rs->sizeof_g_packet); ++ for (i = 0; i < NUM_REGS + NUM_PSEUDO_REGS; i++) ++ { ++ struct packet_reg *r = &rs->regs[i]; ++--- gdb-6.1.orig/gdb/std-regs.c +++++ gdb-6.1/gdb/std-regs.c ++@@ -61,7 +61,7 @@ value_of_builtin_frame_reg (struct frame ++ val = allocate_value (builtin_type_frame_reg); ++ VALUE_LVAL (val) = not_lval; ++ buf = VALUE_CONTENTS_RAW (val); ++- memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0); +++ memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val))); ++ /* frame.base. */ ++ if (frame != NULL) ++ ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf, ++@@ -87,7 +87,7 @@ value_of_builtin_frame_fp_reg (struct fr ++ struct value *val = allocate_value (builtin_type_void_data_ptr); ++ char *buf = VALUE_CONTENTS_RAW (val); ++ if (frame == NULL) ++- memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0); +++ memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val))); ++ else ++ ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf, ++ get_frame_base_address (frame)); ++@@ -105,7 +105,7 @@ value_of_builtin_frame_pc_reg (struct fr ++ struct value *val = allocate_value (builtin_type_void_data_ptr); ++ char *buf = VALUE_CONTENTS_RAW (val); ++ if (frame == NULL) ++- memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0); +++ memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val))); ++ else ++ ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf, ++ get_frame_pc (frame)); ++--- gdb-6.1.orig/gdb/dwarf2-frame.c +++++ gdb-6.1/gdb/dwarf2-frame.c ++@@ -1353,7 +1353,9 @@ decode_frame_entry_1 (struct comp_unit * ++ else if (*augmentation == 'P') ++ { ++ /* Skip. */ ++- buf += size_of_encoded_value (*buf++); +++// buf += size_of_encoded_value (*buf++); +++ buf += size_of_encoded_value(*buf); +++ buf++; ++ augmentation++; ++ } ++ ++--- gdb-6.1/opcodes/i386-dis.c.orig +++++ gdb-6.1/opcodes/i386-dis.c ++@@ -2092,6 +2092,10 @@ print_insn (bfd_vma pc, disassemble_info ++ dp = &dis386_twobyte[*++codep]; ++ need_modrm = twobyte_has_modrm[*codep]; ++ uses_SSE_prefix = twobyte_uses_SSE_prefix[*codep]; +++ if (dp->name && strcmp(dp->name, "ud2a") == 0) { +++ extern int kernel_BUG_encoding_bytes(void); +++ codep += kernel_BUG_encoding_bytes(); +++ } ++ } ++ else ++ { +--- crash/diskdump.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/diskdump.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,16 +1,16 @@ + /* + * diskdump.c + * +- * NOTE: The Red Hat diskdump module currently creates +- * vmcore dumpfiles that are identical to those made +- * by the Red Hat netdump module, and therefore the +- * dumpfile is recognized as such. But just in case +- * there's ever a divergence, this file is being kept +- * in place, along with the DISKDUMP-related #define's +- * and their usage throughout the crash sources. ++ * The diskdump module optionally creates either ELF vmcore ++ * dumpfiles, or compressed dumpfiles derived from the LKCD format. ++ * In the case of ELF vmcore files, since they are identical to ++ * netdump dumpfiles, the facilities in netdump.c are used. For ++ * compressed dumpfiles, the facilities in this file are used. + * +- * Copyright (C) 2004, 2005 David Anderson +- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. 
++ * Copyright (C) 2005 FUJITSU LIMITED ++ * Copyright (C) 2005 NEC Corporation + * + * This software may be freely redistributed under the terms of the + * GNU General Public License. +@@ -18,22 +18,292 @@ + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +- * +- * Author: David Anderson + */ + + #include "defs.h" + #include "diskdump.h" + ++#define BITMAP_SECT_LEN 4096 ++ + struct diskdump_data { + ulong flags; /* DISKDUMP_LOCAL, plus anything else... */ + int dfd; /* dumpfile file descriptor */ + FILE *ofp; /* fprintf(dd->ofp, "xxx"); */ + int machine_type; /* machine type identifier */ ++ ++ /* header */ ++ struct disk_dump_header *header; ++ struct disk_dump_sub_header *sub_header; ++ struct kdump_sub_header *sub_header_kdump; ++ ++ size_t data_offset; ++ int block_size; ++ int block_shift; ++ char *bitmap; ++ int bitmap_len; ++ char *dumpable_bitmap; ++ int byte, bit; ++ char *compressed_page; /* copy of compressed page data */ ++ char *curbufptr; /* ptr to uncompressed page buffer */ ++ ++ /* page cache */ ++ struct page_cache_hdr { /* header for each cached page */ ++ uint32_t pg_flags; ++ uint64_t pg_addr; ++ char *pg_bufptr; ++ ulong pg_hit_count; ++ } page_cache_hdr[DISKDUMP_CACHED_PAGES]; ++ char *page_cache_buf; /* base of cached buffer pages */ ++ int evict_index; /* next page to evict */ ++ ulong evictions; /* total evictions done */ ++ ulong cached_reads; ++ ulong *valid_pages; ++ ulong accesses; + }; + + static struct diskdump_data diskdump_data = { 0 }; + static struct diskdump_data *dd = &diskdump_data; ++static int get_dump_level(void); ++ ++ulong *diskdump_flags = &diskdump_data.flags; ++ ++static inline int get_bit(char *map, int byte, int bit) ++{ ++ return map[byte] & (1<bitmap, nr >> 3, nr & 7); ++} ++ ++static inline int page_is_dumpable(unsigned int nr) ++{ ++ return dd->dumpable_bitmap[nr>>3] & (1 << (nr & 7)); ++} ++ ++static inline int dump_is_partial(const struct disk_dump_header *header) ++{ ++ return header->bitmap_blocks >= ++ divideup(divideup(header->max_mapnr, 8), dd->block_size) * 2; ++} ++ ++static int open_dump_file(char *file) ++{ ++ int fd; ++ ++ fd = open(file, O_RDONLY); ++ if (fd < 0) { ++ error(INFO, "diskdump / compressed kdump: unable to open dump file %s", file); ++ return FALSE; ++ } ++ dd->dfd = fd; ++ return TRUE; ++} ++ ++static int read_dump_header(char *file) ++{ ++ struct disk_dump_header *header = NULL; ++ struct disk_dump_sub_header *sub_header = NULL; ++ struct kdump_sub_header *sub_header_kdump = NULL; ++ int bitmap_len; ++ const int block_size = (int)sysconf(_SC_PAGESIZE); ++ off_t offset; ++ const off_t failed = (off_t)-1; ++ ulong pfn; ++ int i, j, max_sect_len; ++ ++ if (block_size < 0) ++ return FALSE; ++ ++ if ((header = malloc(block_size)) == NULL) ++ error(FATAL, "diskdump / compressed kdump: cannot malloc block_size buffer\n"); ++ ++ if (lseek(dd->dfd, 0, SEEK_SET) == failed) { ++ if (CRASHDEBUG(1)) ++ error(INFO, "diskdump / compressed kdump: cannot lseek dump header\n"); ++ goto err; ++ } ++ ++ if (read(dd->dfd, header, block_size) < block_size) { ++ if (CRASHDEBUG(1)) ++ error(INFO, "diskdump / compressed kdump: cannot read dump header\n"); ++ goto err; ++ } ++ ++ /* validate dump header */ ++ if (!memcmp(header->signature, DISK_DUMP_SIGNATURE, ++ sizeof(header->signature))) { ++ dd->flags |= DISKDUMP_LOCAL; ++ } else if (!memcmp(header->signature, KDUMP_SIGNATURE, ++ 
sizeof(header->signature))) { ++ dd->flags |= KDUMP_CMPRS_LOCAL; ++ if (header->header_version >= 1) ++ dd->flags |= ERROR_EXCLUDED; ++ } else { ++ if (CRASHDEBUG(1)) ++ error(INFO, ++ "diskdump / compressed kdump: dump does not have panic dump header\n"); ++ goto err; ++ } ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "%s: header->utsname.machine: %s\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump", ++ header->utsname.machine); ++ ++ if (STRNEQ(header->utsname.machine, "i686") && ++ machine_type_mismatch(file, "X86", NULL, 0)) ++ goto err; ++ else if (STRNEQ(header->utsname.machine, "x86_64") && ++ machine_type_mismatch(file, "X86_64", NULL, 0)) ++ goto err; ++ else if (STRNEQ(header->utsname.machine, "ia64") && ++ machine_type_mismatch(file, "IA64", NULL, 0)) ++ goto err; ++ else if (STRNEQ(header->utsname.machine, "ppc64") && ++ machine_type_mismatch(file, "PPC64", NULL, 0)) ++ goto err; ++ ++ if (header->block_size != block_size) { ++ error(INFO, "%s: block size in the dump header does not match" ++ " with system page size\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); ++ goto err; ++ } ++ dd->block_size = block_size; ++ dd->block_shift = ffs(block_size) - 1; ++ ++ if (sizeof(*header) + sizeof(void *) * header->nr_cpus > block_size || ++ header->nr_cpus <= 0) { ++ error(INFO, "%s: invalid nr_cpus value: %d\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump", ++ header->nr_cpus); ++ goto err; ++ } ++ ++ /* read sub header */ ++ offset = (off_t)block_size; ++ if (lseek(dd->dfd, offset, SEEK_SET) == failed) { ++ error(INFO, "%s: cannot lseek dump sub header\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); ++ ++ goto err; ++ } ++ ++ if (DISKDUMP_VALID()) { ++ if ((sub_header = malloc(block_size)) == NULL) ++ error(FATAL, "diskdump: cannot malloc sub_header buffer\n"); ++ ++ if (read(dd->dfd, sub_header, block_size) ++ < block_size) { ++ error(INFO, "diskdump: cannot read dump sub header\n"); ++ goto err; ++ } ++ dd->sub_header = sub_header; ++ } else if (KDUMP_CMPRS_VALID()) { ++ if ((sub_header_kdump = malloc(block_size)) == NULL) ++ error(FATAL, "compressed kdump: cannot malloc sub_header_kdump buffer\n"); ++ ++ if (read(dd->dfd, sub_header_kdump, block_size) ++ < block_size) { ++ error(INFO, "compressed kdump: cannot read dump sub header\n"); ++ goto err; ++ } ++ dd->sub_header_kdump = sub_header_kdump; ++ } ++ ++ /* read memory bitmap */ ++ bitmap_len = block_size * header->bitmap_blocks; ++ dd->bitmap_len = bitmap_len; ++ ++ offset = (off_t)block_size * (1 + header->sub_hdr_size); ++ if (lseek(dd->dfd, offset, SEEK_SET) == failed) { ++ error(INFO, "%s: cannot lseek memory bitmap\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); ++ ++ goto err; ++ } ++ ++ if ((dd->bitmap = malloc(bitmap_len)) == NULL) ++ error(FATAL, "%s: cannot malloc bitmap buffer\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); ++ ++ dd->dumpable_bitmap = calloc(bitmap_len, 1); ++ if (read(dd->dfd, dd->bitmap, bitmap_len) < bitmap_len) { ++ error(INFO, "%s: cannot read memory bitmap\n", ++ DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump"); ++ goto err; ++ } ++ ++ if (dump_is_partial(header)) ++ memcpy(dd->dumpable_bitmap, dd->bitmap + bitmap_len/2, ++ bitmap_len/2); ++ else ++ memcpy(dd->dumpable_bitmap, dd->bitmap, bitmap_len); ++ ++ dd->data_offset ++ = (1 + header->sub_hdr_size + header->bitmap_blocks) ++ * header->block_size; ++ ++ dd->header = header; ++ ++ if (machine_type("X86")) ++ dd->machine_type = EM_386; ++ else if (machine_type("X86_64")) ++ dd->machine_type = EM_X86_64; ++ else if (machine_type("IA64")) ++ dd->machine_type = EM_IA_64; ++ else if (machine_type("PPC64")) ++ dd->machine_type = EM_PPC64; ++ else { ++ error(INFO, "%s: unsupported machine type: %s\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump", ++ MACHINE_TYPE); ++ goto err; ++ } ++ ++ max_sect_len = divideup(header->max_mapnr, BITMAP_SECT_LEN); ++ ++ dd->valid_pages = calloc(sizeof(ulong), max_sect_len + 1); ++ pfn = 0; ++ for (i = 1; i < max_sect_len + 1; i++) { ++ dd->valid_pages[i] = dd->valid_pages[i - 1]; ++ for (j = 0; j < BITMAP_SECT_LEN; j++, pfn++) ++ if (page_is_dumpable(pfn)) ++ dd->valid_pages[i]++; ++ } ++ ++ return TRUE; ++ ++err: ++ free(header); ++ if (sub_header) ++ free(sub_header); ++ if (sub_header_kdump) ++ free(sub_header_kdump); ++ if (dd->bitmap) ++ free(dd->bitmap); ++ if (dd->dumpable_bitmap) ++ free(dd->dumpable_bitmap); ++ dd->flags &= ~(DISKDUMP_LOCAL|KDUMP_CMPRS_LOCAL); ++ return FALSE; ++} ++ ++static int ++pfn_to_pos(ulong pfn) ++{ ++ int desc_pos, j, valid; ++ ++ valid = dd->valid_pages[pfn / BITMAP_SECT_LEN]; ++ ++ for (j = round(pfn, BITMAP_SECT_LEN), desc_pos = valid; j <= pfn; j++) ++ if (page_is_dumpable(j)) ++ desc_pos++; ++ ++ return desc_pos; ++} ++ + + /* + * Determine whether a file is a diskdump creation, and if TRUE, +@@ -43,7 +313,28 @@ + int + is_diskdump(char *file) + { +- return FALSE; ++ int sz, i; ++ ++ if (!open_dump_file(file) || !read_dump_header(file)) ++ return FALSE; ++ ++ sz = dd->block_size * (DISKDUMP_CACHED_PAGES); ++ if ((dd->page_cache_buf = malloc(sz)) == NULL) ++ error(FATAL, "%s: cannot malloc compressed page_cache_buf\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); ++ ++ for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) ++ dd->page_cache_hdr[i].pg_bufptr = ++ &dd->page_cache_buf[i * dd->block_size]; ++ ++ if ((dd->compressed_page = (char *)malloc(dd->block_size)) == NULL) ++ error(FATAL, "%s: cannot malloc compressed page space\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); ++ ++ if (CRASHDEBUG(1)) ++ diskdump_memory_dump(fp); ++ ++ return TRUE; + } + + /* +@@ -53,11 +344,141 @@ + int + diskdump_init(char *unused, FILE *fptr) + { +- if (!DISKDUMP_VALID()) +- return FALSE; ++ if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) ++ return FALSE; + +- dd->ofp = fptr; +- return TRUE; ++ dd->ofp = fptr; ++ return TRUE; ++} ++ ++/* ++ * Get the relocational offset from the sub header of kdump. ++ */ ++int ++diskdump_phys_base(unsigned long *phys_base) ++{ ++ if (KDUMP_CMPRS_VALID()) { ++ *phys_base = dd->sub_header_kdump->phys_base; ++ return TRUE; ++ } ++ ++ return FALSE; ++} ++ ++/* ++ * Check whether paddr is already cached. 
++ */ ++static int ++page_is_cached(physaddr_t paddr) ++{ ++ int i; ++ struct page_cache_hdr *pgc; ++ ++ dd->accesses++; ++ ++ for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) { ++ ++ pgc = &dd->page_cache_hdr[i]; ++ ++ if (!DISKDUMP_VALID_PAGE(pgc->pg_flags)) ++ continue; ++ ++ if (pgc->pg_addr == paddr) { ++ pgc->pg_hit_count++; ++ dd->curbufptr = pgc->pg_bufptr; ++ dd->cached_reads++; ++ return TRUE; ++ } ++ } ++ return FALSE; ++} ++ ++/* ++ * Cache the page's data. ++ * ++ * If an empty page cache location is available, take it. Otherwise, evict ++ * the entry indexed by evict_index, and then bump evict index. The hit_count ++ * is only gathered for dump_diskdump_environment(). ++ * ++ * If the page is compressed, uncompress it into the selected page cache entry. ++ * If the page is raw, just copy it into the selected page cache entry. ++ * If all works OK, update diskdump->curbufptr to point to the page's ++ * uncompressed data. ++ */ ++static int ++cache_page(physaddr_t paddr) ++{ ++ int i, ret; ++ int found; ++ ulong pfn; ++ int desc_pos; ++ off_t seek_offset; ++ page_desc_t pd; ++ const int block_size = dd->block_size; ++ const off_t failed = (off_t)-1; ++ ulong retlen; ++ ++ for (i = found = 0; i < DISKDUMP_CACHED_PAGES; i++) { ++ if (DISKDUMP_VALID_PAGE(dd->page_cache_hdr[i].pg_flags)) ++ continue; ++ found = TRUE; ++ break; ++ } ++ ++ if (!found) { ++ i = dd->evict_index; ++ dd->page_cache_hdr[i].pg_hit_count = 0; ++ dd->evict_index = ++ (dd->evict_index+1) % DISKDUMP_CACHED_PAGES; ++ dd->evictions++; ++ } ++ ++ dd->page_cache_hdr[i].pg_flags = 0; ++ dd->page_cache_hdr[i].pg_addr = paddr; ++ dd->page_cache_hdr[i].pg_hit_count++; ++ ++ /* find page descriptor */ ++ pfn = paddr >> dd->block_shift; ++ desc_pos = pfn_to_pos(pfn); ++ seek_offset = dd->data_offset ++ + (off_t)(desc_pos - 1)*sizeof(page_desc_t); ++ lseek(dd->dfd, seek_offset, SEEK_SET); ++ ++ /* read page descriptor */ ++ if (read(dd->dfd, &pd, sizeof(pd)) != sizeof(pd)) ++ return READ_ERROR; ++ ++ /* sanity check */ ++ if (pd.size > block_size) ++ return READ_ERROR; ++ ++ if (lseek(dd->dfd, pd.offset, SEEK_SET) == failed) ++ return SEEK_ERROR; ++ ++ /* read page data */ ++ if (read(dd->dfd, dd->compressed_page, pd.size) != pd.size) ++ return READ_ERROR; ++ ++ if (pd.flags & DUMP_DH_COMPRESSED) { ++ retlen = block_size; ++ ret = uncompress((unsigned char *)dd->page_cache_hdr[i].pg_bufptr, ++ &retlen, ++ (unsigned char *)dd->compressed_page, ++ pd.size); ++ if ((ret != Z_OK) || (retlen != block_size)) { ++ error(INFO, "%s: uncompress failed: %d\n", ++ DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump", ++ ret); ++ return READ_ERROR; ++ } ++ } else ++ memcpy(dd->page_cache_hdr[i].pg_bufptr, ++ dd->compressed_page, block_size); ++ ++ dd->page_cache_hdr[i].pg_flags |= PAGE_VALID; ++ dd->curbufptr = dd->page_cache_hdr[i].pg_bufptr; ++ ++ return TRUE; + } + + /* +@@ -66,7 +487,31 @@ + int + read_diskdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) + { +- return 0; ++ int ret; ++ physaddr_t curpaddr; ++ ulong pfn, page_offset; ++ ++ pfn = paddr >> dd->block_shift; ++ curpaddr = paddr & ~((physaddr_t)(dd->block_size-1)); ++ page_offset = paddr & ((physaddr_t)(dd->block_size-1)); ++ ++ if ((pfn >= dd->header->max_mapnr) || !page_is_ram(pfn)) ++ return SEEK_ERROR; ++ if (!page_is_dumpable(pfn)) { ++ if ((dd->flags & (ZERO_EXCLUDED|ERROR_EXCLUDED)) == ++ ERROR_EXCLUDED) ++ return PAGE_EXCLUDED; ++ memset(bufptr, 0, cnt); ++ return cnt; ++ } ++ ++ if (!page_is_cached(curpaddr)) ++ if ((ret = cache_page(curpaddr)) < 0) ++ return ret; ++ ++ memcpy(bufptr, dd->curbufptr + page_offset, cnt); ++ ++ return cnt; + } + + /* +@@ -81,7 +526,23 @@ + ulong + get_diskdump_panic_task(void) + { +- return NO_TASK; ++ if ((!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) ++ || !get_active_set()) ++ return NO_TASK; ++ ++ return (ulong)dd->header->tasks[dd->header->current_cpu]; ++} ++ ++extern void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); ++extern void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); ++ ++static void ++get_diskdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp) ++{ ++ if ((bt->task == tt->panic_task) && DISKDUMP_VALID()) ++ bt->machdep = &dd->sub_header->elf_regs; ++ ++ machdep->get_stack_frame(bt, eip, esp); + } + + /* +@@ -91,12 +552,35 @@ + void + get_diskdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) + { +- switch (dd->machine_type) +- { +- default: +- error(FATAL, +- "diskdump support for this machine type is not available\n"); +- } ++ switch (dd->machine_type) ++ { ++ case EM_386: ++ return get_netdump_regs_x86(bt, eip, esp); ++ break; ++ ++ case EM_IA_64: ++ /* For normal backtraces, this information will be obtained ++ * frome the switch_stack structure, which is pointed to by ++ * the thread.ksp field of the task_struct. But it's still ++ * needed by the "bt -t" option. ++ */ ++ machdep->get_stack_frame(bt, eip, esp); ++ break; ++ ++ case EM_PPC64: ++ return get_diskdump_regs_ppc64(bt, eip, esp); ++ break; ++ ++ case EM_X86_64: ++ return get_netdump_regs_x86_64(bt, eip, esp); ++ break; ++ ++ default: ++ error(FATAL, "%s: unsupported machine type: %s\n", ++ DISKDUMP_VALID() ? "diskdump" : "compressed kdump", ++ MACHINE_TYPE); ++ ++ } + } + + /* +@@ -105,7 +589,10 @@ + uint + diskdump_page_size(void) + { +- return 0; ++ if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) ++ return 0; ++ ++ return dd->header->block_size; + } + + /* +@@ -131,6 +618,197 @@ + int + diskdump_memory_dump(FILE *fp) + { ++ int i, others, dump_level; ++ struct disk_dump_header *dh; ++ struct disk_dump_sub_header *dsh; ++ struct kdump_sub_header *kdsh; ++ ulong *tasks; ++ ++ fprintf(fp, "diskdump_data: \n"); ++ fprintf(fp, " flags: %lx (", dd->flags); ++ others = 0; ++ if (dd->flags & DISKDUMP_LOCAL) ++ fprintf(fp, "%sDISKDUMP_LOCAL", others++ ? "|" : ""); ++ if (dd->flags & KDUMP_CMPRS_LOCAL) ++ fprintf(fp, "%sKDUMP_CMPRS_LOCAL", others++ ? "|" : ""); ++ if (dd->flags & ERROR_EXCLUDED) ++ fprintf(fp, "%sERROR_EXCLUDED", others++ ? "|" : ""); ++ if (dd->flags & ZERO_EXCLUDED) ++ fprintf(fp, "%sZERO_EXCLUDED", others++ ? 
"|" : ""); ++ fprintf(fp, ")\n"); ++ fprintf(fp, " dfd: %d\n", dd->dfd); ++ fprintf(fp, " ofp: %lx\n", (ulong)dd->ofp); ++ fprintf(fp, " machine_type: %d ", dd->machine_type); ++ switch (dd->machine_type) ++ { ++ case EM_386: ++ fprintf(fp, "(EM_386)\n"); break; ++ case EM_X86_64: ++ fprintf(fp, "(EM_X86_64)\n"); break; ++ case EM_IA_64: ++ fprintf(fp, "(EM_IA_64)\n"); break; ++ case EM_PPC64: ++ fprintf(fp, "(EM_PPC64)\n"); break; ++ default: ++ fprintf(fp, "(unknown)\n"); break; ++ } ++ ++ fprintf(fp, "\n header: %lx\n", (ulong)dd->header); ++ dh = dd->header; ++ fprintf(fp, " signature: \""); ++ for (i = 0; i < SIG_LEN; i++) ++ if (dh->signature[i]) ++ fprintf(fp, "%c", dh->signature[i]); ++ fprintf(fp, "\"\n"); ++ fprintf(fp, " header_version: %d\n", dh->header_version); ++ fprintf(fp, " utsname:\n"); ++ fprintf(fp, " sysname: %s\n", dh->utsname.sysname); ++ fprintf(fp, " nodename: %s\n", dh->utsname.nodename); ++ fprintf(fp, " release: %s\n", dh->utsname.release); ++ fprintf(fp, " version: %s\n", dh->utsname.version); ++ fprintf(fp, " machine: %s\n", dh->utsname.machine); ++ fprintf(fp, " domainname: %s\n", dh->utsname.domainname); ++ fprintf(fp, " timestamp:\n"); ++ fprintf(fp, " tv_sec: %lx\n", dh->timestamp.tv_sec); ++ fprintf(fp, " tv_usec: %lx\n", dh->timestamp.tv_usec); ++ fprintf(fp, " status: %x (", dh->status); ++ others = 0; ++ if (dh->status & DUMP_HEADER_COMPLETED) ++ fprintf(fp, "%sDUMP_HEADER_COMPLETED", others++ ? "|" : ""); ++ if (dh->status & DUMP_HEADER_INCOMPLETED) ++ fprintf(fp, "%sDUMP_HEADER_INCOMPLETED", others++ ? "|" : ""); ++ if (dh->status & DUMP_HEADER_COMPRESSED) ++ fprintf(fp, "%sDUMP_HEADER_COMPRESSED", others++ ? "|" : ""); ++ fprintf(fp, ")\n"); ++ fprintf(fp, " block_size: %d\n", dh->block_size); ++ fprintf(fp, " sub_hdr_size: %d\n", dh->sub_hdr_size); ++ fprintf(fp, " bitmap_blocks: %u\n", dh->bitmap_blocks); ++ fprintf(fp, " max_mapnr: %u\n", dh->max_mapnr); ++ fprintf(fp, " total_ram_blocks: %u\n", dh->total_ram_blocks); ++ fprintf(fp, " device_blocks: %u\n", dh->device_blocks); ++ fprintf(fp, " written_blocks: %u\n", dh->written_blocks); ++ fprintf(fp, " current_cpu: %u\n", dh->current_cpu); ++ fprintf(fp, " nr_cpus: %d\n", dh->nr_cpus); ++ tasks = (ulong *)&dh->tasks[0]; ++ fprintf(fp, " tasks[nr_cpus]: %lx\n", *tasks); ++ for (tasks++, i = 1; i < dh->nr_cpus; i++) { ++ fprintf(fp, " %lx\n", *tasks); ++ tasks++; ++ } ++ fprintf(fp, "\n"); ++ fprintf(fp, " sub_header: %lx ", (ulong)dd->sub_header); ++ if ((dsh = dd->sub_header)) { ++ fprintf(fp, "\n elf_regs: %lx\n", ++ (ulong)&dsh->elf_regs); ++ fprintf(fp, " dump_level: "); ++ if ((pc->flags & RUNTIME) && ++ ((dump_level = get_dump_level()) >= 0)) { ++ fprintf(fp, "%d (0x%x) %s", dump_level, dump_level, ++ dump_level ? "(" : ""); ++ ++#define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/ ++#define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */ ++#define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */ ++#define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */ ++#define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */ ++ ++ others = 0; ++ if (dump_level & DUMP_EXCLUDE_CACHE) ++ fprintf(fp, "%sDUMP_EXCLUDE_CACHE", ++ others++ ? "|" : ""); ++ if (dump_level & DUMP_EXCLUDE_CLEAN) ++ fprintf(fp, "%sDUMP_EXCLUDE_CLEAN", ++ others++ ? "|" : ""); ++ if (dump_level & DUMP_EXCLUDE_FREE) ++ fprintf(fp, "%sDUMP_EXCLUDE_FREE", ++ others++ ? "|" : ""); ++ if (dump_level & DUMP_EXCLUDE_ANON) ++ fprintf(fp, "%sDUMP_EXCLUDE_ANON", ++ others++ ? 
"|" : ""); ++ if (dump_level & DUMP_SAVE_PRIVATE) ++ fprintf(fp, "%sDUMP_SAVE_PRIVATE", ++ others++ ? "|" : ""); ++ fprintf(fp, "%s\n\n", dump_level ? ")" : ""); ++ } else ++ fprintf(fp, "%s\n\n", pc->flags & RUNTIME ? ++ "(unknown)" : "(undetermined)"); ++ ++ } else ++ fprintf(fp, "(n/a)\n\n"); ++ ++ fprintf(fp, " sub_header_kdump: %lx ", (ulong)dd->sub_header_kdump); ++ if ((kdsh = dd->sub_header_kdump)) { ++ fprintf(fp, "\n phys_base: %lx\n", ++ (ulong)kdsh->phys_base); ++ fprintf(fp, " dump_level: "); ++ if ((dump_level = get_dump_level()) >= 0) { ++ fprintf(fp, "%d (0x%x) %s", dump_level, dump_level, ++ dump_level ? "(" : ""); ++ ++#define DL_EXCLUDE_ZERO (0x001) /* Exclude Pages filled with Zeros */ ++#define DL_EXCLUDE_CACHE (0x002) /* Exclude Cache Pages without Private Pages */ ++#define DL_EXCLUDE_CACHE_PRI (0x004) /* Exclude Cache Pages with Private Pages */ ++#define DL_EXCLUDE_USER_DATA (0x008) /* Exclude UserProcessData Pages */ ++#define DL_EXCLUDE_FREE (0x010) /* Exclude Free Pages */ ++ ++ if (dump_level & DL_EXCLUDE_ZERO) ++ fprintf(fp, "%sDUMP_EXCLUDE_ZERO", ++ others++ ? "|" : ""); ++ if (dump_level & DL_EXCLUDE_CACHE) ++ fprintf(fp, "%sDUMP_EXCLUDE_CACHE", ++ others++ ? "|" : ""); ++ if (dump_level & DL_EXCLUDE_CACHE_PRI) ++ fprintf(fp, "%sDUMP_EXCLUDE_CACHE_PRI", ++ others++ ? "|" : ""); ++ if (dump_level & DL_EXCLUDE_USER_DATA) ++ fprintf(fp, "%sDUMP_EXCLUDE_USER_DATA", ++ others++ ? "|" : ""); ++ if (dump_level & DL_EXCLUDE_FREE) ++ fprintf(fp, "%sDUMP_EXCLUDE_FREE", ++ others++ ? "|" : ""); ++ others = 0; ++ ++ fprintf(fp, "%s\n\n", dump_level ? ")" : ""); ++ } else ++ fprintf(fp, "(unknown)\n\n"); ++ } else ++ fprintf(fp, "(n/a)\n\n"); ++ ++ fprintf(fp, " data_offset: %lx\n", (ulong)dd->data_offset); ++ fprintf(fp, " block_size: %d\n", dd->block_size); ++ fprintf(fp, " block_shift: %d\n", dd->block_shift); ++ fprintf(fp, " bitmap: %lx\n", (ulong)dd->bitmap); ++ fprintf(fp, " bitmap_len: %d\n", dd->bitmap_len); ++ fprintf(fp, " dumpable_bitmap: %lx\n", (ulong)dd->dumpable_bitmap); ++ fprintf(fp, " byte: %d\n", dd->byte); ++ fprintf(fp, " bit: %d\n", dd->bit); ++ fprintf(fp, " compressed_page: %lx\n", (ulong)dd->compressed_page); ++ fprintf(fp, " curbufptr: %lx\n\n", (ulong)dd->curbufptr); ++ ++ for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) { ++ fprintf(fp, "%spage_cache_hdr[%d]:\n", i < 10 ? " " : "", i); ++ fprintf(fp, " pg_flags: %x (", dd->page_cache_hdr[i].pg_flags); ++ others = 0; ++ if (dd->page_cache_hdr[i].pg_flags & PAGE_VALID) ++ fprintf(fp, "%sPAGE_VALID", others++ ? "|" : ""); ++ fprintf(fp, ")\n"); ++ fprintf(fp, " pg_addr: %llx\n", (ulonglong)dd->page_cache_hdr[i].pg_addr); ++ fprintf(fp, " pg_bufptr: %lx\n", (ulong)dd->page_cache_hdr[i].pg_bufptr); ++ fprintf(fp, " pg_hit_count: %ld\n", dd->page_cache_hdr[i].pg_hit_count); ++ } ++ ++ fprintf(fp, "\n page_cache_buf: %lx\n", (ulong)dd->page_cache_buf); ++ fprintf(fp, " evict_index: %d\n", dd->evict_index); ++ fprintf(fp, " evictions: %ld\n", dd->evictions); ++ fprintf(fp, " accesses: %ld\n", dd->accesses); ++ fprintf(fp, " cached_reads: %ld ", dd->cached_reads); ++ if (dd->accesses) ++ fprintf(fp, "(%ld%%)\n", ++ dd->cached_reads * 100 / dd->accesses); ++ else ++ fprintf(fp, "\n"); ++ fprintf(fp, " valid_pages: %lx\n", (ulong)dd->valid_pages); ++ + return 0; + } + +@@ -142,3 +820,36 @@ + { + return 0; + } ++ ++/* ++ * Versions of disk_dump that support it contain the "dump_level" symbol. 
++ * Version 1 and later compressed kdump dumpfiles contain the dump level ++ * in an additional field of the sub_header_kdump structure. ++ */ ++static int ++get_dump_level(void) ++{ ++ int dump_level; ++ ++ if (DISKDUMP_VALID()) { ++ if (symbol_exists("dump_level") && ++ readmem(symbol_value("dump_level"), KVADDR, &dump_level, ++ sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) ++ return dump_level; ++ } else if (KDUMP_CMPRS_VALID()) { ++ if (dd->header->header_version >= 1) ++ return dd->sub_header_kdump->dump_level; ++ } ++ ++ return -1; ++} ++ ++/* ++ * Used by the "sys" command to display [PARTIAL DUMP] ++ * after the dumpfile name. ++ */ ++int ++is_partial_diskdump(void) ++{ ++ return (get_dump_level() > 0 ? TRUE : FALSE); ++} +--- crash/unwind_x86_32_64.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/unwind_x86_32_64.c 2008-01-04 09:42:08.000000000 -0500 +@@ -0,0 +1,1220 @@ ++#if defined(X86_64) ++/* ++ * Support for genarating DWARF CFI based backtraces. ++ * Borrowed heavily from the kernel's implementation of unwinding using the ++ * DWARF CFI written by Jan Beulich ++ */ ++ ++#ifdef X86_64 ++#include "unwind_x86_64.h" ++#endif ++#ifdef X86 ++#include "unwind_x86.h" ++#endif ++ ++#include "defs.h" ++ ++#define MAX_STACK_DEPTH 8 ++ ++static struct local_unwind_table { ++ struct { ++ unsigned long pc; ++ unsigned long range; ++ } core, init; ++ void *address; ++ unsigned long size; ++} *local_unwind_tables, default_unwind_table; ++ ++static int gather_in_memory_unwind_tables(void); ++static int populate_local_tables(ulong, char *); ++static int unwind_tables_cnt = 0; ++static struct local_unwind_table *find_table(unsigned long); ++static void dump_local_unwind_tables(void); ++ ++static const struct { ++ unsigned offs:BITS_PER_LONG / 2; ++ unsigned width:BITS_PER_LONG / 2; ++} reg_info[] = { ++ UNW_REGISTER_INFO ++}; ++ ++#undef PTREGS_INFO ++#undef EXTRA_INFO ++ ++#ifndef REG_INVALID ++#define REG_INVALID(r) (reg_info[r].width == 0) ++#endif ++ ++#define DW_CFA_nop 0x00 ++#define DW_CFA_set_loc 0x01 ++#define DW_CFA_advance_loc1 0x02 ++#define DW_CFA_advance_loc2 0x03 ++#define DW_CFA_advance_loc4 0x04 ++#define DW_CFA_offset_extended 0x05 ++#define DW_CFA_restore_extended 0x06 ++#define DW_CFA_undefined 0x07 ++#define DW_CFA_same_value 0x08 ++#define DW_CFA_register 0x09 ++#define DW_CFA_remember_state 0x0a ++#define DW_CFA_restore_state 0x0b ++#define DW_CFA_def_cfa 0x0c ++#define DW_CFA_def_cfa_register 0x0d ++#define DW_CFA_def_cfa_offset 0x0e ++#define DW_CFA_def_cfa_expression 0x0f ++#define DW_CFA_expression 0x10 ++#define DW_CFA_offset_extended_sf 0x11 ++#define DW_CFA_def_cfa_sf 0x12 ++#define DW_CFA_def_cfa_offset_sf 0x13 ++#define DW_CFA_val_offset 0x14 ++#define DW_CFA_val_offset_sf 0x15 ++#define DW_CFA_val_expression 0x16 ++#define DW_CFA_lo_user 0x1c ++#define DW_CFA_GNU_window_save 0x2d ++#define DW_CFA_GNU_args_size 0x2e ++#define DW_CFA_GNU_negative_offset_extended 0x2f ++#define DW_CFA_hi_user 0x3f ++ ++#define DW_EH_PE_FORM 0x07 ++#define DW_EH_PE_native 0x00 ++#define DW_EH_PE_leb128 0x01 ++#define DW_EH_PE_data2 0x02 ++#define DW_EH_PE_data4 0x03 ++#define DW_EH_PE_data8 0x04 ++#define DW_EH_PE_signed 0x08 ++#define DW_EH_PE_ADJUST 0x70 ++#define DW_EH_PE_abs 0x00 ++#define DW_EH_PE_pcrel 0x10 ++#define DW_EH_PE_textrel 0x20 ++#define DW_EH_PE_datarel 0x30 ++#define DW_EH_PE_funcrel 0x40 ++#define DW_EH_PE_aligned 0x50 ++#define DW_EH_PE_indirect 0x80 ++#define DW_EH_PE_omit 0xff ++ ++#define min(x,y) ({ \ ++ typeof(x) _x = (x); \ ++ typeof(y) 
_y = (y); \ ++ (void) (&_x == &_y); \ ++ _x < _y ? _x : _y; }) ++ ++#define max(x,y) ({ \ ++ typeof(x) _x = (x); \ ++ typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x > _y ? _x : _y; }) ++#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) ++ ++typedef unsigned long uleb128_t; ++typedef signed long sleb128_t; ++ ++struct unwind_item { ++ enum item_location { ++ Nowhere, ++ Memory, ++ Register, ++ Value ++ } where; ++ uleb128_t value; ++}; ++ ++struct unwind_state { ++ uleb128_t loc, org; ++ const u8 *cieStart, *cieEnd; ++ uleb128_t codeAlign; ++ sleb128_t dataAlign; ++ struct cfa { ++ uleb128_t reg, offs; ++ } cfa; ++ struct unwind_item regs[ARRAY_SIZE(reg_info)]; ++ unsigned stackDepth:8; ++ unsigned version:8; ++ const u8 *label; ++ const u8 *stack[MAX_STACK_DEPTH]; ++}; ++ ++static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 }; ++ ++static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) ++{ ++ const u8 *cur = *pcur; ++ uleb128_t value; ++ unsigned shift; ++ ++ for (shift = 0, value = 0; cur < end; shift += 7) { ++ if (shift + 7 > 8 * sizeof(value) ++ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { ++ cur = end + 1; ++ break; ++ } ++ value |= (uleb128_t)(*cur & 0x7f) << shift; ++ if (!(*cur++ & 0x80)) ++ break; ++ } ++ *pcur = cur; ++ ++ return value; ++} ++ ++static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) ++{ ++ const u8 *cur = *pcur; ++ sleb128_t value; ++ unsigned shift; ++ ++ for (shift = 0, value = 0; cur < end; shift += 7) { ++ if (shift + 7 > 8 * sizeof(value) ++ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { ++ cur = end + 1; ++ break; ++ } ++ value |= (sleb128_t)(*cur & 0x7f) << shift; ++ if (!(*cur & 0x80)) { ++ value |= -(*cur++ & 0x40) << shift; ++ break; ++ } ++ } ++ *pcur = cur; ++ ++ return value; ++} ++ ++static unsigned long read_pointer(const u8 **pLoc, ++ const void *end, ++ signed ptrType) ++{ ++ unsigned long value = 0; ++ union { ++ const u8 *p8; ++ const u16 *p16u; ++ const s16 *p16s; ++ const u32 *p32u; ++ const s32 *p32s; ++ const unsigned long *pul; ++ } ptr; ++ ++ if (ptrType < 0 || ptrType == DW_EH_PE_omit) ++ return 0; ++ ptr.p8 = *pLoc; ++ switch(ptrType & DW_EH_PE_FORM) { ++ case DW_EH_PE_data2: ++ if (end < (const void *)(ptr.p16u + 1)) ++ return 0; ++ if(ptrType & DW_EH_PE_signed) ++ value = get_unaligned(ptr.p16s++); ++ else ++ value = get_unaligned(ptr.p16u++); ++ break; ++ case DW_EH_PE_data4: ++#ifdef CONFIG_64BIT ++ if (end < (const void *)(ptr.p32u + 1)) ++ return 0; ++ if(ptrType & DW_EH_PE_signed) ++ value = get_unaligned(ptr.p32s++); ++ else ++ value = get_unaligned(ptr.p32u++); ++ break; ++ case DW_EH_PE_data8: ++ BUILD_BUG_ON(sizeof(u64) != sizeof(value)); ++#else ++ BUILD_BUG_ON(sizeof(u32) != sizeof(value)); ++#endif ++ case DW_EH_PE_native: ++ if (end < (const void *)(ptr.pul + 1)) ++ return 0; ++ value = get_unaligned(ptr.pul++); ++ break; ++ case DW_EH_PE_leb128: ++ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value)); ++ value = ptrType & DW_EH_PE_signed ++ ? 
get_sleb128(&ptr.p8, end) ++ : get_uleb128(&ptr.p8, end); ++ if ((const void *)ptr.p8 > end) ++ return 0; ++ break; ++ default: ++ return 0; ++ } ++ switch(ptrType & DW_EH_PE_ADJUST) { ++ case DW_EH_PE_abs: ++ break; ++ case DW_EH_PE_pcrel: ++ value += (unsigned long)*pLoc; ++ break; ++ default: ++ return 0; ++ } ++ ++/* TBD ++ if ((ptrType & DW_EH_PE_indirect) ++ && __get_user(value, (unsigned long *)value)) ++ return 0; ++*/ ++ *pLoc = ptr.p8; ++ ++ return value; ++} ++ ++static signed fde_pointer_type(const u32 *cie) ++{ ++ const u8 *ptr = (const u8 *)(cie + 2); ++ unsigned version = *ptr; ++ ++ if (version != 1) ++ return -1; /* unsupported */ ++ if (*++ptr) { ++ const char *aug; ++ const u8 *end = (const u8 *)(cie + 1) + *cie; ++ uleb128_t len; ++ ++ /* check if augmentation size is first (and thus present) */ ++ if (*ptr != 'z') ++ return -1; ++ /* check if augmentation string is nul-terminated */ ++ if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL) ++ return -1; ++ ++ptr; /* skip terminator */ ++ get_uleb128(&ptr, end); /* skip code alignment */ ++ get_sleb128(&ptr, end); /* skip data alignment */ ++ /* skip return address column */ ++ version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end); ++ len = get_uleb128(&ptr, end); /* augmentation length */ ++ if (ptr + len < ptr || ptr + len > end) ++ return -1; ++ end = ptr + len; ++ while (*++aug) { ++ if (ptr >= end) ++ return -1; ++ switch(*aug) { ++ case 'L': ++ ++ptr; ++ break; ++ case 'P': { ++ signed ptrType = *ptr++; ++ ++ if (!read_pointer(&ptr, end, ptrType) || ptr > end) ++ return -1; ++ } ++ break; ++ case 'R': ++ return *ptr; ++ default: ++ return -1; ++ } ++ } ++ } ++ return DW_EH_PE_native|DW_EH_PE_abs; ++} ++ ++static int advance_loc(unsigned long delta, struct unwind_state *state) ++{ ++ state->loc += delta * state->codeAlign; ++ ++ return delta > 0; ++} ++ ++static void set_rule(uleb128_t reg, ++ enum item_location where, ++ uleb128_t value, ++ struct unwind_state *state) ++{ ++ if (reg < ARRAY_SIZE(state->regs)) { ++ state->regs[reg].where = where; ++ state->regs[reg].value = value; ++ } ++} ++ ++static int processCFI(const u8 *start, ++ const u8 *end, ++ unsigned long targetLoc, ++ signed ptrType, ++ struct unwind_state *state) ++{ ++ union { ++ const u8 *p8; ++ const u16 *p16; ++ const u32 *p32; ++ } ptr; ++ int result = 1; ++ ++ if (start != state->cieStart) { ++ state->loc = state->org; ++ result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state); ++ if (targetLoc == 0 && state->label == NULL) ++ return result; ++ } ++ for (ptr.p8 = start; result && ptr.p8 < end; ) { ++ switch(*ptr.p8 >> 6) { ++ uleb128_t value; ++ ++ case 0: ++ switch(*ptr.p8++) { ++ case DW_CFA_nop: ++ break; ++ case DW_CFA_set_loc: ++ if ((state->loc = read_pointer(&ptr.p8, end, ++ ptrType)) == 0) ++ result = 0; ++ break; ++ case DW_CFA_advance_loc1: ++ result = ptr.p8 < end && advance_loc(*ptr.p8++, state); ++ break; ++ case DW_CFA_advance_loc2: ++ result = ptr.p8 <= end + 2 ++ && advance_loc(*ptr.p16++, state); ++ break; ++ case DW_CFA_advance_loc4: ++ result = ptr.p8 <= end + 4 ++ && advance_loc(*ptr.p32++, state); ++ break; ++ case DW_CFA_offset_extended: ++ value = get_uleb128(&ptr.p8, end); ++ set_rule(value, Memory, ++ get_uleb128(&ptr.p8, end), state); ++ break; ++ case DW_CFA_val_offset: ++ value = get_uleb128(&ptr.p8, end); ++ set_rule(value, Value, ++ get_uleb128(&ptr.p8, end), state); ++ break; ++ case DW_CFA_offset_extended_sf: ++ value = get_uleb128(&ptr.p8, end); ++ set_rule(value, Memory, ++ 
get_sleb128(&ptr.p8, end), state); ++ break; ++ case DW_CFA_val_offset_sf: ++ value = get_uleb128(&ptr.p8, end); ++ set_rule(value, Value, ++ get_sleb128(&ptr.p8, end), state); ++ break; ++ case DW_CFA_restore_extended: ++ case DW_CFA_undefined: ++ case DW_CFA_same_value: ++ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state); ++ break; ++ case DW_CFA_register: ++ value = get_uleb128(&ptr.p8, end); ++ set_rule(value, Register, ++ get_uleb128(&ptr.p8, end), state); ++ break; ++ case DW_CFA_remember_state: ++ if (ptr.p8 == state->label) { ++ state->label = NULL; ++ return 1; ++ } ++ if (state->stackDepth >= MAX_STACK_DEPTH) ++ return 0; ++ state->stack[state->stackDepth++] = ptr.p8; ++ break; ++ case DW_CFA_restore_state: ++ if (state->stackDepth) { ++ const uleb128_t loc = state->loc; ++ const u8 *label = state->label; ++ ++ state->label = state->stack[state->stackDepth - 1]; ++ memcpy(&state->cfa, &badCFA, sizeof(state->cfa)); ++ memset(state->regs, 0, sizeof(state->regs)); ++ state->stackDepth = 0; ++ result = processCFI(start, end, 0, ptrType, state); ++ state->loc = loc; ++ state->label = label; ++ } else ++ return 0; ++ break; ++ case DW_CFA_def_cfa: ++ state->cfa.reg = get_uleb128(&ptr.p8, end); ++ /*nobreak*/ ++ case DW_CFA_def_cfa_offset: ++ state->cfa.offs = get_uleb128(&ptr.p8, end); ++ break; ++ case DW_CFA_def_cfa_sf: ++ state->cfa.reg = get_uleb128(&ptr.p8, end); ++ /*nobreak*/ ++ case DW_CFA_def_cfa_offset_sf: ++ state->cfa.offs = get_sleb128(&ptr.p8, end) ++ * state->dataAlign; ++ break; ++ case DW_CFA_def_cfa_register: ++ state->cfa.reg = get_uleb128(&ptr.p8, end); ++ break; ++ /*todo case DW_CFA_def_cfa_expression: */ ++ /*todo case DW_CFA_expression: */ ++ /*todo case DW_CFA_val_expression: */ ++ case DW_CFA_GNU_args_size: ++ get_uleb128(&ptr.p8, end); ++ break; ++ case DW_CFA_GNU_negative_offset_extended: ++ value = get_uleb128(&ptr.p8, end); ++ set_rule(value, Memory, (uleb128_t)0 - ++ get_uleb128(&ptr.p8, end), state); ++ break; ++ case DW_CFA_GNU_window_save: ++ default: ++ result = 0; ++ break; ++ } ++ break; ++ case 1: ++ result = advance_loc(*ptr.p8++ & 0x3f, state); ++ break; ++ case 2: ++ value = *ptr.p8++ & 0x3f; ++ set_rule(value, Memory, get_uleb128(&ptr.p8, end), ++ state); ++ break; ++ case 3: ++ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state); ++ break; ++ } ++ if (ptr.p8 > end) ++ result = 0; ++ if (result && targetLoc != 0 && targetLoc < state->loc) ++ return 1; ++ } ++ ++ return result ++ && ptr.p8 == end ++ && (targetLoc == 0 ++ || (/*todo While in theory this should apply, gcc in practice omits ++ everything past the function prolog, and hence the location ++ never reaches the end of the function. ++ targetLoc < state->loc &&*/ state->label == NULL)); ++} ++ ++ ++/* Unwind to previous to frame. Returns 0 if successful, negative ++ * number in case of an error. 
*/ ++int ++unwind(struct unwind_frame_info *frame) ++{ ++#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs]) ++ const u32 *fde = NULL, *cie = NULL; ++ const u8 *ptr = NULL, *end = NULL; ++ unsigned long startLoc = 0, endLoc = 0, cfa; ++ unsigned i; ++ signed ptrType = -1; ++ uleb128_t retAddrReg = 0; ++// struct unwind_table *table; ++ void *unwind_table; ++ struct local_unwind_table *table; ++ struct unwind_state state; ++ u64 reg_ptr = 0; ++ ++ ++ if (UNW_PC(frame) == 0) ++ return -EINVAL; ++ ++ if ((table = find_table(UNW_PC(frame)))) { ++// unsigned long tableSize = unwind_table_size; ++ unsigned long tableSize = table->size; ++ ++ unwind_table = table->address; ++ ++ for (fde = unwind_table; ++ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; ++ tableSize -= sizeof(*fde) + *fde, ++ fde += 1 + *fde / sizeof(*fde)) { ++ if (!*fde || (*fde & (sizeof(*fde) - 1))) ++ break; ++ if (!fde[1]) ++ continue; /* this is a CIE */ ++ if ((fde[1] & (sizeof(*fde) - 1)) ++ || fde[1] > (unsigned long)(fde + 1) ++ - (unsigned long)unwind_table) ++ continue; /* this is not a valid FDE */ ++ cie = fde + 1 - fde[1] / sizeof(*fde); ++ if (*cie <= sizeof(*cie) + 4 ++ || *cie >= fde[1] - sizeof(*fde) ++ || (*cie & (sizeof(*cie) - 1)) ++ || cie[1] ++ || (ptrType = fde_pointer_type(cie)) < 0) { ++ cie = NULL; /* this is not a (valid) CIE */ ++ continue; ++ } ++ ptr = (const u8 *)(fde + 2); ++ startLoc = read_pointer(&ptr, ++ (const u8 *)(fde + 1) + *fde, ++ ptrType); ++ endLoc = startLoc ++ + read_pointer(&ptr, ++ (const u8 *)(fde + 1) + *fde, ++ ptrType & DW_EH_PE_indirect ++ ? ptrType ++ : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); ++ if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc) ++ break; ++ cie = NULL; ++ } ++ } ++ if (cie != NULL) { ++ memset(&state, 0, sizeof(state)); ++ state.cieEnd = ptr; /* keep here temporarily */ ++ ptr = (const u8 *)(cie + 2); ++ end = (const u8 *)(cie + 1) + *cie; ++ if ((state.version = *ptr) != 1) ++ cie = NULL; /* unsupported version */ ++ else if (*++ptr) { ++ /* check if augmentation size is first (and thus present) */ ++ if (*ptr == 'z') { ++ /* check for ignorable (or already handled) ++ * nul-terminated augmentation string */ ++ while (++ptr < end && *ptr) ++ if (strchr("LPR", *ptr) == NULL) ++ break; ++ } ++ if (ptr >= end || *ptr) ++ cie = NULL; ++ } ++ ++ptr; ++ } ++ if (cie != NULL) { ++ /* get code aligment factor */ ++ state.codeAlign = get_uleb128(&ptr, end); ++ /* get data aligment factor */ ++ state.dataAlign = get_sleb128(&ptr, end); ++ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end) ++ cie = NULL; ++ else { ++ retAddrReg = state.version <= 1 ? 
*ptr++ : get_uleb128(&ptr, end); ++ /* skip augmentation */ ++ if (((const char *)(cie + 2))[1] == 'z') ++ ptr += get_uleb128(&ptr, end); ++ if (ptr > end ++ || retAddrReg >= ARRAY_SIZE(reg_info) ++ || REG_INVALID(retAddrReg) ++ || reg_info[retAddrReg].width != sizeof(unsigned long)) ++ cie = NULL; ++ } ++ } ++ if (cie != NULL) { ++ state.cieStart = ptr; ++ ptr = state.cieEnd; ++ state.cieEnd = end; ++ end = (const u8 *)(fde + 1) + *fde; ++ /* skip augmentation */ ++ if (((const char *)(cie + 2))[1] == 'z') { ++ uleb128_t augSize = get_uleb128(&ptr, end); ++ ++ if ((ptr += augSize) > end) ++ fde = NULL; ++ } ++ } ++ if (cie == NULL || fde == NULL) ++ return -ENXIO; ++ ++ state.org = startLoc; ++ memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); ++ /* process instructions */ ++ if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state) ++ || state.loc > endLoc ++ || state.regs[retAddrReg].where == Nowhere ++ || state.cfa.reg >= ARRAY_SIZE(reg_info) ++ || reg_info[state.cfa.reg].width != sizeof(unsigned long) ++ || state.cfa.offs % sizeof(unsigned long)) { ++ return -EIO; ++ } ++ /* update frame */ ++ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs; ++ startLoc = min((unsigned long)UNW_SP(frame), cfa); ++ endLoc = max((unsigned long)UNW_SP(frame), cfa); ++ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) { ++ startLoc = min(STACK_LIMIT(cfa), cfa); ++ endLoc = max(STACK_LIMIT(cfa), cfa); ++ } ++#ifndef CONFIG_64BIT ++# define CASES CASE(8); CASE(16); CASE(32) ++#else ++# define CASES CASE(8); CASE(16); CASE(32); CASE(64) ++#endif ++ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { ++ if (REG_INVALID(i)) { ++ if (state.regs[i].where == Nowhere) ++ continue; ++ return -EIO; ++ } ++ switch(state.regs[i].where) { ++ default: ++ break; ++ case Register: ++ if (state.regs[i].value >= ARRAY_SIZE(reg_info) ++ || REG_INVALID(state.regs[i].value) ++ || reg_info[i].width > reg_info[state.regs[i].value].width){ ++ return -EIO; ++ } ++ switch(reg_info[state.regs[i].value].width) { ++#define CASE(n) \ ++ case sizeof(u##n): \ ++ state.regs[i].value = FRAME_REG(state.regs[i].value, \ ++ const u##n); \ ++ break ++ CASES; ++#undef CASE ++ default: ++ return -EIO; ++ } ++ break; ++ } ++ } ++ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { ++ if (REG_INVALID(i)) ++ continue; ++ switch(state.regs[i].where) { ++ case Nowhere: ++ if (reg_info[i].width != sizeof(UNW_SP(frame)) ++ || &FRAME_REG(i, __typeof__(UNW_SP(frame))) ++ != &UNW_SP(frame)) ++ continue; ++ UNW_SP(frame) = cfa; ++ break; ++ case Register: ++ switch(reg_info[i].width) { ++#define CASE(n) case sizeof(u##n): \ ++ FRAME_REG(i, u##n) = state.regs[i].value; \ ++ break ++ CASES; ++#undef CASE ++ default: ++ return -EIO; ++ } ++ break; ++ case Value: ++ if (reg_info[i].width != sizeof(unsigned long)){ ++ return -EIO;} ++ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value ++ * state.dataAlign; ++ break; ++ case Memory: { ++ unsigned long addr = cfa + state.regs[i].value ++ * state.dataAlign; ++ if ((state.regs[i].value * state.dataAlign) ++ % sizeof(unsigned long) ++ || addr < startLoc ++ || addr + sizeof(unsigned long) < addr ++ || addr + sizeof(unsigned long) > endLoc){ ++ return -EIO;} ++ switch(reg_info[i].width) { ++#define CASE(n) case sizeof(u##n): \ ++ readmem(addr, KVADDR, ®_ptr,sizeof(u##n), "register", RETURN_ON_ERROR|QUIET); \ ++ FRAME_REG(i, u##n) = (u##n)reg_ptr;\ ++ break ++ CASES; ++#undef CASE ++ default: ++ return -EIO; ++ } ++ } ++ break; ++ } ++ } ++ return 0; ++#undef CASES ++#undef FRAME_REG ++} ++ ++/* ++ * Initialize 
the unwind table(s) in the best-case order: ++ * ++ * 1. Use the in-memory kernel and module unwind tables. ++ * 2. Use the in-memory kernel-only .eh_frame data. (possible?) ++ * 3. Use the kernel-only .eh_frame data from the vmlinux file. ++ */ ++void ++init_unwind_table(void) ++{ ++ ulong unwind_table_size; ++ void *unwind_table; ++ ++ kt->flags &= ~DWARF_UNWIND; ++ ++ if (gather_in_memory_unwind_tables()) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY (%d tables)\n", ++ unwind_tables_cnt); ++ ++ kt->flags |= DWARF_UNWIND_MEMORY; ++ if (unwind_tables_cnt > 1) ++ kt->flags |= DWARF_UNWIND_MODULES; ++ if (!(kt->flags & NO_DWARF_UNWIND)) ++ kt->flags |= DWARF_UNWIND; ++ ++ return; ++ } ++ ++ if (symbol_exists("__start_unwind") && ++ symbol_exists("__end_unwind")) { ++ unwind_table_size = symbol_value("__end_unwind") - ++ symbol_value("__start_unwind"); ++ ++ if (!(unwind_table = malloc(unwind_table_size))) { ++ error(WARNING, "cannot malloc unwind table space\n"); ++ goto try_eh_frame; ++ } ++ ++ if (!readmem(symbol_value("__start_unwind"), KVADDR, unwind_table, ++ unwind_table_size, "unwind table", RETURN_ON_ERROR)) { ++ error(WARNING, "cannot read unwind table data\n"); ++ free(unwind_table); ++ goto try_eh_frame; ++ } ++ ++ kt->flags |= DWARF_UNWIND_MEMORY; ++ if (!(kt->flags & NO_DWARF_UNWIND)) ++ kt->flags |= DWARF_UNWIND; ++ ++ default_unwind_table.size = unwind_table_size; ++ default_unwind_table.address = unwind_table; ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY\n"); ++ ++ return; ++ } ++ ++try_eh_frame: ++ ++ if (st->dwarf_eh_frame_size) { ++ int fd; ++ ++ unwind_table_size = st->dwarf_eh_frame_size; ++ ++ if (!(unwind_table = malloc(unwind_table_size))) { ++ error(WARNING, "cannot malloc unwind table space\n"); ++ return; ++ } ++ ++ if ((fd = open(pc->namelist, O_RDONLY)) < 0) { ++ error(WARNING, "cannot open %s for .eh_frame data\n", ++ pc->namelist); ++ free(unwind_table); ++ return; ++ } ++ ++ lseek(fd, st->dwarf_eh_frame_file_offset, SEEK_SET); ++ ++ if (read(fd, unwind_table, st->dwarf_eh_frame_size) != ++ st->dwarf_eh_frame_size) { ++ error(WARNING, "cannot read .eh_frame data from %s\n", ++ pc->namelist); ++ free(unwind_table); ++ return; ++ } ++ ++ close(fd); ++ ++ default_unwind_table.size = unwind_table_size; ++ default_unwind_table.address = unwind_table; ++ ++ kt->flags |= DWARF_UNWIND_EH_FRAME; ++ if (!(kt->flags & NO_DWARF_UNWIND)) ++ kt->flags |= DWARF_UNWIND; ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "init_unwind_table: DWARF_UNWIND_EH_FRAME\n"); ++ ++ return; ++ } ++} ++ ++/* ++ * Find the appropriate kernel-only "root_table" unwind_table, ++ * and pass it to populate_local_tables() to do the heavy lifting. 
++ */ ++static int ++gather_in_memory_unwind_tables(void) ++{ ++ int i, cnt, found; ++ struct syment *sp, *root_tables[10]; ++ char *root_table_buf; ++ char buf[BUFSIZE]; ++ ulong name; ++ ++ STRUCT_SIZE_INIT(unwind_table, "unwind_table"); ++ MEMBER_OFFSET_INIT(unwind_table_core, "unwind_table", "core"); ++ MEMBER_OFFSET_INIT(unwind_table_init, "unwind_table", "init"); ++ MEMBER_OFFSET_INIT(unwind_table_address, "unwind_table", "address"); ++ MEMBER_OFFSET_INIT(unwind_table_size, "unwind_table", "size"); ++ MEMBER_OFFSET_INIT(unwind_table_link, "unwind_table", "link"); ++ MEMBER_OFFSET_INIT(unwind_table_name, "unwind_table", "name"); ++ ++ if (INVALID_SIZE(unwind_table) || ++ INVALID_MEMBER(unwind_table_core) || ++ INVALID_MEMBER(unwind_table_init) || ++ INVALID_MEMBER(unwind_table_address) || ++ INVALID_MEMBER(unwind_table_size) || ++ INVALID_MEMBER(unwind_table_link) || ++ INVALID_MEMBER(unwind_table_name)) { ++ if (CRASHDEBUG(1)) ++ error(NOTE, ++ "unwind_table structure has changed, or does not exist in this kernel\n"); ++ return 0; ++ } ++ ++ /* ++ * Unfortunately there are two kernel root_table symbols. ++ */ ++ if (!(cnt = get_syment_array("root_table", root_tables, 10))) ++ return 0; ++ ++ root_table_buf = GETBUF(SIZE(unwind_table)); ++ for (i = found = 0; i < cnt; i++) { ++ sp = root_tables[i]; ++ if (!readmem(sp->value, KVADDR, root_table_buf, ++ SIZE(unwind_table), "root unwind_table", ++ RETURN_ON_ERROR|QUIET)) ++ goto gather_failed; ++ ++ name = ULONG(root_table_buf + OFFSET(unwind_table_name)); ++ if (read_string(name, buf, strlen("kernel")+1) && ++ STREQ("kernel", buf)) { ++ found++; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "root_table name: %lx [%s]\n", ++ name, buf); ++ break; ++ } ++ } ++ ++ if (!found) ++ goto gather_failed; ++ ++ cnt = populate_local_tables(sp->value, root_table_buf); ++ ++ FREEBUF(root_table_buf); ++ return cnt; ++ ++gather_failed: ++ ++ FREEBUF(root_table_buf); ++ return 0; ++} ++ ++/* ++ * Transfer the relevant data from the kernel and module unwind_table ++ * structures to the local_unwind_table structures. ++ */ ++static int ++populate_local_tables(ulong root, char *buf) ++{ ++ struct list_data list_data, *ld; ++ int i, cnt; ++ ulong *table_list; ++ ulong vaddr; ++ struct local_unwind_table *tp; ++ ++ ld = &list_data; ++ BZERO(ld, sizeof(struct list_data)); ++ ld->start = root; ++ ld->member_offset = OFFSET(unwind_table_link); ++ if (CRASHDEBUG(1)) ++ ld->flags |= VERBOSE; ++ ++ hq_open(); ++ cnt = do_list(ld); ++ table_list = (ulong *)GETBUF(cnt * sizeof(ulong)); ++ cnt = retrieve_list(table_list, cnt); ++ hq_close(); ++ ++ if (!(local_unwind_tables = ++ malloc(sizeof(struct local_unwind_table) * cnt))) { ++ error(WARNING, "cannot malloc unwind_table space (%d tables)\n", ++ cnt); ++ FREEBUF(table_list); ++ return 0; ++ } ++ ++ for (i = 0; i < cnt; i++, tp++) { ++ ++ if (!readmem(table_list[i], KVADDR, buf, ++ SIZE(unwind_table), "unwind_table", ++ RETURN_ON_ERROR|QUIET)) { ++ error(WARNING, "cannot read unwind_table\n"); ++ goto failed; ++ } ++ ++ tp = &local_unwind_tables[i]; ++ ++ /* ++ * Copy the required table info for find_table(). ++ */ ++ BCOPY(buf + OFFSET(unwind_table_core), ++ (char *)&tp->core.pc, sizeof(ulong)*2); ++ BCOPY(buf + OFFSET(unwind_table_init), ++ (char *)&tp->init.pc, sizeof(ulong)*2); ++ BCOPY(buf + OFFSET(unwind_table_size), ++ (char *)&tp->size, sizeof(ulong)); ++ ++ /* ++ * Then read the DWARF CFI data. 
++ */ ++ vaddr = ULONG(buf + OFFSET(unwind_table_address)); ++ ++ if (!(tp->address = malloc(tp->size))) { ++ error(WARNING, "cannot malloc unwind_table space\n"); ++ goto failed; ++ break; ++ } ++ if (!readmem(vaddr, KVADDR, tp->address, ++ tp->size, "DWARF CFI data", RETURN_ON_ERROR|QUIET)) { ++ error(WARNING, "cannot read unwind_table data\n"); ++ goto failed; ++ } ++ } ++ ++ unwind_tables_cnt = cnt; ++ ++ if (CRASHDEBUG(7)) ++ dump_local_unwind_tables(); ++ ++failed: ++ ++ FREEBUF(table_list); ++ return unwind_tables_cnt; ++} ++ ++/* ++ * Find the unwind_table containing a pc. ++ */ ++static struct local_unwind_table * ++find_table(unsigned long pc) ++{ ++ int i; ++ struct local_unwind_table *tp, *table; ++ ++ table = &default_unwind_table; ++ ++ for (i = 0; i < unwind_tables_cnt; i++, tp++) { ++ tp = &local_unwind_tables[i]; ++ if ((pc >= tp->core.pc ++ && pc < tp->core.pc + tp->core.range) ++ || (pc >= tp->init.pc ++ && pc < tp->init.pc + tp->init.range)) { ++ table = tp; ++ break; ++ } ++ } ++ ++ return table; ++} ++ ++static void ++dump_local_unwind_tables(void) ++{ ++ int i, others; ++ struct local_unwind_table *tp; ++ ++ others = 0; ++ fprintf(fp, "DWARF flags: ("); ++ if (kt->flags & DWARF_UNWIND) ++ fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : ""); ++ if (kt->flags & NO_DWARF_UNWIND) ++ fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : ""); ++ if (kt->flags & DWARF_UNWIND_MEMORY) ++ fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : ""); ++ if (kt->flags & DWARF_UNWIND_EH_FRAME) ++ fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : ""); ++ if (kt->flags & DWARF_UNWIND_MODULES) ++ fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? "|" : ""); ++ fprintf(fp, ")\n\n"); ++ ++ fprintf(fp, "default_unwind_table:\n"); ++ fprintf(fp, " address: %lx\n", ++ (ulong)default_unwind_table.address); ++ fprintf(fp, " size: %ld\n\n", ++ (ulong)default_unwind_table.size); ++ ++ fprintf(fp, "local_unwind_tables[%d]:\n", unwind_tables_cnt); ++ for (i = 0; i < unwind_tables_cnt; i++, tp++) { ++ tp = &local_unwind_tables[i]; ++ fprintf(fp, "[%d]\n", i); ++ fprintf(fp, " core: pc: %lx\n", tp->core.pc); ++ fprintf(fp, " range: %ld\n", tp->core.range); ++ fprintf(fp, " init: pc: %lx\n", tp->init.pc); ++ fprintf(fp, " range: %ld\n", tp->init.range); ++ fprintf(fp, " address: %lx\n", (ulong)tp->address); ++ fprintf(fp, " size: %ld\n", tp->size); ++ } ++} ++ ++ ++int ++dwarf_backtrace(struct bt_info *bt, int level, ulong stacktop) ++{ ++ unsigned long bp, offset; ++ struct syment *sp; ++ char *name; ++ struct unwind_frame_info *frame; ++ ++ frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); ++// frame->regs.rsp = bt->stkptr; ++// frame->regs.rip = bt->instptr; ++ UNW_SP(frame) = bt->stkptr; ++ UNW_PC(frame) = bt->instptr; ++ ++ /* read rbp from stack for non active tasks */ ++ if (!(bt->flags & BT_DUMPFILE_SEARCH) && !bt->bptr) { ++// readmem(frame->regs.rsp, KVADDR, &bp, ++ readmem(UNW_SP(frame), KVADDR, &bp, ++ sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); ++ frame->regs.rbp = bp; /* fixme for x86 */ ++ } ++ ++ sp = value_search(UNW_PC(frame), &offset); ++ if (!sp) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", ++ UNW_PC(frame)); ++ goto bailout; ++ } ++ ++ /* ++ * If offset is zero, it means we have crossed over to the next ++ * function. 
Recalculate by adjusting the text address ++ */ ++ if (!offset) { ++ sp = value_search(UNW_PC(frame) - 1, &offset); ++ if (!sp) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "unwind: cannot find symbol for PC: %lx\n", ++ UNW_PC(frame)-1); ++ goto bailout; ++ } ++ } ++ ++ ++ ++ name = sp->name; ++ fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), ++ UNW_PC(frame), frame->regs.rbp); ++ ++ while ((UNW_SP(frame) < stacktop) ++ && !unwind(frame) && UNW_PC(frame)) { ++ /* To prevent rip pushed on IRQ stack being reported both ++ * both on the IRQ and process stacks ++ */ ++ if ((bt->flags & BT_IRQSTACK) && (UNW_SP(frame) >= stacktop - 16)) ++ break; ++ level++; ++ sp = value_search(UNW_PC(frame), &offset); ++ if (!sp) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "unwind: cannot find symbol for PC: %lx\n", ++ UNW_PC(frame)); ++ break; ++ } ++ ++ /* ++ * If offset is zero, it means we have crossed over to the next ++ * function. Recalculate by adjusting the text address ++ */ ++ if (!offset) { ++ sp = value_search(UNW_PC(frame) - 1, &offset); ++ if (!sp) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "unwind: cannot find symbol for PC: %lx\n", ++ UNW_PC(frame)-1); ++ goto bailout; ++ } ++ } ++ name = sp->name; ++ fprintf(fp, "%s#%d [%016lx] %s at %016lx \n", level < 10 ? " " : "", ++ level, UNW_SP(frame), name, UNW_PC(frame)); ++ ++ if (CRASHDEBUG(2)) ++ fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), ++ UNW_PC(frame), frame->regs.rbp); ++ } ++ ++bailout: ++ FREEBUF(frame); ++ return ++level; ++} ++ ++int ++dwarf_print_stack_entry(struct bt_info *bt, int level) ++{ ++ unsigned long offset; ++ struct syment *sp; ++ char *name; ++ struct unwind_frame_info *frame; ++ ++ frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); ++ UNW_SP(frame) = bt->stkptr; ++ UNW_PC(frame) = bt->instptr; ++ ++ sp = value_search(UNW_PC(frame), &offset); ++ if (!sp) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", ++ UNW_PC(frame)); ++ goto bailout; ++ } ++ ++ /* ++ * If offset is zero, it means we have crossed over to the next ++ * function. Recalculate by adjusting the text address ++ */ ++ if (!offset) { ++ sp = value_search(UNW_PC(frame) - 1, &offset); ++ if (!sp) { ++ if (CRASHDEBUG(1)) ++ fprintf(fp, ++ "unwind: cannot find symbol for PC: %lx\n", ++ UNW_PC(frame)-1); ++ goto bailout; ++ } ++ } ++ name = sp->name; ++ fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); ++ ++bailout: ++ FREEBUF(frame); ++ return level; ++} ++ ++void ++dwarf_debug(struct bt_info *bt) ++{ ++ struct unwind_frame_info *frame; ++ ulong bp; ++ ++ if (!bt->hp->eip) { ++ dump_local_unwind_tables(); ++ return; ++ } ++ ++ if (!(kt->flags & DWARF_UNWIND_CAPABLE)) { ++ error(INFO, "not DWARF capable\n"); ++ return; ++ } ++ ++ frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); ++ ++ /* ++ * XXX: This only works for the first PC/SP pair seen in a normal ++ * backtrace, so it's not particularly helpful. Ideally it should ++ * be capable to take any PC/SP pair in a stack, but it appears to ++ * related to the rbp value. 
++ */ ++ ++ UNW_PC(frame) = bt->hp->eip; ++ UNW_SP(frame) = bt->hp->esp; ++ ++ readmem(UNW_SP(frame), KVADDR, &bp, ++ sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); ++ frame->regs.rbp = bp; /* fixme for x86 */ ++ ++ unwind(frame); ++ ++ fprintf(fp, "frame size: %lx (%lx)\n", ++ (ulong)UNW_SP(frame), (ulong)UNW_SP(frame) - bt->hp->esp); ++ ++ FREEBUF(frame); ++} ++ ++ ++#endif +--- crash/s390x.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/s390x.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,9 +1,9 @@ + /* s390.c - core analysis suite + * + * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. +- * Copyright (C) 2005 Michael Holzheu, IBM Corporation ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2005, 2006 Michael Holzheu, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -20,24 +20,6 @@ + + #define S390X_WORD_SIZE 8 + +-#define S390X_PAGE_SHIFT 12 +-#define S390X_PAGE_SIZE (1ULL << S390X_PAGE_SHIFT) +-#define S390X_PAGE_MASK (~(S390X_PAGE_SIZE-1)) +- +-#define S390X_PGDIR_SHIFT 31 +-#define S390X_PGDIR_SIZE (1ULL << S390X_PGDIR_SHIFT) +-#define S390X_PGDIR_MASK (~(S390X_PGDIR_SIZE-1)) +- +-#define S390X_PMD_SHIFT 20 +-#define S390X_PMD_SIZE (1ULL << S390X_PMD_SHIFT) +-#define S390X_PMD_MASK (~(S390X_PMD_SIZE-1)) +- +-#define S390X_PTRS_PER_PGD 2048 +-#define S390X_PTRS_PER_PMD 2048 +-#define S390X_PTRS_PER_PTE 256 +- +-#define S390X_PMD_BASE_MASK (~((1ULL<<12)-1)) +-#define S390X_PT_BASE_MASK (~((1ULL<<11)-1)) + #define S390X_PAGE_BASE_MASK (~((1ULL<<12)-1)) + + /* Flags used in entries of page dirs and page tables. 
+@@ -48,37 +30,11 @@ + #define S390X_PAGE_INVALID 0x400ULL /* HW invalid */ + #define S390X_PAGE_INVALID_MASK 0x601ULL /* for linux 2.6 */ + #define S390X_PAGE_INVALID_NONE 0x401ULL /* for linux 2.6 */ +-#define S390X_PMD_ENTRY_INV 0x20ULL /* invalid segment table entry */ +-#define S390X_PGD_ENTRY_INV 0x20ULL /* invalid region table entry */ +-#define S390X_PMD_ENTRY 0x00 +-#define S390X_PGD_ENTRY_FIRST 0x05 /* first part of pmd is valid */ +-#define S390X_PGD_ENTRY_SECOND 0xc7 /* second part of pmd is valid */ +-#define S390X_PGD_ENTRY_FULL 0x07 /* complete pmd is valid */ + + /* bits 52, 55 must contain zeroes in a pte */ + #define S390X_PTE_INVALID_MASK 0x900ULL + #define S390X_PTE_INVALID(x) ((x) & S390X_PTE_INVALID_MASK) + +-/* pgd/pmd/pte query macros */ +-#define s390x_pgd_none(x) ((x) & S390X_PGD_ENTRY_INV) +-#define s390x_pgd_bad(x) !( (((x) & S390X_PGD_ENTRY_FIRST) == \ +- S390X_PGD_ENTRY_FIRST) || \ +- (((x) & S390X_PGD_ENTRY_SECOND) == \ +- S390X_PGD_ENTRY_SECOND) || \ +- (((x) & S390X_PGD_ENTRY_FULL) == \ +- S390X_PGD_ENTRY_FULL)) +- +-#define s390x_pmd_none(x) ((x) & S390X_PMD_ENTRY_INV) +-#define s390x_pmd_bad(x) (((x) & (~S390X_PT_BASE_MASK & \ +- ~S390X_PMD_ENTRY_INV)) != \ +- S390X_PMD_ENTRY) +- +-#define s390x_pte_none(x) (((x) & (S390X_PAGE_INVALID | \ +- S390X_PAGE_RO | \ +- S390X_PAGE_PRESENT)) == \ +- S390X_PAGE_INVALID) +- +- + #define ASYNC_STACK_SIZE STACKSIZE() // can be 8192 or 16384 + #define KERNEL_STACK_SIZE STACKSIZE() // can be 8192 or 16384 + +@@ -88,9 +44,6 @@ + * declarations of static functions + */ + static void s390x_print_lowcore(char*, struct bt_info*,int); +-static unsigned long s390x_pgd_offset(unsigned long, unsigned long); +-static unsigned long s390x_pmd_offset(unsigned long, unsigned long); +-static unsigned long s390x_pte_offset(unsigned long, unsigned long); + static int s390x_kvtop(struct task_context *, ulong, physaddr_t *, int); + static int s390x_uvtop(struct task_context *, ulong, physaddr_t *, int); + static int s390x_vtop(unsigned long, ulong, physaddr_t*, int); +@@ -173,7 +126,8 @@ + machdep->nr_irqs = 0; /* TBD */ + machdep->vmalloc_start = s390x_vmalloc_start; + machdep->dump_irq = s390x_dump_irq; +- machdep->hz = HZ; ++ if (!machdep->hz) ++ machdep->hz = HZ; + break; + + case POST_INIT: +@@ -193,8 +147,6 @@ + fprintf(fp, " flags: %lx (", machdep->flags); + if (machdep->flags & KSYMS_START) + fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); +- if (machdep->flags & SYSRQ) +- fprintf(fp, "%sSYSRQ", others++ ? 
"|" : ""); + fprintf(fp, ")\n"); + + fprintf(fp, " kvbase: %lx\n", machdep->kvbase); +@@ -207,7 +159,8 @@ + fprintf(fp, " hz: %d\n", machdep->hz); + fprintf(fp, " mhz: %ld\n", machdep->mhz); + fprintf(fp, " memsize: %lld (0x%llx)\n", +- machdep->memsize, machdep->memsize); ++ (unsigned long long)machdep->memsize, ++ (unsigned long long)machdep->memsize); + fprintf(fp, " bits: %d\n", machdep->bits); + fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); + fprintf(fp, " eframe_search: s390x_eframe_search()\n"); +@@ -245,19 +198,6 @@ + } + + /* +- * Check if address is in the vmalloc area +- */ +-int +-s390x_IS_VMALLOC_ADDR(ulong addr) +-{ +- static unsigned long high_memory = 0; +- if(!high_memory){ +- high_memory = s390x_vmalloc_start(); +- } +- return (addr > high_memory); +-} +- +-/* + * Check if address is in context's address space + */ + static int +@@ -308,7 +248,7 @@ + /* + * Check if page is mapped + */ +-inline int s390x_pte_present(unsigned long x){ ++static inline int s390x_pte_present(unsigned long x){ + if(THIS_KERNEL_VERSION >= LINUX(2,6,0)){ + return !((x) & S390X_PAGE_INVALID) || + ((x) & S390X_PAGE_INVALID_MASK) == S390X_PAGE_INVALID_NONE; +@@ -317,81 +257,97 @@ + } + } + +-/* ++/* + * page table traversal functions + */ +-unsigned long s390x_pgd_offset(unsigned long pgd_base, unsigned long vaddr) +-{ +- unsigned long pgd_off, pmd_base; +- +- pgd_off = ((vaddr >> S390X_PGDIR_SHIFT) & +- (S390X_PTRS_PER_PGD - 1)) * 8; +- readmem(pgd_base + pgd_off, PHYSADDR, &pmd_base, sizeof(long), +- "pmd_base",FAULT_ON_ERROR); +- +- return pmd_base; +-} +- +-unsigned long s390x_pmd_offset(unsigned long pmd_base, unsigned long vaddr) +-{ +- unsigned long pmd_off, pte_base; +- +- pmd_off = ((vaddr >> S390X_PMD_SHIFT) & (S390X_PTRS_PER_PMD - 1)) +- * 8; +- readmem(pmd_base + pmd_off, PHYSADDR, &pte_base, sizeof(long), +- "pte_base",FAULT_ON_ERROR); +- return pte_base; +-} + +-unsigned long s390x_pte_offset(unsigned long pte_base, unsigned long vaddr) +-{ +- unsigned long pte_off, pte_val; +- +- pte_off = ((vaddr >> S390X_PAGE_SHIFT) & (S390X_PTRS_PER_PTE - 1)) +- * 8; +- readmem(pte_base + pte_off, PHYSADDR, &pte_val, sizeof(long), +- "pte_val",FAULT_ON_ERROR); +- return pte_val; ++/* Region or segment table traversal function */ ++static ulong _kl_rsg_table_deref_s390x(ulong vaddr, ulong table, ++ int len, int level) ++{ ++ ulong offset, entry; ++ ++ offset = ((vaddr >> (11*level + 20)) & 0x7ffULL) * 8; ++ if (offset >= (len + 1)*4096) ++ /* Offset is over the table limit. */ ++ return 0; ++ readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", ++ FAULT_ON_ERROR); ++ /* ++ * Check if the segment table entry could be read and doesn't have ++ * any of the reserved bits set. ++ */ ++ if ((entry & 0xcULL) != (level << 2)) ++ return 0; ++ /* Check if the region table entry has the invalid bit set. */ ++ if (entry & 0x40ULL) ++ return 0; ++ /* Region table entry is valid and well formed. 
*/ ++ return entry; + } + +-/* +- * Generic vtop function for user and kernel addresses +- */ +-static int +-s390x_vtop(unsigned long pgd_base, ulong kvaddr, physaddr_t *paddr, int verbose) ++/* Page table traversal function */ ++static ulong _kl_pg_table_deref_s390x(ulong vaddr, ulong table) + { +- unsigned long pmd_base, pte_base, pte_val; ++ ulong offset, entry; + +- /* get the pgd entry */ +- pmd_base = s390x_pgd_offset(pgd_base,kvaddr); +- if(s390x_pgd_bad(pmd_base) || +- s390x_pgd_none(pmd_base)){ +- *paddr = 0; +- return FALSE; +- } +- /* get the pmd */ +- pmd_base = pmd_base & S390X_PMD_BASE_MASK; +- pte_base = s390x_pmd_offset(pmd_base,kvaddr); +- if(s390x_pmd_bad(pte_base) || +- s390x_pmd_none(pte_base)) { +- *paddr = 0; ++ offset = ((vaddr >> 12) & 0xffULL) * 8; ++ readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", ++ FAULT_ON_ERROR); ++ /* ++ * Check if the page table entry could be read and doesn't have ++ * any of the reserved bits set. ++ */ ++ if (entry & 0x900ULL) ++ return 0; ++ /* Check if the page table entry has the invalid bit set. */ ++ if (entry & 0x400ULL) ++ return 0; ++ /* Page table entry is valid and well formed. */ ++ return entry; ++} ++ ++/* lookup virtual address in page tables */ ++int s390x_vtop(ulong table, ulong vaddr, physaddr_t *phys_addr, int verbose) ++{ ++ ulong entry, paddr; ++ int level, len; ++ ++ /* ++ * Walk the region and segment tables. ++ * We assume that the table length field in the asce is set to the ++ * maximum value of 3 (which translates to a region first, region ++ * second, region third or segment table with 2048 entries) and that ++ * the addressing mode is 64 bit. ++ */ ++ len = 3; ++ /* Read the first entry to find the number of page table levels. */ ++ readmem(table, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR); ++ level = (entry & 0xcULL) >> 2; ++ if ((vaddr >> (31 + 11*level)) != 0ULL) { ++ /* Address too big for the number of page table levels. */ + return FALSE; + } +- /* get the pte */ +- pte_base = pte_base & S390X_PT_BASE_MASK; +- pte_val = s390x_pte_offset(pte_base,kvaddr); +- if (S390X_PTE_INVALID(pte_val) || +- s390x_pte_none(pte_val)){ +- *paddr = 0; +- return FALSE; ++ while (level >= 0) { ++ entry = _kl_rsg_table_deref_s390x(vaddr, table, len, level); ++ if (!entry) ++ return 0; ++ table = entry & ~0xfffULL; ++ len = entry & 0x3ULL; ++ level--; + } +- if(!s390x_pte_present(pte_val)){ +- /* swapped out */ +- *paddr = pte_val; ++ ++ /* Get the page table entry */ ++ entry = _kl_pg_table_deref_s390x(vaddr, entry & ~0x7ffULL); ++ if (!entry) + return FALSE; +- } +- *paddr = (pte_val & S390X_PAGE_BASE_MASK) | +- (kvaddr & (~(S390X_PAGE_MASK))); ++ ++ /* Isolate the page origin from the page table entry. */ ++ paddr = entry & ~0xfffULL; ++ ++ /* Add the page offset and return the final value. 
*/ ++ *phys_addr = paddr + (vaddr & 0xfffULL); ++ + return TRUE; + } + +@@ -514,7 +470,7 @@ + return FALSE; + } + fprintf(fp,"PTE PHYSICAL FLAGS\n"); +- fprintf(fp,"%08x %08x",pte, pte & S390X_PAGE_BASE_MASK); ++ fprintf(fp,"%08lx %08llx",pte, pte & S390X_PAGE_BASE_MASK); + fprintf(fp," ("); + if(pte & S390X_PAGE_INVALID) + fprintf(fp,"INVALID "); +@@ -541,7 +497,7 @@ + /* + * returns cpu number of task + */ +-int ++static int + s390x_cpu_of_task(unsigned long task) + { + unsigned int cpu; +@@ -583,12 +539,13 @@ + return FALSE; + } else { + /* Linux 2.6 */ +- unsigned long runqueue_addr, runqueue_offset, per_cpu_offset; ++ unsigned long runqueue_addr, runqueue_offset; + unsigned long cpu_offset, per_cpu_offset_addr, running_task; +- char runqueue[4096]; ++ char *runqueue; + int cpu; + + cpu = s390x_cpu_of_task(task); ++ runqueue = GETBUF(SIZE(runqueue)); + + runqueue_offset=symbol_value("per_cpu__runqueues"); + per_cpu_offset_addr=symbol_value("__per_cpu_offset"); +@@ -596,10 +553,10 @@ + &cpu_offset, sizeof(long),"per_cpu_offset", + FAULT_ON_ERROR); + runqueue_addr=runqueue_offset + cpu_offset; +- readmem(runqueue_addr,KVADDR,&runqueue,sizeof(runqueue), ++ readmem(runqueue_addr,KVADDR,runqueue,SIZE(runqueue), + "runqueue", FAULT_ON_ERROR); +- running_task = *((unsigned long*)&runqueue[MEMBER_OFFSET( +- "runqueue", "curr")]); ++ running_task = ULONG(runqueue + OFFSET(runqueue_curr)); ++ FREEBUF(runqueue); + if(running_task == task) + return TRUE; + else +@@ -733,7 +690,7 @@ + } else if(skip_first_frame){ + skip_first_frame=0; + } else { +- fprintf(fp," #%i [%08x] ",i,backchain); ++ fprintf(fp," #%i [%08lx] ",i,backchain); + fprintf(fp,"%s at %x\n", closest_symbol(r14), r14); + if (bt->flags & BT_LINE_NUMBERS) + s390x_dump_line_number(r14); +@@ -743,22 +700,25 @@ + backchain = ULONG(&stack[backchain - stack_base + bc_offset]); + + /* print stack content if -f is specified */ +- if((bt->flags & BT_FULL) && !BT_REFERENCE_CHECK(bt)){ ++ if ((bt->flags & BT_FULL) && !BT_REFERENCE_CHECK(bt)) { + int frame_size; +- if(backchain == 0){ ++ if (backchain == 0) { + frame_size = stack_base - old_backchain + + KERNEL_STACK_SIZE; + } else { +- frame_size = backchain - old_backchain; ++ frame_size = MIN((backchain - old_backchain), ++ (stack_base - old_backchain + ++ KERNEL_STACK_SIZE)); + } +- for(j=0; j< frame_size; j+=4){ ++ for (j = 0; j < frame_size; j += 8) { + if(j % 16 == 0){ +- fprintf(fp,"\n%08x: ",old_backchain+j); ++ fprintf(fp, "%s %016lx: ", ++ j ? 
"\n" : "", old_backchain + j); + } +- fprintf(fp," %08x",ULONG(&stack[old_backchain - +- stack_base + j])); ++ fprintf(fp," %016lx", ++ ULONG(&stack[old_backchain - stack_base + j])); + } +- fprintf(fp,"\n\n"); ++ fprintf(fp, "\n"); + } + + /* Check for interrupt stackframe */ +@@ -804,26 +764,26 @@ + return; + } + fprintf(fp," LOWCORE INFO:\n"); +- fprintf(fp," -psw : %#018x %#018x\n", tmp[0], tmp[1]); ++ fprintf(fp," -psw : %#018lx %#018lx\n", tmp[0], tmp[1]); + if(show_symbols){ +- fprintf(fp," -function : %s at %x\n", ++ fprintf(fp," -function : %s at %lx\n", + closest_symbol(tmp[1]), tmp[1]); + if (bt->flags & BT_LINE_NUMBERS) + s390x_dump_line_number(tmp[1]); + } + ptr = lc + MEMBER_OFFSET("_lowcore","prefixreg_save_area"); + tmp[0] = UINT(ptr); +- fprintf(fp," -prefix : %#010x\n", tmp[0]); ++ fprintf(fp," -prefix : %#010lx\n", tmp[0]); + + ptr = lc + MEMBER_OFFSET("_lowcore","cpu_timer_save_area"); + tmp[0]=UINT(ptr); + tmp[1]=UINT(ptr + S390X_WORD_SIZE); +- fprintf(fp," -cpu timer: %#010x %#010x\n", tmp[0],tmp[1]); ++ fprintf(fp," -cpu timer: %#010lx %#010lx\n", tmp[0],tmp[1]); + + ptr = lc + MEMBER_OFFSET("_lowcore","clock_comp_save_area"); + tmp[0]=UINT(ptr); + tmp[1]=UINT(ptr + S390X_WORD_SIZE); +- fprintf(fp," -clock cmp: %#010x %#010x\n", tmp[0], tmp[1]); ++ fprintf(fp," -clock cmp: %#010lx %#010lx\n", tmp[0], tmp[1]); + + fprintf(fp," -general registers:\n"); + ptr = lc + MEMBER_OFFSET("_lowcore","gpregs_save_area"); +@@ -831,26 +791,26 @@ + tmp[1]=ULONG(ptr + S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); + tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 8 * S390X_WORD_SIZE); + tmp[1]=ULONG(ptr + 9 * S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 10* S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 11* S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 12* S390X_WORD_SIZE); + tmp[1]=ULONG(ptr + 13* S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 14* S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 15* S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + + fprintf(fp," -access registers:\n"); + ptr = lc + MEMBER_OFFSET("_lowcore","access_regs_save_area"); +@@ -858,25 +818,25 @@ + tmp[1]=ULONG(ptr + 4); + tmp[2]=ULONG(ptr + 2 * 4); + tmp[3]=ULONG(ptr + 3 * 4); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + tmp[0]=ULONG(ptr + 4 * 4); + tmp[1]=ULONG(ptr + 5 * 4); + tmp[2]=ULONG(ptr + 6 * 4); + tmp[3]=ULONG(ptr + 7 * 4); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + tmp[0]=ULONG(ptr + 8 
* 4); + tmp[1]=ULONG(ptr + 9 * 4); + tmp[2]=ULONG(ptr + 10* 4); + tmp[3]=ULONG(ptr + 11* 4); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + tmp[0]=ULONG(ptr + 12* 4); + tmp[1]=ULONG(ptr + 13* 4); + tmp[2]=ULONG(ptr + 14* 4); + tmp[3]=ULONG(ptr + 15* 4); +- fprintf(fp," %#010x %#010x %#010x %#010x\n", ++ fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", + tmp[0], tmp[1], tmp[2], tmp[3]); + + fprintf(fp," -control registers:\n"); +@@ -885,26 +845,26 @@ + tmp[1]=ULONG(ptr + S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); + tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr); + tmp[1]=ULONG(ptr + S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); + tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + + ptr = lc + MEMBER_OFFSET("_lowcore","floating_pt_save_area"); + fprintf(fp," -floating point registers 0,2,4,6:\n"); +@@ -912,26 +872,26 @@ + tmp[1]=ULONG(ptr + S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); + tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 6 * S390X_WORD_SIZE); + tmp[1]=ULONG(ptr + 7 * S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 8 * S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 9 * S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); + tmp[0]=ULONG(ptr + 10* S390X_WORD_SIZE); + tmp[1]=ULONG(ptr + 11* S390X_WORD_SIZE); + tmp[2]=ULONG(ptr + 12* S390X_WORD_SIZE); + tmp[3]=ULONG(ptr + 13* S390X_WORD_SIZE); +- fprintf(fp," %#018x %#018x\n", tmp[0],tmp[1]); +- fprintf(fp," %#018x %#018x\n", tmp[2],tmp[3]); ++ fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); ++ fprintf(fp," %#018lx 
%#018lx\n", tmp[2],tmp[3]); + } + + /* +--- crash/lkcd_dump_v8.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_dump_v8.h 2008-01-04 09:42:08.000000000 -0500 +@@ -235,4 +235,304 @@ + int stack_offset; + } lkcdinfo_t; + ++/* ++ * ++ * machine specific dump headers ++ * ++ */ ++ ++/* ++ * IA64 --------------------------------------------------------- ++ */ ++ ++#if defined(IA64) ++ ++#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ ++#define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */ ++ ++ ++struct pt_regs { ++ /* The following registers are saved by SAVE_MIN: */ ++ unsigned long b6; /* scratch */ ++ unsigned long b7; /* scratch */ ++ ++ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ ++ unsigned long ar_ssd; /* reserved for future use (scratch) */ ++ ++ unsigned long r8; /* scratch (return value register 0) */ ++ unsigned long r9; /* scratch (return value register 1) */ ++ unsigned long r10; /* scratch (return value register 2) */ ++ unsigned long r11; /* scratch (return value register 3) */ ++ ++ unsigned long cr_ipsr; /* interrupted task's psr */ ++ unsigned long cr_iip; /* interrupted task's instruction pointer */ ++ unsigned long cr_ifs; /* interrupted task's function state */ ++ ++ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ ++ unsigned long ar_pfs; /* prev function state */ ++ unsigned long ar_rsc; /* RSE configuration */ ++ /* The following two are valid only if cr_ipsr.cpl > 0: */ ++ unsigned long ar_rnat; /* RSE NaT */ ++ unsigned long ar_bspstore; /* RSE bspstore */ ++ ++ unsigned long pr; /* 64 predicate registers (1 bit each) */ ++ unsigned long b0; /* return pointer (bp) */ ++ unsigned long loadrs; /* size of dirty partition << 16 */ ++ ++ unsigned long r1; /* the gp pointer */ ++ unsigned long r12; /* interrupted task's memory stack pointer */ ++ unsigned long r13; /* thread pointer */ ++ ++ unsigned long ar_fpsr; /* floating point status (preserved) */ ++ unsigned long r15; /* scratch */ ++ ++ /* The remaining registers are NOT saved for system calls. */ ++ ++ unsigned long r14; /* scratch */ ++ unsigned long r2; /* scratch */ ++ unsigned long r3; /* scratch */ ++ ++ /* The following registers are saved by SAVE_REST: */ ++ unsigned long r16; /* scratch */ ++ unsigned long r17; /* scratch */ ++ unsigned long r18; /* scratch */ ++ unsigned long r19; /* scratch */ ++ unsigned long r20; /* scratch */ ++ unsigned long r21; /* scratch */ ++ unsigned long r22; /* scratch */ ++ unsigned long r23; /* scratch */ ++ unsigned long r24; /* scratch */ ++ unsigned long r25; /* scratch */ ++ unsigned long r26; /* scratch */ ++ unsigned long r27; /* scratch */ ++ unsigned long r28; /* scratch */ ++ unsigned long r29; /* scratch */ ++ unsigned long r30; /* scratch */ ++ unsigned long r31; /* scratch */ ++ ++ unsigned long ar_ccv; /* compare/exchange value (scratch) */ ++ ++ /* ++ * Floating point registers that the kernel considers scratch: ++ */ ++ struct ia64_fpreg f6; /* scratch */ ++ struct ia64_fpreg f7; /* scratch */ ++ struct ia64_fpreg f8; /* scratch */ ++ struct ia64_fpreg f9; /* scratch */ ++ struct ia64_fpreg f10; /* scratch */ ++ struct ia64_fpreg f11; /* scratch */ ++}; ++ ++ ++ ++/* ++ * Structure: dump_header_asm_t ++ * Function: This is the header for architecture-specific stuff. It ++ * follows right after the dump header. 
++ * ++ */ ++typedef struct _dump_header_asm_s { ++ ++ /* the dump magic number -- unique to verify dump is valid */ ++ uint64_t dha_magic_number; ++ ++ /* the version number of this dump */ ++ uint32_t dha_version; ++ ++ /* the size of this header (in case we can't read it) */ ++ uint32_t dha_header_size; ++ ++ /* pointer to pt_regs, (OLD: (struct pt_regs *, NEW: (uint64_t)) */ ++ uint64_t dha_pt_regs; ++ ++ /* the dump registers */ ++ struct pt_regs dha_regs; ++ ++ /* the rnat register saved after flushrs */ ++ uint64_t dha_rnat; ++ ++ /* the pfs register saved after flushrs */ ++ uint64_t dha_pfs; ++ ++ /* the bspstore register saved after flushrs */ ++ uint64_t dha_bspstore; ++ ++ /* smp specific */ ++ uint32_t dha_smp_num_cpus; ++ uint32_t dha_dumping_cpu; ++ struct pt_regs dha_smp_regs[NR_CPUS]; ++ uint64_t dha_smp_current_task[NR_CPUS]; ++ uint64_t dha_stack[NR_CPUS]; ++ uint64_t dha_stack_ptr[NR_CPUS]; ++ ++ /* load address of kernel */ ++ uint64_t dha_kernel_addr; ++ ++} __attribute__((packed)) dump_header_asm_t; ++ ++struct dump_CPU_info_ia64 { ++ struct pt_regs dha_smp_regs; ++ uint64_t dha_smp_current_task; ++ uint64_t dha_stack; ++ uint64_t dha_stack_ptr; ++} __attribute__((packed)) dump_CPU_info_ia64_t; ++ ++typedef struct dump_CPU_info_ia64 dump_CPU_info_t; ++ ++/* ++ * i386 --------------------------------------------------------- ++ */ ++ ++#elif defined(X86) ++ ++#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ ++#define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */ ++ ++ ++struct pt_regs { ++ long ebx; ++ long ecx; ++ long edx; ++ long esi; ++ long edi; ++ long ebp; ++ long eax; ++ int xds; ++ int xes; ++ long orig_eax; ++ long eip; ++ int xcs; ++ long eflags; ++ long esp; ++ int xss; ++}; ++ ++/* ++ * Structure: __dump_header_asm ++ * Function: This is the header for architecture-specific stuff. It ++ * follows right after the dump header. 
++ */ ++typedef struct _dump_header_asm_s { ++ /* the dump magic number -- unique to verify dump is valid */ ++ uint64_t dha_magic_number; ++ ++ /* the version number of this dump */ ++ uint32_t dha_version; ++ ++ /* the size of this header (in case we can't read it) */ ++ uint32_t dha_header_size; ++ ++ /* the esp for i386 systems */ ++ uint32_t dha_esp; ++ ++ /* the eip for i386 systems */ ++ uint32_t dha_eip; ++ ++ /* the dump registers */ ++ struct pt_regs dha_regs; ++ ++ /* smp specific */ ++ uint32_t dha_smp_num_cpus; ++ uint32_t dha_dumping_cpu; ++ struct pt_regs dha_smp_regs[NR_CPUS]; ++ uint32_t dha_smp_current_task[NR_CPUS]; ++ uint32_t dha_stack[NR_CPUS]; ++ uint32_t dha_stack_ptr[NR_CPUS]; ++} __attribute__((packed)) dump_header_asm_t; ++ ++/* ++ * CPU specific part of dump_header_asm_t ++ */ ++typedef struct dump_CPU_info_s { ++ struct pt_regs dha_smp_regs; ++ uint64_t dha_smp_current_task; ++ uint64_t dha_stack; ++ uint64_t dha_stack_ptr; ++} __attribute__ ((packed)) dump_CPU_info_t; ++ ++ ++/* ++ * x86-64 --------------------------------------------------------- ++ */ ++ ++#elif defined(X86_64) ++ ++/* definitions */ ++#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ ++#define DUMP_ASM_VERSION_NUMBER 0x2 /* version number */ ++ ++ ++struct pt_regs { ++ unsigned long r15; ++ unsigned long r14; ++ unsigned long r13; ++ unsigned long r12; ++ unsigned long rbp; ++ unsigned long rbx; ++/* arguments: non interrupts/non tracing syscalls only save upto here*/ ++ unsigned long r11; ++ unsigned long r10; ++ unsigned long r9; ++ unsigned long r8; ++ unsigned long rax; ++ unsigned long rcx; ++ unsigned long rdx; ++ unsigned long rsi; ++ unsigned long rdi; ++ unsigned long orig_rax; ++/* end of arguments */ ++/* cpu exception frame or undefined */ ++ unsigned long rip; ++ unsigned long cs; ++ unsigned long eflags; ++ unsigned long rsp; ++ unsigned long ss; ++/* top of stack page */ ++}; ++ ++/* ++ * Structure: dump_header_asm_t ++ * Function: This is the header for architecture-specific stuff. It ++ * follows right after the dump header. ++ */ ++typedef struct _dump_header_asm_s { ++ ++ /* the dump magic number -- unique to verify dump is valid */ ++ uint64_t dha_magic_number; ++ ++ /* the version number of this dump */ ++ uint32_t dha_version; ++ ++ /* the size of this header (in case we can't read it) */ ++ uint32_t dha_header_size; ++ ++ /* the dump registers */ ++ struct pt_regs dha_regs; ++ ++ /* smp specific */ ++ uint32_t dha_smp_num_cpus; ++ int dha_dumping_cpu; ++ struct pt_regs dha_smp_regs[NR_CPUS]; ++ uint64_t dha_smp_current_task[NR_CPUS]; ++ uint64_t dha_stack[NR_CPUS]; ++ uint64_t dha_stack_ptr[NR_CPUS]; ++} __attribute__((packed)) dump_header_asm_t; ++ ++ ++/* ++ * CPU specific part of dump_header_asm_t ++ */ ++typedef struct dump_CPU_info_s { ++ struct pt_regs dha_smp_regs; ++ uint64_t dha_smp_current_task; ++ uint64_t dha_stack; ++ uint64_t dha_stack_ptr; ++} __attribute__ ((packed)) dump_CPU_info_t; ++ ++#else ++ ++#define HAVE_NO_DUMP_HEADER_ASM 1 ++ ++#endif ++ + #endif /* _DUMP_H */ +--- crash/x86.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/x86.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* x86.c - core analysis suite + * + * Portions Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. 
++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -51,6 +51,7 @@ + * rights to redistribute these changes. + */ + #include "defs.h" ++#include "xen_hyper_defs.h" + + #ifndef MCLX + +@@ -176,6 +177,7 @@ + static void db_symbol_values(db_sym_t, char **, db_expr_t *); + static int db_sym_numargs(db_sym_t, int *, char **); + static void x86_dump_line_number(ulong); ++static void x86_clear_machdep_cache(void); + + static ulong mach_debug = 0; + +@@ -215,7 +217,7 @@ + + argp = (int *)db_get_value((int)&fp->f_retaddr, 4, FALSE, bt); + /* +- * XXX etext is wrong for LKMs. We should attempt to interpret ++ * etext is wrong for LKMs. We should attempt to interpret + * the instruction at the return address in all cases. This + * may require better fault handling. + */ +@@ -685,6 +687,7 @@ + bt->debug || + (bt->flags & BT_FRAMESIZE_DEBUG) || + !(bt->flags & BT_OLD_BACK_TRACE)) { ++ bt->flags &= ~BT_OLD_BACK_TRACE; + lkcd_x86_back_trace(bt, 0, fp); + return; + } +@@ -962,8 +965,12 @@ + */ + static int x86_uvtop(struct task_context *, ulong, physaddr_t *, int); + static int x86_kvtop(struct task_context *, ulong, physaddr_t *, int); +-static int x86_uvtop_pae(struct task_context *, ulong, physaddr_t *, int); +-static int x86_kvtop_pae(struct task_context *, ulong, physaddr_t *, int); ++static int x86_uvtop_PAE(struct task_context *, ulong, physaddr_t *, int); ++static int x86_kvtop_PAE(struct task_context *, ulong, physaddr_t *, int); ++static int x86_uvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); ++static int x86_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); ++static int x86_uvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int); ++static int x86_kvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int); + static ulong x86_get_task_pgd(ulong); + static ulong x86_processor_speed(void); + static ulong x86_get_pc(struct bt_info *); +@@ -973,6 +980,7 @@ + static uint64_t x86_memory_size(void); + static ulong x86_vmalloc_start(void); + static ulong *read_idt_table(int); ++static void eframe_init(void); + #define READ_IDT_INIT 1 + #define READ_IDT_RUNTIME 2 + static char *extract_idt_function(ulong *, char *, ulong *); +@@ -983,26 +991,42 @@ + static int x86_dis_filter(ulong, char *); + static struct line_number_hook x86_line_number_hooks[]; + static int x86_is_uvaddr(ulong, struct task_context *); ++static void x86_init_kernel_pgd(void); ++static ulong xen_m2p_nonPAE(ulong); ++static int x86_xendump_p2m_create(struct xendump_data *); ++static int x86_xen_kdump_p2m_create(struct xen_kdump_data *); ++static char *x86_xen_kdump_load_page(ulong, char *); ++static char *x86_xen_kdump_load_page_PAE(ulong, char *); ++static ulong x86_xen_kdump_page_mfn(ulong); ++static ulong x86_xen_kdump_page_mfn_PAE(ulong); ++static ulong x86_xendump_panic_task(struct xendump_data *); ++static void x86_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); ++static char *x86_xendump_load_page(ulong, char *); ++static char *x86_xendump_load_page_PAE(ulong, char *); ++static int x86_xendump_page_index(ulong); ++static int x86_xendump_page_index_PAE(ulong); ++static void x86_init_hyper(int); ++static ulong x86_get_stackbase_hyper(ulong); ++static ulong x86_get_stacktop_hyper(ulong); ++ ++int 
INT_EFRAME_SS = 14; ++int INT_EFRAME_ESP = 13; ++int INT_EFRAME_EFLAGS = 12; /* CS lcall7 */ ++int INT_EFRAME_CS = 11; /* EIP lcall7 */ ++int INT_EFRAME_EIP = 10; /* EFLAGS lcall7 */ ++int INT_EFRAME_ERR = 9; ++int INT_EFRAME_ES = 8; ++int INT_EFRAME_DS = 7; ++int INT_EFRAME_EAX = 6; ++int INT_EFRAME_EBP = 5; ++int INT_EFRAME_EDI = 4; ++int INT_EFRAME_ESI = 3; ++int INT_EFRAME_EDX = 2; ++int INT_EFRAME_ECX = 1; ++int INT_EFRAME_EBX = 0; ++int INT_EFRAME_GS = -1; + +- +-#define INT_EFRAME_SS (14) +-#define INT_EFRAME_ESP (13) +-#define INT_EFRAME_EFLAGS (12) /* CS lcall7 */ +-#define INT_EFRAME_CS (11) /* EIP lcall7 */ +-#define INT_EFRAME_EIP (10) /* EFLAGS lcall7 */ +-#define INT_EFRAME_ERR (9) +- +-#define INT_EFRAME_ES (8) +-#define INT_EFRAME_DS (7) +-#define INT_EFRAME_EAX (6) +-#define INT_EFRAME_EBP (5) +-#define INT_EFRAME_EDI (4) +-#define INT_EFRAME_ESI (3) +-#define INT_EFRAME_EDX (2) +-#define INT_EFRAME_ECX (1) +-#define INT_EFRAME_EBX (0) +- +-#define USER_EFRAME_SIZE (INT_EFRAME_SS+1) ++#define MAX_USER_EFRAME_SIZE (16) + #define KERNEL_EFRAME_SIZE (INT_EFRAME_EFLAGS+1) + + #define EFRAME_USER (1) +@@ -1015,7 +1039,7 @@ + { + int i; + char buf[BUFSIZE], *sp; +- ulong int_eframe[USER_EFRAME_SIZE]; ++ ulong int_eframe[MAX_USER_EFRAME_SIZE]; + int eframe_type, args; + ulong value, *argp; + +@@ -1025,11 +1049,11 @@ + return(frame_number); + + GET_STACK_DATA(ep->eframe_addr, (char *)int_eframe, +- USER_EFRAME_SIZE * sizeof(ulong)); ++ SIZE(pt_regs)); + + if (int_eframe[INT_EFRAME_CS] & DPL_BITS) { + if (!INSTACK(ep->eframe_addr + +- (USER_EFRAME_SIZE*sizeof(ulong)) - 1, bt)) ++ SIZE(pt_regs) - 1, bt)) + return(frame_number); + /* error(FATAL, "read of exception frame would go beyond stack\n"); */ + eframe_type = EFRAME_USER; +@@ -1158,17 +1182,24 @@ + int_eframe[INT_EFRAME_EDX]); + + fprintf(fp, +- " DS: %04x ESI: %08lx ES: %04x EDI: %08lx \n", ++ " DS: %04x ESI: %08lx ES: %04x EDI: %08lx", + (short)int_eframe[INT_EFRAME_DS], + int_eframe[INT_EFRAME_ESI], + (short)int_eframe[INT_EFRAME_ES], + int_eframe[INT_EFRAME_EDI]); ++ if (kernel && (INT_EFRAME_GS != -1)) ++ fprintf(fp, " GS: %04x", (short)int_eframe[INT_EFRAME_GS]); ++ fprintf(fp, "\n"); + +- if (!kernel) +- fprintf(fp, " SS: %04x ESP: %08lx EBP: %08lx \n", ++ if (!kernel) { ++ fprintf(fp, " SS: %04x ESP: %08lx EBP: %08lx", + (short)int_eframe[INT_EFRAME_SS], + int_eframe[INT_EFRAME_ESP], + int_eframe[INT_EFRAME_EBP]); ++ if (INT_EFRAME_GS != -1) ++ fprintf(fp, " GS: %04x", (short)int_eframe[INT_EFRAME_GS]); ++ fprintf(fp, "\n"); ++ } + + fprintf(fp, + " CS: %04x EIP: %08lx ERR: %08lx EFLAGS: %08lx \n", +@@ -1355,7 +1386,7 @@ + */ + + struct x86_pt_regs { +- ulong reg_value[USER_EFRAME_SIZE]; ++ ulong reg_value[MAX_USER_EFRAME_SIZE]; + }; + + /* +@@ -1420,6 +1451,17 @@ + break; + } + ++ if (XEN() && ((short)pt->reg_value[INT_EFRAME_CS] == 0x61) && ++ ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) && ++ ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) && ++ IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) { ++ if (!(machdep->flags & OMIT_FRAME_PTR) && ++ !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt)) ++ continue; ++ rv = bt->stackbase + sizeof(ulong) * (first - stack); ++ break; ++ } ++ + /* check for user exception frame */ + + if (((short)pt->reg_value[INT_EFRAME_CS] == 0x23) && +@@ -1441,6 +1483,20 @@ + rv = bt->stackbase + sizeof(ulong) * (first - stack); + break; + } ++ ++ /* ++ * 2.6 kernels using sysenter_entry instead of system_call ++ * have a funky trampoline EIP address. 
++ */ ++ if (((short)pt->reg_value[INT_EFRAME_CS] == 0x73) && ++ ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) && ++ ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) && ++ ((short)pt->reg_value[INT_EFRAME_SS] == 0x7b) && ++ (pt->reg_value[INT_EFRAME_EFLAGS] == 0x246) && ++ IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) { ++ rv = bt->stackbase + sizeof(ulong) * (first - stack); ++ break; ++ } + } + return(rv); + } +@@ -1536,6 +1592,8 @@ + mode = "USER-MODE"; + } else if ((cs == 0x10) || (cs == 0x60)) { + mode = "KERNEL-MODE"; ++ } else if (XEN() && (cs == 0x61)) { ++ mode = "KERNEL-MODE"; + } else { + mode = "UNKNOWN-MODE"; + } +@@ -1626,6 +1684,11 @@ + { + struct syment *sp, *spn; + ++ if (XEN_HYPER_MODE()) { ++ x86_init_hyper(when); ++ return; ++ } ++ + switch (when) + { + case PRE_SYMTAB: +@@ -1639,7 +1702,7 @@ + machdep->stacksize = machdep->pagesize * 2; + if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc pgd space."); +- if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) ++ if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc pmd space."); + if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) + error(FATAL, "cannot malloc ptbl space."); +@@ -1659,8 +1722,8 @@ + PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL; + PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL; + PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL; +- machdep->uvtop = x86_uvtop_pae; +- machdep->kvtop = x86_kvtop_pae; ++ machdep->uvtop = x86_uvtop_PAE; ++ machdep->kvtop = x86_kvtop_PAE; + } else { + PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL; + PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL; +@@ -1696,14 +1759,19 @@ + machdep->cmd_mach = x86_cmd_mach; + machdep->get_smp_cpus = x86_get_smp_cpus; + machdep->line_number_hooks = x86_line_number_hooks; +- if (x86_omit_frame_pointer()) +- machdep->flags |= OMIT_FRAME_PTR; + machdep->flags |= FRAMESIZE_DEBUG; + machdep->value_to_symbol = generic_machdep_value_to_symbol; +- machdep->init_kernel_pgd = NULL; ++ machdep->init_kernel_pgd = x86_init_kernel_pgd; ++ machdep->xendump_p2m_create = x86_xendump_p2m_create; ++ machdep->xen_kdump_p2m_create = x86_xen_kdump_p2m_create; ++ machdep->xendump_panic_task = x86_xendump_panic_task; ++ machdep->get_xendump_regs = x86_get_xendump_regs; ++ machdep->clear_machdep_cache = x86_clear_machdep_cache; + break; + + case POST_GDB: ++ if (x86_omit_frame_pointer()) ++ machdep->flags |= OMIT_FRAME_PTR; + STRUCT_SIZE_INIT(user_regs_struct, "user_regs_struct"); + MEMBER_OFFSET_INIT(user_regs_struct_ebp, + "user_regs_struct", "ebp"); +@@ -1723,9 +1791,37 @@ + "irq_desc", NULL, 0); + else + machdep->nr_irqs = 224; /* NR_IRQS */ +- machdep->hz = HZ; +- if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) +- machdep->hz = 1000; ++ if (!machdep->hz) { ++ machdep->hz = HZ; ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) ++ machdep->hz = 1000; ++ } ++ ++ if (machdep->flags & PAE) { ++ machdep->section_size_bits = _SECTION_SIZE_BITS_PAE; ++ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_PAE; ++ } else { ++ machdep->section_size_bits = _SECTION_SIZE_BITS; ++ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; ++ } ++ ++ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { ++ if (machdep->flags & PAE) ++ machdep->uvtop = x86_uvtop_xen_wpt_PAE; ++ else ++ machdep->uvtop = x86_uvtop_xen_wpt; ++ } ++ ++ if (XEN()) { ++ MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs, ++ "vcpu_guest_context", "user_regs"); ++ MEMBER_OFFSET_INIT(cpu_user_regs_esp, ++ "cpu_user_regs", "esp"); ++ MEMBER_OFFSET_INIT(cpu_user_regs_eip, ++ "cpu_user_regs", "eip"); ++ } ++ ++ eframe_init(); 
+ break; + + case POST_INIT: +@@ -1735,6 +1831,47 @@ + } + + /* ++ * Account for addition of pt_regs.xgs field in 2.6.20+ kernels. ++ */ ++static void ++eframe_init(void) ++{ ++ if (INVALID_SIZE(pt_regs)) { ++ if (THIS_KERNEL_VERSION < LINUX(2,6,20)) ++ ASSIGN_SIZE(pt_regs) = (MAX_USER_EFRAME_SIZE-1)*sizeof(ulong); ++ else { ++ ASSIGN_SIZE(pt_regs) = MAX_USER_EFRAME_SIZE*sizeof(ulong); ++ INT_EFRAME_SS = 15; ++ INT_EFRAME_ESP = 14; ++ INT_EFRAME_EFLAGS = 13; ++ INT_EFRAME_CS = 12; ++ INT_EFRAME_EIP = 11; ++ INT_EFRAME_ERR = 10; ++ INT_EFRAME_GS = 9; ++ } ++ return; ++ } ++ ++ INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "xss") / 4; ++ INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "esp") / 4; ++ INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "eflags") / 4; ++ INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "xcs") / 4; ++ INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "eip") / 4; ++ INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_eax") / 4; ++ if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "xgs")) != -1) ++ INT_EFRAME_GS /= 4; ++ INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "xes") / 4; ++ INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "xds") / 4; ++ INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "eax") / 4; ++ INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "ebp") / 4; ++ INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "edi") / 4; ++ INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "esi") / 4; ++ INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "edx") / 4; ++ INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "ecx") / 4; ++ INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "ebx") / 4; ++} ++ ++/* + * Needs to be done this way because of potential 4G/4G split. + */ + static int +@@ -1825,7 +1962,7 @@ + fprintf(fp, " PAGE: %s (4MB)\n\n", + mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, + MKSTR(NONPAE_PAGEBASE(pgd_pte)))); +- x86_translate_pte(0, 0, pgd_pte); ++ x86_translate_pte(pgd_pte, 0, 0); + } + + *paddr = NONPAE_PAGEBASE(pgd_pte) + (vaddr & ~_4MB_PAGE_MASK); +@@ -1892,7 +2029,170 @@ + } + + static int +-x86_uvtop_pae(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) ++x86_uvtop_xen_wpt(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) ++{ ++ ulong mm, active_mm; ++ ulong *pgd; ++ ulong *page_dir; ++ ulong *page_middle; ++ ulong *machine_page_table, *pseudo_page_table; ++ ulong pgd_pte, pseudo_pgd_pte; ++ ulong pmd_pte; ++ ulong machine_pte, pseudo_pte; ++ char buf[BUFSIZE]; ++ ++ if (!tc) ++ error(FATAL, "current context invalid\n"); ++ ++ *paddr = 0; ++ ++ if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { ++ if (VALID_MEMBER(thread_struct_cr3)) ++ pgd = (ulong *)machdep->get_task_pgd(tc->task); ++ else { ++ if (INVALID_MEMBER(task_struct_active_mm)) ++ error(FATAL, "no cr3 or active_mm?\n"); ++ ++ readmem(tc->task + OFFSET(task_struct_active_mm), ++ KVADDR, &active_mm, sizeof(void *), ++ "task active_mm contents", FAULT_ON_ERROR); ++ ++ if (!active_mm) ++ error(FATAL, ++ "no active_mm for this kernel thread\n"); ++ ++ readmem(active_mm + OFFSET(mm_struct_pgd), ++ KVADDR, &pgd, sizeof(long), ++ "mm_struct pgd", FAULT_ON_ERROR); ++ } ++ } else { ++ if ((mm = task_mm(tc->task, TRUE))) ++ pgd = ULONG_PTR(tt->mm_struct + ++ OFFSET(mm_struct_pgd)); ++ else ++ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), ++ KVADDR, &pgd, sizeof(long), "mm_struct pgd", ++ FAULT_ON_ERROR); ++ } ++ ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); ++ ++ page_dir = pgd + (vaddr >> PGDIR_SHIFT); ++ ++ FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); ++ ++ if (verbose) ++ 
fprintf(fp, " PGD: %s => %lx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)page_dir)), ++ pgd_pte); ++ ++ if (!pgd_pte) ++ goto no_upage; ++ ++ if (pgd_pte & _PAGE_4M) { ++ if (verbose) ++ fprintf(fp, " PAGE: %s (4MB) [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(NONPAE_PAGEBASE(pgd_pte)))); ++ ++ pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte)); ++ ++ if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) { ++ if (verbose) ++ fprintf(fp, " PAGE: page not available\n"); ++ *paddr = PADDR_NOT_AVAILABLE; ++ return FALSE; ++ } ++ ++ pseudo_pgd_pte |= PAGEOFFSET(pgd_pte); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s (4MB)\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte)))); ++ ++ x86_translate_pte(pseudo_pgd_pte, 0, 0); ++ } ++ ++ *paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) + ++ (vaddr & ~_4MB_PAGE_MASK); ++ ++ return TRUE; ++ } ++ ++ page_middle = page_dir; ++ ++ FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE()); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); ++ ++ if (verbose) ++ fprintf(fp, " PMD: %s => %lx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)page_middle)), ++ pmd_pte); ++ ++ if (!pmd_pte) ++ goto no_upage; ++ ++ machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) + ++ ((vaddr>>10) & ((PTRS_PER_PTE-1)<<2))); ++ ++ pseudo_page_table = (ulong *) ++ xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table)); ++ ++ FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE()); ++ machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table)); ++ ++ if (verbose) { ++ fprintf(fp, " PTE: %s [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)machine_page_table))); ++ ++ fprintf(fp, " PTE: %s => %lx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)pseudo_page_table + ++ PAGEOFFSET(machine_page_table))), machine_pte); ++ } ++ ++ if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { ++ *paddr = machine_pte; ++ ++ if (machine_pte && verbose) { ++ fprintf(fp, "\n"); ++ x86_translate_pte(machine_pte, 0, 0); ++ } ++ ++ goto no_upage; ++ } ++ ++ pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte)); ++ pseudo_pte |= PAGEOFFSET(machine_pte); ++ ++ *paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(vaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(NONPAE_PAGEBASE(machine_pte)))); ++ ++ fprintf(fp, " PAGE: %s\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(NONPAE_PAGEBASE(pseudo_pte)))); ++ ++ x86_translate_pte(pseudo_pte, 0, 0); ++ } ++ ++ return TRUE; ++ ++no_upage: ++ return FALSE; ++} ++ ++static int ++x86_uvtop_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) + { + ulong mm, active_mm; + ulonglong *pgd; +@@ -1962,7 +2262,7 @@ + + page_middle = PAE_PAGEBASE(page_dir_entry); + +- FILL_PMD(page_middle, PHYSADDR, PAGESIZE()); ++ FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE()); + + offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); + +@@ -1998,7 +2298,7 @@ + + page_table = PAE_PAGEBASE(page_middle_entry); + +- FILL_PTBL(page_table, PHYSADDR, PAGESIZE()); ++ FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE()); + + offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * + sizeof(ulonglong); +@@ -2028,9 +2328,10 @@ + *paddr = physpage; + + if (verbose) { +- fprintf(fp, " PAGE: %s\n\n", +- mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, +- MKSTR(&physpage))); ++ ull = PAE_PAGEBASE(page_table_entry); 
++ fprintf(fp, " PAGE: %s\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull))); + x86_translate_pte(0, 0, page_table_entry); + } + +@@ -2040,62 +2341,259 @@ + return FALSE; + } + +-/* +- * Translates a kernel virtual address to its physical address. cmd_vtop() +- * sets the verbose flag so that the pte translation gets displayed; all +- * other callers quietly accept the translation. +- */ +- + static int +-x86_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) ++x86_uvtop_xen_wpt_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) + { +- ulong *pgd; +- ulong *page_dir; +- ulong *page_middle; +- ulong *page_table; +- ulong pgd_pte; +- ulong pmd_pte; +- ulong pte; ++ ulong mm, active_mm; ++ ulonglong *pgd; ++ ulonglong page_dir_entry; ++ ulonglong page_middle, pseudo_page_middle; ++ ulonglong page_middle_entry; ++ ulonglong page_table, pseudo_page_table; ++ ulonglong page_table_entry; ++ ulonglong physpage, pseudo_physpage; ++ ulonglong ull; ++ ulong offset; + char buf[BUFSIZE]; + +- if (!IS_KVADDR(kvaddr)) +- return FALSE; ++ if (!tc) ++ error(FATAL, "current context invalid\n"); + +- if (!vt->vmalloc_start) { +- *paddr = VTOP(kvaddr); +- return TRUE; +- } ++ *paddr = 0; + +- if (!IS_VMALLOC_ADDR(kvaddr)) { +- *paddr = VTOP(kvaddr); +- if (!verbose) +- return TRUE; +- } ++ if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { ++ if (VALID_MEMBER(thread_struct_cr3)) ++ pgd = (ulonglong *)machdep->get_task_pgd(tc->task); ++ else { ++ if (INVALID_MEMBER(task_struct_active_mm)) ++ error(FATAL, "no cr3 or active_mm?\n"); + +- pgd = (ulong *)vt->kernel_pgd[0]; ++ readmem(tc->task + OFFSET(task_struct_active_mm), ++ KVADDR, &active_mm, sizeof(void *), ++ "task active_mm contents", FAULT_ON_ERROR); ++ ++ if (!active_mm) ++ error(FATAL, ++ "no active_mm for this kernel thread\n"); ++ ++ readmem(active_mm + OFFSET(mm_struct_pgd), ++ KVADDR, &pgd, sizeof(long), ++ "mm_struct pgd", FAULT_ON_ERROR); ++ } ++ } else { ++ if ((mm = task_mm(tc->task, TRUE))) ++ pgd = (ulonglong *)(ULONG_PTR(tt->mm_struct + ++ OFFSET(mm_struct_pgd))); ++ else ++ readmem(tc->mm_struct + OFFSET(mm_struct_pgd), ++ KVADDR, &pgd, sizeof(long), "mm_struct pgd", ++ FAULT_ON_ERROR); ++ } + + if (verbose) + fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); + +- page_dir = pgd + (kvaddr >> PGDIR_SHIFT); ++ FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); + +- FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); +- pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); ++ offset = ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * ++ sizeof(ulonglong); ++ ++ page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); + + if (verbose) +- fprintf(fp, " PGD: %s => %lx\n", ++ fprintf(fp, " PGD: %s => %llx [machine]\n", + mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, +- MKSTR((ulong)page_dir)), pgd_pte); +- +- if (!pgd_pte) +- goto no_kpage; ++ MKSTR((ulong)pgd + offset)), ++ page_dir_entry); + +- if (pgd_pte & _PAGE_4M) { ++ if (!(page_dir_entry & _PAGE_PRESENT)) { ++ goto no_upage; ++ } ++ ++ page_middle = PAE_PAGEBASE(page_dir_entry); ++ pseudo_page_middle = xen_m2p(page_middle); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %s => %llx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)pgd + offset)), ++ pseudo_page_middle | PAGEOFFSET(page_dir_entry) | ++ (page_dir_entry & _PAGE_NX)); ++ ++ FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE()); ++ ++ offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); ++ ++ page_middle_entry = *((ulonglong 
*)&machdep->pmd[offset]); ++ ++ if (verbose) { ++ ull = page_middle + offset; ++ fprintf(fp, " PMD: %s => %llx [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull)), ++ page_middle_entry); ++ } ++ ++ if (!(page_middle_entry & _PAGE_PRESENT)) { ++ goto no_upage; ++ } ++ ++ if (page_middle_entry & _PAGE_PSE) { ++ error(FATAL, "_PAGE_PSE in an mfn not supported\n"); /* XXX */ ++ if (verbose) { ++ ull = PAE_PAGEBASE(page_middle_entry); ++ fprintf(fp, " PAGE: %s (2MB)\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull))); ++ x86_translate_pte(0, 0, page_middle_entry); ++ } ++ ++ physpage = PAE_PAGEBASE(page_middle_entry) + ++ (vaddr & ~_2MB_PAGE_MASK); ++ *paddr = physpage; ++ ++ return TRUE; ++ } ++ ++ page_table = PAE_PAGEBASE(page_middle_entry); ++ pseudo_page_table = xen_m2p(page_table); ++ ++ if (verbose) { ++ ull = page_middle + offset; ++ fprintf(fp, " PMD: %s => %llx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull)), ++ pseudo_page_table | PAGEOFFSET(page_middle_entry) | ++ (page_middle_entry & _PAGE_NX)); ++ } ++ ++ FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE()); ++ ++ offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * ++ sizeof(ulonglong); ++ ++ page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); ++ ++ if (verbose) { ++ ull = page_table + offset; ++ fprintf(fp, " PTE: %s => %llx [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull)), page_table_entry); ++ } ++ ++ if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { ++ *paddr = page_table_entry; ++ ++ if (page_table_entry && verbose) { ++ fprintf(fp, "\n"); ++ x86_translate_pte(0, 0, page_table_entry); ++ } ++ ++ goto no_upage; ++ } ++ ++ physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(vaddr); ++ pseudo_physpage = xen_m2p(physpage); ++ ++ if (verbose) { ++ ull = page_table + offset; ++ fprintf(fp, " PTE: %s => %llx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull)), ++ pseudo_physpage | PAGEOFFSET(page_table_entry) | ++ (page_table_entry & _PAGE_NX)); ++ } ++ ++ *paddr = pseudo_physpage + PAGEOFFSET(vaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&physpage))); ++ ++ pseudo_physpage += (PAGEOFFSET(vaddr) | ++ (page_table_entry & (_PAGE_NX|machdep->pageoffset))); ++ ++ fprintf(fp, " PAGE: %s\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&pseudo_physpage))); ++ ++ x86_translate_pte(0, 0, pseudo_physpage); ++ } ++ ++ return TRUE; ++ ++no_upage: ++ return FALSE; ++} ++ ++/* ++ * Translates a kernel virtual address to its physical address. cmd_vtop() ++ * sets the verbose flag so that the pte translation gets displayed; all ++ * other callers quietly accept the translation. 
++ */ ++ ++static int ++x86_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) ++{ ++ ulong *pgd; ++ ulong *page_dir; ++ ulong *page_middle; ++ ulong *page_table; ++ ulong pgd_pte; ++ ulong pmd_pte; ++ ulong pte; ++ char buf[BUFSIZE]; ++ ++ if (!IS_KVADDR(kvaddr)) ++ return FALSE; ++ ++ if (XEN_HYPER_MODE()) { ++ if (DIRECTMAP_VIRT_ADDR(kvaddr)) { ++ *paddr = kvaddr - DIRECTMAP_VIRT_START; ++ return TRUE; ++ } ++ pgd = (ulong *)symbol_value("idle_pg_table_l2"); ++ } else { ++ if (!vt->vmalloc_start) { ++ *paddr = VTOP(kvaddr); ++ return TRUE; ++ } ++ ++ if (!IS_VMALLOC_ADDR(kvaddr)) { ++ *paddr = VTOP(kvaddr); ++ if (!verbose) ++ return TRUE; ++ } ++ ++ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) ++ return (x86_kvtop_xen_wpt(tc, kvaddr, paddr, verbose)); ++ ++ pgd = (ulong *)vt->kernel_pgd[0]; ++ } ++ ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); ++ ++ page_dir = pgd + (kvaddr >> PGDIR_SHIFT); ++ ++ FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %s => %lx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)page_dir)), pgd_pte); ++ ++ if (!pgd_pte) ++ goto no_kpage; ++ ++ if (pgd_pte & _PAGE_4M) { + if (verbose) { + fprintf(fp, " PAGE: %s (4MB)\n\n", + mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, + MKSTR(NONPAE_PAGEBASE(pgd_pte)))); +- x86_translate_pte(0, 0, pgd_pte); ++ x86_translate_pte(pgd_pte, 0, 0); + } + + *paddr = NONPAE_PAGEBASE(pgd_pte) + (kvaddr & ~_4MB_PAGE_MASK); +@@ -2158,9 +2656,134 @@ + return FALSE; + } + ++static int ++x86_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) ++{ ++ ulong *pgd; ++ ulong *page_dir; ++ ulong *page_middle; ++ ulong *machine_page_table, *pseudo_page_table; ++ ulong pgd_pte, pseudo_pgd_pte; ++ ulong pmd_pte; ++ ulong machine_pte, pseudo_pte; ++ char buf[BUFSIZE]; ++ ++ pgd = (ulong *)vt->kernel_pgd[0]; ++ ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); ++ ++ page_dir = pgd + (kvaddr >> PGDIR_SHIFT); ++ ++ FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); ++ pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %s => %lx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)page_dir)), pgd_pte); ++ ++ if (!pgd_pte) ++ goto no_kpage; ++ ++ if (pgd_pte & _PAGE_4M) { ++ if (verbose) ++ fprintf(fp, " PAGE: %s (4MB) [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(NONPAE_PAGEBASE(pgd_pte)))); ++ ++ pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte)); ++ ++ if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) { ++ if (verbose) ++ fprintf(fp, " PAGE: page not available\n"); ++ *paddr = PADDR_NOT_AVAILABLE; ++ return FALSE; ++ } ++ ++ pseudo_pgd_pte |= PAGEOFFSET(pgd_pte); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s (4MB)\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte)))); ++ ++ x86_translate_pte(pseudo_pgd_pte, 0, 0); ++ } ++ ++ *paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) + ++ (kvaddr & ~_4MB_PAGE_MASK); ++ ++ return TRUE; ++ } ++ ++ page_middle = page_dir; ++ ++ FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE()); ++ pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); ++ ++ if (verbose) ++ fprintf(fp, " PMD: %s => %lx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)page_middle)), pmd_pte); ++ ++ if (!pmd_pte) ++ goto no_kpage; ++ ++ machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) 
+ ++ ((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2))); ++ ++ pseudo_page_table = (ulong *) ++ xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table)); ++ ++ FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE()); ++ machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table)); ++ ++ if (verbose) { ++ fprintf(fp, " PTE: %s [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)machine_page_table))); ++ ++ fprintf(fp, " PTE: %s => %lx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)pseudo_page_table + ++ PAGEOFFSET(machine_page_table))), machine_pte); ++ } ++ ++ if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { ++ if (machine_pte && verbose) { ++ fprintf(fp, "\n"); ++ x86_translate_pte(machine_pte, 0, 0); ++ } ++ goto no_kpage; ++ } ++ ++ pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte)); ++ pseudo_pte |= PAGEOFFSET(machine_pte); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(NONPAE_PAGEBASE(machine_pte)))); ++ ++ fprintf(fp, " PAGE: %s\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR(NONPAE_PAGEBASE(pseudo_pte)))); ++ ++ x86_translate_pte(pseudo_pte, 0, 0); ++ } ++ ++ *paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(kvaddr); ++ ++ return TRUE; ++ ++no_kpage: ++ return FALSE; ++} ++ + + static int +-x86_kvtop_pae(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) ++x86_kvtop_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) + { + ulonglong *pgd; + ulonglong page_dir_entry; +@@ -2177,18 +2800,29 @@ + if (!IS_KVADDR(kvaddr)) + return FALSE; + +- if (!vt->vmalloc_start) { +- *paddr = VTOP(kvaddr); +- return TRUE; +- } +- +- if (!IS_VMALLOC_ADDR(kvaddr)) { +- *paddr = VTOP(kvaddr); +- if (!verbose) ++ if (XEN_HYPER_MODE()) { ++ if (DIRECTMAP_VIRT_ADDR(kvaddr)) { ++ *paddr = kvaddr - DIRECTMAP_VIRT_START; + return TRUE; +- } ++ } ++ pgd = (ulonglong *)symbol_value("idle_pg_table_l3"); ++ } else { ++ if (!vt->vmalloc_start) { ++ *paddr = VTOP(kvaddr); ++ return TRUE; ++ } ++ ++ if (!IS_VMALLOC_ADDR(kvaddr)) { ++ *paddr = VTOP(kvaddr); ++ if (!verbose) ++ return TRUE; ++ } + +- pgd = (ulonglong *)vt->kernel_pgd[0]; ++ if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) ++ return (x86_kvtop_xen_wpt_PAE(tc, kvaddr, paddr, verbose)); ++ ++ pgd = (ulonglong *)vt->kernel_pgd[0]; ++ } + + if (verbose) + fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); +@@ -2212,7 +2846,7 @@ + + page_middle = PAE_PAGEBASE(page_dir_entry); + +- FILL_PMD(page_middle, PHYSADDR, PAGESIZE()); ++ FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE()); + + offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); + +@@ -2249,7 +2883,7 @@ + + page_table = PAE_PAGEBASE(page_middle_entry); + +- FILL_PTBL(page_table, PHYSADDR, PAGESIZE()); ++ FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE()); + + offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * + sizeof(ulonglong); +@@ -2277,9 +2911,10 @@ + *paddr = physpage; + + if (verbose) { ++ ull = PAE_PAGEBASE(page_table_entry); + fprintf(fp, " PAGE: %s\n\n", + mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, +- MKSTR(&physpage))); ++ MKSTR(&ull))); + x86_translate_pte(0, 0, page_table_entry); + } + +@@ -2289,11 +2924,170 @@ + return FALSE; + } + +-/* +- * Get the relevant page directory pointer from a task structure. 
+- */ +-static ulong +-x86_get_task_pgd(ulong task) ++static int ++x86_kvtop_xen_wpt_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) ++{ ++ ulonglong *pgd; ++ ulonglong page_dir_entry; ++ ulonglong page_middle, pseudo_page_middle; ++ ulonglong page_middle_entry; ++ ulonglong page_table, pseudo_page_table; ++ ulonglong page_table_entry; ++ ulonglong physpage, pseudo_physpage; ++ ulonglong ull; ++ ulong offset; ++ char buf[BUFSIZE]; ++ ++ pgd = (ulonglong *)vt->kernel_pgd[0]; ++ ++ if (verbose) ++ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); ++ ++ FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); ++ ++ offset = ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * ++ sizeof(ulonglong); ++ ++ page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %s => %llx [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)pgd + offset)), ++ page_dir_entry); ++ ++ if (!(page_dir_entry & _PAGE_PRESENT)) { ++ goto no_kpage; ++ } ++ ++ page_middle = PAE_PAGEBASE(page_dir_entry); ++ pseudo_page_middle = xen_m2p(page_middle); ++ ++ if (verbose) ++ fprintf(fp, " PGD: %s => %llx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, ++ MKSTR((ulong)pgd + offset)), ++ pseudo_page_middle | PAGEOFFSET(page_dir_entry) | ++ (page_dir_entry & _PAGE_NX)); ++ ++ FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE()); ++ ++ offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); ++ ++ page_middle_entry = *((ulonglong *)&machdep->pmd[offset]); ++ ++ if (verbose) { ++ ull = page_middle + offset; ++ fprintf(fp, " PMD: %s => %llx [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull)), ++ page_middle_entry); ++ } ++ ++ if (!(page_middle_entry & _PAGE_PRESENT)) { ++ goto no_kpage; ++ } ++ ++ if (page_middle_entry & _PAGE_PSE) { ++ error(FATAL, "_PAGE_PSE in an mfn not supported\n"); /* XXX */ ++ if (verbose) { ++ ull = PAE_PAGEBASE(page_middle_entry); ++ fprintf(fp, " PAGE: %s (2MB)\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull))); ++ x86_translate_pte(0, 0, page_middle_entry); ++ } ++ ++ physpage = PAE_PAGEBASE(page_middle_entry) + ++ (kvaddr & ~_2MB_PAGE_MASK); ++ *paddr = physpage; ++ ++ ++ return TRUE; ++ } ++ ++ page_table = PAE_PAGEBASE(page_middle_entry); ++ pseudo_page_table = xen_m2p(page_table); ++ ++ if (verbose) { ++ ull = page_middle + offset; ++ fprintf(fp, " PMD: %s => %llx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull)), ++ pseudo_page_table | PAGEOFFSET(page_middle_entry) | ++ (page_middle_entry & _PAGE_NX)); ++ } ++ ++ FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE()); ++ ++ offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * ++ sizeof(ulonglong); ++ ++ page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); ++ ++ if (verbose) { ++ ull = page_table + offset; ++ fprintf(fp, " PTE: %s => %llx [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull)), page_table_entry); ++ } ++ ++ if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { ++ if (page_table_entry && verbose) { ++ fprintf(fp, "\n"); ++ x86_translate_pte(0, 0, page_table_entry); ++ } ++ ++ goto no_kpage; ++ } ++ ++ physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(kvaddr); ++ pseudo_physpage = xen_m2p(physpage); ++ ++ if (verbose) { ++ ull = page_table + offset; ++ fprintf(fp, " PTE: %s => %llx\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&ull)), ++ pseudo_physpage | PAGEOFFSET(page_table_entry) | ++ 
(page_table_entry & _PAGE_NX)); ++ } ++ ++ *paddr = pseudo_physpage + PAGEOFFSET(kvaddr); ++ ++ if (verbose) { ++ fprintf(fp, " PAGE: %s [machine]\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&physpage))); ++ ++ pseudo_physpage += (PAGEOFFSET(kvaddr) | ++ (page_table_entry & _PAGE_NX)); ++ ++ fprintf(fp, " PAGE: %s\n\n", ++ mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, ++ MKSTR(&pseudo_physpage))); ++ ++ x86_translate_pte(0, 0, pseudo_physpage); ++ } ++ ++ return TRUE; ++ ++no_kpage: ++ return FALSE; ++} ++ ++void ++x86_clear_machdep_cache(void) ++{ ++ machdep->machspec->last_pmd_read_PAE = 0; ++ machdep->machspec->last_ptbl_read_PAE = 0; ++} ++ ++/* ++ * Get the relevant page directory pointer from a task structure. ++ */ ++static ulong ++x86_get_task_pgd(ulong task) + { + long offset; + ulong cr3; +@@ -2341,6 +3135,7 @@ + x86_dump_machdep_table(ulong arg) + { + int others; ++ ulong xen_wpt; + + switch (arg) { + default: +@@ -2355,8 +3150,6 @@ + fprintf(fp, "%sPAE", others++ ? "|" : ""); + if (machdep->flags & OMIT_FRAME_PTR) + fprintf(fp, "%sOMIT_FRAME_PTR", others++ ? "|" : ""); +- if (machdep->flags & SYSRQ) +- fprintf(fp, "%sSYSRQ", others++ ? "|" : ""); + if (machdep->flags & FRAMESIZE_DEBUG) + fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : ""); + fprintf(fp, ")\n"); +@@ -2376,12 +3169,17 @@ + fprintf(fp, " eframe_search: x86_eframe_search()\n"); + fprintf(fp, " back_trace: x86_back_trace_cmd()\n"); + fprintf(fp, "get_processor_speed: x86_processor_speed()\n"); ++ xen_wpt = XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES); + if (machdep->flags & PAE) { +- fprintf(fp, " uvtop: x86_uvtop_pae()\n"); +- fprintf(fp, " kvtop: x86_uvtop_pae()\n"); ++ fprintf(fp, " uvtop: %s()\n", ++ xen_wpt ? "x86_uvtop_xen_wpt_PAE" : "x86_uvtop_PAE"); ++ fprintf(fp, " kvtop: x86_kvtop_PAE()%s\n", ++ xen_wpt ? " -> x86_kvtop_xen_wpt_PAE()" : ""); + } else { +- fprintf(fp, " uvtop: x86_uvtop()\n"); +- fprintf(fp, " kvtop: x86_uvtop()\n"); ++ fprintf(fp, " uvtop: %s()\n", ++ xen_wpt ? "x86_uvtop_xen_wpt" : "x86_uvtop"); ++ fprintf(fp, " kvtop: x86_kvtop()%s\n", ++ xen_wpt ? " -> x86_kvtop_xen_wpt()" : ""); + } + fprintf(fp, " get_task_pgd: x86_get_task_pgd()\n"); + fprintf(fp, " dump_irq: generic_dump_irq()\n"); +@@ -2399,7 +3197,7 @@ + fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); + fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); + fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); +- fprintf(fp, " init_kernel_pgd: NULL\n"); ++ fprintf(fp, " init_kernel_pgd: x86_init_kernel_pgd()\n"); + fprintf(fp, " value_to_symbol: %s\n", + machdep->value_to_symbol == generic_machdep_value_to_symbol ? 
+ "generic_machdep_value_to_symbol()" : +@@ -2412,6 +3210,14 @@ + fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); + fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); + fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); ++ fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); ++ fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); ++ fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); ++ fprintf(fp, " xendump_p2m_create: x86_xendump_p2m_create()\n"); ++ fprintf(fp, " xendump_panic_task: x86_xendump_panic_task()\n"); ++ fprintf(fp, " get_xendump_regs: x86_get_xendump_regs()\n"); ++ fprintf(fp, "xen_kdump_p2m_create: x86_xen_kdump_p2m_create()\n"); ++ fprintf(fp, "clear_machdep_cache: x86_clear_machdep_cache()\n"); + fprintf(fp, " machspec: x86_machine_specific\n"); + fprintf(fp, " idt_table: %lx\n", + (ulong)machdep->machspec->idt_table); +@@ -2421,6 +3227,11 @@ + machdep->machspec->entry_tramp_end); + fprintf(fp, " entry_tramp_start_phys: %llx\n", + machdep->machspec->entry_tramp_start_phys); ++ fprintf(fp, " last_pmd_read_PAE: %llx\n", ++ machdep->machspec->last_pmd_read_PAE); ++ fprintf(fp, " last_ptbl_read_PAE: %llx\n", ++ machdep->machspec->last_ptbl_read_PAE); ++ + } + + /* +@@ -2732,6 +3543,9 @@ + switch (flag) + { + case READ_IDT_INIT: ++ if (!symbol_exists("idt_table")) ++ return NULL; ++ + if (!(idt = (ulong *)malloc(desc_struct_size))) { + error(WARNING, "cannot malloc idt_table\n\n"); + return NULL; +@@ -2779,6 +3593,10 @@ + break; + + case READ_IDT_RUNTIME: ++ if (!symbol_exists("idt_table")) ++ error(FATAL, ++ "idt_table does not exist on this architecture\n"); ++ + idt = (ulong *)GETBUF(desc_struct_size); + readmem(symbol_value("idt_table"), KVADDR, idt, + desc_struct_size, "idt_table", FAULT_ON_ERROR); +@@ -2942,7 +3760,11 @@ + !strstr(buf2, "+")) + sprintf(p1, buf1); + } +- } ++ } ++ else if (STREQ(argv[2], "ud2a")) ++ pc->curcmd_flags |= UD2A_INSTRUCTION; ++ else if (STREQ(argv[2], "(bad)")) ++ pc->curcmd_flags |= BAD_INSTRUCTION; + + if (CRASHDEBUG(1)) + console(" %s", inbuf); +@@ -2969,6 +3791,16 @@ + } + } + ++ if (XEN() && (count == 1) && symbol_exists("cpu_present_map")) { ++ ulong cpu_present_map; ++ ++ get_symbol_data("cpu_present_map", sizeof(ulong), ++ &cpu_present_map); ++ ++ cpucount = count_bits_long(cpu_present_map); ++ count = MAX(cpucount, kt->cpus); ++ } ++ + return count; + } + +@@ -3026,7 +3858,7 @@ + fprintf(fp, "(unknown)\n"); + fprintf(fp, " HZ: %d\n", machdep->hz); + fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); +- fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); ++// fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); + fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); + fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); + fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); +@@ -3092,31 +3924,31 @@ + * with the -fomit-frame-pointer flag. 
+ */ + #define PUSH_BP_MOV_ESP_BP 0xe58955 ++#define PUSH_BP_CLR_EAX_MOV_ESP_BP 0xe589c03155ULL + + static int + x86_omit_frame_pointer(void) + { +- ulong push_bp_mov_esp_bp[3]; ++ ulonglong push_bp_mov_esp_bp; ++ int i; ++ char *checkfuncs[] = {"sys_open", "sys_fork", "sys_read"}; + + if (pc->flags & KERNEL_DEBUG_QUERY) + return FALSE; + +- if (!readmem(symbol_value("sys_open"), KVADDR, &push_bp_mov_esp_bp[0], +- sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR)) +- return TRUE; +- if (!readmem(symbol_value("sys_fork"), KVADDR, &push_bp_mov_esp_bp[1], +- sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR)) +- return TRUE; +- if (!readmem(symbol_value("sys_read"), KVADDR, &push_bp_mov_esp_bp[2], +- sizeof(ulong), "x86_omit_frame_pointer", RETURN_ON_ERROR)) +- return TRUE; +- +- if (((push_bp_mov_esp_bp[0] & 0xffffff) == PUSH_BP_MOV_ESP_BP) && +- ((push_bp_mov_esp_bp[1] & 0xffffff) == PUSH_BP_MOV_ESP_BP) && +- ((push_bp_mov_esp_bp[2] & 0xffffff) == PUSH_BP_MOV_ESP_BP)) +- return FALSE; ++ for (i = 0; i < 2; i++) { ++ if (!readmem(symbol_value(checkfuncs[i]), KVADDR, ++ &push_bp_mov_esp_bp, sizeof(ulonglong), ++ "x86_omit_frame_pointer", RETURN_ON_ERROR)) ++ return TRUE; ++ if (!(((push_bp_mov_esp_bp & 0x0000ffffffULL) == ++ PUSH_BP_MOV_ESP_BP) || ++ ((push_bp_mov_esp_bp & 0xffffffffffULL) == ++ PUSH_BP_CLR_EAX_MOV_ESP_BP))) ++ return TRUE; ++ } + +- return TRUE; ++ return FALSE; + } + + /* +@@ -3207,4 +4039,922 @@ + + return ((sp = value_search(value, offset))); + } ++ ++static void ++x86_init_kernel_pgd(void) ++{ ++ int i; ++ ulong value; ++ ++ value = symbol_value("swapper_pg_dir"); ++ ++ if (XEN()) ++ get_symbol_data("swapper_pg_dir", sizeof(ulong), &value); ++ else ++ value = symbol_value("swapper_pg_dir"); ++ ++ for (i = 0; i < NR_CPUS; i++) ++ vt->kernel_pgd[i] = value; ++ ++} ++ ++static ulong ++xen_m2p_nonPAE(ulong machine) ++{ ++ ulonglong pseudo; ++ ++ pseudo = xen_m2p((ulonglong)machine); ++ ++ if (pseudo == XEN_MACHADDR_NOT_FOUND) ++ return XEN_MFN_NOT_FOUND; ++ ++ return ((ulong)pseudo); ++} ++ ++#include "netdump.h" ++ ++/* ++ * From the xen vmcore, create an index of mfns for each page that makes ++ * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. ++ */ ++ ++#define MAX_X86_FRAMES (16) ++#define MFNS_PER_FRAME (PAGESIZE()/sizeof(ulong)) ++ ++static int ++x86_xen_kdump_p2m_create(struct xen_kdump_data *xkd) ++{ ++ int i, j; ++ ulong kvaddr; ++ ulong *up; ++ ulonglong *ulp; ++ ulong frames; ++ ulong frame_mfn[MAX_X86_FRAMES] = { 0 }; ++ int mfns[MAX_X86_FRAMES] = { 0 }; ++ ++ /* ++ * Temporarily read physical (machine) addresses from vmcore by ++ * going directly to read_netdump() instead of via read_kdump(). 
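
Both the writable-page-table walkers and the p2m code that follows depend on translating machine frame numbers back into pseudo-physical ones. The sketch below illustrates only that relationship, using a made-up phys_to_machine_mapping array; crash's real xen_m2p() resolves the reverse mapping from the p2m frame list recovered out of the dumpfile, not from a toy table like this.

#include <stdio.h>

#define BAD_PFN (~0UL)

/* Toy phys_to_machine_mapping: index is the pfn, value is the mfn. */
static unsigned long p2m[] = { 0x1a0, 0x1a1, 0x3c7, 0x088 };

/* Reverse (machine-to-pseudo-physical) lookup. */
static unsigned long m2p(unsigned long mfn)
{
        unsigned long pfn;

        for (pfn = 0; pfn < sizeof(p2m)/sizeof(p2m[0]); pfn++)
                if (p2m[pfn] == mfn)
                        return pfn;
        return BAD_PFN;
}

int main(void)
{
        printf("mfn 0x3c7 -> pfn %lu\n", m2p(0x3c7));
        printf("mfn 0x999 -> pfn %#lx (not found)\n", m2p(0x999));
        return 0;
}
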
++ */ ++ pc->readmem = read_netdump; ++ ++ if (xkd->flags & KDUMP_CR3) ++ goto use_cr3; ++ ++ xkd->p2m_frames = 0; ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "x86_xen_kdump_p2m_create: p2m_mfn: %lx\n", ++ xkd->p2m_mfn); ++ ++ if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(), ++ "xen kdump p2m mfn page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump p2m mfn page\n"); ++ ++ if (CRASHDEBUG(1)) { ++ up = (ulong *)xkd->page; ++ for (i = 0; i < 4; i++) { ++ fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", ++ (ulong)((i * 4) * sizeof(ulong)), ++ *up, *(up+1), *(up+2), *(up+3)); ++ up += 4; ++ } ++ fprintf(fp, "\n"); ++ } ++ ++ for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_FRAMES; i++, up++) ++ frame_mfn[i] = *up; ++ ++ for (i = 0; i < MAX_X86_FRAMES; i++) { ++ if (!frame_mfn[i]) ++ break; ++ ++ if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page, ++ PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump p2m mfn list page\n"); ++ ++ for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++) ++ if (*up) ++ mfns[i]++; ++ ++ xkd->p2m_frames += mfns[i]; ++ ++ if (CRASHDEBUG(7)) { ++ up = (ulong *)xkd->page; ++ for (j = 0; j < 256; j++) { ++ fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", ++ (ulong)((j * 4) * sizeof(ulong)), ++ *up, *(up+1), *(up+2), *(up+3)); ++ up += 4; ++ } ++ } ++ } ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames); ++ ++ if ((xkd->p2m_mfn_frame_list = (ulong *) ++ malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) ++ error(FATAL, "cannot malloc p2m_frame_index_list"); ++ ++ for (i = 0, frames = xkd->p2m_frames; frames; i++) { ++ if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, ++ &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], ++ mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", ++ RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump p2m mfn list page\n"); ++ ++ frames -= mfns[i]; ++ } ++ ++ if (CRASHDEBUG(2)) { ++ for (i = 0; i < xkd->p2m_frames; i++) ++ fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); ++ fprintf(fp, "\n"); ++ } ++ ++ pc->readmem = read_kdump; ++ return TRUE; ++ ++use_cr3: ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "x86_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3); ++ ++ if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->pgd, PAGESIZE(), ++ "xen kdump cr3 page", RETURN_ON_ERROR)) ++ error(FATAL, "cannot read xen kdump cr3 page\n"); ++ ++ if (CRASHDEBUG(7)) { ++ fprintf(fp, "contents of page directory page:\n"); ++ ++ if (machdep->flags & PAE) { ++ ulp = (ulonglong *)machdep->pgd; ++ fprintf(fp, ++ "%016llx %016llx %016llx %016llx\n", ++ *ulp, *(ulp+1), *(ulp+2), *(ulp+3)); ++ } else { ++ up = (ulong *)machdep->pgd; ++ for (i = 0; i < 256; i++) { ++ fprintf(fp, ++ "%08lx: %08lx %08lx %08lx %08lx\n", ++ (ulong)((i * 4) * sizeof(ulong)), ++ *up, *(up+1), *(up+2), *(up+3)); ++ up += 4; ++ } ++ } ++ } ++ ++ kvaddr = symbol_value("max_pfn"); ++ if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) ++ return FALSE; ++ up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); ++ ++ xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ++ ((*up%(PAGESIZE()/sizeof(ulong))) ? 
1 : 0); ++ ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "max_pfn at %lx: %lx (%ld) -> %d p2m_frames\n", ++ kvaddr, *up, *up, xkd->p2m_frames); ++ ++ if ((xkd->p2m_mfn_frame_list = (ulong *) ++ malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) ++ error(FATAL, "cannot malloc p2m_frame_index_list"); ++ ++ kvaddr = symbol_value("phys_to_machine_mapping"); ++ if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) ++ return FALSE; ++ up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); ++ kvaddr = *up; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr); ++ ++ if (CRASHDEBUG(7)) { ++ fprintf(fp, "contents of first phys_to_machine_mapping page:\n"); ++ if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) ++ error(INFO, ++ "cannot read first phys_to_machine_mapping page\n"); ++ ++ up = (ulong *)xkd->page; ++ for (i = 0; i < 256; i++) { ++ fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", ++ (ulong)((i * 4) * sizeof(ulong)), ++ *up, *(up+1), *(up+2), *(up+3)); ++ up += 4; ++ } ++ } ++ ++ machdep->last_ptbl_read = BADADDR; ++ machdep->last_pmd_read = BADADDR; ++ ++ for (i = 0; i < xkd->p2m_frames; i++) { ++ xkd->p2m_mfn_frame_list[i] = x86_xen_kdump_page_mfn(kvaddr); ++ kvaddr += PAGESIZE(); ++ } ++ ++ if (CRASHDEBUG(1)) { ++ for (i = 0; i < xkd->p2m_frames; i++) ++ fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); ++ fprintf(fp, "\n"); ++ } ++ ++ machdep->last_ptbl_read = 0; ++ machdep->last_pmd_read = 0; ++ pc->readmem = read_kdump; ++ ++ return TRUE; ++} ++ ++/* ++ * Find the page associate with the kvaddr, and read its contents ++ * into the passed-in buffer. ++ */ ++static char * ++x86_xen_kdump_load_page(ulong kvaddr, char *pgbuf) ++{ ++ ulong *entry; ++ ulong *up; ++ ulong mfn; ++ ++ if (machdep->flags & PAE) ++ return x86_xen_kdump_load_page_PAE(kvaddr, pgbuf); ++ ++ up = (ulong *)machdep->pgd; ++ entry = up + (kvaddr >> PGDIR_SHIFT); ++ mfn = (*entry) >> PAGESHIFT(); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), ++ "xen kdump pgd entry", RETURN_ON_ERROR)) { ++ error(INFO, "cannot read/find pgd entry from cr3 page\n"); ++ return NULL; ++ } ++ ++ up = (ulong *)pgbuf; ++ entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1)); ++ mfn = (*entry) >> PAGESHIFT(); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), ++ "xen page table page", RETURN_ON_ERROR)) { ++ error(INFO, "cannot read/find page table page\n"); ++ return NULL; ++ } ++ ++ return pgbuf; ++} ++ ++static char * ++x86_xen_kdump_load_page_PAE(ulong kvaddr, char *pgbuf) ++{ ++ ulonglong *entry; ++ ulonglong *up; ++ ulong mfn; ++ ++ up = (ulonglong *)machdep->pgd; ++ entry = up + (kvaddr >> PGDIR_SHIFT); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), ++ "xen kdump pgd entry", RETURN_ON_ERROR)) { ++ error(INFO, "cannot read/find pgd entry from cr3 page\n"); ++ return NULL; ++ } ++ ++ up = (ulonglong *)pgbuf; ++ entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), ++ "xen kdump pmd entry", RETURN_ON_ERROR)) { ++ error(INFO, "cannot read/find pmd entry from pgd\n"); ++ return NULL; ++ } ++ ++ up = (ulonglong *)pgbuf; ++ entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), ++ "xen kdump page table page", RETURN_ON_ERROR)) { ++ error(INFO, "cannot read/find page table page from pmd\n"); ++ return NULL; ++ } ++ ++ return pgbuf; ++} ++ ++/* ++ * Return the mfn value 
associated with a virtual address. ++ */ ++static ulong ++x86_xen_kdump_page_mfn(ulong kvaddr) ++{ ++ ulong *entry; ++ ulong *up; ++ ulong mfn; ++ ++ if (machdep->flags & PAE) ++ return x86_xen_kdump_page_mfn_PAE(kvaddr); ++ ++ up = (ulong *)machdep->pgd; ++ entry = up + (kvaddr >> PGDIR_SHIFT); ++ mfn = (*entry) >> PAGESHIFT(); ++ ++ if ((mfn != machdep->last_ptbl_read) && ++ !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), ++ "xen kdump pgd entry", RETURN_ON_ERROR)) ++ error(FATAL, ++ "cannot read/find pgd entry from cr3 page (mfn: %lx)\n", ++ mfn); ++ machdep->last_ptbl_read = mfn; ++ ++ up = (ulong *)machdep->ptbl; ++ entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1)); ++ mfn = (*entry) >> PAGESHIFT(); ++ ++ return mfn; ++} ++ ++static ulong ++x86_xen_kdump_page_mfn_PAE(ulong kvaddr) ++{ ++ ulonglong *entry; ++ ulonglong *up; ++ ulong mfn; ++ ++ up = (ulonglong *)machdep->pgd; ++ entry = up + (kvaddr >> PGDIR_SHIFT); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ if ((mfn != machdep->last_pmd_read) && ++ !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), ++ "xen kdump pgd entry", RETURN_ON_ERROR)) ++ error(FATAL, ++ "cannot read/find pgd entry from cr3 page (mfn: %lx)\n", ++ mfn); ++ machdep->last_pmd_read = mfn; ++ ++ up = (ulonglong *)machdep->pmd; ++ entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ if ((mfn != machdep->last_ptbl_read) && ++ !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), ++ "xen kdump pmd entry", RETURN_ON_ERROR)) ++ error(FATAL, ++ "cannot read/find pmd entry from pgd (mfn: %lx)\n", ++ mfn); ++ machdep->last_ptbl_read = mfn; ++ ++ up = (ulonglong *)machdep->ptbl; ++ entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ return mfn; ++} ++ ++#include "xendump.h" ++ ++/* ++ * Create an index of mfns for each page that makes up the ++ * kernel's complete phys_to_machine_mapping[max_pfn] array. 
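
The index built below sizes p2m_frames by dividing max_pfn by the number of pfn-to-mfn entries that fit in one page and rounding up, which is exactly the "(*up/(PAGESIZE()/sizeof(ulong))) + ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0)" expression used in these routines. A worked example of that arithmetic; the page size and max_pfn values are illustrative only.

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;         /* PAGESIZE() on x86 */
        unsigned long entries_per_frame = page_size / sizeof(unsigned long);
        unsigned long max_pfn = 262500;         /* invented: ~1GB guest plus a remainder */

        /* Round up: a partially filled frame still has to be indexed. */
        unsigned long p2m_frames = max_pfn / entries_per_frame +
            ((max_pfn % entries_per_frame) ? 1 : 0);

        printf("%lu pfns, %lu entries per frame -> %lu p2m frames\n",
            max_pfn, entries_per_frame, p2m_frames);
        return 0;
}
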
++ */ ++static int ++x86_xendump_p2m_create(struct xendump_data *xd) ++{ ++ int i, idx; ++ ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset; ++ ulong *up; ++ ulonglong *ulp; ++ off_t offset; ++ ++ if (!symbol_exists("phys_to_machine_mapping")) { ++ xd->flags |= XC_CORE_NO_P2M; ++ return TRUE; ++ } ++ ++ if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) == ++ INVALID_OFFSET) ++ error(FATAL, ++ "cannot determine vcpu_guest_context.ctrlreg offset\n"); ++ else if (CRASHDEBUG(1)) ++ fprintf(xd->ofp, ++ "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n", ++ ctrlreg_offset); ++ ++ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + ++ (off_t)ctrlreg_offset; ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ error(FATAL, "cannot lseek to xch_ctxt_offset\n"); ++ ++ if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != ++ sizeof(ctrlreg)) ++ error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n"); ++ ++ mfn = (ctrlreg[3] >> PAGESHIFT()) | (ctrlreg[3] << (BITS()-PAGESHIFT())); ++ ++ for (i = 0; CRASHDEBUG(1) && (i < 8); i++) { ++ fprintf(xd->ofp, "ctrlreg[%d]: %lx", i, ctrlreg[i]); ++ if (i == 3) ++ fprintf(xd->ofp, " -> mfn: %lx", mfn); ++ fprintf(xd->ofp, "\n"); ++ } ++ ++ if (!xc_core_mfn_to_page(mfn, machdep->pgd)) ++ error(FATAL, "cannot read/find cr3 page\n"); ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(xd->ofp, "contents of page directory page:\n"); ++ ++ if (machdep->flags & PAE) { ++ ulp = (ulonglong *)machdep->pgd; ++ fprintf(xd->ofp, ++ "%016llx %016llx %016llx %016llx\n", ++ *ulp, *(ulp+1), *(ulp+2), *(ulp+3)); ++ } else { ++ up = (ulong *)machdep->pgd; ++ for (i = 0; i < 256; i++) { ++ fprintf(xd->ofp, ++ "%08lx: %08lx %08lx %08lx %08lx\n", ++ (ulong)((i * 4) * sizeof(ulong)), ++ *up, *(up+1), *(up+2), *(up+3)); ++ up += 4; ++ } ++ } ++ } ++ ++ kvaddr = symbol_value("max_pfn"); ++ if (!x86_xendump_load_page(kvaddr, xd->page)) ++ return FALSE; ++ up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); ++ if (CRASHDEBUG(1)) ++ fprintf(xd->ofp, "max_pfn: %lx\n", *up); ++ ++ xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ++ ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0); ++ ++ if ((xd->xc_core.p2m_frame_index_list = (ulong *) ++ malloc(xd->xc_core.p2m_frames * sizeof(int))) == NULL) ++ error(FATAL, "cannot malloc p2m_frame_index_list"); ++ ++ kvaddr = symbol_value("phys_to_machine_mapping"); ++ if (!x86_xendump_load_page(kvaddr, xd->page)) ++ return FALSE; ++ up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "phys_to_machine_mapping: %lx\n", *up); ++ ++ kvaddr = *up; ++ machdep->last_ptbl_read = BADADDR; ++ machdep->last_pmd_read = BADADDR; ++ ++ for (i = 0; i < xd->xc_core.p2m_frames; i++) { ++ if ((idx = x86_xendump_page_index(kvaddr)) == MFN_NOT_FOUND) ++ return FALSE; ++ xd->xc_core.p2m_frame_index_list[i] = idx; ++ kvaddr += PAGESIZE(); ++ } ++ ++ machdep->last_ptbl_read = 0; ++ machdep->last_pmd_read = 0; ++ ++ return TRUE; ++} ++ ++/* ++ * Find the page associate with the kvaddr, and read its contents ++ * into the passed-in buffer. 
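
The rotate applied to ctrlreg[3] in x86_xendump_p2m_create() above appears to correspond to Xen's public xen_pfn_to_cr3()/xen_cr3_to_pfn() packing for 32-bit guests, in which the high-order bits of a wide page-directory mfn are stored in the low 12 bits of the register image; that reading is an inference, not something this hunk states. A standalone round trip of the encoding, with an arbitrary mfn and locally defined helpers:

#include <stdio.h>

#define PAGESHIFT       12
#define BITS            32

static unsigned int pfn_to_cr3(unsigned int pfn)
{
        return (pfn << PAGESHIFT) | (pfn >> (BITS - PAGESHIFT));
}

static unsigned int cr3_to_pfn(unsigned int cr3)
{
        return (cr3 >> PAGESHIFT) | (cr3 << (BITS - PAGESHIFT));
}

int main(void)
{
        unsigned int mfn = 0x123456;    /* arbitrary page-directory mfn above 2^20 */
        unsigned int cr3 = pfn_to_cr3(mfn);

        printf("mfn %#x -> ctrlreg[3] image %#x -> mfn %#x\n",
            mfn, cr3, cr3_to_pfn(cr3));
        return 0;
}
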
++ */ ++static char * ++x86_xendump_load_page(ulong kvaddr, char *pgbuf) ++{ ++ ulong *entry; ++ ulong *up; ++ ulong mfn; ++ ++ if (machdep->flags & PAE) ++ return x86_xendump_load_page_PAE(kvaddr, pgbuf); ++ ++ up = (ulong *)machdep->pgd; ++ entry = up + (kvaddr >> PGDIR_SHIFT); ++ mfn = (*entry) >> PAGESHIFT(); ++ ++ if (!xc_core_mfn_to_page(mfn, pgbuf)) { ++ error(INFO, "cannot read/find pgd entry from cr3 page\n"); ++ return NULL; ++ } ++ ++ up = (ulong *)pgbuf; ++ entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1)); ++ mfn = (*entry) >> PAGESHIFT(); ++ ++ if (!xc_core_mfn_to_page(mfn, pgbuf)) { ++ error(INFO, "cannot read/find page table page\n"); ++ return NULL; ++ } ++ ++ return pgbuf; ++} ++ ++static char * ++x86_xendump_load_page_PAE(ulong kvaddr, char *pgbuf) ++{ ++ ulonglong *entry; ++ ulonglong *up; ++ ulong mfn; ++ ++ up = (ulonglong *)machdep->pgd; ++ entry = up + (kvaddr >> PGDIR_SHIFT); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ if (!xc_core_mfn_to_page(mfn, pgbuf)) { ++ error(INFO, "cannot read/find pgd entry from cr3 page\n"); ++ return NULL; ++ } ++ ++ up = (ulonglong *)pgbuf; ++ entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ if (!xc_core_mfn_to_page(mfn, pgbuf)) { ++ error(INFO, "cannot read/find pmd entry from pgd\n"); ++ return NULL; ++ } ++ ++ up = (ulonglong *)pgbuf; ++ entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ ++ if (!xc_core_mfn_to_page(mfn, pgbuf)) { ++ error(INFO, "cannot read/find page table page from pmd\n"); ++ return NULL; ++ } ++ ++ return pgbuf; ++} ++ ++/* ++ * Find the dumpfile page index associated with the kvaddr. ++ */ ++static int ++x86_xendump_page_index(ulong kvaddr) ++{ ++ int idx; ++ ulong *entry; ++ ulong *up; ++ ulong mfn; ++ ++ if (machdep->flags & PAE) ++ return x86_xendump_page_index_PAE(kvaddr); ++ ++ up = (ulong *)machdep->pgd; ++ entry = up + (kvaddr >> PGDIR_SHIFT); ++ mfn = (*entry) >> PAGESHIFT(); ++ if ((mfn != machdep->last_ptbl_read) && ++ !xc_core_mfn_to_page(mfn, machdep->ptbl)) { ++ error(INFO, "cannot read/find pgd entry from cr3 page\n"); ++ return MFN_NOT_FOUND; ++ } ++ machdep->last_ptbl_read = mfn; ++ ++ up = (ulong *)machdep->ptbl; ++ entry = up + ((kvaddr>>12) & (PTRS_PER_PTE-1)); ++ mfn = (*entry) >> PAGESHIFT(); ++ if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) ++ error(INFO, "cannot determine page index for %lx\n", ++ kvaddr); ++ ++ return idx; ++} ++ ++static int ++x86_xendump_page_index_PAE(ulong kvaddr) ++{ ++ int idx; ++ ulonglong *entry; ++ ulonglong *up; ++ ulong mfn; ++ ++ up = (ulonglong *)machdep->pgd; ++ entry = up + (kvaddr >> PGDIR_SHIFT); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ if ((mfn != machdep->last_pmd_read) && ++ !xc_core_mfn_to_page(mfn, machdep->pmd)) { ++ error(INFO, "cannot read/find pgd entry from cr3 page\n"); ++ return MFN_NOT_FOUND; ++ } ++ machdep->last_pmd_read = mfn; ++ ++ up = (ulonglong *)machdep->pmd; ++ entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ if ((mfn != machdep->last_ptbl_read) && ++ !xc_core_mfn_to_page(mfn, machdep->ptbl)) { ++ error(INFO, "cannot read/find pmd entry from pgd\n"); ++ return MFN_NOT_FOUND; ++ } ++ machdep->last_ptbl_read = mfn; ++ ++ up = (ulonglong *)machdep->ptbl; ++ entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); ++ mfn = (ulong)((*entry) >> PAGESHIFT()); ++ if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) ++ error(INFO, "cannot determine 
page index for %lx\n", ++ kvaddr); ++ ++ return idx; ++} ++ ++/* ++ * Pull the esp from the cpu_user_regs struct in the header ++ * turn it into a task, and match it with the active_set. ++ * Unfortunately, the registers in the vcpu_guest_context ++ * are not necessarily those of the panic task, so for now ++ * let get_active_set_panic_task() get the right task. ++ */ ++static ulong ++x86_xendump_panic_task(struct xendump_data *xd) ++{ ++ return NO_TASK; ++ ++#ifdef TO_BE_REVISITED ++ int i; ++ ulong esp; ++ off_t offset; ++ ulong task; ++ ++ ++ if (INVALID_MEMBER(vcpu_guest_context_user_regs) || ++ INVALID_MEMBER(cpu_user_regs_esp)) ++ return NO_TASK; ++ ++ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + ++ (off_t)OFFSET(vcpu_guest_context_user_regs) + ++ (off_t)OFFSET(cpu_user_regs_esp); ++ ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ return NO_TASK; ++ ++ if (read(xd->xfd, &esp, sizeof(ulong)) != sizeof(ulong)) ++ return NO_TASK; ++ ++ if (IS_KVADDR(esp) && (task = stkptr_to_task(esp))) { ++ ++ for (i = 0; i < NR_CPUS; i++) { ++ if (task == tt->active_set[i]) { ++ if (CRASHDEBUG(0)) ++ error(INFO, ++ "x86_xendump_panic_task: esp: %lx -> task: %lx\n", ++ esp, task); ++ return task; ++ } ++ } ++ ++ error(WARNING, ++ "x86_xendump_panic_task: esp: %lx -> task: %lx (not active)\n", ++ esp); ++ } ++ ++ return NO_TASK; ++#endif ++} ++ ++/* ++ * Because of an off-by-one vcpu bug in early xc_domain_dumpcore() ++ * instantiations, the registers in the vcpu_guest_context are not ++ * necessarily those of the panic task. If not, the eip/esp will be ++ * in stop_this_cpu, as a result of the IP interrupt in panic(), ++ * but the trace is strange because it comes out of the hypervisor ++ * at least if the vcpu had been idle. ++ */ ++static void ++x86_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *eip, ulong *esp) ++{ ++ ulong task, xeip, xesp; ++ off_t offset; ++ ++ if (INVALID_MEMBER(vcpu_guest_context_user_regs) || ++ INVALID_MEMBER(cpu_user_regs_eip) || ++ INVALID_MEMBER(cpu_user_regs_esp)) ++ goto generic; ++ ++ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + ++ (off_t)OFFSET(vcpu_guest_context_user_regs) + ++ (off_t)OFFSET(cpu_user_regs_esp); ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ goto generic; ++ if (read(xd->xfd, &xesp, sizeof(ulong)) != sizeof(ulong)) ++ goto generic; ++ ++ offset = (off_t)xd->xc_core.header.xch_ctxt_offset + ++ (off_t)OFFSET(vcpu_guest_context_user_regs) + ++ (off_t)OFFSET(cpu_user_regs_eip); ++ if (lseek(xd->xfd, offset, SEEK_SET) == -1) ++ goto generic; ++ if (read(xd->xfd, &xeip, sizeof(ulong)) != sizeof(ulong)) ++ goto generic; ++ ++ if (IS_KVADDR(xesp) && (task = stkptr_to_task(xesp)) && ++ (task == bt->task)) { ++ if (CRASHDEBUG(1)) ++ fprintf(xd->ofp, ++ "hooks from vcpu_guest_context: eip: %lx esp: %lx\n", xeip, xesp); ++ *eip = xeip; ++ *esp = xesp; ++ return; ++ } ++ ++generic: ++ return machdep->get_stack_frame(bt, eip, esp); ++} ++ ++/* for Xen Hypervisor analysis */ ++ ++static int ++x86_xenhyper_is_kvaddr(ulong addr) ++{ ++ if (machdep->flags & PAE) { ++ return (addr >= HYPERVISOR_VIRT_START_PAE); ++ } ++ return (addr >= HYPERVISOR_VIRT_START); ++} ++ ++static ulong ++x86_get_stackbase_hyper(ulong task) ++{ ++ struct xen_hyper_vcpu_context *vcc; ++ int pcpu; ++ ulong init_tss; ++ ulong esp, base; ++ char *buf; ++ ++ /* task means vcpu here */ ++ vcc = xen_hyper_vcpu_to_vcpu_context(task); ++ if (!vcc) ++ error(FATAL, "invalid vcpu\n"); ++ ++ pcpu = vcc->processor; ++ if (!xen_hyper_test_pcpu_id(pcpu)) { ++ 
error(FATAL, "invalid pcpu number\n"); ++ } ++ init_tss = symbol_value("init_tss"); ++ buf = GETBUF(XEN_HYPER_SIZE(tss_struct)); ++ init_tss += XEN_HYPER_SIZE(tss_struct) * pcpu; ++ if (!readmem(init_tss, KVADDR, buf, ++ XEN_HYPER_SIZE(tss_struct), "init_tss", RETURN_ON_ERROR)) { ++ error(FATAL, "cannot read init_tss.\n"); ++ } ++ esp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_esp0)); ++ FREEBUF(buf); ++ base = esp & (~(STACKSIZE() - 1)); ++ ++ return base; ++} ++ ++static ulong ++x86_get_stacktop_hyper(ulong task) ++{ ++ return x86_get_stackbase_hyper(task) + STACKSIZE(); ++} ++ ++static void ++x86_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp) ++{ ++ struct xen_hyper_vcpu_context *vcc; ++ int pcpu; ++ ulong *regs; ++ ulong esp, eip; ++ ++ /* task means vcpu here */ ++ vcc = xen_hyper_vcpu_to_vcpu_context(bt->task); ++ if (!vcc) ++ error(FATAL, "invalid vcpu\n"); ++ ++ pcpu = vcc->processor; ++ if (!xen_hyper_test_pcpu_id(pcpu)) { ++ error(FATAL, "invalid pcpu number\n"); ++ } ++ ++ if (bt->flags & BT_TEXT_SYMBOLS_ALL) { ++ if (spp) ++ *spp = x86_get_stackbase_hyper(bt->task); ++ if (pcp) ++ *pcp = 0; ++ bt->flags &= ~BT_TEXT_SYMBOLS_ALL; ++ return; ++ } ++ ++ regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr; ++ esp = XEN_HYPER_X86_NOTE_ESP(regs); ++ eip = XEN_HYPER_X86_NOTE_EIP(regs); ++ ++ if (spp) { ++ if (esp < x86_get_stackbase_hyper(bt->task) || ++ esp >= x86_get_stacktop_hyper(bt->task)) ++ *spp = x86_get_stackbase_hyper(bt->task); ++ else ++ *spp = esp; ++ } ++ if (pcp) { ++ if (is_kernel_text(eip)) ++ *pcp = eip; ++ else ++ *pcp = 0; ++ } ++} ++ ++static void ++x86_init_hyper(int when) ++{ ++ switch (when) ++ { ++ case PRE_SYMTAB: ++ machdep->verify_symbol = x86_verify_symbol; ++ if (pc->flags & KERNEL_DEBUG_QUERY) ++ return; ++ machdep->pagesize = memory_page_size(); ++ machdep->pageshift = ffs(machdep->pagesize) - 1; ++ machdep->pageoffset = machdep->pagesize - 1; ++ machdep->pagemask = ~((ulonglong)machdep->pageoffset); ++ machdep->stacksize = machdep->pagesize * 4; /* ODA: magic num */ ++ if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc pgd space."); ++ if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc pmd space."); ++ if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) ++ error(FATAL, "cannot malloc ptbl space."); ++ machdep->last_pgd_read = 0; ++ machdep->last_pmd_read = 0; ++ machdep->last_ptbl_read = 0; ++ machdep->machspec = &x86_machine_specific; /* some members used */ ++ break; ++ ++ case PRE_GDB: ++ if (symbol_exists("idle_pg_table_l3")) { ++ machdep->flags |= PAE; ++ PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL; ++ PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL; ++ PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL; ++ machdep->kvtop = x86_kvtop_PAE; ++ machdep->kvbase = HYPERVISOR_VIRT_START_PAE; ++ } else { ++ PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL; ++ PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL; ++ PTRS_PER_PGD = PTRS_PER_PGD_2LEVEL; ++ machdep->kvtop = x86_kvtop; ++ free(machdep->pmd); ++ machdep->pmd = machdep->pgd; ++ machdep->kvbase = HYPERVISOR_VIRT_START; ++ } ++ machdep->ptrs_per_pgd = PTRS_PER_PGD; ++ machdep->identity_map_base = DIRECTMAP_VIRT_START; ++ machdep->is_kvaddr = x86_xenhyper_is_kvaddr; ++ machdep->eframe_search = x86_eframe_search; ++ machdep->back_trace = x86_back_trace_cmd; ++ machdep->processor_speed = x86_processor_speed; /* ODA: check */ ++ machdep->dump_irq = generic_dump_irq; /* ODA: check */ ++ machdep->get_stack_frame = x86_get_stack_frame_hyper; ++ 
machdep->get_stackbase = x86_get_stackbase_hyper; ++ machdep->get_stacktop = x86_get_stacktop_hyper; ++ machdep->translate_pte = x86_translate_pte; ++ machdep->memory_size = xen_hyper_x86_memory_size; ++ machdep->dis_filter = x86_dis_filter; ++// machdep->cmd_mach = x86_cmd_mach; /* ODA: check */ ++ machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus; ++// machdep->line_number_hooks = x86_line_number_hooks; /* ODA: check */ ++ machdep->flags |= FRAMESIZE_DEBUG; /* ODA: check */ ++ machdep->value_to_symbol = generic_machdep_value_to_symbol; ++ machdep->clear_machdep_cache = x86_clear_machdep_cache; ++ ++ /* machdep table for Xen Hypervisor */ ++ xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init; ++ break; ++ ++ case POST_GDB: ++#if 0 /* ODA: need this ? */ ++ if (x86_omit_frame_pointer()) { ++ machdep->flags |= OMIT_FRAME_PTR; ++#endif ++ XEN_HYPER_STRUCT_SIZE_INIT(cpu_time, "cpu_time"); ++ XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); ++ XEN_HYPER_STRUCT_SIZE_INIT(tss_struct, "tss_struct"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(tss_struct_esp0, "tss_struct", "esp0"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_local_tsc_stamp, "cpu_time", "local_tsc_stamp"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_local_stamp, "cpu_time", "stime_local_stamp"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_master_stamp, "cpu_time", "stime_master_stamp"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_tsc_scale, "cpu_time", "tsc_scale"); ++ XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_calibration_timer, "cpu_time", "calibration_timer"); ++ if (symbol_exists("cpu_data")) { ++ xht->cpu_data_address = symbol_value("cpu_data"); ++ } ++/* KAK Can this be calculated? */ ++ if (!machdep->hz) { ++ machdep->hz = XEN_HYPER_HZ; ++ } ++ break; ++ ++ case POST_INIT: ++ break; ++ } ++} ++ + #endif /* X86 */ +--- crash/netdump.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/netdump.h 2008-01-04 09:42:08.000000000 -0500 +@@ -24,3 +24,95 @@ + + #define NT_TASKSTRUCT 4 + #define NT_DISKDUMP 0x70000001 ++ ++#ifdef NOTDEF ++/* ++ * Note: Based upon the original, abandoned, proposal for ++ * its contents -- keep around for potential future use. ++ */ ++#ifndef NT_KDUMPINFO ++#define NT_KDUMPINFO 7 ++#endif ++ ++#endif /* NOTDEF */ ++ ++struct pt_load_segment { ++ off_t file_offset; ++ physaddr_t phys_start; ++ physaddr_t phys_end; ++ physaddr_t zero_fill; ++}; ++ ++struct vmcore_data { ++ ulong flags; ++ int ndfd; ++ FILE *ofp; ++ uint header_size; ++ char *elf_header; ++ uint num_pt_load_segments; ++ struct pt_load_segment *pt_load_segments; ++ Elf32_Ehdr *elf32; ++ Elf32_Phdr *notes32; ++ Elf32_Phdr *load32; ++ Elf64_Ehdr *elf64; ++ Elf64_Phdr *notes64; ++ Elf64_Phdr *load64; ++ void *nt_prstatus; ++ void *nt_prpsinfo; ++ void *nt_taskstruct; ++ ulong task_struct; ++ uint page_size; ++ ulong switch_stack; ++ uint num_prstatus_notes; ++ void *nt_prstatus_percpu[NR_CPUS]; ++ struct xen_kdump_data *xen_kdump_data; ++}; ++ ++/* ++ * ELF note types for Xen dom0/hypervisor kdumps. ++ * The comments below are from xen/include/public/elfnote.h. ++ */ ++ ++/* ++ * System information exported through crash notes. ++ * ++ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO ++ * note in case of a system crash. This note will contain various ++ * information about the system, see xen/include/xen/elfcore.h. ++ */ ++#define XEN_ELFNOTE_CRASH_INFO 0x1000001 ++ ++/* ++ * System registers exported through crash notes. ++ * ++ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS ++ * note per cpu in case of a system crash. 
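
The XEN_ELFNOTE_CRASH_* types declared in this header are ordinary ELF notes carried in the dumpfile's PT_NOTE data. Purely as a sketch of how such a note stream can be scanned, and not a description of crash's actual note parser, the loop below walks Elf32_Nhdr records with the usual 4-byte padding of name and descriptor; the buffer assembled in main() is fabricated for the example.

#include <elf.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define XEN_ELFNOTE_CRASH_INFO  0x1000001

/* Return a pointer to the descriptor of the first note of the given type. */
static const unsigned char *find_note(const unsigned char *buf, size_t len,
                                      Elf32_Word type)
{
        size_t off = 0;

        while (off + sizeof(Elf32_Nhdr) <= len) {
                const Elf32_Nhdr *nhdr = (const Elf32_Nhdr *)(buf + off);
                size_t name_end = sizeof(Elf32_Nhdr) + ((nhdr->n_namesz + 3) & ~3U);
                size_t next = name_end + ((nhdr->n_descsz + 3) & ~3U);

                if (off + next > len)
                        break;
                if (nhdr->n_type == type)
                        return buf + off + name_end;
                off += next;
        }
        return NULL;
}

int main(void)
{
        unsigned char buf[64];
        const unsigned char *desc;
        Elf32_Nhdr nhdr = { 4, 8, XEN_ELFNOTE_CRASH_INFO };

        memset(buf, 0, sizeof(buf));
        memcpy(buf, &nhdr, sizeof(nhdr));
        memcpy(buf + sizeof(nhdr), "Xen", 4);

        desc = find_note(buf, sizeof(buf), XEN_ELFNOTE_CRASH_INFO);
        if (desc)
                printf("XEN_ELFNOTE_CRASH_INFO descriptor at offset %td\n", desc - buf);
        return 0;
}
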
This note is architecture ++ * specific and will contain registers not saved in the "CORE" note. ++ * See xen/include/xen/elfcore.h for more information. ++ */ ++#define XEN_ELFNOTE_CRASH_REGS 0x1000002 ++ ++ ++/* ++ * For (temporary) backwards compatibility. ++ */ ++#define NT_XEN_KDUMP_CR3 0x10000001 ++ ++struct xen_kdump_data { ++ ulong flags; ++ ulong cr3; ++ ulong p2m_mfn; ++ char *page; ++ ulong last_mfn_read; ++ ulong last_pmd_read; ++ ulong cache_hits; ++ ulong accesses; ++ int p2m_frames; ++ ulong *p2m_mfn_frame_list; ++}; ++ ++#define KDUMP_P2M_INIT (0x1) ++#define KDUMP_CR3 (0x2) ++#define KDUMP_MFN_LIST (0x4) ++ ++#define P2M_FAILURE ((physaddr_t)(0xffffffffffffffffLL)) +--- crash/xen_hyper_global_data.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/xen_hyper_global_data.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,400 @@ +/* + * xen_hyper_global_data.c @@ -44484,3550 +78845,222 @@ +struct task_context fake_tc = { 0 }; + +#endif ---- crash/xen_hyper_dump_tables.c.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/xen_hyper_dump_tables.c 2007-08-23 17:02:54.000000000 -0400 -@@ -0,0 +1,948 @@ -+/* -+ * xen_hyper_dump_tables.c -+ * -+ * Portions Copyright (C) 2006-2007 Fujitsu Limited -+ * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. -+ * -+ * Authors: Itsuro Oda -+ * Fumihiko Kakuma -+ * -+ * This file is part of Xencrash. -+ * -+ * Xencrash is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation (version 2 of the License). -+ * -+ * Xencrash is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with Xencrash; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -+ */ -+ -+#include "defs.h" -+ -+#ifdef XEN_HYPERVISOR_ARCH -+#include "xen_hyper_defs.h" -+ -+static void xen_hyper_dump_xen_hyper_table(int verbose); -+static void xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose); -+static void xen_hyper_dump_xen_hyper_domain_table(int verbose); -+static void xen_hyper_dump_xen_hyper_vcpu_table(int verbose); -+static void xen_hyper_dump_xen_hyper_pcpu_table(int verbose); -+static void xen_hyper_dump_xen_hyper_sched_table(int verbose); -+static void xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct); -+static void xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct); -+ -+static void xen_hyper_dump_mem(void *mem, ulong len, int dsz); -+ -+/* -+ * Get help for a command, to dump an internal table, or the GNU public -+ * license copying/warranty information. 
-+ */ -+void -+xen_hyper_cmd_help(void) +--- crash/lkcd_fix_mem.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_fix_mem.c 2008-01-04 09:42:08.000000000 -0500 +@@ -20,21 +20,13 @@ + + #define LKCD_COMMON + #include "defs.h" +-#include "lkcd_fix_mem.h" ++#include "lkcd_dump_v8.h" + + static int fix_addr(dump_header_asm_t *); + + int +-fix_addr_v8(int fd) ++fix_addr_v8(dump_header_asm_t *dha) + { +- static dump_header_asm_t dump_header_asm_v8 = { 0 }; +- dump_header_asm_t *dha; +- dha = &dump_header_asm_v8; +- +- if (read(lkcd->fd, dha, sizeof(dump_header_asm_t)) != +- sizeof(dump_header_asm_t)) +- return -1; +- + fix_addr(dha); + + return 0; +@@ -59,14 +51,6 @@ + static int + fix_addr(dump_header_asm_t *dha) + { +- +- +- if (dha->dha_header_size != sizeof(dump_header_asm_t)) { +- error(INFO, "LKCD machine specific dump header doesn't match crash version\n"); +- error(INFO, "traceback of currently executing threads may not work\n\n"); +- } +- +- + lkcd->dump_header_asm = dha; + + +@@ -83,7 +67,7 @@ + if (dha->dha_stack[i] && dha->dha_smp_current_task[i]) { + lkcd->fix_addr[i].task = (ulong)dha->dha_smp_current_task[i]; + lkcd->fix_addr[i].saddr = (ulong)dha->dha_stack[i]; +- lkcd->fix_addr[i].sw = (ulong)dha->dha_switch_stack[i]; ++ lkcd->fix_addr[i].sw = (ulong)dha->dha_stack_ptr[i]; + /* remember the highest non-zero entry */ + lkcd->fix_addr_num = i + 1; + } else { +@@ -113,4 +97,14 @@ + return 0; + } + ++int lkcd_get_kernel_start_v8(ulong *addr) +{ -+ int c; -+ int oflag; ++ if (!addr) ++ return 0; + -+ oflag = 0; ++ *addr = ((dump_header_asm_t *)lkcd->dump_header_asm)->dha_kernel_addr; + -+ while ((c = getopt(argcnt, args, -+ "aBbcDgHhM:mnOopszX:")) != EOF) { -+ switch(c) -+ { -+ case 'a': -+ dump_alias_data(); -+ return; -+ case 'b': -+ dump_shared_bufs(); -+ return; -+ case 'B': -+ dump_build_data(); -+ return; -+ case 'c': -+ dump_numargs_cache(); -+ return; -+ case 'n': -+ case 'D': -+ dumpfile_memory(DUMPFILE_MEM_DUMP); -+ return; -+ case 'g': -+ dump_gdb_data(); -+ return; -+ case 'H': -+ dump_hash_table(VERBOSE); -+ return; -+ case 'h': -+ dump_hash_table(!VERBOSE); -+ return; -+ case 'M': -+ dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL)); -+ return; -+ case 'm': -+ dump_machdep_table(0); -+ return; -+ case 'O': -+ dump_offset_table(NULL, TRUE); -+ return; -+ case 'o': -+ oflag = TRUE; ++ return 1; ++} ++ + #endif // IA64 +--- crash/dev.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/dev.c 2008-01-04 09:42:08.000000000 -0500 +@@ -91,13 +91,13 @@ + switch(c) + { + case 'i': +- if (machine_type("X86") || machine_type("S390X")) ++ if (machine_type("S390X")) + option_not_supported(c); + do_io(); + return; + + case 'p': +- if (machine_type("X86") || machine_type("S390X")) ++ if (machine_type("S390X")) + option_not_supported(c); + do_pci(); + return; +@@ -141,6 +141,8 @@ + char *char_device_struct_buf; + ulong next, savenext, name, fops; + int major; ++ int name_typecode; ++ size_t name_size; + + if (!symbol_exists("chrdevs")) + error(FATAL, "chrdevs: symbol does not exist\n"); +@@ -188,6 +190,8 @@ + + char_device_struct_buf = GETBUF(SIZE(char_device_struct)); + cdp = (ulong *)&chrdevs[0]; ++ name_typecode = MEMBER_TYPE("char_device_struct", "name"); ++ name_size = (size_t)MEMBER_SIZE("char_device_struct", "name"); + + for (i = 0; i < MAX_DEV; i++, cdp++) { + if (!(*cdp)) +@@ -201,11 +205,18 @@ + OFFSET(char_device_struct_next)); + name = ULONG(char_device_struct_buf + + OFFSET(char_device_struct_name)); +- if (name) { +- if (!read_string(name, buf, BUFSIZE-1)) 
+- sprintf(buf, "(unknown)"); +- } else +- sprintf(buf, "(unknown)"); ++ switch (name_typecode) ++ { ++ case TYPE_CODE_ARRAY: ++ snprintf(buf, name_size, char_device_struct_buf + ++ OFFSET(char_device_struct_name)); + break; -+ case 'p': -+ dump_program_context(); -+ return; -+ case 's': -+ dump_symbol_table(); -+ return; -+ case 'X': -+ if (strlen(optarg) != 3) { -+ argerrs++; ++ case TYPE_CODE_PTR: ++ default: ++ if (!name || !read_string(name, buf, BUFSIZE-1)) ++ break; ++ } ++ + fops = ULONG(char_device_struct_buf + + OFFSET(char_device_struct_fops)); + major = INT(char_device_struct_buf + +@@ -243,11 +254,19 @@ + OFFSET(char_device_struct_next)); + name = ULONG(char_device_struct_buf + + OFFSET(char_device_struct_name)); +- if (name) { +- if (!read_string(name, buf, BUFSIZE-1)) +- sprintf(buf, "(unknown)"); +- } else +- sprintf(buf, "(unknown)"); ++ switch (name_typecode) ++ { ++ case TYPE_CODE_ARRAY: ++ snprintf(buf, name_size, char_device_struct_buf + ++ OFFSET(char_device_struct_name)); ++ break; ++ case TYPE_CODE_PTR: ++ default: ++ if (!name || !read_string(name, buf, BUFSIZE-1)) ++ sprintf(buf, "(unknown)"); + break; + } -+ if (!strncmp("Xen", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_table(VERBOSE); -+ else if (!strncmp("xen", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_table(!VERBOSE); -+ else if (!strncmp("Dmp", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_dumpinfo_table(VERBOSE); -+ else if (!strncmp("dmp", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_dumpinfo_table(!VERBOSE); -+ else if (!strncmp("Dom", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_domain_table(VERBOSE); -+ else if (!strncmp("dom", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_domain_table(!VERBOSE); -+ else if (!strncmp("Vcp", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_vcpu_table(VERBOSE); -+ else if (!strncmp("vcp", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_vcpu_table(!VERBOSE); -+ else if (!strncmp("Pcp", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_pcpu_table(VERBOSE); -+ else if (!strncmp("pcp", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_pcpu_table(!VERBOSE); -+ else if (!strncmp("Sch", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_sched_table(VERBOSE); -+ else if (!strncmp("sch", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_sched_table(!VERBOSE); -+ else if (!strncmp("siz", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_size_table(NULL, TRUE); -+ else if (!strncmp("ofs", optarg, strlen(optarg))) -+ xen_hyper_dump_xen_hyper_offset_table(NULL, TRUE); -+ else { -+ argerrs++; -+ break; -+ } -+ return; -+ case 'z': -+ fprintf(fp, "help options:\n"); -+ fprintf(fp, " -a - alias data\n"); -+ fprintf(fp, " -b - shared buffer data\n"); -+ fprintf(fp, " -B - build data\n"); -+ fprintf(fp, " -c - numargs cache\n"); -+ fprintf(fp, " -M machine specific\n"); -+ fprintf(fp, " -m - machdep_table\n"); -+ fprintf(fp, " -s - symbol table data\n"); -+ fprintf(fp, " -o - offset_table and size_table\n"); -+ fprintf(fp, " -p - program_context\n"); -+ fprintf(fp, " -h - hash_table data\n"); -+ fprintf(fp, " -H - hash_table data (verbose)\n"); -+ fprintf(fp, " -X Xen - xen table data (verbose)\n"); -+ fprintf(fp, " -X xen - xen table data\n"); -+ fprintf(fp, " -X Dmp - dumpinfo table data (verbose)\n"); -+ fprintf(fp, " -X dmp - dumpinfo table data\n"); -+ fprintf(fp, " -X Dom - domain table data (verbose)\n"); -+ fprintf(fp, " -X dom - domain table data\n"); -+ fprintf(fp, " -X Vcp - vcpu table data (verbose)\n"); 
-+ fprintf(fp, " -X vcp - vcpu table data\n"); -+ fprintf(fp, " -X Pcp - pcpu table data (verbose)\n"); -+ fprintf(fp, " -X pcp - pcpu table data\n"); -+ fprintf(fp, " -X Sch - schedule table data (verbose)\n"); -+ fprintf(fp, " -X sch - schedule table data\n"); -+ fprintf(fp, " -X siz - size table data\n"); -+ fprintf(fp, " -X ofs - offset table data\n"); -+ return; -+ default: -+ argerrs++; -+ break; -+ } -+ } + -+ if (argerrs) -+ cmd_usage(pc->curcmd, COMPLETE_HELP); -+ -+ if (!args[optind]) { -+ if (oflag) -+ dump_offset_table(NULL, FALSE); -+ else -+ display_help_screen(""); -+ return; -+ } -+ -+ do { -+ if (oflag) -+ dump_offset_table(args[optind], FALSE); -+ else -+ cmd_usage(args[optind], COMPLETE_HELP); -+ optind++; -+ } while (args[optind]); -+} -+ -+/* -+ * "help -x xen" output -+ */ -+static void -+xen_hyper_dump_xen_hyper_table(int verbose) -+{ -+ char buf[XEN_HYPER_CMD_BUFSIZE]; -+ uint cpuid; -+ int len, flag, i; -+ -+ len = 14; -+ flag = XEN_HYPER_PRI_R; -+ -+ XEN_HYPER_PRI(fp, len, "cpu_data_address: ", buf, flag, -+ (buf, "%lu\n", xht->cpu_data_address)); -+ XEN_HYPER_PRI(fp, len, "cpu_curr: ", buf, flag, -+ (buf, "%u\n", xht->cpu_curr)); -+ XEN_HYPER_PRI(fp, len, "max_cpus: ", buf, flag, -+ (buf, "%u\n", xht->max_cpus)); -+ XEN_HYPER_PRI(fp, len, "cores: ", buf, flag, -+ (buf, "%d\n", xht->cores)); -+ XEN_HYPER_PRI(fp, len, "pcpus: ", buf, flag, -+ (buf, "%d\n", xht->pcpus)); -+ XEN_HYPER_PRI(fp, len, "vcpus: ", buf, flag, -+ (buf, "%d\n", xht->vcpus)); -+ XEN_HYPER_PRI(fp, len, "domains: ", buf, flag, -+ (buf, "%d\n", xht->domains)); -+ XEN_HYPER_PRI(fp, len, "sys_pages: ", buf, flag, -+ (buf, "%lu\n", xht->sys_pages)); -+ XEN_HYPER_PRI(fp, len, "crashing_cpu: ", buf, flag, -+ (buf, "%d\n", xht->crashing_cpu)); -+ XEN_HYPER_PRI(fp, len, "crashing_vcc: ", buf, flag, -+ (buf, "%p\n", xht->crashing_vcc)); -+ XEN_HYPER_PRI(fp, len, "max_page: ", buf, flag, -+ (buf, "%lu\n", xht->max_page)); -+ XEN_HYPER_PRI(fp, len, "total_pages: ", buf, flag, -+ (buf, "%lu\n", xht->total_pages)); -+ XEN_HYPER_PRI(fp, len, "cpumask: ", buf, flag, -+ (buf, "%p\n", xht->cpumask)); -+ if (verbose && xht->cpumask) { -+ xen_hyper_dump_mem(xht->cpumask, -+ XEN_HYPER_SIZE(cpumask_t), sizeof(long)); -+ } -+ XEN_HYPER_PRI(fp, len, "cpu_idxs: ", buf, flag, -+ (buf, "%p\n", xht->cpu_idxs)); -+ if (verbose) { -+ for_cpu_indexes(i, cpuid) -+ fprintf(fp, "%03d : %d\n", i, cpuid); -+ } -+} -+ -+/* -+ * "help -x dmp" output -+ */ -+static void -+xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose) -+{ -+ char buf[XEN_HYPER_CMD_BUFSIZE]; -+ int len, flag; -+ -+ len = 25; -+ flag = XEN_HYPER_PRI_R; -+ -+ XEN_HYPER_PRI(fp, len, "note_ver: ", buf, flag, -+ (buf, "%u\n", xhdit->note_ver)); -+ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, -+ (buf, "%p\n", xhdit->context_array)); -+ if (verbose && xhdit->context_array) { -+ xen_hyper_dump_mem((long *)xhdit->context_array, -+ sizeof(struct xen_hyper_dumpinfo_context) * -+ XEN_HYPER_MAX_CPUS(), sizeof(long)); -+ } -+ XEN_HYPER_PRI(fp, len, "context_xen_core_array: ", buf, flag, -+ (buf, "%p\n", xhdit->context_xen_core_array)); -+ if (verbose && xhdit->context_xen_core_array) { -+ xen_hyper_dump_mem((long *)xhdit->context_xen_core_array, -+ sizeof(struct xen_hyper_dumpinfo_context_xen_core) * -+ XEN_HYPER_MAX_CPUS(), sizeof(long)); -+ } -+ XEN_HYPER_PRI_CONST(fp, len, "context_xen_info: ", flag|XEN_HYPER_PRI_LF); -+ XEN_HYPER_PRI(fp, len, "note: ", buf, flag, -+ (buf, "%lx\n", xhdit->context_xen_info.note)); -+ XEN_HYPER_PRI(fp, len, "pcpu_id: ", buf, flag, 
-+ (buf, "%u\n", xhdit->context_xen_info.pcpu_id)); -+ XEN_HYPER_PRI(fp, len, "crash_xen_info_ptr: ", buf, flag, -+ (buf, "%p\n", xhdit->context_xen_info.crash_xen_info_ptr)); -+ XEN_HYPER_PRI(fp, len, "crash_note_core_array: ", buf, flag, -+ (buf, "%p\n", xhdit->crash_note_core_array)); -+ if (verbose && xhdit->crash_note_core_array) { -+ xen_hyper_dump_mem((long *)xhdit->crash_note_core_array, -+ xhdit->core_size * XEN_HYPER_NR_PCPUS(), -+ sizeof(long)); -+ } -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_core_array: ", buf, flag, -+ (buf, "%p\n", xhdit->crash_note_xen_core_array)); -+ if (verbose && xhdit->crash_note_xen_core_array) { -+ xen_hyper_dump_mem( -+ xhdit->crash_note_xen_core_array, -+ xhdit->xen_core_size * XEN_HYPER_NR_PCPUS(), -+ sizeof(long)); -+ } -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_info_ptr: ", buf, flag, -+ (buf, "%p\n", xhdit->crash_note_xen_info_ptr)); -+ if (verbose && xhdit->crash_note_xen_info_ptr) { -+ xen_hyper_dump_mem( -+ xhdit->crash_note_xen_info_ptr, -+ xhdit->xen_info_size, sizeof(long)); -+ } -+ XEN_HYPER_PRI(fp, len, "xen_info_cpu: ", buf, flag, -+ (buf, "%u\n", xhdit->xen_info_cpu)); -+ XEN_HYPER_PRI(fp, len, "note_size: ", buf, flag, -+ (buf, "%u\n", xhdit->note_size)); -+ XEN_HYPER_PRI(fp, len, "core_offset: ", buf, flag, -+ (buf, "%u\n", xhdit->core_offset)); -+ XEN_HYPER_PRI(fp, len, "core_size: ", buf, flag, -+ (buf, "%u\n", xhdit->core_size)); -+ XEN_HYPER_PRI(fp, len, "xen_core_offset: ", buf, flag, -+ (buf, "%u\n", xhdit->xen_core_offset)); -+ XEN_HYPER_PRI(fp, len, "xen_core_size: ", buf, flag, -+ (buf, "%u\n", xhdit->xen_core_size)); -+ XEN_HYPER_PRI(fp, len, "xen_info_offset: ", buf, flag, -+ (buf, "%u\n", xhdit->xen_info_offset)); -+ XEN_HYPER_PRI(fp, len, "xen_info_size: ", buf, flag, -+ (buf, "%u\n", xhdit->xen_info_size)); -+} -+ -+/* -+ * "help -x dom" output -+ */ -+static void -+xen_hyper_dump_xen_hyper_domain_table(int verbose) -+{ -+ char buf[XEN_HYPER_CMD_BUFSIZE]; -+ struct xen_hyper_domain_context *dcca; -+ int len, flag, i; -+ -+ len = 22; -+ flag = XEN_HYPER_PRI_R; -+ -+ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, -+ (buf, "%p\n", xhdt->context_array)); -+ if (verbose) { -+ char buf1[XEN_HYPER_CMD_BUFSIZE]; -+ int j; -+ for (i = 0, dcca = xhdt->context_array; -+ i < xhdt->context_array_cnt; i++, dcca++) { -+ snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", i); -+ XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); -+ XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, -+ (buf, "%lx\n", dcca->domain)); -+ XEN_HYPER_PRI(fp, len, "domain_id: ", buf, flag, -+ (buf, "%d\n", dcca->domain_id)); -+ XEN_HYPER_PRI(fp, len, "tot_pages: ", buf, flag, -+ (buf, "%x\n", dcca->tot_pages)); -+ XEN_HYPER_PRI(fp, len, "max_pages: ", buf, flag, -+ (buf, "%x\n", dcca->max_pages)); -+ XEN_HYPER_PRI(fp, len, "xenheap_pages: ", buf, flag, -+ (buf, "%x\n", dcca->xenheap_pages)); -+ XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, -+ (buf, "%lx\n", dcca->shared_info)); -+ XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, -+ (buf, "%lx\n", dcca->sched_priv)); -+ XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, -+ (buf, "%lx\n", dcca->next_in_list)); -+ XEN_HYPER_PRI(fp, len, "domain_flags: ", buf, flag, -+ (buf, "%lx\n", dcca->domain_flags)); -+ XEN_HYPER_PRI(fp, len, "evtchn: ", buf, flag, -+ (buf, "%lx\n", dcca->evtchn)); -+ XEN_HYPER_PRI(fp, len, "vcpu_cnt: ", buf, flag, -+ (buf, "%d\n", dcca->vcpu_cnt)); -+ for (j = 0; j < XEN_HYPER_MAX_VIRT_CPUS; j++) { -+ snprintf(buf1, XEN_HYPER_CMD_BUFSIZE, "vcpu[%d]: ", j); -+ 
XEN_HYPER_PRI(fp, len, buf1, buf, flag, -+ (buf, "%lx\n", dcca->vcpu[j])); -+ } -+ XEN_HYPER_PRI(fp, len, "vcpu_context_array: ", buf, flag, -+ (buf, "%p\n", dcca->vcpu_context_array)); -+ } -+ } -+ XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, -+ (buf, "%d\n", xhdt->context_array_cnt)); -+ XEN_HYPER_PRI(fp, len, "running_domains: ", buf, flag, -+ (buf, "%lu\n", xhdt->running_domains)); -+ XEN_HYPER_PRI(fp, len, "dom_io: ", buf, flag, -+ (buf, "%p\n", xhdt->dom_io)); -+ XEN_HYPER_PRI(fp, len, "dom_xen: ", buf, flag, -+ (buf, "%p\n", xhdt->dom_xen)); -+ XEN_HYPER_PRI(fp, len, "dom0: ", buf, flag, -+ (buf, "%p\n", xhdt->dom0)); -+ XEN_HYPER_PRI(fp, len, "idle_domain: ", buf, flag, -+ (buf, "%p\n", xhdt->idle_domain)); -+ XEN_HYPER_PRI(fp, len, "curr_domain: ", buf, flag, -+ (buf, "%p\n", xhdt->curr_domain)); -+ XEN_HYPER_PRI(fp, len, "last: ", buf, flag, -+ (buf, "%p\n", xhdt->last)); -+ XEN_HYPER_PRI(fp, len, "domain_struct: ", buf, flag, -+ (buf, "%p\n", xhdt->domain_struct)); -+ XEN_HYPER_PRI(fp, len, "domain_struct_verify: ", buf, flag, -+ (buf, "%p\n", xhdt->domain_struct_verify)); -+} -+ -+/* -+ * "help -x vcp" output -+ */ -+static void -+xen_hyper_dump_xen_hyper_vcpu_table(int verbose) -+{ -+ char buf[XEN_HYPER_CMD_BUFSIZE]; -+ int len, flag; -+ -+ len = 25; -+ flag = XEN_HYPER_PRI_R; -+ -+ XEN_HYPER_PRI(fp, len, "vcpu_context_arrays: ", buf, flag, -+ (buf, "%p\n", xhvct->vcpu_context_arrays)); -+ XEN_HYPER_PRI(fp, len, "vcpu_context_arrays_cnt: ", buf, flag, -+ (buf, "%d\n", xhvct->vcpu_context_arrays_cnt)); -+ if (verbose) { -+ struct xen_hyper_vcpu_context_array *vcca; -+ struct xen_hyper_vcpu_context *vca; -+ int i, j; -+ -+ for (i = 0, vcca = xhvct->vcpu_context_arrays; -+ i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) { -+ snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "vcpu_context_arrays[%d]: ", i); -+ XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); -+ if (vcca->context_array) { -+ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, -+ (buf, "%p\n", vcca->context_array)); -+ } else { -+ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, -+ (buf, "NULL\n")); -+ } -+ XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, -+ (buf, "%d\n", vcca->context_array_cnt)); -+ XEN_HYPER_PRI(fp, len, "context_array_valid: ", buf, flag, -+ (buf, "%d\n", vcca->context_array_valid)); -+ for (j = 0, vca = vcca->context_array; -+ j < vcca->context_array_cnt; j++, vca++) { -+ snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", j); -+ XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); -+ XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, -+ (buf, "%lx\n", vca->vcpu)); -+ XEN_HYPER_PRI(fp, len, "vcpu_id: ", buf, flag, -+ (buf, "%d\n", vca->vcpu_id)); -+ XEN_HYPER_PRI(fp, len, "processor: ", buf, flag, -+ (buf, "%d\n", vca->processor)); -+ XEN_HYPER_PRI(fp, len, "vcpu_info: ", buf, flag, -+ (buf, "%lx\n", vca->vcpu_info)); -+ XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, -+ (buf, "%lx\n", vca->domain)); -+ XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, -+ (buf, "%lx\n", vca->next_in_list)); -+ XEN_HYPER_PRI(fp, len, "sleep_tick: ", buf, flag, -+ (buf, "%lx\n", vca->sleep_tick)); -+ XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, -+ (buf, "%lx\n", vca->sched_priv)); -+ XEN_HYPER_PRI(fp, len, "state: ", buf, flag, -+ (buf, "%d\n", vca->state)); -+ XEN_HYPER_PRI(fp, len, "state_entry_time: ", buf, flag, -+ (buf, "%llux\n", (unsigned long long)(vca->state_entry_time))); -+ XEN_HYPER_PRI(fp, len, "runstate_guest: ", buf, flag, -+ (buf, "%lx\n", vca->runstate_guest)); -+ 
XEN_HYPER_PRI(fp, len, "vcpu_flags: ", buf, flag, -+ (buf, "%lx\n", vca->vcpu_flags)); -+ } -+ } -+ } -+ XEN_HYPER_PRI(fp, len, "idle_vcpu: ", buf, flag, -+ (buf, "%lx\n", xhvct->idle_vcpu)); -+ XEN_HYPER_PRI(fp, len, "idle_vcpu_context_array: ", buf, flag, -+ (buf, "%p\n", xhvct->idle_vcpu_context_array)); -+ XEN_HYPER_PRI(fp, len, "last: ", buf, flag, -+ (buf, "%p\n", xhvct->last)); -+ XEN_HYPER_PRI(fp, len, "vcpu_struct: ", buf, flag, -+ (buf, "%p\n", xhvct->vcpu_struct)); -+ XEN_HYPER_PRI(fp, len, "vcpu_struct_verify: ", buf, flag, -+ (buf, "%p\n", xhvct->vcpu_struct_verify)); -+} -+ -+/* -+ * "help -x pcp" output -+ */ -+static void -+xen_hyper_dump_xen_hyper_pcpu_table(int verbose) -+{ -+ char buf[XEN_HYPER_CMD_BUFSIZE]; -+ struct xen_hyper_pcpu_context *pcca; -+ int len, flag, i; -+#ifdef X86_64 -+ uint64_t *ist_p; -+ int j; -+#endif -+ -+ len = 21; -+ flag = XEN_HYPER_PRI_R; -+ -+ XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, -+ (buf, "%p\n", xhpct->context_array)); -+ if (verbose) { -+ for (i = 0, pcca = xhpct->context_array; -+ i < XEN_HYPER_MAX_CPUS(); i++, pcca++) { -+ snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array %d: ", i); -+ XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); -+ XEN_HYPER_PRI(fp, len, "pcpu: ", buf, flag, -+ (buf, "%lx\n", pcca->pcpu)); -+ XEN_HYPER_PRI(fp, len, "processor_id: ", buf, flag, -+ (buf, "%u\n", pcca->processor_id)); -+ XEN_HYPER_PRI(fp, len, "guest_cpu_user_regs: ", buf, flag, -+ (buf, "%lx\n", pcca->guest_cpu_user_regs)); -+ XEN_HYPER_PRI(fp, len, "current_vcpu: ", buf, flag, -+ (buf, "%lx\n", pcca->current_vcpu)); -+ XEN_HYPER_PRI(fp, len, "init_tss: ", buf, flag, -+ (buf, "%lx\n", pcca->init_tss)); -+#ifdef X86 -+ XEN_HYPER_PRI(fp, len, "sp.esp0: ", buf, flag, -+ (buf, "%x\n", pcca->sp.esp0)); -+#endif -+#ifdef X86_64 -+ XEN_HYPER_PRI(fp, len, "sp.rsp0: ", buf, flag, -+ (buf, "%lx\n", pcca->sp.rsp0)); -+ for (j = 0, ist_p = pcca->ist; -+ j < XEN_HYPER_TSS_IST_MAX; j++, ist_p++) { -+ XEN_HYPER_PRI(fp, len, "ist: ", buf, flag, -+ (buf, "%lx\n", *ist_p)); -+ } -+#endif -+ } -+ } -+ XEN_HYPER_PRI(fp, len, "last: ", buf, flag, -+ (buf, "%p\n", xhpct->last)); -+ XEN_HYPER_PRI(fp, len, "pcpu_struct: ", buf, flag, -+ (buf, "%p\n", xhpct->pcpu_struct)); -+} -+ -+/* -+ * "help -x sch" output -+ */ -+static void -+xen_hyper_dump_xen_hyper_sched_table(int verbose) -+{ -+ struct xen_hyper_sched_context *schc; -+ char buf[XEN_HYPER_CMD_BUFSIZE]; -+ int len, flag, i; -+ -+ len = 21; -+ flag = XEN_HYPER_PRI_R; -+ -+ XEN_HYPER_PRI(fp, len, "name: ", buf, flag, -+ (buf, "%s\n", xhscht->name)); -+ XEN_HYPER_PRI(fp, len, "opt_sched: ", buf, flag, -+ (buf, "%s\n", xhscht->opt_sched)); -+ XEN_HYPER_PRI(fp, len, "sched_id: ", buf, flag, -+ (buf, "%d\n", xhscht->sched_id)); -+ XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag, -+ (buf, "%lx\n", xhscht->scheduler)); -+ XEN_HYPER_PRI(fp, len, "scheduler_struct: ", buf, flag, -+ (buf, "%p\n", xhscht->scheduler_struct)); -+ XEN_HYPER_PRI(fp, len, "sched_context_array: ", buf, flag, -+ (buf, "%p\n", xhscht->sched_context_array)); -+ if (verbose) { -+ for (i = 0, schc = xhscht->sched_context_array; -+ i < xht->pcpus; i++, schc++) { -+ XEN_HYPER_PRI(fp, len, "sched_context_array[", buf, -+ flag, (buf, "%d]\n", i)); -+ XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag, -+ (buf, "%lx\n", schc->schedule_data)); -+ XEN_HYPER_PRI(fp, len, "curr: ", buf, flag, -+ (buf, "%lx\n", schc->curr)); -+ XEN_HYPER_PRI(fp, len, "idle: ", buf, flag, -+ (buf, "%lx\n", schc->idle)); -+ XEN_HYPER_PRI(fp, len, "sched_priv: ", 
buf, flag, -+ (buf, "%lx\n", schc->sched_priv)); -+ XEN_HYPER_PRI(fp, len, "tick: ", buf, flag, -+ (buf, "%lx\n", schc->tick)); -+ } -+ } -+} -+ -+/* -+ * "help -x siz" output -+ */ -+static void -+xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct) -+{ -+ char buf[XEN_HYPER_CMD_BUFSIZE]; -+ int len, flag; -+ -+ len = 23; -+ flag = XEN_HYPER_PRI_R; -+ -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.ELF_Prstatus)); -+ XEN_HYPER_PRI(fp, len, "ELF_Signifo: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.ELF_Signifo)); -+ XEN_HYPER_PRI(fp, len, "ELF_Gregset: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.ELF_Gregset)); -+ XEN_HYPER_PRI(fp, len, "ELF_Timeval: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.ELF_Timeval)); -+ XEN_HYPER_PRI(fp, len, "arch_domain: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.arch_domain)); -+ XEN_HYPER_PRI(fp, len, "arch_shared_info: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.arch_shared_info)); -+ XEN_HYPER_PRI(fp, len, "cpu_info: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.cpu_info)); -+ XEN_HYPER_PRI(fp, len, "cpu_time: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.cpu_time)); -+ XEN_HYPER_PRI(fp, len, "cpu_user_regs: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.cpu_user_regs)); -+ XEN_HYPER_PRI(fp, len, "cpumask_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.cpumask_t)); -+ XEN_HYPER_PRI(fp, len, "cpuinfo_ia64: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.cpuinfo_ia64)); -+ XEN_HYPER_PRI(fp, len, "cpuinfo_x86: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.cpuinfo_x86)); -+ XEN_HYPER_PRI(fp, len, "crash_note_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.crash_note_t)); -+ XEN_HYPER_PRI(fp, len, "crash_note_core_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.crash_note_core_t)); -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_t)); -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_core_t)); -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_info_t)); -+ XEN_HYPER_PRI(fp, len, "crash_xen_core_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.crash_xen_core_t)); -+ XEN_HYPER_PRI(fp, len, "crash_xen_info_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.crash_xen_info_t)); -+ XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.domain)); -+#ifdef IA64 -+ XEN_HYPER_PRI(fp, len, "mm_struct: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.mm_struct)); -+#endif -+ XEN_HYPER_PRI(fp, len, "note_buf_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.note_buf_t)); -+ XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.schedule_data)); -+ XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.scheduler)); -+ XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.shared_info)); -+ XEN_HYPER_PRI(fp, len, "timer: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.timer)); -+ XEN_HYPER_PRI(fp, len, "tss_struct: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.tss_struct)); -+ XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.vcpu)); -+ XEN_HYPER_PRI(fp, len, "vcpu_runstate_info: ", buf, flag, -+ (buf, "%ld\n", 
xen_hyper_size_table.vcpu_runstate_info)); -+ XEN_HYPER_PRI(fp, len, "xen_crash_xen_regs_t: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_size_table.xen_crash_xen_regs_t)); -+} -+ -+/* -+ * "help -x ofs" output -+ */ -+static void -+xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct) -+{ -+ char buf[XEN_HYPER_CMD_BUFSIZE]; -+ int len, flag; -+ -+ len = 45; -+ flag = XEN_HYPER_PRI_R; -+ -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_info: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_info)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cursig: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cursig)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sigpend: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sigpend)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sighold: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sighold)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pid: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pid)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_ppid: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_ppid)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pgrp: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pgrp)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sid: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sid)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_stime: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_stime)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cutime: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cutime)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cstime: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cstime)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_reg: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_reg)); -+ XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_fpvalid: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_fpvalid)); -+ XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_sec: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_sec)); -+ XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_usec: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_usec)); -+ -+#ifdef IA64 -+ XEN_HYPER_PRI(fp, len, "arch_domain_mm: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.arch_domain_mm)); -+#endif -+ -+ XEN_HYPER_PRI(fp, len, "arch_shared_info_max_pfn: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_max_pfn)); -+ XEN_HYPER_PRI(fp, len, "arch_shared_info_pfn_to_mfn_frame_list_list: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_pfn_to_mfn_frame_list_list)); -+ XEN_HYPER_PRI(fp, len, "arch_shared_info_nmi_reason: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_nmi_reason)); -+ -+ XEN_HYPER_PRI(fp, len, "cpu_info_guest_cpu_user_regs: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.cpu_info_guest_cpu_user_regs)); -+ XEN_HYPER_PRI(fp, len, "cpu_info_processor_id: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.cpu_info_processor_id)); -+ XEN_HYPER_PRI(fp, len, "cpu_info_current_vcpu: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.cpu_info_current_vcpu)); -+ -+ XEN_HYPER_PRI(fp, len, "cpu_time_local_tsc_stamp: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_local_tsc_stamp)); -+ XEN_HYPER_PRI(fp, len, "cpu_time_stime_local_stamp: ", buf, flag, 
-+ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_local_stamp)); -+ XEN_HYPER_PRI(fp, len, "cpu_time_stime_master_stamp: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_master_stamp)); -+ XEN_HYPER_PRI(fp, len, "cpu_time_tsc_scale: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_tsc_scale)); -+ XEN_HYPER_PRI(fp, len, "cpu_time_calibration_timer: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.cpu_time_calibration_timer)); -+ -+ XEN_HYPER_PRI(fp, len, "crash_note_t_core: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_core)); -+ XEN_HYPER_PRI(fp, len, "crash_note_t_xen: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen)); -+ XEN_HYPER_PRI(fp, len, "crash_note_t_xen_regs: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_regs)); -+ XEN_HYPER_PRI(fp, len, "crash_note_t_xen_info: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_info)); -+ -+ XEN_HYPER_PRI(fp, len, "crash_note_core_t_note: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_note)); -+ XEN_HYPER_PRI(fp, len, "crash_note_core_t_desc: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_desc)); -+ -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_t_note: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_note)); -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_t_desc: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_desc)); -+ -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_note: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_note)); -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_desc: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_desc)); -+ -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_note: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_note)); -+ XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_desc: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_desc)); -+ -+ XEN_HYPER_PRI(fp, len, "domain_page_list: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_page_list)); -+ XEN_HYPER_PRI(fp, len, "domain_xenpage_list: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_xenpage_list)); -+ XEN_HYPER_PRI(fp, len, "domain_domain_id: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_domain_id)); -+ XEN_HYPER_PRI(fp, len, "domain_tot_pages: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_tot_pages)); -+ XEN_HYPER_PRI(fp, len, "domain_max_pages: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_max_pages)); -+ XEN_HYPER_PRI(fp, len, "domain_xenheap_pages: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_xenheap_pages)); -+ XEN_HYPER_PRI(fp, len, "domain_shared_info: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_shared_info)); -+ XEN_HYPER_PRI(fp, len, "domain_sched_priv: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_sched_priv)); -+ XEN_HYPER_PRI(fp, len, "domain_next_in_list: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_next_in_list)); -+ XEN_HYPER_PRI(fp, len, "domain_domain_flags: ", buf, flag, -+ (buf, "%lx\n", xen_hyper_offset_table.domain_domain_flags)); -+ XEN_HYPER_PRI(fp, len, "domain_evtchn: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_evtchn)); -+ XEN_HYPER_PRI(fp, len, "domain_is_hvm: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_is_hvm)); -+ 
XEN_HYPER_PRI(fp, len, "domain_is_privileged: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_is_privileged)); -+ XEN_HYPER_PRI(fp, len, "domain_debugger_attached: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_debugger_attached)); -+ XEN_HYPER_PRI(fp, len, "domain_is_polling: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_is_polling)); -+ XEN_HYPER_PRI(fp, len, "domain_is_dying: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_is_dying)); -+ XEN_HYPER_PRI(fp, len, "domain_is_paused_by_controller: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_is_paused_by_controller)); -+ XEN_HYPER_PRI(fp, len, "domain_is_shutting_down: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_is_shutting_down)); -+ XEN_HYPER_PRI(fp, len, "domain_is_shut_down: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_is_shut_down)); -+ XEN_HYPER_PRI(fp, len, "domain_vcpu: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_vcpu)); -+ XEN_HYPER_PRI(fp, len, "domain_arch: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.domain_arch)); -+ -+#ifdef IA64 -+ XEN_HYPER_PRI(fp, len, "mm_struct_pgd: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.mm_struct_pgd)); -+#endif -+ -+ XEN_HYPER_PRI(fp, len, "schedule_data_schedule_lock: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_schedule_lock)); -+ XEN_HYPER_PRI(fp, len, "schedule_data_curr: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_curr)); -+ XEN_HYPER_PRI(fp, len, "schedule_data_idle: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_idle)); -+ XEN_HYPER_PRI(fp, len, "schedule_data_sched_priv: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_sched_priv)); -+ XEN_HYPER_PRI(fp, len, "schedule_data_s_timer: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_s_timer)); -+ XEN_HYPER_PRI(fp, len, "schedule_data_tick: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.schedule_data_tick)); -+ -+ XEN_HYPER_PRI(fp, len, "scheduler_name: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_name)); -+ XEN_HYPER_PRI(fp, len, "scheduler_opt_name: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_opt_name)); -+ XEN_HYPER_PRI(fp, len, "scheduler_sched_id: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_sched_id)); -+ XEN_HYPER_PRI(fp, len, "scheduler_init: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_init)); -+ XEN_HYPER_PRI(fp, len, "scheduler_tick: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_tick)); -+ XEN_HYPER_PRI(fp, len, "scheduler_init_vcpu: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_init_vcpu)); -+ XEN_HYPER_PRI(fp, len, "scheduler_destroy_domain: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_destroy_domain)); -+ XEN_HYPER_PRI(fp, len, "scheduler_sleep: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_sleep)); -+ XEN_HYPER_PRI(fp, len, "scheduler_wake: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_wake)); -+ XEN_HYPER_PRI(fp, len, "scheduler_set_affinity: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_set_affinity)); -+ XEN_HYPER_PRI(fp, len, "scheduler_do_schedule: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_do_schedule)); -+ XEN_HYPER_PRI(fp, len, "scheduler_adjust: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_adjust)); -+ XEN_HYPER_PRI(fp, len, 
"scheduler_dump_settings: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_settings)); -+ XEN_HYPER_PRI(fp, len, "scheduler_dump_cpu_state: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_cpu_state)); -+ -+ XEN_HYPER_PRI(fp, len, "shared_info_vcpu_info: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.shared_info_vcpu_info)); -+ XEN_HYPER_PRI(fp, len, "shared_info_evtchn_pending: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_pending)); -+ XEN_HYPER_PRI(fp, len, "shared_info_evtchn_mask: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_mask)); -+ XEN_HYPER_PRI(fp, len, "shared_info_arch: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.shared_info_arch)); -+ -+ XEN_HYPER_PRI(fp, len, "timer_expires: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.timer_expires)); -+ XEN_HYPER_PRI(fp, len, "timer_cpu: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.timer_cpu)); -+ XEN_HYPER_PRI(fp, len, "timer_function: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.timer_function)); -+ XEN_HYPER_PRI(fp, len, "timer_data: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.timer_data)); -+ XEN_HYPER_PRI(fp, len, "timer_heap_offset: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.timer_heap_offset)); -+ XEN_HYPER_PRI(fp, len, "timer_killed: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.timer_killed)); -+ -+ XEN_HYPER_PRI(fp, len, "tss_struct_rsp0: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.tss_struct_rsp0)); -+ XEN_HYPER_PRI(fp, len, "tss_struct_esp0: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.tss_struct_esp0)); -+ -+ XEN_HYPER_PRI(fp, len, "vcpu_vcpu_id: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_id)); -+ XEN_HYPER_PRI(fp, len, "vcpu_processor: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_processor)); -+ XEN_HYPER_PRI(fp, len, "vcpu_vcpu_info: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_info)); -+ XEN_HYPER_PRI(fp, len, "vcpu_domain: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_domain)); -+ XEN_HYPER_PRI(fp, len, "vcpu_next_in_list: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_next_in_list)); -+ XEN_HYPER_PRI(fp, len, "vcpu_timer: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_timer)); -+ XEN_HYPER_PRI(fp, len, "vcpu_sleep_tick: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_sleep_tick)); -+ XEN_HYPER_PRI(fp, len, "vcpu_poll_timer: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_poll_timer)); -+ XEN_HYPER_PRI(fp, len, "vcpu_sched_priv: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_sched_priv)); -+ XEN_HYPER_PRI(fp, len, "vcpu_runstate: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate)); -+ XEN_HYPER_PRI(fp, len, "vcpu_runstate_guest: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_guest)); -+ XEN_HYPER_PRI(fp, len, "vcpu_vcpu_flags: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_flags)); -+ XEN_HYPER_PRI(fp, len, "vcpu_pause_count: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_pause_count)); -+ XEN_HYPER_PRI(fp, len, "vcpu_virq_to_evtchn: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_virq_to_evtchn)); -+ XEN_HYPER_PRI(fp, len, "vcpu_cpu_affinity: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_cpu_affinity)); -+ XEN_HYPER_PRI(fp, len, "vcpu_nmi_addr: ", buf, flag, -+ (buf, "%ld\n", 
xen_hyper_offset_table.vcpu_nmi_addr)); -+ XEN_HYPER_PRI(fp, len, "vcpu_vcpu_dirty_cpumask: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_dirty_cpumask)); -+ XEN_HYPER_PRI(fp, len, "vcpu_arch: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_arch)); -+ XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state)); -+ XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state_entry_time: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state_entry_time)); -+ XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_time: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_time)); -+#ifdef IA64 -+ XEN_HYPER_PRI(fp, len, "vcpu_thread_ksp: ", buf, flag, -+ (buf, "%ld\n", xen_hyper_offset_table.vcpu_thread_ksp)); -+#endif -+} -+ -+/* -+ * dump specified memory with specified size. -+ */ -+#define DSP_BYTE_SIZE 16 -+ -+static void -+xen_hyper_dump_mem(void *mem, ulong len, int dsz) -+{ -+ long i, max; -+ void *mem_w = mem; -+ -+ if (!len || -+ (dsz != SIZEOF_8BIT && dsz != SIZEOF_16BIT && -+ dsz != SIZEOF_32BIT && dsz != SIZEOF_64BIT)) -+ return; -+ max = len / dsz + (len % dsz ? 1 : 0); -+ for (i = 0; i < max; i++) { -+ if (i != 0 && !(i % (DSP_BYTE_SIZE / dsz))) -+ fprintf(fp, "\n"); -+ if (i == 0 || !(i % (DSP_BYTE_SIZE / dsz))) -+ fprintf(fp, "%p : ", mem_w); -+ if (dsz == SIZEOF_8BIT) -+ fprintf(fp, "%02x ", *(uint8_t *)mem_w); -+ else if (dsz == SIZEOF_16BIT) -+ fprintf(fp, "%04x ", *(uint16_t *)mem_w); -+ else if (dsz == SIZEOF_32BIT) -+ fprintf(fp, "%08x ", *(uint32_t *)mem_w); -+ else if (dsz == SIZEOF_64BIT) -+ fprintf(fp, "%016llx ", *(unsigned long long *)mem_w); -+ mem_w = (char *)mem_w + dsz; -+ } -+ fprintf(fp, "\n"); -+} -+#endif ---- crash/defs.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/defs.h 2007-08-27 11:50:30.000000000 -0400 -@@ -1,8 +1,8 @@ - /* defs.h - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. - * Copyright (C) 2002 Silicon Graphics, Inc. 
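/*
 * A minimal standalone sketch of the row layout used by xen_hyper_dump_mem()
 * above: with DSP_BYTE_SIZE (16) bytes per row and dsz == SIZEOF_32BIT, each
 * row holds 16/4 = 4 items, and a 30-byte region gives max = 30/4 + 1 = 8
 * items, i.e. two rows.  The sketch is hard-wired to 4-byte items, and
 * "dump_u32" is an illustrative name, not a crash symbol.
 */
#include <stdio.h>
#include <stdint.h>

void
dump_u32(FILE *fp, const void *mem, unsigned long len)
{
        const uint32_t *p = mem;
        unsigned long i, max = len / 4 + (len % 4 ? 1 : 0);

        for (i = 0; i < max; i++) {
                if (i && !(i % 4))                      /* 16 bytes == 4 items per row */
                        fprintf(fp, "\n");
                if (!(i % 4))
                        fprintf(fp, "%p : ", (const void *)&p[i]);
                fprintf(fp, "%08x ", p[i]);
        }
        fprintf(fp, "\n");
}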
- * - * This program is free software; you can redistribute it and/or modify -@@ -56,10 +56,10 @@ - #define FALSE (0) - - #ifdef X86 --#define NR_CPUS (32) -+#define NR_CPUS (256) - #endif - #ifdef X86_64 --#define NR_CPUS (32) -+#define NR_CPUS (256) - #endif - #ifdef ALPHA - #define NR_CPUS (64) -@@ -68,7 +68,7 @@ - #define NR_CPUS (32) - #endif - #ifdef IA64 --#define NR_CPUS (512) -+#define NR_CPUS (1024) - #endif - #ifdef PPC64 - #define NR_CPUS (128) -@@ -98,6 +98,7 @@ - #define LASTCHAR(s) (s[strlen(s)-1]) - #define FIRSTCHAR(s) (s[0]) - #define QUOTED_STRING(s) ((FIRSTCHAR(s) == '"') && (LASTCHAR(s) == '"')) -+#define PATHEQ(A, B) ((A) && (B) && (pathcmp((char *)(A), (char *)(B)) == 0)) - - #ifdef roundup - #undef roundup -@@ -106,6 +107,8 @@ - - typedef uint64_t physaddr_t; - -+#define PADDR_NOT_AVAILABLE (0x1ULL) -+ - typedef unsigned long long int ulonglong; - struct number_option { - ulong num; -@@ -155,8 +158,8 @@ - #define UNLINK_MODULES (0x1000000000ULL) - #define S390D (0x2000000000ULL) - #define REM_S390D (0x4000000000ULL) --#define PC_UNUSED_1 (0x8000000000ULL) --#define PC_UNUSED_2 (0x10000000000ULL) -+#define SYSRQ (0x8000000000ULL) -+#define KDUMP (0x10000000000ULL) - #define NETDUMP (0x20000000000ULL) - #define REM_NETDUMP (0x40000000000ULL) - #define SYSMAP (0x80000000000ULL) -@@ -169,11 +172,18 @@ - #define VERSION_QUERY (0x4000000000000ULL) - #define READNOW (0x8000000000000ULL) - #define NOCRASHRC (0x10000000000000ULL) -+#define INIT_IFILE (0x20000000000000ULL) -+#define XENDUMP (0x40000000000000ULL) -+#define XEN_HYPER (0x80000000000000ULL) -+#define XEN_CORE (0x100000000000000ULL) -+#define PLEASE_WAIT (0x200000000000000ULL) -+#define IFILE_ERROR (0x400000000000000ULL) -+#define KERNTYPES (0x800000000000000ULL) - - #define ACTIVE() (pc->flags & LIVE_SYSTEM) - #define DUMPFILE() (!(pc->flags & LIVE_SYSTEM)) --#define MEMORY_SOURCES (NETDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP) --#define DUMPFILE_TYPES (DISKDUMP|NETDUMP|MCLXCD|LKCD|S390D) -+#define MEMORY_SOURCES (NETDUMP|KDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP|XENDUMP) -+#define DUMPFILE_TYPES (DISKDUMP|NETDUMP|KDUMP|MCLXCD|LKCD|S390D|XENDUMP) - #define REMOTE() (pc->flags & REMOTE_DAEMON) - #define REMOTE_ACTIVE() (pc->flags & REM_LIVE_SYSTEM) - #define REMOTE_DUMPFILE() \ -@@ -182,16 +192,35 @@ - #define LKCD_DUMPFILE() (pc->flags & (LKCD|REM_LKCD)) - #define NETDUMP_DUMPFILE() (pc->flags & (NETDUMP|REM_NETDUMP)) - #define DISKDUMP_DUMPFILE() (pc->flags & DISKDUMP) -+#define KDUMP_DUMPFILE() (pc->flags & KDUMP) -+#define XENDUMP_DUMPFILE() (pc->flags & XENDUMP) -+#define XEN_HYPER_MODE() (pc->flags & XEN_HYPER) -+#define SYSRQ_TASK(X) ((pc->flags & SYSRQ) && is_task_active(X)) -+#define XEN_CORE_DUMPFILE() (pc->flags & XEN_CORE) -+#define LKCD_KERNTYPES() (pc->flags & KERNTYPES) - - #define NETDUMP_LOCAL (0x1) /* netdump_data flags */ - #define NETDUMP_REMOTE (0x2) --#define NETDUMP_VALID() (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE)) -+#define VMCORE_VALID() (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE|KDUMP_LOCAL)) - #define NETDUMP_ELF32 (0x4) - #define NETDUMP_ELF64 (0x8) - #define PARTIAL_DUMP (0x10) /* netdump or diskdump */ -+#define KDUMP_ELF32 (0x20) -+#define KDUMP_ELF64 (0x40) -+#define KDUMP_LOCAL (0x80) -+ -+#define DUMPFILE_FORMAT(flags) ((flags) & \ -+ (NETDUMP_ELF32|NETDUMP_ELF64|KDUMP_ELF32|KDUMP_ELF64)) -+ -+#define DISKDUMP_LOCAL (0x1) -+#define KDUMP_CMPRS_LOCAL (0x2) -+#define ERROR_EXCLUDED (0x4) -+#define ZERO_EXCLUDED (0x8) -+#define DISKDUMP_VALID() (dd->flags & 
DISKDUMP_LOCAL) -+#define KDUMP_CMPRS_VALID() (dd->flags & KDUMP_CMPRS_LOCAL) - --#define DISKDUMP_LOCAL (0x1) --#define DISKDUMP_VALID() (dd->flags & DISKDUMP_LOCAL) -+#define XENDUMP_LOCAL (0x1) -+#define XENDUMP_VALID() (xd->flags & XENDUMP_LOCAL) - - #define CRASHDEBUG(x) (pc->debug >= (x)) - -@@ -210,6 +239,7 @@ - #define SEEK_ERROR (-1) - #define READ_ERROR (-2) - #define WRITE_ERROR (-3) -+#define PAGE_EXCLUDED (-4) - - #define RESTART() (longjmp(pc->main_loop_env, 1)) - #define RESUME_FOREACH() (longjmp(pc->foreach_loop_env, 1)) -@@ -319,15 +349,28 @@ - #define SCROLL_NONE 0 - #define SCROLL_LESS 1 - #define SCROLL_MORE 2 -+#define SCROLL_CRASHPAGER 3 - ulong redirect; /* per-cmd origin and output flags */ - pid_t stdpipe_pid; /* per-cmd standard output pipe's pid */ - pid_t pipe_pid; /* per-cmd output pipe's pid */ - pid_t pipe_shell_pid; /* per-cmd output pipe's shell pid */ - char pipe_command[BUFSIZE]; /* pipe command line */ -+ struct command_table_entry *cmd_table; /* linux/xen command table */ - char *curcmd; /* currently-executing command */ - char *lastcmd; /* previously-executed command */ - ulong cmdgencur; /* current command generation number */ -- ulong cmdgenspec; /* specified command generation num */ -+ ulong curcmd_flags; /* general purpose per-command flag */ -+#define XEN_MACHINE_ADDR (0x1) -+#define REPEAT (0x2) -+#define IDLE_TASK_SHOWN (0x4) -+#define TASK_SPECIFIED (0x8) -+#define MEMTYPE_UVADDR (0x10) -+#define MEMTYPE_FILEADDR (0x20) -+#define HEADER_PRINTED (0x40) -+#define BAD_INSTRUCTION (0x80) -+#define UD2A_INSTRUCTION (0x100) -+#define IRQ_IN_USE (0x200) -+ ulonglong curcmd_private; /* general purpose per-command info */ - int cur_gdb_cmd; /* current gdb command */ - int last_gdb_cmd; /* previously-executed gdb command */ - int sigint_cnt; /* number of ignored SIGINTs */ -@@ -347,11 +390,11 @@ - struct extension_table *curext; /* extension being loaded */ - int (*readmem)(int, void *, int, ulong, physaddr_t); /* memory access */ - int (*writemem)(int, void *, int, ulong, physaddr_t);/* memory access */ -+ ulong ifile_in_progress; /* original xxx_IFILE flags */ -+ off_t ifile_offset; /* current offset into input file */ -+ char *runtime_ifile_cmd; /* runtime command using input file */ - }; - --#define UNIQUE_COMMAND(s) \ -- (STREQ(pc->curcmd, s) && (pc->cmdgencur == pc->cmdgenspec)) -- - #define READMEM pc->readmem - - typedef void (*cmd_func_t)(void); -@@ -365,6 +408,7 @@ - - #define REFRESH_TASK_TABLE (0x1) /* command_table_entry flags */ - #define HIDDEN_COMMAND (0x2) -+#define CLEANUP (0x4) /* for extensions only */ - - /* - * A linked list of extension table structures keeps track of the current -@@ -407,9 +451,32 @@ - #define KALLSYMS_V2 (0x2000) - #define TVEC_BASES_V2 (0x4000) - #define GCC_3_3_3 (0x8000) -+#define USE_OLD_BT (0x10000) -+#define ARCH_XEN (0x20000) -+#define NO_IKCONFIG (0x40000) -+#define DWARF_UNWIND (0x80000) -+#define NO_DWARF_UNWIND (0x100000) -+#define DWARF_UNWIND_MEMORY (0x200000) -+#define DWARF_UNWIND_EH_FRAME (0x400000) -+#define DWARF_UNWIND_CAPABLE (DWARF_UNWIND_MEMORY|DWARF_UNWIND_EH_FRAME) -+#define DWARF_UNWIND_MODULES (0x800000) -+#define BUGVERBOSE_OFF (0x1000000) -+#define RELOC_SET (0x2000000) -+#define RELOC_FORCE (0x4000000) - - #define GCC_VERSION_DEPRECATED (GCC_3_2|GCC_3_2_3|GCC_2_96|GCC_3_3_2|GCC_3_3_3) - -+#define XEN() (kt->flags & ARCH_XEN) -+ -+#define XEN_MACHINE_TO_MFN(m) ((ulonglong)(m) >> PAGESHIFT()) -+#define XEN_PFN_TO_PSEUDO(p) ((ulonglong)(p) << PAGESHIFT()) -+ -+#define XEN_MFN_NOT_FOUND 
(~0UL) -+#define XEN_PFNS_PER_PAGE (PAGESIZE()/sizeof(ulong)) -+#define XEN_FOREIGN_FRAME (1UL << (BITS()-1)) -+ -+#define XEN_MACHADDR_NOT_FOUND (~0ULL) -+ - struct kernel_table { /* kernel data */ - ulong flags; - ulong stext; -@@ -420,6 +487,7 @@ - ulong init_end; - ulong end; - int cpus; -+ char *cpus_override; - void (*display_bh)(void); - ulong module_list; - ulong kernel_module; -@@ -430,11 +498,36 @@ - uint kernel_version[3]; - uint gcc_version[3]; - int runq_siblings; -+ int kernel_NR_CPUS; - long __rq_idx[NR_CPUS]; - long __cpu_idx[NR_CPUS]; - long __per_cpu_offset[NR_CPUS]; -- long cpu_flags[NR_CPUS]; -+ ulong cpu_flags[NR_CPUS]; -+ int BUG_bytes; - #define NMI 0x1 -+ ulong xen_flags; -+#define WRITABLE_PAGE_TABLES (0x1) -+#define SHADOW_PAGE_TABLES (0x2) -+#define CANONICAL_PAGE_TABLES (0x4) -+#define XEN_SUSPEND (0x8) -+ char *m2p_page; -+ ulong phys_to_machine_mapping; -+ ulong p2m_table_size; -+#define P2M_MAPPING_CACHE (512) -+ struct p2m_mapping_cache { -+ ulong mapping; -+ ulong start; -+ ulong end; -+ } p2m_mapping_cache[P2M_MAPPING_CACHE]; -+#define P2M_MAPPING_TO_PAGE_INDEX(c) \ -+ (((kt->p2m_mapping_cache[c].mapping - kt->phys_to_machine_mapping)/PAGESIZE()) \ -+ * XEN_PFNS_PER_PAGE) -+ ulong last_mapping_read; -+ ulong p2m_cache_index; -+ ulong p2m_pages_searched; -+ ulong p2m_mfn_cache_hits; -+ ulong p2m_page_cache_hits; -+ ulong relocate; - }; - - /* -@@ -578,6 +671,7 @@ - ulonglong flags; - ulong instptr; - ulong stkptr; -+ ulong bptr; - ulong stackbase; - ulong stacktop; - char *stackbuf; -@@ -602,6 +696,8 @@ - (void *)(&bt->stackbuf[(ulong)STACK_OFFSET_TYPE(OFF)]), (size_t)(SZ)) - - struct machine_specific; /* uniquely defined below each machine's area */ -+struct xendump_data; -+struct xen_kdump_data; - - struct machdep_table { - ulong flags; -@@ -645,14 +741,24 @@ - char **file; - } *line_number_hooks; - ulong last_pgd_read; -+ ulong last_pud_read; - ulong last_pmd_read; - ulong last_ptbl_read; - char *pgd; -+ char *pud; - char *pmd; - char *ptbl; - int ptrs_per_pgd; - char *cmdline_arg; - struct machine_specific *machspec; -+ ulong section_size_bits; -+ ulong max_physmem_bits; -+ ulong sections_per_root; -+ int (*xendump_p2m_create)(struct xendump_data *); -+ ulong (*xendump_panic_task)(struct xendump_data *); -+ void (*get_xendump_regs)(struct xendump_data *, struct bt_info *, ulong *, ulong *); -+ void (*clear_machdep_cache)(void); -+ int (*xen_kdump_p2m_create)(struct xen_kdump_data *); - }; - - /* -@@ -660,19 +766,25 @@ - * as defined in their processor-specific files below. (see KSYMS_START defs). 
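/*
 * Worked numbers for the Xen m2p/p2m helpers added above, assuming 4K pages
 * (a PAGESHIFT() of 12) and an 8-byte ulong; the constants below restate
 * those assumptions rather than crash's runtime values, and the sample
 * machine address is invented.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long maddr = 0x12345678ULL;
        unsigned long page_size = 4096, ulong_size = 8;
        unsigned long pfns_per_page = page_size / ulong_size;  /* XEN_PFNS_PER_PAGE: 512 */

        /* XEN_MACHINE_TO_MFN(): machine address 0x12345678 -> mfn 0x12345 */
        printf("mfn = %llx\n", maddr >> 12);

        /*
         * P2M_MAPPING_TO_PAGE_INDEX(): a p2m page mapped one PAGESIZE() past
         * kt->phys_to_machine_mapping starts at pfn index (4096/4096)*512 = 512.
         */
        printf("first pfn = %lu\n", (4096UL / page_size) * pfns_per_page);
        return 0;
}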
- */ - #define HWRESET (0x80000000) --#define SYSRQ (0x40000000) --#define OMIT_FRAME_PTR (0x20000000) --#define FRAMESIZE_DEBUG (0x10000000) --#define MACHDEP_BT_TEXT (0x8000000) --#define DEVMEMRD (0x4000000) --#define INIT (0x2000000) --#define SYSRQ_TASK(X) ((machdep->flags & SYSRQ) && is_task_active(X)) -+#define OMIT_FRAME_PTR (0x40000000) -+#define FRAMESIZE_DEBUG (0x20000000) -+#define MACHDEP_BT_TEXT (0x10000000) -+#define DEVMEMRD (0x8000000) -+#define INIT (0x4000000) -+#define VM_4_LEVEL (0x2000000) -+#define MCA (0x1000000) -+#define PAE (0x800000) - - extern struct machdep_table *machdep; - -+#ifndef HZ -+#define HZ sysconf(_SC_CLK_TCK) -+#endif -+ - #define IS_LAST_PGD_READ(pgd) ((ulong)(pgd) == machdep->last_pgd_read) - #define IS_LAST_PMD_READ(pmd) ((ulong)(pmd) == machdep->last_pmd_read) - #define IS_LAST_PTBL_READ(ptbl) ((ulong)(ptbl) == machdep->last_ptbl_read) -+#define IS_LAST_PUD_READ(pud) ((ulong)(pud) == machdep->last_pud_read) - - #define FILL_PGD(PGD, TYPE, SIZE) \ - if (!IS_LAST_PGD_READ(PGD)) { \ -@@ -681,6 +793,13 @@ - machdep->last_pgd_read = (ulong)(PGD); \ - } - -+#define FILL_PUD(PUD, TYPE, SIZE) \ -+ if (!IS_LAST_PUD_READ(PUD)) { \ -+ readmem((ulonglong)((ulong)(PUD)), TYPE, machdep->pud, \ -+ SIZE, "pud page", FAULT_ON_ERROR); \ -+ machdep->last_pud_read = (ulong)(PUD); \ -+ } -+ - #define FILL_PMD(PMD, TYPE, SIZE) \ - if (!IS_LAST_PMD_READ(PMD)) { \ - readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ -@@ -695,10 +814,12 @@ - machdep->last_ptbl_read = (ulong)(PTBL); \ - } - -+#define SETUP_ENV (0) - #define PRE_SYMTAB (1) - #define PRE_GDB (2) - #define POST_GDB (3) - #define POST_INIT (4) -+#define POST_VM (5) - - #define FOREACH_BT (1) - #define FOREACH_VM (2) -@@ -737,6 +858,7 @@ - #define FOREACH_c_FLAG (0x40000) - #define FOREACH_f_FLAG (0x80000) - #define FOREACH_o_FLAG (0x100000) -+#define FOREACH_T_FLAG (0x200000) - - struct foreach_data { - ulong flags; -@@ -810,10 +932,15 @@ - long task_struct_last_run; - long task_struct_timestamp; - long task_struct_thread_info; -+ long task_struct_nsproxy; -+ long task_struct_rlim; - long thread_info_task; - long thread_info_cpu; - long thread_info_previous_esp; - long thread_info_flags; -+ long nsproxy_mnt_ns; -+ long mnt_namespace_root; -+ long mnt_namespace_list; - long pid_link_pid; - long pid_hash_chain; - long hlist_node_next; -@@ -830,6 +957,8 @@ - long tms_tms_stime; - long signal_struct_count; - long signal_struct_action; -+ long signal_struct_shared_pending; -+ long signal_struct_rlim; - long k_sigaction_sa; - long sigaction_sa_handler; - long sigaction_sa_flags; -@@ -875,8 +1004,13 @@ - long mm_struct_mmap; - long mm_struct_pgd; - long mm_struct_rss; -+ long mm_struct_anon_rss; - long mm_struct_total_vm; - long mm_struct_start_code; -+ long mm_struct_arg_start; -+ long mm_struct_arg_end; -+ long mm_struct_env_start; -+ long mm_struct_env_end; - long vm_area_struct_vm_mm; - long vm_area_struct_vm_next; - long vm_area_struct_vm_end; -@@ -948,6 +1082,7 @@ - long block_device_bd_disk; - long irq_desc_t_status; - long irq_desc_t_handler; -+ long irq_desc_t_chip; - long irq_desc_t_action; - long irq_desc_t_depth; - long irqdesc_action; -@@ -968,8 +1103,28 @@ - long hw_interrupt_type_ack; - long hw_interrupt_type_end; - long hw_interrupt_type_set_affinity; -+ long irq_chip_typename; -+ long irq_chip_startup; -+ long irq_chip_shutdown; -+ long irq_chip_enable; -+ long irq_chip_disable; -+ long irq_chip_ack; -+ long irq_chip_end; -+ long irq_chip_set_affinity; -+ long irq_chip_mask; -+ long 
irq_chip_mask_ack; -+ long irq_chip_unmask; -+ long irq_chip_eoi; -+ long irq_chip_retrigger; -+ long irq_chip_set_type; -+ long irq_chip_set_wake; - long irq_cpustat_t___softirq_active; - long irq_cpustat_t___softirq_mask; -+ long fdtable_max_fds; -+ long fdtable_max_fdset; -+ long fdtable_open_fds; -+ long fdtable_fd; -+ long files_struct_fdt; - long files_struct_max_fds; - long files_struct_max_fdset; - long files_struct_open_fds; -@@ -978,6 +1133,9 @@ - long file_f_dentry; - long file_f_vfsmnt; - long file_f_count; -+ long file_f_path; -+ long path_mnt; -+ long path_dentry; - long fs_struct_root; - long fs_struct_pwd; - long fs_struct_rootmnt; -@@ -1088,6 +1246,8 @@ - long inet_opt_dport; - long inet_opt_sport; - long inet_opt_num; -+ long ipv6_pinfo_rcv_saddr; -+ long ipv6_pinfo_daddr; - long timer_list_list; - long timer_list_next; - long timer_list_entry; -@@ -1123,6 +1283,7 @@ - long zone_struct_name; - long zone_struct_size; - long zone_struct_memsize; -+ long zone_struct_zone_start_pfn; - long zone_struct_zone_start_paddr; - long zone_struct_zone_start_mapnr; - long zone_struct_zone_mem_map; -@@ -1143,6 +1304,7 @@ - long zone_pages_min; - long zone_pages_low; - long zone_pages_high; -+ long zone_vm_stat; - long neighbour_next; - long neighbour_primary_key; - long neighbour_ha; -@@ -1210,7 +1372,29 @@ - long x8664_pda_irqstackptr; - long x8664_pda_level4_pgt; - long x8664_pda_cpunumber; -+ long x8664_pda_me; - long tss_struct_ist; -+ long mem_section_section_mem_map; -+ long vcpu_guest_context_user_regs; -+ long cpu_user_regs_eip; -+ long cpu_user_regs_esp; -+ long cpu_user_regs_rip; -+ long cpu_user_regs_rsp; -+ long unwind_table_core; -+ long unwind_table_init; -+ long unwind_table_address; -+ long unwind_table_size; -+ long unwind_table_link; -+ long unwind_table_name; -+ long rq_cfs; -+ long rq_rt; -+ long rq_nr_running; -+ long cfs_rq_rb_leftmost; -+ long cfs_rq_nr_running; -+ long cfs_rq_tasks_timeline; -+ long task_struct_se; -+ long sched_entity_run_node; -+ long rt_rq_active; - }; - - struct size_table { /* stash of commonly-used sizes */ -@@ -1239,6 +1423,7 @@ - long umode_t; - long dentry; - long files_struct; -+ long fdtable; - long fs_struct; - long file; - long inode; -@@ -1264,6 +1449,7 @@ - long net_device; - long sock; - long signal_struct; -+ long sigpending_signal; - long signal_queue; - long sighand_struct; - long sigqueue; -@@ -1292,15 +1478,21 @@ - long address_space; - long char_device_struct; - long inet_sock; -+ long in6_addr; - long socket; - long spinlock_t; - long radix_tree_root; - long radix_tree_node; - long x8664_pda; -+ long ppc64_paca; - long gate_struct; - long tss_struct; - long task_struct_start_time; - long cputime_t; -+ long mem_section; -+ long pid_link; -+ long unwind_table; -+ long rlimit; - }; - - struct array_table { -@@ -1327,6 +1519,7 @@ - int free_area_DIMENSION; - int prio_array_queue; - int height_to_maxindex; -+ int pid_hash; - }; - - /* -@@ -1365,6 +1558,7 @@ - #define MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) - #define STRUCT_SIZE_INIT(X, Y) (ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) - #define ARRAY_LENGTH_INIT(A, B, C, D, E) ((A) = get_array_length(C, D, E)) -+#define ARRAY_LENGTH_INIT_ALT(A, B, C, D, E) ((A) = get_array_length_alt(B, C, D, E)) - #define MEMBER_SIZE_INIT(X, Y, Z) (ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) - - /* -@@ -1389,6 +1583,7 @@ - #define ULONGLONG(ADDR) *((ulonglong *)((char *)(ADDR))) - #define ULONG_PTR(ADDR) *((ulong **)((char *)(ADDR))) - #define USHORT(ADDR) *((ushort *)((char *)(ADDR))) 
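/*
 * The offset_table entries and the ULONG()/USHORT() style casting macros
 * above are normally used together: a structure is read into a scratch
 * buffer once, and individual members are then pulled out at their
 * debuginfo-derived offsets.  A self-contained sketch of that idiom;
 * ULONG_AT() and the offset value are stand-ins, not crash symbols.
 */
#include <stdio.h>
#include <string.h>

#define ULONG_AT(buf, off)      (*(unsigned long *)((char *)(buf) + (off)))

int main(void)
{
        unsigned char raw[64];                  /* pretend readmem() filled this in */
        unsigned long value = 0xc0ffee;
        size_t member_offset = 24;              /* stand-in for an offset_table entry */

        memset(raw, 0, sizeof(raw));
        memcpy(raw + member_offset, &value, sizeof(value));
        printf("member = %lx\n", ULONG_AT(raw, member_offset));
        return 0;
}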
-+#define SHORT(ADDR) *((short *)((char *)(ADDR))) - #define VOID_PTR(ADDR) *((void **)((char *)(ADDR))) - - struct node_table { -@@ -1396,6 +1591,7 @@ - ulong pgdat; - ulong mem_map; - ulong size; -+ ulong present; - ulonglong start_paddr; - ulong start_mapnr; - }; -@@ -1420,6 +1616,7 @@ - ulong kmem_max_limit; - ulong kmem_max_cpus; - ulong kmem_cache_count; -+ ulong kmem_cache_len_nodes; - ulong PG_reserved; - ulong PG_slab; - int kmem_cache_namelen; -@@ -1441,17 +1638,36 @@ - ulong cached_vma_hits[VMA_CACHE]; - int vma_cache_index; - ulong vma_cache_fills; --}; -- --#define NODES (0x1) --#define ZONES (0x2) --#define PERCPU_KMALLOC_V1 (0x4) --#define COMMON_VADDR (0x8) --#define KMEM_CACHE_INIT (0x10) --#define V_MEM_MAP (0x20) --#define PERCPU_KMALLOC_V2 (0x40) --#define KMEM_CACHE_UNAVAIL (0x80) --#define DISCONTIGMEM (0x100) -+ void *mem_sec; -+ int ZONE_HIGHMEM; -+ ulong *node_online_map; -+ int node_online_map_len; -+ int nr_vm_stat_items; -+ char **vm_stat_items; -+}; -+ -+#define NODES (0x1) -+#define ZONES (0x2) -+#define PERCPU_KMALLOC_V1 (0x4) -+#define COMMON_VADDR (0x8) -+#define KMEM_CACHE_INIT (0x10) -+#define V_MEM_MAP (0x20) -+#define PERCPU_KMALLOC_V2 (0x40) -+#define KMEM_CACHE_UNAVAIL (0x80) -+#define FLATMEM (0x100) -+#define DISCONTIGMEM (0x200) -+#define SPARSEMEM (0x400) -+#define SPARSEMEM_EX (0x800) -+#define PERCPU_KMALLOC_V2_NODES (0x1000) -+#define KMEM_CACHE_DELAY (0x2000) -+#define NODES_ONLINE (0x4000) -+#define VM_STAT (0x8000) -+#define KMALLOC_SLUB (0x10000) -+ -+#define IS_FLATMEM() (vt->flags & FLATMEM) -+#define IS_DISCONTIGMEM() (vt->flags & DISCONTIGMEM) -+#define IS_SPARSEMEM() (vt->flags & SPARSEMEM) -+#define IS_SPARSEMEM_EX() (vt->flags & SPARSEMEM_EX) - - #define COMMON_VADDR_SPACE() (vt->flags & COMMON_VADDR) - #define PADDR_PRLEN (vt->paddr_prlen) -@@ -1478,7 +1694,8 @@ - long list_head_offset; - ulong end; - ulong searchfor; -- char *structname; -+ char **structname; -+ int structname_args; - char *header; - }; - #define LIST_OFFSET_ENTERED (VERBOSE << 1) -@@ -1584,8 +1801,11 @@ - int mods_installed; - struct load_module *current; - struct load_module *load_modules; -+ off_t dwarf_eh_frame_file_offset; -+ ulong dwarf_eh_frame_size; - }; - -+/* flags for st */ - #define KERNEL_SYMS (0x1) - #define MODULE_SYMS (0x2) - #define LOAD_MODULE_SYMS (0x4) -@@ -1596,6 +1816,8 @@ - #define NO_SEC_CONTENTS (0x40) - #define FORCE_DEBUGINFO (0x80) - #define CRC_MATCHES (0x100) -+#define ADD_SYMBOL_FILE (0x200) -+#define USE_OLD_ADD_SYM (0x400) - - #endif /* !GDB_COMMON */ - -@@ -1611,6 +1833,8 @@ - #define MOD_KALLSYMS (0x8) - #define MOD_INITRD (0x10) - -+#define SEC_FOUND (0x10000) -+ - struct mod_section_data { - #if defined(GDB_6_1) - struct bfd_section *section; -@@ -1659,6 +1883,8 @@ - #define KVADDR (0x1) - #define UVADDR (0x2) - #define PHYSADDR (0x4) -+#define XENMACHADDR (0x8) -+#define FILEADDR (0x10) - #define AMBIGUOUS (~0) - - #define USE_USER_PGD (UVADDR << 2) -@@ -1680,6 +1906,33 @@ - #define VIRTPAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) - #define PHYSPAGEBASE(X) (((physaddr_t)(X)) & (physaddr_t)machdep->pagemask) - -+/* -+ * Sparse memory stuff -+ * These must follow the definitions in the kernel mmzone.h -+ */ -+#define SECTION_SIZE_BITS() (machdep->section_size_bits) -+#define MAX_PHYSMEM_BITS() (machdep->max_physmem_bits) -+#define SECTIONS_SHIFT() (MAX_PHYSMEM_BITS() - SECTION_SIZE_BITS()) -+#define PA_SECTION_SHIFT() (SECTION_SIZE_BITS()) -+#define PFN_SECTION_SHIFT() (SECTION_SIZE_BITS() - PAGESHIFT()) -+#define 
NR_MEM_SECTIONS() (1UL << SECTIONS_SHIFT()) -+#define PAGES_PER_SECTION() (1UL << PFN_SECTION_SHIFT()) -+#define PAGE_SECTION_MASK() (~(PAGES_PER_SECTION()-1)) -+ -+#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT()) -+#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT()) -+ -+#define SECTIONS_PER_ROOT() (machdep->sections_per_root) -+ -+/* CONFIG_SPARSEMEM_EXTREME */ -+#define _SECTIONS_PER_ROOT_EXTREME() (PAGESIZE() / SIZE(mem_section)) -+/* !CONFIG_SPARSEMEM_EXTREME */ -+#define _SECTIONS_PER_ROOT() (1) -+ -+#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT()) -+#define NR_SECTION_ROOTS() (NR_MEM_SECTIONS() / SECTIONS_PER_ROOT()) -+#define SECTION_ROOT_MASK() (SECTIONS_PER_ROOT() - 1) -+ - /* - * Machine specific stuff - */ -@@ -1689,8 +1942,8 @@ - #define MACHINE_TYPE "X86" - #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) - #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) --#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) --#define KVBASE_MASK (0x1fffff) -+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) -+#define KVBASE_MASK (0x7fffff) - - #define PGDIR_SHIFT_2LEVEL (22) - #define PTRS_PER_PTE_2LEVEL (1024) -@@ -1721,25 +1974,86 @@ - - #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) - #define SWP_OFFSET(entry) ((entry) >> 8) -+#define __swp_type_PAE(entry) (((entry) >> 32) & 0x1f) -+#define __swp_type_nonPAE(entry) (((entry) >> 1) & 0x1f) -+#define __swp_offset_PAE(entry) (((entry) >> 32) >> 5) -+#define __swp_offset_nonPAE(entry) ((entry) >> 8) -+#define __swp_type(entry) (machdep->flags & PAE ? \ -+ __swp_type_PAE(entry) : __swp_type_nonPAE(entry)) -+#define __swp_offset(entry) (machdep->flags & PAE ? \ -+ __swp_offset_PAE(entry) : __swp_offset_nonPAE(entry)) - - #define TIF_SIGPENDING (2) - -+// CONFIG_X86_PAE -+#define _SECTION_SIZE_BITS_PAE 30 -+#define _MAX_PHYSMEM_BITS_PAE 36 -+ -+// !CONFIG_X86_PAE -+#define _SECTION_SIZE_BITS 26 -+#define _MAX_PHYSMEM_BITS 32 -+ -+#define IS_LAST_PMD_READ_PAE(pmd) ((ulong)(pmd) == machdep->machspec->last_pmd_read_PAE) -+#define IS_LAST_PTBL_READ_PAE(ptbl) ((ulong)(ptbl) == machdep->machspec->last_ptbl_read_PAE) -+ -+#define FILL_PMD_PAE(PMD, TYPE, SIZE) \ -+ if (!IS_LAST_PMD_READ_PAE(PMD)) { \ -+ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ -+ SIZE, "pmd page", FAULT_ON_ERROR); \ -+ machdep->machspec->last_pmd_read_PAE = (ulonglong)(PMD); \ -+ } -+ -+#define FILL_PTBL_PAE(PTBL, TYPE, SIZE) \ -+ if (!IS_LAST_PTBL_READ_PAE(PTBL)) { \ -+ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ -+ SIZE, "page table", FAULT_ON_ERROR); \ -+ machdep->machspec->last_ptbl_read_PAE = (ulonglong)(PTBL); \ -+ } -+ - #endif /* X86 */ - - #ifdef X86_64 - #define _64BIT_ - #define MACHINE_TYPE "X86_64" - --#define USERSPACE_TOP 0x0000008000000000 --#define __START_KERNEL_map 0xffffffff80000000 --#define PAGE_OFFSET 0x0000010000000000 -- --#define VMALLOC_START 0xffffff0000000000 --#define VMALLOC_END 0xffffff7fffffffff --#define MODULES_VADDR 0xffffffffa0000000 --#define MODULES_END 0xffffffffafffffff -+#define USERSPACE_TOP (machdep->machspec->userspace_top) -+#define PAGE_OFFSET (machdep->machspec->page_offset) -+#define VMALLOC_START (machdep->machspec->vmalloc_start_addr) -+#define VMALLOC_END (machdep->machspec->vmalloc_end) -+#define MODULES_VADDR (machdep->machspec->modules_vaddr) -+#define MODULES_END (machdep->machspec->modules_end) -+ -+#define __START_KERNEL_map 0xffffffff80000000UL - #define MODULES_LEN (MODULES_END - MODULES_VADDR) - -+#define USERSPACE_TOP_ORIG 
0x0000008000000000 -+#define PAGE_OFFSET_ORIG 0x0000010000000000 -+#define VMALLOC_START_ADDR_ORIG 0xffffff0000000000 -+#define VMALLOC_END_ORIG 0xffffff7fffffffff -+#define MODULES_VADDR_ORIG 0xffffffffa0000000 -+#define MODULES_END_ORIG 0xffffffffafffffff -+ -+#define USERSPACE_TOP_2_6_11 0x0000800000000000 -+#define PAGE_OFFSET_2_6_11 0xffff810000000000 -+#define VMALLOC_START_ADDR_2_6_11 0xffffc20000000000 -+#define VMALLOC_END_2_6_11 0xffffe1ffffffffff -+#define MODULES_VADDR_2_6_11 0xffffffff88000000 -+#define MODULES_END_2_6_11 0xfffffffffff00000 -+ -+#define USERSPACE_TOP_XEN 0x0000800000000000 -+#define PAGE_OFFSET_XEN 0xffff880000000000 -+#define VMALLOC_START_ADDR_XEN 0xffffc20000000000 -+#define VMALLOC_END_XEN 0xffffe1ffffffffff -+#define MODULES_VADDR_XEN 0xffffffff88000000 -+#define MODULES_END_XEN 0xfffffffffff00000 -+ -+#define USERSPACE_TOP_XEN_RHEL4 0x0000008000000000 -+#define PAGE_OFFSET_XEN_RHEL4 0xffffff8000000000 -+#define VMALLOC_START_ADDR_XEN_RHEL4 0xffffff0000000000 -+#define VMALLOC_END_XEN_RHEL4 0xffffff7fffffffff -+#define MODULES_VADDR_XEN_RHEL4 0xffffffffa0000000 -+#define MODULES_END_XEN_RHEL4 0xffffffffafffffff -+ - #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) - #define VTOP(X) x86_64_VTOP((ulong)(X)) - #define IS_VMALLOC_ADDR(X) x86_64_IS_VMALLOC_ADDR((ulong)(X)) -@@ -1757,12 +2071,34 @@ - #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) - #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) - -+#define IS_LAST_PML4_READ(pml4) ((ulong)(pml4) == machdep->machspec->last_pml4_read) -+ - #define FILL_PML4() { \ - if (!(pc->flags & RUNTIME) || ACTIVE()) \ -- readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, \ -+ if (!IS_LAST_PML4_READ(vt->kernel_pgd[0])) \ -+ readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, \ - PAGESIZE(), "init_level4_pgt", FAULT_ON_ERROR); \ -+ machdep->machspec->last_pml4_read = (ulong)(vt->kernel_pgd[0]); \ - } - -+#define FILL_PML4_HYPER() { \ -+ if (!machdep->machspec->last_pml4_read) { \ -+ readmem(symbol_value("idle_pg_table_4"), KVADDR, \ -+ machdep->machspec->pml4, PAGESIZE(), "idle_pg_table_4", \ -+ FAULT_ON_ERROR); \ -+ machdep->machspec->last_pml4_read = symbol_value("idle_pg_table_4"); \ -+ }\ -+} -+ -+#define IS_LAST_UPML_READ(pml) ((ulong)(pml) == machdep->machspec->last_upml_read) -+ -+#define FILL_UPML(PML, TYPE, SIZE) \ -+ if (!IS_LAST_UPML_READ(PML)) { \ -+ readmem((ulonglong)((ulong)(PML)), TYPE, machdep->machspec->upml, \ -+ SIZE, "pml page", FAULT_ON_ERROR); \ -+ machdep->machspec->last_upml_read = (ulong)(PML); \ -+ } -+ - /* - * PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so - * for safety, use the 2.6 values to generate it. 
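/*
 * FILL_PML4(), FILL_UPML() and the FILL_PUD()/FILL_PMD()/FILL_PTBL() macros
 * earlier in this header all share one caching pattern: a page-table page is
 * re-read from the dumpfile only when its address differs from the one
 * recorded by the previous read.  A stripped-down sketch of that pattern;
 * read_page() and the variable names are illustrative stand-ins for
 * readmem() and the machdep fields.
 */
#include <stdio.h>
#include <string.h>

static unsigned long last_pud_read;
static char pud_cache[4096];
static int reads_done;

/* Stand-in for readmem(): zero-fills the buffer and counts the calls. */
static void
read_page(unsigned long addr, void *buf, unsigned long size)
{
        (void)addr;
        memset(buf, 0, size);
        reads_done++;
}

/* IS_LAST_PUD_READ()/FILL_PUD() boiled down to the bare caching step. */
static void
fill_pud_cached(unsigned long pud)
{
        if (pud != last_pud_read) {
                read_page(pud, pud_cache, sizeof(pud_cache));
                last_pud_read = pud;
        }
}

int main(void)
{
        fill_pud_cached(0x1000);
        fill_pud_cached(0x1000);                /* second call hits the cache */
        printf("reads_done = %d\n", reads_done);        /* prints 1 */
        return 0;
}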
-@@ -1791,11 +2127,22 @@ - - #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) - #define SWP_OFFSET(entry) ((entry) >> 8) -+#define __swp_type(entry) SWP_TYPE(entry) -+#define __swp_offset(entry) SWP_OFFSET(entry) - - #define TIF_SIGPENDING (2) - - #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) - -+#define _CPU_PDA_READ(CPU, BUFFER) \ -+ ((STRNEQ("_cpu_pda", closest_symbol((symbol_value("_cpu_pda") + \ -+ ((CPU) * sizeof(unsigned long)))))) && \ -+ (readmem(symbol_value("_cpu_pda") + ((CPU) * sizeof(void *)), \ -+ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ -+ "_cpu_pda addr", FAULT_ON_ERROR)) && \ -+ (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ -+ "cpu_pda entry", FAULT_ON_ERROR))) -+ - #define CPU_PDA_READ(CPU, BUFFER) \ - (STRNEQ("cpu_pda", closest_symbol((symbol_value("cpu_pda") + \ - ((CPU) * SIZE(x8664_pda))))) && \ -@@ -1806,6 +2153,9 @@ - #define VALID_LEVEL4_PGT_ADDR(X) \ - (((X) == VIRTPAGEBASE(X)) && IS_KVADDR(X) && !IS_VMALLOC_ADDR(X)) - -+#define _SECTION_SIZE_BITS 27 -+#define _MAX_PHYSMEM_BITS 40 -+ - #endif /* X86_64 */ - - #ifdef ALPHA -@@ -1816,7 +2166,7 @@ - - #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) - #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) --#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) -+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) - #define KSEG_BASE_48_BIT (0xffff800000000000) - #define KSEG_BASE (0xfffffc0000000000) - #define _PFN_MASK (0xFFFFFFFF00000000) -@@ -1848,6 +2198,8 @@ - - #define SWP_TYPE(entry) (((entry) >> 32) & 0xff) - #define SWP_OFFSET(entry) ((entry) >> 40) -+#define __swp_type(entry) SWP_TYPE(entry) -+#define __swp_offset(entry) SWP_OFFSET(entry) - - #define TIF_SIGPENDING (2) - -@@ -1861,7 +2213,7 @@ - - #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) - #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) --#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) -+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) - - #define PGDIR_SHIFT (22) - #define PTRS_PER_PTE (1024) -@@ -1881,9 +2233,14 @@ - - #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) - #define SWP_OFFSET(entry) ((entry) >> 8) -+#define __swp_type(entry) SWP_TYPE(entry) -+#define __swp_offset(entry) SWP_OFFSET(entry) - - #define TIF_SIGPENDING (2) - -+#define _SECTION_SIZE_BITS 24 -+#define _MAX_PHYSMEM_BITS 44 -+ - #endif /* PPC */ - - #ifdef IA64 -@@ -1908,6 +2265,9 @@ - #define KERNEL_UNCACHED_BASE ((ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT) - #define KERNEL_CACHED_BASE ((ulong)KERNEL_CACHED_REGION << REGION_SHIFT) - -+#define _SECTION_SIZE_BITS 30 -+#define _MAX_PHYSMEM_BITS 50 -+ - /* - * As of 2.6, these are no longer straight forward. 
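/*
 * The per-architecture SWP_TYPE()/SWP_OFFSET() macros above just unpack two
 * bit-fields from a non-present pte.  Worked numbers for the x86_64 layout
 * shown here (type in bits 1-6, offset from bit 8 up); the macro names below
 * and the sample entry are illustrative, not crash symbols.
 */
#include <stdio.h>

#define SWP_TYPE_X8664(e)       (((e) >> 1) & 0x3f)
#define SWP_OFFSET_X8664(e)     ((e) >> 8)

int main(void)
{
        unsigned long entry = (0x123UL << 8) | (3UL << 1);      /* 0x12306 */

        printf("type   = %lx\n", SWP_TYPE_X8664(entry));        /* 3 */
        printf("offset = %lx\n", SWP_OFFSET_X8664(entry));      /* 123 */
        return 0;
}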
- */ -@@ -1917,16 +2277,57 @@ - - #define SWITCH_STACK_ADDR(X) (ia64_get_switch_stack((ulong)(X))) - --#define PGDIR_SHIFT (PAGESHIFT() + 2*(PAGESHIFT()-3)) --#define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT()-3)) --#define PTRS_PER_PGD (((ulong)(1)) << (PAGESHIFT()-3)) --#define PTRS_PER_PMD (((ulong)(1)) << (PAGESHIFT()-3)) --#define PTRS_PER_PTE (((ulong)(1)) << (PAGESHIFT()-3)) --#define PTRS_PER_PAGE (((ulong)(1)) << (PAGESHIFT()-3)) - #define __IA64_UL(x) ((unsigned long)(x)) - #define IA64_MAX_PHYS_BITS (50) /* max # of phys address bits (architected) */ - - /* -+ * How many pointers will a page table level hold expressed in shift -+ */ -+#define PTRS_PER_PTD_SHIFT (PAGESHIFT()-3) -+ -+/* -+ * Definitions for fourth level: -+ */ -+#define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT)) -+ -+/* -+ * Definitions for third level: -+ * -+ * PMD_SHIFT determines the size of the area a third-level page table -+ * can map. -+ */ -+#define PMD_SHIFT (PAGESHIFT() + (PTRS_PER_PTD_SHIFT)) -+#define PMD_SIZE (1UL << PMD_SHIFT) -+#define PMD_MASK (~(PMD_SIZE-1)) -+#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT)) -+ -+/* -+ * PUD_SHIFT determines the size of the area a second-level page table -+ * can map -+ */ -+#define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) -+#define PUD_SIZE (1UL << PUD_SHIFT) -+#define PUD_MASK (~(PUD_SIZE-1)) -+#define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT)) -+ -+/* -+ * Definitions for first level: -+ * -+ * PGDIR_SHIFT determines what a first-level page table entry can map. -+ */ -+ -+#define PGDIR_SHIFT_4L (PUD_SHIFT + (PTRS_PER_PTD_SHIFT)) -+#define PGDIR_SHIFT_3L (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) -+/* Turns out 4L & 3L PGDIR_SHIFT are the same (for now) */ -+#define PGDIR_SHIFT PGDIR_SHIFT_4L -+#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT) -+#define PGDIR_MASK (~(PGDIR_SIZE-1)) -+#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT -+#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT) -+#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */ -+#define FIRST_USER_ADDRESS 0 -+ -+/* - * First, define the various bits in a PTE. Note that the PTE format - * matches the VHPT short format, the firt doubleword of the VHPD long - * format, and the first doubleword of the TLB insertion format. -@@ -1978,6 +2379,7 @@ - #define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED - - #define EFI_PAGE_SHIFT (12) -+ - /* - * NOTE: #include'ing creates too many compiler problems, so - * this stuff is hardwired here; it's probably etched in stone somewhere. 
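/*
 * The IA64 level shifts above are all derived by stacking PTRS_PER_PTD_SHIFT
 * (PAGESHIFT() - 3) on top of the page shift.  Worked numbers assuming a 16K
 * page size (a PAGESHIFT() of 14), which is only one possible configuration:
 */
#include <stdio.h>

int main(void)
{
        int pageshift = 14;                             /* assumed 16K pages */
        int ptrs_shift = pageshift - 3;                 /* PTRS_PER_PTD_SHIFT: 11, 2048 entries per level */
        int pmd_shift = pageshift + ptrs_shift;         /* 25: one PMD entry maps 32MB */
        int pud_shift = pmd_shift + ptrs_shift;         /* 36: one PUD entry maps 64GB */
        int pgdir_shift = pud_shift + ptrs_shift;       /* 47: PGDIR_SHIFT_4L */

        printf("PMD_SHIFT=%d PUD_SHIFT=%d PGDIR_SHIFT=%d\n",
            pmd_shift, pud_shift, pgdir_shift);
        return 0;
}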
-@@ -2020,6 +2422,8 @@ - - #define SWP_TYPE(entry) (((entry) >> 1) & 0xff) - #define SWP_OFFSET(entry) ((entry) >> 9) -+#define __swp_type(entry) ((entry >> 2) & 0x7f) -+#define __swp_offset(entry) ((entry << 1) >> 10) - - #define TIF_SIGPENDING (1) - -@@ -2038,11 +2442,14 @@ - #define _64BIT_ - #define MACHINE_TYPE "PPC64" - -+#define PPC64_64K_PAGE_SIZE 65536 -+#define PPC64_STACK_SIZE 16384 -+ - #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) - - #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) - #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) --#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) -+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) - #define KERNELBASE machdep->pageoffset - - #define PGDIR_SHIFT (machdep->pageshift + (machdep->pageshift -3) + (machdep->pageshift - 2)) -@@ -2067,6 +2474,32 @@ - #define PGD_OFFSET(vaddr) ((vaddr >> PGDIR_SHIFT) & 0x7ff) - #define PMD_OFFSET(vaddr) ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) - -+/* 4-level page table support */ -+ -+/* 4K pagesize */ -+#define PTE_INDEX_SIZE_L4_4K 9 -+#define PMD_INDEX_SIZE_L4_4K 7 -+#define PUD_INDEX_SIZE_L4_4K 7 -+#define PGD_INDEX_SIZE_L4_4K 9 -+#define PTE_SHIFT_L4_4K 17 -+#define PMD_MASKED_BITS_4K 0 -+ -+/* 64K pagesize */ -+#define PTE_INDEX_SIZE_L4_64K 12 -+#define PMD_INDEX_SIZE_L4_64K 12 -+#define PUD_INDEX_SIZE_L4_64K 0 -+#define PGD_INDEX_SIZE_L4_64K 4 -+#define PTE_SHIFT_L4_64K 32 -+#define PMD_MASKED_BITS_64K 0x1ff -+ -+#define L4_OFFSET(vaddr) ((vaddr >> (machdep->machspec->l4_shift)) & 0x1ff) -+ -+#define PGD_OFFSET_L4(vaddr) \ -+ ((vaddr >> (machdep->machspec->l3_shift)) & (machdep->machspec->ptrs_per_l3 - 1)) -+ -+#define PMD_OFFSET_L4(vaddr) \ -+ ((vaddr >> (machdep->machspec->l2_shift)) & (machdep->machspec->ptrs_per_l2 - 1)) -+ - #define _PAGE_PRESENT 0x001UL /* software: pte contains a translation */ - #define _PAGE_USER 0x002UL /* matches one of the PP bits */ - #define _PAGE_RW 0x004UL /* software: user write access allowed */ -@@ -2080,6 +2513,8 @@ - - #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) - #define SWP_OFFSET(entry) ((entry) >> 8) -+#define __swp_type(entry) SWP_TYPE(entry) -+#define __swp_offset(entry) SWP_OFFSET(entry) - - #define MSR_PR_LG 14 /* Problem State / Privilege Level */ - /* Used to find the user or kernel-mode frame*/ -@@ -2087,6 +2522,9 @@ - #define STACK_FRAME_OVERHEAD 112 - #define EXCP_FRAME_MARKER 0x7265677368657265 - -+#define _SECTION_SIZE_BITS 24 -+#define _MAX_PHYSMEM_BITS 44 -+ - #endif /* PPC64 */ - - #ifdef S390 -@@ -2095,7 +2533,7 @@ - - #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) - #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) --#define IS_VMALLOC_ADDR(X) s390_IS_VMALLOC_ADDR(X) -+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) - - #define PTRS_PER_PTE 1024 - #define PTRS_PER_PMD 1 -@@ -2105,6 +2543,8 @@ - #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) - #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffe) | \ - (((entry) >> 7) & 0x1)) -+#define __swp_type(entry) SWP_TYPE(entry) -+#define __swp_offset(entry) SWP_OFFSET(entry) - - #define TIF_SIGPENDING (2) - -@@ -2116,7 +2556,7 @@ - - #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) - #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) --#define IS_VMALLOC_ADDR(X) ((ulong)(X) >= vt->vmalloc_start) -+#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) - #define PTRS_PER_PTE 512 - #define PTRS_PER_PMD 1024 - #define PTRS_PER_PGD 
2048 -@@ -2125,6 +2565,8 @@ - #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) - #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffffffffffe) | \ - (((entry) >> 7) & 0x1)) -+#define __swp_type(entry) SWP_TYPE(entry) -+#define __swp_offset(entry) SWP_OFFSET(entry) - - #define TIF_SIGPENDING (2) - -@@ -2134,6 +2576,8 @@ - - #define SWP_TYPE(entry) (error("PLATFORM_SWP_TYPE: TBD\n")) - #define SWP_OFFSET(entry) (error("PLATFORM_SWP_OFFSET: TBD\n")) -+#define __swp_type(entry) SWP_TYPE(entry) -+#define __swp_offset(entry) SWP_OFFSET(entry) - - #endif /* PLATFORM */ - -@@ -2185,7 +2629,10 @@ - #define BADVAL ((ulong)(-1)) - #define UNUSED (-1) - -+#define UNINITIALIZED (BADVAL) -+ - #define BITS_PER_BYTE (8) -+#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) - - /* - * precision lengths for fprintf -@@ -2199,9 +2646,10 @@ - - #define MINSPACE (-100) - --#define SYNOPSIS (0x1) --#define COMPLETE_HELP (0x2) --#define PIPE_TO_LESS (0x4) -+#define SYNOPSIS (0x1) -+#define COMPLETE_HELP (0x2) -+#define PIPE_TO_SCROLL (0x4) -+#define MUST_HELP (0x8) - - #define LEFT_JUSTIFY (1) - #define RIGHT_JUSTIFY (2) -@@ -2419,17 +2867,22 @@ - /* - * ps command options. - */ --#define PS_BY_PID (0x1) --#define PS_BY_TASK (0x2) --#define PS_BY_CMD (0x4) --#define PS_SHOW_ALL (0x8) --#define PS_PPID_LIST (0x10) --#define PS_CHILD_LIST (0x20) --#define PS_KERNEL (0x40) --#define PS_USER (0x80) --#define PS_TIMES (0x100) --#define PS_KSTACKP (0x200) --#define PS_LAST_RUN (0x400) -+#define PS_BY_PID (0x1) -+#define PS_BY_TASK (0x2) -+#define PS_BY_CMD (0x4) -+#define PS_SHOW_ALL (0x8) -+#define PS_PPID_LIST (0x10) -+#define PS_CHILD_LIST (0x20) -+#define PS_KERNEL (0x40) -+#define PS_USER (0x80) -+#define PS_TIMES (0x100) -+#define PS_KSTACKP (0x200) -+#define PS_LAST_RUN (0x400) -+#define PS_ARGV_ENVP (0x800) -+#define PS_TGID_LIST (0x1000) -+#define PS_RLIMIT (0x2000) -+ -+#define PS_EXCLUSIVE (PS_TGID_LIST|PS_ARGV_ENVP|PS_TIMES|PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN|PS_RLIMIT) - - #define MAX_PS_ARGS (100) /* maximum command-line specific requests */ - -@@ -2461,7 +2914,7 @@ - extern struct program_context program_context, *pc; - extern struct task_table task_table, *tt; - extern struct kernel_table kernel_table, *kt; --extern struct command_table_entry base_command_table[]; -+extern struct command_table_entry linux_command_table[]; - extern char *args[MAXARGS]; - extern int argcnt; - extern int argerrs; -@@ -2534,6 +2987,9 @@ - void cmd_gdb(void); /* gdb_interface.c */ - void cmd_net(void); /* net.c */ - void cmd_extend(void); /* extensions.c */ -+#if defined(S390) || defined(S390X) -+void cmd_s390dbf(void); -+#endif - - /* - * main.c -@@ -2591,6 +3047,8 @@ - int interruptible(void); - int received_SIGINT(void); - void debug_redirect(char *); -+int CRASHPAGER_valid(void); -+char *setup_scroll_command(void); - - /* - * tools.c -@@ -2658,6 +3116,7 @@ - int hq_open(void); - int hq_close(void); - int hq_enter(ulong); -+int hq_entry_exists(ulong); - long get_embedded(void); - void dump_embedded(char *); - char *ordinal(ulong, char *); -@@ -2685,6 +3144,10 @@ - int machine_type(char *); - void command_not_supported(void); - void option_not_supported(int); -+void please_wait(char *); -+void please_wait_done(void); -+int pathcmp(char *, char *); -+int calculate(char *, ulong *, ulonglong *, ulong); - - - /* -@@ -2721,9 +3184,11 @@ - struct syment *next_symbol(char *, struct syment *); - struct syment *prev_symbol(char *, struct syment *); - void get_symbol_data(char *, long, void *); -+int 
try_get_symbol_data(char *, long, void *); - char *value_to_symstr(ulong, char *, ulong); - char *value_symbol(ulong); - ulong symbol_value(char *); -+ulong symbol_value_module(char *, char *); - int symbol_exists(char *s); - int kernel_symbol_exists(char *s); - int get_syment_array(char *, struct syment **, int); -@@ -2738,9 +3203,11 @@ - void dump_struct_table(ulong); - void dump_offset_table(char *, ulong); - int is_elf_file(char *); -+int file_elf_version(char *); - int is_system_map(char *); - int select_namelist(char *); - int get_array_length(char *, int *, long); -+int get_array_length_alt(char *, char *, int *, long); - int builtin_array_length(char *, int, int *); - char *get_line_number(ulong, char *, int); - char *get_build_directory(char *); -@@ -2768,6 +3235,7 @@ - long OFFSET_option(long, long, char *, char *, int, char *, char *); - long SIZE_option(long, long, char *, char *, int, char *, char *); - void dump_trace(ulong *); -+int enumerator_value(char *, long *); - - /* - * memory.c -@@ -2807,6 +3275,7 @@ - char *swap_location(ulonglong, char *); - void clear_swap_info_cache(void); - uint memory_page_size(void); -+void force_page_size(char *); - ulong first_vmalloc_address(void); - int l1_cache_size(void); - int dumpfile_memory(int); -@@ -2838,6 +3307,7 @@ - void open_files_dump(ulong, int, struct reference *); - void get_pathname(ulong, char *, int, int, ulong); - ulong file_to_dentry(ulong); -+ulong file_to_vfsmnt(ulong); - void nlm_files_dump(void); - int get_proc_version(void); - int file_checksum(char *, long *); -@@ -2874,6 +3344,7 @@ - void help_init(void); - void cmd_usage(char *, int); - void display_version(void); -+void display_help_screen(char *); - #ifdef X86 - #define dump_machdep_table(X) x86_dump_machdep_table(X) - #endif -@@ -2945,6 +3416,9 @@ - extern char *help_waitq[]; - extern char *help_whatis[]; - extern char *help_wr[]; -+#if defined(S390) || defined(S390X) -+extern char *help_s390dbf[]; -+#endif - - /* - * task.c -@@ -2962,10 +3436,12 @@ - ulong task_flags(ulong); - ulong task_state(ulong); - ulong task_mm(ulong, int); -+ulong task_tgid(ulong); - ulonglong task_last_run(ulong); - int comm_exists(char *); - struct task_context *task_to_context(ulong); - struct task_context *pid_to_context(ulong); -+struct task_context *tgid_to_context(ulong); - ulong stkptr_to_task(ulong); - ulong task_to_thread_info(ulong); - ulong task_to_stackbase(ulong); -@@ -3005,11 +3481,13 @@ - */ - void register_extension(struct command_table_entry *); - void dump_extension_table(int); -+void load_extension(char *); -+void unload_extension(char *); - - /* - * kernel.c - */ --void kernel_init(int); -+void kernel_init(void); - void module_init(void); - void verify_version(void); - void verify_spinlock(void); -@@ -3019,14 +3497,18 @@ - int is_system_call(char *, ulong); - void generic_dump_irq(int); - int generic_dis_filter(ulong, char *); -+int kernel_BUG_encoding_bytes(void); - void display_sys_stats(void); --void dump_kernel_table(void); -+char *get_uptime(char *, ulonglong *); -+void clone_bt_info(struct bt_info *, struct bt_info *, struct task_context *); -+void dump_kernel_table(int); - void dump_bt_info(struct bt_info *); - void dump_log(int); - void set_cpu(int); - void clear_machdep_cache(void); - struct stack_hook *gather_text_list(struct bt_info *); - int get_cpus_online(void); -+int get_cpus_possible(void); - void print_stack_text_syms(struct bt_info *, ulong, ulong); - void back_trace(struct bt_info *); - #define BT_RAW (0x1ULL) -@@ -3039,11 +3521,13 @@ - #define 
BT_EXCEPTION_FRAME (0x80ULL) - #define BT_LINE_NUMBERS (0x100ULL) - #define BT_USER_EFRAME (0x200ULL) -+#define BT_INCOMPLETE_USER_EFRAME (BT_USER_EFRAME) - #define BT_SAVE_LASTSP (0x400ULL) - #define BT_FROM_EXCEPTION (0x800ULL) - #define BT_FROM_CALLFRAME (0x1000ULL) - #define BT_EFRAME_SEARCH (0x2000ULL) - #define BT_SPECULATE (0x4000ULL) -+#define BT_FRAMESIZE_DISABLE (BT_SPECULATE) - #define BT_RESCHEDULE (0x8000ULL) - #define BT_SCHEDULE (BT_RESCHEDULE) - #define BT_RET_FROM_SMP_FORK (0x10000ULL) -@@ -3069,6 +3553,8 @@ - #define BT_DUMPFILE_SEARCH (0x800000000ULL) - #define BT_EFRAME_SEARCH2 (0x1000000000ULL) - #define BT_START (0x2000000000ULL) -+#define BT_TEXT_SYMBOLS_ALL (0x4000000000ULL) -+#define BT_XEN_STOP_THIS_CPU (0x8000000000ULL) - - #define BT_REF_HEXVAL (0x1) - #define BT_REF_SYMBOL (0x2) -@@ -3101,6 +3587,17 @@ - #define TYPE_S390D (REMOTE_VERBOSE << 6) - #define TYPE_NETDUMP (REMOTE_VERBOSE << 7) - -+ulonglong xen_m2p(ulonglong); -+ -+void read_in_kernel_config(int); -+ -+#define IKCFG_INIT (0) -+#define IKCFG_READ (1) -+ -+#define MAGIC_START "IKCFG_ST" -+#define MAGIC_END "IKCFG_ED" -+#define MAGIC_SIZE (sizeof(MAGIC_START) - 1) -+ - /* - * dev.c - */ -@@ -3129,7 +3626,6 @@ - void x86_display_idt_table(void); - #define display_idt_table() x86_display_idt_table() - #define KSYMS_START (0x1) --#define PAE (0x2) - void x86_dump_eframe_common(struct bt_info *bt, ulong *, int); - char *x86_function_called_by(ulong); - struct syment *x86_jmp_error_code(ulong); -@@ -3140,6 +3636,8 @@ - ulong entry_tramp_start; - ulong entry_tramp_end; - physaddr_t entry_tramp_start_phys; -+ ulonglong last_pmd_read_PAE; -+ ulonglong last_ptbl_read_PAE; - }; - - struct syment *x86_is_entry_tramp_address(ulong, ulong *); -@@ -3194,19 +3692,51 @@ - #define NMI_STACK 2 /* ebase[] offset to NMI exception stack */ - - struct machine_specific { -+ ulong userspace_top; -+ ulong page_offset; -+ ulong vmalloc_start_addr; -+ ulong vmalloc_end; -+ ulong modules_vaddr; -+ ulong modules_end; -+ ulong phys_base; - char *pml4; -+ char *upml; -+ ulong last_upml_read; -+ ulong last_pml4_read; - char *irqstack; -+ ulong irq_eframe_link; - struct x86_64_pt_regs_offsets pto; - struct x86_64_stkinfo stkinfo; - }; - - #define KSYMS_START (0x1) - #define PT_REGS_INIT (0x2) -+#define VM_ORIG (0x4) -+#define VM_2_6_11 (0x8) -+#define VM_XEN (0x10) -+#define NO_TSS (0x20) -+#define SCHED_TEXT (0x40) -+#define PHYS_BASE (0x80) -+#define VM_XEN_RHEL4 (0x100) -+ -+#define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4) - - #define _2MB_PAGE_MASK (~((MEGABYTES(2))-1)) -+ -+#endif -+ -+#if defined(X86) || defined(X86_64) -+ -+/* -+ * unwind_x86_32_64.c -+ */ -+void init_unwind_table(void); -+int dwarf_backtrace(struct bt_info *, int, ulong); -+void dwarf_debug(struct bt_info *); -+int dwarf_print_stack_entry(struct bt_info *, int); -+ - #endif - --void x86_64_backtrace_notice(ulong); - - /* - * ppc64.c -@@ -3240,13 +3770,42 @@ - ulong hwintrstack[NR_CPUS]; - char *hwstackbuf; - uint hwstacksize; --}; -+ char *level4; -+ ulong last_level4_read; -+ -+ uint l4_index_size; -+ uint l3_index_size; -+ uint l2_index_size; -+ uint l1_index_size; -+ -+ uint ptrs_per_l3; -+ uint ptrs_per_l2; -+ uint ptrs_per_l1; -+ -+ uint l4_shift; -+ uint l3_shift; -+ uint l2_shift; -+ uint l1_shift; -+ -+ uint pte_shift; -+ uint l2_masked_bits; -+}; -+ -+#define IS_LAST_L4_READ(l4) ((ulong)(l4) == machdep->machspec->last_level4_read) -+ -+#define FILL_L4(L4, TYPE, SIZE) \ -+ if (!IS_LAST_L4_READ(L4)) { \ -+ readmem((ulonglong)((ulong)(L4)), TYPE, 
machdep->machspec->level4, \ -+ SIZE, "level4 page", FAULT_ON_ERROR); \ -+ machdep->machspec->last_level4_read = (ulong)(L4); \ -+ } - - void ppc64_init(int); - void ppc64_dump_machdep_table(ulong); - #define display_idt_table() \ - error(FATAL, "-d option is not applicable to PowerPC architecture\n") - #define KSYMS_START (0x1) -+#define VM_ORIG (0x2) - #endif - - /* -@@ -3258,6 +3817,8 @@ - #define display_idt_table() \ - error(FATAL, "-d option is not applicable to PowerPC architecture\n") - #define KSYMS_START (0x1) -+/* This should match PPC_FEATURE_BOOKE from include/asm-powerpc/cputable.h */ -+#define CPU_BOOKE (0x00008000) - #endif - - /* -@@ -3283,6 +3844,8 @@ - #define display_idt_table() \ - error(FATAL, "-d option TBD on ia64 architecture\n"); - int ia64_in_init_stack(ulong addr); -+int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt); -+physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo); - - #define OLD_UNWIND (0x1) /* CONFIG_IA64_NEW_UNWIND not turned on */ - #define NEW_UNWIND (0x2) /* CONFIG_IA64_NEW_UNWIND turned on */ -@@ -3396,10 +3959,26 @@ - int netdump_init(char *, FILE *); - ulong get_netdump_panic_task(void); - ulong get_netdump_switch_stack(ulong); --int netdump_memory_dump(FILE *); - FILE *set_netdump_fp(FILE *); -+int netdump_memory_dump(FILE *); - void get_netdump_regs(struct bt_info *, ulong *, ulong *); - int is_partial_netdump(void); -+void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); -+void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); -+struct vmcore_data; -+struct vmcore_data *get_kdump_vmcore_data(void); -+int read_kdump(int, void *, int, ulong, physaddr_t); -+int write_kdump(int, void *, int, ulong, physaddr_t); -+int is_kdump(char *, ulong); -+int kdump_init(char *, FILE *); -+ulong get_kdump_panic_task(void); -+uint kdump_page_size(void); -+int kdump_free_memory(void); -+int kdump_memory_used(void); -+int kdump_memory_dump(FILE *); -+void get_kdump_regs(struct bt_info *, ulong *, ulong *); -+void xen_kdump_p2m_mfn(char *); -+int is_sadump_xen(void); - - /* - * diskdump.c -@@ -3416,6 +3995,27 @@ - int diskdump_memory_dump(FILE *); - FILE *set_diskdump_fp(FILE *); - void get_diskdump_regs(struct bt_info *, ulong *, ulong *); -+int diskdump_phys_base(unsigned long *); -+ulong *diskdump_flags; -+int is_partial_diskdump(void); -+ -+/* -+ * xendump.c -+ */ -+int is_xendump(char *); -+int read_xendump(int, void *, int, ulong, physaddr_t); -+int write_xendump(int, void *, int, ulong, physaddr_t); -+uint xendump_page_size(void); -+int xendump_free_memory(void); -+int xendump_memory_used(void); -+int xendump_init(char *, FILE *); -+int xendump_memory_dump(FILE *); -+ulong get_xendump_panic_task(void); -+void get_xendump_regs(struct bt_info *, ulong *, ulong *); -+char *xc_core_mfn_to_page(ulong, char *); -+int xc_core_mfn_to_page_index(ulong); -+void xendump_panic_hook(char *); -+int read_xendump_hyper(int, void *, int, ulong, physaddr_t); - - /* - * net.c -@@ -3560,6 +4160,7 @@ - #define LKCD_DUMP_V7 (0x7) /* DUMP_VERSION_NUMBER */ - #define LKCD_DUMP_V8 (0x8) /* DUMP_VERSION_NUMBER */ - #define LKCD_DUMP_V9 (0x9) /* DUMP_VERSION_NUMBER */ -+#define LKCD_DUMP_V10 (0xa) /* DUMP_VERSION_NUMBER */ - - #define LKCD_DUMP_VERSION_NUMBER_MASK (0xf) - #define LKCD_DUMP_RAW (0x1) /* DUMP_[DH_]RAW */ -@@ -3764,7 +4365,6 @@ - extern int prettyprint_structs; - extern int prettyprint_arrays; - extern int repeat_count_threshold; --extern int repeat_count_threshold; - extern unsigned int print_max; - - /* -@@ 
-3814,4 +4414,8 @@ - extern int have_partial_symbols(void); - extern int have_full_symbols(void); - -+#if defined(X86) || defined(X86_64) || defined(IA64) -+#define XEN_HYPERVISOR_ARCH -+#endif -+ - #endif /* !GDB_COMMON */ ---- crash/xen_hyper_defs.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/xen_hyper_defs.h 2007-08-23 17:02:54.000000000 -0400 -@@ -0,0 +1,970 @@ -+/* -+ * xen_hyper_defs.h -+ * -+ * Portions Copyright (C) 2006-2007 Fujitsu Limited -+ * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. -+ * -+ * Authors: Itsuro Oda -+ * Fumihiko Kakuma -+ * -+ * This file is part of Xencrash. -+ * -+ * Xencrash is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation (version 2 of the License). -+ * -+ * Xencrash is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with Xencrash; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -+ */ -+ -+#ifdef XEN_HYPERVISOR_ARCH -+ -+#include -+#include -+ -+#ifdef X86 -+/* Xen Hypervisor address space layout */ -+#define IOREMAP_VIRT_END (0UL) -+#define IOREMAP_VIRT_START (0xFFC00000UL) -+#define DIRECTMAP_VIRT_END IOREMAP_VIRT_START -+#define DIRECTMAP_VIRT_START (0xFF000000UL) -+#define MAPCACHE_VIRT_END DIRECTMAP_VIRT_START -+#define MAPCACHE_VIRT_START (0xFFC00000UL) -+#define PERDOMAIN_VIRT_END DIRECTMAP_VIRT_START -+#define PERDOMAIN_VIRT_START (0xFE800000UL) -+#define SH_LINEAR_PT_VIRT_END PERDOMAIN_VIRT_START -+#define SH_LINEAR_PT_VIRT_START (0xFE400000UL) -+#define SH_LINEAR_PT_VIRT_START_PAE (0xFE000000UL) -+#define LINEAR_PT_VIRT_END SH_LINEAR_PT_VIRT_START -+#define LINEAR_PT_VIRT_START (0xFE000000UL) -+#define LINEAR_PT_VIRT_START_PAE (0xFD800000UL) -+#define RDWR_MPT_VIRT_END LINEAR_PT_VIRT_START -+#define RDWR_MPT_VIRT_START (0xFDC00000UL) -+#define RDWR_MPT_VIRT_START_PAE (0xFC800000UL) -+#define FRAMETABLE_VIRT_END RDWR_MPT_VIRT_START -+#define FRAMETABLE_VIRT_START (0xFC400000UL) -+#define FRAMETABLE_VIRT_START_PAE (0xF6800000UL) -+#define RO_MPT_VIRT_END FRAMETABLE_VIRT_START -+#define RO_MPT_VIRT_START (0xFC000000UL) -+#define RO_MPT_VIRT_START_PAE (0xF5800000UL) -+ -+#define HYPERVISOR_VIRT_START RO_MPT_VIRT_START -+#define HYPERVISOR_VIRT_START_PAE RO_MPT_VIRT_START_PAE -+#endif -+ -+#ifdef X86_64 -+#define HYPERVISOR_VIRT_START (0xffff800000000000) -+#define HYPERVISOR_VIRT_END (0xffff880000000000) -+#define DIRECTMAP_VIRT_START (0xffff830000000000) -+#define DIRECTMAP_VIRT_END (0xffff840000000000) -+#define PAGE_OFFSET_XEN_HYPER DIRECTMAP_VIRT_START -+#endif -+ -+#ifdef IA64 -+#define HYPERVISOR_VIRT_START (0xe800000000000000) -+#define HYPERVISOR_VIRT_END (0xf800000000000000) -+#define DEFAULT_SHAREDINFO_ADDR (0xf100000000000000) -+#define PERCPU_PAGE_SIZE 65536 -+#define PERCPU_ADDR (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE) -+#define DIRECTMAP_VIRT_START (0xf000000000000000) -+#define DIRECTMAP_VIRT_END PERCPU_ADDR -+#define VIRT_FRAME_TABLE_SIZE (0x0100000000000000) -+ -+#define PERCPU_VIRT_ADDR(vaddr) \ -+ (((vaddr) >= PERCPU_ADDR) && ((vaddr) < PERCPU_ADDR + PERCPU_PAGE_SIZE)) -+ -+#define FRAME_TABLE_VIRT_ADDR(vaddr) \ -+ ((vaddr) >= xhmachdep->frame_table && (vaddr) 
< xhmachdep->frame_table + VIRT_FRAME_TABLE_SIZE) -+ -+#undef IA64_RBS_OFFSET -+#define IA64_RBS_OFFSET ((XEN_HYPER_SIZE(vcpu) + 15) & ~15) -+ -+#endif /* IA64 */ -+ -+#define DIRECTMAP_VIRT_ADDR(vaddr) \ -+ (((vaddr) >= DIRECTMAP_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_END)) -+ -+typedef uint16_t domid_t; -+typedef uint32_t Elf_Word; -+ -+/* -+ * NOTE kakuma: The following defines are temporary version for -+ * elf note format which is used only in crash. -+ */ -+#define XEN_HYPER_ELF_NOTE_V1 1 -+#define XEN_HYPER_ELF_NOTE_V2 2 -+#define XEN_HYPER_ELF_NOTE_V3 3 -+#define XEN_HYPER_ELF_NOTE_V4 4 -+ -+#ifdef X86 -+#define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE 0x100 -+#endif -+#if defined(X86_64) || defined(IA64) -+#define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE 0x200 -+#endif -+ -+/* -+ * Xen Hyper -+ */ -+#define XEN_HYPER_SMP (0x400) -+ -+#ifdef X86 -+#define XEN_HYPER_MAX_VIRT_CPUS (32) -+#define XEN_HYPER_HZ 100 -+#endif -+#ifdef X86_64 -+#define XEN_HYPER_MAX_VIRT_CPUS (32) -+#define XEN_HYPER_HZ 100 -+#endif -+#ifdef IA64 -+#define XEN_HYPER_MAX_VIRT_CPUS (64) -+#define XEN_HYPER_HZ 100 -+#endif -+#ifndef XEN_HYPER_MAX_VIRT_CPUS -+#define XEN_HYPER_MAX_VIRT_CPUS (1) -+#endif -+ -+#if defined(X86) || defined(X86_64) -+#define XEN_HYPER_PERCPU_SHIFT 12 -+#define xen_hyper_per_cpu(var, cpu) \ -+ ((ulong)(var) + (((ulong)(cpu))<flags & XEN_HYPER_SMP) ? \ -+ (ulong)(var) + (xht->__per_cpu_offset[cpu]) : \ -+ (ulong)(var)) -+#endif -+ -+#if defined(X86) || defined(X86_64) -+#define XEN_HYPER_STACK_ORDER 2 -+#if 0 -+#define XEN_HYPER_STACK_SIZE (machdep->pagesize << XEN_HYPER_STACK_ORDER) -+#endif -+#define XEN_HYPER_GET_CPU_INFO(sp) \ -+ ((sp & ~(STACKSIZE()-1)) | \ -+ (STACKSIZE() - XEN_HYPER_SIZE(cpu_info))) -+#endif -+ -+#define XEN_HYPER_CONRING_SIZE 16384 -+ -+/* system time */ -+#define XEN_HYPER_NANO_TO_SEC(ns) ((ulonglong)((ns) / 1000000000ULL)) -+#define XEN_HYPER_MICR_TO_SEC(us) ((ulonglong)((us) / 1000000ULL)) -+#define XEN_HYPER_MILI_TO_SEC(ms) ((ulonglong)((ms) / 1000ULL)) -+ -+/* -+ * Domain -+ */ -+/* Prepared domain ID. */ -+#define XEN_HYPER_DOMID_IO (0x7FF1U) -+#define XEN_HYPER_DOMID_XEN (0x7FF2U) -+ -+/* Domain flags (domain_flags). */ -+ /* Is this domain privileged? 
*/ -+#define XEN_HYPER__DOMF_privileged 0 -+#define XEN_HYPER_DOMF_privileged (1UL<= 0) -+#define XEN_HYPER_VALID_STRUCT(X) (xen_hyper_size_table.X >= 0) -+#define XEN_HYPER_VALID_MEMBER(X) (xen_hyper_offset_table.X >= 0) -+ -+#define XEN_HYPER_ASSIGN_SIZE(X) (xen_hyper_size_table.X) -+#define XEN_HYPER_ASSIGN_OFFSET(X) (xen_hyper_offset_table.X) -+ -+#define XEN_HYPER_STRUCT_SIZE_INIT(X, Y) (XEN_HYPER_ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) -+#define XEN_HYPER_MEMBER_SIZE_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) -+#define XEN_HYPER_MEMBER_OFFSET_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) -+ -+/* -+ * System -+ */ -+#define XEN_HYPER_MAX_CPUS() (xht->max_cpus) -+#define XEN_HYPER_CRASHING_CPU() (xht->crashing_cpu) -+ -+/* -+ * Dump information -+ */ -+#define XEN_HYPER_X86_NOTE_EIP(regs) (regs[12]) -+#define XEN_HYPER_X86_NOTE_ESP(regs) (regs[15]) -+#define XEN_HYPER_X86_64_NOTE_RIP(regs) (regs[16]) -+#define XEN_HYPER_X86_64_NOTE_RSP(regs) (regs[19]) -+ -+/* -+ * Domain -+ */ -+#define XEN_HYPER_DOMAIN_F_INIT 0x1 -+ -+#define XEN_HYPER_NR_DOMAINS() (xht->domains) -+#define XEN_HYPER_RUNNING_DOMAINS() (xhdt->running_domains) -+ -+/* -+ * Phisycal CPU -+ */ -+#define XEN_HYPER_NR_PCPUS() (xht->pcpus) -+#define for_cpu_indexes(i, cpuid) \ -+ for (i = 0, cpuid = xht->cpu_idxs[i]; \ -+ i < XEN_HYPER_NR_PCPUS(); \ -+ cpuid = xht->cpu_idxs[++i]) -+#define XEN_HYPER_CURR_VCPU(pcpuid) \ -+ (xen_hyper_get_active_vcpu_from_pcpuid(pcpuid)) -+ -+/* -+ * VCPU -+ */ -+#define XEN_HYPER_VCPU_F_INIT 0x1 -+ -+#define XEN_HYPER_NR_VCPUS_IN_DOM(domain_context) (domain_context->vcpu_cnt) -+#define XEN_HYPER_VCPU_LAST_CONTEXT() (xhvct->last) -+ -+/* -+ * tools -+ */ -+#define XEN_HYPER_PRI(fp, len, str, buf, flag, args) \ -+ sprintf args; \ -+ xen_hyper_fpr_indent(fp, len, str, buf, flag); -+#define XEN_HYPER_PRI_CONST(fp, len, str, flag) \ -+ xen_hyper_fpr_indent(fp, len, str, NULL, flag); -+ -+#define XEN_HYPER_PRI_L (0x0) -+#define XEN_HYPER_PRI_R (0x1) -+#define XEN_HYPER_PRI_LF (0x2) -+ -+/* -+ * Global data -+ */ -+extern struct xen_hyper_machdep_table *xhmachdep; -+extern struct xen_hyper_table *xht; -+extern struct xen_hyper_dumpinfo_table *xhdit; -+extern struct xen_hyper_domain_table *xhdt; -+extern struct xen_hyper_vcpu_table *xhvct; -+extern struct xen_hyper_pcpu_table *xhpct; -+extern struct xen_hyper_sched_table *xhscht; -+extern struct xen_hyper_symbol_table_data *xhsymt; -+ -+extern struct xen_hyper_offset_table xen_hyper_offset_table; -+extern struct xen_hyper_size_table xen_hyper_size_table; -+ -+extern struct command_table_entry xen_hyper_command_table[]; -+extern struct task_context fake_tc; -+ -+/* -+ * Xen Hyper command help -+ */ -+extern char *xen_hyper_help_domain[]; -+extern char *xen_hyper_help_doms[]; -+extern char *xen_hyper_help_dumpinfo[]; -+extern char *xen_hyper_help_log[]; -+extern char *xen_hyper_help_pcpus[]; -+extern char *xen_hyper_help_sched[]; -+extern char *xen_hyper_help_sys[]; -+extern char *xen_hyper_help_vcpu[]; -+extern char *xen_hyper_help_vcpus[]; -+ -+/* -+ * Prototype -+ */ -+ulonglong xen_hyper_get_uptime_hyper(void); -+ -+/* -+ * x86 -+ */ -+int xen_hyper_x86_get_smp_cpus(void); -+uint64_t xen_hyper_x86_memory_size(void); -+ -+/* -+ * IA64 -+ */ -+int xen_hyper_ia64_get_smp_cpus(void); -+uint64_t xen_hyper_ia64_memory_size(void); -+ulong xen_hyper_ia64_processor_speed(void); -+ -+/* -+ * Xen Hyper -+ */ -+void xen_hyper_init(void); -+void xen_hyper_domain_init(void); -+void xen_hyper_vcpu_init(void); -+void 
xen_hyper_dumpinfo_init(void); -+void xen_hyper_misc_init(void); -+void xen_hyper_post_init(void); -+struct xen_hyper_dumpinfo_context *xen_hyper_id_to_dumpinfo_context(uint id); -+struct xen_hyper_dumpinfo_context *xen_hyper_note_to_dumpinfo_context(ulong note); -+char *xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type); -+ -+/* domain */ -+void xen_hyper_refresh_domain_context_space(void); -+int xen_hyper_get_domains(void); -+char *xen_hyper_get_domain_next(int mod, ulong *next); -+domid_t xen_hyper_domain_to_id(ulong domain); -+char *xen_hyper_id_to_domain_struct(domid_t id); -+struct xen_hyper_domain_context * -+xen_hyper_domain_to_domain_context(ulong domain); -+struct xen_hyper_domain_context * -+xen_hyper_id_to_domain_context(domid_t id); -+struct xen_hyper_domain_context * -+xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc, -+ ulong domain, char *dp); -+char *xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc); -+char *xen_hyper_read_domain(ulong domain); -+char *xen_hyper_read_domain_verify(ulong domain); -+char *xen_hyper_fill_domain_struct(ulong domain, char *domain_struct); -+void xen_hyper_alloc_domain_context_space(int domains); -+ulong xen_hyper_domain_state(struct xen_hyper_domain_context *dc); -+ -+/* vcpu */ -+void xen_hyper_refresh_vcpu_context_space(void); -+struct xen_hyper_vcpu_context * -+xen_hyper_vcpu_to_vcpu_context(ulong vcpu); -+struct xen_hyper_vcpu_context * -+xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid); -+struct xen_hyper_vcpu_context_array * -+xen_hyper_domain_to_vcpu_context_array(ulong domain); -+struct xen_hyper_vcpu_context_array * -+xen_hyper_domid_to_vcpu_context_array(domid_t id); -+struct xen_hyper_vcpu_context * -+xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc, -+ ulong vcpu, char *vcp); -+char * -+xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc); -+char *xen_hyper_read_vcpu(ulong vcpu); -+char *xen_hyper_read_vcpu_verify(ulong vcpu); -+char *xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct); -+void xen_hyper_alloc_vcpu_context_arrays_space(int domains); -+void xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus); -+int xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc); -+ -+/* pcpu */ -+#if defined(X86) || defined(X86_64) -+void xen_hyper_x86_pcpu_init(void); -+#elif defined(IA64) -+void xen_hyper_ia64_pcpu_init(void); -+#endif -+struct xen_hyper_pcpu_context *xen_hyper_id_to_pcpu_context(uint id); -+struct xen_hyper_pcpu_context *xen_hyper_pcpu_to_pcpu_context(ulong pcpu); -+struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc, -+ ulong pcpu, char *pcp); -+struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc, -+ ulong init_tss, char *tss); -+char *xen_hyper_read_pcpu(ulong pcpu); -+char *xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct); -+void xen_hyper_alloc_pcpu_context_space(int pcpus); -+ -+/* others */ -+char *xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86); -+char *xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64); -+int xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc); -+void xen_hyper_print_bt_header(FILE *out, ulong pcpu, int newline); -+ulong xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpu); -+ulong xen_hyper_pcpu_to_active_vcpu(ulong pcpu); -+void xen_hyper_get_cpu_info(void); -+int xen_hyper_test_pcpu_id(uint pcpu_id); -+ -+/* -+ * Xen Hyper command -+ */ 
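/*
 * Editor's note: not part of the patch. A minimal standalone sketch of the
 * address arithmetic behind the xen_hyper_defs.h macros above, i.e. the
 * shift-based xen_hyper_per_cpu() variant (XEN_HYPER_PERCPU_SHIFT) and
 * XEN_HYPER_GET_CPU_INFO(), which locates the cpu_info block at the top of
 * the per-cpu hypervisor stack. STACK_SIZE and CPU_INFO_SIZE are
 * illustrative stand-ins for STACKSIZE() and XEN_HYPER_SIZE(cpu_info); the
 * 16 KB stack assumes order-2 4 KB pages (XEN_HYPER_STACK_ORDER 2).
 */
#include <stdio.h>

#define PERCPU_SHIFT   12                    /* XEN_HYPER_PERCPU_SHIFT */
#define STACK_SIZE     (4096UL << 2)         /* order-2 stack of 4 KB pages */
#define CPU_INFO_SIZE  64UL                  /* stand-in for sizeof(cpu_info) */

/* each cpu's copy of "var" lives one PERCPU_SHIFT-sized block further on */
static unsigned long per_cpu_addr(unsigned long var, unsigned long cpu)
{
        return var + (cpu << PERCPU_SHIFT);
}

/* cpu_info sits at the very top of the per-cpu hypervisor stack */
static unsigned long cpu_info_addr(unsigned long sp)
{
        return (sp & ~(STACK_SIZE - 1)) | (STACK_SIZE - CPU_INFO_SIZE);
}

int main(void)
{
        unsigned long sp = 0xffff830000123a58UL;

        printf("cpu_info for sp %#lx -> %#lx\n", sp, cpu_info_addr(sp));
        printf("per-cpu var for cpu 3 -> %#lx\n",
               per_cpu_addr(0xffff828c80100000UL, 3));
        return 0;
}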
-+void xen_hyper_cmd_help(void); -+void xen_hyper_cmd_domain(void); -+void xen_hyper_cmd_doms(void); -+void xen_hyper_cmd_dumpinfo(void); -+void xen_hyper_cmd_log(void); -+void xen_hyper_dump_log(void); -+void xen_hyper_cmd_pcpus(void); -+void xen_hyper_cmd_sched(void); -+void xen_hyper_cmd_sys(void); -+void xen_hyper_cmd_vcpu(void); -+void xen_hyper_cmd_vcpus(void); -+void xen_hyper_display_sys_stats(void); -+ -+void xen_hyper_show_vcpu_context(struct xen_hyper_vcpu_context *vcc); -+char *xen_hyper_domain_state_string(struct xen_hyper_domain_context *dc, -+ char *buf, int verbose); -+char *xen_hyper_vcpu_state_string(struct xen_hyper_vcpu_context *vcc, -+ char *buf, int verbose); -+ -+/* tools */ -+void xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag); -+ -+#else -+ -+#define XEN_HYPERVISOR_NOT_SUPPORTED \ -+ "Xen hypervisor mode not supported on this architecture\n" -+ -+#endif ---- crash/vas_crash.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/vas_crash.h 2006-10-11 09:14:36.000000000 -0400 + fops = ULONG(char_device_struct_buf + + OFFSET(char_device_struct_fops)); + major = INT(char_device_struct_buf + +@@ -1957,29 +1974,44 @@ + unsigned int class; + unsigned short device, vendor; + unsigned char busno; +- ulong *devlist, bus, devfn, tmp; ++ ulong *devlist, bus, devfn, prev, next; + char buf1[BUFSIZE]; + char buf2[BUFSIZE]; + char buf3[BUFSIZE]; + +- fprintf(fp, "%s BU:SL.FN CLASS: VENDOR-DEVICE\n", +- mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "PCI_DEV")); ++ if (!symbol_exists("pci_devices")) ++ error(FATAL, "no PCI devices found on this system.\n"); + + BZERO(&pcilist_data, sizeof(struct list_data)); + + if (VALID_MEMBER(pci_dev_global_list)) { +- get_symbol_data("pci_devices", sizeof(void *), &tmp); +- readmem(tmp + OFFSET(list_head_next), KVADDR, +- &pcilist_data.start, sizeof(void *), "pci devices", +- FAULT_ON_ERROR); ++ get_symbol_data("pci_devices", sizeof(void *), &pcilist_data.start); + pcilist_data.end = symbol_value("pci_devices"); + pcilist_data.list_head_offset = OFFSET(pci_dev_global_list); ++ readmem(symbol_value("pci_devices") + OFFSET(list_head_prev), ++ KVADDR, &prev, sizeof(void *), "list head prev", ++ FAULT_ON_ERROR); ++ /* ++ * Check if this system does not have any PCI devices. ++ */ ++ if ((pcilist_data.start == pcilist_data.end) && ++ (prev == pcilist_data.end)) ++ error(FATAL, "no PCI devices found on this system.\n"); + +- } else { ++ } else if (VALID_MEMBER(pci_dev_next)) { + get_symbol_data("pci_devices", sizeof(void *), + &pcilist_data.start); + pcilist_data.member_offset = OFFSET(pci_dev_next); +- } ++ /* ++ * Check if this system does not have any PCI devices. 
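/*
 * Editor's note: not part of the patch. A stand-alone sketch of the
 * "no PCI devices" test added above: the kernel list_head anchored at
 * pci_devices is empty when both its next and prev pointers still point
 * back at the anchor itself, which is what the (start == end) && (prev ==
 * end) check expresses. Types and values here are illustrative only.
 */
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static int list_is_empty(const struct list_head *anchor)
{
        /* mirrors: (pcilist_data.start == pcilist_data.end) && (prev == end) */
        return anchor->next == anchor && anchor->prev == anchor;
}

int main(void)
{
        struct list_head pci_devices;
        struct list_head dev;

        pci_devices.next = pci_devices.prev = &pci_devices;   /* empty list */
        printf("empty: %d\n", list_is_empty(&pci_devices));

        dev.next = dev.prev = &pci_devices;                    /* one element */
        pci_devices.next = pci_devices.prev = &dev;
        printf("empty: %d\n", list_is_empty(&pci_devices));
        return 0;
}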
++ */ ++ readmem(pcilist_data.start + pcilist_data.member_offset, ++ KVADDR, &next, sizeof(void *), "pci dev next", ++ FAULT_ON_ERROR); ++ if (!next) ++ error(FATAL, "no PCI devices found on this system.\n"); ++ } else ++ option_not_supported('p'); + + hq_open(); + devcnt = do_list(&pcilist_data); +@@ -1987,6 +2019,9 @@ + devcnt = retrieve_list(devlist, devcnt); + hq_close(); + ++ fprintf(fp, "%s BU:SL.FN CLASS: VENDOR-DEVICE\n", ++ mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "PCI_DEV")); ++ + for (i = 0; i < devcnt; i++) { + + /* +--- crash/vas_crash.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/vas_crash.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* vas_crash.h - kernel crash dump file format (on swap) * @@ -48048,495 +79081,396 @@ void save_core(void); ---- crash/netdump.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/netdump.h 2007-05-15 15:04:36.000000000 -0400 -@@ -24,3 +24,95 @@ - - #define NT_TASKSTRUCT 4 - #define NT_DISKDUMP 0x70000001 -+ -+#ifdef NOTDEF -+/* -+ * Note: Based upon the original, abandoned, proposal for -+ * its contents -- keep around for potential future use. -+ */ -+#ifndef NT_KDUMPINFO -+#define NT_KDUMPINFO 7 -+#endif -+ -+#endif /* NOTDEF */ -+ -+struct pt_load_segment { -+ off_t file_offset; -+ physaddr_t phys_start; -+ physaddr_t phys_end; -+ physaddr_t zero_fill; -+}; -+ -+struct vmcore_data { -+ ulong flags; -+ int ndfd; -+ FILE *ofp; -+ uint header_size; -+ char *elf_header; -+ uint num_pt_load_segments; -+ struct pt_load_segment *pt_load_segments; -+ Elf32_Ehdr *elf32; -+ Elf32_Phdr *notes32; -+ Elf32_Phdr *load32; -+ Elf64_Ehdr *elf64; -+ Elf64_Phdr *notes64; -+ Elf64_Phdr *load64; -+ void *nt_prstatus; -+ void *nt_prpsinfo; -+ void *nt_taskstruct; -+ ulong task_struct; -+ uint page_size; -+ ulong switch_stack; -+ uint num_prstatus_notes; -+ void *nt_prstatus_percpu[NR_CPUS]; -+ struct xen_kdump_data *xen_kdump_data; -+}; -+ -+/* -+ * ELF note types for Xen dom0/hypervisor kdumps. -+ * The comments below are from xen/include/public/elfnote.h. -+ */ -+ -+/* -+ * System information exported through crash notes. -+ * -+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO -+ * note in case of a system crash. This note will contain various -+ * information about the system, see xen/include/xen/elfcore.h. -+ */ -+#define XEN_ELFNOTE_CRASH_INFO 0x1000001 -+ -+/* -+ * System registers exported through crash notes. -+ * -+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS -+ * note per cpu in case of a system crash. This note is architecture -+ * specific and will contain registers not saved in the "CORE" note. -+ * See xen/include/xen/elfcore.h for more information. -+ */ -+#define XEN_ELFNOTE_CRASH_REGS 0x1000002 -+ -+ -+/* -+ * For (temporary) backwards compatibility. -+ */ -+#define NT_XEN_KDUMP_CR3 0x10000001 -+ -+struct xen_kdump_data { -+ ulong flags; -+ ulong cr3; -+ ulong p2m_mfn; -+ char *page; -+ ulong last_mfn_read; -+ ulong last_pmd_read; -+ ulong cache_hits; -+ ulong accesses; -+ int p2m_frames; -+ ulong *p2m_mfn_frame_list; -+}; -+ -+#define KDUMP_P2M_INIT (0x1) -+#define KDUMP_CR3 (0x2) -+#define KDUMP_MFN_LIST (0x4) -+ -+#define P2M_FAILURE ((physaddr_t)(0xffffffffffffffffLL)) ---- crash/diskdump.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/diskdump.h 2007-04-09 12:15:02.000000000 -0400 -@@ -1,8 +1,10 @@ - /* - * diskdump.h - * -- * Copyright (C) 2004, 2005 David Anderson -- * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. 
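/*
 * Editor's note: not part of the patch. A minimal sketch of how a physical
 * address is located in an ELF vmcore using the pt_load_segment table
 * declared in netdump.h above: find the PT_LOAD segment whose
 * [phys_start, phys_end) range covers the address and add the delta to its
 * file offset. The helper name and the sample segment table are
 * illustrative, not part of the crash API.
 */
#include <stdio.h>
#include <sys/types.h>

typedef unsigned long long physaddr_t;

struct pt_load_segment {
        off_t       file_offset;
        physaddr_t  phys_start;
        physaddr_t  phys_end;
        physaddr_t  zero_fill;
};

/* returns (off_t)-1 if the address is not backed by any PT_LOAD segment */
static off_t paddr_to_file_offset(physaddr_t paddr,
                                  const struct pt_load_segment *seg, int nseg)
{
        int i;

        for (i = 0; i < nseg; i++, seg++)
                if (paddr >= seg->phys_start && paddr < seg->phys_end)
                        return seg->file_offset +
                               (off_t)(paddr - seg->phys_start);
        return (off_t)-1;
}

int main(void)
{
        struct pt_load_segment segs[] = {
                { 0x1000,  0x0,      0x9f000,   0 },
                { 0xa0000, 0x100000, 0x8000000, 0 },
        };

        printf("%#llx -> file offset %lld\n", 0x200000ULL,
               (long long)paddr_to_file_offset(0x200000ULL, segs, 2));
        return 0;
}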
-+ * Copyright (C) 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2005 FUJITSU LIMITED -+ * Copyright (C) 2005 NEC Corporation - * - * This software may be freely redistributed under the terms of the - * GNU General Public License. -@@ -10,7 +12,65 @@ - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -- * -- * Author: David Anderson +--- crash/global_data.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/global_data.c 2008-01-04 09:42:08.000000000 -0500 +@@ -68,7 +68,7 @@ + * To add a new command, declare it in defs.h and enter it in this table. */ -+#include +-struct command_table_entry base_command_table[] = { ++struct command_table_entry linux_command_table[] = { + {"*", cmd_pointer, help_pointer, 0}, + {"alias", cmd_alias, help_alias, 0}, + {"ascii", cmd_ascii, help_ascii, 0}, +@@ -117,6 +117,9 @@ + {"waitq", cmd_waitq, help_waitq, REFRESH_TASK_TABLE}, + {"whatis", cmd_whatis, help_whatis, 0}, + {"wr", cmd_wr, help_wr, 0}, ++#if defined(S390) || defined(S390X) ++ {"s390dbf", cmd_s390dbf, help_s390dbf, 0}, ++#endif + {(char *)NULL} + }; + +--- crash/unwind.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/unwind.c 2008-01-04 09:42:08.000000000 -0500 +@@ -6,8 +6,8 @@ + /* + * unwind.c + * +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. + * + * Adapted from: + * +@@ -36,6 +36,7 @@ + /* #include can't include this -- it's changing over time! */ + + #include "defs.h" ++#include "xen_hyper_defs.h" + + typedef unsigned char u8; + typedef unsigned long long u64; +@@ -64,6 +65,8 @@ + struct bt_info *); + static int unw_switch_from_osinit_v2(struct unw_frame_info *, + struct bt_info *); ++static int unw_switch_from_osinit_v3(struct unw_frame_info *, ++ struct bt_info *, char *); + static unsigned long get_init_stack_ulong(unsigned long addr); + static void unw_init_frame_info(struct unw_frame_info *, + struct bt_info *, ulong); +@@ -1397,9 +1400,22 @@ + req = &request; + + if (get_symbol_type("unw", "tables", req) == TYPE_CODE_UNDEF) { +- error(WARNING, "cannot determine unw.tables offset\n"); +- machdep->flags |= UNW_OUT_OF_SYNC; +- } else { ++ /* ++ * KLUDGE ALERT: ++ * If unw.tables cannot be ascertained by gdb, try unw.save_order, ++ * given that it is the field just after unw.tables. 
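/*
 * Editor's note: not part of the patch. A sketch of the single-page cache
 * implied by the xen_kdump_data fields listed a little earlier (page,
 * last_mfn_read, cache_hits, accesses): re-reading a machine frame is
 * skipped when its MFN matches the one already held in the buffer, and the
 * two counters record the hit rate. fake_read_mfn() is a purely
 * illustrative stand-in for the real dumpfile read, not a crash function.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

struct mfn_cache {
        unsigned long last_mfn_read;    /* MFN whose data is in page[] */
        unsigned long cache_hits;
        unsigned long accesses;
        char page[PAGE_SZ];
};

/* illustrative reader: pretend every byte of the frame equals its MFN */
static int fake_read_mfn(unsigned long mfn, char *buf)
{
        memset(buf, (int)(mfn & 0xff), PAGE_SZ);
        return 0;
}

static char *get_mfn_page(struct mfn_cache *c, unsigned long mfn)
{
        c->accesses++;
        if (mfn == c->last_mfn_read) {
                c->cache_hits++;                /* already buffered */
                return c->page;
        }
        if (fake_read_mfn(mfn, c->page) != 0)
                return NULL;                    /* propagate read failure */
        c->last_mfn_read = mfn;
        return c->page;
}

int main(void)
{
        struct mfn_cache c = { .last_mfn_read = ~0UL };

        get_mfn_page(&c, 0x1234);
        get_mfn_page(&c, 0x1234);               /* served from the cache */
        printf("hits/accesses: %lu/%lu\n", c.cache_hits, c.accesses);
        return 0;
}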
++ */ ++ if (get_symbol_type("unw", "save_order", req) == TYPE_CODE_UNDEF) { ++ error(WARNING, "cannot determine unw.tables offset\n"); ++ machdep->flags |= UNW_OUT_OF_SYNC; ++ } else ++ req->member_offset -= BITS_PER_BYTE * sizeof(void *); + -+#define divideup(x, y) (((x) + ((y) - 1)) / (y)) -+#define round(x, y) (((x) / (y)) * (y)) ++ if (CRASHDEBUG(1)) ++ error(WARNING, "using unw.save_order to determine unw.tables\n"); ++ } + -+#define DUMP_PARTITION_SIGNATURE "diskdump" -+#define SIG_LEN (sizeof(DUMP_PARTITION_SIGNATURE) - 1) -+#define DISK_DUMP_SIGNATURE "DISKDUMP" -+#define KDUMP_SIGNATURE "KDUMP " ++ if (!(machdep->flags & UNW_OUT_OF_SYNC)) { + machdep->machspec->unw_tables_offset = + req->member_offset/BITS_PER_BYTE; + +@@ -1658,8 +1674,13 @@ + unw_get_sp(info, &sp); + unw_get_bsp(info, &bsp); + +- if (ip < GATE_ADDR + PAGE_SIZE) +- break; ++ if (XEN_HYPER_MODE()) { ++ if (!IS_KVADDR(ip)) ++ break; ++ } else { ++ if (ip < GATE_ADDR + PAGE_SIZE) ++ break; ++ } + + if ((sm = value_search(ip, NULL))) + name = sm->name; +@@ -1720,11 +1741,29 @@ + * ia64_init_handler. + */ + if (STREQ(name, "ia64_init_handler")) { +- unw_switch_from_osinit_v2(info, bt); +- frame++; +- goto restart; ++ if (symbol_exists("ia64_mca_modify_original_stack")) { ++ /* ++ * 2.6.14 or later kernels no longer keep ++ * minstate info in pt_regs/switch_stack. ++ * unw_switch_from_osinit_v3() will try ++ * to find the interrupted task and restart ++ * backtrace itself. ++ */ ++ if (unw_switch_from_osinit_v3(info, bt, "INIT") == FALSE) ++ break; ++ } else { ++ if (unw_switch_from_osinit_v2(info, bt) == FALSE) ++ break; ++ frame++; ++ goto restart; ++ } + } + ++ if (STREQ(name, "ia64_mca_handler") && ++ symbol_exists("ia64_mca_modify_original_stack")) ++ if (unw_switch_from_osinit_v3(info, bt, "MCA") == FALSE) ++ break; + -+#define DUMP_HEADER_COMPLETED 0 -+#define DUMP_HEADER_INCOMPLETED 1 -+#define DUMP_HEADER_COMPRESSED 8 + frame++; + + } while (unw_unwind(info) >= 0); +@@ -1844,8 +1883,13 @@ + ulong sw; + + sw = SWITCH_STACK_ADDR(bt->task); +- if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw)) +- return FALSE; ++ if (XEN_HYPER_MODE()) { ++ if (!INSTACK(sw, bt) && !ia64_in_mca_stack_hyper(sw, bt)) ++ return FALSE; ++ } else { ++ if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw)) ++ return FALSE; ++ } + + unw_init_frame_info(info, bt, sw); + return TRUE; +@@ -1967,6 +2011,124 @@ + return TRUE; + } + ++/* CPL (current privilege level) is 2-bit field */ ++#define IA64_PSR_CPL0_BIT 32 ++#define IA64_PSR_CPL_MASK (3UL << IA64_PSR_CPL0_BIT) + -+struct disk_dump_header { -+ char signature[SIG_LEN]; /* = "DISKDUMP" */ -+ int header_version; /* Dump header version */ -+ struct new_utsname utsname; /* copy of system_utsname */ -+ struct timeval timestamp; /* Time stamp */ -+ unsigned int status; /* Above flags */ -+ int block_size; /* Size of a block in byte */ -+ int sub_hdr_size; /* Size of arch dependent -+ header in blocks */ -+ unsigned int bitmap_blocks; /* Size of Memory bitmap in -+ block */ -+ unsigned int max_mapnr; /* = max_mapnr */ -+ unsigned int total_ram_blocks;/* Number of blocks should be -+ written */ -+ unsigned int device_blocks; /* Number of total blocks in -+ * the dump device */ -+ unsigned int written_blocks; /* Number of written blocks */ -+ unsigned int current_cpu; /* CPU# which handles dump */ -+ int nr_cpus; /* Number of CPUs */ -+ struct task_struct *tasks[0]; -+}; -+ -+struct disk_dump_sub_header { -+ long elf_regs; -+}; -+ -+struct kdump_sub_header { -+ unsigned long phys_base; -+ int dump_level; 
/* header_version 1 and later */ -+}; -+ -+/* page flags */ -+#define DUMP_DH_COMPRESSED 0x1 /* page is compressed */ -+ -+/* descriptor of each page for vmcore */ -+typedef struct page_desc { -+ off_t offset; /* the offset of the page data*/ -+ unsigned int size; /* the size of this dump page */ -+ unsigned int flags; /* flags */ -+ unsigned long long page_flags; /* page flags */ -+} page_desc_t; -+ -+#define DISKDUMP_CACHED_PAGES (16) -+#define PAGE_VALID (0x1) /* flags */ -+#define DISKDUMP_VALID_PAGE(flags) ((flags) & PAGE_VALID) -+ ---- crash/xendump.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/xendump.h 2007-03-14 10:16:41.000000000 -0500 -@@ -0,0 +1,177 @@ -+/* -+ * xendump.h -+ * -+ * Copyright (C) 2006, 2007 David Anderson -+ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved. -+ * -+ * This software may be freely redistributed under the terms of the -+ * GNU General Public License. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+#include -+#include -+ -+#define XC_SAVE_SIGNATURE "LinuxGuestRecord" -+#define XC_CORE_MAGIC 0xF00FEBED -+#define XC_CORE_MAGIC_HVM 0xF00FEBEE -+ -+/* -+ * From xenctrl.h, but probably not on most host machines. -+ */ -+typedef struct xc_core_header { -+ unsigned int xch_magic; -+ unsigned int xch_nr_vcpus; -+ unsigned int xch_nr_pages; -+ unsigned int xch_ctxt_offset; -+ unsigned int xch_index_offset; -+ unsigned int xch_pages_offset; -+} xc_core_header_t; -+ -+struct pfn_offset_cache { -+ off_t file_offset; -+ ulong pfn; -+ ulong cnt; -+}; -+#define PFN_TO_OFFSET_CACHE_ENTRIES (5000) -+ -+struct elf_index_pfn { -+ ulong index; -+ ulong pfn; -+}; -+#define INDEX_PFN_COUNT (128) -+ -+struct last_batch { -+ ulong index; -+ ulong start; -+ ulong end; -+ ulong accesses; -+ ulong duplicates; -+}; -+ -+struct xendump_data { -+ ulong flags; /* XENDUMP_LOCAL, plus anything else... */ -+ int xfd; -+ int pc_next; -+ uint page_size; -+ FILE *ofp; -+ char *page; -+ ulong accesses; -+ ulong cache_hits; -+ ulong redundant; -+ ulong last_pfn; -+ struct pfn_offset_cache *poc; -+ -+ struct xc_core_data { -+ int p2m_frames; -+ ulong *p2m_frame_index_list; -+ struct xc_core_header header; -+ int elf_class; -+ uint64_t format_version; -+ off_t elf_strtab_offset; -+ off_t shared_info_offset; -+ off_t ia64_mapped_regs_offset; -+ struct elf_index_pfn elf_index_pfn[INDEX_PFN_COUNT]; -+ struct last_batch last_batch; -+ Elf32_Ehdr *elf32; -+ Elf64_Ehdr *elf64; -+ } xc_core; -+ -+ struct xc_save_data { -+ ulong nr_pfns; -+ int vmconfig_size; -+ char *vmconfig_buf; -+ ulong *p2m_frame_list; -+ uint pfns_not; -+ off_t pfns_not_offset; -+ off_t vcpu_ctxt_offset; -+ off_t shared_info_page_offset; -+ off_t *batch_offsets; -+ ulong batch_count; -+ ulong *region_pfn_type; -+ ulong ia64_version; -+ ulong *ia64_page_offsets; -+ } xc_save; -+ -+ ulong panic_pc; -+ ulong panic_sp; -+}; -+ -+#define XC_SAVE (XENDUMP_LOCAL << 1) -+#define XC_CORE_ORIG (XENDUMP_LOCAL << 2) -+#define XC_CORE_P2M_CREATE (XENDUMP_LOCAL << 3) -+#define XC_CORE_PFN_CREATE (XENDUMP_LOCAL << 4) -+#define XC_CORE_NO_P2M (XENDUMP_LOCAL << 5) -+#define XC_SAVE_IA64 (XENDUMP_LOCAL << 6) -+#define XC_CORE_64BIT_HOST (XENDUMP_LOCAL << 7) -+#define XC_CORE_ELF (XENDUMP_LOCAL << 8) -+ -+#define MACHINE_BYTE_ORDER() \ -+ (machine_type("X86") || \ -+ machine_type("X86_64") || \ -+ machine_type("IA64") ? 
__LITTLE_ENDIAN : __BIG_ENDIAN) -+ -+#define BYTE_SWAP_REQUIRED(endian) (endian != MACHINE_BYTE_ORDER()) -+ -+static inline uint32_t -+swab32(uint32_t x) ++static int ++user_mode(struct bt_info *bt, unsigned long pt) +{ -+ return (((x & 0x000000ffU) << 24) | -+ ((x & 0x0000ff00U) << 8) | -+ ((x & 0x00ff0000U) >> 8) | -+ ((x & 0xff000000U) >> 24)); ++ unsigned long cr_ipsr; ++ ++ cr_ipsr = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, cr_ipsr)); ++ if (cr_ipsr & IA64_PSR_CPL_MASK) ++ return 1; ++ return 0; +} + -+#define MFN_NOT_FOUND (-1) -+#define PFN_NOT_FOUND (-1) -+ -+#define INVALID_MFN (~0UL) -+ +/* -+ * ia64 "xm save" format is completely different than the others. ++ * Cope with INIT/MCA stack for the kernel 2.6.14 or later ++ * ++ * Returns FALSE if no more unwinding is needed. + */ -+typedef struct xen_domctl_arch_setup { -+ uint64_t flags; /* XEN_DOMAINSETUP_* */ -+/* #ifdef __ia64__ */ -+ uint64_t bp; /* mpaddr of boot param area */ -+ uint64_t maxmem; /* Highest memory address for MDT. */ -+ uint64_t xsi_va; /* Xen shared_info area virtual address. */ -+ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ -+/* #endif */ -+} xen_domctl_arch_setup_t; -+ -+/* -+ * xc_core ELF note, which differs from the standard Elf[32|64]_Nhdr -+ * structure by the additional name field. -+ */ -+struct elfnote { -+ uint32_t namesz; -+ uint32_t descsz; -+ uint32_t type; -+ char name[4]; -+}; -+ -+#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 -+#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 -+#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 -+#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 -+ -+struct xen_dumpcore_elfnote_header_desc { -+ uint64_t xch_magic; -+ uint64_t xch_nr_vcpus; -+ uint64_t xch_nr_pages; -+ uint64_t xch_page_size; -+}; -+ -+#define FORMAT_VERSION_0000000000000001 0x0000000000000001ULL -+ -+struct xen_dumpcore_elfnote_format_version_desc { -+ uint64_t version; -+}; -+ -+struct xen_dumpcore_p2m { -+ uint64_t pfn; -+ uint64_t gmfn; -+}; ---- crash/unwind_x86.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/unwind_x86.h 2006-10-20 15:42:08.000000000 -0400 -@@ -0,0 +1,2 @@ -+ -+ ---- crash/unwind_x86_64.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/unwind_x86_64.h 2006-10-20 15:42:08.000000000 -0400 -@@ -0,0 +1,92 @@ -+#define CONFIG_64BIT 1 -+#define NULL ((void *)0) -+ -+typedef unsigned long size_t; -+typedef unsigned char u8; -+typedef signed short s16; -+typedef unsigned short u16; -+typedef signed int s32; -+typedef unsigned int u32; -+typedef unsigned long long u64; -+ -+struct pt_regs { -+ unsigned long r15; -+ unsigned long r14; -+ unsigned long r13; -+ unsigned long r12; -+ unsigned long rbp; -+ unsigned long rbx; -+/* arguments: non interrupts/non tracing syscalls only save upto here*/ -+ unsigned long r11; -+ unsigned long r10; -+ unsigned long r9; -+ unsigned long r8; -+ unsigned long rax; -+ unsigned long rcx; -+ unsigned long rdx; -+ unsigned long rsi; -+ unsigned long rdi; -+ unsigned long orig_rax; -+/* end of arguments */ -+/* cpu exception frame or undefined */ -+ unsigned long rip; -+ unsigned long cs; -+ unsigned long eflags; -+ unsigned long rsp; -+ unsigned long ss; -+/* top of stack page */ -+}; -+ -+struct unwind_frame_info ++#define ALIGN16(x) ((x)&~15) ++static int ++unw_switch_from_osinit_v3(struct unw_frame_info *info, struct bt_info *bt, ++ char *type) +{ -+ struct pt_regs regs; -+}; ++ unsigned long pt, sw, pid; ++ int processor; ++ char *p, *q; ++ struct task_context *tc = NULL; ++ struct bt_info clone_bt; + -+extern 
int unwind(struct unwind_frame_info *); -+extern void init_unwind_table(void); -+extern void free_unwind_table(void); ++ /* ++ * The structure of INIT/MCA stack ++ * ++ * +---------------------------+ <-------- IA64_STK_OFFSET ++ * | pt_regs | ++ * +---------------------------+ ++ * | switch_stack | ++ * +---------------------------+ ++ * | SAL/OS state | ++ * +---------------------------+ ++ * | 16 byte scratch area | ++ * +---------------------------+ <-------- SP at start of C handler ++ * | ..... | ++ * +---------------------------+ ++ * | RBS for MCA/INIT handler | ++ * +---------------------------+ ++ * | struct task for MCA/INIT | ++ * +---------------------------+ <-------- bt->task ++ */ ++ pt = ALIGN16(bt->task + IA64_STK_OFFSET - STRUCT_SIZE("pt_regs")); ++ sw = ALIGN16(pt - STRUCT_SIZE("switch_stack")); + -+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) -+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -+#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) -+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) -+#define get_unaligned(ptr) (*(ptr)) -+//#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) -+#define THREAD_ORDER 1 -+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) ++ /* ++ * 1. Try to find interrupted task from comm ++ * ++ * comm format of INIT/MCA task: ++ * - " " ++ * - " " ++ * where "" is either "INIT" or "MCA". ++ * The latter form is chosen if PID is 0. ++ * ++ * See ia64_mca_modify_comm() in arch/ia64/kernel/mca.c ++ */ ++ if (!bt->tc || !bt->tc->comm) ++ goto find_exframe; + -+#define UNW_PC(frame) (frame)->regs.rip -+#define UNW_SP(frame) (frame)->regs.rsp -+#ifdef CONFIG_FRAME_POINTER -+ #define UNW_FP(frame) (frame)->regs.rbp -+ #define FRAME_RETADDR_OFFSET 8 -+ #define FRAME_LINK_OFFSET 0 -+ #define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1)) -+ #define STACK_TOP(tsk) ((tsk)->thread.rsp0) -+#endif ++ if ((p = strstr(bt->tc->comm, type))) { ++ p += strlen(type); ++ if (*p != ' ') ++ goto find_exframe; ++ if ((q = strchr(++p, ' '))) { ++ /* " " */ ++ if (sscanf(++q, "%d", &processor) > 0) { ++ tc = pid_to_context(0); ++ while (tc) { ++ if (tc != bt->tc && ++ tc->processor == processor) ++ break; ++ tc = tc->tc_next; ++ } ++ } ++ } else if (sscanf(p, "%lu", &pid) > 0) ++ /* " " */ ++ tc = pid_to_context(pid); ++ } + ++ if (tc) { ++ /* Clone bt_info and do backtrace */ ++ clone_bt_info(bt, &clone_bt, tc); ++ if (!BT_REFERENCE_CHECK(&clone_bt)) { ++ fprintf(fp, "(%s) INTERRUPTED TASK\n", type); ++ print_task_header(fp, tc, 0); ++ } ++ if (!user_mode(bt, pt)) ++ back_trace(&clone_bt); ++ else if (!BT_REFERENCE_CHECK(bt)) { ++ fprintf(fp, " #0 [interrupted in user space]\n"); ++ /* at least show the incomplete exception frame */ ++ bt->flags |= BT_INCOMPLETE_USER_EFRAME; ++ ia64_exception_frame(pt, bt); ++ } ++ return FALSE; ++ } + -+#define EXTRA_INFO(f) { BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) % FIELD_SIZEOF(struct unwind_frame_info, f)) + offsetof(struct unwind_frame_info, f)/ FIELD_SIZEOF(struct unwind_frame_info, f), FIELD_SIZEOF(struct unwind_frame_info, f) } ++ /* task matching with INIT/MCA task's comm is not found */ + -+#define PTREGS_INFO(f) EXTRA_INFO(regs.f) ++find_exframe: ++ /* ++ * 2. 
If step 1 doesn't work, try best to find exception frame ++ */ ++ unw_init_from_interruption(info, bt, pt, sw); ++ if (!BT_REFERENCE_CHECK(bt)) ++ ia64_exception_frame(pt, bt); + -+#define UNW_REGISTER_INFO \ -+ PTREGS_INFO(rax),\ -+ PTREGS_INFO(rdx),\ -+ PTREGS_INFO(rcx),\ -+ PTREGS_INFO(rbx), \ -+ PTREGS_INFO(rsi), \ -+ PTREGS_INFO(rdi), \ -+ PTREGS_INFO(rbp), \ -+ PTREGS_INFO(rsp), \ -+ PTREGS_INFO(r8), \ -+ PTREGS_INFO(r9), \ -+ PTREGS_INFO(r10),\ -+ PTREGS_INFO(r11), \ -+ PTREGS_INFO(r12), \ -+ PTREGS_INFO(r13), \ -+ PTREGS_INFO(r14), \ -+ PTREGS_INFO(r15), \ -+ PTREGS_INFO(rip) ++ return TRUE; ++} + ---- crash/lkcd_vmdump_v1.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/lkcd_vmdump_v1.h 2006-10-11 09:14:35.000000000 -0400 -@@ -1,8 +1,8 @@ - /* lkcd_vmdump_v1.h - core analysis suite - * - * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -114,8 +114,12 @@ + static void + unw_init_frame_info (struct unw_frame_info *info, struct bt_info *bt, ulong sw) + { +--- crash/extensions.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/extensions.c 2008-01-04 09:42:08.000000000 -0500 +@@ -18,9 +18,6 @@ + #include "defs.h" + #include - /* the dump registers */ - #ifndef IA64 -+#ifndef S390 -+#ifndef S390X - struct pt_regs dh_regs; - #endif -+#endif -+#endif +-static void load_extension(char *); +-static void unload_extension(char *); +- + #define DUMP_EXTENSIONS (0) + #define LOAD_EXTENSION (1) + #define UNLOAD_EXTENSION (2) +@@ -110,6 +107,7 @@ + void + dump_extension_table(int verbose) + { ++ int i; + struct extension_table *ext; + struct command_table_entry *cp; + char buf[BUFSIZE]; +@@ -120,23 +118,37 @@ - /* the address of the current task */ - struct task_struct *dh_current_task; ---- crash/lkcd_vmdump_v2_v3.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/lkcd_vmdump_v2_v3.h 2006-10-11 09:14:35.000000000 -0400 + if (verbose) { + for (ext = extension_table; ext; ext = ext->next) { +- fprintf(fp, " filename: %s\n", ext->filename); +- fprintf(fp, " handle: %lx\n", (ulong)ext->handle); +- fprintf(fp, "command_table: %lx (", +- (ulong)ext->command_table); +- for (others = 0, cp = ext->command_table; cp->name;cp++) +- fprintf(fp, "%s%s%s", others++ ? " " : "", +- cp->name, cp->help_data ? "*" : ""); +- fprintf(fp, ")\n"); +- fprintf(fp, " flags: %lx (", ext->flags); ++ fprintf(fp, " filename: %s\n", ext->filename); ++ fprintf(fp, " handle: %lx\n", (ulong)ext->handle); ++ ++ ++ fprintf(fp, " flags: %lx (", ext->flags); + others = 0; + if (ext->flags & REGISTERED) + fprintf(fp, "%sREGISTERED", others++ ? + "|" : ""); + fprintf(fp, ")\n"); +- fprintf(fp, " next: %lx\n", (ulong)ext->next); +- fprintf(fp, " prev: %lx\n%s", +- (ulong)ext->prev, ext->next ? 
"\n" : ""); ++ fprintf(fp, " next: %lx\n", (ulong)ext->next); ++ fprintf(fp, " prev: %lx\n", (ulong)ext->prev); ++ ++ for (i = 0, cp = ext->command_table; cp->name; cp++, i++) { ++ fprintf(fp, "command_table[%d]: %lx\n", i, (ulong)cp); ++ fprintf(fp, " name: %s\n", cp->name); ++ fprintf(fp, " func: %lx\n", (ulong)cp->func); ++ fprintf(fp, " help_data: %lx\n", (ulong)cp->help_data); ++ fprintf(fp, " flags: %lx (", cp->flags); ++ others = 0; ++ if (cp->flags & CLEANUP) ++ fprintf(fp, "%sCLEANUP", others++ ? "|" : ""); ++ if (cp->flags & REFRESH_TASK_TABLE) ++ fprintf(fp, "%sREFRESH_TASK_TABLE", others++ ? "|" : ""); ++ if (cp->flags & HIDDEN_COMMAND) ++ fprintf(fp, "%sHIDDEN_COMMAND", others++ ? "|" : ""); ++ fprintf(fp, ")\n"); ++ } ++ ++ if (ext->next) ++ fprintf(fp, "\n"); + } + return; + } +@@ -171,7 +183,7 @@ + /* + * Load an extension library. + */ +-static void ++void + load_extension(char *lib) + { + struct extension_table *ext; +@@ -208,7 +220,7 @@ + * _init() function before dlopen() returns below. + */ + pc->curext = ext; +- ext->handle = dlopen(ext->filename, RTLD_NOW); ++ ext->handle = dlopen(ext->filename, RTLD_NOW|RTLD_GLOBAL); + + if (!ext->handle) { + strcpy(buf, dlerror()); +@@ -252,7 +264,7 @@ + /* + * Unload all, or as specified, extension libraries. + */ +-static void ++void + unload_extension(char *lib) + { + struct extension_table *ext; +@@ -342,4 +354,23 @@ + pc->curext->flags |= REGISTERED; /* Mark of approval */ + } + ++/* ++ * Hooks for sial. ++ */ ++unsigned long ++get_curtask(void) ++{ ++ return CURRENT_TASK(); ++} ++ ++char * ++crash_global_cmd(void) ++{ ++ return pc->curcmd; ++} + ++struct command_table_entry * ++crash_cmd_table(void) ++{ ++ return pc->cmd_table; ++} +--- crash/lkcd_vmdump_v2_v3.h.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/lkcd_vmdump_v2_v3.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* lkcd_vmdump_v2_v3.h - core analysis suite * @@ -48560,668 +79494,420 @@ } dump_header_asm_t; ---- crash/lkcd_dump_v5.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/lkcd_dump_v5.h 2006-10-11 09:14:35.000000000 -0400 +--- crash/net.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/net.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ - /* lkcd_dump_v5.h - core analysis suite + /* net.c - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. +- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson +- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -50,6 +50,7 @@ + #define STRUCT_NET_DEVICE (0x4) + #define SOCK_V1 (0x8) + #define SOCK_V2 (0x10) ++#define NO_INET_SOCK (0x20) + + #define DEV_NAME_MAX 100 + struct devinfo { +@@ -75,6 +76,7 @@ + static void dump_sockets(ulong, struct reference *); + static int sym_socket_dump(ulong, int, int, ulong, struct reference *); + static void dump_hw_addr(unsigned char *, int); ++static char *dump_in6_addr_port(uint16_t *, uint16_t, char *, int *); + + + #define MK_TYPE_T(f,s,m) \ +@@ -158,13 +160,6 @@ + "in_ifaddr", "ifa_address"); + + STRUCT_SIZE_INIT(sock, "sock"); +- MEMBER_OFFSET_INIT(sock_daddr, "sock", "daddr"); +- MEMBER_OFFSET_INIT(sock_rcv_saddr, "sock", "rcv_saddr"); +- MEMBER_OFFSET_INIT(sock_dport, "sock", "dport"); +- MEMBER_OFFSET_INIT(sock_sport, "sock", "sport"); +- MEMBER_OFFSET_INIT(sock_num, "sock", "num"); +- MEMBER_OFFSET_INIT(sock_family, "sock", "family"); +- MEMBER_OFFSET_INIT(sock_type, "sock", "type"); + + MEMBER_OFFSET_INIT(sock_family, "sock", "family"); + if (VALID_MEMBER(sock_family)) { +@@ -195,7 +190,23 @@ + */ + STRUCT_SIZE_INIT(inet_sock, "inet_sock"); + STRUCT_SIZE_INIT(socket, "socket"); +- MEMBER_OFFSET_INIT(inet_sock_inet, "inet_sock", "inet"); ++ ++ if (STRUCT_EXISTS("inet_opt")) { ++ MEMBER_OFFSET_INIT(inet_sock_inet, "inet_sock", "inet"); ++ MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_opt", "daddr"); ++ MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_opt", "rcv_saddr"); ++ MEMBER_OFFSET_INIT(inet_opt_dport, "inet_opt", "dport"); ++ MEMBER_OFFSET_INIT(inet_opt_sport, "inet_opt", "sport"); ++ MEMBER_OFFSET_INIT(inet_opt_num, "inet_opt", "num"); ++ } else { /* inet_opt moved to inet_sock */ ++ ASSIGN_OFFSET(inet_sock_inet) = 0; ++ MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_sock", "daddr"); ++ MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_sock", "rcv_saddr"); ++ MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "dport"); ++ MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "sport"); ++ MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "num"); ++ } ++ + if (VALID_STRUCT(inet_sock) && + INVALID_MEMBER(inet_sock_inet)) { + /* +@@ -210,15 +221,36 @@ + * to subtract the size of the inet_opt struct + * from the size of the containing inet_sock. + */ ++ net->flags |= NO_INET_SOCK; + ASSIGN_OFFSET(inet_sock_inet) = + SIZE(inet_sock) - STRUCT_SIZE("inet_opt"); + } +- MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_opt", "daddr"); +- MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_opt", +- "rcv_saddr"); +- MEMBER_OFFSET_INIT(inet_opt_dport, "inet_opt", "dport"); +- MEMBER_OFFSET_INIT(inet_opt_sport, "inet_opt", "sport"); +- MEMBER_OFFSET_INIT(inet_opt_num, "inet_opt", "num"); ++ ++ /* ++ * If necessary, set inet_sock size and inet_sock_inet offset, ++ * accounting for the configuration-dependent, intervening, ++ * struct ipv6_pinfo pointer located in between the sock and ++ * inet_opt members of the inet_sock. 
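/*
 * Editor's note: not part of the patch. A stand-alone sketch of the layout
 * fix-up described in the comment above: when the kernel was built with
 * IPv6 support, an extra struct ipv6_pinfo pointer sits between the
 * embedded sock and the inet_opt data, shifting both the synthesized
 * inet_sock size and the inet offset by sizeof(void *). The concrete sizes
 * used in main() are illustrative only.
 */
#include <stdio.h>

struct inet_sock_layout {
        unsigned long size;         /* synthesized SIZE(inet_sock)        */
        unsigned long inet_offset;  /* synthesized OFFSET(inet_sock_inet) */
};

static struct inet_sock_layout
guess_inet_sock_layout(unsigned long sizeof_sock,
                       unsigned long sizeof_inet_opt, int kernel_has_ipv6)
{
        struct inet_sock_layout l;
        unsigned long pad = kernel_has_ipv6 ? sizeof(void *) : 0;

        l.inet_offset = sizeof_sock + pad;
        l.size = sizeof_sock + pad + sizeof_inet_opt;
        return l;
}

int main(void)
{
        struct inet_sock_layout l = guess_inet_sock_layout(360, 80, 1);

        printf("SIZE(inet_sock)=%lu OFFSET(inet_sock_inet)=%lu\n",
               l.size, l.inet_offset);
        return 0;
}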
++ */ ++ if (!VALID_STRUCT(inet_sock)) ++ { ++ if (symbol_exists("tcpv6_protocol") && ++ symbol_exists("udpv6_protocol")) { ++ ASSIGN_SIZE(inet_sock) = SIZE(sock) + ++ sizeof(void *) + STRUCT_SIZE("inet_opt"); ++ ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock) + ++ sizeof(void *); ++ } else { ++ ASSIGN_SIZE(inet_sock) = SIZE(sock) + ++ STRUCT_SIZE("inet_opt"); ++ ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock); ++ } ++ } ++ ++ MEMBER_OFFSET_INIT(ipv6_pinfo_rcv_saddr, "ipv6_pinfo", "rcv_saddr"); ++ MEMBER_OFFSET_INIT(ipv6_pinfo_daddr, "ipv6_pinfo", "daddr"); ++ STRUCT_SIZE_INIT(in6_addr, "in6_addr"); ++ + net->flags |= SOCK_V2; + } + } +@@ -378,6 +410,24 @@ + nhash_buckets = (i = ARRAY_LENGTH(neigh_table_hash_buckets)) ? + i : get_array_length("neigh_table.hash_buckets", + NULL, sizeof(void *)); ++ ++ /* ++ * NOTE: 2.6.8 -> 2.6.9 neigh_table struct changed from: ++ * ++ * struct neighbour *hash_buckets[32]; ++ * to ++ * struct neighbour **hash_buckets; ++ * ++ * Even after hardwiring and testing with the correct ++ * array size, other changes cause this command to break ++ * down, so it needs to be looked at by someone who cares... ++ */ ++ ++ if (nhash_buckets == 0) { ++ option_not_supported('a'); ++ return; ++ } ++ + hash_bytes = nhash_buckets * sizeof(*hash_buckets); + + hash_buckets = (ulong *)GETBUF(hash_bytes); +@@ -609,8 +659,14 @@ + uint16_t dport, sport; + ushort num, family, type; + char *sockbuf, *inet_sockbuf; ++ ulong ipv6_pinfo, ipv6_rcv_saddr, ipv6_daddr; ++ uint16_t u6_addr16_src[8]; ++ uint16_t u6_addr16_dest[8]; ++ char buf2[BUFSIZE]; ++ int len; + + BZERO(buf, BUFSIZE); ++ BZERO(buf2, BUFSIZE); + sockbuf = inet_sockbuf = NULL; + + switch (net->flags & (SOCK_V1|SOCK_V2)) +@@ -646,6 +702,7 @@ + OFFSET(inet_opt_num)); + family = USHORT(inet_sockbuf + OFFSET(sock_common_skc_family)); + type = USHORT(inet_sockbuf + OFFSET(sock_sk_type)); ++ ipv6_pinfo = ULONG(inet_sockbuf + SIZE(sock)); + break; + } + +@@ -723,27 +780,28 @@ + } + + /* make sure we have room at the end... 
*/ +- sprintf(&buf[strlen(buf)], "%s", space(MINSPACE-1)); ++// sprintf(&buf[strlen(buf)], "%s", space(MINSPACE-1)); ++ sprintf(&buf[strlen(buf)], " "); + + if (family == AF_INET) { + if (BITS32()) { +- sprintf(&buf[strlen(buf)], "%*s:%-*d%s", ++ sprintf(&buf[strlen(buf)], "%*s-%-*d%s", + BYTES_IP_ADDR, + inet_ntoa(*((struct in_addr *)&rcv_saddr)), + BYTES_PORT_NUM, + ntohs(sport), + space(1)); +- sprintf(&buf[strlen(buf)], "%*s:%-*d%s", ++ sprintf(&buf[strlen(buf)], "%*s-%-*d%s", + BYTES_IP_ADDR, + inet_ntoa(*((struct in_addr *)&daddr)), + BYTES_PORT_NUM, + ntohs(dport), + space(1)); + } else { +- sprintf(&buf[strlen(buf)], " %s:%d ", ++ sprintf(&buf[strlen(buf)], " %s-%d ", + inet_ntoa(*((struct in_addr *)&rcv_saddr)), + ntohs(sport)); +- sprintf(&buf[strlen(buf)], "%s:%d", ++ sprintf(&buf[strlen(buf)], "%s-%d", + inet_ntoa(*((struct in_addr *)&daddr)), + ntohs(dport)); + } +@@ -753,6 +811,60 @@ + FREEBUF(sockbuf); + if (inet_sockbuf) + FREEBUF(inet_sockbuf); ++ ++ if (family != AF_INET6) ++ return; ++ ++ switch (net->flags & (SOCK_V1|SOCK_V2)) ++ { ++ case SOCK_V1: ++ break; ++ ++ case SOCK_V2: ++ if (INVALID_MEMBER(ipv6_pinfo_rcv_saddr) || ++ INVALID_MEMBER(ipv6_pinfo_daddr)) ++ break; ++ ++ ipv6_rcv_saddr = ipv6_pinfo + OFFSET(ipv6_pinfo_rcv_saddr); ++ ipv6_daddr = ipv6_pinfo + OFFSET(ipv6_pinfo_daddr); ++ ++ if (!readmem(ipv6_rcv_saddr, KVADDR, u6_addr16_src, SIZE(in6_addr), ++ "ipv6_rcv_saddr buffer", QUIET|RETURN_ON_ERROR)) ++ break; ++ if (!readmem(ipv6_daddr, KVADDR, u6_addr16_dest, SIZE(in6_addr), ++ "ipv6_daddr buffer", QUIET|RETURN_ON_ERROR)) ++ break; ++ ++ sprintf(&buf[strlen(buf)], "%*s ", BITS32() ? 22 : 12, ++ dump_in6_addr_port(u6_addr16_src, sport, buf2, &len)); ++ if (BITS32() && (len > 22)) ++ len = 1; ++ mkstring(dump_in6_addr_port(u6_addr16_dest, dport, buf2, NULL), ++ len, CENTER, NULL); ++ sprintf(&buf[strlen(buf)], "%s", buf2); ++ ++ break; ++ } ++} ++ ++static char * ++dump_in6_addr_port(uint16_t *addr, uint16_t port, char *buf, int *len) ++{ ++ sprintf(buf, "%x:%x:%x:%x:%x:%x:%x:%x-%d", ++ ntohs(addr[0]), ++ ntohs(addr[1]), ++ ntohs(addr[2]), ++ ntohs(addr[3]), ++ ntohs(addr[4]), ++ ntohs(addr[5]), ++ ntohs(addr[6]), ++ ntohs(addr[7]), ++ ntohs(port)); ++ ++ if (len) ++ *len = strlen(buf); ++ ++ return buf; + } + + +@@ -899,6 +1011,8 @@ + fprintf(fp, "%sSTRUCT_DEVICE", others++ ? "|" : ""); + if (net->flags & STRUCT_NET_DEVICE) + fprintf(fp, "%sSTRUCT_NET_DEVICE", others++ ? "|" : ""); ++ if (net->flags & NO_INET_SOCK) ++ fprintf(fp, "%sNO_INET_SOCK", others++ ? "|" : ""); + if (net->flags & SOCK_V1) + fprintf(fp, "%sSOCK_V1", others++ ? 
"|" : ""); + if (net->flags & SOCK_V2) +@@ -972,7 +1086,7 @@ + void + dump_sockets_workhorse(ulong task, ulong flag, struct reference *ref) + { +- ulong files_struct_addr = 0; ++ ulong files_struct_addr = 0, fdtable_addr = 0; + int max_fdset = 0; + int max_fds = 0; + ulong open_fds_addr = 0; +@@ -1004,32 +1118,54 @@ + sizeof(void *), "task files contents", FAULT_ON_ERROR); + + if (files_struct_addr) { +- readmem(files_struct_addr + OFFSET(files_struct_max_fdset), +- KVADDR, &max_fdset, sizeof(int), +- "files_struct max_fdset", FAULT_ON_ERROR); +- +- readmem(files_struct_addr + OFFSET(files_struct_max_fds), +- KVADDR, &max_fds, sizeof(int), "files_struct max_fds", +- FAULT_ON_ERROR); +- } ++ if (VALID_MEMBER(files_struct_max_fdset)) { ++ readmem(files_struct_addr + OFFSET(files_struct_max_fdset), ++ KVADDR, &max_fdset, sizeof(int), ++ "files_struct max_fdset", FAULT_ON_ERROR); ++ readmem(files_struct_addr + OFFSET(files_struct_max_fds), ++ KVADDR, &max_fds, sizeof(int), "files_struct max_fds", ++ FAULT_ON_ERROR); ++ } ++ else if (VALID_MEMBER(files_struct_fdt)) { ++ readmem(files_struct_addr + OFFSET(files_struct_fdt), KVADDR, ++ &fdtable_addr, sizeof(void *), "fdtable buffer", ++ FAULT_ON_ERROR); ++ if (VALID_MEMBER(fdtable_max_fdset)) ++ readmem(fdtable_addr + OFFSET(fdtable_max_fdset), ++ KVADDR, &max_fdset, sizeof(int), ++ "fdtable_struct max_fdset", FAULT_ON_ERROR); ++ else ++ max_fdset = -1; ++ readmem(fdtable_addr + OFFSET(fdtable_max_fds), ++ KVADDR, &max_fds, sizeof(int), "fdtable_struct max_fds", ++ FAULT_ON_ERROR); ++ } ++ } + +- if (!files_struct_addr || (max_fdset == 0) || (max_fds == 0)) { ++ if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) || ++ !files_struct_addr || (max_fdset == 0) || (max_fds == 0)) { + if (!NET_REFERENCE_CHECK(ref)) + fprintf(fp, "No open sockets.\n"); + return; + } + +- readmem(files_struct_addr + OFFSET(files_struct_open_fds), KVADDR, +- &open_fds_addr, sizeof(void *), "files_struct open_fds addr", +- FAULT_ON_ERROR); ++ if (VALID_MEMBER(fdtable_open_fds)){ ++ readmem(fdtable_addr + OFFSET(fdtable_open_fds), KVADDR, ++ &open_fds_addr, sizeof(void *), "files_struct open_fds addr", ++ FAULT_ON_ERROR); ++ readmem(fdtable_addr + OFFSET(fdtable_fd), KVADDR, &fd, ++ sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR); ++ } else { ++ readmem(files_struct_addr + OFFSET(files_struct_open_fds), KVADDR, ++ &open_fds_addr, sizeof(void *), "files_struct open_fds addr", ++ FAULT_ON_ERROR); ++ readmem(files_struct_addr + OFFSET(files_struct_fd), KVADDR, &fd, ++ sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR); ++ } + + if (open_fds_addr) +- readmem(open_fds_addr, KVADDR, &open_fds, sizeof(fd_set), +- "files_struct open_fds", FAULT_ON_ERROR); +- +- readmem(files_struct_addr + OFFSET(files_struct_fd), KVADDR, &fd, +- sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR); +- ++ readmem(open_fds_addr, KVADDR, &open_fds, sizeof(fd_set), ++ "files_struct open_fds", FAULT_ON_ERROR); + if (!open_fds_addr || !fd) { + if (!NET_REFERENCE_CHECK(ref)) + fprintf(fp, "No open sockets.\n"); +@@ -1061,7 +1197,7 @@ + for (;;) { + unsigned long set; + i = j * __NFDBITS; +- if ((i >= max_fdset) || (i >= max_fds)) ++ if (((max_fdset >= 0) && (i >= max_fdset)) || (i >= max_fds)) + break; + set = open_fds.__fds_bits[j++]; + while (set) { +@@ -1096,9 +1232,9 @@ + */ + + static char *socket_hdr_32 = +-"FD SOCKET SOCK FAMILY:TYPE SOURCE:PORT DESTINATION:PORT"; ++"FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT"; + static char *socket_hdr_64 = +-"FD SOCKET SOCK 
FAMILY:TYPE SOURCE:PORT DESTINATION:PORT"; ++"FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT"; + + static int + sym_socket_dump(ulong file, +@@ -1223,7 +1359,12 @@ + dump_struct("sock", sock, 0); + break; + case SOCK_V2: +- dump_struct("inet_sock", sock, 0); ++ if (STRUCT_EXISTS("inet_sock") && !(net->flags & NO_INET_SOCK)) ++ dump_struct("inet_sock", sock, 0); ++ else if (STRUCT_EXISTS("sock")) ++ dump_struct("sock", sock, 0); ++ else ++ fprintf(fp, "\nunable to display inet_sock structure\n"); + break; + } + break; +--- crash/s390_dump.c.orig 2008-01-17 15:17:20.000000000 -0500 ++++ crash/s390_dump.c 2008-01-04 09:42:08.000000000 -0500 +@@ -1,8 +1,8 @@ + /* s390_dump.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. - * Copyright (C) 2002, 2003, 2004, 2005 David Anderson - * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. + * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson + * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. + * Copyright (C) 2005 Michael Holzheu, IBM Corporation * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -35,7 +35,7 @@ - #ifndef _DUMP_H - #define _DUMP_H +@@ -16,7 +16,7 @@ + * GNU General Public License for more details. + */ + #include "defs.h" +-#include ++//#include + #include "ibm_common.h" --#include -+//#include + static FILE * s390_file; +@@ -69,10 +69,13 @@ + return WRITE_ERROR; + } - /* define TRUE and FALSE for use in our dump modules */ - #ifndef FALSE ---- crash/lkcd_dump_v7.h.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/lkcd_dump_v7.h 2006-10-11 09:14:35.000000000 -0400 -@@ -1,8 +1,8 @@ - /* lkcd_dump_v5.h - core analysis suite - * - * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. -- * Copyright (C) 2002, 2003, 2004, 2005 David Anderson -- * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by -@@ -35,7 +35,7 @@ - #ifndef _DUMP_H - #define _DUMP_H - --#include -+//#include - - /* define TRUE and FALSE for use in our dump modules */ - #ifndef FALSE ---- crash/Makefile.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/Makefile 2007-08-27 15:02:36.000000000 -0400 -@@ -3,8 +3,8 @@ - # Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. - # www.missioncriticallinux.com, info@missioncriticallinux.com - # --# Copyright (C) 2002, 2003, 2004, 2005 David Anderson --# Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. All rights reserved. 
- # - # This program is free software; you can redistribute it and/or modify - # it under the terms of the GNU General Public License as published by -@@ -35,10 +35,12 @@ - # - # GDB, GDB_FILES and GDB_OFILES will be configured automatically by configure - # --GDB=gdb-6.1 --GDB_FILES=${GDB_6.1_FILES} -+GDB= -+GDB_FILES= - GDB_OFILES= - -+GDB_PATCH_FILES=gdb-6.1.patch ++#define S390_PAGE_SHIFT 12 ++#define S390_PAGE_SIZE (1UL << S390_PAGE_SHIFT) + - # - # Default installation directory - # -@@ -60,22 +62,25 @@ - # (2) Or invoke make like so: - # make LDFLAGS=-static NAT_CLIBS="-lc -lresolv" GDBSERVER_LIBS="-lc -lresolv" + uint + s390_page_size(void) + { +- return PAGE_SIZE; ++ return S390_PAGE_SIZE; + } --GENERIC_HFILES=defs.h -+GENERIC_HFILES=defs.h xen_hyper_defs.h - MCORE_HFILES=va_server.h vas_crash.h --REDHAT_HFILES=netdump.h diskdump.h -+REDHAT_HFILES=netdump.h diskdump.h xendump.h - LKCD_DUMP_HFILES=lkcd_vmdump_v1.h lkcd_vmdump_v2_v3.h lkcd_dump_v5.h \ - lkcd_dump_v7.h lkcd_dump_v8.h lkcd_fix_mem.h - LKCD_TRACE_HFILES=lkcd_x86_trace.h - IBM_HFILES=ibm_common.h --UNWIND_HFILES=unwind.h unwind_i.h rse.h -+UNWIND_HFILES=unwind.h unwind_i.h rse.h unwind_x86.h unwind_x86_64.h - - CFILES=main.c tools.c global_data.c memory.c filesys.c help.c task.c \ - kernel.c test.c gdb_interface.c configure.c net.c dev.c \ -- alpha.c x86.c ppc.c ia64.c s390.c s390x.c ppc64.c x86_64.c \ -+ alpha.c x86.c ppc.c ia64.c s390.c s390x.c s390dbf.c ppc64.c x86_64.c \ - extensions.c remote.c va_server.c va_server_v1.c symbols.c cmdline.c \ - lkcd_common.c lkcd_v1.c lkcd_v2_v3.c lkcd_v5.c lkcd_v7.c lkcd_v8.c\ - lkcd_fix_mem.c s390_dump.c lkcd_x86_trace.c \ -- netdump.c diskdump.c unwind.c unwind_decoder.c -+ netdump.c diskdump.c xendump.c unwind.c unwind_decoder.c \ -+ unwind_x86_32_64.c \ -+ xen_hyper.c xen_hyper_command.c xen_hyper_global_data.c \ -+ xen_hyper_dump_tables.c - - SOURCE_FILES=${CFILES} ${GENERIC_HFILES} ${MCORE_HFILES} \ - ${REDHAT_CFILES} ${REDHAT_HFILES} ${UNWIND_HFILES} \ -@@ -83,11 +88,23 @@ - - OBJECT_FILES=main.o tools.o global_data.o memory.o filesys.o help.o task.o \ - build_data.o kernel.o test.o gdb_interface.o net.o dev.o \ -- alpha.o x86.o ppc.o ia64.o s390.o s390x.o ppc64.o x86_64.o \ -+ alpha.o x86.o ppc.o ia64.o s390.o s390x.o s390dbf.o ppc64.o x86_64.o \ - extensions.o remote.o va_server.o va_server_v1.o symbols.o cmdline.o \ - lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \ -- lkcd_fix_mem.o s390_dump.o netdump.o diskdump.o \ -- lkcd_x86_trace.o unwind_v1.o unwind_v2.o unwind_v3.o -+ lkcd_fix_mem.o s390_dump.o netdump.o diskdump.o xendump.o \ -+ lkcd_x86_trace.o unwind_v1.o unwind_v2.o unwind_v3.o \ -+ unwind_x86_32_64.o \ -+ xen_hyper.o xen_hyper_command.o xen_hyper_global_data.o \ -+ xen_hyper_dump_tables.o -+ -+# These are the current set of crash extensions sources. They are not built -+# by default unless the third command line of the "all:" stanza is uncommented. -+# Alternatively, they can be built by entering "make extensions" from this -+# directory. 
-+ -+EXTENSIONS=extensions -+EXTENSION_SOURCE_FILES=${EXTENSIONS}/Makefile ${EXTENSIONS}/echo.c ${EXTENSIONS}/dminfo.c -+EXTENSION_OBJECT_FILES=echo.so dminfo.so - - DAEMON_OBJECT_FILES=remote_daemon.o va_server.o va_server_v1.o \ - lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \ -@@ -150,10 +167,11 @@ - ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ - ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ - ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/dwarf2read.c \ -- ${GDB}/include/obstack.h -+ ${GDB}/include/obstack.h ${GDB}/gdb/ppc-linux-tdep.c - GDB_6.1_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \ - ${GDB}/gdb/target.o ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ -- ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o -+ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o \ -+ ${GDB}/gdb/ppc-linux-tdep.o - - # - # GDB_FLAGS is passed up from the gdb Makefile. -@@ -175,7 +193,8 @@ - - CFLAGS=-g -D${TARGET} ${TARGET_CFLAGS} - --TAR_FILES=${SOURCE_FILES} Makefile COPYING README .rh_rpm_package crash.8 -+TAR_FILES=${SOURCE_FILES} Makefile COPYING README .rh_rpm_package crash.8 \ -+ ${EXTENSION_SOURCE_FILES} - CSCOPE_FILES=${SOURCE_FILES} - - READLINE_DIRECTORY=./${GDB}/readline -@@ -184,9 +203,13 @@ - - REDHATFLAGS=-DREDHAT - -+# To build the extensions library by default, uncomment the third command -+# line below. Otherwise they can be built by entering "make extensions". -+ - all: make_configure - @./configure -p "RPMPKG=${RPMPKG}" -b - @make --no-print-directory gdb_merge -+# @make --no-print-directory extensions - - gdb_merge: force - @if [ ! -f ${GDB}/README ]; then \ -@@ -206,6 +229,11 @@ - @for FILE in ${GDB_FILES}; do\ - echo $$FILE >> gdb.files; done - @tar --exclude-from gdb.files -xvzmf ${GDB}.tar.gz -+ @make --no-print-directory gdb_patch -+ -+gdb_patch: -+ if [ -f ${GDB}.patch ] && [ -s ${GDB}.patch ]; then \ -+ patch -p0 < ${GDB}.patch; fi - - library: make_build_data ${OBJECT_FILES} - ar -rs ${PROGRAM}lib.a ${OBJECT_FILES} -@@ -318,7 +346,7 @@ - remote_daemon.o: ${GENERIC_HFILES} remote.c - cc -c ${CFLAGS} -DDAEMON remote.c -o remote_daemon.o ${WARNING_OPTIONS} ${WARNING_ERROR} - --x86.o: ${GENERIC_HFILES} x86.c -+x86.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86.c - cc -c ${CFLAGS} -DMCLX x86.c ${WARNING_OPTIONS} ${WARNING_ERROR} - - alpha.o: ${GENERIC_HFILES} alpha.c -@@ -327,13 +355,13 @@ - ppc.o: ${GENERIC_HFILES} ppc.c - cc -c ${CFLAGS} ppc.c ${WARNING_OPTIONS} ${WARNING_ERROR} - --ia64.o: ${GENERIC_HFILES} ia64.c -+ia64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ia64.c - cc -c ${CFLAGS} ia64.c ${WARNING_OPTIONS} ${WARNING_ERROR} - - ppc64.o: ${GENERIC_HFILES} ppc64.c - cc -c ${CFLAGS} ppc64.c ${WARNING_OPTIONS} ${WARNING_ERROR} - --x86_64.o: ${GENERIC_HFILES} x86_64.c -+x86_64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86_64.c - cc -c ${CFLAGS} x86_64.c ${WARNING_OPTIONS} ${WARNING_ERROR} - - s390.o: ${GENERIC_HFILES} ${IBM_HFILES} s390.c -@@ -342,6 +370,9 @@ - s390x.o: ${GENERIC_HFILES} ${IBM_HFILES} s390x.c - cc -c ${CFLAGS} s390x.c ${WARNING_OPTIONS} ${WARNING_ERROR} - -+s390dbf.o: ${GENERIC_HFILES} ${IBM_HFILES} s390dbf.c -+ cc -c ${CFLAGS} s390dbf.c ${WARNING_OPTIONS} ${WARNING_ERROR} -+ - s390_dump.o: ${GENERIC_HFILES} ${IBM_HFILES} s390_dump.c - cc -c ${CFLAGS} s390_dump.c ${WARNING_OPTIONS} ${WARNING_ERROR} - -@@ -353,12 +384,18 @@ - diskdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} diskdump.c - cc -c ${CFLAGS} diskdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} - -+xendump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} xendump.c -+ cc 
-c ${CFLAGS} xendump.c ${WARNING_OPTIONS} ${WARNING_ERROR} -+ - extensions.o: ${GENERIC_HFILES} extensions.c - cc -c ${CFLAGS} extensions.c ${WARNING_OPTIONS} ${WARNING_ERROR} - - lkcd_x86_trace.o: ${GENERIC_HFILES} ${LKCD_TRACE_HFILES} lkcd_x86_trace.c - cc -c ${CFLAGS} -DREDHAT lkcd_x86_trace.c ${WARNING_OPTIONS} ${WARNING_ERROR} - -+unwind_x86_32_64.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind_x86_32_64.c -+ cc -c ${CFLAGS} unwind_x86_32_64.c -o unwind_x86_32_64.o ${WARNING_OPTIONS} ${WARNING_ERROR} -+ - unwind_v1.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c - cc -c ${CFLAGS} -DREDHAT -DUNWIND_V1 unwind.c -o unwind_v1.o ${WARNING_OPTIONS} ${WARNING_ERROR} - -@@ -371,6 +408,18 @@ - lkcd_fix_mem.o: ${GENERIC_HFILES} ${LKCD_HFILES} lkcd_fix_mem.c - cc -c ${CFLAGS} lkcd_fix_mem.c ${WARNING_OPTIONS} ${WARNING_ERROR} - -+xen_hyper.o: ${GENERIC_HFILES} xen_hyper.c -+ cc -c ${CFLAGS} xen_hyper.c ${WARNING_OPTIONS} ${WARNING_ERROR} -+ -+xen_hyper_command.o: ${GENERIC_HFILES} xen_hyper_command.c -+ cc -c ${CFLAGS} xen_hyper_command.c ${WARNING_OPTIONS} ${WARNING_ERROR} -+ -+xen_hyper_global_data.o: ${GENERIC_HFILES} xen_hyper_global_data.c -+ cc -c ${CFLAGS} xen_hyper_global_data.c ${WARNING_OPTIONS} ${WARNING_ERROR} -+ -+xen_hyper_dump_tables.o: ${GENERIC_HFILES} xen_hyper_dump_tables.c -+ cc -c ${CFLAGS} xen_hyper_dump_tables.c ${WARNING_OPTIONS} ${WARNING_ERROR} -+ - ${PROGRAM}: force - @make --no-print-directory all - -@@ -393,13 +442,13 @@ - - gdb_files: make_configure - @./configure -q -b -- @echo ${GDB_FILES} -+ @echo ${GDB_FILES} ${GDB_PATCH_FILES} - - show_files: - @if [ -f ${PROGRAM} ]; then \ -- ./${PROGRAM} --no_crashrc -h README > README; fi -- @echo ${SOURCE_FILES} Makefile ${GDB_FILES} COPYING README \ -- .rh_rpm_package crash.8 -+ ./${PROGRAM} --no_scroll --no_crashrc -h README > README; echo $?; fi -+ @echo ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} COPYING README \ -+ .rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} - - ctags: - ctags ${SOURCE_FILES} -@@ -410,8 +459,8 @@ - - do_tar: - @if [ -f ${PROGRAM} ]; then \ -- ./${PROGRAM} --no_crashrc -h README > README; fi -- tar cvzf ${PROGRAM}.tar.gz ${TAR_FILES} ${GDB_FILES} -+ ./${PROGRAM} --no_scroll --no_crashrc -h README > README; fi -+ tar cvzf ${PROGRAM}.tar.gz ${TAR_FILES} ${GDB_FILES} ${GDB_PATCH_FILES} - @echo; ls -l ${PROGRAM}.tar.gz - - # To create a base tar file for Red Hat RPM packaging, pass the base RPM -@@ -421,12 +470,12 @@ - # spec file will have its own release number, which will in turn get passed - # to the "all" target upon the initial build. 
- --RELEASE=4.0 -+RELEASE= - - release: make_configure - @if [ "`id --user`" != "0" ]; then \ - echo "make release: must be super-user"; exit 1; fi -- @./configure -p "RPMPKG=${RPMPKG}" -u -g -+ @./configure -P "RPMPKG=${RPMPKG}" -u -g - @make --no-print-directory release_configure - @echo - @echo "cvs tag this release if necessary" -@@ -446,10 +495,10 @@ - @rm -f ${PROGRAM}-${RELEASE}.tar.gz - @rm -f ${PROGRAM}-${RELEASE}.src.rpm - @chown root ./RELDIR/${PROGRAM}-${RELEASE} -- @tar cf - ${SOURCE_FILES} Makefile ${GDB_FILES} COPYING \ -- .rh_rpm_package crash.8 | (cd ./RELDIR/${PROGRAM}-${RELEASE}; tar xf -) -+ @tar cf - ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} COPYING \ -+ .rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} | (cd ./RELDIR/${PROGRAM}-${RELEASE}; tar xf -) - @cp ${GDB}.tar.gz ./RELDIR/${PROGRAM}-${RELEASE} -- @./${PROGRAM} --no_crashrc -h README > ./RELDIR/${PROGRAM}-${RELEASE}/README -+ @./${PROGRAM} --no_scroll --no_crashrc -h README > ./RELDIR/${PROGRAM}-${RELEASE}/README - @(cd ./RELDIR; find . -exec chown root {} ";") - @(cd ./RELDIR; find . -exec chgrp root {} ";") - @(cd ./RELDIR; find . -exec touch {} ";") -@@ -464,7 +513,7 @@ - cp ${PROGRAM}-${RELEASE}.tar.gz /usr/src/redhat/SOURCES; \ - /usr/bin/rpmbuild -bs ${PROGRAM}.spec > /dev/null; \ - rm -f /usr/src/redhat/SOURCES/${PROGRAM}-${RELEASE}.tar.gz; \ -- cp /usr/src/redhat/SRPMS/${PROGRAM}-${RELEASE}.src.rpm . ; \ -+ mv /usr/src/redhat/SRPMS/${PROGRAM}-${RELEASE}.src.rpm . ; \ - ls -l ${PROGRAM}-${RELEASE}.src.rpm; \ - exit 0; fi - -@@ -488,3 +537,11 @@ - - dis: - objdump --disassemble --line-numbers ${PROGRAM} > ${PROGRAM}.dis -+ -+extensions: make_configure -+ @./configure -q -b -+ @make --no-print-directory do_extensions -+ -+do_extensions: -+ @(cd extensions; make -i OBJECTS="$(EXTENSION_OBJECT_FILES)" \ -+ TARGET=$(TARGET) TARGET_CFLAGS=$(TARGET_CFLAGS)) ---- crash/gdb-6.1.patch.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/gdb-6.1.patch 2007-04-03 11:43:05.000000000 -0400 -@@ -0,0 +1,87 @@ -+--- gdb-6.1.orig/bfd/coff-alpha.c -++++ gdb-6.1/bfd/coff-alpha.c -+@@ -1455,7 +1455,7 @@ alpha_relocate_section (output_bfd, info -+ amt = sizeof (struct ecoff_section_tdata); -+ lita_sec_data = ((struct ecoff_section_tdata *) -+ bfd_zalloc (input_bfd, amt)); -+- ecoff_section_data (input_bfd, lita_sec) = lita_sec_data; -++ lita_sec->used_by_bfd = lita_sec_data; -+ } -+ -+ if (lita_sec_data->gp != 0) -+--- gdb-6.1.orig/sim/ppc/debug.c -++++ gdb-6.1/sim/ppc/debug.c -+@@ -28,6 +28,7 @@ -+ #ifdef HAVE_STDLIB_H -+ #include -+ #endif -++#include -+ -+ int ppc_trace[nr_trace_options]; -+ -+--- gdb-6.1.orig/gdb/remote.c -++++ gdb-6.1/gdb/remote.c -+@@ -3445,7 +3445,7 @@ remote_store_registers (int regnum) -+ { -+ int i; -+ regs = alloca (rs->sizeof_g_packet); -+- memset (regs, rs->sizeof_g_packet, 0); -++ memset (regs, 0, rs->sizeof_g_packet); -+ for (i = 0; i < NUM_REGS + NUM_PSEUDO_REGS; i++) -+ { -+ struct packet_reg *r = &rs->regs[i]; -+--- gdb-6.1.orig/gdb/std-regs.c -++++ gdb-6.1/gdb/std-regs.c -+@@ -61,7 +61,7 @@ value_of_builtin_frame_reg (struct frame -+ val = allocate_value (builtin_type_frame_reg); -+ VALUE_LVAL (val) = not_lval; -+ buf = VALUE_CONTENTS_RAW (val); -+- memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0); -++ memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val))); -+ /* frame.base. 
*/ -+ if (frame != NULL) -+ ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf, -+@@ -87,7 +87,7 @@ value_of_builtin_frame_fp_reg (struct fr -+ struct value *val = allocate_value (builtin_type_void_data_ptr); -+ char *buf = VALUE_CONTENTS_RAW (val); -+ if (frame == NULL) -+- memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0); -++ memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val))); -+ else -+ ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf, -+ get_frame_base_address (frame)); -+@@ -105,7 +105,7 @@ value_of_builtin_frame_pc_reg (struct fr -+ struct value *val = allocate_value (builtin_type_void_data_ptr); -+ char *buf = VALUE_CONTENTS_RAW (val); -+ if (frame == NULL) -+- memset (buf, TYPE_LENGTH (VALUE_TYPE (val)), 0); -++ memset (buf, 0, TYPE_LENGTH (VALUE_TYPE (val))); -+ else -+ ADDRESS_TO_POINTER (builtin_type_void_data_ptr, buf, -+ get_frame_pc (frame)); -+--- gdb-6.1.orig/gdb/dwarf2-frame.c -++++ gdb-6.1/gdb/dwarf2-frame.c -+@@ -1353,7 +1353,9 @@ decode_frame_entry_1 (struct comp_unit * -+ else if (*augmentation == 'P') -+ { -+ /* Skip. */ -+- buf += size_of_encoded_value (*buf++); -++// buf += size_of_encoded_value (*buf++); -++ buf += size_of_encoded_value(*buf); -++ buf++; -+ augmentation++; -+ } -+ -+--- gdb-6.1/opcodes/i386-dis.c.orig -++++ gdb-6.1/opcodes/i386-dis.c -+@@ -2092,6 +2092,10 @@ print_insn (bfd_vma pc, disassemble_info -+ dp = &dis386_twobyte[*++codep]; -+ need_modrm = twobyte_has_modrm[*codep]; -+ uses_SSE_prefix = twobyte_uses_SSE_prefix[*codep]; -++ if (dp->name && strcmp(dp->name, "ud2a") == 0) { -++ extern int kernel_BUG_encoding_bytes(void); -++ codep += kernel_BUG_encoding_bytes(); -++ } -+ } -+ else -+ { ---- crash/README.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/README 2007-08-27 15:02:34.000000000 -0400 -@@ -69,7 +69,7 @@ - After the kernel is re-compiled, the uncompressed "vmlinux" kernel - that is created in the top-level kernel build directory must be saved. - -- To build this utility, simply uncompress the tar file, enter the crash-4.0 -+ To build this utility, simply uncompress the tar file, enter the crash-4.0-4.6 - subdirectory, and type "make". The initial build will take several minutes - because the gdb module must be configured and and built. Alternatively, the - crash source RPM file may be installed and built, and the resultant crash -@@ -89,11 +89,14 @@ - - $ crash - -- crash 4.0 -- Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. -- Copyright (C) 2004, 2005 IBM Corporation -- Copyright (C) 1999-2005 Hewlett-Packard Co -- Copyright (C) 1999, 2002 Silicon Graphics, Inc. -+ crash 4.0-4.6 -+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. -+ Copyright (C) 2004, 2005, 2006 IBM Corporation -+ Copyright (C) 1999-2006 Hewlett-Packard Co -+ Copyright (C) 2005, 2006 Fujitsu Limited -+ Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. -+ Copyright (C) 2005 NEC Corporation -+ Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. - Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. 
- This program is free software, covered by the GNU General Public License, - and you are welcome to change it and/or distribute copies of it under -@@ -111,7 +114,7 @@ - KERNEL: /boot/vmlinux - DUMPFILE: /dev/mem - CPUS: 1 -- DATE: Wed Jul 13 13:26:00 2005 -+ DATE: Mon Aug 27 15:02:34 2007 - UPTIME: 10 days, 22:55:18 - LOAD AVERAGE: 0.08, 0.03, 0.01 - TASKS: 42 -@@ -139,7 +142,7 @@ - exit log rd task - extend mach repeat timer - -- crash version: 4.0 gdb version: 6.1 -+ crash version: 4.0-4.6 gdb version: 6.1 - For help on any command above, enter "help ". - For help on input options, enter "help input". - For help on output options, enter "help output". -@@ -152,11 +155,14 @@ - - $ crash vmlinux vmcore - -- crash 4.0 -- Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. -- Copyright (C) 2004, 2005 IBM Corporation -- Copyright (C) 1999-2005 Hewlett-Packard Co -- Copyright (C) 1999, 2002 Silicon Graphics, Inc. -+ crash 4.0-4.6 -+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. -+ Copyright (C) 2004, 2005, 2006 IBM Corporation -+ Copyright (C) 1999-2006 Hewlett-Packard Co -+ Copyright (C) 2005, 2006 Fujitsu Limited -+ Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. -+ Copyright (C) 2005 NEC Corporation -+ Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. - Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. - This program is free software, covered by the GNU General Public License, - and you are welcome to change it and/or distribute copies of it under -@@ -196,11 +202,14 @@ - - $ crash vmlinux.17 lcore.cr.17 - -- crash 4.0 -- Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. -- Copyright (C) 2004, 2005 IBM Corporation -- Copyright (C) 1999-2005 Hewlett-Packard Co -- Copyright (C) 1999, 2002 Silicon Graphics, Inc. -+ crash 4.0-4.6 -+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. -+ Copyright (C) 2004, 2005, 2006 IBM Corporation -+ Copyright (C) 1999-2006 Hewlett-Packard Co -+ Copyright (C) 2005, 2006 Fujitsu Limited -+ Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. -+ Copyright (C) 2005 NEC Corporation -+ Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. - Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. - This program is free software, covered by the GNU General Public License, - and you are welcome to change it and/or distribute copies of it under ---- crash/crash.8.orig 2007-08-27 15:02:36.000000000 -0400 -+++ crash/crash.8 2007-07-13 16:57:38.000000000 -0400 -@@ -5,7 +5,7 @@ - .TH CRASH 8 - .SH NAME - crash \- Analyze Linux crash data or a live system --.SH SYNAPSIS -+.SH SYNOPSIS - .B crash - [ - .B -h -@@ -42,9 +42,13 @@ - is a tool for interactively analyzing the state of the Linux system - while it is running, or after a kernel crash has occurred and a - core dump has been created by the Red Hat --.I netdump --facility. It is loosely based on the SVR4 UNIX crash --command, but has been signficantly enhanced -+.I netdump, -+.I diskdump, -+.I kdump, -+or -+.I xendump -+facilities. It is loosely based on the SVR4 UNIX crash -+command, but has been significantly enhanced - by completely merging it with the - .I gdb - debugger. The marriage of the two effectively combines the -@@ -207,15 +211,15 @@ - .I dis - disassembles memory, either entire kernel functions, from a - location for a specified number of instructions, or from the start of a --fuction up to a specified memory location. -+function up to a specified memory location. 
- .TP - .I eval - evalues an expression or numeric type and displays the result --in hexidecimal, decimal, octal and binary. -+in hexadecimal, decimal, octal and binary. - .TP - .I exit - causes --.I crash -+.B crash - to exit. - .TP - .I extend -@@ -230,7 +234,7 @@ - in the system. - .TP - .I fuser --displays the tasks using the specifed file or socket. -+displays the tasks using the specified file or socket. - .TP - .I gdb - passes its argument to the underlying -@@ -274,7 +278,7 @@ - display various network related data. - .TP - .I p --passes its argumnts to the -+passes its arguments to the - .I gdb - "print" command for evaluation and display. - .TP -@@ -361,11 +365,85 @@ - .I wr - modifies the contents of memory. When writing to memory on - a live system, this command should obviously be used with great care. -+.SH FILES -+.TP -+.I .crashrc -+Initialization commands. The file can be located in the user's -+.B HOME -+directory and/or the current directory. Commands found in the -+.I .crashrc -+file in the -+.B HOME -+directory are executed before those in the current directory's -+.I .crashrc -+file. -+.SH ENVIRONMENT -+.TP -+.B EDITOR -+Command input is read using -+.BR readline(3). -+If -+.B EDITOR -+is set to -+.I emacs -+or -+.I vi -+then suitable keybindings are used. If -+.B EDITOR -+is not set, then -+.I vi -+is used. This can be overridden by -+.B set vi -+or -+.B set emacs -+commands located in a -+.IR .crashrc -+file, or by entering -+.B -e emacs -+on the -+.B crash -+command line. -+.TP -+.B CRASHPAGER -+If -+.B CRASHPAGER -+is set, its value is used as the name of the program to which command output will be sent. -+If not, then command output is sent to -+.B /usr/bin/less -E -X -+by default. -+.SH NOTES -+.PP -+If -+.B crash -+does not work, look for a newer version: kernel evolution frequently makes -+.B crash -+updates necessary. -+.PP -+The command -+.B set scroll off -+will cause output to be sent directly to -+the terminal rather than through a paging program. This is useful, -+for example, if you are running -+.B crash -+in a window of -+.BR emacs . - .SH AUTHOR - Dave Anderson wrote --.B Crash -+.B crash - .TP - Jay Fenlason wrote this man page. - .SH "SEE ALSO" --netdump(8) --gdb(1) -+.PP -+The -+.I help -+command within -+.B crash -+provides more complete and accurate documentation than this man page. -+.PP -+.I http://people.redhat.com/anderson -+- the home page of the -+.B crash -+utility. -+.PP -+.BR netdump (8), -+.BR gdb (1) + int diff --git a/crash.spec b/crash.spec index 242b552..4f909da 100644 --- a/crash.spec +++ b/crash.spec @@ -4,7 +4,7 @@ Summary: crash utility for live systems; netdump, diskdump, kdump, LKCD or mcore dumpfiles Name: crash Version: 4.0 -Release: 4.6.2 +Release: 5.0.3 License: GPL Group: Development/Debuggers Source: %{name}-%{version}.tar.gz @@ -46,6 +46,9 @@ rm -rf %{buildroot} %doc README %changelog +* Wed Jan 23 2008 Dave Anderson - 4.0-5.0.3 +- Updated crash.patch to match upstream version 4.0-5.0. + * Wed Aug 29 2007 Dave Anderson - 4.0-4.6.2 - Updated crash.patch to match upstream version 4.0-4.6.
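
Note on the dump_in6_addr_port() helper introduced in the net.c hunks above: each
16-bit group of the in6_addr is byte-swapped with ntohs() and printed as
"a:b:c:d:e:f:g:h-port", which is what feeds the new SOURCE-PORT / DESTINATION-PORT
columns. The standalone sketch below shows the same formatting scheme outside of
crash; the wrapper name format_in6_addr_port(), the main() driver, and the ::1 test
address are illustrative only and are not part of the patch.

    /*
     * Minimal standalone sketch of the formatting used by the patch's
     * dump_in6_addr_port() helper.  format_in6_addr_port() is an
     * illustrative stand-in, not the function from net.c.
     */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    static char *
    format_in6_addr_port(const uint16_t *addr, uint16_t port,
                         char *buf, size_t buflen)
    {
            /* print each 16-bit group in host byte order, then "-port" */
            snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x-%d",
                    ntohs(addr[0]), ntohs(addr[1]),
                    ntohs(addr[2]), ntohs(addr[3]),
                    ntohs(addr[4]), ntohs(addr[5]),
                    ntohs(addr[6]), ntohs(addr[7]),
                    ntohs(port));
            return buf;
    }

    int
    main(void)
    {
            struct in6_addr a;
            uint16_t groups[8];
            char buf[64];

            /* ::1 (loopback) with source port 22 in network byte order */
            inet_pton(AF_INET6, "::1", &a);
            memcpy(groups, &a, sizeof(groups));
            printf("%s\n",
                format_in6_addr_port(groups, htons(22), buf, sizeof(buf)));
            return 0;
    }

Compiled with "gcc -Wall", this prints "0:0:0:0:0:0:0:1-22", i.e. the same
address-port notation that the patched socket dump emits for AF_INET6 sockets.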