From 7bfed322a8219b1bda85ba66a8b3cbe7e8c8a5ea Mon Sep 17 00:00:00 2001 From: David Anderson Date: Tue, 29 Apr 2008 17:57:33 +0000 Subject: [PATCH] Added crash-devel subpackage. Updated crash.patch to match upstream version 4.0-6.3. --- crash.patch | 16192 ++++++++++++++++++++++++++------------------------ crash.spec | 6 +- 2 files changed, 8591 insertions(+), 7607 deletions(-) diff --git a/crash.patch b/crash.patch index 9a2cc45..97f4351 100644 --- a/crash.patch +++ b/crash.patch @@ -1,4 +1,4 @@ ---- crash/configure.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/configure.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/configure.c 2008-02-04 10:30:09.000000000 -0500 @@ -1,8 +1,8 @@ /* configure.c - core analysis suite @@ -146,8 +146,8 @@ } /* ---- crash/Makefile.orig 2008-02-19 16:16:13.000000000 -0500 -+++ crash/Makefile 2008-02-19 16:16:12.000000000 -0500 +--- crash/Makefile.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/Makefile 2008-04-29 13:51:49.000000000 -0400 @@ -3,8 +3,8 @@ # Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. # www.missioncriticallinux.com, info@missioncriticallinux.com @@ -477,7 +477,7 @@ + +do_extensions: + @(cd extensions; make -i TARGET=$(TARGET) TARGET_CFLAGS=$(TARGET_CFLAGS)) ---- crash/lkcd_dump_v7.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/lkcd_dump_v7.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_dump_v7.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* lkcd_dump_v5.h - core analysis suite @@ -499,7 +499,7 @@ /* define TRUE and FALSE for use in our dump modules */ #ifndef FALSE ---- crash/cmdline.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/cmdline.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/cmdline.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* cmdline.c - core analysis suite @@ -935,7 +935,7 @@ + stall(1000); } } ---- crash/lkcd_dump_v5.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/lkcd_dump_v5.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_dump_v5.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* lkcd_dump_v5.h - core analysis suite @@ -957,9 +957,9 @@ /* define TRUE and FALSE for use in our dump modules */ #ifndef FALSE ---- crash/xen_hyper.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/xen_hyper.c 2008-01-04 09:42:08.000000000 -0500 -@@ -0,0 +1,1975 @@ +--- crash/xen_hyper.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/xen_hyper.c 2008-04-23 15:50:49.000000000 -0400 +@@ -0,0 +1,1981 @@ +/* + * xen_hyper.c + * @@ -1003,6 +1003,12 @@ + long member_offset; +#endif + ++ if (machine_type("X86_64") && ++ symbol_exists("xen_phys_start") && !xen_phys_start()) ++ error(WARNING, ++ "This hypervisor is relocatable; if initialization fails below, try\n" ++ " using the \"--xen_phys_start
\" command line option.\n\n"); ++ + if (symbol_exists("crashing_cpu")) { + get_symbol_data("crashing_cpu", sizeof(xht->crashing_cpu), + &xht->crashing_cpu); @@ -2935,8 +2941,8 @@ + fprintf(out, "PCPU: %2d VCPU: %lx\n", vcc->processor, vcpu); +} +#endif ---- crash/netdump.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/netdump.c 2008-01-16 10:00:24.000000000 -0500 +--- crash/netdump.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/netdump.c 2008-04-23 14:49:00.000000000 -0400 @@ -1,7 +1,7 @@ /* netdump.c * @@ -3162,13 +3168,13 @@ - else + if (machine_type_mismatch(file, "X86_64", NULL, + source_query)) - goto bailout; ++ goto bailout; + break; + + case EM_386: + if (machine_type_mismatch(file, "X86", NULL, + source_query)) -+ goto bailout; + goto bailout; + break; default: @@ -3458,15 +3464,14 @@ off_t offset; + struct pt_load_segment *pls; + int i; - -- offset = (off_t)paddr + (off_t)nd->header_size; ++ + switch (DUMPFILE_FORMAT(nd->flags)) + { + case NETDUMP_ELF32: + offset = (off_t)paddr + (off_t)nd->header_size; + break; -- if (lseek(nd->ndfd, offset, SEEK_SET) != offset) +- offset = (off_t)paddr + (off_t)nd->header_size; + case NETDUMP_ELF64: + case KDUMP_ELF32: + case KDUMP_ELF64: @@ -3490,7 +3495,8 @@ + + break; + } -+ + +- if (lseek(nd->ndfd, offset, SEEK_SET) != offset) + if (lseek(nd->ndfd, offset, SEEK_SET) == -1) return SEEK_ERROR; @@ -3806,7 +3812,7 @@ netdump_print(" elf32: %lx\n", nd->elf32); netdump_print(" notes32: %lx\n", nd->notes32); netdump_print(" load32: %lx\n", nd->load32); -@@ -577,11 +841,66 @@ +@@ -577,11 +841,68 @@ netdump_print(" nt_prpsinfo: %lx\n", nd->nt_prpsinfo); netdump_print(" nt_taskstruct: %lx\n", nd->nt_taskstruct); netdump_print(" task_struct: %lx\n", nd->task_struct); @@ -3844,6 +3850,8 @@ + nd->xen_kdump_data->cache_hits * 100 / nd->xen_kdump_data->accesses); + netdump_print("\n p2m_frames: %d\n", + nd->xen_kdump_data->p2m_frames); ++ netdump_print(" xen_phys_start: %lx\n", ++ nd->xen_kdump_data->xen_phys_start); + netdump_print(" p2m_mfn_frame_list: %lx\n", + nd->xen_kdump_data->p2m_mfn_frame_list); + for (i = 0; i < nd->xen_kdump_data->p2m_frames; i++) @@ -3875,7 +3883,7 @@ dump_Elf32_Ehdr(nd->elf32); dump_Elf32_Phdr(nd->notes32, ELFREAD); for (i = 0; i < nd->num_pt_load_segments; i++) -@@ -594,6 +913,7 @@ +@@ -594,6 +915,7 @@ break; case NETDUMP_ELF64: @@ -3883,7 +3891,7 @@ dump_Elf64_Ehdr(nd->elf64); dump_Elf64_Phdr(nd->notes64, ELFREAD); for (i = 0; i < nd->num_pt_load_segments; i++) -@@ -865,6 +1185,9 @@ +@@ -865,6 +1187,9 @@ netdump_print(" e_machine: %d ", elf->e_machine); switch (elf->e_machine) { @@ -3893,7 +3901,7 @@ case EM_IA_64: netdump_print("(EM_IA_64)\n"); break; -@@ -961,8 +1284,11 @@ +@@ -961,8 +1286,11 @@ pls->phys_start = prog->p_paddr; netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, prog->p_filesz); @@ -3906,7 +3914,7 @@ netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, prog->p_memsz); netdump_print(" p_flags: %lx (", prog->p_flags); -@@ -1030,19 +1356,22 @@ +@@ -1030,19 +1358,22 @@ netdump_print("(?)\n"); } @@ -3935,7 +3943,7 @@ prog->p_memsz); netdump_print(" p_flags: %lx (", prog->p_flags); others = 0; -@@ -1053,7 +1382,7 @@ +@@ -1053,7 +1384,7 @@ if (prog->p_flags & PF_R) netdump_print("%sPF_R", others++ ? 
"|" : ""); netdump_print(")\n"); @@ -3944,7 +3952,7 @@ } /* -@@ -1061,20 +1390,22 @@ +@@ -1061,20 +1392,22 @@ */ static size_t @@ -3969,7 +3977,7 @@ ptr = (char *)note + sizeof(Elf32_Nhdr); BCOPY(ptr, buf, note->n_namesz); netdump_print("(\"%s\")\n", buf); -@@ -1085,17 +1416,26 @@ +@@ -1085,17 +1418,26 @@ { case NT_PRSTATUS: netdump_print("(NT_PRSTATUS)\n"); @@ -4000,7 +4008,7 @@ nd->nt_taskstruct = (void *)note; nd->task_struct = *((ulong *)(ptr + note->n_namesz)); nd->switch_stack = *((ulong *) -@@ -1105,25 +1445,128 @@ +@@ -1105,25 +1447,130 @@ case NT_DISKDUMP: netdump_print("(NT_DISKDUMP)\n"); uptr = (ulong *)(ptr + note->n_namesz); @@ -4082,6 +4090,8 @@ + */ + if (!nd->xen_kdump_data->p2m_mfn) + nd->xen_kdump_data->p2m_mfn = *(uptr+(words-1)); ++ if (words > 9 && !nd->xen_kdump_data->xen_phys_start) ++ nd->xen_kdump_data->xen_phys_start = *(uptr+(words-2)); + } + } + break; @@ -4140,7 +4150,7 @@ netdump_print("\n"); len = sizeof(Elf32_Nhdr); -@@ -1135,15 +1578,17 @@ +@@ -1135,15 +1582,17 @@ static size_t @@ -4160,7 +4170,7 @@ note = (Elf64_Nhdr *)((char *)nd->elf64 + offset); -@@ -1151,6 +1596,7 @@ +@@ -1151,6 +1600,7 @@ netdump_print(" n_namesz: %ld ", note->n_namesz); BZERO(buf, BUFSIZE); ptr = (char *)note + sizeof(Elf64_Nhdr); @@ -4168,7 +4178,7 @@ BCOPY(ptr, buf, note->n_namesz); netdump_print("(\"%s\")\n", buf); -@@ -1160,17 +1606,26 @@ +@@ -1160,17 +1610,26 @@ { case NT_PRSTATUS: netdump_print("(NT_PRSTATUS)\n"); @@ -4199,7 +4209,7 @@ nd->nt_taskstruct = (void *)note; nd->task_struct = *((ulong *)(ptr + note->n_namesz)); nd->switch_stack = *((ulong *) -@@ -1180,24 +1635,149 @@ +@@ -1180,24 +1639,151 @@ case NT_DISKDUMP: netdump_print("(NT_DISKDUMP)\n"); iptr = (int *)(ptr + note->n_namesz); @@ -4294,6 +4304,8 @@ + */ + if (!nd->xen_kdump_data->p2m_mfn) + nd->xen_kdump_data->p2m_mfn = *(up+(words-1)); ++ if (words > 9 && !nd->xen_kdump_data->xen_phys_start) ++ nd->xen_kdump_data->xen_phys_start = *(up+(words-2)); + } + } + break; @@ -4359,7 +4371,7 @@ } if (!lf) netdump_print("\n"); -@@ -1251,39 +1831,70 @@ +@@ -1251,39 +1837,70 @@ default: error(FATAL, @@ -4442,7 +4454,7 @@ bt->machdep = (void *)user_regs; } -@@ -1295,13 +1906,14 @@ +@@ -1295,13 +1912,14 @@ * the raw stack for some reasonable hooks. */ @@ -4459,7 +4471,7 @@ int check_hardirq, check_softirq; if (!is_task_active(bt->task)) { -@@ -1309,17 +1921,31 @@ +@@ -1309,17 +1927,31 @@ return; } @@ -4493,7 +4505,7 @@ STREQ(sym, "disk_dump")) { *eip = *up; *esp = search ? -@@ -1354,7 +1980,7 @@ +@@ -1354,7 +1986,7 @@ next_sysrq: *eip = *up; *esp = bt->stackbase + ((char *)(up+4) - bt->stackbuf); @@ -4502,7 +4514,7 @@ for (i++, up++; i < LONGS_PER_STACK; i++, up++) { sym = closest_symbol(*up); if (STREQ(sym, "sysrq_handle_crash")) -@@ -1371,7 +1997,15 @@ +@@ -1371,7 +2003,15 @@ *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); @@ -4519,7 +4531,7 @@ return; } -@@ -1385,6 +2019,18 @@ +@@ -1385,6 +2025,18 @@ bt->stackbase + ((char *)(up-1) - bt->stackbuf); ipintr_func = *(up - 2); } @@ -4538,7 +4550,7 @@ } if (ipintr_eip) { -@@ -1393,6 +2039,12 @@ +@@ -1393,6 +2045,12 @@ return; } @@ -4551,7 +4563,7 @@ if (panic) return; -@@ -1418,7 +2070,9 @@ +@@ -1418,7 +2076,9 @@ goto retry; } @@ -4562,7 +4574,7 @@ machdep->get_stack_frame(bt, eip, esp); } -@@ -1429,8 +2083,18 @@ +@@ -1429,8 +2089,24 @@ Elf64_Nhdr *note; size_t len; @@ -4575,15 +4587,21 @@ + * panic task. Whereas in kdump, regs are captured for all + * CPUs if they responded to an IPI. 
+ */ -+ if (nd->num_prstatus_notes > 1) ++ if (nd->num_prstatus_notes > 1) { ++ if (bt->tc->processor >= nd->num_prstatus_notes) ++ error(FATAL, ++ "cannot determine NT_PRSTATUS ELF note " ++ "for %s task: %lx\n", ++ (bt->task == tt->panic_task) ? ++ "panic" : "active", bt->task); + note = (Elf64_Nhdr *) + nd->nt_prstatus_percpu[bt->tc->processor]; -+ else ++ } else + note = (Elf64_Nhdr *)nd->nt_prstatus; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); -@@ -1446,3 +2110,205 @@ +@@ -1446,3 +2122,224 @@ { return (nd->flags & PARTIAL_DUMP ? TRUE : FALSE); } @@ -4789,8 +4807,27 @@ + + return FALSE; +} ---- crash/x86_64.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/x86_64.c 2008-01-04 09:42:08.000000000 -0500 ++ ++void ++set_xen_phys_start(char *arg) ++{ ++ ulong value; ++ int errflag = 0; ++ ++ value = htol(arg, RETURN_ON_ERROR|QUIET, &errflag); ++ if (!errflag) ++ xen_kdump_data.xen_phys_start = value; ++ else ++ error(WARNING, "invalid xen_phys_start argument: %s\n", arg); ++} ++ ++ulong ++xen_phys_start(void) ++{ ++ return nd->xen_kdump_data->xen_phys_start; ++} +--- crash/x86_64.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/x86_64.c 2008-04-23 14:59:49.000000000 -0400 @@ -1,7 +1,7 @@ /* x86_64.c -- core analysis suite * @@ -5007,7 +5044,31 @@ break; case POST_GDB: -@@ -158,16 +272,49 @@ +@@ -140,8 +254,23 @@ + MEMBER_OFFSET_INIT(thread_struct_rip, "thread_struct", "rip"); + MEMBER_OFFSET_INIT(thread_struct_rsp, "thread_struct", "rsp"); + MEMBER_OFFSET_INIT(thread_struct_rsp0, "thread_struct", "rsp0"); ++ if (INVALID_MEMBER(thread_struct_rip)) ++ MEMBER_OFFSET_INIT(thread_struct_rip, "thread_struct", "ip"); ++ if (INVALID_MEMBER(thread_struct_rsp)) ++ MEMBER_OFFSET_INIT(thread_struct_rsp, "thread_struct", "sp"); ++ if (INVALID_MEMBER(thread_struct_rsp0)) ++ MEMBER_OFFSET_INIT(thread_struct_rsp0, "thread_struct", "sp0"); + STRUCT_SIZE_INIT(tss_struct, "tss_struct"); + MEMBER_OFFSET_INIT(tss_struct_ist, "tss_struct", "ist"); ++ if (INVALID_MEMBER(tss_struct_ist)) { ++ long x86_tss_offset, ist_offset; ++ x86_tss_offset = MEMBER_OFFSET("tss_struct", "x86_tss"); ++ ist_offset = MEMBER_OFFSET("x86_hw_tss", "ist"); ++ if ((x86_tss_offset != INVALID_OFFSET) && ++ (ist_offset != INVALID_OFFSET)) ++ ASSIGN_OFFSET(tss_struct_ist) = x86_tss_offset + ++ ist_offset; ++ } + MEMBER_OFFSET_INIT(user_regs_struct_rip, + "user_regs_struct", "rip"); + MEMBER_OFFSET_INIT(user_regs_struct_rsp, +@@ -158,16 +287,49 @@ if ((machdep->machspec->irqstack = (char *) malloc(machdep->machspec->stkinfo.isize)) == NULL) error(FATAL, "cannot malloc irqstack space."); @@ -5065,7 +5126,7 @@ break; case POST_INIT: -@@ -191,10 +338,26 @@ +@@ -191,10 +353,26 @@ fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PT_REGS_INIT) fprintf(fp, "%sPT_REGS_INIT", others++ ? 
"|" : ""); @@ -5094,7 +5155,7 @@ fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); -@@ -215,13 +378,32 @@ +@@ -215,13 +393,32 @@ fprintf(fp, " back_trace: x86_64_back_trace_cmd()\n"); else if (machdep->back_trace == x86_64_low_budget_back_trace_cmd) fprintf(fp, @@ -5130,7 +5191,7 @@ fprintf(fp, " get_task_pgd: x86_64_get_task_pgd()\n"); fprintf(fp, " dump_irq: x86_64_dump_irq()\n"); fprintf(fp, " get_stack_frame: x86_64_get_stack_frame()\n"); -@@ -239,6 +421,11 @@ +@@ -239,6 +436,11 @@ fprintf(fp, " is_uvaddr: x86_64_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: x86_64_init_kernel_pgd()\n"); @@ -5142,7 +5203,7 @@ fprintf(fp, " line_number_hooks: x86_64_line_number_hooks\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); -@@ -248,9 +435,33 @@ +@@ -248,9 +450,33 @@ fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); @@ -5177,7 +5238,7 @@ fprintf(fp, " pto: %s", machdep->flags & PT_REGS_INIT ? "\n" : "(uninitialized)\n"); if (machdep->flags & PT_REGS_INIT) { -@@ -276,8 +487,10 @@ +@@ -276,8 +502,10 @@ fprintf(fp, " rsp: %ld\n", ms->pto.rsp); fprintf(fp, " ss: %ld\n", ms->pto.ss); } @@ -5190,7 +5251,7 @@ fprintf(fp, " ebase[%s][7]:", arg ? "NR_CPUS" : "cpus"); cpus = arg ? NR_CPUS : kt->cpus; -@@ -306,9 +519,9 @@ +@@ -306,9 +534,9 @@ static void x86_64_cpu_pda_init(void) { @@ -5202,7 +5263,7 @@ struct syment *sp, *nsp; ulong offset, istacksize; -@@ -320,18 +533,44 @@ +@@ -320,18 +548,44 @@ MEMBER_OFFSET_INIT(x8664_pda_irqstackptr, "x8664_pda", "irqstackptr"); MEMBER_OFFSET_INIT(x8664_pda_level4_pgt, "x8664_pda", "level4_pgt"); MEMBER_OFFSET_INIT(x8664_pda_cpunumber, "x8664_pda", "cpunumber"); @@ -5253,7 +5314,7 @@ break; cpus++; -@@ -351,8 +590,8 @@ +@@ -351,8 +605,8 @@ i, level4_pgt, data_offset); } @@ -5264,7 +5325,7 @@ istacksize = i; } else if ((sp = symbol_search("boot_cpu_stack")) && (nsp = next_symbol(NULL, sp))) { -@@ -381,8 +620,9 @@ +@@ -381,8 +635,9 @@ * the address of &boot_cpu_stack[0]. */ sp = value_search(machdep->machspec->stkinfo.ibase[0], &offset); @@ -5276,7 +5337,7 @@ error(WARNING, "cpu 0 IRQ stack: %lx\n boot_cpu_stack: %lx\n\n", machdep->machspec->stkinfo.ibase[0], -@@ -448,6 +688,13 @@ +@@ -448,6 +703,13 @@ if (ms->stkinfo.ebase[c][0] == 0) break; } @@ -5290,7 +5351,7 @@ } if (ms->stkinfo.ebase[0][0] && ms->stkinfo.ebase[0][1]) -@@ -535,6 +782,10 @@ +@@ -535,6 +797,10 @@ if (clues >= 2) kt->cpu_flags[c] |= NMI; } @@ -5301,7 +5362,7 @@ } /* -@@ -576,7 +827,7 @@ +@@ -576,7 +842,7 @@ ulong x86_64_VTOP(ulong vaddr) { if (vaddr >= __START_KERNEL_map) @@ -5310,7 +5371,7 @@ else return ((vaddr) - PAGE_OFFSET); } -@@ -584,12 +835,21 @@ +@@ -584,12 +850,21 @@ /* * Include both vmalloc'd and module address space as VMALLOC space. */ @@ -5333,7 +5394,7 @@ /* * Refining this may cause more problems than just doing it this way. 
*/ -@@ -616,43 +876,52 @@ +@@ -616,43 +891,52 @@ */ static int @@ -5410,7 +5471,7 @@ if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; -@@ -682,29 +951,31 @@ +@@ -682,29 +966,31 @@ /* * ptep = pte_offset_map(pmd, address); @@ -5461,7 +5522,7 @@ return TRUE; -@@ -713,1982 +984,4815 @@ +@@ -713,309 +999,835 @@ return FALSE; } @@ -5701,7 +5762,6 @@ - else - return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); -} -- + ulong mm; + ulong *pgd; + ulong pgd_paddr; @@ -5717,6 +5777,9 @@ + physaddr_t physpage; + char buf[BUFSIZE]; ++ if (!tc) ++ error(FATAL, "current context invalid\n"); + -/* - * easy enough... - */ @@ -5724,29 +5787,26 @@ -x86_64_processor_speed(void) -{ - unsigned long cpu_khz; -+ if (!tc) -+ error(FATAL, "current context invalid\n"); ++ *paddr = 0; - if (machdep->mhz) - return (machdep->mhz); -+ *paddr = 0; ++ if (IS_KVADDR(uvaddr)) ++ return x86_64_kvtop(tc, uvaddr, paddr, verbose); - if (symbol_exists("cpu_khz")) { - get_symbol_data("cpu_khz", sizeof(long), &cpu_khz); - if (cpu_khz) - return(machdep->mhz = cpu_khz/1000); - } -+ if (IS_KVADDR(uvaddr)) -+ return x86_64_kvtop(tc, uvaddr, paddr, verbose); - -- return 0; --} + if ((mm = task_mm(tc->task, TRUE))) + pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); + else + readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, + sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); +- return 0; +-} + pgd_paddr = x86_64_VTOP((ulong)pgd); + FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); + pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); @@ -5756,14 +5816,6 @@ + if (!(pgd_pte & _PAGE_PRESENT)) + goto no_upage; --/* -- * Accept or reject a symbol from the kernel namelist. -- */ --static int --x86_64_verify_symbol(const char *name, ulong value, char type) --{ -- if (STREQ(name, "_text") || STREQ(name, "_stext")) -- machdep->flags |= KSYMS_START; + /* + * pmd = pmd_offset(pgd, address); + */ @@ -5783,10 +5835,18 @@ + fprintf(fp, " PAGE: %lx (2MB) [machine]\n", + PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); +-/* +- * Accept or reject a symbol from the kernel namelist. 
+- */ +-static int +-x86_64_verify_symbol(const char *name, ulong value, char type) +-{ +- if (STREQ(name, "_text") || STREQ(name, "_stext")) +- machdep->flags |= KSYMS_START; ++ pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); + - if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) - return FALSE; -+ pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); -+ + if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { + if (verbose) + fprintf(fp, " PAGE: page not available\n"); @@ -6068,6 +6128,10 @@ - ulong *up; - struct machine_specific *ms; - struct bt_info bt_local; +- +- if (bt->flags & BT_EFRAME_SEARCH2) { +- BCOPY(bt, &bt_local, sizeof(struct bt_info)); +- bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; + ulong *pml4; + ulong *pgd; + ulong pgd_paddr; @@ -6080,14 +6144,15 @@ + ulong pte; + physaddr_t physpage; -- if (bt->flags & BT_EFRAME_SEARCH2) { -- BCOPY(bt, &bt_local, sizeof(struct bt_info)); -- bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; +- ms = machdep->machspec; + if (!IS_KVADDR(kvaddr)) + return FALSE; - -- ms = machdep->machspec; ++ + if (XEN_HYPER_MODE()) { ++ if (XEN_VIRT_ADDR(kvaddr)) { ++ *paddr = kvaddr - XEN_VIRT_START + xen_phys_start(); ++ return TRUE; ++ } + if (DIRECTMAP_VIRT_ADDR(kvaddr)) { + *paddr = kvaddr - DIRECTMAP_VIRT_START; + return TRUE; @@ -6103,16 +6168,7 @@ + *paddr = x86_64_VTOP(kvaddr); + return TRUE; + } - -- for (c = 0; c < kt->cpus; c++) { -- if (ms->stkinfo.ibase[c] == 0) -- break; -- bt->hp->esp = ms->stkinfo.ibase[c]; -- fprintf(fp, "CPU %d IRQ STACK:\n", c); -- if ((cnt = x86_64_eframe_search(bt))) -- fprintf(fp, "\n"); -- else -- fprintf(fp, "(none found)\n\n"); ++ + if (!IS_VMALLOC_ADDR(kvaddr)) { + *paddr = x86_64_VTOP(kvaddr); + if (!verbose) @@ -6159,28 +6215,14 @@ + fprintf(fp, " PAGE: %lx (2MB)\n\n", + PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); + x86_64_translate_pte(pmd_pte, 0, 0); - } - -- for (c = 0; c < kt->cpus; c++) { -- for (i = 0; i < 7; i++) { -- if (ms->stkinfo.ebase[c][i] == 0) -- break; -- bt->hp->esp = ms->stkinfo.ebase[c][i]; -- fprintf(fp, "CPU %d %s EXCEPTION STACK:\n", -- c, x86_64_exception_stacks[i]); -- if ((cnt = x86_64_eframe_search(bt))) -- fprintf(fp, "\n"); -- else -- fprintf(fp, "(none found)\n\n"); -- } -- } ++ } ++ + physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + + (kvaddr & ~_2MB_PAGE_MASK); + *paddr = physpage; + return TRUE; + } - -- return 0; ++ + /* + * ptep = pte_offset_map(pmd, addr); + * pte = *ptep; @@ -6197,52 +6239,22 @@ + x86_64_translate_pte(pte, 0, 0); + } + goto no_kpage; - } - -- if (bt->hp && bt->hp->esp) { -- ms = machdep->machspec; -- bt->stkptr = bt->hp->esp; -- if ((estack = x86_64_in_exception_stack(bt))) { -- stacksize = ms->stkinfo.esize; -- bt->stackbase = estack; -- bt->stacktop = estack + ms->stkinfo.esize; -- bt->stackbuf = ms->irqstack; -- alter_stackbuf(bt); -- } else if ((irqstack = x86_64_in_irqstack(bt))) { -- stacksize = ms->stkinfo.isize; -- bt->stackbase = irqstack; -- bt->stacktop = irqstack + ms->stkinfo.isize; -- bt->stackbuf = ms->irqstack; -- alter_stackbuf(bt); -- } else if (!INSTACK(bt->stkptr, bt)) -- error(FATAL, -- "unrecognized stack address for this task: %lx\n", -- bt->hp->esp); -- } ++ } ++ + *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); - -- stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); ++ + if (verbose) { + fprintf(fp, " PAGE: %lx\n\n", + PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); + x86_64_translate_pte(pte, 0, 0); + } - -- if (bt->stkptr) -- i = (bt->stkptr - bt->stackbase)/sizeof(ulong); -- else -- i = 0; ++ + return TRUE; - -- for (cnt 
= 0; i <= stacksize/sizeof(ulong); i++) { -- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ +no_kpage: + return FALSE; +} - -- if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT| -- EFRAME_VERIFY, 0, (char *)up, bt, fp)) -- cnt++; ++ + +static int +x86_64_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) @@ -6270,7 +6282,7 @@ + if (verbose) { + fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); + fprintf(fp, "PAGE DIRECTORY: %lx [machine]\n", *pml4); - } ++ } + if (!(*pml4) & _PAGE_PRESENT) + goto no_kpage; + pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; @@ -6284,9 +6296,7 @@ + fprintf(fp, " PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); + if (!(pgd_pte & _PAGE_PRESENT)) + goto no_kpage; - -- return cnt; --} ++ + /* + * pmd = pmd_offset(pgd, addr); + */ @@ -6305,32 +6315,16 @@ + if (verbose) + fprintf(fp, " PAGE: %lx (2MB) [machine]\n", + PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); - --static void --x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp) --{ -- int i, u_idx; -- ulong *up; -- ulong words, addr; ++ + pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); - -- words = (rsp - bt->frameptr) / sizeof(ulong) + 1; ++ + if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { + if (verbose) + fprintf(fp, " PAGE: page not available\n"); + *paddr = PADDR_NOT_AVAILABLE; + return FALSE; + } - -- addr = bt->frameptr; -- u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); -- for (i = 0; i < words; i++, u_idx++) { -- if (!(i & 1)) -- fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); -- -- up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); -- fprintf(ofp, "%016lx ", *up); -- addr += sizeof(ulong); ++ + pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); + + if (verbose) { @@ -6347,18 +6341,8 @@ + + *paddr = physpage; + return TRUE; - } -- fprintf(ofp, "\n"); --} - --/* -- * Check a frame for a requested reference. -- */ --static void --x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) --{ -- struct syment *sp; -- ulong offset; ++ } ++ + /* + * ptep = pte_offset_map(pmd, addr); + * pte = *ptep; @@ -6379,289 +6363,86 @@ + } + goto no_kpage; + } - -- if (!name) -- sp = value_search(text, &offset); -- else if (!text) -- sp = symbol_search(name); ++ + pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); + if (verbose) + fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); - -- switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) -- { -- case BT_REF_SYMBOL: -- if (name) { -- if (STREQ(name, bt->ref->str)) -- bt->ref->cmdflags |= BT_REF_FOUND; -- } else { -- if (sp && !offset && STREQ(sp->name, bt->ref->str)) -- bt->ref->cmdflags |= BT_REF_FOUND; -- } -- break; ++ + *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); - -- case BT_REF_HEXVAL: -- if (text) { -- if (bt->ref->hexval == text) -- bt->ref->cmdflags |= BT_REF_FOUND; -- } else if (sp && (bt->ref->hexval == sp->value)) -- bt->ref->cmdflags |= BT_REF_FOUND; -- else if (!name && !text && (bt->ref->hexval == 0)) -- bt->ref->cmdflags |= BT_REF_FOUND; -- break; ++ + if (verbose) { + fprintf(fp, " PAGE: %lx [machine]\n", + PAGEBASE(pte) & PHYSICAL_PAGE_MASK); + fprintf(fp, " PAGE: %lx\n\n", + PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); + x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); - } ++ } + + return TRUE; + +no_kpage: + return FALSE; - } - ++} + - /* -- * print one entry of a stack trace ++ ++/* + * Determine where vmalloc'd memory starts. 
- */ --#define BACKTRACE_COMPLETE (1) --#define BACKTRACE_ENTRY_IGNORED (2) --#define BACKTRACE_ENTRY_DISPLAYED (3) --#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4) -- --static int --x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, -- int stkindex, ulong text) ++ */ +static ulong +x86_64_vmalloc_start(void) - { -- ulong rsp, offset; -- struct syment *sp; -- char *name; -- int result; -- long eframe_check; -- char buf[BUFSIZE]; -- -- eframe_check = -1; -- offset = 0; -- sp = value_search(text, &offset); -- if (!sp) -- return BACKTRACE_ENTRY_IGNORED; -- -- name = sp->name; -- -- if (bt->flags & BT_TEXT_SYMBOLS) { -- if (bt->flags & BT_EXCEPTION_FRAME) -- rsp = bt->stkptr; -- else -- rsp = bt->stackbase + (stkindex * sizeof(long)); -- fprintf(ofp, " [%s] %s at %lx\n", -- mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)), -- name, text); -- if (BT_REFERENCE_CHECK(bt)) -- x86_64_do_bt_reference_check(bt, text, name); -- return BACKTRACE_ENTRY_DISPLAYED; -- } -- -- if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) && -- !(bt->flags & BT_START)) { -- if (STREQ(name, "child_rip")) { -- if (symbol_exists("kernel_thread")) -- name = "kernel_thread"; -- else if (symbol_exists("arch_kernel_thread")) -- name = "arch_kernel_thread"; -- } -- else if (!(bt->flags & BT_SCHEDULE)) { -- if (STREQ(name, "error_exit")) -- eframe_check = 8; -- else { -- if (CRASHDEBUG(2)) -- fprintf(ofp, -- "< ignoring text symbol with no offset: %s() >\n", -- sp->name); -- return BACKTRACE_ENTRY_IGNORED; -- } -- } -- } -- -- if (bt->flags & BT_SCHEDULE) -- name = "schedule"; -- -- if (STREQ(name, "child_rip")) { -- if (symbol_exists("kernel_thread")) -- name = "kernel_thread"; -- else if (symbol_exists("arch_kernel_thread")) -- name = "arch_kernel_thread"; -- result = BACKTRACE_COMPLETE; -- } else if (STREQ(name, "cpu_idle")) -- result = BACKTRACE_COMPLETE; -- else -- result = BACKTRACE_ENTRY_DISPLAYED; -- -- if (bt->flags & BT_EXCEPTION_FRAME) -- rsp = bt->stkptr; -- else if (bt->flags & BT_START) -- rsp = bt->stkptr; -- else -- rsp = bt->stackbase + (stkindex * sizeof(long)); -- -- if ((bt->flags & BT_FULL)) { -- if (bt->frameptr) -- x86_64_display_full_frame(bt, rsp, ofp); -- bt->frameptr = rsp + sizeof(ulong); -- } -- -- fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level, -- rsp, name, text); -- -- if (bt->flags & BT_LINE_NUMBERS) { -- get_line_number(text, buf, FALSE); -- if (strlen(buf)) -- fprintf(ofp, " %s\n", buf); -- } -- -- if (eframe_check >= 0) { -- if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, -- bt->stackbase + (stkindex*sizeof(long)) + eframe_check, -- NULL, bt, ofp)) -- result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED; -- } -- -- if (BT_REFERENCE_CHECK(bt)) -- x86_64_do_bt_reference_check(bt, text, name); -- -- bt->call_target = name; -- -- if (is_direct_call_target(bt)) { -- if (CRASHDEBUG(2)) -- fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", -- bt->call_target); -- bt->flags |= BT_CHECK_CALLER; -- } else { -- if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER)) -- fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", -- bt->call_target); -- if (bt->flags & BT_CHECK_CALLER) { -- if (CRASHDEBUG(2)) -- fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n"); -- bt->flags |= BT_NO_CHECK_CALLER; -- } -- bt->flags &= ~(ulonglong)BT_CHECK_CALLER; -- } -- -- return result; ++{ + return ((ulong)VMALLOC_START); - } - - /* -- * Unroll a kernel stack. ++} ++ ++/* + * thread_info implementation makes for less accurate results here. 
- */ --static void --x86_64_back_trace_cmd(struct bt_info *bt) ++ */ +static int +x86_64_is_task_addr(ulong task) - { -- error(FATAL, "x86_64_back_trace_cmd: TBD\n"); ++{ + if (tt->flags & THREAD_INFO) + return IS_KVADDR(task); + else + return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); - } - - -- - /* -- * Determine whether the initial stack pointer is located in one of the -- * exception stacks. ++} ++ ++ ++/* + * easy enough... - */ - static ulong --x86_64_in_exception_stack(struct bt_info *bt) ++ */ ++static ulong +x86_64_processor_speed(void) - { -- int c, i; -- ulong rsp; -- ulong estack; -- struct machine_specific *ms; ++{ + unsigned long cpu_khz = 0; - -- rsp = bt->stkptr; -- ms = machdep->machspec; -- estack = 0; ++ + if (machdep->mhz) + return (machdep->mhz); - -- for (c = 0; !estack && (c < kt->cpus); c++) { -- for (i = 0; i < 7; i++) { -- if (ms->stkinfo.ebase[c][i] == 0) -- break; -- if ((rsp >= ms->stkinfo.ebase[c][i]) && -- (rsp < (ms->stkinfo.ebase[c][i] + -- ms->stkinfo.esize))) { -- estack = ms->stkinfo.ebase[c][i]; -- if (c != bt->tc->processor) -- error(INFO, -- "task cpu: %d exception stack cpu: %d\n", -- bt->tc->processor, c); -- break; -- } -- } ++ + if (symbol_exists("cpu_khz")) { + get_symbol_data("cpu_khz", sizeof(int), &cpu_khz); + if (cpu_khz) + return(machdep->mhz = cpu_khz/1000); - } - -- return estack; -+ return 0; - } - ++ } + - /* -- * Determine whether the current stack pointer is in a cpu's irqstack. ++ return 0; ++} ++ ++ ++/* + * Accept or reject a symbol from the kernel namelist. - */ --static ulong --x86_64_in_irqstack(struct bt_info *bt) ++ */ +static int +x86_64_verify_symbol(const char *name, ulong value, char type) - { -- int c; -- ulong rsp; -- ulong irqstack; -- struct machine_specific *ms; ++{ + if (STREQ(name, "_text") || STREQ(name, "_stext")) + machdep->flags |= KSYMS_START; - -- rsp = bt->stkptr; -- ms = machdep->machspec; -- irqstack = 0; ++ + if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) + return FALSE; + return TRUE; +} - -- for (c = 0; !irqstack && (c < kt->cpus); c++) { -- if (ms->stkinfo.ibase[c] == 0) -- break; -- if ((rsp >= ms->stkinfo.ibase[c]) && -- (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) { -- irqstack = ms->stkinfo.ibase[c]; -- if (c != bt->tc->processor) -- error(INFO, -- "task cpu: %d IRQ stack cpu: %d\n", -- bt->tc->processor, c); -- break; -- } -- } - -- return irqstack; ++ ++ +/* + * Get the relevant page directory pointer from a task structure. + */ @@ -6669,38 +6450,16 @@ +x86_64_get_task_pgd(ulong task) +{ + return (error(FATAL, "x86_64_get_task_pgd: N/A\n")); - } - --#define STACK_TRANSITION_ERRMSG_E_I_P \ --"cannot transition from exception stack to IRQ stack to current process stack:\n exception stack pointer: %lx\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" --#define STACK_TRANSITION_ERRMSG_E_P \ --"cannot transition from exception stack to current process stack:\n exception stack pointer: %lx\n process stack pointer: %lx\n current_stack_base: %lx\n" --#define STACK_TRANSITION_ERRMSG_I_P \ --"cannot transition from IRQ stack to current process stack:\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx" - - /* -- * Low-budget back tracer -- dump text return addresses, following call chain -- * when possible, along with any verifiable exception frames. ++} ++ ++ ++/* + * Translate a PTE, returning TRUE if the page is present. + * If a physaddr pointer is passed in, don't print anything. 
- */ --static void --x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in) ++ */ +static int +x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused) - { -- int i, level, done; -- ulong rsp, offset, stacktop; -- ulong *up; -- long cs; -- struct syment *sp, *spt; -- FILE *ofp; -- ulong estack, irqstack; -- ulong irq_eframe; -- struct bt_info bt_local, *bt; -- struct machine_specific *ms; -- ulong last_process_stack_eframe; -- ulong user_mode_eframe; ++{ + int c, others, len1, len2, len3; + ulong paddr; + char buf[BUFSIZE]; @@ -6710,125 +6469,53 @@ + char physbuf[BUFSIZE]; + char *arglist[MAXARGS]; + int page_present; - -- bt = &bt_local; -- BCOPY(bt_in, bt, sizeof(struct bt_info)); ++ + paddr = pte & PHYSICAL_PAGE_MASK; + page_present = pte & _PAGE_PRESENT; - -- level = 0; -- done = FALSE; -- irq_eframe = 0; -- last_process_stack_eframe = 0; -- bt->call_target = NULL; -- rsp = bt->stkptr; -- if (!rsp) { -- error(INFO, "cannot determine starting stack pointer\n"); -- return; ++ + if (physaddr) { + *((ulong *)physaddr) = paddr; + return page_present; - } -- ms = machdep->machspec; -- if (BT_REFERENCE_CHECK(bt)) -- ofp = pc->nullfp; -- else -- ofp = fp; ++ } + + sprintf(ptebuf, "%lx", pte); + len1 = MAX(strlen(ptebuf), strlen("PTE")); + fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); - -- if (bt->flags & BT_TEXT_SYMBOLS) { -- fprintf(ofp, "%sSTART: %s%s at %lx\n", -- space(VADDR_PRLEN > 8 ? 14 : 6), -- closest_symbol(bt->instptr), -- STREQ(closest_symbol(bt->instptr), "thread_return") ? -- " (schedule)" : "", -- bt->instptr); -- } else if (bt->flags & BT_START) { -- x86_64_print_stack_entry(bt, ofp, level, -- 0, bt->instptr); -- bt->flags &= ~BT_START; -- level++; -- } ++ + if (!page_present && pte) { + swap_location(pte, buf); + if ((c = parse_line(buf, arglist)) != 3) + error(FATAL, "cannot determine swap location\n"); - ++ + len2 = MAX(strlen(arglist[0]), strlen("SWAP")); + len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); - -- if ((estack = x86_64_in_exception_stack(bt))) { --in_exception_stack: -- bt->flags |= BT_EXCEPTION_STACK; -- /* -- * The stack buffer will have been loaded with the process -- * stack, so switch to the indicated exception stack. -- */ -- bt->stackbase = estack; -- bt->stacktop = estack + ms->stkinfo.esize; -- bt->stackbuf = ms->irqstack; ++ + fprintf(fp, "%s %s\n", + mkstring(buf2, len2, CENTER|LJUST, "SWAP"), + mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); - -- if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -- bt->stacktop - bt->stackbase, -- bt->hp && (bt->hp->esp == bt->stkptr) ? -- "irqstack contents via hook" : "irqstack contents", -- RETURN_ON_ERROR)) -- error(FATAL, "read of exception stack at %lx failed\n", -- bt->stackbase); ++ + strcpy(buf2, arglist[0]); + strcpy(buf3, arglist[2]); + fprintf(fp, "%s %s %s\n", + mkstring(ptebuf, len1, CENTER|RJUST, NULL), + mkstring(buf2, len2, CENTER|RJUST, NULL), + mkstring(buf3, len3, CENTER|RJUST, NULL)); - -- /* -- * If irq_eframe is set, we've jumped back here from the -- * IRQ stack dump below. Do basically the same thing as if -- * had come from the processor stack, but presume that we -- * must have been in kernel mode, i.e., took an exception -- * while operating on an IRQ stack. 
(untested) -- */ -- if (irq_eframe) { -- bt->flags |= BT_EXCEPTION_FRAME; -- i = (irq_eframe - bt->stackbase)/sizeof(ulong); -- x86_64_print_stack_entry(bt, ofp, level, i, -- bt->instptr); -- bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; -- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -- bt->stackbuf + (irq_eframe - bt->stackbase), -- bt, ofp); -- rsp += SIZE(pt_regs); /* guaranteed kernel mode */ -- level++; -- irq_eframe = 0; -- } ++ + return page_present; + } - -- stacktop = bt->stacktop - SIZE(pt_regs); ++ + sprintf(physbuf, "%lx", paddr); + len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); + fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); - -- for (i = (rsp - bt->stackbase)/sizeof(ulong); -- !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { ++ + fprintf(fp, "FLAGS\n"); - -- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ + fprintf(fp, "%s %s ", + mkstring(ptebuf, len1, CENTER|RJUST, NULL), + mkstring(physbuf, len2, CENTER|RJUST, NULL)); + fprintf(fp, "("); + others = 0; - -- if (!is_kernel_text(*up)) -- continue; ++ + if (pte) { + if (pte & _PAGE_PRESENT) + fprintf(fp, "%sPRESENT", others++ ? "|" : ""); @@ -6855,32 +6542,12 @@ + } else { + fprintf(fp, "no mapping"); + } - -- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) -- { -- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -- rsp += SIZE(pt_regs); -- i += SIZE(pt_regs)/sizeof(ulong); -- case BACKTRACE_ENTRY_DISPLAYED: -- level++; -- break; -- case BACKTRACE_ENTRY_IGNORED: -- break; -- case BACKTRACE_COMPLETE: -- done = TRUE; -- break; -- } -- } ++ + fprintf(fp, ")\n"); - -- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -- bt->stackbuf + (bt->stacktop - bt->stackbase) - -- SIZE(pt_regs), bt, ofp); ++ + return (page_present); +} - -- if (!BT_REFERENCE_CHECK(bt)) -- fprintf(fp, "--- ---\n"); ++ +static char * +x86_64_exception_stacks[7] = { + "STACKFAULT", @@ -6891,20 +6558,7 @@ + "(unknown)", + "(unknown)" +}; - -- /* -- * stack = (unsigned long *) estack_end[-2]; -- */ -- up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); -- up -= 2; -- rsp = bt->stkptr = *up; -- up -= 3; -- bt->instptr = *up; -- if (cs & 3) -- done = TRUE; /* user-mode exception */ -- else -- done = FALSE; /* kernel-mode exception */ -- bt->frameptr = 0; ++ +/* + * Look for likely exception frames in a stack. + */ @@ -6916,57 +6570,32 @@ + ulong *up; + struct machine_specific *ms; + struct bt_info bt_local; - -- /* -- * Print the return values from the estack end. -- */ -- if (!done) { -- bt->flags |= BT_START; -- x86_64_print_stack_entry(bt, ofp, level, -- 0, bt->instptr); -- bt->flags &= ~BT_START; -- level++; -- } -- } ++ + if (bt->flags & BT_EFRAME_SEARCH2) { + BCOPY(bt, &bt_local, sizeof(struct bt_info)); + bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; - -- /* -- * IRQ stack entry always comes in via the process stack, regardless -- * whether it happened while running in user or kernel space. -- */ -- if (!done && (irqstack = x86_64_in_irqstack(bt))) { -- bt->flags |= BT_IRQSTACK; -- /* -- * Until coded otherwise, the stackbase will be pointing to -- * either the exception stack or, more likely, the process -- * stack base. Switch it to the IRQ stack. -- */ -- bt->stackbase = irqstack; -- bt->stacktop = irqstack + ms->stkinfo.isize; -- bt->stackbuf = ms->irqstack; ++ + ms = machdep->machspec; -- if (!readmem(bt->stackbase, KVADDR, -- bt->stackbuf, bt->stacktop - bt->stackbase, -- bt->hp && (bt->hp->esp == bt_in->stkptr) ? 
-- "irqstack contents via hook" : "irqstack contents", -- RETURN_ON_ERROR)) -- error(FATAL, "read of IRQ stack at %lx failed\n", -- bt->stackbase); -+ for (c = 0; c < kt->cpus; c++) { -+ if (ms->stkinfo.ibase[c] == 0) -+ break; -+ bt->hp->esp = ms->stkinfo.ibase[c]; -+ fprintf(fp, "CPU %d IRQ STACK:\n", c); -+ if ((cnt = x86_64_eframe_search(bt))) -+ fprintf(fp, "\n"); -+ else -+ fprintf(fp, "(none found)\n\n"); -+ } + for (c = 0; c < kt->cpus; c++) { + if (ms->stkinfo.ibase[c] == 0) +@@ -1028,1667 +1840,4013 @@ + fprintf(fp, "(none found)\n\n"); + } -- stacktop = bt->stacktop - 64; /* from kernel code */ +- for (c = 0; c < kt->cpus; c++) { +- for (i = 0; i < 7; i++) { +- if (ms->stkinfo.ebase[c][i] == 0) +- break; +- bt->hp->esp = ms->stkinfo.ebase[c][i]; +- fprintf(fp, "CPU %d %s EXCEPTION STACK:\n", +- c, x86_64_exception_stacks[i]); +- if ((cnt = x86_64_eframe_search(bt))) +- fprintf(fp, "\n"); +- else +- fprintf(fp, "(none found)\n\n"); +- } +- } + for (c = 0; c < kt->cpus; c++) { + for (i = 0; i < 7; i++) { + if (ms->stkinfo.ebase[c][i] == 0) @@ -6980,13 +6609,10 @@ + fprintf(fp, "(none found)\n\n"); + } + } - -- for (i = (rsp - bt->stackbase)/sizeof(ulong); -- !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { ++ + return 0; + } - -- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); ++ + if (bt->hp && bt->hp->esp) { + ms = machdep->machspec; + bt->stkptr = bt->hp->esp; @@ -7007,24 +6633,9 @@ + "unrecognized stack address for this task: %lx\n", + bt->hp->esp); + } - -- if (!is_kernel_text(*up)) -- continue; ++ + stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); - -- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) -- { -- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -- rsp += SIZE(pt_regs); -- i += SIZE(pt_regs)/sizeof(ulong); -- case BACKTRACE_ENTRY_DISPLAYED: -- level++; -- break; -- case BACKTRACE_ENTRY_IGNORED: -- break; -- case BACKTRACE_COMPLETE: -- done = TRUE; -- break; ++ + if (bt->stkptr) + i = (bt->stkptr - bt->stackbase)/sizeof(ulong); + else @@ -7147,27 +6758,12 @@ + } else { + instr = 2; + arg = 3; - } - } - -- if (!BT_REFERENCE_CHECK(bt)) -- fprintf(fp, "--- ---\n"); ++ } ++ } ++ + if (c < (arg+1)) + break; - -- /* -- * stack = (unsigned long *) (irqstack_end[-1]); -- * (where irqstack_end is 64 bytes below page end) -- */ -- up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); -- up -= 1; -- irq_eframe = rsp = bt->stkptr = *up; -- up -= 1; -- bt->instptr = *up; -- bt->frameptr = 0; -- done = FALSE; -- } else -- irq_eframe = 0; ++ + if (STREQ(arglist[instr], "jmpq") || STREQ(arglist[instr], "jmp")) { + p1 = arglist[arg]; + reterror = 0; @@ -7178,37 +6774,15 @@ + } + } + close_tmpfile2(); - -- if (!done && (estack = x86_64_in_exception_stack(bt))) -- goto in_exception_stack; ++ + if (!locking_func) + bt->flags |= BT_FRAMESIZE_DISABLE; - -- if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { -- /* -- * Verify that the rsp pointer taken from either the -- * exception or IRQ stack points into the process stack. 
-- */ -- bt->stackbase = GET_STACKBASE(bt->tc->task); -- bt->stacktop = GET_STACKTOP(bt->tc->task); ++ + return locking_func; - -- if (!INSTACK(rsp, bt)) { -- switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) -- { -- case (BT_EXCEPTION_STACK|BT_IRQSTACK): -- error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, -- bt_in->stkptr, bt->stkptr, rsp, -- bt->stackbase); ++ +} - -- case BT_EXCEPTION_STACK: -- error(FATAL, STACK_TRANSITION_ERRMSG_E_P, -- bt_in->stkptr, rsp, bt->stackbase); - -- case BT_IRQSTACK: -- error(FATAL, STACK_TRANSITION_ERRMSG_I_P, -- bt_in->stkptr, rsp, bt->stackbase); ++ ++ +/* + * print one entry of a stack trace + */ @@ -7266,18 +6840,10 @@ + "< ignoring text symbol with no offset: %s() >\n", + sp->name); + return BACKTRACE_ENTRY_IGNORED; - } - } ++ } ++ } + } - -- /* -- * Now fill the local stack buffer from the process stack. -- */ -- if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, -- bt->stacktop - bt->stackbase, -- "irqstack contents", RETURN_ON_ERROR)) -- error(FATAL, "read of process stack at %lx failed\n", -- bt->stackbase); ++ + if (bt->flags & BT_SCHEDULE) + name = "schedule"; + @@ -7303,21 +6869,8 @@ + if (bt->frameptr) + x86_64_display_full_frame(bt, rsp, ofp); + bt->frameptr = rsp + sizeof(ulong); - } - -- /* -- * For a normally blocked task, hand-create the first level. -- */ -- if (!done && -- !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) && -- STREQ(closest_symbol(bt->instptr), "thread_return")) { -- bt->flags |= BT_SCHEDULE; -- i = (rsp - bt->stackbase)/sizeof(ulong); -- x86_64_print_stack_entry(bt, ofp, level, -- i, bt->instptr); -- bt->flags &= ~(ulonglong)BT_SCHEDULE; -- rsp += sizeof(ulong); -- level++; ++ } ++ + fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, + rsp, name, text); + @@ -7327,12 +6880,8 @@ + if ((locking_func = text_lock_function(name, bt, text)) && + (spl = value_search(locking_func, &offset))) + fprintf(ofp, " (via %s)", spl->name); - } - -- /* -- * Dump the IRQ exception frame from the process stack. -- * If the CS register indicates a user exception frame, -- * then set done to TRUE to avoid the process stack walk-through. ++ } ++ + if (bt->flags & BT_FRAMESIZE_DISABLE) + fprintf(ofp, " *"); + @@ -8145,17 +7694,8 @@ + * Dump the IRQ exception frame from the process stack. + * If the CS register indicates a user exception frame, + * then set done to TRUE to avoid the process stack walk-through. - * Otherwise, bump up the rsp past the kernel-mode eframe. - */ -- if (irq_eframe) { -- bt->flags |= BT_EXCEPTION_FRAME; -- i = (irq_eframe - bt->stackbase)/sizeof(ulong); -- x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); -- bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; -- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, -- bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); -- if (cs & 3) -- done = TRUE; /* IRQ from user-mode */ ++ * Otherwise, bump up the rsp past the kernel-mode eframe. 
++ */ + if (irq_eframe) { + bt->flags |= BT_EXCEPTION_FRAME; + level = dwarf_print_stack_entry(bt, level); @@ -8343,33 +7883,68 @@ + INVALID_OFFSET); + err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) == + INVALID_OFFSET); -+ err |= ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == -+ INVALID_OFFSET); + err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) == + INVALID_OFFSET); + err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) == + INVALID_OFFSET); -+ err |= ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.orig_rax = -+ MEMBER_OFFSET("pt_regs", "orig_rax")) == -+ INVALID_OFFSET); -+ err |= ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == -+ INVALID_OFFSET); ++ /* ++ * x86/x86_64 merge changed traditional register names. ++ */ ++ if (((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "bp")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "ax")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "bx")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "cx")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "dx")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "si")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "di")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "ip")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == ++ INVALID_OFFSET) && ++ ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "sp")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) == ++ INVALID_OFFSET) && ++ ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "flags")) == ++ INVALID_OFFSET)) ++ err++; ++ if (((ms->pto.orig_rax = MEMBER_OFFSET("pt_regs", "orig_rax")) == ++ INVALID_OFFSET) && ++ ((ms->pto.orig_rax = MEMBER_OFFSET("pt_regs", "orig_ax")) == ++ INVALID_OFFSET)) ++ err++; + + if (err) + error(WARNING, "pt_regs structure has changed\n"); @@ -9006,7 +8581,8 @@ + } + } + } -+ + +- return 0; + return buf; +} + @@ -9081,14 +8657,41 @@ + !strstr(buf2, "+")) + sprintf(p1, buf1); + } -+ } -+ + } + +- if (bt->hp && bt->hp->esp) { +- ms = machdep->machspec; +- bt->stkptr = bt->hp->esp; +- if ((estack = 
x86_64_in_exception_stack(bt))) { +- stacksize = ms->stkinfo.esize; +- bt->stackbase = estack; +- bt->stacktop = estack + ms->stkinfo.esize; +- bt->stackbuf = ms->irqstack; +- alter_stackbuf(bt); +- } else if ((irqstack = x86_64_in_irqstack(bt))) { +- stacksize = ms->stkinfo.isize; +- bt->stackbase = irqstack; +- bt->stacktop = irqstack + ms->stkinfo.isize; +- bt->stackbuf = ms->irqstack; +- alter_stackbuf(bt); +- } else if (!INSTACK(bt->stkptr, bt)) +- error(FATAL, +- "unrecognized stack address for this task: %lx\n", +- bt->hp->esp); +- } + console(" %s", inbuf); -+ + +- stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); + return TRUE; +} -+ -+ + +- if (bt->stkptr) +- i = (bt->stkptr - bt->stackbase)/sizeof(ulong); +- else +- i = 0; + +- for (cnt = 0; i <= stacksize/sizeof(ulong); i++) { +- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); +/* + * Override smp_num_cpus if possible and necessary. + */ @@ -9098,7 +8701,10 @@ + int i, cpus, nr_pda, cpunumber, _cpu_pda; + char *cpu_pda_buf; + ulong level4_pgt, cpu_pda_addr; -+ + +- if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT| +- EFRAME_VERIFY, 0, (char *)up, bt, fp)) +- cnt++; + if (!VALID_STRUCT(x8664_pda)) + return 1; + @@ -9107,9 +8713,7 @@ + if (LKCD_KERNTYPES()) { + if (symbol_exists("_cpu_pda")) + _cpu_pda = TRUE; - else -- rsp += SIZE(pt_regs); -- level++; ++ else + _cpu_pda = FALSE; + nr_pda = get_cpus_possible(); + } else { @@ -9140,8 +8744,9 @@ + if (cpunumber != cpus) + break; + cpus++; -+ } -+ + } + +- return cnt; + FREEBUF(cpu_pda_buf); + + return cpus; @@ -9176,20 +8781,34 @@ + cmd_usage(pc->curcmd, SYNOPSIS); + + x86_64_display_machine_stats(); -+} -+ + } + +/* + * "mach" command output. + */ -+static void + static void +-x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp) +x86_64_display_machine_stats(void) -+{ + { +- int i, u_idx; +- ulong *up; +- ulong words, addr; + struct new_utsname *uts; + char buf[BUFSIZE]; + ulong mhz; -+ + +- words = (rsp - bt->frameptr) / sizeof(ulong) + 1; + uts = &kt->utsname; -+ + +- addr = bt->frameptr; +- u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); +- for (i = 0; i < words; i++, u_idx++) { +- if (!(i & 1)) +- fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); +- +- up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); +- fprintf(ofp, "%016lx ", *up); +- addr += sizeof(ulong); + fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); + fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); + fprintf(fp, " CPUS: %d\n", kt->cpus); @@ -9228,7 +8847,8 @@ + cpu_data = symbol_value("boot_cpu_data"); + boot_cpu = TRUE; + cpus = 1; -+ } + } +- fprintf(ofp, "\n"); + if (symbol_exists("_cpu_pda")) { + cpu_pda = symbol_value("_cpu_pda"); + _cpu_pda = TRUE; @@ -9257,11 +8877,12 @@ + } + cpu_data += SIZE(cpuinfo_x86); + } -+} -+ -+/* + } + + /* +- * Check a frame for a requested reference. 
+ * "mach -m" -+ */ + */ +static char *e820type[] = { + "(invalid type)", + "E820_RAM", @@ -9270,23 +8891,50 @@ + "E820_NVS", +}; + -+static void + static void +-x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) +x86_64_display_memmap(void) -+{ + { +- struct syment *sp; +- ulong offset; + ulong e820; + int nr_map, i; + char *buf, *e820entry_ptr; + ulonglong addr, size; + uint type; -+ + +- if (!name) +- sp = value_search(text, &offset); +- else if (!text) +- sp = symbol_search(name); + e820 = symbol_value("e820"); + if (CRASHDEBUG(1)) + dump_struct("e820map", e820, RADIX(16)); + buf = (char *)GETBUF(SIZE(e820map)); -+ + +- switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) +- { +- case BT_REF_SYMBOL: +- if (name) { +- if (STREQ(name, bt->ref->str)) +- bt->ref->cmdflags |= BT_REF_FOUND; +- } else { +- if (sp && !offset && STREQ(sp->name, bt->ref->str)) +- bt->ref->cmdflags |= BT_REF_FOUND; +- } +- break; + readmem(e820, KVADDR, &buf[0], SIZE(e820map), + "e820map", FAULT_ON_ERROR); -+ + +- case BT_REF_HEXVAL: +- if (text) { +- if (bt->ref->hexval == text) +- bt->ref->cmdflags |= BT_REF_FOUND; +- } else if (sp && (bt->ref->hexval == sp->value)) +- bt->ref->cmdflags |= BT_REF_FOUND; +- else if (!name && !text && (bt->ref->hexval == 0)) +- bt->ref->cmdflags |= BT_REF_FOUND; +- break; + nr_map = INT(buf + OFFSET(e820map_nr_map)); + + fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); @@ -9298,20 +8946,56 @@ + type = UINT(e820entry_ptr + OFFSET(e820entry_type)); + fprintf(fp, "%016llx - %016llx %s\n", addr, addr+size, + e820type[type]); -+ } -+} -+ -+ + } + } + +-/* +- * print one entry of a stack trace +- */ +-#define BACKTRACE_COMPLETE (1) +-#define BACKTRACE_ENTRY_IGNORED (2) +-#define BACKTRACE_ENTRY_DISPLAYED (3) +-#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4) +- +-static int +-x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, +- int stkindex, ulong text) +-{ +- ulong rsp, offset; +- struct syment *sp; +- char *name; +- int result; +- long eframe_check; +- char buf[BUFSIZE]; + +- eframe_check = -1; +- offset = 0; +- sp = value_search(text, &offset); +- if (!sp) +- return BACKTRACE_ENTRY_IGNORED; +static const char *hook_files[] = { + "arch/x86_64/kernel/entry.S", + "arch/x86_64/kernel/head.S", + "arch/x86_64/kernel/semaphore.c" +}; -+ + +- name = sp->name; +#define ENTRY_S ((char **)&hook_files[0]) +#define HEAD_S ((char **)&hook_files[1]) +#define SEMAPHORE_C ((char **)&hook_files[2]) -+ + +- if (bt->flags & BT_TEXT_SYMBOLS) { +- if (bt->flags & BT_EXCEPTION_FRAME) +- rsp = bt->stkptr; +- else +- rsp = bt->stackbase + (stkindex * sizeof(long)); +- fprintf(ofp, " [%s] %s at %lx\n", +- mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)), +- name, text); +- if (BT_REFERENCE_CHECK(bt)) +- x86_64_do_bt_reference_check(bt, text, name); +- return BACKTRACE_ENTRY_DISPLAYED; +- } +static struct line_number_hook x86_64_line_number_hooks[] = { + {"ret_from_fork", ENTRY_S}, + {"system_call", ENTRY_S}, @@ -9346,29 +9030,72 @@ + {"spurious_interrupt_bug", ENTRY_S}, + {"machine_check", ENTRY_S}, + {"call_debug", ENTRY_S}, -+ + +- if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) && +- !(bt->flags & BT_START)) { +- if (STREQ(name, "child_rip")) { +- if (symbol_exists("kernel_thread")) +- name = "kernel_thread"; +- else if (symbol_exists("arch_kernel_thread")) +- name = "arch_kernel_thread"; +- } +- else if (!(bt->flags & BT_SCHEDULE)) { +- if (STREQ(name, "error_exit")) +- eframe_check = 8; +- else { +- if (CRASHDEBUG(2)) +- fprintf(ofp, +- "< ignoring text 
symbol with no offset: %s() >\n", +- sp->name); +- return BACKTRACE_ENTRY_IGNORED; +- } +- } +- } + {NULL, NULL} /* list must be NULL-terminated */ +}; -+ + +- if (bt->flags & BT_SCHEDULE) +- name = "schedule"; +static void +x86_64_dump_line_number(ulong callpc) +{ + error(FATAL, "x86_64_dump_line_number: TBD\n"); +} -+ + +- if (STREQ(name, "child_rip")) { +- if (symbol_exists("kernel_thread")) +- name = "kernel_thread"; +- else if (symbol_exists("arch_kernel_thread")) +- name = "arch_kernel_thread"; +- result = BACKTRACE_COMPLETE; +- } else if (STREQ(name, "cpu_idle")) +- result = BACKTRACE_COMPLETE; +- else +- result = BACKTRACE_ENTRY_DISPLAYED; +void +x86_64_compiler_warning_stub(void) +{ + struct line_number_hook *lhp; + char **p; -+ + +- if (bt->flags & BT_EXCEPTION_FRAME) +- rsp = bt->stkptr; +- else if (bt->flags & BT_START) +- rsp = bt->stkptr; +- else +- rsp = bt->stackbase + (stkindex * sizeof(long)); + lhp = &x86_64_line_number_hooks[0]; lhp++; + p = ENTRY_S; + x86_64_back_trace(NULL, NULL); + get_x86_64_frame(NULL, NULL, NULL); + x86_64_dump_line_number(0); +} -+ + +- if ((bt->flags & BT_FULL)) { +- if (bt->frameptr) +- x86_64_display_full_frame(bt, rsp, ofp); +- bt->frameptr = rsp + sizeof(ulong); +- } +/* + * Force the VM address-range selection via: + * @@ -9383,7 +9110,9 @@ + * + * --machdep irq_eframe_link= + */ -+ + +- fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level, +- rsp, name, text); +void +parse_cmdline_arg(void) +{ @@ -9400,53 +9129,46 @@ + error(WARNING, "ignoring --machdep option: %s\n\n", + machdep->cmdline_arg); + return; - } ++ } -- /* -- * Walk the process stack. -- */ -- for (i = (rsp - bt->stackbase)/sizeof(ulong); -- !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { +- if (bt->flags & BT_LINE_NUMBERS) { +- get_line_number(text, buf, FALSE); +- if (strlen(buf)) +- fprintf(ofp, " %s\n", buf); +- } + strcpy(buf, machdep->cmdline_arg); -- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); +- if (eframe_check >= 0) { +- if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, +- bt->stackbase + (stkindex*sizeof(long)) + eframe_check, +- NULL, bt, ofp)) +- result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED; + for (p = buf; *p; p++) { + if (*p == ',') + *p = ' '; -+ } + } -- if (!is_kernel_text(*up)) -- continue; +- if (BT_REFERENCE_CHECK(bt)) +- x86_64_do_bt_reference_check(bt, text, name); + c = parse_line(buf, arglist); -- if ((bt->flags & BT_CHECK_CALLER)) { -- /* -- * A non-zero offset value from the value_search() -- * lets us know if it's a real text return address. -- */ -- spt = value_search(*up, &offset); -- /* -- * sp gets the syment of the function that the text -- * routine above called before leaving its return -- * address on the stack -- if it can be determined. -- */ -- sp = x86_64_function_called_by((*up)-5); +- bt->call_target = name; + for (i = vm_flag = 0; i < c; i++) { + errflag = 0; -- if (sp == NULL) { -- /* -- * We were unable to get the called function. -- * If the text address had an offset, then -- * it must have made an indirect call, and -- * can't have called our target function. 
-- */ -- if (offset) { -- if (CRASHDEBUG(1)) -- fprintf(ofp, -- "< ignoring %s() -- makes indirect call and NOT %s()>\n", -- spt->name, -- bt->call_target); +- if (is_direct_call_target(bt)) { +- if (CRASHDEBUG(2)) +- fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", +- bt->call_target); +- bt->flags |= BT_CHECK_CALLER; +- } else { +- if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER)) +- fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", +- bt->call_target); +- if (bt->flags & BT_CHECK_CALLER) { +- if (CRASHDEBUG(2)) +- fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n"); +- bt->flags |= BT_NO_CHECK_CALLER; + if (STRNEQ(arglist[i], "vm=")) { + vm_flag++; + p = arglist[i] + strlen("vm="); @@ -9497,46 +9219,26 @@ + value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); + if (!errflag) { + machdep->machspec->irq_eframe_link = value; - continue; - } -- } else if (!STREQ(sp->name, bt->call_target)) { -- /* -- * We got function called by the text routine, -- * but it's not our target function. -- */ -- if (CRASHDEBUG(2)) -- fprintf(ofp, -- "< ignoring %s() -- calls %s() and NOT %s()>\n", -- spt->name, sp->name, -- bt->call_target); -- continue; - } ++ continue; ++ } ++ } } - -- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) +- bt->flags &= ~(ulonglong)BT_CHECK_CALLER; ++ + error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); + lines++; + } + + if (vm_flag) { + switch (machdep->flags & VM_FLAGS) - { -- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: -- last_process_stack_eframe = rsp + 8; -- rsp += SIZE(pt_regs); -- i += SIZE(pt_regs)/sizeof(ulong); -- case BACKTRACE_ENTRY_DISPLAYED: -- level++; ++ { + case 0: - break; -- case BACKTRACE_ENTRY_IGNORED: ++ break; + + case VM_ORIG: + error(NOTE, "using original x86_64 VM address ranges\n"); + lines++; - break; -- case BACKTRACE_COMPLETE: -- done = TRUE; ++ break; + + case VM_2_6_11: + error(NOTE, "using 2.6.11 x86_64 VM address ranges\n"); @@ -9546,17 +9248,8 @@ + case VM_XEN: + error(NOTE, "using xen x86_64 VM address ranges\n"); + lines++; - break; -- } -- } - -- if (!irq_eframe && !is_kernel_thread(bt->tc->task) && -- (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { -- user_mode_eframe = bt->stacktop - SIZE(pt_regs); -- if (last_process_stack_eframe < user_mode_eframe) -- x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + -- (bt->stacktop - bt->stackbase) - SIZE(pt_regs), -- bt, ofp); ++ break; ++ + case VM_XEN_RHEL4: + error(NOTE, "using RHEL4 xen x86_64 VM address ranges\n"); + lines++; @@ -9570,24 +9263,23 @@ + } } -- if (bt->flags & BT_TEXT_SYMBOLS) { -- if (BT_REFERENCE_FOUND(bt)) { -- print_task_header(fp, task_to_context(bt->task), 0); -- BCOPY(bt_in, bt, sizeof(struct bt_info)); -- bt->ref = NULL; -- machdep->back_trace(bt); -- fprintf(fp, "\n"); -- } +- return result; + if (lines) + fprintf(fp, "\n"); -+} -+ + } + +-/* +- * Unroll a kernel stack. +- */ +-static void +-x86_64_back_trace_cmd(struct bt_info *bt) +void +x86_64_clear_machdep_cache(void) -+{ + { +- error(FATAL, "x86_64_back_trace_cmd: TBD\n"); + machdep->machspec->last_upml_read = 0; -+} -+ + } + +static void +x86_64_irq_eframe_link_init(void) +{ @@ -9597,32 +9289,91 @@ + char link_register[BUFSIZE]; + char *arglist[MAXARGS]; + ulong max_instructions; -+ + + if (machdep->machspec->irq_eframe_link == UNINITIALIZED) + machdep->machspec->irq_eframe_link = 0; + else + return; -+ + +-/* +- * Determine whether the initial stack pointer is located in one of the +- * exception stacks. 
+- */ +-static ulong +-x86_64_in_exception_stack(struct bt_info *bt) +-{ +- int c, i; +- ulong rsp; +- ulong estack; +- struct machine_specific *ms; + if (THIS_KERNEL_VERSION < LINUX(2,6,9)) + return; -+ + +- rsp = bt->stkptr; +- ms = machdep->machspec; +- estack = 0; + if (!(sp = symbol_search("common_interrupt")) || + !(spn = next_symbol(NULL, sp))) { + return; + } -+ + +- for (c = 0; !estack && (c < kt->cpus); c++) { +- for (i = 0; i < 7; i++) { +- if (ms->stkinfo.ebase[c][i] == 0) +- break; +- if ((rsp >= ms->stkinfo.ebase[c][i]) && +- (rsp < (ms->stkinfo.ebase[c][i] + +- ms->stkinfo.esize))) { +- estack = ms->stkinfo.ebase[c][i]; +- if (c != bt->tc->processor) +- error(INFO, +- "task cpu: %d exception stack cpu: %d\n", +- bt->tc->processor, c); +- break; +- } +- } +- } + max_instructions = spn->value - sp->value; -+ + +- return estack; +-} + open_tmpfile(); -+ + +-/* +- * Determine whether the current stack pointer is in a cpu's irqstack. +- */ +-static ulong +-x86_64_in_irqstack(struct bt_info *bt) +-{ +- int c; +- ulong rsp; +- ulong irqstack; +- struct machine_specific *ms; + sprintf(buf, "x/%ldi 0x%lx", + max_instructions, sp->value); -+ + +- rsp = bt->stkptr; +- ms = machdep->machspec; +- irqstack = 0; + if (!gdb_pass_through(buf, pc->tmpfile, GNU_RETURN_ON_ERROR)) + return; -+ + +- for (c = 0; !irqstack && (c < kt->cpus); c++) { +- if (ms->stkinfo.ibase[c] == 0) +- break; +- if ((rsp >= ms->stkinfo.ibase[c]) && +- (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) { +- irqstack = ms->stkinfo.ibase[c]; +- if (c != bt->tc->processor) +- error(INFO, +- "task cpu: %d IRQ stack cpu: %d\n", +- bt->tc->processor, c); +- break; +- } +- } + link_register[0] = NULLCHAR; -+ + +- return irqstack; + rewind(pc->tmpfile); + while (fgets(buf, BUFSIZE, pc->tmpfile)) { + if (!strstr(buf, sp->name)) @@ -9631,7 +9382,7 @@ + continue; + if (strstr(arglist[2], "push")) + strcpy(link_register, arglist[3]); - } ++ } + close_tmpfile(); + + if (CRASHDEBUG(1)) @@ -9644,28 +9395,41 @@ + } +-#define STACK_TRANSITION_ERRMSG_E_I_P \ +-"cannot transition from exception stack to IRQ stack to current process stack:\n exception stack pointer: %lx\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" +-#define STACK_TRANSITION_ERRMSG_E_P \ +-"cannot transition from exception stack to current process stack:\n exception stack pointer: %lx\n process stack pointer: %lx\n current_stack_base: %lx\n" +-#define STACK_TRANSITION_ERRMSG_I_P \ +-"cannot transition from IRQ stack to current process stack:\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx" +#include "netdump.h" -+ + /* -- * Functions that won't be called indirectly. -- * Add more to this as they are discovered. +- * Low-budget back tracer -- dump text return addresses, following call chain +- * when possible, along with any verifiable exception frames. + * From the xen vmcore, create an index of mfns for each page that makes + * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. 
*/ --static const char *direct_call_targets[] = { -- "schedule", -- "schedule_timeout", -- NULL --}; +-static void +-x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in) + +#define MAX_X86_64_FRAMES (512) +#define MFNS_PER_FRAME (PAGESIZE()/sizeof(ulong)) - - static int --is_direct_call_target(struct bt_info *bt) ++ ++static int +x86_64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) { -- int i; +- int i, level, done; +- ulong rsp, offset, stacktop; +- ulong *up; +- long cs; +- struct syment *sp, *spt; +- FILE *ofp; +- ulong estack, irqstack; +- ulong irq_eframe; +- struct bt_info bt_local, *bt; +- struct machine_specific *ms; +- ulong last_process_stack_eframe; +- ulong user_mode_eframe; + int i, j; + ulong kvaddr; + ulong *up; @@ -9673,20 +9437,44 @@ + ulong frame_mfn[MAX_X86_64_FRAMES] = { 0 }; + int mfns[MAX_X86_64_FRAMES] = { 0 }; -- if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER)) -- return FALSE; +- bt = &bt_local; +- BCOPY(bt_in, bt, sizeof(struct bt_info)); + /* + * Temporarily read physical (machine) addresses from vmcore by + * going directly to read_netdump() instead of via read_kdump(). + */ + pc->readmem = read_netdump; -- for (i = 0; direct_call_targets[i]; i++) { -- if (STREQ(direct_call_targets[i], bt->call_target)) -- return TRUE; +- level = 0; +- done = FALSE; +- irq_eframe = 0; +- last_process_stack_eframe = 0; +- bt->call_target = NULL; +- rsp = bt->stkptr; +- if (!rsp) { +- error(INFO, "cannot determine starting stack pointer\n"); +- return; +- } +- ms = machdep->machspec; +- if (BT_REFERENCE_CHECK(bt)) +- ofp = pc->nullfp; +- else +- ofp = fp; + if (xkd->flags & KDUMP_CR3) + goto use_cr3; -+ + +- if (bt->flags & BT_TEXT_SYMBOLS) { +- fprintf(ofp, "%sSTART: %s%s at %lx\n", +- space(VADDR_PRLEN > 8 ? 14 : 6), +- closest_symbol(bt->instptr), +- STREQ(closest_symbol(bt->instptr), "thread_return") ? +- " (schedule)" : "", +- bt->instptr); +- } else if (bt->flags & BT_START) { +- x86_64_print_stack_entry(bt, ofp, level, +- 0, bt->instptr); +- bt->flags &= ~BT_START; +- level++; + if (CRASHDEBUG(1)) + fprintf(fp, "x86_64_xen_kdump_p2m_create: p2m_mfn: %lx\n", + xkd->p2m_mfn); @@ -9719,54 +9507,139 @@ + x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list page"); } -- return FALSE; + if (CRASHDEBUG(1)) + fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames); -+ + +- if ((estack = x86_64_in_exception_stack(bt))) { +-in_exception_stack: +- bt->flags |= BT_EXCEPTION_STACK; +- /* +- * The stack buffer will have been loaded with the process +- * stack, so switch to the indicated exception stack. +- */ +- bt->stackbase = estack; +- bt->stacktop = estack + ms->stkinfo.esize; +- bt->stackbuf = ms->irqstack; + if ((xkd->p2m_mfn_frame_list = (ulong *) + malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) + error(FATAL, "cannot malloc p2m_frame_index_list"); -+ + +- if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, +- bt->stacktop - bt->stackbase, +- bt->hp && (bt->hp->esp == bt->stkptr) ? +- "irqstack contents via hook" : "irqstack contents", + for (i = 0, frames = xkd->p2m_frames; frames; i++) { + if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, + &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], + mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", -+ RETURN_ON_ERROR)) + RETURN_ON_ERROR)) +- error(FATAL, "read of exception stack at %lx failed\n", +- bt->stackbase); + error(FATAL, "cannot read xen kdump p2m mfn list page\n"); -+ + +- /* +- * If irq_eframe is set, we've jumped back here from the +- * IRQ stack dump below. 
Do basically the same thing as if +- * had come from the processor stack, but presume that we +- * must have been in kernel mode, i.e., took an exception +- * while operating on an IRQ stack. (untested) +- */ +- if (irq_eframe) { +- bt->flags |= BT_EXCEPTION_FRAME; +- i = (irq_eframe - bt->stackbase)/sizeof(ulong); +- x86_64_print_stack_entry(bt, ofp, level, i, +- bt->instptr); +- bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; +- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, +- bt->stackbuf + (irq_eframe - bt->stackbase), +- bt, ofp); +- rsp += SIZE(pt_regs); /* guaranteed kernel mode */ +- level++; +- irq_eframe = 0; +- } + frames -= mfns[i]; + } -+ + +- stacktop = bt->stacktop - SIZE(pt_regs); + if (CRASHDEBUG(2)) { + for (i = 0; i < xkd->p2m_frames; i++) + fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); + fprintf(fp, "\n"); + } -+ + +- for (i = (rsp - bt->stackbase)/sizeof(ulong); +- !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { + pc->readmem = read_kdump; + return TRUE; -+ + +- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); +use_cr3: -+ + +- if (!is_kernel_text(*up)) +- continue; + if (CRASHDEBUG(1)) + fprintf(fp, "x86_64_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3); -+ + +- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) +- { +- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: +- rsp += SIZE(pt_regs); +- i += SIZE(pt_regs)/sizeof(ulong); +- case BACKTRACE_ENTRY_DISPLAYED: +- level++; +- break; +- case BACKTRACE_ENTRY_IGNORED: +- break; +- case BACKTRACE_COMPLETE: +- done = TRUE; +- break; +- } +- } + if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->machspec->pml4, + PAGESIZE(), "xen kdump cr3 page", RETURN_ON_ERROR)) + error(FATAL, "cannot read xen kdump cr3 page\n"); -+ + +- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, +- bt->stackbuf + (bt->stacktop - bt->stackbase) - +- SIZE(pt_regs), bt, ofp); + if (CRASHDEBUG(7)) + x86_64_debug_dump_page(fp, machdep->machspec->pml4, + "contents of PML4 page:"); -+ + +- if (!BT_REFERENCE_CHECK(bt)) +- fprintf(fp, "--- ---\n"); + kvaddr = symbol_value("end_pfn"); + if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page)) + return FALSE; + up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); -+ + +- /* +- * stack = (unsigned long *) estack_end[-2]; +- */ +- up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); +- up -= 2; +- rsp = bt->stkptr = *up; +- up -= 3; +- bt->instptr = *up; +- if (cs & 3) +- done = TRUE; /* user-mode exception */ +- else +- done = FALSE; /* kernel-mode exception */ +- bt->frameptr = 0; + xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + + ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0); -+ + +- /* +- * Print the return values from the estack end. +- */ +- if (!done) { +- bt->flags |= BT_START; +- x86_64_print_stack_entry(bt, ofp, level, +- 0, bt->instptr); +- bt->flags &= ~BT_START; +- level++; +- } +- } + if (CRASHDEBUG(1)) + fprintf(fp, "end_pfn at %lx: %lx (%ld) -> %d p2m_frames\n", + kvaddr, *up, *up, xkd->p2m_frames); @@ -9791,41 +9664,57 @@ + xkd->p2m_mfn_frame_list[i] = x86_64_xen_kdump_page_mfn(kvaddr); + kvaddr += PAGESIZE(); + } -+ + +- /* +- * IRQ stack entry always comes in via the process stack, regardless +- * whether it happened while running in user or kernel space. +- */ +- if (!done && (irqstack = x86_64_in_irqstack(bt))) { +- bt->flags |= BT_IRQSTACK; +- /* +- * Until coded otherwise, the stackbase will be pointing to +- * either the exception stack or, more likely, the process +- * stack base. Switch it to the IRQ stack. 
+- */ +- bt->stackbase = irqstack; +- bt->stacktop = irqstack + ms->stkinfo.isize; +- bt->stackbuf = ms->irqstack; + if (CRASHDEBUG(1)) { + for (i = 0; i < xkd->p2m_frames; i++) + fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); + fprintf(fp, "\n"); + } -+ + +- if (!readmem(bt->stackbase, KVADDR, +- bt->stackbuf, bt->stacktop - bt->stackbase, +- bt->hp && (bt->hp->esp == bt_in->stkptr) ? +- "irqstack contents via hook" : "irqstack contents", +- RETURN_ON_ERROR)) +- error(FATAL, "read of IRQ stack at %lx failed\n", +- bt->stackbase); + machdep->last_pgd_read = 0; + machdep->last_ptbl_read = 0; + machdep->last_pmd_read = 0; + pc->readmem = read_kdump; -+ -+ return TRUE; - } --static struct syment * --x86_64_function_called_by(ulong rip) +- stacktop = bt->stacktop - 64; /* from kernel code */ ++ return TRUE; ++} + +- for (i = (rsp - bt->stackbase)/sizeof(ulong); +- !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { +static char * +x86_64_xen_kdump_load_page(ulong kvaddr, char *pgbuf) - { -- struct syment *sp; -- char buf[BUFSIZE], *p1; -- ulong value, offset; -- unsigned char byte; ++{ + ulong mfn; + ulong *pml4, *pgd, *pmd, *ptep; -- value = 0; -- sp = NULL; +- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); + pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); + mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -- if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte", -- RETURN_ON_ERROR)) -- return sp; +- if (!is_kernel_text(*up)) +- continue; + if (CRASHDEBUG(3)) + fprintf(fp, + "[%lx] pml4: %lx mfn: %lx pml4_index: %lx\n", @@ -9839,83 +9728,258 @@ + x86_64_debug_dump_page(fp, machdep->pgd, + "contents of page upper directory page:"); -- if (byte != 0xe8) -- return sp; +- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) +- { +- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: +- rsp += SIZE(pt_regs); +- i += SIZE(pt_regs)/sizeof(ulong); +- case BACKTRACE_ENTRY_DISPLAYED: +- level++; +- break; +- case BACKTRACE_ENTRY_IGNORED: +- break; +- case BACKTRACE_COMPLETE: +- done = TRUE; +- break; +- } +- } + pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); + mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ + +- if (!BT_REFERENCE_CHECK(bt)) +- fprintf(fp, "--- ---\n"); + if (CRASHDEBUG(3)) + fprintf(fp, + "[%lx] pgd: %lx mfn: %lx pgd_index: %lx\n", + kvaddr, *pgd, mfn, pgd_index(kvaddr)); -+ + +- /* +- * stack = (unsigned long *) (irqstack_end[-1]); +- * (where irqstack_end is 64 bytes below page end) +- */ +- up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); +- up -= 1; +- irq_eframe = rsp = bt->stkptr = *up; +- up -= 1; +- bt->instptr = *up; +- bt->frameptr = 0; +- done = FALSE; +- } else +- irq_eframe = 0; + if (!readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), + "xen kdump pmd page", RETURN_ON_ERROR)) + error(FATAL, "cannot read/find pmd page\n"); -+ + +- if (!done && (estack = x86_64_in_exception_stack(bt))) +- goto in_exception_stack; + if (CRASHDEBUG(7)) + x86_64_debug_dump_page(fp, machdep->pmd, + "contents of page middle directory page:"); -+ + +- if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { +- /* +- * Verify that the rsp pointer taken from either the +- * exception or IRQ stack points into the process stack. 
+- */ +- bt->stackbase = GET_STACKBASE(bt->tc->task); +- bt->stacktop = GET_STACKTOP(bt->tc->task); + pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); + mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ + +- if (!INSTACK(rsp, bt)) { +- switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) +- { +- case (BT_EXCEPTION_STACK|BT_IRQSTACK): +- error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, +- bt_in->stkptr, bt->stkptr, rsp, +- bt->stackbase); + if (CRASHDEBUG(3)) + fprintf(fp, + "[%lx] pmd: %lx mfn: %lx pmd_index: %lx\n", + kvaddr, *pmd, mfn, pmd_index(kvaddr)); -+ + +- case BT_EXCEPTION_STACK: +- error(FATAL, STACK_TRANSITION_ERRMSG_E_P, +- bt_in->stkptr, rsp, bt->stackbase); + if (!readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), + "xen kdump page table page", RETURN_ON_ERROR)) + error(FATAL, "cannot read/find page table page\n"); -+ + +- case BT_IRQSTACK: +- error(FATAL, STACK_TRANSITION_ERRMSG_I_P, +- bt_in->stkptr, rsp, bt->stackbase); +- } +- } + if (CRASHDEBUG(7)) + x86_64_debug_dump_page(fp, machdep->ptbl, + "contents of page table page:"); -+ + +- /* +- * Now fill the local stack buffer from the process stack. +- */ +- if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, +- bt->stacktop - bt->stackbase, +- "irqstack contents", RETURN_ON_ERROR)) +- error(FATAL, "read of process stack at %lx failed\n", +- bt->stackbase); +- } + ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); + mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ + +- /* +- * For a normally blocked task, hand-create the first level. +- */ +- if (!done && +- !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) && +- STREQ(closest_symbol(bt->instptr), "thread_return")) { +- bt->flags |= BT_SCHEDULE; +- i = (rsp - bt->stackbase)/sizeof(ulong); +- x86_64_print_stack_entry(bt, ofp, level, +- i, bt->instptr); +- bt->flags &= ~(ulonglong)BT_SCHEDULE; +- rsp += sizeof(ulong); +- level++; +- } + if (CRASHDEBUG(3)) + fprintf(fp, + "[%lx] ptep: %lx mfn: %lx pte_index: %lx\n", + kvaddr, *ptep, mfn, pte_index(kvaddr)); -+ + +- /* +- * Dump the IRQ exception frame from the process stack. +- * If the CS register indicates a user exception frame, +- * then set done to TRUE to avoid the process stack walk-through. +- * Otherwise, bump up the rsp past the kernel-mode eframe. +- */ +- if (irq_eframe) { +- bt->flags |= BT_EXCEPTION_FRAME; +- i = (irq_eframe - bt->stackbase)/sizeof(ulong); +- x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); +- bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; +- cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, +- bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); +- if (cs & 3) +- done = TRUE; /* IRQ from user-mode */ +- else +- rsp += SIZE(pt_regs); +- level++; +- } + if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), + "xen kdump page table page", RETURN_ON_ERROR)) + error(FATAL, "cannot read/find pte page\n"); -+ + +- /* +- * Walk the process stack. +- */ +- for (i = (rsp - bt->stackbase)/sizeof(ulong); +- !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { + if (CRASHDEBUG(7)) + x86_64_debug_dump_page(fp, pgbuf, + "contents of page:"); -+ + +- up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); + return pgbuf; +} -+ + +- if (!is_kernel_text(*up)) +- continue; +static ulong +x86_64_xen_kdump_page_mfn(ulong kvaddr) +{ + ulong mfn; + ulong *pml4, *pgd, *pmd, *ptep; -+ + +- if ((bt->flags & BT_CHECK_CALLER)) { +- /* +- * A non-zero offset value from the value_search() +- * lets us know if it's a real text return address. 
+- */ +- spt = value_search(*up, &offset); +- /* +- * sp gets the syment of the function that the text +- * routine above called before leaving its return +- * address on the stack -- if it can be determined. +- */ +- sp = x86_64_function_called_by((*up)-5); + pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); + mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ + +- if (sp == NULL) { +- /* +- * We were unable to get the called function. +- * If the text address had an offset, then +- * it must have made an indirect call, and +- * can't have called our target function. +- */ +- if (offset) { +- if (CRASHDEBUG(1)) +- fprintf(ofp, +- "< ignoring %s() -- makes indirect call and NOT %s()>\n", +- spt->name, +- bt->call_target); +- continue; +- } +- } else if (!STREQ(sp->name, bt->call_target)) { +- /* +- * We got function called by the text routine, +- * but it's not our target function. +- */ +- if (CRASHDEBUG(2)) +- fprintf(ofp, +- "< ignoring %s() -- calls %s() and NOT %s()>\n", +- spt->name, sp->name, +- bt->call_target); +- continue; +- } +- } + if ((mfn != machdep->last_pgd_read) && + !readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(), + "xen kdump pud entry", RETURN_ON_ERROR)) + error(FATAL, "cannot read/find pud page\n"); + machdep->last_pgd_read = mfn; -+ + +- switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) +- { +- case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: +- last_process_stack_eframe = rsp + 8; +- rsp += SIZE(pt_regs); +- i += SIZE(pt_regs)/sizeof(ulong); +- case BACKTRACE_ENTRY_DISPLAYED: +- level++; +- break; +- case BACKTRACE_ENTRY_IGNORED: +- break; +- case BACKTRACE_COMPLETE: +- done = TRUE; +- break; +- } +- } + pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); + mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); -+ + +- if (!irq_eframe && !is_kernel_thread(bt->tc->task) && +- (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { +- user_mode_eframe = bt->stacktop - SIZE(pt_regs); +- if (last_process_stack_eframe < user_mode_eframe) +- x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + +- (bt->stacktop - bt->stackbase) - SIZE(pt_regs), +- bt, ofp); +- } + if ((mfn != machdep->last_pmd_read) && + !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), + "xen kdump pmd entry", RETURN_ON_ERROR)) + error(FATAL, "cannot read/find pmd page\n"); + machdep->last_pmd_read = mfn; -+ + +- if (bt->flags & BT_TEXT_SYMBOLS) { +- if (BT_REFERENCE_FOUND(bt)) { +- print_task_header(fp, task_to_context(bt->task), 0); +- BCOPY(bt_in, bt, sizeof(struct bt_info)); +- bt->ref = NULL; +- machdep->back_trace(bt); +- fprintf(fp, "\n"); +- } +- } + pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); + mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); + @@ -9929,17 +9993,27 @@ + mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); + + return mfn; -+} -+ + } + +#include "xendump.h" + -+/* + /* +- * Functions that won't be called indirectly. +- * Add more to this as they are discovered. + * Determine the physical address base for relocatable kernels. 
-+ */ + */ +-static const char *direct_call_targets[] = { +- "schedule", +- "schedule_timeout", +- NULL +-}; +- +-static int +-is_direct_call_target(struct bt_info *bt) +static void +x86_64_calc_phys_base(void) -+{ -+ int i; + { + int i; + FILE *iomem; + char buf[BUFSIZE]; + char *p1; @@ -9948,20 +10022,37 @@ + struct vmcore_data *vd; + static struct xendump_data *xd; + Elf64_Phdr *phdr; -+ + +- if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER)) +- return FALSE; + if (machdep->flags & PHYS_BASE) /* --machdep override */ + return; -+ + +- for (i = 0; direct_call_targets[i]; i++) { +- if (STREQ(direct_call_targets[i], bt->call_target)) +- return TRUE; +- } + machdep->machspec->phys_base = 0; /* default/traditional */ -+ + +- return FALSE; +-} + if (!kernel_symbol_exists("phys_base")) + return; -+ + +-static struct syment * +-x86_64_function_called_by(ulong rip) +-{ +- struct syment *sp; +- char buf[BUFSIZE], *p1; +- ulong value, offset; +- unsigned char byte; + if (!symbol_exists("_text")) + return; + else + text_start = symbol_value("_text"); -+ + +- value = 0; +- sp = NULL; + if (ACTIVE()) { + if ((iomem = fopen("/proc/iomem", "r")) == NULL) + return; @@ -9998,23 +10089,20 @@ + fprintf(fp, "phys_base: %lx\n\n", + machdep->machspec->phys_base); + } -+ + +- if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte", +- RETURN_ON_ERROR)) +- return sp; + return; + } -- sprintf(buf, "x/i 0x%lx", rip); +- if (byte != 0xe8) +- return sp; + /* + * Get relocation value from whatever dumpfile format is being used. + */ -- open_tmpfile2(); -- if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { -- rewind(pc->tmpfile2); -- while (fgets(buf, BUFSIZE, pc->tmpfile2)) { -- if ((p1 = strstr(buf, "callq")) && -- whitespace(*(p1-1))) { -- if (extract_hex(p1, &value, NULLCHAR, TRUE)) -- break; +- sprintf(buf, "x/i 0x%lx", rip); + if (DISKDUMP_DUMPFILE()) { + if (diskdump_phys_base(&phys_base)) { + machdep->machspec->phys_base = phys_base; @@ -10024,7 +10112,15 @@ + } + return; + } -+ + +- open_tmpfile2(); +- if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { +- rewind(pc->tmpfile2); +- while (fgets(buf, BUFSIZE, pc->tmpfile2)) { +- if ((p1 = strstr(buf, "callq")) && +- whitespace(*(p1-1))) { +- if (extract_hex(p1, &value, NULLCHAR, TRUE)) +- break; + if ((vd = get_kdump_vmcore_data())) { + for (i = 0; i < vd->num_pt_load_segments; i++) { + phdr = vd->load64 + i; @@ -11875,8 +11971,8 @@ + } +} #endif /* X86_64 */ ---- crash/symbols.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/symbols.c 2008-02-19 15:30:03.000000000 -0500 +--- crash/symbols.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/symbols.c 2008-04-14 13:49:19.000000000 -0400 @@ -1,8 +1,8 @@ /* symbols.c - core analysis suite * @@ -12212,7 +12308,7 @@ } -@@ -2354,6 +2497,96 @@ +@@ -2354,6 +2497,106 @@ } /* @@ -12259,6 +12355,11 @@ + goto bailout; + break; + ++ case EM_S390: ++ if (machine_type_mismatch(file, "S390", NULL, 0)) ++ goto bailout; ++ break; ++ + default: + if (machine_type_mismatch(file, "(unknown)", NULL, 0)) + goto bailout; @@ -12292,6 +12393,11 @@ + goto bailout; + break; + ++ case EM_S390: ++ if (machine_type_mismatch(file, "S390X", NULL, 0)) ++ goto bailout; ++ break; ++ + default: + if (machine_type_mismatch(file, "(unknown)", NULL, 0)) + goto bailout; @@ -12309,7 +12415,7 @@ * Given a choice between two namelists, pick the one for gdb to use. * For now, just check get their stats and check their sizes; the larger * one presumably has debug data. 
-@@ -2427,7 +2660,7 @@ +@@ -2427,7 +2670,7 @@ goto not_system_map; if (parse_line(buf, mapitems) != 3) goto not_system_map; @@ -12318,7 +12424,7 @@ !hexadecimal(mapitems[0], 0) || (strlen(mapitems[1]) > 1)) goto not_system_map; } -@@ -3463,6 +3696,22 @@ +@@ -3463,6 +3706,22 @@ } /* @@ -12341,7 +12447,7 @@ * Return the value of a given symbol. */ ulong -@@ -3477,6 +3726,34 @@ +@@ -3477,6 +3736,34 @@ } /* @@ -12376,7 +12482,7 @@ * Return the symbol name of a given value, with no allowance for offsets. * Returns NULL on failure to allow for testing of a value. */ -@@ -3608,6 +3885,8 @@ +@@ -3608,6 +3895,8 @@ * #define STRUCT_EXISTS(X) (datatype_info((X), NULL, NULL) >= 0) * #define MEMBER_EXISTS(X,Y) (datatype_info((X), (Y), NULL) >= 0) * #define MEMBER_SIZE(X,Y) datatype_info((X), (Y), MEMBER_SIZE_REQUEST) @@ -12385,7 +12491,7 @@ * * to determine structure or union sizes, or member offsets. */ -@@ -3620,6 +3899,9 @@ +@@ -3620,6 +3909,9 @@ ulong type_found; char buf[BUFSIZE]; @@ -12395,7 +12501,7 @@ strcpy(buf, name); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); -@@ -3743,11 +4025,12 @@ +@@ -3743,11 +4035,12 @@ FREEBUF(req); @@ -12409,7 +12515,7 @@ if (req->is_typedef) { dm->flags |= TYPEDEF; } -@@ -3762,13 +4045,42 @@ +@@ -3762,13 +4055,42 @@ if (dm == MEMBER_SIZE_REQUEST) return member_size; @@ -12453,104 +12559,93 @@ * Get the basic type info for a symbol. Let the caller pass in the * gnu_request structure to have access to the full response; in either * case, return the type code. The member field can be used for structures -@@ -3928,25 +4240,59 @@ - void - cmd_struct(void) - { -- int c; -+ cmd_datatype_common(STRUCT_REQUEST); -+} -+/* -+ * This command displays either a union definition, or a formatted display -+ * of the contents of a union at a specified address. If no address is -+ * specified, the union size and the file in which the union is defined -+ * are also displayed. A union member may be appended to the union -+ * name (in a "union.member" format) in order to limit the scope of the data -+ * displayed to that particular member. Structure data is shown in hexadecimal -+ * format. The raw data in a union may be dumped with the -r flag. -+ */ -+void -+cmd_union(void) -+{ -+ cmd_datatype_common(UNION_REQUEST); -+} -+ -+/* -+ * After determining what type of data type follows the *, this routine -+ * has the identical functionality as cmd_struct() or cmd_union(). -+ */ -+void -+cmd_pointer(void) -+{ -+ cmd_datatype_common(0); -+} -+ -+static void -+cmd_datatype_common(ulong flags) -+{ -+ int i, c; - ulong addr, aflag; - struct syment *sp; - int rawdata; - long len; -- ulong flags; - ulong list_head_offset; - int count; -- struct datatype_member struct_member, *sm; -+ int argc_members; -+ int optind_save; -+ struct datatype_member datatype_member, *dm; -+ char *separator; -+ char *structname, *members; -+ char *memberlist[MAXARGS]; +@@ -3899,283 +4221,38 @@ + unsigned restore_radix; + long len; +- if ((len = UNION_SIZE(s)) < 0) +- error(FATAL, "invalid union name: %s\n", s); +- +- if (radix) { +- restore_radix = output_radix; +- output_radix = radix; +- output_format = (output_radix == 10) ? 0 : 'x'; +- } +- +- print_union(s, addr); +- +- if (radix) { +- output_radix = restore_radix; +- output_format = (output_radix == 10) ? 0 : 'x'; +- } +-} +- +-/* +- * This command displays either a structure definition, or a formatted display +- * of the contents of a structure at a specified address. 
If no address is +- * specified, the structure size and the file in which the structure is defined +- * are also displayed. A structure member may be appended to the structure +- * name (in a "struct.member" format) in order to limit the scope of the data +- * displayed to that particular member. Structure data is shown in hexadecimal +- * format. The raw data in a structure may be dumped with the -r flag. +- */ +-void +-cmd_struct(void) +-{ +- int c; +- ulong addr, aflag; +- struct syment *sp; +- int rawdata; +- long len; +- ulong flags; +- ulong list_head_offset; +- int count; +- struct datatype_member struct_member, *sm; +- - sm = &struct_member; - count = 1; -+ dm = &datatype_member; -+ count = 0xdeadbeef; - rawdata = 0; - aflag = 0; +- rawdata = 0; +- aflag = 0; - list_head_offset = 0; - flags = STRUCT_REQUEST; -+ list_head_offset = 0; -+ argc_members = 0; - +- - while ((c = getopt(argcnt, args, "c:rvol:")) != EOF) { - switch(c) -+ while ((c = getopt(argcnt, args, "fuc:rvol:")) != EOF) { -+ switch (c) - { - case 'c': - count = atoi(optarg); -@@ -3969,8 +4315,22 @@ - list_head_offset = stol(optarg, - FAULT_ON_ERROR, NULL); - else if (arg_to_datatype(optarg, +- { +- case 'c': +- count = atoi(optarg); +- break; +- +- case 'r': +- rawdata = 1; +- break; +- +- case 'v': +- flags |= STRUCT_VERBOSE; +- break; +- +- case 'o': +- flags |= SHOW_OFFSET; +- break; +- +- case 'l': +- if (IS_A_NUMBER(optarg)) +- list_head_offset = stol(optarg, +- FAULT_ON_ERROR, NULL); +- else if (arg_to_datatype(optarg, - sm, RETURN_ON_ERROR) > 1) - list_head_offset = sm->member_offset; -+ dm, RETURN_ON_ERROR) > 1) -+ list_head_offset = dm->member_offset; -+ else -+ error(FATAL, "invalid -l option: %s\n", -+ optarg); -+ break; -+ -+ case 'f': -+ if (!pc->dumpfile) -+ error(FATAL, -+ "-f option requires a dumpfile\n"); -+ pc->curcmd_flags |= MEMTYPE_FILEADDR; -+ break; -+ -+ case 'u': -+ pc->curcmd_flags |= MEMTYPE_UVADDR; - break; - - default: -@@ -3982,35 +4342,42 @@ - if (argerrs || !args[optind]) - cmd_usage(pc->curcmd, SYNOPSIS); - +- break; +- +- default: +- argerrs++; +- break; +- } +- } +- +- if (argerrs || !args[optind]) +- cmd_usage(pc->curcmd, SYNOPSIS); +- - if ((arg_to_datatype(args[optind++], sm, FAULT_ON_ERROR) > 1) && - rawdata) - error(FATAL, "member-specific output not allowed with -r\n"); @@ -12564,109 +12659,45 @@ - do_datatype_declaration(sm, flags | (sm->flags & TYPEDEF)); - return; - } -+ if ((count_chars(args[optind], ',')+1) > MAXARGS) -+ error(FATAL, "too many members in comma-separated list!\n"); -+ -+ if ((count_chars(args[optind], '.') > 1) || -+ (LASTCHAR(args[optind]) == ',') || -+ (LASTCHAR(args[optind]) == '.')) -+ error(FATAL, "invalid format: %s\n", args[optind]); -+ -+ optind_save = optind; -+ -+ /* -+ * Take care of address and count (array). 
-+ */ -+ while (args[++optind]) { -+ if (aflag && (count != 0xdeadbeef)) -+ error(FATAL, "too many arguments!\n"); - +- - while (args[optind]) { - if (clean_arg() && IS_A_NUMBER(args[optind])) { - if (aflag) - count = stol(args[optind], - FAULT_ON_ERROR, NULL); - else { +- if (clean_arg() && IS_A_NUMBER(args[optind])) { +- if (aflag) +- count = stol(args[optind], +- FAULT_ON_ERROR, NULL); +- else { - if (!IS_KVADDR(addr = htol(args[optind], -+ if (pc->curcmd_flags & MEMTYPE_FILEADDR) -+ pc->curcmd_private = stoll(args[optind], -+ FAULT_ON_ERROR, NULL); -+ else if (pc->curcmd_flags & MEMTYPE_UVADDR) { -+ addr = htol(args[optind], FAULT_ON_ERROR, -+ NULL); -+ } else if (!IS_KVADDR(addr = htol(args[optind], - FAULT_ON_ERROR, NULL))) - error(FATAL, - "invalid kernel virtual address: %s\n", - args[optind]); - aflag++; - } +- FAULT_ON_ERROR, NULL))) +- error(FATAL, +- "invalid kernel virtual address: %s\n", +- args[optind]); +- aflag++; +- } - } - else if ((sp = symbol_search(args[optind]))) { -+ } else if ((sp = symbol_search(args[optind]))) { - addr = sp->value; - aflag++; - } else { -@@ -4018,301 +4385,137 @@ - fprintf(fp, "possible aternatives:\n"); - if (!symbol_query(args[optind], " ", NULL)) - fprintf(fp, " (none found)\n"); +- addr = sp->value; +- aflag++; +- } else { +- fprintf(fp, "symbol not found: %s\n", args[optind]); +- fprintf(fp, "possible aternatives:\n"); +- if (!symbol_query(args[optind], " ", NULL)) +- fprintf(fp, " (none found)\n"); - return; -+ goto freebuf; - } +- } - optind++; - } - +- } +- - if (!aflag) -+ optind = optind_save; -+ -+ if (count == 0xdeadbeef) -+ count = 1; -+ else if (!aflag) - error(FATAL, "no kernel virtual address argument entered\n"); - -+ if ((flags & SHOW_OFFSET) && aflag) { -+ error(INFO, "-o option not valid with an address argument\n"); -+ flags &= ~SHOW_OFFSET; -+ } -+ - if (list_head_offset) - addr -= list_head_offset; - -+ /* -+ * Handle struct.member[,member] argument format. -+ */ -+ if (strstr(args[optind], ".")) { -+ structname = GETBUF(strlen(args[optind])+1); -+ strcpy(structname, args[optind]); -+ separator = strstr(structname, "."); -+ -+ members = GETBUF(strlen(args[optind])+1); -+ strcpy(members, separator+1); -+ replace_string(members, ",", ' '); -+ argc_members = parse_line(members, memberlist); -+ } else -+ structname = args[optind]; -+ -+ if ((arg_to_datatype(structname, dm, DATATYPE_QUERY|RETURN_ON_ERROR) < 1)) -+ error(FATAL, "invalid data structure reference: %s\n", structname); -+ -+ if ((argc_members > 1) && !aflag) { -+ error(INFO, flags & SHOW_OFFSET ? 
-+ "-o option not valid with multiple member format\n" : -+ "multiple member format not supported in this syntax\n"); -+ *separator = NULLCHAR; -+ argc_members = 0; -+ flags |= SHOW_OFFSET; -+ } -+ -+ len = dm->size; -+ - if (count < 0) { - addr -= len * abs(count); - addr += len; - } - +- error(FATAL, "no kernel virtual address argument entered\n"); +- +- if (list_head_offset) +- addr -= list_head_offset; +- +- if (count < 0) { +- addr -= len * abs(count); +- addr += len; +- } +- - for (c = 0; c < abs(count); c++, addr += len) { - if (rawdata) - raw_data_dump(addr, len, flags & STRUCT_VERBOSE); @@ -12679,83 +12710,12 @@ - if (sm->member) { - parse_for_member(sm, PARSE_FOR_DATA); - close_tmpfile(); -+ if (pc->curcmd_flags & MEMTYPE_FILEADDR) -+ addr = 0; /* unused, but parsed by gdb */ -+ -+ for (c = 0; c < abs(count); c++, addr += len, pc->curcmd_private += len) { -+ if (c) -+ fprintf(fp,"\n"); -+ -+ i = 0; -+ do { -+ if (argc_members) { -+ *separator = '.'; -+ strcpy(separator+1, memberlist[i]); - } +- } - } -+ -+ switch (arg_to_datatype(structname, dm, RETURN_ON_ERROR)) -+ { -+ case 0: error(FATAL, "invalid data structure reference: %s\n", -+ structname); -+ break; -+ case 1: break; -+ case 2: if (rawdata) -+ error(FATAL, -+ "member-specific output not allowed with -r\n"); -+ break; -+ } -+ -+ if (!(dm->flags & TYPEDEF)) { -+ if (flags &(STRUCT_REQUEST|UNION_REQUEST) ) { -+ if ((flags & (STRUCT_REQUEST|UNION_REQUEST)) != dm->type) -+ goto freebuf; -+ } else -+ flags |= dm->type; -+ } -+ -+ /* -+ * No address was passed -- dump the structure/member declaration. -+ */ -+ if (!aflag) { -+ do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF)); -+ goto freebuf; -+ } -+ -+ if (!(flags & (UNION_REQUEST|STRUCT_REQUEST))) -+ error(FATAL, "invalid argument"); -+ -+ /* -+ * Display data. -+ */ -+ if (rawdata) -+ raw_data_dump(addr, len, flags & STRUCT_VERBOSE); -+ else { -+ if (dm->member) -+ open_tmpfile(); -+ -+ if (flags & UNION_REQUEST) -+ print_union(dm->name, addr); -+ else if (flags & STRUCT_REQUEST) -+ print_struct(dm->name, addr); -+ -+ if (dm->member) { -+ parse_for_member(dm, PARSE_FOR_DATA); -+ close_tmpfile(); -+ } -+ } -+ } while (++i < argc_members); -+ } -+ -+freebuf: -+ if (argc_members) { -+ FREEBUF(structname); -+ FREEBUF(members); - } - } - -+ - /* +- } +-} +- +-/* - * After determining what type of data type follows the *, this routine - * has the identical functionality as cmd_struct() or cmd_union(). - */ @@ -12850,15 +12810,23 @@ - - if (!(flags & (UNION_REQUEST|STRUCT_REQUEST))) - error(FATAL, "invalid argument!"); -- ++ if ((len = UNION_SIZE(s)) < 0) ++ error(FATAL, "invalid union name: %s\n", s); + - if (!aflag) - error(FATAL, "no kernel virtual address argument entered\n"); -- ++ if (radix) { ++ restore_radix = output_radix; ++ output_radix = radix; ++ output_format = (output_radix == 10) ? 0 : 'x'; ++ } + - if (count < 0) { - addr -= len * abs(count); - addr += len; - } -- ++ print_union(s, addr); + - for (c = 0; c < abs(count); c++, addr += len) { - if (rawdata) - raw_data_dump(addr, len, flags & STRUCT_VERBOSE); @@ -12877,78 +12845,129 @@ - } - } - } --} -- --/* -- * This command displays either a union definition, or a formatted display -- * of the contents of a union at a specified address. If no address is -- * specified, the union size and the file in which the union is defined -- * are also displayed. 
A union member may be appended to the union -- * name (in a "union.member" format) in order to limit the scope of the data -- * displayed to that particular member. Structure data is shown in hexadecimal -- * format. The raw data in a union may be dumped with the -r flag. -- */ --void --cmd_union(void) --{ ++ if (radix) { ++ output_radix = restore_radix; ++ output_format = (output_radix == 10) ? 0 : 'x'; ++ } + } + + /* ++ * This command displays either a structure definition, or a formatted display ++ * of the contents of a structure at a specified address. If no address is ++ * specified, the structure size and the file in which the structure is defined ++ * are also displayed. A structure member may be appended to the structure ++ * name (in a "struct.member" format) in order to limit the scope of the data ++ * displayed to that particular member. Structure data is shown in hexadecimal ++ * format. The raw data in a structure may be dumped with the -r flag. ++ */ ++void ++cmd_struct(void) ++{ ++ cmd_datatype_common(STRUCT_REQUEST); ++} ++/* + * This command displays either a union definition, or a formatted display + * of the contents of a union at a specified address. If no address is + * specified, the union size and the file in which the union is defined +@@ -4187,25 +4264,45 @@ + void + cmd_union(void) + { - int c; -- ulong addr, aflag; -- struct syment *sp; -- int rawdata; -- long len; ++ cmd_datatype_common(UNION_REQUEST); ++} ++ ++/* ++ * After determining what type of data type follows the *, this routine ++ * has the identical functionality as cmd_struct() or cmd_union(). ++ */ ++void ++cmd_pointer(void) ++{ ++ cmd_datatype_common(0); ++} ++ ++static void ++cmd_datatype_common(ulong flags) ++{ ++ int i, c; + ulong addr, aflag; + struct syment *sp; + int rawdata; + long len; - ulong flags; - int count; - struct datatype_member union_member, *um; -- ulong list_head_offset; -- + ulong list_head_offset; ++ int count; ++ int argc_members; ++ int optind_save; ++ struct datatype_member datatype_member, *dm; ++ char *separator; ++ char *structname, *members; ++ char *memberlist[MAXARGS]; + - um = &union_member; - count = 1; -- rawdata = 0; -- aflag = 0; ++ dm = &datatype_member; ++ count = 0xdeadbeef; + rawdata = 0; + aflag = 0; - list_head_offset = 0; - flags = UNION_REQUEST; -- ++ list_head_offset = 0; ++ argc_members = 0; + - while ((c = getopt(argcnt, args, "c:rvol:")) != EOF) { - switch(c) -- { -- case 'c': -- count = atoi(optarg); -- break; -- -- case 'r': -- rawdata = 1; -- break; -- -- case 'v': -- flags |= STRUCT_VERBOSE; -- break; -- -- case 'o': -- flags |= SHOW_OFFSET; -- break; -- ++ while ((c = getopt(argcnt, args, "fuc:rvol:")) != EOF) { ++ switch (c) + { + case 'c': + count = atoi(optarg); +@@ -4223,14 +4320,28 @@ + flags |= SHOW_OFFSET; + break; + - case 'l': -- if (IS_A_NUMBER(optarg)) -- list_head_offset = stol(optarg, -- FAULT_ON_ERROR, NULL); -- else if (arg_to_datatype(optarg, ++ case 'l': + if (IS_A_NUMBER(optarg)) + list_head_offset = stol(optarg, + FAULT_ON_ERROR, NULL); + else if (arg_to_datatype(optarg, - um, RETURN_ON_ERROR) > 1) - list_head_offset = um->member_offset; - break; -- -- default: -- argerrs++; -- break; -- } -- } -- -- if (argerrs || !args[optind]) -- cmd_usage(pc->curcmd, SYNOPSIS); -- ++ dm, RETURN_ON_ERROR) > 1) ++ list_head_offset = dm->member_offset; ++ else ++ error(FATAL, "invalid -l option: %s\n", ++ optarg); ++ break; ++ ++ case 'f': ++ if (!pc->dumpfile) ++ error(FATAL, ++ "-f option requires a dumpfile\n"); ++ pc->curcmd_flags |= 
MEMTYPE_FILEADDR; ++ break; ++ ++ case 'u': ++ pc->curcmd_flags |= MEMTYPE_UVADDR; ++ break; + + default: + argerrs++; +@@ -4241,75 +4352,177 @@ + if (argerrs || !args[optind]) + cmd_usage(pc->curcmd, SYNOPSIS); + - if ((arg_to_datatype(args[optind++], um, FAULT_ON_ERROR) > 1) && - rawdata) - error(FATAL, "member-specific output not allowed with -r\n"); -- ++ if ((count_chars(args[optind], ',')+1) > MAXARGS) ++ error(FATAL, "too many members in comma-separated list!\n"); + - if ((len = um->size) < 0) { - error(INFO, "union not found: %s\n", um->name); - cmd_usage(pc->curcmd, SYNOPSIS); @@ -12958,15 +12977,39 @@ - do_datatype_declaration(um, flags | (um->flags & TYPEDEF)); - return; - } -- ++ if ((count_chars(args[optind], '.') > 1) || ++ (LASTCHAR(args[optind]) == ',') || ++ (LASTCHAR(args[optind]) == '.')) ++ error(FATAL, "invalid format: %s\n", args[optind]); ++ ++ optind_save = optind; ++ ++ /* ++ * Take care of address and count (array). ++ */ ++ while (args[++optind]) { ++ if (aflag && (count != 0xdeadbeef)) ++ error(FATAL, "too many arguments!\n"); + - while (args[optind]) { -- if (clean_arg() && IS_A_NUMBER(args[optind])) { + if (clean_arg() && IS_A_NUMBER(args[optind])) { - if (aflag) - count = stol(args[optind], - FAULT_ON_ERROR, NULL); - else { - if (!IS_KVADDR(addr = htol(args[optind], -- FAULT_ON_ERROR, NULL))) ++ if (aflag) ++ count = stol(args[optind], ++ FAULT_ON_ERROR, NULL); ++ else { ++ if (pc->curcmd_flags & MEMTYPE_FILEADDR) ++ pc->curcmd_private = stoll(args[optind], ++ FAULT_ON_ERROR, NULL); ++ else if (pc->curcmd_flags & MEMTYPE_UVADDR) { ++ addr = htol(args[optind], FAULT_ON_ERROR, ++ NULL); ++ } else if (!IS_KVADDR(addr = htol(args[optind], + FAULT_ON_ERROR, NULL))) - error(FATAL, - "invalid kernel virtual address: %s\n", - args[optind]); @@ -12974,29 +13017,77 @@ - } - } - else if ((sp = symbol_search(args[optind]))) { -- addr = sp->value; -- aflag++; ++ error(FATAL, ++ "invalid kernel virtual address: %s\n", ++ args[optind]); ++ aflag++; ++ } ++ } else if ((sp = symbol_search(args[optind]))) { + addr = sp->value; + aflag++; - } else { -- fprintf(fp, "symbol not found: %s\n", args[optind]); -- fprintf(fp, "possible aternatives:\n"); -- if (!symbol_query(args[optind], " ", NULL)) -- fprintf(fp, " (none found)\n"); ++ } else { + fprintf(fp, "symbol not found: %s\n", args[optind]); + fprintf(fp, "possible aternatives:\n"); + if (!symbol_query(args[optind], " ", NULL)) + fprintf(fp, " (none found)\n"); - return; -- } ++ goto freebuf; + } - optind++; -- } -- + } + - if (!aflag) - error(FATAL, "no kernel virtual address argument entered\n"); -- -- if (list_head_offset) -- addr -= list_head_offset; -- -- if (count < 0) { -- addr -= len * abs(count); -- addr += len; -- } -- ++ optind = optind_save; ++ ++ if (count == 0xdeadbeef) ++ count = 1; ++ else if (!aflag) ++ error(FATAL, "no kernel virtual address argument entered\n"); ++ ++ if ((flags & SHOW_OFFSET) && aflag) { ++ error(INFO, "-o option not valid with an address argument\n"); ++ flags &= ~SHOW_OFFSET; ++ } + + if (list_head_offset) + addr -= list_head_offset; + ++ /* ++ * Handle struct.member[,member] argument format. 
++ */ ++ if (strstr(args[optind], ".")) { ++ structname = GETBUF(strlen(args[optind])+1); ++ strcpy(structname, args[optind]); ++ separator = strstr(structname, "."); ++ ++ members = GETBUF(strlen(args[optind])+1); ++ strcpy(members, separator+1); ++ replace_string(members, ",", ' '); ++ argc_members = parse_line(members, memberlist); ++ } else ++ structname = args[optind]; ++ ++ if ((arg_to_datatype(structname, dm, DATATYPE_QUERY|RETURN_ON_ERROR) < 1)) ++ error(FATAL, "invalid data structure reference: %s\n", structname); ++ ++ if ((argc_members > 1) && !aflag) { ++ error(INFO, flags & SHOW_OFFSET ? ++ "-o option not valid with multiple member format\n" : ++ "multiple member format not supported in this syntax\n"); ++ *separator = NULLCHAR; ++ argc_members = 0; ++ flags |= SHOW_OFFSET; ++ } ++ ++ len = dm->size; ++ + if (count < 0) { + addr -= len * abs(count); + addr += len; + } + - for (c = 0; c < abs(count); c++, addr += len) { - if (rawdata) - raw_data_dump(addr, len, flags & STRUCT_VERBOSE); @@ -13009,20 +13100,86 @@ - if (um->member) { - parse_for_member(um, PARSE_FOR_DATA); - close_tmpfile(); -- } ++ if (pc->curcmd_flags & MEMTYPE_FILEADDR) ++ addr = 0; /* unused, but parsed by gdb */ ++ ++ for (c = 0; c < abs(count); c++, addr += len, pc->curcmd_private += len) { ++ if (c) ++ fprintf(fp,"\n"); ++ ++ i = 0; ++ do { ++ if (argc_members) { ++ *separator = '.'; ++ strcpy(separator+1, memberlist[i]); + } - } -- } --} -- --/* -- * Generic function for dumping data structure declarations, with a small -- * fixup for typedefs, sizes and member offsets. -+ * Generic function for dumping data structure declarations, with a small -+ * fixup for typedefs, sizes and member offsets. - */ - static void - do_datatype_declaration(struct datatype_member *dm, ulong flags) -@@ -4405,7 +4608,10 @@ ++ ++ switch (arg_to_datatype(structname, dm, RETURN_ON_ERROR)) ++ { ++ case 0: error(FATAL, "invalid data structure reference: %s\n", ++ structname); ++ break; ++ case 1: break; ++ case 2: if (rawdata) ++ error(FATAL, ++ "member-specific output not allowed with -r\n"); ++ break; ++ } ++ ++ if (!(dm->flags & TYPEDEF)) { ++ if (flags &(STRUCT_REQUEST|UNION_REQUEST) ) { ++ if ((flags & (STRUCT_REQUEST|UNION_REQUEST)) != dm->type) ++ goto freebuf; ++ } else ++ flags |= dm->type; ++ } ++ ++ /* ++ * No address was passed -- dump the structure/member declaration. ++ */ ++ if (!aflag) { ++ do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF)); ++ goto freebuf; ++ } ++ ++ if (!(flags & (UNION_REQUEST|STRUCT_REQUEST))) ++ error(FATAL, "invalid argument"); ++ ++ /* ++ * Display data. ++ */ ++ if (rawdata) ++ raw_data_dump(addr, len, flags & STRUCT_VERBOSE); ++ else { ++ if (dm->member) ++ open_tmpfile(); ++ ++ if (flags & UNION_REQUEST) ++ print_union(dm->name, addr); ++ else if (flags & STRUCT_REQUEST) ++ print_struct(dm->name, addr); ++ ++ if (dm->member) { ++ parse_for_member(dm, PARSE_FOR_DATA); ++ close_tmpfile(); ++ } ++ } ++ } while (++i < argc_members); ++ } ++ ++freebuf: ++ if (argc_members) { ++ FREEBUF(structname); ++ FREEBUF(members); + } + } + ++ + /* + * Generic function for dumping data structure declarations, with a small + * fixup for typedefs, sizes and member offsets. +@@ -4405,7 +4618,10 @@ if (!(p1 = strstr(s, "."))) both = FALSE; @@ -13034,7 +13191,7 @@ if ((p1 == s) || !strlen(p1+1)) goto datatype_member_fatal; *p1 = NULLCHAR; -@@ -4634,6 +4840,27 @@ +@@ -4634,6 +4850,27 @@ } /* @@ -13062,7 +13219,7 @@ * Verify that a datatype exists, but return on error. 
*/ int -@@ -4705,6 +4932,8 @@ +@@ -4705,6 +4942,8 @@ cmd_usage(pc->curcmd, SYNOPSIS); if ((sp = symbol_search(args[optind])) && !args[optind+1]) { @@ -13071,7 +13228,7 @@ sprintf(buf2, "%s = ", args[optind]); leader = strlen(buf2); if (module_symbol(sp->value, NULL, NULL, NULL, output_radix)) -@@ -4758,6 +4987,39 @@ +@@ -4758,6 +4997,39 @@ } /* @@ -13111,7 +13268,7 @@ * As a latch ditch effort before a command is thrown away by exec_command(), * args[0] is checked to see whether it's the name of a variable, structure, * union, or typedef. If so, args[0] is changed to the appropriate command, -@@ -4793,9 +5055,9 @@ +@@ -4793,9 +5065,9 @@ command = "whatis"; else if (!datatype_exists(args[0])) return FALSE; @@ -13123,7 +13280,7 @@ if (is_gdb_command(FALSE, RETURN_ON_ERROR)) { pc->curcmd = pc->program_name; error(FATAL, -@@ -5056,6 +5318,8 @@ +@@ -5056,6 +5328,8 @@ fprintf(ofp, "%sSTRUCT_VERBOSE", others++ ? "|" : ""); if (flags & SHOW_OFFSET) fprintf(ofp, "%sSHOW_OFFSET", others++ ? "|" : ""); @@ -13132,7 +13289,7 @@ fprintf(ofp, ")\n"); } -@@ -5079,7 +5343,8 @@ +@@ -5079,7 +5353,8 @@ s = dm->member; indent = 0; @@ -13142,7 +13299,7 @@ rewind(pc->tmpfile); switch (flag) -@@ -5090,7 +5355,7 @@ +@@ -5090,7 +5365,7 @@ next_item: while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, lookfor1) || STRNEQ(buf, lookfor2)) { @@ -13151,7 +13308,7 @@ if (strstr(buf, "= {")) indent = count_leading_spaces(buf); if (strstr(buf, "[")) -@@ -5098,16 +5363,22 @@ +@@ -5098,16 +5373,22 @@ } if (on) { @@ -13174,7 +13331,7 @@ goto next_item; } break; -@@ -5174,7 +5445,7 @@ +@@ -5174,7 +5455,7 @@ { int i, c, len; long offset; @@ -13183,7 +13340,7 @@ char *arglist[MAXARGS]; char buf1[BUFSIZE]; char fmt[BUFSIZE]; -@@ -5186,6 +5457,9 @@ +@@ -5186,6 +5467,9 @@ return FALSE; } @@ -13193,7 +13350,7 @@ if (STRNEQ(inbuf, " union {")) dm->flags |= IN_UNION; if (STRNEQ(inbuf, " struct {")) -@@ -5215,9 +5489,20 @@ +@@ -5215,9 +5499,20 @@ } } } else if (c) { @@ -13217,7 +13374,7 @@ } if (!target) -@@ -5307,7 +5592,8 @@ +@@ -5307,7 +5602,8 @@ if ((retval = builtin_array_length(s, 0, two_dim))) return retval; @@ -13227,7 +13384,7 @@ if (!two_dim) { req = &gnu_request; if ((get_symbol_type(copy, NULL, req) == -@@ -5417,6 +5703,23 @@ +@@ -5417,6 +5713,23 @@ } /* @@ -13251,7 +13408,7 @@ * Designed for use by non-debug kernels, but used by all. 
*/ int -@@ -5433,6 +5736,8 @@ +@@ -5433,6 +5746,8 @@ lenptr = &array_table.kmem_cache_s_c_name; else if (STREQ(s, "kmem_cache_s.array")) lenptr = &array_table.kmem_cache_s_array; @@ -13260,7 +13417,7 @@ else if (STREQ(s, "kmem_cache_s.cpudata")) lenptr = &array_table.kmem_cache_s_cpudata; else if (STREQ(s, "log_buf")) -@@ -5469,11 +5774,16 @@ +@@ -5469,11 +5784,16 @@ lenptr = &array_table.prio_array_queue; else if (STREQ(s, "height_to_maxindex")) lenptr = &array_table.height_to_maxindex; @@ -13278,7 +13435,7 @@ if (!lenptr) /* not stored */ return(len); -@@ -5606,8 +5916,16 @@ +@@ -5606,8 +5926,16 @@ OFFSET(task_struct_last_run)); fprintf(fp, " task_struct_timestamp: %ld\n", OFFSET(task_struct_timestamp)); @@ -13295,7 +13452,7 @@ fprintf(fp, " thread_info_task: %ld\n", OFFSET(thread_info_task)); -@@ -5618,11 +5936,31 @@ +@@ -5618,11 +5946,31 @@ fprintf(fp, " thread_info_previous_esp: %ld\n", OFFSET(thread_info_previous_esp)); @@ -13327,7 +13484,7 @@ fprintf(fp, " hlist_node_next: %ld\n", OFFSET(hlist_node_next)); fprintf(fp, " hlist_node_pprev: %ld\n", -@@ -5647,6 +5985,11 @@ +@@ -5647,6 +5995,11 @@ OFFSET(signal_struct_count)); fprintf(fp, " signal_struct_action: %ld\n", OFFSET(signal_struct_action)); @@ -13339,7 +13496,7 @@ fprintf(fp, " task_struct_start_time: %ld\n", OFFSET(task_struct_start_time)); fprintf(fp, " task_struct_times: %ld\n", -@@ -5766,10 +6109,22 @@ +@@ -5766,10 +6119,22 @@ OFFSET(mm_struct_pgd)); fprintf(fp, " mm_struct_rss: %ld\n", OFFSET(mm_struct_rss)); @@ -13362,7 +13519,7 @@ fprintf(fp, " vm_area_struct_vm_mm: %ld\n", OFFSET(vm_area_struct_vm_mm)); -@@ -5885,6 +6240,15 @@ +@@ -5885,6 +6250,15 @@ fprintf(fp, " page_pte: %ld\n", OFFSET(page_pte)); @@ -13378,7 +13535,7 @@ fprintf(fp, " swap_info_struct_swap_file: %ld\n", OFFSET(swap_info_struct_swap_file)); fprintf(fp, " swap_info_struct_swap_vfsmnt: %ld\n", -@@ -5922,6 +6286,8 @@ +@@ -5922,6 +6296,8 @@ OFFSET(irq_desc_t_status)); fprintf(fp, " irq_desc_t_handler: %ld\n", OFFSET(irq_desc_t_handler)); @@ -13387,7 +13544,7 @@ fprintf(fp, " irq_desc_t_action: %ld\n", OFFSET(irq_desc_t_action)); fprintf(fp, " irq_desc_t_depth: %ld\n", -@@ -5967,11 +6333,52 @@ +@@ -5967,11 +6343,52 @@ fprintf(fp, "hw_interrupt_type_set_affinity: %ld\n", OFFSET(hw_interrupt_type_set_affinity)); @@ -13440,7 +13597,7 @@ fprintf(fp, " files_struct_max_fds: %ld\n", OFFSET(files_struct_max_fds)); fprintf(fp, " files_struct_max_fdset: %ld\n", -@@ -5988,6 +6395,12 @@ +@@ -5988,6 +6405,12 @@ OFFSET(file_f_vfsmnt)); fprintf(fp, " file_f_count: %ld\n", OFFSET(file_f_count)); @@ -13453,7 +13610,7 @@ fprintf(fp, " fs_struct_root: %ld\n", OFFSET(fs_struct_root)); fprintf(fp, " fs_struct_pwd: %ld\n", -@@ -6165,6 +6578,47 @@ +@@ -6165,6 +6588,49 @@ fprintf(fp, " slab_free: %ld\n", OFFSET(slab_free)); @@ -13497,11 +13654,25 @@ + OFFSET(kmem_cache_cpu_page)); + fprintf(fp, " kmem_cache_cpu_node: %ld\n", + OFFSET(kmem_cache_cpu_node)); ++ fprintf(fp, " kmem_cache_flags: %ld\n", ++ OFFSET(kmem_cache_flags)); + fprintf(fp, " net_device_next: %ld\n", OFFSET(net_device_next)); fprintf(fp, " net_device_name: %ld\n", -@@ -6217,6 +6671,11 @@ +@@ -6175,6 +6641,11 @@ + OFFSET(net_device_addr_len)); + fprintf(fp, " net_device_ip_ptr: %ld\n", + OFFSET(net_device_ip_ptr)); ++ fprintf(fp, " net_device_dev_list: %ld\n", ++ OFFSET(net_device_dev_list)); ++ fprintf(fp, " net_dev_base_head: %ld\n", ++ OFFSET(net_dev_base_head)); ++ + fprintf(fp, " device_next: %ld\n", + OFFSET(device_next)); + fprintf(fp, " device_name: %ld\n", +@@ -6217,6 +6688,11 @@ fprintf(fp, " 
inet_opt_num: %ld\n", OFFSET(inet_opt_num)); @@ -13513,7 +13684,7 @@ fprintf(fp, " timer_list_list: %ld\n", OFFSET(timer_list_list)); fprintf(fp, " timer_list_next: %ld\n", -@@ -6291,6 +6750,8 @@ +@@ -6291,6 +6767,8 @@ OFFSET(zone_struct_size)); fprintf(fp, " zone_struct_memsize: %ld\n", OFFSET(zone_struct_memsize)); @@ -13522,7 +13693,7 @@ fprintf(fp, " zone_struct_zone_start_paddr: %ld\n", OFFSET(zone_struct_zone_start_paddr)); fprintf(fp, " zone_struct_zone_start_mapnr: %ld\n", -@@ -6324,6 +6785,8 @@ +@@ -6324,6 +6802,8 @@ OFFSET(zone_name)); fprintf(fp, " zone_spanned_pages: %ld\n", OFFSET(zone_spanned_pages)); @@ -13531,7 +13702,7 @@ fprintf(fp, " zone_zone_start_pfn: %ld\n", OFFSET(zone_zone_start_pfn)); fprintf(fp, " zone_pages_min: %ld\n", -@@ -6332,6 +6795,18 @@ +@@ -6332,6 +6812,18 @@ OFFSET(zone_pages_low)); fprintf(fp, " zone_pages_high: %ld\n", OFFSET(zone_pages_high)); @@ -13550,7 +13721,7 @@ fprintf(fp, " neighbour_next: %ld\n", OFFSET(neighbour_next)); -@@ -6471,10 +6946,61 @@ +@@ -6471,10 +6963,61 @@ OFFSET(x8664_pda_irqstackptr)); fprintf(fp, " x8664_pda_level4_pgt: %ld\n", OFFSET(x8664_pda_level4_pgt)); @@ -13612,7 +13783,7 @@ fprintf(fp, "\n size_table:\n"); fprintf(fp, " page: %ld\n", SIZE(page)); -@@ -6493,6 +7019,10 @@ +@@ -6493,6 +7036,10 @@ fprintf(fp, " array_cache: %ld\n", SIZE(array_cache)); fprintf(fp, " kmem_bufctl_t: %ld\n", SIZE(kmem_bufctl_t)); @@ -13623,7 +13794,7 @@ fprintf(fp, " swap_info_struct: %ld\n", SIZE(swap_info_struct)); fprintf(fp, " vm_area_struct: %ld\n", -@@ -6512,6 +7042,7 @@ +@@ -6512,6 +7059,7 @@ fprintf(fp, " fs_struct: %ld\n", SIZE(fs_struct)); fprintf(fp, " files_struct: %ld\n", SIZE(files_struct)); @@ -13631,7 +13802,7 @@ fprintf(fp, " file: %ld\n", SIZE(file)); fprintf(fp, " inode: %ld\n", SIZE(inode)); fprintf(fp, " vfsmount: %ld\n", SIZE(vfsmount)); -@@ -6546,8 +7077,11 @@ +@@ -6546,8 +7094,11 @@ fprintf(fp, " sock: %ld\n", SIZE(sock)); fprintf(fp, " inet_sock: %ld\n", SIZE(inet_sock)); fprintf(fp, " socket: %ld\n", SIZE(socket)); @@ -13643,7 +13814,7 @@ fprintf(fp, " signal_queue: %ld\n", SIZE(signal_queue)); fprintf(fp, " sigqueue: %ld\n", SIZE(sigqueue)); -@@ -6601,6 +7135,8 @@ +@@ -6601,6 +7152,8 @@ fprintf(fp, " x8664_pda: %ld\n", SIZE(x8664_pda)); @@ -13652,7 +13823,7 @@ fprintf(fp, " gate_struct: %ld\n", SIZE(gate_struct)); fprintf(fp, " tss_struct: %ld\n", -@@ -6609,7 +7145,22 @@ +@@ -6609,7 +7162,22 @@ SIZE(task_struct_start_time)); fprintf(fp, " cputime_t: %ld\n", SIZE(cputime_t)); @@ -13676,7 +13847,7 @@ fprintf(fp, "\n array_table:\n"); /* -@@ -6663,6 +7214,12 @@ +@@ -6663,6 +7231,12 @@ get_array_length("prio_array.queue", NULL, SIZE(list_head))); fprintf(fp, " height_to_maxindex: %d\n", ARRAY_LENGTH(height_to_maxindex)); @@ -13689,7 +13860,7 @@ if (spec) { int in_size_table, in_array_table, arrays, offsets, sizes; -@@ -6890,6 +7447,10 @@ +@@ -6890,6 +7464,10 @@ SEC_HAS_CONTENTS)) st->flags |= NO_SEC_CONTENTS; } @@ -13700,7 +13871,7 @@ break; case (uint)MODULE_SECTIONS: -@@ -6906,6 +7467,10 @@ +@@ -6906,6 +7484,10 @@ SEC_HAS_CONTENTS)) st->flags |= NO_SEC_CONTENTS; } @@ -13711,7 +13882,7 @@ break; default: -@@ -6960,8 +7525,9 @@ +@@ -6960,8 +7542,9 @@ i = lm->mod_sections; lm->mod_section_data[i].section = section; lm->mod_section_data[i].priority = prio; @@ -13722,7 +13893,7 @@ if (strlen(name) < MAX_MOD_SEC_NAME) strcpy(lm->mod_section_data[i].name, name); else -@@ -7013,7 +7579,7 @@ +@@ -7013,7 +7596,7 @@ */ static void @@ -13731,7 +13902,7 @@ { int i; asection *section; -@@ -7073,6 +7639,134 @@ +@@ -7073,6 +7656,134 
@@ } /* @@ -13866,7 +14037,7 @@ * Later versons of insmod store basic address information of each * module in a format that looks like the following example of the * nfsd module: -@@ -7185,8 +7879,8 @@ +@@ -7185,8 +7896,8 @@ } if (CRASHDEBUG(1)) @@ -13877,7 +14048,7 @@ switch (kt->flags & (KMOD_V1|KMOD_V2)) { -@@ -7199,7 +7893,8 @@ +@@ -7199,7 +7910,8 @@ strcpy(lm->mod_namelist, namelist); else strncpy(lm->mod_namelist, namelist, MAX_MOD_NAMELIST-1); @@ -13887,7 +14058,7 @@ } if ((mbfd = bfd_openr(namelist, NULL)) == NULL) -@@ -7219,6 +7914,10 @@ +@@ -7219,6 +7931,10 @@ else if (symcount == 0) error(FATAL, "no symbols in object file: %s\n", namelist); @@ -13898,7 +14069,7 @@ sort_x = bfd_make_empty_symbol(mbfd); sort_y = bfd_make_empty_symbol(mbfd); if (sort_x == NULL || sort_y == NULL) -@@ -7251,17 +7950,33 @@ +@@ -7251,17 +7967,33 @@ add_symbol_file(struct load_module *lm) { struct gnu_request request, *req; @@ -13935,7 +14106,7 @@ sprintf(buf, "set complaints 0"); gdb_pass_through(buf, NULL, 0); -@@ -7382,7 +8097,12 @@ +@@ -7382,7 +8114,12 @@ bfd_map_over_sections(bfd, section_header_info, MODULE_SECTIONS); @@ -13949,7 +14120,7 @@ from = (bfd_byte *) minisyms; fromend = from + symcount * size; -@@ -7395,104 +8115,112 @@ +@@ -7395,104 +8132,112 @@ bfd_get_symbol_info(bfd, sym, &syminfo); secname = (char *)bfd_get_section_name(bfd, sym->section); @@ -14022,6 +14193,10 @@ + fprintf(fp, "%08lx (%c) [%s] %s\n", + (ulong)syminfo.value, + syminfo.type, secname, syminfo.name); ++ ++ if (! lm->mod_text_start) { ++ break; ++ } - case 'd': - case 'D': @@ -14029,23 +14204,19 @@ - fprintf(fp, "%08lx (%c) [%s] %s\n", - (ulong)syminfo.value, - syminfo.type, secname, syminfo.name); -+ if (! lm->mod_text_start) { ++ if ((st->flags & INSMOD_BUILTIN) && ++ (STREQ(name, "init_module") || ++ STREQ(name, "cleanup_module"))) + break; -+ } - if (STREQ(secname, ".rodata")) - syminfo.value += lm->mod_rodata_start; - else - syminfo.value += lm->mod_data_start; -+ if ((st->flags & INSMOD_BUILTIN) && -+ (STREQ(name, "init_module") || -+ STREQ(name, "cleanup_module"))) -+ break; -+ + syminfo.value += lm->mod_text_start; + found = 1; + break; - ++ + default: + break; + } @@ -14074,7 +14245,7 @@ + } + } + } -+ + + if (found) { strcpy(name, syminfo.name); strip_module_symbol_end(name); @@ -14140,7 +14311,7 @@ } lm->mod_load_symend = &lm->mod_load_symtable[lm->mod_load_symcnt]; -@@ -7713,7 +8441,7 @@ +@@ -7713,7 +8458,7 @@ ulong start, end; char *modbuf; ulong maxchunk, alloc; @@ -14149,7 +14320,7 @@ start = roundup(lm->mod_size_of_struct, sizeof(long)) + lm->mod_base; end = lm->mod_base + lm->mod_size; -@@ -8089,6 +8817,10 @@ +@@ -8089,6 +8834,10 @@ struct syment *sp_array[200], *sp; if (req->name == PATCH_KERNEL_SYMBOLS_START) { @@ -14160,8 +14331,18 @@ fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : "\nplease wait... 
(patching %ld gdb minimal_symbol values) ", st->symcnt); ---- crash/defs.h.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/defs.h 2008-02-06 16:41:12.000000000 -0500 +@@ -8267,7 +9016,8 @@ + if (pc->flags & DROP_CORE) + drop_core("DROP_CORE flag set: forcing a segmentation fault\n"); + +- gdb_readnow_warning(); ++ if (CRASHDEBUG(1)) ++ gdb_readnow_warning(); + + if (pc->flags & RUNTIME) { + sprintf(buf, "%s\n%s FILE: %s LINE: %d FUNCTION: %s()\n", +--- crash/defs.h.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/defs.h 2008-04-23 14:49:44.000000000 -0400 @@ -1,8 +1,8 @@ /* defs.h - core analysis suite * @@ -14393,7 +14574,7 @@ void (*display_bh)(void); ulong module_list; ulong kernel_module; -@@ -430,11 +504,36 @@ +@@ -430,11 +504,39 @@ uint kernel_version[3]; uint gcc_version[3]; int runq_siblings; @@ -14402,9 +14583,13 @@ long __cpu_idx[NR_CPUS]; long __per_cpu_offset[NR_CPUS]; - long cpu_flags[NR_CPUS]; +-#define NMI 0x1 + ulong cpu_flags[NR_CPUS]; ++#define POSSIBLE (0x1) ++#define PRESENT (0x2) ++#define ONLINE (0x4) ++#define NMI (0x8) + int BUG_bytes; - #define NMI 0x1 + ulong xen_flags; +#define WRITABLE_PAGE_TABLES (0x1) +#define SHADOW_PAGE_TABLES (0x2) @@ -14431,7 +14616,7 @@ }; /* -@@ -511,6 +610,7 @@ +@@ -511,6 +613,7 @@ char *task_struct; char *thread_info; char *mm_struct; @@ -14439,7 +14624,7 @@ }; #define TASK_INIT_DONE (0x1) -@@ -527,6 +627,7 @@ +@@ -527,6 +630,7 @@ #define IRQSTACKS (0x800) #define TIMESPEC (0x1000) #define NO_TIMESPEC (0x2000) @@ -14447,7 +14632,7 @@ #define TASK_SLUSH (20) -@@ -578,6 +679,7 @@ +@@ -578,6 +682,7 @@ ulonglong flags; ulong instptr; ulong stkptr; @@ -14455,7 +14640,7 @@ ulong stackbase; ulong stacktop; char *stackbuf; -@@ -602,6 +704,8 @@ +@@ -602,6 +707,8 @@ (void *)(&bt->stackbuf[(ulong)STACK_OFFSET_TYPE(OFF)]), (size_t)(SZ)) struct machine_specific; /* uniquely defined below each machine's area */ @@ -14464,7 +14649,7 @@ struct machdep_table { ulong flags; -@@ -645,14 +749,24 @@ +@@ -645,14 +752,24 @@ char **file; } *line_number_hooks; ulong last_pgd_read; @@ -14489,7 +14674,7 @@ }; /* -@@ -660,19 +774,25 @@ +@@ -660,19 +777,25 @@ * as defined in their processor-specific files below. (see KSYMS_START defs). 
*/ #define HWRESET (0x80000000) @@ -14522,7 +14707,7 @@ #define FILL_PGD(PGD, TYPE, SIZE) \ if (!IS_LAST_PGD_READ(PGD)) { \ -@@ -681,6 +801,13 @@ +@@ -681,6 +804,13 @@ machdep->last_pgd_read = (ulong)(PGD); \ } @@ -14536,7 +14721,7 @@ #define FILL_PMD(PMD, TYPE, SIZE) \ if (!IS_LAST_PMD_READ(PMD)) { \ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ -@@ -695,10 +822,12 @@ +@@ -695,10 +825,12 @@ machdep->last_ptbl_read = (ulong)(PTBL); \ } @@ -14549,7 +14734,7 @@ #define FOREACH_BT (1) #define FOREACH_VM (2) -@@ -737,6 +866,7 @@ +@@ -737,6 +869,7 @@ #define FOREACH_c_FLAG (0x40000) #define FOREACH_f_FLAG (0x80000) #define FOREACH_o_FLAG (0x100000) @@ -14557,7 +14742,7 @@ struct foreach_data { ulong flags; -@@ -810,10 +940,15 @@ +@@ -810,10 +943,15 @@ long task_struct_last_run; long task_struct_timestamp; long task_struct_thread_info; @@ -14573,7 +14758,7 @@ long pid_link_pid; long pid_hash_chain; long hlist_node_next; -@@ -830,6 +965,8 @@ +@@ -830,6 +968,8 @@ long tms_tms_stime; long signal_struct_count; long signal_struct_action; @@ -14582,7 +14767,7 @@ long k_sigaction_sa; long sigaction_sa_handler; long sigaction_sa_flags; -@@ -875,8 +1012,14 @@ +@@ -875,8 +1015,14 @@ long mm_struct_mmap; long mm_struct_pgd; long mm_struct_rss; @@ -14597,7 +14782,7 @@ long vm_area_struct_vm_mm; long vm_area_struct_vm_next; long vm_area_struct_vm_end; -@@ -948,6 +1091,7 @@ +@@ -948,6 +1094,7 @@ long block_device_bd_disk; long irq_desc_t_status; long irq_desc_t_handler; @@ -14605,7 +14790,7 @@ long irq_desc_t_action; long irq_desc_t_depth; long irqdesc_action; -@@ -968,8 +1112,28 @@ +@@ -968,8 +1115,28 @@ long hw_interrupt_type_ack; long hw_interrupt_type_end; long hw_interrupt_type_set_affinity; @@ -14634,7 +14819,7 @@ long files_struct_max_fds; long files_struct_max_fdset; long files_struct_open_fds; -@@ -978,6 +1142,9 @@ +@@ -978,6 +1145,9 @@ long file_f_dentry; long file_f_vfsmnt; long file_f_count; @@ -14644,7 +14829,16 @@ long fs_struct_root; long fs_struct_pwd; long fs_struct_rootmnt; -@@ -1088,6 +1255,8 @@ +@@ -1067,6 +1237,8 @@ + long net_device_type; + long net_device_addr_len; + long net_device_ip_ptr; ++ long net_device_dev_list; ++ long net_dev_base_head; + long device_next; + long device_name; + long device_type; +@@ -1088,6 +1260,8 @@ long inet_opt_dport; long inet_opt_sport; long inet_opt_num; @@ -14653,7 +14847,7 @@ long timer_list_list; long timer_list_next; long timer_list_entry; -@@ -1123,6 +1292,7 @@ +@@ -1123,6 +1297,7 @@ long zone_struct_name; long zone_struct_size; long zone_struct_memsize; @@ -14661,7 +14855,7 @@ long zone_struct_zone_start_paddr; long zone_struct_zone_start_mapnr; long zone_struct_zone_mem_map; -@@ -1143,6 +1313,7 @@ +@@ -1143,6 +1318,7 @@ long zone_pages_min; long zone_pages_low; long zone_pages_high; @@ -14669,7 +14863,7 @@ long neighbour_next; long neighbour_primary_key; long neighbour_ha; -@@ -1210,7 +1381,69 @@ +@@ -1210,7 +1386,70 @@ long x8664_pda_irqstackptr; long x8664_pda_level4_pgt; long x8664_pda_cpunumber; @@ -14725,6 +14919,7 @@ + long kmem_cache_cpu_freelist; + long kmem_cache_cpu_page; + long kmem_cache_cpu_node; ++ long kmem_cache_flags; + long zone_nr_active; + long zone_nr_inactive; + long zone_all_unreclaimable; @@ -14739,7 +14934,7 @@ }; struct size_table { /* stash of commonly-used sizes */ -@@ -1239,6 +1472,7 @@ +@@ -1239,6 +1478,7 @@ long umode_t; long dentry; long files_struct; @@ -14747,7 +14942,7 @@ long fs_struct; long file; long inode; -@@ -1264,6 +1498,7 @@ +@@ -1264,6 +1504,7 @@ long net_device; long sock; long signal_struct; @@ 
-14755,7 +14950,7 @@ long signal_queue; long sighand_struct; long sigqueue; -@@ -1292,15 +1527,28 @@ +@@ -1292,15 +1533,28 @@ long address_space; long char_device_struct; long inet_sock; @@ -14784,7 +14979,7 @@ }; struct array_table { -@@ -1327,6 +1575,9 @@ +@@ -1327,6 +1581,9 @@ int free_area_DIMENSION; int prio_array_queue; int height_to_maxindex; @@ -14794,7 +14989,7 @@ }; /* -@@ -1342,7 +1593,12 @@ +@@ -1342,7 +1599,12 @@ #define MEMBER_OFFSET(X,Y) datatype_info((X), (Y), NULL) #define MEMBER_EXISTS(X,Y) (datatype_info((X), (Y), NULL) >= 0) #define MEMBER_SIZE_REQUEST ((struct datatype_member *)(-1)) @@ -14807,7 +15002,7 @@ /* * The following set of macros can only be used with pre-intialized fields -@@ -1365,7 +1621,9 @@ +@@ -1365,7 +1627,9 @@ #define MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) #define STRUCT_SIZE_INIT(X, Y) (ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) #define ARRAY_LENGTH_INIT(A, B, C, D, E) ((A) = get_array_length(C, D, E)) @@ -14817,7 +15012,7 @@ /* * For use with non-debug kernels. -@@ -1389,6 +1647,7 @@ +@@ -1389,6 +1653,7 @@ #define ULONGLONG(ADDR) *((ulonglong *)((char *)(ADDR))) #define ULONG_PTR(ADDR) *((ulong **)((char *)(ADDR))) #define USHORT(ADDR) *((ushort *)((char *)(ADDR))) @@ -14825,7 +15020,7 @@ #define VOID_PTR(ADDR) *((void **)((char *)(ADDR))) struct node_table { -@@ -1396,6 +1655,7 @@ +@@ -1396,6 +1661,7 @@ ulong pgdat; ulong mem_map; ulong size; @@ -14833,7 +15028,7 @@ ulonglong start_paddr; ulong start_mapnr; }; -@@ -1420,8 +1680,10 @@ +@@ -1420,8 +1686,10 @@ ulong kmem_max_limit; ulong kmem_max_cpus; ulong kmem_cache_count; @@ -14844,7 +15039,7 @@ int kmem_cache_namelen; ulong page_hash_table; int page_hash_table_len; -@@ -1441,17 +1703,42 @@ +@@ -1441,17 +1709,42 @@ ulong cached_vma_hits[VMA_CACHE]; int vma_cache_index; ulong vma_cache_fills; @@ -14898,7 +15093,7 @@ #define COMMON_VADDR_SPACE() (vt->flags & COMMON_VADDR) #define PADDR_PRLEN (vt->paddr_prlen) -@@ -1478,7 +1765,8 @@ +@@ -1478,7 +1771,8 @@ long list_head_offset; ulong end; ulong searchfor; @@ -14908,7 +15103,7 @@ char *header; }; #define LIST_OFFSET_ENTERED (VERBOSE << 1) -@@ -1584,8 +1872,11 @@ +@@ -1584,8 +1878,11 @@ int mods_installed; struct load_module *current; struct load_module *load_modules; @@ -14920,7 +15115,7 @@ #define KERNEL_SYMS (0x1) #define MODULE_SYMS (0x2) #define LOAD_MODULE_SYMS (0x4) -@@ -1596,6 +1887,8 @@ +@@ -1596,6 +1893,8 @@ #define NO_SEC_CONTENTS (0x40) #define FORCE_DEBUGINFO (0x80) #define CRC_MATCHES (0x100) @@ -14929,7 +15124,7 @@ #endif /* !GDB_COMMON */ -@@ -1611,6 +1904,8 @@ +@@ -1611,6 +1910,8 @@ #define MOD_KALLSYMS (0x8) #define MOD_INITRD (0x10) @@ -14938,7 +15133,7 @@ struct mod_section_data { #if defined(GDB_6_1) struct bfd_section *section; -@@ -1659,6 +1954,8 @@ +@@ -1659,6 +1960,8 @@ #define KVADDR (0x1) #define UVADDR (0x2) #define PHYSADDR (0x4) @@ -14947,7 +15142,7 @@ #define AMBIGUOUS (~0) #define USE_USER_PGD (UVADDR << 2) -@@ -1680,6 +1977,33 @@ +@@ -1680,6 +1983,33 @@ #define VIRTPAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PHYSPAGEBASE(X) (((physaddr_t)(X)) & (physaddr_t)machdep->pagemask) @@ -14981,7 +15176,7 @@ /* * Machine specific stuff */ -@@ -1689,8 +2013,8 @@ +@@ -1689,8 +2019,8 @@ #define MACHINE_TYPE "X86" #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) @@ -14992,7 +15187,7 @@ #define PGDIR_SHIFT_2LEVEL (22) #define PTRS_PER_PTE_2LEVEL (1024) -@@ -1721,25 +2045,91 @@ +@@ -1721,25 +2051,91 @@ #define SWP_TYPE(entry) (((entry) >> 
1) & 0x3f) #define SWP_OFFSET(entry) ((entry) >> 8) @@ -15092,7 +15287,7 @@ #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) x86_64_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) x86_64_IS_VMALLOC_ADDR((ulong)(X)) -@@ -1757,12 +2147,34 @@ +@@ -1757,12 +2153,37 @@ #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) @@ -15109,10 +15304,13 @@ +#define FILL_PML4_HYPER() { \ + if (!machdep->machspec->last_pml4_read) { \ -+ readmem(symbol_value("idle_pg_table_4"), KVADDR, \ -+ machdep->machspec->pml4, PAGESIZE(), "idle_pg_table_4", \ ++ unsigned long idle_pg_table = \ ++ symbol_exists("idle_pg_table_4") ? symbol_value("idle_pg_table_4") : \ ++ symbol_value("idle_pg_table"); \ ++ readmem(idle_pg_table, KVADDR, \ ++ machdep->machspec->pml4, PAGESIZE(), "idle_pg_table", \ + FAULT_ON_ERROR); \ -+ machdep->machspec->last_pml4_read = symbol_value("idle_pg_table_4"); \ ++ machdep->machspec->last_pml4_read = idle_pg_table; \ + }\ +} + @@ -15128,7 +15326,7 @@ /* * PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so * for safety, use the 2.6 values to generate it. -@@ -1791,11 +2203,22 @@ +@@ -1791,11 +2212,22 @@ #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) #define SWP_OFFSET(entry) ((entry) >> 8) @@ -15151,7 +15349,7 @@ #define CPU_PDA_READ(CPU, BUFFER) \ (STRNEQ("cpu_pda", closest_symbol((symbol_value("cpu_pda") + \ ((CPU) * SIZE(x8664_pda))))) && \ -@@ -1806,6 +2229,9 @@ +@@ -1806,6 +2238,9 @@ #define VALID_LEVEL4_PGT_ADDR(X) \ (((X) == VIRTPAGEBASE(X)) && IS_KVADDR(X) && !IS_VMALLOC_ADDR(X)) @@ -15161,7 +15359,7 @@ #endif /* X86_64 */ #ifdef ALPHA -@@ -1816,7 +2242,7 @@ +@@ -1816,7 +2251,7 @@ #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) @@ -15170,7 +15368,7 @@ #define KSEG_BASE_48_BIT (0xffff800000000000) #define KSEG_BASE (0xfffffc0000000000) #define _PFN_MASK (0xFFFFFFFF00000000) -@@ -1848,6 +2274,8 @@ +@@ -1848,6 +2283,8 @@ #define SWP_TYPE(entry) (((entry) >> 32) & 0xff) #define SWP_OFFSET(entry) ((entry) >> 40) @@ -15179,7 +15377,7 @@ #define TIF_SIGPENDING (2) -@@ -1861,7 +2289,7 @@ +@@ -1861,7 +2298,7 @@ #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) @@ -15188,7 +15386,7 @@ #define PGDIR_SHIFT (22) #define PTRS_PER_PTE (1024) -@@ -1881,9 +2309,14 @@ +@@ -1881,9 +2318,14 @@ #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) @@ -15203,7 +15401,7 @@ #endif /* PPC */ #ifdef IA64 -@@ -1908,6 +2341,9 @@ +@@ -1908,6 +2350,9 @@ #define KERNEL_UNCACHED_BASE ((ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT) #define KERNEL_CACHED_BASE ((ulong)KERNEL_CACHED_REGION << REGION_SHIFT) @@ -15213,7 +15411,7 @@ /* * As of 2.6, these are no longer straight forward. */ -@@ -1917,16 +2353,57 @@ +@@ -1917,16 +2362,57 @@ #define SWITCH_STACK_ADDR(X) (ia64_get_switch_stack((ulong)(X))) @@ -15277,7 +15475,7 @@ * First, define the various bits in a PTE. Note that the PTE format * matches the VHPT short format, the firt doubleword of the VHPD long * format, and the first doubleword of the TLB insertion format. -@@ -1978,6 +2455,7 @@ +@@ -1978,6 +2464,7 @@ #define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED #define EFI_PAGE_SHIFT (12) @@ -15285,7 +15483,7 @@ /* * NOTE: #include'ing creates too many compiler problems, so * this stuff is hardwired here; it's probably etched in stone somewhere. 
-@@ -2020,6 +2498,8 @@ +@@ -2020,6 +2507,8 @@ #define SWP_TYPE(entry) (((entry) >> 1) & 0xff) #define SWP_OFFSET(entry) ((entry) >> 9) @@ -15294,7 +15492,7 @@ #define TIF_SIGPENDING (1) -@@ -2038,11 +2518,14 @@ +@@ -2038,11 +2527,14 @@ #define _64BIT_ #define MACHINE_TYPE "PPC64" @@ -15310,7 +15508,7 @@ #define KERNELBASE machdep->pageoffset #define PGDIR_SHIFT (machdep->pageshift + (machdep->pageshift -3) + (machdep->pageshift - 2)) -@@ -2067,6 +2550,33 @@ +@@ -2067,6 +2559,33 @@ #define PGD_OFFSET(vaddr) ((vaddr >> PGDIR_SHIFT) & 0x7ff) #define PMD_OFFSET(vaddr) ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) @@ -15344,7 +15542,7 @@ #define _PAGE_PRESENT 0x001UL /* software: pte contains a translation */ #define _PAGE_USER 0x002UL /* matches one of the PP bits */ #define _PAGE_RW 0x004UL /* software: user write access allowed */ -@@ -2080,6 +2590,8 @@ +@@ -2080,6 +2599,8 @@ #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) @@ -15353,7 +15551,7 @@ #define MSR_PR_LG 14 /* Problem State / Privilege Level */ /* Used to find the user or kernel-mode frame*/ -@@ -2087,6 +2599,9 @@ +@@ -2087,6 +2608,9 @@ #define STACK_FRAME_OVERHEAD 112 #define EXCP_FRAME_MARKER 0x7265677368657265 @@ -15363,7 +15561,7 @@ #endif /* PPC64 */ #ifdef S390 -@@ -2095,7 +2610,7 @@ +@@ -2095,7 +2619,7 @@ #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) @@ -15372,7 +15570,7 @@ #define PTRS_PER_PTE 1024 #define PTRS_PER_PMD 1 -@@ -2105,6 +2620,8 @@ +@@ -2105,6 +2629,8 @@ #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffe) | \ (((entry) >> 7) & 0x1)) @@ -15381,7 +15579,7 @@ #define TIF_SIGPENDING (2) -@@ -2116,7 +2633,7 @@ +@@ -2116,7 +2642,7 @@ #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) @@ -15390,7 +15588,7 @@ #define PTRS_PER_PTE 512 #define PTRS_PER_PMD 1024 #define PTRS_PER_PGD 2048 -@@ -2125,6 +2642,8 @@ +@@ -2125,6 +2651,8 @@ #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffffffffffe) | \ (((entry) >> 7) & 0x1)) @@ -15399,7 +15597,7 @@ #define TIF_SIGPENDING (2) -@@ -2134,6 +2653,8 @@ +@@ -2134,6 +2662,8 @@ #define SWP_TYPE(entry) (error("PLATFORM_SWP_TYPE: TBD\n")) #define SWP_OFFSET(entry) (error("PLATFORM_SWP_OFFSET: TBD\n")) @@ -15408,7 +15606,7 @@ #endif /* PLATFORM */ -@@ -2185,7 +2706,10 @@ +@@ -2185,7 +2715,10 @@ #define BADVAL ((ulong)(-1)) #define UNUSED (-1) @@ -15419,7 +15617,7 @@ /* * precision lengths for fprintf -@@ -2199,9 +2723,10 @@ +@@ -2199,9 +2732,10 @@ #define MINSPACE (-100) @@ -15433,7 +15631,7 @@ #define LEFT_JUSTIFY (1) #define RIGHT_JUSTIFY (2) -@@ -2419,17 +2944,22 @@ +@@ -2419,17 +2953,22 @@ /* * ps command options. 
*/ @@ -15467,7 +15665,7 @@ #define MAX_PS_ARGS (100) /* maximum command-line specific requests */ -@@ -2461,7 +2991,7 @@ +@@ -2461,7 +3000,7 @@ extern struct program_context program_context, *pc; extern struct task_table task_table, *tt; extern struct kernel_table kernel_table, *kt; @@ -15476,7 +15674,7 @@ extern char *args[MAXARGS]; extern int argcnt; extern int argerrs; -@@ -2534,6 +3064,9 @@ +@@ -2534,6 +3073,9 @@ void cmd_gdb(void); /* gdb_interface.c */ void cmd_net(void); /* net.c */ void cmd_extend(void); /* extensions.c */ @@ -15486,7 +15684,7 @@ /* * main.c -@@ -2591,6 +3124,8 @@ +@@ -2591,6 +3133,8 @@ int interruptible(void); int received_SIGINT(void); void debug_redirect(char *); @@ -15495,7 +15693,7 @@ /* * tools.c -@@ -2658,6 +3193,7 @@ +@@ -2658,6 +3202,7 @@ int hq_open(void); int hq_close(void); int hq_enter(ulong); @@ -15503,7 +15701,7 @@ long get_embedded(void); void dump_embedded(char *); char *ordinal(ulong, char *); -@@ -2683,9 +3219,16 @@ +@@ -2683,9 +3228,16 @@ int clean_arg(void); int empty_list(ulong); int machine_type(char *); @@ -15521,7 +15719,7 @@ /* * symbols.c -@@ -2721,9 +3264,11 @@ +@@ -2721,9 +3273,11 @@ struct syment *next_symbol(char *, struct syment *); struct syment *prev_symbol(char *, struct syment *); void get_symbol_data(char *, long, void *); @@ -15533,7 +15731,7 @@ int symbol_exists(char *s); int kernel_symbol_exists(char *s); int get_syment_array(char *, struct syment **, int); -@@ -2738,9 +3283,12 @@ +@@ -2738,9 +3292,12 @@ void dump_struct_table(ulong); void dump_offset_table(char *, ulong); int is_elf_file(char *); @@ -15546,7 +15744,7 @@ int builtin_array_length(char *, int, int *); char *get_line_number(ulong, char *, int); char *get_build_directory(char *); -@@ -2768,6 +3316,7 @@ +@@ -2768,6 +3325,7 @@ long OFFSET_option(long, long, char *, char *, int, char *, char *); long SIZE_option(long, long, char *, char *, int, char *, char *); void dump_trace(ulong *); @@ -15554,7 +15752,7 @@ /* * memory.c -@@ -2807,6 +3356,7 @@ +@@ -2807,6 +3365,7 @@ char *swap_location(ulonglong, char *); void clear_swap_info_cache(void); uint memory_page_size(void); @@ -15562,7 +15760,7 @@ ulong first_vmalloc_address(void); int l1_cache_size(void); int dumpfile_memory(int); -@@ -2838,6 +3388,7 @@ +@@ -2838,6 +3397,7 @@ void open_files_dump(ulong, int, struct reference *); void get_pathname(ulong, char *, int, int, ulong); ulong file_to_dentry(ulong); @@ -15570,7 +15768,7 @@ void nlm_files_dump(void); int get_proc_version(void); int file_checksum(char *, long *); -@@ -2874,6 +3425,7 @@ +@@ -2874,6 +3434,7 @@ void help_init(void); void cmd_usage(char *, int); void display_version(void); @@ -15578,7 +15776,7 @@ #ifdef X86 #define dump_machdep_table(X) x86_dump_machdep_table(X) #endif -@@ -2945,6 +3497,9 @@ +@@ -2945,6 +3506,9 @@ extern char *help_waitq[]; extern char *help_whatis[]; extern char *help_wr[]; @@ -15588,7 +15786,7 @@ /* * task.c -@@ -2962,10 +3517,13 @@ +@@ -2962,10 +3526,13 @@ ulong task_flags(ulong); ulong task_state(ulong); ulong task_mm(ulong, int); @@ -15602,7 +15800,7 @@ ulong stkptr_to_task(ulong); ulong task_to_thread_info(ulong); ulong task_to_stackbase(ulong); -@@ -3005,11 +3563,17 @@ +@@ -3005,11 +3572,17 @@ */ void register_extension(struct command_table_entry *); void dump_extension_table(int); @@ -15621,7 +15819,7 @@ void module_init(void); void verify_version(void); void verify_spinlock(void); -@@ -3019,14 +3583,18 @@ +@@ -3019,14 +3592,20 @@ int is_system_call(char *, ulong); void generic_dump_irq(int); int generic_dis_filter(ulong, 
char *); @@ -15637,11 +15835,13 @@ void clear_machdep_cache(void); struct stack_hook *gather_text_list(struct bt_info *); int get_cpus_online(void); ++int get_cpus_present(void); +int get_cpus_possible(void); ++int in_cpu_map(int, int); void print_stack_text_syms(struct bt_info *, ulong, ulong); void back_trace(struct bt_info *); #define BT_RAW (0x1ULL) -@@ -3039,11 +3607,13 @@ +@@ -3039,11 +3618,13 @@ #define BT_EXCEPTION_FRAME (0x80ULL) #define BT_LINE_NUMBERS (0x100ULL) #define BT_USER_EFRAME (0x200ULL) @@ -15655,7 +15855,7 @@ #define BT_RESCHEDULE (0x8000ULL) #define BT_SCHEDULE (BT_RESCHEDULE) #define BT_RET_FROM_SMP_FORK (0x10000ULL) -@@ -3069,6 +3639,8 @@ +@@ -3069,6 +3650,8 @@ #define BT_DUMPFILE_SEARCH (0x800000000ULL) #define BT_EFRAME_SEARCH2 (0x1000000000ULL) #define BT_START (0x2000000000ULL) @@ -15664,7 +15864,7 @@ #define BT_REF_HEXVAL (0x1) #define BT_REF_SYMBOL (0x2) -@@ -3101,6 +3673,17 @@ +@@ -3101,6 +3684,17 @@ #define TYPE_S390D (REMOTE_VERBOSE << 6) #define TYPE_NETDUMP (REMOTE_VERBOSE << 7) @@ -15682,7 +15882,7 @@ /* * dev.c */ -@@ -3129,7 +3712,6 @@ +@@ -3129,7 +3723,6 @@ void x86_display_idt_table(void); #define display_idt_table() x86_display_idt_table() #define KSYMS_START (0x1) @@ -15690,7 +15890,7 @@ void x86_dump_eframe_common(struct bt_info *bt, ulong *, int); char *x86_function_called_by(ulong); struct syment *x86_jmp_error_code(ulong); -@@ -3140,6 +3722,8 @@ +@@ -3140,6 +3733,8 @@ ulong entry_tramp_start; ulong entry_tramp_end; physaddr_t entry_tramp_start_phys; @@ -15699,7 +15899,7 @@ }; struct syment *x86_is_entry_tramp_address(ulong, ulong *); -@@ -3194,19 +3778,54 @@ +@@ -3194,19 +3789,54 @@ #define NMI_STACK 2 /* ebase[] offset to NMI exception stack */ struct machine_specific { @@ -15755,7 +15955,7 @@ /* * ppc64.c -@@ -3240,13 +3859,42 @@ +@@ -3240,13 +3870,42 @@ ulong hwintrstack[NR_CPUS]; char *hwstackbuf; uint hwstacksize; @@ -15799,7 +15999,7 @@ #endif /* -@@ -3258,15 +3906,27 @@ +@@ -3258,15 +3917,27 @@ #define display_idt_table() \ error(FATAL, "-d option is not applicable to PowerPC architecture\n") #define KSYMS_START (0x1) @@ -15828,7 +16028,7 @@ /* * ia64.c -@@ -3283,6 +3943,8 @@ +@@ -3283,6 +3954,8 @@ #define display_idt_table() \ error(FATAL, "-d option TBD on ia64 architecture\n"); int ia64_in_init_stack(ulong addr); @@ -15837,7 +16037,7 @@ #define OLD_UNWIND (0x1) /* CONFIG_IA64_NEW_UNWIND not turned on */ #define NEW_UNWIND (0x2) /* CONFIG_IA64_NEW_UNWIND turned on */ -@@ -3396,10 +4058,26 @@ +@@ -3396,10 +4069,28 @@ int netdump_init(char *, FILE *); ulong get_netdump_panic_task(void); ulong get_netdump_switch_stack(ulong); @@ -15862,10 +16062,12 @@ +void get_kdump_regs(struct bt_info *, ulong *, ulong *); +void xen_kdump_p2m_mfn(char *); +int is_sadump_xen(void); ++void set_xen_phys_start(char *); ++ulong xen_phys_start(void); /* * diskdump.c -@@ -3416,6 +4094,28 @@ +@@ -3416,6 +4107,28 @@ int diskdump_memory_dump(FILE *); FILE *set_diskdump_fp(FILE *); void get_diskdump_regs(struct bt_info *, ulong *, ulong *); @@ -15894,7 +16096,7 @@ /* * net.c -@@ -3493,6 +4193,8 @@ +@@ -3493,6 +4206,8 @@ void lkcd_dumpfile_complaint(uint32_t, uint32_t, int); int set_mb_benchmark(ulong); ulonglong fix_lkcd_address(ulonglong); @@ -15903,7 +16105,7 @@ /* * lkcd_v1.c -@@ -3560,6 +4262,7 @@ +@@ -3560,6 +4275,7 @@ #define LKCD_DUMP_V7 (0x7) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V8 (0x8) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V9 (0x9) /* DUMP_VERSION_NUMBER */ @@ -15911,7 +16113,7 @@ #define LKCD_DUMP_VERSION_NUMBER_MASK (0xf) #define 
LKCD_DUMP_RAW (0x1) /* DUMP_[DH_]RAW */ -@@ -3764,7 +4467,6 @@ +@@ -3764,7 +4480,6 @@ extern int prettyprint_structs; extern int prettyprint_arrays; extern int repeat_count_threshold; @@ -15919,7 +16121,7 @@ extern unsigned int print_max; /* -@@ -3814,4 +4516,8 @@ +@@ -3814,4 +4529,8 @@ extern int have_partial_symbols(void); extern int have_full_symbols(void); @@ -15928,7 +16130,7 @@ +#endif + #endif /* !GDB_COMMON */ ---- crash/alpha.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/alpha.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/alpha.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* alpha.c - core analysis suite @@ -15960,7 +16162,7 @@ fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); ---- crash/va_server.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/va_server.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/va_server.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* va_server.c - kernel crash dump file translation library @@ -15990,8 +16192,8 @@ if(read_map(crash_file)) { if(va_server_init_v1(crash_file, start, end, stride)) return -1; ---- crash/kernel.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/kernel.c 2008-01-16 16:32:14.000000000 -0500 +--- crash/kernel.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/kernel.c 2008-04-11 11:01:00.000000000 -0400 @@ -1,8 +1,8 @@ /* kernel.c - core analysis suite * @@ -16017,7 +16219,7 @@ static char *get_loadavg(char *); static void get_lkcd_regs(struct bt_info *, ulong *, ulong *); static void dump_sys_call_table(char *, int); -@@ -42,328 +43,452 @@ +@@ -42,330 +43,565 @@ static void verify_namelist(void); static char *debug_kernel_version(char *); static int restore_stack(struct bt_info *); @@ -16027,6 +16229,7 @@ +static void BUG_bytes_init(void); +static int BUG_x86(void); +static int BUG_x86_64(void); ++static void cpu_maps_init(void); /* @@ -16058,6 +16261,8 @@ - kt->init_end = symbol_value("__init_end"); - } - kt->end = symbol_value("_end"); ++ cpu_maps_init(); ++ + kt->stext = symbol_value("_stext"); + kt->etext = symbol_value("_etext"); + get_text_init_space(); @@ -16587,8 +16792,18 @@ + "tvec_root_s", "vec"); + STRUCT_SIZE_INIT(tvec_s, "tvec_s"); + MEMBER_OFFSET_INIT(tvec_s_vec, "tvec_s", "vec"); ++ } else { ++ STRUCT_SIZE_INIT(tvec_root_s, "tvec_root"); ++ if (VALID_STRUCT(tvec_root_s)) { ++ STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_base"); ++ MEMBER_OFFSET_INIT(tvec_t_base_s_tv1, ++ "tvec_base", "tv1"); ++ MEMBER_OFFSET_INIT(tvec_root_s_vec, ++ "tvec_root", "vec"); ++ STRUCT_SIZE_INIT(tvec_s, "tvec"); ++ MEMBER_OFFSET_INIT(tvec_s_vec, "tvec", "vec"); ++ } + } -+ + STRUCT_SIZE_INIT(__wait_queue, "__wait_queue"); + if (VALID_STRUCT(__wait_queue)) { + if (MEMBER_EXISTS("__wait_queue", "task")) @@ -16725,8 +16940,6 @@ - if (!(kt->flags & NO_KALLSYMS)) - kt->flags |= KALLSYMS_V2; -- } -- break; + if (!(kt->flags & NO_KALLSYMS)) + kt->flags |= KALLSYMS_V2; + } @@ -16745,13 +16958,114 @@ + STRUCT_SIZE_INIT(pcpu_info, "pcpu_info"); + STRUCT_SIZE_INIT(vcpu_struct, "vcpu_struct"); + kt->flags |= ARCH_OPENVZ; - } ++ } + + BUG_bytes_init(); ++} ++ ++/* ++ * If the cpu_present_map, cpu_online_map and cpu_possible_maps exist, ++ * set up the kt->cpu_flags[NR_CPUS] with their settings. 
++ */ ++static void ++cpu_maps_init(void) ++{ ++ int i, c, m, cpu, len; ++ char *buf; ++ ulong *maskptr; ++ struct mapinfo { ++ ulong cpu_flag; ++ char *name; ++ } mapinfo[] = { ++ { POSSIBLE, "cpu_possible_map" }, ++ { PRESENT, "cpu_present_map" }, ++ { ONLINE, "cpu_online_map" }, ++ }; ++ ++ if ((len = STRUCT_SIZE("cpumask_t")) < 0) ++ len = sizeof(ulong); ++ ++ buf = GETBUF(len); ++ ++ for (m = 0; m < sizeof(mapinfo)/sizeof(struct mapinfo); m++) { ++ if (!kernel_symbol_exists(mapinfo[m].name)) ++ continue; ++ ++ if (!readmem(symbol_value(mapinfo[m].name), KVADDR, buf, len, ++ mapinfo[m].name, RETURN_ON_ERROR)) { ++ error(WARNING, "cannot read %s\n", mapinfo[m].name); ++ continue; + } +- break; ++ ++ maskptr = (ulong *)buf; ++ for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) { ++ if (*maskptr == 0) ++ continue; ++ for (c = 0; c < BITS_PER_LONG; c++) ++ if (*maskptr & (0x1UL << c)) { ++ cpu = (i * BITS_PER_LONG) + c; ++ kt->cpu_flags[cpu] |= mapinfo[m].cpu_flag; ++ } ++ } ++ ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "%s: ", mapinfo[m].name); ++ for (i = 0; i < NR_CPUS; i++) { ++ if (kt->cpu_flags[i] & mapinfo[m].cpu_flag) ++ fprintf(fp, "%d ", i); ++ } ++ fprintf(fp, "\n"); ++ } ++ ++ } ++ ++ FREEBUF(buf); ++} ++ ++/* ++ * Determine whether a cpu is in one of the cpu masks. ++ */ ++int ++in_cpu_map(int map, int cpu) ++{ ++ if (cpu >= (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS)) { ++ error(INFO, "in_cpu_map: invalid cpu: %d\n", cpu); ++ return FALSE; ++ } ++ ++ switch (map) ++ { ++ case POSSIBLE: ++ if (!kernel_symbol_exists("cpu_possible_map")) { ++ error(INFO, "cpu_possible_map does not exist\n"); ++ return FALSE; ++ } ++ return (kt->cpu_flags[cpu] & POSSIBLE); ++ ++ case PRESENT: ++ if (!kernel_symbol_exists("cpu_present_map")) { ++ error(INFO, "cpu_present_map does not exist\n"); ++ return FALSE; ++ } ++ return (kt->cpu_flags[cpu] & PRESENT); ++ ++ case ONLINE: ++ if (!kernel_symbol_exists("cpu_online_map")) { ++ error(INFO, "cpu_online_map does not exist\n"); ++ return FALSE; ++ } ++ return (kt->cpu_flags[cpu] & ONLINE); + } ++ ++ return FALSE; } ++ /* -@@ -377,7 +502,7 @@ + * For lack of a better manner of verifying that the namelist and dumpfile + * (or live kernel) match up, verify that the Linux banner is where +@@ -377,7 +613,7 @@ { char buf[BUFSIZE]; ulong linux_banner; @@ -16760,7 +17074,7 @@ char *arglist[MAXARGS]; char *p1, *p2; struct syment *sp; -@@ -389,7 +514,7 @@ +@@ -389,7 +625,7 @@ if (!(sp = symbol_search("linux_banner"))) error(FATAL, "linux_banner symbol does not exist?\n"); @@ -16769,17 +17083,29 @@ linux_banner = symbol_value("linux_banner"); else get_symbol_data("linux_banner", sizeof(ulong), &linux_banner); -@@ -405,7 +530,8 @@ +@@ -405,9 +641,10 @@ error(WARNING, "cannot read linux_banner string\n"); if (ACTIVE()) { - if (strlen(kt->proc_version) && !STREQ(buf, kt->proc_version)) { -+ len = strlen(kt->proc_version) - 1; ++ len = strlen(kt->proc_version); + if ((len > 0) && (strncmp(buf, kt->proc_version, len) != 0)) { if (CRASHDEBUG(1)) { - fprintf(fp, "/proc/version:\n%s", +- fprintf(fp, "/proc/version:\n%s", ++ fprintf(fp, "/proc/version:\n%s\n", kt->proc_version); -@@ -471,6 +597,9 @@ + fprintf(fp, "linux_banner:\n%s\n", buf); + } +@@ -422,7 +659,7 @@ + fprintf(fp, "linux_banner:\n%s\n", buf); + goto bad_match; + } +- strcpy(kt->proc_version, buf); ++ strcpy(kt->proc_version, strip_linefeeds(buf)); + } + + verify_namelist(); +@@ -471,6 +708,9 @@ } } @@ -16789,7 +17115,7 @@ return; bad_match: -@@ -614,6 +743,10 @@ +@@ -614,6 +854,10 @@ if (pc->flags 
& KERNEL_DEBUG_QUERY) return; @@ -16800,7 +17126,34 @@ if (!strlen(kt->utsname.version)) return; -@@ -740,7 +873,7 @@ +@@ -633,7 +877,7 @@ + if (!strstr(buffer, "Linux version 2.")) + continue; + +- if (STREQ(buffer, kt->proc_version)) { ++ if (strstr(buffer, kt->proc_version)) { + found = TRUE; + break; + } +@@ -680,7 +924,7 @@ + if (found) { + if (CRASHDEBUG(1)) { + fprintf(fp, "verify_namelist:\n"); +- fprintf(fp, "/proc/version:\n%s", kt->proc_version); ++ fprintf(fp, "/proc/version:\n%s\n", kt->proc_version); + fprintf(fp, "utsname version: %s\n", + kt->utsname.version); + fprintf(fp, "%s:\n%s\n", namelist, buffer); +@@ -690,7 +934,7 @@ + + if (CRASHDEBUG(1)) { + fprintf(fp, "verify_namelist:\n"); +- fprintf(fp, "/proc/version:\n%s", kt->proc_version); ++ fprintf(fp, "/proc/version:\n%s\n", kt->proc_version); + fprintf(fp, "utsname version: %s\n", kt->utsname.version); + fprintf(fp, "%s:\n%s\n", namelist, buffer2); + } +@@ -740,7 +984,7 @@ { int c; int do_load_module_filter, do_machdep_filter, reverse; @@ -16809,7 +17162,7 @@ ulong curaddr; ulong revtarget; ulong count; -@@ -754,7 +887,16 @@ +@@ -754,7 +998,16 @@ char buf4[BUFSIZE]; char buf5[BUFSIZE]; @@ -16827,7 +17180,7 @@ sp = NULL; unfiltered = user_mode = do_machdep_filter = do_load_module_filter = 0; -@@ -763,7 +905,7 @@ +@@ -763,7 +1016,7 @@ req->flags |= GNU_FROM_TTY_OFF|GNU_RETURN_ON_ERROR; req->count = 1; @@ -16836,7 +17189,7 @@ switch(c) { case 'x': -@@ -786,6 +928,12 @@ +@@ -786,6 +1039,12 @@ BZERO(buf4, BUFSIZE); break; @@ -16849,7 +17202,7 @@ default: argerrs++; break; -@@ -846,7 +994,7 @@ +@@ -846,7 +1105,7 @@ if (user_mode) { sprintf(buf1, "x/%ldi 0x%lx", req->count ? req->count : 1, req->addr); @@ -16858,7 +17211,7 @@ gdb_pass_through(buf1, NULL, 0); return; } -@@ -962,7 +1110,9 @@ +@@ -962,7 +1221,9 @@ close_tmpfile(); } } @@ -16869,7 +17222,7 @@ if (!reverse) { FREEBUF(req->buf); -@@ -1053,6 +1203,185 @@ +@@ -1053,6 +1314,185 @@ FREEBUF(req); } @@ -17055,7 +17408,7 @@ #ifdef NOT_USED /* * To avoid premature stoppage/extension of a dis that includes -@@ -1094,7 +1423,8 @@ +@@ -1094,7 +1534,8 @@ } #define FRAMESIZE_DEBUG_MESSAGE \ @@ -17065,7 +17418,7 @@ /* * Display a kernel stack backtrace. Arguments may be any number pid or task -@@ -1108,18 +1438,25 @@ +@@ -1108,18 +1549,25 @@ * -s displays arguments symbolically. 
*/ @@ -17098,7 +17451,7 @@ void cmd_bt(void) -@@ -1140,8 +1477,11 @@ +@@ -1140,8 +1588,11 @@ bt = &bt_info; BZERO(bt, sizeof(struct bt_info)); @@ -17112,7 +17465,7 @@ { case 'f': bt->flags |= BT_FULL; -@@ -1151,6 +1491,28 @@ +@@ -1151,6 +1602,28 @@ bt->flags |= BT_OLD_BACK_TRACE; break; @@ -17141,7 +17494,7 @@ case 'R': if (refptr) error(INFO, "only one -R option allowed\n"); -@@ -1217,6 +1579,9 @@ +@@ -1217,6 +1690,9 @@ } else if (*optarg == '-') { hook.esp = dtol(optarg+1, FAULT_ON_ERROR, NULL); hook.esp = (ulong)(0 - (long)hook.esp); @@ -17151,7 +17504,7 @@ } else hook.esp = dtol(optarg, FAULT_ON_ERROR, NULL); break; -@@ -1241,6 +1606,8 @@ +@@ -1241,6 +1717,8 @@ bt->flags |= BT_SYMBOLIC_ARGS; break; @@ -17160,7 +17513,7 @@ case 't': bt->flags |= BT_TEXT_SYMBOLS; break; -@@ -1255,6 +1622,11 @@ +@@ -1255,6 +1733,11 @@ } } @@ -17172,7 +17525,7 @@ if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); -@@ -1286,6 +1658,35 @@ +@@ -1286,6 +1769,35 @@ return; } @@ -17208,7 +17561,7 @@ if (active) { if (ACTIVE()) error(FATAL, -@@ -1350,9 +1751,10 @@ +@@ -1350,9 +1862,10 @@ char buf[BUFSIZE]; if (bt->flags & BT_TEXT_SYMBOLS) { @@ -17222,7 +17575,7 @@ } if (bt->hp) -@@ -1435,6 +1837,9 @@ +@@ -1435,6 +1948,9 @@ i < LONGS_PER_STACK; i++, up++) { if (is_kernel_text(*up)) fprintf(fp, "%lx: %s\n", @@ -17232,7 +17585,7 @@ bt->task + (i * sizeof(long)), value_to_symstr(*up, buf, 0)); } -@@ -1461,20 +1866,26 @@ +@@ -1461,20 +1977,26 @@ if (bt->hp) { if (bt->hp->esp && !INSTACK(bt->hp->esp, bt)) error(INFO, @@ -17262,7 +17615,7 @@ else machdep->get_stack_frame(bt, &eip, &esp); -@@ -1486,6 +1897,13 @@ +@@ -1486,6 +2008,13 @@ if (bt->flags & (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) { @@ -17276,7 +17629,7 @@ if (machdep->flags & MACHDEP_BT_TEXT) { bt->instptr = eip; bt->stkptr = esp; -@@ -1666,6 +2084,7 @@ +@@ -1666,6 +2195,7 @@ fprintf(fp, " flags: %llx\n", bt->flags); fprintf(fp, " instptr: %lx\n", bt->instptr); fprintf(fp, " stkptr: %lx\n", bt->stkptr); @@ -17284,7 +17637,7 @@ fprintf(fp, " stackbase: %lx\n", bt->stackbase); fprintf(fp, " stacktop: %lx\n", bt->stacktop); fprintf(fp, " tc: %lx ", (ulong)bt->tc); -@@ -1700,6 +2119,11 @@ +@@ -1700,6 +2230,11 @@ return; } @@ -17296,7 +17649,7 @@ sysrq_eip = sysrq_esp = 0; for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){ -@@ -1721,6 +2145,25 @@ +@@ -1721,6 +2256,25 @@ *esp = *(up-1); return; } @@ -17322,7 +17675,7 @@ if (STREQ(sym, "smp_stop_cpu_interrupt")) { *eip = *up; *esp = bt->task + -@@ -1837,8 +2280,8 @@ +@@ -1837,8 +2391,8 @@ return; } @@ -17333,7 +17686,7 @@ kt->kernel_module = sp->value; kt->module_list = (ulong)list.next; modules_found = TRUE; -@@ -1873,14 +2316,17 @@ +@@ -1873,14 +2427,17 @@ kallsymsbuf = kt->flags & KALLSYMS_V1 ? 
GETBUF(SIZE(kallsyms_header)) : NULL; @@ -17353,7 +17706,7 @@ kt->mods_installed = 0; kt->flags |= NO_MODULE_ACCESS; FREEBUF(modbuf); -@@ -1914,7 +2360,8 @@ +@@ -1914,7 +2471,8 @@ kallsymsbuf, SIZE(kallsyms_header), "kallsyms_header", RETURN_ON_ERROR|QUIET)) { error(WARNING, @@ -17363,7 +17716,7 @@ } else { nsyms = UINT(kallsymsbuf + OFFSET(kallsyms_header_symbols)); -@@ -1947,6 +2394,8 @@ +@@ -1947,6 +2505,8 @@ store_module_symbols_v2(total, kt->mods_installed); break; } @@ -17372,7 +17725,7 @@ } -@@ -2112,7 +2561,7 @@ +@@ -2112,7 +2672,7 @@ address = 0; flag = LIST_MODULE_HDR; @@ -17381,7 +17734,7 @@ switch(c) { case 'r': -@@ -2145,6 +2594,19 @@ +@@ -2145,6 +2705,19 @@ cmd_usage(pc->curcmd, SYNOPSIS); break; @@ -17401,7 +17754,7 @@ case 't': if (is_directory(optarg)) tree = optarg; -@@ -2459,7 +2921,7 @@ +@@ -2459,7 +3032,7 @@ static char * @@ -17410,7 +17763,7 @@ { char buf[BUFSIZE]; char file[BUFSIZE]; -@@ -2477,16 +2939,20 @@ +@@ -2477,16 +3050,20 @@ strcpy(file, filename); #ifdef MODULES_IN_CWD else { @@ -17441,7 +17794,7 @@ } #else else -@@ -2505,6 +2971,8 @@ +@@ -2505,6 +3082,8 @@ if ((st->flags & INSMOD_BUILTIN) && !filename) { sprintf(buf, "__insmod_%s_O/", modref); if (symbol_query(buf, NULL, &sp) == 1) { @@ -17450,7 +17803,7 @@ BZERO(buf, BUFSIZE); p1 = strstr(sp->name, "/"); if ((p2 = strstr(sp->name, file))) -@@ -2578,6 +3046,18 @@ +@@ -2578,6 +3157,18 @@ retbuf = search_directory_tree(dir, file); if (!retbuf) { @@ -17469,7 +17822,7 @@ sprintf(dir, "/lib/modules/%s", kt->utsname.release); if (!(retbuf = search_directory_tree(dir, file))) { switch (kt->flags & (KMOD_V1|KMOD_V2)) -@@ -2592,6 +3072,32 @@ +@@ -2592,6 +3183,32 @@ return retbuf; } @@ -17502,7 +17855,7 @@ /* * Unlink any temporary remote module object files. -@@ -2651,7 +3157,7 @@ +@@ -2651,7 +3268,7 @@ dump_log(int msg_level) { int i; @@ -17511,7 +17864,7 @@ char *buf; char last; ulong index; -@@ -2678,13 +3184,16 @@ +@@ -2678,14 +3295,17 @@ buf = GETBUF(log_buf_len); log_wrap = FALSE; @@ -17522,16 +17875,18 @@ - log_start &= log_buf_len-1; - index = (logged_chars < log_buf_len) ? 0 : log_start; +- + if (logged_chars < log_buf_len) { + index = 0; + } else { + get_symbol_data("log_end", sizeof(ulong), &index); + index &= log_buf_len-1; + } - ++ if ((logged_chars < log_buf_len) && (index == 0) && (buf[index] == '<')) loglevel = TRUE; -@@ -2787,6 +3296,8 @@ + else +@@ -2787,6 +3407,8 @@ do { if (sflag) dump_sys_call_table(args[optind], cnt++); @@ -17540,7 +17895,7 @@ else cmd_usage(args[optind], COMPLETE_HELP); optind++; -@@ -2867,6 +3378,9 @@ +@@ -2867,6 +3489,9 @@ if (NETDUMP_DUMPFILE() && is_partial_netdump()) fprintf(fp, " [PARTIAL DUMP]"); @@ -17550,7 +17905,7 @@ fprintf(fp, "\n"); } -@@ -2876,7 +3390,7 @@ +@@ -2876,7 +3501,7 @@ get_symbol_data("xtime", sizeof(struct timespec), &kt->date); fprintf(fp, " DATE: %s\n", strip_linefeeds(ctime(&kt->date.tv_sec))); @@ -17559,7 +17914,7 @@ fprintf(fp, "LOAD AVERAGE: %s\n", get_loadavg(buf)); fprintf(fp, " TASKS: %ld\n", RUNNING_TASKS()); fprintf(fp, " NODENAME: %s\n", uts->nodename); -@@ -2891,10 +3405,17 @@ +@@ -2891,10 +3516,17 @@ #ifdef WHO_CARES fprintf(fp, " DOMAINNAME: %s\n", uts->domainname); #endif @@ -17578,7 +17933,7 @@ else { strip_linefeeds(get_panicmsg(buf)); fprintf(fp, "\"%s\"%s\n", buf, -@@ -2952,28 +3473,42 @@ +@@ -2952,28 +3584,42 @@ /* * Calculate and return the uptime. 
*/ @@ -17637,7 +17992,7 @@ #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<> FSHIFT) -@@ -3048,9 +3583,9 @@ +@@ -3048,9 +3694,9 @@ struct syment *sp, *spn; long size; #ifdef S390X @@ -17649,7 +18004,7 @@ #endif if (GDB_PATCHED()) error(INFO, "line numbers are not available\n"); -@@ -3068,6 +3603,8 @@ +@@ -3068,6 +3714,8 @@ readmem(symbol_value("sys_call_table"), KVADDR, sys_call_table, size, "sys_call_table", FAULT_ON_ERROR); @@ -17658,7 +18013,7 @@ if (spec) open_tmpfile(); -@@ -3080,13 +3617,17 @@ +@@ -3080,13 +3728,17 @@ "%3x " : "%3d ", i); fprintf(fp, "invalid sys_call_table entry: %lx (%s)\n", @@ -17678,7 +18033,7 @@ /* * For system call symbols whose first instruction is -@@ -3181,16 +3722,16 @@ +@@ -3181,16 +3833,16 @@ * "help -k" output */ void @@ -17698,7 +18053,7 @@ if (kt->flags & NO_MODULE_ACCESS) fprintf(fp, "%sNO_MODULE_ACCESS", others++ ? "|" : ""); if (kt->flags & TVEC_BASES_V1) -@@ -3225,6 +3766,30 @@ +@@ -3225,6 +3877,30 @@ fprintf(fp, "%sKMOD_V2", others++ ? "|" : ""); if (kt->flags & KALLSYMS_V2) fprintf(fp, "%sKALLSYMS_V2", others++ ? "|" : ""); @@ -17729,7 +18084,7 @@ fprintf(fp, ")\n"); fprintf(fp, " stext: %lx\n", kt->stext); fprintf(fp, " etext: %lx\n", kt->etext); -@@ -3234,8 +3799,10 @@ +@@ -3234,8 +3910,10 @@ fprintf(fp, " init_end: %lx\n", kt->init_end); fprintf(fp, " end: %lx\n", kt->end); fprintf(fp, " cpus: %d\n", kt->cpus); @@ -17740,7 +18095,7 @@ if (kt->display_bh == display_bh_1) fprintf(fp, " display_bh: display_bh_1()\n"); else if (kt->display_bh == display_bh_2) -@@ -3263,21 +3830,61 @@ +@@ -3263,21 +3941,89 @@ kt->kernel_version[1], kt->kernel_version[2]); fprintf(fp, " gcc_version: %d.%d.%d\n", kt->gcc_version[0], kt->gcc_version[1], kt->gcc_version[2]); @@ -17761,13 +18116,41 @@ + for (i = 0; i < nr_cpus; i++) fprintf(fp, "%s%.*lx ", (i % 4) == 0 ? "\n " : "", LONG_PRLEN, kt->__per_cpu_offset[i]); - fprintf(fp, "\n cpu_flags[NR_CPUS]:"); +- fprintf(fp, "\n cpu_flags[NR_CPUS]:"); - for (i = 0; i < NR_CPUS; i++) ++ fprintf(fp, "\n cpu_flags[NR_CPUS]: "); + for (i = 0; i < nr_cpus; i++) fprintf(fp, "%lx ", kt->cpu_flags[i]); -- fprintf(fp, "\n"); + fprintf(fp, "\n"); ++ fprintf(fp, " cpu_possible_map: "); ++ if (kernel_symbol_exists("cpu_possible_map")) { ++ for (i = 0; i < nr_cpus; i++) { ++ if (kt->cpu_flags[i] & POSSIBLE) ++ fprintf(fp, "%d ", i); ++ } ++ fprintf(fp, "\n"); ++ } else ++ fprintf(fp, "(does not exist)\n"); ++ fprintf(fp, " cpu_present_map: "); ++ if (kernel_symbol_exists("cpu_present_map")) { ++ for (i = 0; i < nr_cpus; i++) { ++ if (kt->cpu_flags[i] & PRESENT) ++ fprintf(fp, "%d ", i); ++ } ++ fprintf(fp, "\n"); ++ } else ++ fprintf(fp, "(does not exist)\n"); ++ fprintf(fp, " cpu_online_map: "); ++ if (kernel_symbol_exists("cpu_online_map")) { ++ for (i = 0; i < nr_cpus; i++) { ++ if (kt->cpu_flags[i] & ONLINE) ++ fprintf(fp, "%d ", i); ++ } ++ fprintf(fp, "\n"); ++ } else ++ fprintf(fp, "(does not exist)\n"); + others = 0; -+ fprintf(fp, "\n xen_flags: %lx (", kt->xen_flags); ++ fprintf(fp, " xen_flags: %lx (", kt->xen_flags); + if (kt->xen_flags & WRITABLE_PAGE_TABLES) + fprintf(fp, "%sWRITABLE_PAGE_TABLES", others++ ? 
"|" : ""); + if (kt->xen_flags & SHADOW_PAGE_TABLES) @@ -17807,7 +18190,7 @@ } /* -@@ -3314,7 +3921,7 @@ +@@ -3314,7 +4060,7 @@ if (machine_type("S390") || machine_type("S390X")) command_not_supported(); @@ -17816,7 +18199,7 @@ switch(c) { case 'd': -@@ -3344,6 +3951,17 @@ +@@ -3344,6 +4090,17 @@ kt->display_bh(); return; @@ -17834,7 +18217,7 @@ default: argerrs++; break; -@@ -3362,6 +3980,8 @@ +@@ -3362,6 +4119,8 @@ return; } @@ -17843,7 +18226,7 @@ while (args[optind]) { i = dtoi(args[optind], FAULT_ON_ERROR, NULL); if (i >= nr_irqs) -@@ -3402,13 +4022,22 @@ +@@ -3402,13 +4161,22 @@ readmem(irq_desc_addr + OFFSET(irq_desc_t_status), KVADDR, &status, sizeof(int), "irq_desc entry", FAULT_ON_ERROR); @@ -17868,7 +18251,7 @@ fprintf(fp, " IRQ: %d\n", irq); fprintf(fp, " STATUS: %x %s", status, status ? "(" : ""); others = 0; -@@ -3441,19 +4070,30 @@ +@@ -3441,19 +4209,30 @@ } else fprintf(fp, "%lx\n", handler); @@ -17906,7 +18289,7 @@ fprintf(fp, " startup: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); -@@ -3464,9 +4104,15 @@ +@@ -3464,9 +4243,15 @@ value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); @@ -17925,7 +18308,7 @@ fprintf(fp, " shutdown: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); -@@ -3494,9 +4140,14 @@ +@@ -3494,9 +4279,14 @@ fprintf(fp, "\n"); } @@ -17943,7 +18326,7 @@ fprintf(fp, " enable: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); -@@ -3507,9 +4158,14 @@ +@@ -3507,9 +4297,14 @@ value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); @@ -17961,7 +18344,7 @@ fprintf(fp, " disable: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); -@@ -3534,6 +4190,84 @@ +@@ -3534,6 +4329,84 @@ fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); @@ -18046,7 +18429,7 @@ } if (VALID_MEMBER(hw_interrupt_type_end)) { -@@ -3550,6 +4284,20 @@ +@@ -3550,6 +4423,20 @@ fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); @@ -18067,7 +18450,7 @@ } if (VALID_MEMBER(hw_interrupt_type_set_affinity)) { -@@ -3567,6 +4315,66 @@ +@@ -3567,6 +4454,66 @@ fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); @@ -18134,7 +18517,7 @@ } } -@@ -4146,7 +4954,7 @@ +@@ -4146,7 +5093,7 @@ } /* @@ -18143,7 +18526,28 @@ */ static void -@@ -4220,8 +5028,12 @@ +@@ -4169,8 +5116,20 @@ + */ + vec_root_size = (i = ARRAY_LENGTH(tvec_root_s_vec)) ? + i : get_array_length("tvec_root_s.vec", NULL, SIZE(list_head)); ++ if (!vec_root_size && ++ (i = get_array_length("tvec_root.vec", NULL, SIZE(list_head)))) ++ vec_root_size = i; ++ if (!vec_root_size) ++ error(FATAL, "cannot determine tvec_root.vec[] array size\n"); ++ + vec_size = (i = ARRAY_LENGTH(tvec_s_vec)) ? 
+ i : get_array_length("tvec_s.vec", NULL, SIZE(list_head)); ++ if (!vec_size && ++ (i = get_array_length("tvec.vec", NULL, SIZE(list_head)))) ++ vec_size = i; ++ if (!vec_size) ++ error(FATAL, "cannot determine tvec.vec[] array size\n"); ++ + vec = (ulong *)GETBUF(SIZE(list_head) * MAX(vec_root_size, vec_size)); + cpu = 0; + +@@ -4220,8 +5179,12 @@ else tvec_bases = symbol_value("per_cpu__tvec_bases"); @@ -18158,7 +18562,7 @@ sprintf(buf1, "%ld", highest); flen = MAX(strlen(buf1), strlen("JIFFIES")); -@@ -4320,6 +5132,11 @@ +@@ -4320,6 +5283,11 @@ else tvec_bases = symbol_value("per_cpu__tvec_bases"); @@ -18170,7 +18574,7 @@ tv[1].base = tvec_bases + OFFSET(tvec_t_base_s_tv1); tv[1].end = tv[1].base + SIZE(tvec_root_s); -@@ -4475,9 +5292,16 @@ +@@ -4475,9 +5443,16 @@ ld->start = vec[i]; ld->list_head_offset = offset; ld->end = vec_kvaddr; @@ -18188,7 +18592,7 @@ if (!timer_cnt) continue; timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong)); -@@ -4708,21 +5532,569 @@ +@@ -4708,21 +5683,606 @@ machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; @@ -18199,8 +18603,7 @@ /* - * For kernels containing cpu_online_map, count the bits. -+ * For kernels containing at least the cpu_online_map, use it -+ * to determine the cpu count. ++ * If it exists, return the number of cpus in the cpu_online_map. */ int get_cpus_online() @@ -18241,8 +18644,47 @@ +} + +/* -+ * For kernels containing at least the cpu_possible_map, used -+ * to determine the cpu count (of online and offline cpus). ++ * If it exists, return the number of cpus in the cpu_present_map. ++ */ ++int ++get_cpus_present() ++{ ++ int i, len, present; ++ struct gnu_request req; ++ char *buf; ++ ulong *maskptr; ++ ++ if (!symbol_exists("cpu_present_map")) ++ return 0; ++ ++ if (LKCD_KERNTYPES()) { ++ if ((len = STRUCT_SIZE("cpumask_t")) < 0) ++ error(FATAL, "cannot determine type cpumask_t\n"); ++ } else ++ len = get_symbol_type("cpu_present_map", NULL, &req) == ++ TYPE_CODE_UNDEF ? sizeof(ulong) : req.length; ++ buf = GETBUF(len); ++ ++ present = 0; ++ ++ if (readmem(symbol_value("cpu_present_map"), KVADDR, buf, len, ++ "cpu_present_map", RETURN_ON_ERROR)) { ++ ++ maskptr = (ulong *)buf; ++ for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) ++ present += count_bits_long(*maskptr); ++ ++ FREEBUF(buf); ++ if (CRASHDEBUG(1)) ++ error(INFO, "get_cpus_present: present: %d\n", present); ++ } ++ ++ return present; ++} + +- return count_bits_long(cpu_online_map); ++/* ++ * If it exists, return the number of cpus in the cpu_possible_map. + */ +int +get_cpus_possible() @@ -18505,8 +18947,7 @@ + return found; +} + - -- return count_bits_long(cpu_online_map); ++ + +/* + * Read the relevant IKCONFIG (In Kernel Config) data if available. 
@@ -18762,7 +19203,7 @@ + break; + } } ---- crash/lkcd_vmdump_v1.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/lkcd_vmdump_v1.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_vmdump_v1.h 2008-02-19 16:12:48.000000000 -0500 @@ -1,8 +1,8 @@ /* lkcd_vmdump_v1.h - core analysis suite @@ -18796,7 +19237,7 @@ /* the address of the current task */ struct task_struct *dh_current_task; ---- crash/xen_hyper_dump_tables.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/xen_hyper_dump_tables.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/xen_hyper_dump_tables.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,948 @@ +/* @@ -19747,7 +20188,7 @@ + fprintf(fp, "\n"); +} +#endif ---- crash/xendump.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/xendump.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/xendump.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,2848 @@ +/* @@ -22598,7 +23039,7 @@ +{ + return (XENDUMP_VALID() ? xd : NULL); +} ---- crash/lkcd_v8.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/lkcd_v8.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_v8.c 2008-01-04 09:42:08.000000000 -0500 @@ -23,9 +23,185 @@ #include "lkcd_dump_v8.h" /* REMIND */ @@ -22852,9 +23293,9 @@ set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } ---- crash/xen_hyper_defs.h.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/xen_hyper_defs.h 2008-01-04 09:42:08.000000000 -0500 -@@ -0,0 +1,970 @@ +--- crash/xen_hyper_defs.h.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/xen_hyper_defs.h 2008-04-23 14:45:42.000000000 -0400 +@@ -0,0 +1,973 @@ +/* + * xen_hyper_defs.h + * @@ -22921,6 +23362,9 @@ +#define DIRECTMAP_VIRT_START (0xffff830000000000) +#define DIRECTMAP_VIRT_END (0xffff840000000000) +#define PAGE_OFFSET_XEN_HYPER DIRECTMAP_VIRT_START ++#define XEN_VIRT_START (0xffff828c80000000) ++#define XEN_VIRT_ADDR(vaddr) \ ++ (((vaddr) >= XEN_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_START)) +#endif + +#ifdef IA64 @@ -23825,7 +24269,7 @@ + "Xen hypervisor mode not supported on this architecture\n" + +#endif ---- crash/lkcd_v7.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/lkcd_v7.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_v7.c 2008-01-04 09:42:08.000000000 -0500 @@ -89,7 +89,11 @@ ifd = 0; @@ -23840,8 +24284,8 @@ dump_index_size = (lkcd->memory_pages * sizeof(off_t)); lkcd->page_offsets = 0; strcpy(dumpfile_index_name, dumpfile); ---- crash/filesys.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/filesys.c 2008-01-21 13:43:40.000000000 -0500 +--- crash/filesys.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/filesys.c 2008-03-14 09:44:37.000000000 -0400 @@ -1,8 +1,8 @@ /* filesys.c - core analysis suite * @@ -23919,7 +24363,7 @@ if (pc->flags & KERNEL_DEBUG_QUERY) return; -@@ -228,24 +238,7 @@ +@@ -228,32 +238,37 @@ if (!strlen(kt->proc_version)) return; @@ -23943,9 +24387,10 @@ - if (found) { + if (match_file_string(pc->namelist, kt->proc_version, buffer)) { if (CRASHDEBUG(1)) { - fprintf(fp, "/proc/version:\n%s", kt->proc_version); +- fprintf(fp, "/proc/version:\n%s", kt->proc_version); ++ fprintf(fp, "/proc/version:\n%s\n", kt->proc_version); fprintf(fp, "%s:\n%s", pc->namelist, buffer); -@@ -253,7 +246,29 @@ + } return; } @@ -24049,7 +24494,16 @@ if (found) break; -@@ -797,30 +805,14 @@ +@@ -701,6 +709,8 @@ + + fclose(version); + ++ strip_linefeeds(kt->proc_version); ++ + return TRUE; + } + +@@ -797,30 +807,14 @@ static int verify_utsname(char *system_map) { @@ -24081,7 +24535,7 @@ return FALSE; if (extract_hex(buffer, &value, 
NULLCHAR, TRUE) && -@@ -1125,6 +1117,8 @@ +@@ -1125,6 +1119,8 @@ { int i; int c, found; @@ -24090,7 +24544,7 @@ char *spec_string; char buf1[BUFSIZE]; char buf2[BUFSIZE]; -@@ -1133,7 +1127,9 @@ +@@ -1133,7 +1129,9 @@ int flags = 0; int save_next; @@ -24101,7 +24555,7 @@ switch(c) { case 'i': -@@ -1144,6 +1140,19 @@ +@@ -1144,6 +1142,19 @@ flags |= MOUNT_PRINT_FILES; break; @@ -24121,7 +24575,7 @@ default: argerrs++; break; -@@ -1162,7 +1171,7 @@ +@@ -1162,7 +1173,7 @@ shift_string_left(spec_string, 2); open_tmpfile(); @@ -24130,7 +24584,7 @@ found = FALSE; rewind(pc->tmpfile); -@@ -1181,16 +1190,20 @@ +@@ -1181,16 +1192,20 @@ continue; for (i = 0; i < c; i++) { @@ -24154,7 +24608,7 @@ fprintf(fp, buf2); } found = FALSE; -@@ -1200,7 +1213,7 @@ +@@ -1200,7 +1215,7 @@ close_tmpfile(); } while (args[++optind]); } else @@ -24163,7 +24617,7 @@ } /* -@@ -1208,7 +1221,7 @@ +@@ -1208,7 +1223,7 @@ */ static void @@ -24172,7 +24626,7 @@ { ulong one_vfsmount_list; long sb_s_files; -@@ -1246,7 +1259,7 @@ +@@ -1246,7 +1261,7 @@ mount_cnt = 1; mntlist = &one_vfsmount_list; } else @@ -24181,7 +24635,7 @@ if (!strlen(mount_hdr)) { devlen = strlen("DEVNAME"); -@@ -1408,11 +1421,11 @@ +@@ -1408,11 +1423,11 @@ * Allocate and fill a list of the currently-mounted vfsmount pointers. */ static ulong * @@ -24195,7 +24649,7 @@ struct task_context *tc; ld = &list_data; -@@ -1421,9 +1434,26 @@ +@@ -1421,9 +1436,26 @@ if (symbol_exists("vfsmntlist")) { get_symbol_data("vfsmntlist", sizeof(void *), &ld->start); ld->end = symbol_value("vfsmntlist"); @@ -24224,7 +24678,7 @@ readmem(tc->task + OFFSET(task_struct_namespace), KVADDR, &namespace, sizeof(void *), "task namespace", -@@ -1497,7 +1527,7 @@ +@@ -1497,7 +1529,7 @@ goto nopath; if (VALID_MEMBER(file_f_vfsmnt)) { @@ -24233,7 +24687,7 @@ vfsmount_buf = GETBUF(SIZE(vfsmount)); for (m = found = 0, vfsmnt = mntlist; -@@ -1706,15 +1736,30 @@ +@@ -1706,15 +1738,30 @@ MEMBER_OFFSET_INIT(fs_struct_pwd, "fs_struct", "pwd"); MEMBER_OFFSET_INIT(fs_struct_rootmnt, "fs_struct", "rootmnt"); MEMBER_OFFSET_INIT(fs_struct_pwdmnt, "fs_struct", "pwdmnt"); @@ -24268,7 +24722,7 @@ MEMBER_OFFSET_INIT(dentry_d_inode, "dentry", "d_inode"); MEMBER_OFFSET_INIT(dentry_d_parent, "dentry", "d_parent"); MEMBER_OFFSET_INIT(dentry_d_covers, "dentry", "d_covers"); -@@ -1736,10 +1781,15 @@ +@@ -1736,10 +1783,15 @@ MEMBER_OFFSET_INIT(vfsmount_mnt_mountpoint, "vfsmount", "mnt_mountpoint"); MEMBER_OFFSET_INIT(namespace_root, "namespace", "root"); @@ -24284,7 +24738,7 @@ } else if (THIS_KERNEL_VERSION >= LINUX(2,4,20)) { if (CRASHDEBUG(2)) fprintf(fp, "hardwiring namespace stuff\n"); -@@ -1762,6 +1812,8 @@ +@@ -1762,6 +1814,8 @@ STRUCT_SIZE_INIT(umode_t, "umode_t"); STRUCT_SIZE_INIT(dentry, "dentry"); STRUCT_SIZE_INIT(files_struct, "files_struct"); @@ -24293,7 +24747,7 @@ STRUCT_SIZE_INIT(file, "file"); STRUCT_SIZE_INIT(inode, "inode"); STRUCT_SIZE_INIT(vfsmount, "vfsmount"); -@@ -1777,8 +1829,12 @@ +@@ -1777,8 +1831,12 @@ if (symbol_exists("height_to_maxindex")) { int tmp; @@ -24308,7 +24762,7 @@ STRUCT_SIZE_INIT(radix_tree_root, "radix_tree_root"); STRUCT_SIZE_INIT(radix_tree_node, "radix_tree_node"); MEMBER_OFFSET_INIT(radix_tree_root_height, -@@ -1998,8 +2054,9 @@ +@@ -1998,8 +2056,9 @@ open_files_dump(ulong task, int flags, struct reference *ref) { struct task_context *tc; @@ -24320,7 +24774,7 @@ ulong fs_struct_addr; char *dentry_buf, *fs_struct_buf; ulong root_dentry, pwd_dentry; -@@ -2027,6 +2084,8 @@ +@@ -2027,6 +2086,8 @@ BZERO(root_pathname, BUFSIZE); BZERO(pwd_pathname, BUFSIZE); 
files_struct_buf = GETBUF(SIZE(files_struct)); @@ -24329,7 +24783,7 @@ fill_task_struct(task); sprintf(files_header, " FD%s%s%s%s%s%s%sTYPE%sPATH\n", -@@ -2107,24 +2166,45 @@ +@@ -2107,24 +2168,45 @@ files_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_files)); @@ -24355,11 +24809,11 @@ + OFFSET(files_struct_max_fds)); + } + } -+ -+ if (VALID_MEMBER(files_struct_fdt)) { -+ fdtable_addr = ULONG(files_struct_buf + OFFSET(files_struct_fdt)); - if (!files_struct_addr || max_fdset == 0 || max_fds == 0) { ++ if (VALID_MEMBER(files_struct_fdt)) { ++ fdtable_addr = ULONG(files_struct_buf + OFFSET(files_struct_fdt)); ++ + if (fdtable_addr) { + readmem(fdtable_addr, KVADDR, fdtable_buf, + SIZE(fdtable), "fdtable buffer", FAULT_ON_ERROR); @@ -24385,7 +24839,7 @@ FREEBUF(files_struct_buf); return; } -@@ -2146,8 +2226,12 @@ +@@ -2146,8 +2228,12 @@ } } @@ -24400,7 +24854,7 @@ if (open_fds_addr) { if (VALID_MEMBER(files_struct_open_fds_init) && -@@ -2157,16 +2241,21 @@ +@@ -2157,16 +2243,21 @@ OFFSET(files_struct_open_fds_init), &open_fds, sizeof(fd_set)); else @@ -24425,7 +24879,7 @@ FREEBUF(files_struct_buf); return; } -@@ -2175,7 +2264,8 @@ +@@ -2175,7 +2266,8 @@ for (;;) { unsigned long set; i = j * __NFDBITS; @@ -24435,7 +24889,7 @@ break; set = open_fds.__fds_bits[j++]; while (set) { -@@ -2220,6 +2310,8 @@ +@@ -2220,6 +2312,8 @@ if (ref && (ref->cmdflags & FILES_REF_FOUND)) fprintf(fp, "\n"); @@ -24444,7 +24898,7 @@ FREEBUF(files_struct_buf); } -@@ -2494,6 +2586,20 @@ +@@ -2494,6 +2588,20 @@ } /* @@ -24465,7 +24919,7 @@ * get_pathname() fills in a pathname string for an ending dentry * See __d_path() in the kernel for help fixing problems. */ -@@ -3378,10 +3484,14 @@ +@@ -3378,10 +3486,14 @@ #define RADIX_TREE_MAP_SHIFT 6 #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) @@ -24480,7 +24934,7 @@ }; /* -@@ -3533,16 +3643,15 @@ +@@ -3533,16 +3645,15 @@ radix_tree_lookup(ulong root_rnode, ulong index, int height) { unsigned int shift; @@ -24501,7 +24955,7 @@ if (slot == NULL) return NULL; -@@ -3551,15 +3660,13 @@ +@@ -3551,15 +3662,13 @@ sizeof(struct radix_tree_node), "radix_tree_node struct", FAULT_ON_ERROR); @@ -24520,7 +24974,7 @@ } int -@@ -3575,3 +3682,29 @@ +@@ -3575,3 +3684,29 @@ return TRUE; } @@ -24550,8 +25004,8 @@ + + return found; +} ---- crash/task.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/task.c 2008-02-12 15:44:23.000000000 -0500 +--- crash/task.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/task.c 2008-04-04 09:58:25.000000000 -0400 @@ -1,8 +1,8 @@ /* task.c - core analysis suite * @@ -24563,7 +25017,7 @@ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by -@@ -27,11 +27,17 @@ +@@ -27,11 +27,18 @@ static void refresh_pidhash_task_table(void); static void refresh_pid_hash_task_table(void); static void refresh_hlist_task_table(void); @@ -24574,6 +25028,7 @@ static void refresh_context(ulong, ulong); static void parent_list(ulong); static void child_list(ulong); ++static void initialize_task_state(void); static void show_task_times(struct task_context *, ulong); +static void show_task_args(struct task_context *); +static void show_task_rlimit(struct task_context *); @@ -24581,7 +25036,7 @@ static int compare_start_time(const void *, const void *); static int start_time_timespec(void); static ulonglong convert_start_time(ulonglong, ulonglong); -@@ -46,11 +52,26 @@ +@@ -46,11 +53,26 @@ static void 
dump_runq(void); static void dump_runqueues(void); static void dump_prio_array(int, ulong, char *); @@ -24610,7 +25065,23 @@ static ulonglong task_blocked(ulong); static void translate_sigset(ulonglong); static ulonglong sigaction_mask(ulong); -@@ -151,8 +172,15 @@ +@@ -133,6 +155,15 @@ + "thread_struct", "eip"); + esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp, + "thread_struct", "esp"); ++ /* ++ * Handle x86/x86_64 merger. ++ */ ++ if (eip_offset == INVALID_OFFSET) ++ eip_offset = MEMBER_OFFSET_INIT(thread_struct_eip, ++ "thread_struct", "ip"); ++ if (esp_offset == INVALID_OFFSET) ++ esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp, ++ "thread_struct", "sp"); + ksp_offset = MEMBER_OFFSET_INIT(thread_struct_ksp, + "thread_struct", "ksp"); + ASSIGN_OFFSET(task_struct_thread_eip) = +@@ -151,8 +182,15 @@ get_idle_threads(&tt->idle_threads[0], kt->cpus); } @@ -24628,7 +25099,17 @@ if (VALID_MEMBER(task_struct_thread_info)) { MEMBER_OFFSET_INIT(thread_info_task, "thread_info", "task"); MEMBER_OFFSET_INIT(thread_info_cpu, "thread_info", "cpu"); -@@ -184,8 +212,13 @@ +@@ -170,6 +208,9 @@ + MEMBER_OFFSET_INIT(task_struct_processor, "task_struct", "processor"); + MEMBER_OFFSET_INIT(task_struct_p_pptr, "task_struct", "p_pptr"); + MEMBER_OFFSET_INIT(task_struct_parent, "task_struct", "parent"); ++ if (INVALID_MEMBER(task_struct_parent)) ++ MEMBER_OFFSET_INIT(task_struct_parent, "task_struct", ++ "real_parent"); + MEMBER_OFFSET_INIT(task_struct_has_cpu, "task_struct", "has_cpu"); + MEMBER_OFFSET_INIT(task_struct_cpus_runnable, + "task_struct", "cpus_runnable"); +@@ -184,8 +225,13 @@ MEMBER_OFFSET_INIT(task_struct_pids, "task_struct", "pids"); MEMBER_OFFSET_INIT(task_struct_last_run, "task_struct", "last_run"); MEMBER_OFFSET_INIT(task_struct_timestamp, "task_struct", "timestamp"); @@ -24643,7 +25124,7 @@ char buf[BUFSIZE]; strcpy(buf, "alias last ps -l"); alias_init(buf); -@@ -193,6 +226,17 @@ +@@ -193,6 +239,17 @@ MEMBER_OFFSET_INIT(pid_link_pid, "pid_link", "pid"); MEMBER_OFFSET_INIT(pid_hash_chain, "pid", "hash_chain"); @@ -24661,7 +25142,7 @@ MEMBER_OFFSET_INIT(pid_pid_chain, "pid", "pid_chain"); STRUCT_SIZE_INIT(task_struct, "task_struct"); -@@ -207,6 +251,8 @@ +@@ -207,6 +264,8 @@ MEMBER_OFFSET_INIT(signal_struct_count, "signal_struct", "count"); MEMBER_OFFSET_INIT(signal_struct_action, "signal_struct", "action"); @@ -24670,7 +25151,7 @@ MEMBER_OFFSET_INIT(k_sigaction_sa, "k_sigaction", "sa"); -@@ -217,17 +263,10 @@ +@@ -217,17 +276,10 @@ if (INVALID_MEMBER(sigpending_head)) MEMBER_OFFSET_INIT(sigpending_list, "sigpending", "list"); MEMBER_OFFSET_INIT(sigpending_signal, "sigpending", "signal"); @@ -24690,7 +25171,7 @@ STRUCT_SIZE_INIT(sighand_struct, "sighand_struct"); if (VALID_STRUCT(sighand_struct)) -@@ -249,6 +288,19 @@ +@@ -249,6 +301,19 @@ STRUCT_SIZE_INIT(cputime_t, "cputime_t"); @@ -24710,7 +25191,7 @@ if (VALID_MEMBER(runqueue_arrays)) MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct", "run_list"); -@@ -279,12 +331,6 @@ +@@ -279,12 +344,6 @@ error(FATAL, "pidhash and pid_hash both exist -- cannot distinquish between them\n"); @@ -24723,7 +25204,7 @@ if (symbol_exists("pid_hash") && symbol_exists("pidhash_shift")) { int pidhash_shift; -@@ -302,7 +348,33 @@ +@@ -302,7 +361,33 @@ tt->refresh_task_table = refresh_pid_hash_task_table; } else { tt->pidhash_addr = symbol_value("pid_hash"); @@ -24758,7 +25239,7 @@ } tt->flags |= PID_HASH; -@@ -343,6 +415,10 @@ +@@ -343,6 +428,10 @@ irqstacks_init(); get_active_set(); @@ -24769,7 +25250,7 @@ tt->refresh_task_table(); if (tt->flags & 
TASK_REFRESH_OFF) -@@ -353,8 +429,11 @@ +@@ -353,11 +442,17 @@ set_context(NO_TASK, active_pid); tt->this_task = pid_to_task(active_pid); } @@ -24782,7 +25263,13 @@ sort_context_array(); -@@ -987,9 +1066,7 @@ ++ if (pc->flags & SILENT) ++ initialize_task_state(); ++ + tt->flags |= TASK_INIT_DONE; + } + +@@ -987,9 +1082,7 @@ return; if (DUMPFILE()) { /* impossible */ @@ -24793,7 +25280,7 @@ if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } -@@ -1152,11 +1229,7 @@ +@@ -1152,11 +1245,7 @@ FREEBUF(pid_hash); @@ -24806,7 +25293,7 @@ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); -@@ -1176,12 +1249,14 @@ +@@ -1176,12 +1265,14 @@ { int i; ulong *pid_hash; @@ -24821,7 +25308,7 @@ struct task_context *tc; ulong curtask; ulong curpid; -@@ -1192,9 +1267,7 @@ +@@ -1192,9 +1283,7 @@ return; if (DUMPFILE()) { /* impossible */ @@ -24832,7 +25319,7 @@ if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } -@@ -1211,8 +1284,21 @@ +@@ -1211,8 +1300,21 @@ curpid = CURRENT_PID(); } @@ -24856,7 +25343,7 @@ pid_hash = (ulong *)GETBUF(plen * sizeof(void *)); -@@ -1228,6 +1314,16 @@ +@@ -1228,6 +1330,16 @@ * The zero'th (PIDTYPE_PID) entry is the hlist_head array * that we want. */ @@ -24873,7 +25360,7 @@ pidhash_array = pid_hash[0]; FREEBUF(pid_hash); -@@ -1345,6 +1441,15 @@ +@@ -1345,6 +1457,15 @@ } } @@ -24889,7 +25376,7 @@ BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); -@@ -1394,11 +1499,7 @@ +@@ -1394,11 +1515,7 @@ FREEBUF(pid_hash); FREEBUF(nodebuf); @@ -24902,7 +25389,7 @@ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); -@@ -1406,151 +1507,751 @@ +@@ -1406,148 +1523,748 @@ tt->retries = MAX(tt->retries, retries); } @@ -25080,17 +25567,12 @@ -sort_context_array(void) -{ - ulong curtask; -- + - curtask = CURRENT_TASK(); - qsort((void *)tt->context_array, (size_t)tt->running_tasks, - sizeof(struct task_context), sort_by_pid); - set_context(curtask, NO_PID); -} - --static int --sort_by_pid(const void *arg1, const void *arg2) --{ -- struct task_context *t1, *t2; + /* + * Get the idle threads first. + */ @@ -25103,19 +25585,16 @@ + DUMPFILE() ? "\n" : ""); + } -- t1 = (struct task_context *)arg1; -- t2 = (struct task_context *)arg2; +-static int +-sort_by_pid(const void *arg1, const void *arg2) +-{ +- struct task_context *t1, *t2; + for (i = 0; i < len; i++) { + if (!pid_hash[i]) + continue; -- if ((t1->pid == 0) && (t2->pid == 0)) -- return (t1->processor < t2->processor ? -1 : -- t1->processor == t2->processor ? 0 : 1); -- else -- return (t1->pid < t2->pid ? -1 : -- t1->pid == t2->pid ? 0 : 1); --} +- t1 = (struct task_context *)arg1; +- t2 = (struct task_context *)arg2; + if (!readmem(pid_hash[i], KVADDR, nodebuf, + SIZE(pid_link), "pid_hash node pid_link", RETURN_ON_ERROR|QUIET)) { + error(INFO, "\ncannot read pid_hash node pid_link\n"); @@ -25126,6 +25605,13 @@ + goto retry_pid_hash; + } +- if ((t1->pid == 0) && (t2->pid == 0)) +- return (t1->processor < t2->processor ? -1 : +- t1->processor == t2->processor ? 0 : 1); +- else +- return (t1->pid < t2->pid ? -1 : +- t1->pid == t2->pid ? 
0 : 1); +-} + kpp = pid_hash[i]; + next = ULONG(nodebuf + OFFSET(pid_link_pid)); + if (next) @@ -25133,16 +25619,13 @@ + pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); + pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); --static int --sort_by_last_run(const void *arg1, const void *arg2) --{ -- ulong task_last_run_stamp(ulong); -- struct task_context *t1, *t2; -- ulonglong lr1, lr2; + if (CRASHDEBUG(1)) + console("pid_hash[%d]: %lx task: %lx (node: %lx) next: %lx pprev: %lx\n", + i, pid_hash[i], next, kpp, pnext, pprev); -+ + +-static int +-sort_by_last_run(const void *arg1, const void *arg2) +-{ + while (next) { + if (!IS_TASK_ADDR(next)) { + error(INFO, @@ -25773,13 +26256,10 @@ +static int +sort_by_last_run(const void *arg1, const void *arg2) +{ -+ ulong task_last_run_stamp(ulong); -+ struct task_context *t1, *t2; -+ ulonglong lr1, lr2; - - t1 = (struct task_context *)arg1; - t2 = (struct task_context *)arg2; -@@ -1581,6 +2282,9 @@ + ulong task_last_run_stamp(ulong); + struct task_context *t1, *t2; + ulonglong lr1, lr2; +@@ -1581,6 +2298,9 @@ char * fill_task_struct(ulong task) { @@ -25789,7 +26269,7 @@ if (!IS_LAST_TASK_READ(task)) { if (!readmem(task, KVADDR, tt->task_struct, SIZE(task_struct), "fill_task_struct", -@@ -1632,6 +2336,9 @@ +@@ -1632,6 +2352,9 @@ bt->stackbase); } @@ -25799,7 +26279,7 @@ if (!IS_LAST_TASK_READ(bt->task)) { if (bt->stackbase == bt->task) { BCOPY(bt->stackbuf, tt->task_struct, SIZE(task_struct)); -@@ -1893,7 +2600,7 @@ +@@ -1893,7 +2616,7 @@ BZERO(&psinfo, sizeof(struct psinfo)); flag = 0; @@ -25808,7 +26288,7 @@ switch(c) { case 'k': -@@ -1907,39 +2614,55 @@ +@@ -1907,39 +2630,55 @@ break; /* @@ -25871,7 +26351,7 @@ default: argerrs++; break; -@@ -2020,6 +2743,18 @@ +@@ -2020,6 +2759,18 @@ show_last_run(tc); \ continue; \ } \ @@ -25890,7 +26370,7 @@ get_task_mem_usage(tc->task, tm); \ fprintf(fp, "%s", is_task_active(tc->task) ? "> " : " "); \ fprintf(fp, "%5ld %5ld %2s %s %3s", \ -@@ -2050,7 +2785,7 @@ +@@ -2050,7 +2801,7 @@ char buf2[BUFSIZE]; char buf3[BUFSIZE]; @@ -25899,7 +26379,7 @@ fprintf(fp, " PID PPID CPU %s ST %%MEM VSZ RSS COMM\n", flag & PS_KSTACKP ? -@@ -2076,6 +2811,8 @@ +@@ -2076,6 +2827,8 @@ return; } @@ -25908,7 +26388,7 @@ for (ac = 0; ac < psi->argc; ac++) { tm = &task_mem_usage; tc = FIRST_CONTEXT(); -@@ -2096,8 +2833,15 @@ +@@ -2096,8 +2849,15 @@ break; case PS_BY_CMD: @@ -25926,7 +26406,7 @@ break; } -@@ -2145,6 +2889,229 @@ +@@ -2145,6 +2905,229 @@ } /* @@ -26156,7 +26636,7 @@ * Put either the task_struct address or kernel stack pointer into a string. * If the kernel stack pointer is requested, piggy-back on top of the * back trace code to avoid having to deal with machine dependencies, -@@ -2229,11 +3196,8 @@ +@@ -2229,11 +3212,8 @@ use_kernel_timeval = STRUCT_EXISTS("kernel_timeval"); get_symbol_data("jiffies", sizeof(long), &jiffies); @@ -26170,7 +26650,7 @@ tsp = task_start_times; tc = tcp ? tcp : FIRST_CONTEXT(); -@@ -2330,8 +3294,7 @@ +@@ -2330,8 +3310,7 @@ for (i = 0, tsp = task_start_times; i < tasks; i++, tsp++) { print_task_header(fp, tsp->tc, 0); fprintf(fp, " RUN TIME: %s\n", symbol_exists("jiffies_64") ? 
@@ -26180,7 +26660,7 @@ convert_time(jiffies - tsp->start_time, buf1)); fprintf(fp, " START TIME: %llu\n", tsp->start_time); if (VALID_MEMBER(task_struct_times)) { -@@ -2397,15 +3360,33 @@ +@@ -2397,15 +3376,33 @@ static ulonglong convert_start_time(ulonglong start_time, ulonglong current) { @@ -26217,7 +26697,7 @@ default: break; } -@@ -2511,6 +3492,54 @@ +@@ -2511,6 +3508,54 @@ } /* @@ -26272,7 +26752,7 @@ * Return the first task found that belongs to a pid. */ ulong -@@ -2580,6 +3609,26 @@ +@@ -2580,6 +3625,26 @@ return NULL; } @@ -26299,7 +26779,7 @@ /* * Return the task_context structure of the first task found with a pid, -@@ -2816,20 +3865,39 @@ +@@ -2816,20 +3881,39 @@ /* @@ -26343,7 +26823,7 @@ } /* -@@ -2925,7 +3993,11 @@ +@@ -2925,7 +4009,11 @@ fprintf(fp, "COMMAND: \"%s\"\n", tc->comm); INDENT(indent); fprintf(fp, " TASK: %lx ", tc->task); @@ -26356,7 +26836,7 @@ fprintf(fp, "(1 of %d) ", cnt); if (tt->flags & THREAD_INFO) fprintf(fp, "[THREAD_INFO: %lx]", tc->thread_info); -@@ -2938,19 +4010,27 @@ +@@ -2938,19 +4026,27 @@ if (is_task_active(tc->task)) { if (machdep->flags & HWRESET) fprintf(fp, "(HARDWARE RESET)"); @@ -26387,7 +26867,7 @@ !SYSRQ_TASK(tc->task)) { fprintf(fp, "\n"); INDENT(indent); if (machine_type("S390") || machine_type("S390X")) -@@ -3006,6 +4086,10 @@ +@@ -3006,6 +4102,10 @@ cnt++ ? "" : "\n", tc->comm); break; } @@ -26398,7 +26878,7 @@ } -@@ -3182,6 +4266,22 @@ +@@ -3182,6 +4282,22 @@ return flags; } @@ -26421,7 +26901,7 @@ ulonglong task_last_run(ulong task) { -@@ -3197,6 +4297,10 @@ +@@ -3197,6 +4313,10 @@ } else if (VALID_MEMBER(task_struct_timestamp)) timestamp = tt->last_task_read ? ULONGLONG(tt->task_struct + OFFSET(task_struct_timestamp)) : 0; @@ -26432,7 +26912,7 @@ return timestamp; } -@@ -3368,6 +4472,12 @@ +@@ -3368,6 +4488,12 @@ task = NO_TASK; tc = FIRST_CONTEXT(); @@ -26445,7 +26925,7 @@ if (symbol_exists("panic_threads") && symbol_exists("panicmsg") && symbol_exists("panic_processor")) { -@@ -3411,6 +4521,9 @@ +@@ -3411,6 +4537,9 @@ use_task_0: @@ -26455,7 +26935,7 @@ tt->flags |= PANIC_TASK_NOT_FOUND; tc = FIRST_CONTEXT(); return(tc->task); -@@ -3448,50 +4561,74 @@ +@@ -3448,50 +4577,74 @@ int msg_found; BZERO(buf, BUFSIZE); @@ -26567,7 +27047,7 @@ return(buf); } -@@ -3517,7 +4654,7 @@ +@@ -3517,7 +4670,7 @@ BZERO(&foreach_data, sizeof(struct foreach_data)); fd = &foreach_data; @@ -26576,7 +27056,7 @@ switch(c) { case 'R': -@@ -3560,6 +4697,10 @@ +@@ -3560,6 +4713,10 @@ fd->flags |= FOREACH_r_FLAG; break; @@ -26587,7 +27067,7 @@ case 't': fd->flags |= FOREACH_t_FLAG; break; -@@ -3754,12 +4895,14 @@ +@@ -3754,12 +4911,14 @@ foreach(struct foreach_data *fd) { int i, j, k, a; @@ -26603,7 +27083,7 @@ struct bt_info bt_info, *bt; /* -@@ -3797,6 +4940,8 @@ +@@ -3797,6 +4956,8 @@ fd->reference ? 
fd->reference : ""); } @@ -26612,7 +27092,7 @@ for (k = 0; k < fd->keys; k++) { switch(fd->keyword_array[k]) { -@@ -3881,6 +5026,14 @@ +@@ -3881,6 +5042,14 @@ error(FATAL, "sig: -l and -s options are not applicable\n"); } @@ -26627,7 +27107,7 @@ break; case FOREACH_TEST: -@@ -3941,7 +5094,7 @@ +@@ -3941,7 +5110,7 @@ if (fd->reference) { BZERO(ref, sizeof(struct reference)); ref->str = fd->reference; @@ -26636,7 +27116,7 @@ print_task_header(fp, tc, subsequent++); for (k = 0; k < fd->keys; k++) { -@@ -3962,7 +5115,12 @@ +@@ -3962,7 +5131,12 @@ bt->flags |= BT_SYMBOLIC_ARGS; if (fd->flags & FOREACH_t_FLAG) bt->flags |= BT_TEXT_SYMBOLS; @@ -26650,7 +27130,7 @@ bt->flags |= BT_OLD_BACK_TRACE; if (fd->flags & FOREACH_e_FLAG) bt->flags |= BT_EFRAME_SEARCH; -@@ -4010,8 +5168,14 @@ +@@ -4010,8 +5184,14 @@ case FOREACH_SIG: pc->curcmd = "sig"; @@ -26667,7 +27147,7 @@ break; case FOREACH_SET: -@@ -4075,6 +5239,11 @@ +@@ -4075,6 +5255,11 @@ nlm_files_dump(); } break; @@ -26679,7 +27159,7 @@ } } -@@ -4161,7 +5330,7 @@ +@@ -4161,7 +5346,7 @@ fd = &foreach_data; fd->keys = 1; fd->keyword_array[0] = FOREACH_BT; @@ -26688,7 +27168,7 @@ dietask = lasttask = NO_TASK; -@@ -4188,6 +5357,12 @@ +@@ -4188,6 +5373,12 @@ break; } @@ -26701,7 +27181,7 @@ if (strstr(buf, " die at ")) { switch (dietask) { -@@ -4211,6 +5386,10 @@ +@@ -4211,6 +5402,10 @@ if (dietask == (NO_TASK+1)) error(WARNING, "multiple active tasks have called die\n\n"); @@ -26712,7 +27192,7 @@ found_panic_task: populate_panic_threads(); -@@ -4229,6 +5408,9 @@ +@@ -4229,6 +5424,9 @@ } } @@ -26722,7 +27202,7 @@ return NULL; } -@@ -4240,25 +5422,28 @@ +@@ -4240,25 +5438,28 @@ { ulong task; @@ -26762,7 +27242,7 @@ return NO_TASK; } -@@ -4298,14 +5483,17 @@ +@@ -4298,14 +5499,17 @@ tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { @@ -26782,7 +27262,7 @@ tt->panic_threads[0] = get_dumpfile_panic_task(); } -@@ -4331,7 +5519,7 @@ +@@ -4331,7 +5535,7 @@ void dump_task_table(int verbose) { @@ -26791,7 +27271,7 @@ struct task_context *tc; char buf[BUFSIZE]; int others, wrap, flen; -@@ -4363,6 +5551,12 @@ +@@ -4363,6 +5567,12 @@ fprintf(fp, "refresh_pid_hash_task_table()\n"); else if (tt->refresh_task_table == refresh_hlist_task_table) fprintf(fp, "refresh_hlist_task_table()\n"); @@ -26804,7 +27284,7 @@ else fprintf(fp, "%lx\n", (ulong)tt->refresh_task_table); -@@ -4411,6 +5605,9 @@ +@@ -4411,6 +5621,9 @@ if (tt->flags & NO_TIMESPEC) sprintf(&buf[strlen(buf)], "%sNO_TIMESPEC", others++ ? 
"|" : ""); @@ -26814,7 +27294,7 @@ sprintf(&buf[strlen(buf)], ")"); if (strlen(buf) > 54) -@@ -4436,14 +5633,16 @@ +@@ -4436,14 +5649,16 @@ fprintf(fp, " last_mm_read: %lx\n", tt->last_mm_read); fprintf(fp, " task_struct: %lx\n", (ulong)tt->task_struct); fprintf(fp, " mm_struct: %lx\n", (ulong)tt->mm_struct); @@ -26833,7 +27313,7 @@ if ((i % wrap) == 0) fprintf(fp, "\n "); fprintf(fp, "%.*lx ", flen, tt->panic_threads[i]); -@@ -4451,7 +5650,7 @@ +@@ -4451,7 +5666,7 @@ fprintf(fp, "\n"); fprintf(fp, " panic_ksp:"); @@ -26842,7 +27322,7 @@ if ((i % wrap) == 0) fprintf(fp, "\n "); fprintf(fp, "%.*lx ", flen, tt->panic_ksp[i]); -@@ -4459,7 +5658,7 @@ +@@ -4459,7 +5674,7 @@ fprintf(fp, "\n"); fprintf(fp, " hardirq_ctx:"); @@ -26851,7 +27331,7 @@ if ((i % wrap) == 0) fprintf(fp, "\n "); fprintf(fp, "%.*lx ", flen, tt->hardirq_ctx[i]); -@@ -4467,7 +5666,7 @@ +@@ -4467,7 +5682,7 @@ fprintf(fp, "\n"); fprintf(fp, " hardirq_tasks:"); @@ -26860,7 +27340,7 @@ if ((i % wrap) == 0) fprintf(fp, "\n "); fprintf(fp, "%.*lx ", flen, tt->hardirq_tasks[i]); -@@ -4475,7 +5674,7 @@ +@@ -4475,7 +5690,7 @@ fprintf(fp, "\n"); fprintf(fp, " softirq_ctx:"); @@ -26869,7 +27349,7 @@ if ((i % wrap) == 0) fprintf(fp, "\n "); fprintf(fp, "%.*lx ", flen, tt->softirq_ctx[i]); -@@ -4483,7 +5682,7 @@ +@@ -4483,7 +5698,7 @@ fprintf(fp, "\n"); fprintf(fp, " softirq_tasks:"); @@ -26878,7 +27358,7 @@ if ((i % wrap) == 0) fprintf(fp, "\n "); fprintf(fp, "%.*lx ", flen, tt->softirq_tasks[i]); -@@ -4491,7 +5690,7 @@ +@@ -4491,7 +5706,7 @@ fprintf(fp, "\n"); fprintf(fp, " idle_threads:"); @@ -26887,7 +27367,7 @@ if ((i % wrap) == 0) fprintf(fp, "\n "); fprintf(fp, "%.*lx ", flen, tt->idle_threads[i]); -@@ -4499,7 +5698,7 @@ +@@ -4499,7 +5714,7 @@ fprintf(fp, "\n"); fprintf(fp, " active_set:"); @@ -26896,7 +27376,7 @@ if ((i % wrap) == 0) fprintf(fp, "\n "); fprintf(fp, "%.*lx ", flen, tt->active_set[i]); -@@ -4546,6 +5745,9 @@ +@@ -4546,6 +5761,9 @@ if ((tc->pid == 0) && !STREQ(tc->comm, pc->program_name)) return TRUE; @@ -26906,7 +27386,7 @@ if (IS_ZOMBIE(task) || IS_EXITING(task)) return FALSE; -@@ -4641,6 +5843,16 @@ +@@ -4641,6 +5859,16 @@ cnt++; else BZERO(tasklist, sizeof(ulong) * NR_CPUS); @@ -26923,7 +27403,7 @@ } if (runqbuf) -@@ -4734,14 +5946,38 @@ +@@ -4734,14 +5962,38 @@ } else if (symbol_exists("per_cpu__runqueues")) { runq = symbol_value("per_cpu__runqueues"); per_cpu = TRUE; @@ -26964,7 +27444,7 @@ for (i = 0; i < kt->cpus; i++) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { runq = symbol_value("per_cpu__runqueues") + -@@ -4759,7 +5995,8 @@ +@@ -4759,7 +6011,8 @@ cnt++; } } else if (VALID_MEMBER(runqueue_curr)) { @@ -26974,7 +27454,7 @@ readmem(runq, KVADDR, runqbuf, SIZE(runqueue), "(old) runqueues curr", FAULT_ON_ERROR); -@@ -4799,23 +6036,55 @@ +@@ -4799,23 +6052,55 @@ tt->flags &= ~ACTIVE_SET; } @@ -27045,7 +27525,7 @@ while (fgets(buf, BUFSIZE, pc->tmpfile)) { \ if (strstr(buf, " die+")) { \ switch (die_task) \ -@@ -4833,12 +6102,30 @@ +@@ -4833,12 +6118,30 @@ { \ case NO_TASK: \ panic_task = task; \ @@ -27076,7 +27556,7 @@ } /* -@@ -4850,11 +6137,14 @@ +@@ -4850,11 +6153,14 @@ int i, j, found; ulong task; char buf[BUFSIZE]; @@ -27093,7 +27573,7 @@ for (i = 0; i < NR_CPUS; i++) { if (!(task = tt->active_set[i])) -@@ -4867,15 +6157,16 @@ +@@ -4867,15 +6173,16 @@ if ((tp = fill_task_struct(task))) { if ((tc = store_context(NULL, task, tp))) tt->running_tasks++; @@ -27112,7 +27592,7 @@ close_tmpfile(); } -@@ -4903,7 +6194,7 @@ +@@ -4903,7 +6210,7 @@ raw_stack_dump(tt->hardirq_ctx[i], SIZE(thread_union)); 
rewind(pc->tmpfile); @@ -27121,7 +27601,7 @@ close_tmpfile(); } -@@ -4930,7 +6221,7 @@ +@@ -4930,7 +6237,7 @@ raw_stack_dump(tt->softirq_ctx[i], SIZE(thread_union)); rewind(pc->tmpfile); @@ -27130,7 +27610,7 @@ close_tmpfile(); } -@@ -4938,6 +6229,28 @@ +@@ -4938,6 +6245,28 @@ RESOLVE_PANIC_AND_DIE_CALLERS(); } @@ -27159,7 +27639,7 @@ return NO_TASK; } -@@ -4997,6 +6310,11 @@ +@@ -4997,6 +6326,11 @@ ulong *tlist; struct task_context *tc; @@ -27171,7 +27651,7 @@ if (VALID_MEMBER(runqueue_arrays)) { dump_runqueues(); return; -@@ -5017,120 +6335,370 @@ +@@ -5017,120 +6351,370 @@ error(FATAL, "cannot determine run queue structures being used\n"); @@ -27618,7 +28098,7 @@ offset = OFFSET(prio_array_queue) + (i * SIZE(list_head)); kvaddr = k_prio_array + offset; uvaddr = (ulong)u_prio_array + offset; -@@ -5143,9 +6711,6 @@ +@@ -5143,9 +6727,6 @@ if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr)) continue; @@ -27628,7 +28108,7 @@ fprintf(fp, " [%3d] ", i); BZERO(ld, sizeof(struct list_data)); -@@ -5155,8 +6720,7 @@ +@@ -5155,8 +6736,7 @@ hq_open(); cnt = do_list(ld); hq_close(); @@ -27638,7 +28118,7 @@ cnt = retrieve_list(tlist, cnt); for (c = 0; c < cnt; c++) { if (!(tc = task_to_context(tlist[c]))) -@@ -5174,6 +6738,9 @@ +@@ -5174,6 +6754,9 @@ #define _NSIG_BPW machdep->bits #define _NSIG_WORDS (_NSIG / _NSIG_BPW) @@ -27648,7 +28128,7 @@ static struct signame { char *name; char *altname; -@@ -5209,23 +6776,56 @@ +@@ -5209,23 +6792,56 @@ /* 28 */ {"SIGWINCH", NULL}, /* 29 */ {"SIGIO", "SIGPOLL"}, /* 30 */ {"SIGPWR", NULL}, @@ -27714,7 +28194,7 @@ fprintf(fp, "\n"); } } -@@ -5236,8 +6836,7 @@ +@@ -5236,8 +6852,7 @@ static void translate_sigset(ulonglong sigset) { @@ -27724,7 +28204,7 @@ char buf[BUFSIZE]; if (!sigset) { -@@ -5246,21 +6845,42 @@ +@@ -5246,21 +6861,42 @@ } len = 0; @@ -27773,7 +28253,7 @@ } fprintf(fp, "\n"); } -@@ -5290,13 +6910,14 @@ +@@ -5290,13 +6926,14 @@ struct task_context *tc; ulong *tasklist; char *siglist; @@ -27789,7 +28269,7 @@ switch(c) { case 's': -@@ -5314,6 +6935,10 @@ +@@ -5314,6 +6951,10 @@ signame_list(); return; @@ -27800,7 +28280,7 @@ default: argerrs++; break; -@@ -5360,10 +6985,65 @@ +@@ -5360,10 +7001,65 @@ tasklist[tcnt++] = CURRENT_TASK(); for (c = 0; c < tcnt; c++) { @@ -27868,7 +28348,7 @@ } /* -@@ -5381,7 +7061,7 @@ +@@ -5381,7 +7077,7 @@ else { if (!(flags & FOREACH_TASK)) print_task_header(fp, tc, 0); @@ -27877,7 +28357,7 @@ } } -@@ -5401,40 +7081,34 @@ +@@ -5401,40 +7097,34 @@ * Dump all signal-handling data for a task. 
*/ static void @@ -27933,7 +28413,7 @@ if (VALID_MEMBER(task_struct_sig)) signal_struct = ULONG(tt->task_struct + -@@ -5443,143 +7117,259 @@ +@@ -5443,143 +7133,259 @@ signal_struct = ULONG(tt->task_struct + OFFSET(task_struct_signal)); @@ -28312,7 +28792,7 @@ while (sigqueue) { readmem(sigqueue, KVADDR, signal_buf, SIZE_OPTION(signal_queue, sigqueue), -@@ -5597,14 +7387,17 @@ +@@ -5597,14 +7403,17 @@ OFFSET(siginfo_si_signo)); } @@ -28332,7 +28812,7 @@ } /* -@@ -5614,12 +7407,13 @@ +@@ -5614,12 +7423,13 @@ */ static ulonglong @@ -28348,7 +28828,7 @@ if (!tt->last_task_read) return 0; -@@ -5633,6 +7427,10 @@ +@@ -5633,6 +7443,10 @@ OFFSET(task_struct_signal)); } else return 0; @@ -28359,8 +28839,8 @@ switch (_NSIG_WORDS) { ---- crash/memory.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/memory.c 2008-01-11 11:58:54.000000000 -0500 +--- crash/memory.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/memory.c 2008-04-03 15:19:08.000000000 -0400 @@ -1,8 +1,8 @@ /* memory.c - core analysis suite * @@ -28372,7 +28852,7 @@ * Copyright (C) 2002 Silicon Graphics, Inc. * * This program is free software; you can redistribute it and/or modify -@@ -35,34 +35,46 @@ +@@ -35,34 +35,47 @@ ulong order; ulong slabsize; ulong num_slabs; @@ -28409,6 +28889,7 @@ + ulong addr; + ulong size; + } *vmlist; ++ ulong container; }; static char *memtype_string(int, int); @@ -28420,7 +28901,7 @@ static void dump_free_pages(struct meminfo *); static int dump_zone_page_usage(void); static void dump_multidimensional_free_pages(struct meminfo *); -@@ -72,19 +84,27 @@ +@@ -72,19 +85,27 @@ static void dump_page_hash_table(struct meminfo *); static void kmem_search(struct meminfo *); static void kmem_cache_init(void); @@ -28435,9 +28916,10 @@ static void dump_kmem_cache_percpu_v2(struct meminfo *); +static void dump_kmem_cache_slub(struct meminfo *); static void dump_kmem_cache_info_v2(struct meminfo *); +-static char *vaddr_to_kmem_cache(ulong, char *); +static void kmem_cache_list_slub(void); -+static ulong get_cpu_slab_ptr(struct meminfo *, int); - static char *vaddr_to_kmem_cache(ulong, char *); ++static ulong get_cpu_slab_ptr(struct meminfo *, int, ulong *); ++static char *vaddr_to_kmem_cache(ulong, char *, int); static ulong vaddr_to_slab(ulong); static void do_slab_chain(int, struct meminfo *); static void do_slab_chain_percpu_v1(long, struct meminfo *); @@ -28448,7 +28930,7 @@ static void save_slab_data(struct meminfo *); static int slab_data_saved(struct meminfo *); static void dump_saved_slab_data(void); -@@ -97,7 +117,9 @@ +@@ -97,7 +118,9 @@ static void gather_slab_free_list_percpu(struct meminfo *); static void gather_cpudata_list_v1(struct meminfo *); static void gather_cpudata_list_v2(struct meminfo *); @@ -28458,15 +28940,17 @@ static void gather_slab_cached_count(struct meminfo *); static void dump_slab_objects(struct meminfo *); static void dump_slab_objects_percpu(struct meminfo *); -@@ -110,6 +132,7 @@ +@@ -110,6 +133,9 @@ static void search(ulong, ulong, ulong, int, ulong *, int); static int next_upage(struct task_context *, ulong, ulong *); static int next_kpage(ulong, ulong *); -+static ulong next_vmlist_vaddr(struct meminfo *, ulong); ++static ulong last_vmalloc_address(void); ++static ulong next_vmlist_vaddr(ulong); ++static int next_identity_mapping(ulong, ulong *); static int vm_area_page_dump(ulong, ulong, ulong, ulong, void *, struct reference *); static int dump_swap_info(ulong, ulong *, ulong *); -@@ -118,15 +141,44 @@ +@@ -118,15 +144,45 @@ static char *vma_file_offset(ulong, ulong, 
char *); static ssize_t read_dev_kmem(ulong, char *, long); static void dump_memory_nodes(int); @@ -28508,20 +28992,22 @@ +static ulong compound_head(ulong); +static long count_partial(ulong); +static ulong get_freepointer(struct meminfo *, void *); ++static int count_free_objects(struct meminfo *, ulong); +char *is_slab_page(struct meminfo *, char *); +static void do_node_lists_slub(struct meminfo *, ulong, int); /* * Memory display modes specific to this file. -@@ -142,6 +194,7 @@ +@@ -142,6 +198,8 @@ #define DECIMAL (0x100) #define UDECIMAL (0x200) #define ASCII_ENDLINE (0x400) +#define NO_ASCII (0x800) ++#define SLAB_CACHE (0x1000) static ulong DISPLAY_DEFAULT; -@@ -182,6 +235,10 @@ +@@ -182,6 +240,10 @@ MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap"); MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss"); @@ -28532,7 +29018,7 @@ MEMBER_OFFSET_INIT(mm_struct_total_vm, "mm_struct", "total_vm"); MEMBER_OFFSET_INIT(mm_struct_start_code, "mm_struct", "start_code"); MEMBER_OFFSET_INIT(vm_area_struct_vm_mm, "vm_area_struct", "vm_mm"); -@@ -222,7 +279,16 @@ +@@ -222,7 +284,16 @@ MEMBER_OFFSET_INIT(page_count, "page", "_count"); MEMBER_OFFSET_INIT(page_flags, "page", "flags"); MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); @@ -28549,7 +29035,7 @@ MEMBER_OFFSET_INIT(page_buffers, "page", "buffers"); MEMBER_OFFSET_INIT(page_lru, "page", "lru"); MEMBER_OFFSET_INIT(page_pte, "page", "pte"); -@@ -270,6 +336,7 @@ +@@ -270,6 +341,7 @@ STRUCT_SIZE_INIT(kmem_slab_s, "kmem_slab_s"); STRUCT_SIZE_INIT(slab_s, "slab_s"); STRUCT_SIZE_INIT(slab, "slab"); @@ -28557,7 +29043,7 @@ STRUCT_SIZE_INIT(pgd_t, "pgd_t"); if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) { -@@ -310,17 +377,49 @@ +@@ -310,17 +382,49 @@ !VALID_STRUCT(slab_s) && VALID_STRUCT(slab)) { vt->flags |= PERCPU_KMALLOC_V2; @@ -28618,7 +29104,7 @@ MEMBER_OFFSET_INIT(slab_list, "slab", "list"); MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); -@@ -330,10 +429,6 @@ +@@ -330,10 +434,6 @@ MEMBER_OFFSET_INIT(array_cache_limit, "array_cache", "limit"); STRUCT_SIZE_INIT(array_cache, "array_cache"); @@ -28629,7 +29115,7 @@ MEMBER_OFFSET_INIT(kmem_list3_slabs_partial, "kmem_list3", "slabs_partial"); MEMBER_OFFSET_INIT(kmem_list3_slabs_full, -@@ -343,6 +438,47 @@ +@@ -343,6 +443,48 @@ MEMBER_OFFSET_INIT(kmem_list3_free_objects, "kmem_list3", "free_objects"); MEMBER_OFFSET_INIT(kmem_list3_shared, "kmem_list3", "shared"); @@ -28650,6 +29136,7 @@ + MEMBER_OFFSET_INIT(kmem_cache_cpu_slab, "kmem_cache", "cpu_slab"); + MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list"); + MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name"); ++ MEMBER_OFFSET_INIT(kmem_cache_flags, "kmem_cache", "flags"); + MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist"); + MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page"); + MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node"); @@ -28677,7 +29164,7 @@ } else { MEMBER_OFFSET_INIT(kmem_cache_s_c_nextp, "kmem_cache_s", "c_nextp"); -@@ -381,6 +517,22 @@ +@@ -381,6 +523,22 @@ "kmem_slab_s", "s_magic"); } @@ -28700,7 +29187,7 @@ if (machdep->init_kernel_pgd) machdep->init_kernel_pgd(); else if (symbol_exists("swapper_pg_dir")) { -@@ -415,10 +567,17 @@ +@@ -415,10 +573,17 @@ error(FATAL, "no swapper_pg_dir or cpu_pgd symbols exist?\n"); get_symbol_data("high_memory", sizeof(ulong), &vt->high_memory); @@ -28720,7 +29207,7 @@ vt->vmalloc_start = 
machdep->vmalloc_start(); if (IS_VMALLOC_ADDR(vt->mem_map)) vt->flags |= V_MEM_MAP; -@@ -478,7 +637,6 @@ +@@ -478,7 +643,6 @@ STRUCT_SIZE_INIT(free_area_struct, "free_area_struct"); STRUCT_SIZE_INIT(zone, "zone"); STRUCT_SIZE_INIT(zone_struct, "zone_struct"); @@ -28728,7 +29215,7 @@ STRUCT_SIZE_INIT(kmem_bufctl_t, "kmem_bufctl_t"); STRUCT_SIZE_INIT(swap_info_struct, "swap_info_struct"); STRUCT_SIZE_INIT(mm_struct, "mm_struct"); -@@ -488,13 +646,20 @@ +@@ -488,13 +652,20 @@ if (VALID_STRUCT(pglist_data)) { vt->flags |= ZONES; @@ -28752,7 +29239,7 @@ MEMBER_OFFSET_INIT(pglist_data_node_zones, "pglist_data", "node_zones"); -@@ -524,6 +689,7 @@ +@@ -524,6 +695,7 @@ ARRAY_LENGTH_INIT(vt->nr_zones, pglist_data_node_zones, "pglist_data.node_zones", NULL, SIZE_OPTION(zone_struct, zone)); @@ -28760,7 +29247,7 @@ if (VALID_STRUCT(zone_struct)) { MEMBER_OFFSET_INIT(zone_struct_free_pages, -@@ -539,6 +705,8 @@ +@@ -539,6 +711,8 @@ if (INVALID_MEMBER(zone_struct_size)) MEMBER_OFFSET_INIT(zone_struct_memsize, "zone_struct", "memsize"); @@ -28769,7 +29256,7 @@ MEMBER_OFFSET_INIT(zone_struct_zone_start_paddr, "zone_struct", "zone_start_paddr"); MEMBER_OFFSET_INIT(zone_struct_zone_start_mapnr, -@@ -565,8 +733,17 @@ +@@ -565,8 +739,17 @@ vt->dump_free_pages = dump_free_pages_zones_v1; } else if (VALID_STRUCT(zone)) { @@ -28789,7 +29276,7 @@ MEMBER_OFFSET_INIT(zone_free_area, "zone", "free_area"); MEMBER_OFFSET_INIT(zone_zone_pgdat, -@@ -579,12 +756,23 @@ +@@ -579,12 +762,23 @@ "zone", "zone_start_pfn"); MEMBER_OFFSET_INIT(zone_spanned_pages, "zone", "spanned_pages"); @@ -28813,7 +29300,7 @@ ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_free_area, "zone.free_area", NULL, SIZE(free_area)); vt->dump_free_pages = dump_free_pages_zones_v2; -@@ -603,6 +791,8 @@ +@@ -603,6 +797,8 @@ vt->dump_kmem_cache = dump_kmem_cache_percpu_v1; else if (vt->flags & PERCPU_KMALLOC_V2) vt->dump_kmem_cache = dump_kmem_cache_percpu_v2; @@ -28822,7 +29309,7 @@ else vt->dump_kmem_cache = dump_kmem_cache; -@@ -640,13 +830,7 @@ +@@ -640,13 +836,7 @@ kmem_cache_init(); PG_reserved_flag_init(); @@ -28837,16 +29324,37 @@ } /* -@@ -685,7 +869,7 @@ +@@ -685,7 +875,7 @@ memtype = KVADDR; count = -1; - while ((c = getopt(argcnt, args, "e:pudDuso:81:3:6:")) != EOF) { -+ while ((c = getopt(argcnt, args, "xme:pfudDuso:81:3:6:")) != EOF) { ++ while ((c = getopt(argcnt, args, "xme:pfudDusSo:81:3:6:")) != EOF) { switch(c) { case '8': -@@ -748,12 +932,12 @@ +@@ -731,12 +921,15 @@ + break; + + case 's': +- if (flag & DISPLAY_DEFAULT) ++ case 'S': ++ if (flag & DISPLAY_DEFAULT) { + flag |= SYMBOLIC; +- else { +- error(INFO, +- "-s only allowed with %d-bit display\n", +- DISPLAY_DEFAULT == DISPLAY_64 ? ++ if (c == 'S') ++ flag |= SLAB_CACHE; ++ } else { ++ error(INFO, "-%c option" ++ " is only allowed with %d-bit display\n", ++ c, DISPLAY_DEFAULT == DISPLAY_64 ? + 64 : 32); + argerrs++; + } +@@ -748,12 +941,12 @@ break; case 'p': @@ -28861,7 +29369,7 @@ memtype = UVADDR; break; -@@ -767,6 +951,25 @@ +@@ -767,6 +960,25 @@ flag |= UDECIMAL; break; @@ -28887,7 +29395,7 @@ default: argerrs++; break; -@@ -830,7 +1033,7 @@ +@@ -830,7 +1042,7 @@ error(WARNING, "ending address ignored when count is specified\n"); @@ -28896,7 +29404,7 @@ flag |= ASCII_ENDLINE; if (memtype == KVADDR) { -@@ -839,7 +1042,6 @@ +@@ -839,7 +1051,6 @@ } display_memory(addr, count, flag, memtype); @@ -28904,7 +29412,15 @@ } /* -@@ -903,6 +1105,12 @@ +@@ -884,6 +1095,7 @@ + char ch; + int linelen; + char buf[BUFSIZE]; ++ char slab[BUFSIZE]; + int ascii_start; + char *hex_64_fmt = BITS32() ? 
"%.*llx " : "%.*lx "; + char *dec_64_fmt = BITS32() ? "%12lld " : "%15ld "; +@@ -903,6 +1115,12 @@ case PHYSADDR: addrtype = "PHYSADDR"; break; @@ -28917,7 +29433,7 @@ } if (CRASHDEBUG(4)) -@@ -970,7 +1178,8 @@ +@@ -970,12 +1188,26 @@ case DISPLAY_64: if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { @@ -28927,7 +29443,25 @@ fprintf(fp, "%-16s ", value_to_symstr(mem.u64, buf, 0)); linelen += strlen(buf)+1; -@@ -993,7 +1202,8 @@ + break; + } ++ if ((flag & SLAB_CACHE) && ++ vaddr_to_kmem_cache(mem.u64, slab, ++ !VERBOSE)) { ++ if (CRASHDEBUG(1)) ++ sprintf(buf, "[%llx:%s]", ++ (ulonglong)mem.u64, ++ slab); ++ else ++ sprintf(buf, "[%s]", slab); ++ fprintf(fp, "%-16s ", buf); ++ linelen += strlen(buf)+1; ++ break; ++ } + } + if (flag & HEXADECIMAL) { + fprintf(fp, hex_64_fmt, LONG_LONG_PRLEN, +@@ -993,7 +1225,8 @@ case DISPLAY_32: if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { @@ -28937,7 +29471,27 @@ fprintf(fp, INT_PRLEN == 16 ? "%-16s " : "%-8s ", value_to_symstr(mem.u32, -@@ -1138,7 +1348,7 @@ +@@ -1001,6 +1234,19 @@ + linelen += strlen(buf)+1; + break; + } ++ if ((flag & SLAB_CACHE) && ++ vaddr_to_kmem_cache(mem.u32, slab, ++ !VERBOSE)) { ++ if (CRASHDEBUG(1)) ++ sprintf(buf, "[%x:%s]", ++ mem.u32, slab); ++ else ++ sprintf(buf, "[%s]", slab); ++ fprintf(fp, INT_PRLEN == 16 ? ++ "%-16s " : "%-8s ", buf); ++ linelen += strlen(buf)+1; ++ break; ++ } + } + if (flag & HEXADECIMAL) { + fprintf(fp, "%.*x ", INT_PRLEN, mem.u32 ); +@@ -1138,7 +1384,7 @@ size = sizeof(void*); addr_entered = value_entered = FALSE; @@ -28946,7 +29500,7 @@ switch(c) { case '8': -@@ -1173,17 +1383,33 @@ +@@ -1173,17 +1419,33 @@ break; case 'p': @@ -28980,7 +29534,7 @@ default: argerrs++; break; -@@ -1262,6 +1488,9 @@ +@@ -1262,6 +1524,9 @@ case PHYSADDR: break; @@ -28990,7 +29544,7 @@ case AMBIGUOUS: error(INFO, "ambiguous address: %llx (requires -p, -u or -k)\n", -@@ -1309,6 +1538,8 @@ +@@ -1309,6 +1574,8 @@ raw_data_dump(ulong addr, long count, int symbolic) { long wordcnt; @@ -28999,7 +29553,7 @@ switch (sizeof(long)) { -@@ -1328,9 +1559,20 @@ +@@ -1328,9 +1595,20 @@ break; } @@ -29022,7 +29576,7 @@ } /* -@@ -1351,7 +1593,7 @@ +@@ -1351,7 +1629,7 @@ * is appropriate: * * addr a user, kernel or physical memory address. @@ -29031,7 +29585,7 @@ * buffer supplied buffer to read the data into. * size number of bytes to read. * type string describing the request -- helpful when the read fails. 
-@@ -1368,6 +1610,7 @@ +@@ -1368,6 +1646,7 @@ #define SEEK_ERRMSG "seek error: %s address: %llx type: \"%s\"\n" #define READ_ERRMSG "read error: %s address: %llx type: \"%s\"\n" #define WRITE_ERRMSG "write error: %s address: %llx type: \"%s\"\n" @@ -29039,7 +29593,7 @@ int readmem(ulonglong addr, int memtype, void *buffer, long size, -@@ -1376,6 +1619,7 @@ +@@ -1376,6 +1655,7 @@ int fd; long cnt; physaddr_t paddr; @@ -29047,7 +29601,7 @@ char *bufptr; if (CRASHDEBUG(4)) -@@ -1424,7 +1668,11 @@ +@@ -1424,7 +1704,11 @@ break; case PHYSADDR: @@ -29059,7 +29613,7 @@ } while (size > 0) { -@@ -1449,6 +1697,17 @@ +@@ -1449,6 +1733,17 @@ case PHYSADDR: paddr = addr; break; @@ -29077,7 +29631,7 @@ } /* -@@ -1460,7 +1719,7 @@ +@@ -1460,7 +1755,7 @@ cnt = size; switch (READMEM(fd, bufptr, cnt, @@ -29086,7 +29640,7 @@ { case SEEK_ERROR: if (PRINT_ERROR_MESSAGE) -@@ -1472,6 +1731,11 @@ +@@ -1472,6 +1767,11 @@ error(INFO, READ_ERRMSG, memtype_string(memtype, 0), addr, type); goto readmem_error; @@ -29098,7 +29652,7 @@ default: break; } -@@ -1610,6 +1874,9 @@ +@@ -1610,6 +1910,9 @@ int read_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { @@ -29108,7 +29662,7 @@ if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1)) error(INFO, "verify_paddr(%lx) failed\n", paddr); -@@ -1754,6 +2021,12 @@ +@@ -1754,6 +2057,12 @@ case PHYSADDR: sprintf(membuf, debug ? "PHYSADDR" : "physical"); break; @@ -29121,7 +29675,7 @@ default: if (debug) sprintf(membuf, "0x%x (?)", memtype); -@@ -1849,6 +2122,10 @@ +@@ -1849,6 +2158,10 @@ case PHYSADDR: break; @@ -29132,7 +29686,7 @@ } while (size > 0) { -@@ -1946,6 +2223,77 @@ +@@ -1946,6 +2259,77 @@ } /* @@ -29210,7 +29764,7 @@ * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
-@@ -2113,6 +2461,8 @@ +@@ -2113,6 +2497,8 @@ break; } @@ -29219,7 +29773,7 @@ switch (memtype) { case UVADDR: fprintf(fp, "%s %s\n", -@@ -2126,9 +2476,12 @@ +@@ -2126,9 +2512,12 @@ return; } if (!uvtop(tc, vaddr, &paddr, 0)) { @@ -29234,7 +29788,7 @@ page_exists = FALSE; } else { fprintf(fp, "%s %s\n\n", -@@ -2161,9 +2514,13 @@ +@@ -2161,9 +2550,13 @@ } if (vtop_flags & USE_USER_PGD) { if (!uvtop(tc, vaddr, &paddr, 0)) { @@ -29250,7 +29804,7 @@ page_exists = FALSE; } else { fprintf(fp, "%s %s\n\n", -@@ -2176,9 +2533,13 @@ +@@ -2176,9 +2569,13 @@ uvtop(tc, vaddr, &paddr, VERBOSE); } else { if (!kvtop(tc, vaddr, &paddr, 0)) { @@ -29266,7 +29820,7 @@ page_exists = FALSE; } else { fprintf(fp, "%s %s\n\n", -@@ -2839,7 +3200,8 @@ +@@ -2839,7 +3236,8 @@ if (DO_REF_SEARCH(ref)) { if (VM_REF_CHECK_DECVAL(ref, @@ -29276,7 +29830,7 @@ if (DO_REF_DISPLAY(ref)) display = TRUE; else { -@@ -2979,7 +3341,20 @@ +@@ -2979,7 +3377,20 @@ if (!task_mm(task, TRUE)) return; @@ -29298,7 +29852,7 @@ tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd)); -@@ -3036,6 +3411,12 @@ +@@ -3036,6 +3447,12 @@ #define GET_INACTIVE_DIRTY (ADDRESS_SPECIFIED << 13) /* obsolete */ #define SLAB_GET_COUNTS (ADDRESS_SPECIFIED << 14) #define SLAB_WALKTHROUGH (ADDRESS_SPECIFIED << 15) @@ -29311,7 +29865,7 @@ #define GET_ALL \ (GET_SHARED_PAGES|GET_TOTALRAM_PAGES|GET_BUFFERS_PAGES|GET_SLAB_PAGES) -@@ -3045,8 +3426,8 @@ +@@ -3045,8 +3462,8 @@ { int i; int c; @@ -29322,7 +29876,7 @@ struct meminfo meminfo; ulonglong value[MAXARGS]; char buf[BUFSIZE]; -@@ -3054,18 +3435,26 @@ +@@ -3054,18 +3471,26 @@ int spec_addr; spec_addr = 0; @@ -29352,7 +29906,7 @@ case 'i': iflag = 1; break; -@@ -3153,13 +3542,13 @@ +@@ -3153,13 +3578,13 @@ if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); @@ -29368,7 +29922,7 @@ kmem_cache_init(); while (args[optind]) { -@@ -3198,8 +3587,6 @@ +@@ -3198,8 +3623,6 @@ if (pflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; @@ -29377,7 +29931,7 @@ dump_mem_map(&meminfo); pflag++; } -@@ -3234,6 +3621,8 @@ +@@ -3234,6 +3657,8 @@ } else { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; @@ -29386,7 +29940,7 @@ if (meminfo.calls++) fprintf(fp, "\n"); vt->dump_kmem_cache(&meminfo); -@@ -3248,8 +3637,6 @@ +@@ -3248,8 +3673,6 @@ if (vflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; @@ -29395,7 +29949,7 @@ dump_vmlist(&meminfo); vflag++; } -@@ -3275,7 +3662,7 @@ +@@ -3275,7 +3698,7 @@ /* * no value arguments allowed! 
*/ @@ -29404,7 +29958,7 @@ error(INFO, "no address arguments allowed with this option\n"); cmd_usage(pc->curcmd, SYNOPSIS); -@@ -3309,24 +3696,25 @@ +@@ -3309,24 +3732,25 @@ } if (sflag == 1) { @@ -29438,7 +29992,7 @@ } if (vflag == 1) -@@ -3343,6 +3731,9 @@ +@@ -3343,6 +3767,9 @@ if (nflag == 1) dump_memory_nodes(MEMORY_NODES_DUMP); @@ -29448,7 +30002,7 @@ if (lflag == 1) { dump_page_lists(&meminfo); } -@@ -3352,7 +3743,13 @@ +@@ -3352,7 +3779,13 @@ dump_page_lists(&meminfo); } @@ -29463,7 +30017,7 @@ cflag + Cflag + iflag + nflag + lflag + Lflag + meminfo.calls)) cmd_usage(pc->curcmd, SYNOPSIS); -@@ -3373,12 +3770,13 @@ +@@ -3373,12 +3806,13 @@ buf = (char *)GETBUF(SIZE(page)); if (!readmem(pageptr, KVADDR, buf, SIZE(page), @@ -29479,7 +30033,7 @@ if (count_bits_long(flags) == 1) vt->PG_reserved = flags; else -@@ -3386,12 +3784,64 @@ +@@ -3386,12 +3820,64 @@ if (CRASHDEBUG(2)) fprintf(fp, @@ -29545,7 +30099,7 @@ /* * dump_mem_map() displays basic data about each entry in the mem_map[] * array, or if an address is specified, just the mem_map[] entry for that -@@ -3438,22 +3888,20 @@ +@@ -3438,22 +3924,20 @@ #define PGMM_CACHED (512) static void @@ -29571,7 +30125,7 @@ char hdr[BUFSIZE]; char buf0[BUFSIZE]; char buf1[BUFSIZE]; -@@ -3462,6 +3910,7 @@ +@@ -3462,6 +3946,7 @@ char buf4[BUFSIZE]; char *page_cache; char *pcache; @@ -29579,7 +30133,7 @@ v22 = VALID_MEMBER(page_inode); /* page.inode vs. page.mapping */ -@@ -3549,22 +3998,62 @@ +@@ -3549,22 +4034,62 @@ done = FALSE; total_pages = 0; @@ -29653,7 +30207,7 @@ i++, pp += SIZE(page), phys += PAGESIZE()) { if ((i % PGMM_CACHED) == 0) { -@@ -3581,7 +4070,7 @@ +@@ -3581,7 +4106,7 @@ continue; } @@ -29662,7 +30216,7 @@ } pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); -@@ -3653,11 +4142,12 @@ +@@ -3653,11 +4178,12 @@ } continue; } @@ -29676,7 +30230,7 @@ mapping = ULONG(pcache + OFFSET(page_mapping)); index = ULONG(pcache + OFFSET(page_index)); -@@ -3700,6 +4190,20 @@ +@@ -3700,6 +4226,20 @@ space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, " "), " "); @@ -29697,7 +30251,7 @@ else fprintf(fp, "%s%s%s%s%s%s%8ld %2d ", mkstring(buf0, VADDR_PRLEN, -@@ -3862,193 +4366,640 @@ +@@ -3862,6379 +4402,9844 @@ FREEBUF(page_cache); } @@ -29761,23 +30315,10 @@ - * Compute bytes till end of page. - */ - cnt = PAGESIZE() - PAGEOFFSET(addr); -- -- if (cnt > size) -- cnt = size; -- -- if (!readmem(addr, KVADDR, bufptr, size, -- "virtual page struct cache", RETURN_ON_ERROR|QUIET)) { -- BZERO(bufptr, size); -- if (!(vt->flags & V_MEM_MAP)) -- error(WARNING, -- "mem_map[] from %lx to %lx not accessible\n", -- addr, addr+size); -- } + v22 = VALID_MEMBER(page_inode); /* page.inode vs. 
page.mapping */ -- addr += cnt; -- bufptr += cnt; -- size -= cnt; +- if (cnt > size) +- cnt = size; + if (v22) { + sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", + mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), @@ -29799,9 +30340,15 @@ + mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), + space(MINSPACE), + mkstring(buf4, 8, CENTER|RJUST, "INDEX")); - } --} ++ } +- if (!readmem(addr, KVADDR, bufptr, size, +- "virtual page struct cache", RETURN_ON_ERROR|QUIET)) { +- BZERO(bufptr, size); +- if (!(vt->flags & V_MEM_MAP)) +- error(WARNING, +- "mem_map[] from %lx to %lx not accessible\n", +- addr, addr+size); + pg_spec = phys_spec = print_hdr = FALSE; + + switch (mi->flags) @@ -29827,13 +30374,15 @@ + default: + error(FATAL, "dump_mem_map: no memtype specified\n"); + break; -+ } + } + print_hdr = TRUE; + break; --/* -- * dump_page_hash_table() displays the entries in each page_hash_table. -- */ +- addr += cnt; +- bufptr += cnt; +- size -= cnt; +- } +-} + case GET_ALL: + shared = 0; + reserved = 0; @@ -29841,11 +30390,22 @@ + slabs = 0; + break; --#define PGHASH_CACHED (1024) + case GET_SHARED_PAGES: + shared = 0; + break; +-/* +- * dump_page_hash_table() displays the entries in each page_hash_table. +- */ ++ case GET_TOTALRAM_PAGES: ++ reserved = 0; ++ break; + +-#define PGHASH_CACHED (1024) ++ case GET_BUFFERS_PAGES: ++ buffers = 0; ++ break; + -static void -dump_page_hash_table(struct meminfo *hi) -{ @@ -29863,8 +30423,8 @@ - char buf[BUFSIZE]; - char hash_table[BUFSIZE]; - char *pcache, *pghash_cache; -+ case GET_TOTALRAM_PAGES: -+ reserved = 0; ++ case GET_SLAB_PAGES: ++ slabs = 0; + break; - if (!vt->page_hash_table) { @@ -29885,16 +30445,13 @@ - } else - error(FATAL, "cannot determine page cache size\n"); - return; -- } -+ case GET_BUFFERS_PAGES: -+ buffers = 0; ++ default: ++ print_hdr = TRUE; + break; + } - ld = &list_data; -+ case GET_SLAB_PAGES: -+ slabs = 0; -+ break; - +- - if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) { - verbose = TRUE; - searchpage = hi->spec_addr; @@ -29904,26 +30461,13 @@ - } else { - verbose = FALSE; - searchpage = 0; -+ default: -+ print_hdr = TRUE; -+ break; - } - -- if (vt->page_hash_table_len == 0) -- error(FATAL, "cannot determine size of page_hash_table\n"); -- -- page_hash_table = vt->page_hash_table; -- len = vt->page_hash_table_len; -- entry_len = VALID_STRUCT(page_cache_bucket) ? -- SIZE(page_cache_bucket) : sizeof(void *); +- } + page_cache = GETBUF(SIZE(page) * PGMM_CACHED); + done = FALSE; + total_pages = 0; -- if (CRASHDEBUG(1)) { -- populated = 0; -- fprintf(fp, "page_hash_table length: %d\n", len); -- } +- if (vt->page_hash_table_len == 0) +- error(FATAL, "cannot determine size of page_hash_table\n"); + for (n = 0; n < vt->numnodes; n++) { + if (print_hdr) { + if (!(pc->curcmd_flags & HEADER_PRINTED)) @@ -29932,13 +30476,10 @@ + pc->curcmd_flags |= HEADER_PRINTED; + } -- get_symbol_type("page_cache_size", NULL, &req); -- if (req.length == sizeof(int)) { -- get_symbol_data("page_cache_size", sizeof(int), &ival); -- page_cache_size = (long)ival; -- } else -- get_symbol_data("page_cache_size", sizeof(long), -- &page_cache_size); +- page_hash_table = vt->page_hash_table; +- len = vt->page_hash_table_len; +- entry_len = VALID_STRUCT(page_cache_bucket) ? 
+- SIZE(page_cache_bucket) : sizeof(void *); + nt = &vt->node_table[n]; + total_pages += nt->size; + pp = nt->mem_map; @@ -29948,19 +30489,25 @@ + else + node_size = nt->size; -- pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED); +- if (CRASHDEBUG(1)) { +- populated = 0; +- fprintf(fp, "page_hash_table length: %d\n", len); +- } + for (i = 0; i < node_size; + i++, pp += SIZE(page), phys += PAGESIZE()) { -- if (searchpage) -- open_tmpfile(); +- get_symbol_type("page_cache_size", NULL, &req); +- if (req.length == sizeof(int)) { +- get_symbol_data("page_cache_size", sizeof(int), &ival); +- page_cache_size = (long)ival; +- } else +- get_symbol_data("page_cache_size", sizeof(long), +- &page_cache_size); + if ((i % PGMM_CACHED) == 0) { + ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); + physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); -- hq_open(); -- for (i = total_cached = 0; i < len; i++, -- page_hash_table += entry_len) { +- pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED); + if ((pg_spec && (mi->spec_addr > ppend)) || + (phys_spec && + (PHYSPAGEBASE(mi->spec_addr) > physend))) { @@ -29970,23 +30517,21 @@ + continue; + } +- if (searchpage) +- open_tmpfile(); ++ fill_mem_map_cache(pp, ppend, page_cache); ++ } + +- hq_open(); +- for (i = total_cached = 0; i < len; i++, +- page_hash_table += entry_len) { ++ pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); + - if ((i % PGHASH_CACHED) == 0) { - readmem(page_hash_table, KVADDR, pghash_cache, - entry_len * PGHASH_CACHED, - "page hash cache", FAULT_ON_ERROR); - } -+ fill_mem_map_cache(pp, ppend, page_cache); -+ } - -- pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len); -- if (VALID_STRUCT(page_cache_bucket)) -- pcache += OFFSET(page_cache_bucket_chain); -- -- head = ULONG(pcache); -+ pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); - -- if (!head) -- continue; + if (received_SIGINT()) + restart(0); + @@ -29994,24 +30539,18 @@ + (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) + done = TRUE; -- if (verbose) -- fprintf(fp, "page_hash_table[%d]\n", i); -- -- if (CRASHDEBUG(1)) -- populated++; +- pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len); +- if (VALID_STRUCT(page_cache_bucket)) +- pcache += OFFSET(page_cache_bucket_chain); + if (!done && (pg_spec || phys_spec)) + continue; -+ + +- head = ULONG(pcache); + flags = ULONG(pcache + OFFSET(page_flags)); + count = UINT(pcache + OFFSET(page_count)); -- BZERO(ld, sizeof(struct list_data)); -- ld->flags = verbose; -- ld->start = head; -- ld->searchfor = searchpage; -- ld->member_offset = OFFSET(page_next_hash); -- cnt = do_list(ld); -- total_cached += cnt; +- if (!head) +- continue; + switch (mi->flags) + { + case GET_ALL: @@ -30028,23 +30567,25 @@ + error(FATAL, + "cannot determine whether pages have buffers\n"); -- if (ld->searchfor) -- break; +- if (verbose) +- fprintf(fp, "page_hash_table[%d]\n", i); +- +- if (CRASHDEBUG(1)) +- populated++; + if (mi->flags != GET_ALL) + continue; -- if (received_SIGINT()) -- restart(0); -- } -- hq_close(); +- BZERO(ld, sizeof(struct list_data)); +- ld->flags = verbose; +- ld->start = head; +- ld->searchfor = searchpage; +- ld->member_offset = OFFSET(page_next_hash); +- cnt = do_list(ld); +- total_cached += cnt; + /* FALLTHROUGH */ -- fprintf(fp, "%spage_cache_size: %ld ", verbose ? 
"\n" : "", -- page_cache_size); -- if (page_cache_size != total_cached) -- fprintf(fp, "(found %ld)\n", total_cached); -- else -- fprintf(fp, "(verified)\n"); +- if (ld->searchfor) +- break; + case GET_SLAB_PAGES: + if (v22) { + if ((flags >> v22_PG_Slab) & 1) @@ -30059,8 +30600,10 @@ + if (mi->flags != GET_ALL) + continue; -- if (CRASHDEBUG(1)) -- fprintf(fp, "heads containing page(s): %d\n", populated); +- if (received_SIGINT()) +- restart(0); +- } +- hq_close(); + /* FALLTHROUGH */ + + case GET_SHARED_PAGES: @@ -30492,16 +31035,14 @@ + + if (CRASHDEBUG(1)) + fprintf(fp, "heads containing page(s): %d\n", populated); - - if (searchpage) { - rewind(pc->tmpfile); -@@ -4057,5262 +5008,8014 @@ - if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); ++ found = FALSE; ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (CRASHDEBUG(1) && STRNEQ(buf, "flags & ADDRESS_SPECIFIED) && ld->searchfor) { + fprintf(fp, "%lx\n", ld->searchfor); + retval = TRUE; @@ -32311,19 +32858,30 @@ + fprintf(fp, "(verified)\n"); + } + } -+ + +- if (CRASHDEBUG(1)) +- fprintf(fp, "heads containing page(s): %d\n", populated); + if (mi->flags & GET_INACTIVE_LIST) { + if (!symbol_exists("inactive_list")) + error(FATAL, + "inactive_list does not exist in this kernel\n"); -+ + +- if (searchpage) { +- rewind(pc->tmpfile); +- found = FALSE; +- while (fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (CRASHDEBUG(1) && STRNEQ(buf, "end = symbol_value("inactive_list"); + readmem(ld->end, KVADDR, &ld->start, sizeof(void *), + "LIST_HEAD contents", FAULT_ON_ERROR); @@ -32492,9 +33050,7 @@ + FAULT_ON_ERROR); + } + } - -- this_addr = htol(strip_linefeeds(buf), -- RETURN_ON_ERROR, &errflag); ++ + return retval; +} + @@ -32560,51 +33116,135 @@ + } + + cache = ULONG(cache_buf + next_offset); -+ + +- this_addr = htol(strip_linefeeds(buf), +- RETURN_ON_ERROR, &errflag); + if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) + cache -= next_offset; -+ + +- if (this_addr == searchpage) { +- found = TRUE; +- break; +- } +- } +- close_tmpfile(); + } while (cache != cache_cache); -+ + +- if (found) { +- fprintf(fp, hash_table); +- fprintf(fp, "%lx\n", searchpage); +- hi->retval = TRUE; +- } +- } + FREEBUF(cache_buf); + return NULL; -+} -+ -+/* + } + + /* +- * dump_free_pages() displays basic data about pages currently resident +- * in the free_area[] memory lists. If the flags contains the VERBOSE +- * bit, each page slab base address is dumped. If an address is specified +- * only the free_area[] data containing that page is displayed, along with +- * the page slab base address. Specified addresses can either be physical +- * address or page structure pointers. + * Note same functionality as above, but instead it just + * dumps all slab cache names and their addresses. 
-+ */ -+static void + */ +-char *free_area_hdr1 = \ +- "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; +-char *free_area_hdr2 = \ +- "AREA SIZE FREE_AREA_STRUCT\n"; +- + static void +-dump_free_pages(struct meminfo *fi) +kmem_cache_list(void) -+{ + { +- int i; +- int order; +- ulong free_area; +- char *free_area_buf; +- ulong *pp; +- int nr_mem_lists; +- struct list_data list_data, *ld; +- long cnt, total_free, chunk_size; +- int nr_free_pages; + ulong cache, cache_cache, name; + long next_offset, name_offset; + char *cache_buf; -+ char buf[BUFSIZE]; -+ + char buf[BUFSIZE]; +- char last_free[BUFSIZE]; +- char last_free_hdr[BUFSIZE]; +- int verbose, errflag, found; +- physaddr_t searchphys; +- ulong this_addr; +- physaddr_t this_phys; +- int do_search; +- ulong kfp, offset; +- int flen, dimension; + +- if (vt->flags & (NODES|ZONES)) +- error(FATAL, "dump_free_pages called with (NODES|ZONES)\n"); + if (vt->flags & KMEM_CACHE_UNAVAIL) { + error(INFO, "kmem cache slab subsystem not available\n"); + return; + } -+ + +- nr_mem_lists = ARRAY_LENGTH(free_area); +- dimension = ARRAY_LENGTH(free_area_DIMENSION); + if (vt->flags & KMALLOC_SLUB) { + kmem_cache_list_slub(); + return; + } -+ + +- if (nr_mem_lists == 0) +- error(FATAL, "cannot determine size/dimensions of free_area\n"); + name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? + OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); + next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? + OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); -+ + +- if (dimension) +- error(FATAL, +- "dump_free_pages called with multidimensional free area\n"); + cache = cache_cache = symbol_value("cache_cache"); -+ + +- ld = &list_data; +- total_free = 0; +- searchphys = 0; +- do_search = FALSE; +- get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); +- +- switch (fi->flags) +- { +- case GET_FREE_HIGHMEM_PAGES: +- error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); + cache_buf = GETBUF(SIZE(kmem_cache_s)); -+ + +- case GET_FREE_PAGES: +- fi->retval = (ulong)nr_free_pages; +- return; + do { + readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), + "kmem_cache_s buffer", FAULT_ON_ERROR); -+ + +- case ADDRESS_SPECIFIED: +- switch (fi->memtype) +- { +- case KVADDR: +- if (!page_to_phys(fi->spec_addr, &searchphys)) { +- if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) +- return; +- } +- break; +- case PHYSADDR: +- searchphys = fi->spec_addr; +- break; +- default: +- error(FATAL, "dump_free_pages: no memtype specified\n"); +- } +- do_search = TRUE; +- break; +- } + if (vt->kmem_cache_namelen) { + BCOPY(cache_buf+name_offset, buf, + vt->kmem_cache_namelen); @@ -32623,54 +33263,65 @@ + sprintf(buf, "(unknown)"); + } + } -+ + +- verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; + fprintf(fp, "%lx %s\n", cache, buf); -+ + +- free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct)); +- kfp = free_area = symbol_value("free_area"); +- flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); +- readmem(free_area, KVADDR, free_area_buf, +- SIZE(free_area_struct) * nr_mem_lists, +- "free_area_struct", FAULT_ON_ERROR); + cache = ULONG(cache_buf + next_offset); -+ + +- if (do_search) +- open_tmpfile(); + if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) + cache -= next_offset; -+ + +- if (!verbose) +- fprintf(fp, free_area_hdr1); + } while (cache != cache_cache); -+ + +- hq_open(); +- for (i = 0; i < nr_mem_lists; i++) { +- pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i)); + FREEBUF(cache_buf); +} -+ + +- chunk_size = power(2, i); +/* + * Translate an address to its physical page number, verify that the + * page in fact belongs to the slab subsystem, and if so, return the + * name of the cache to which it belongs. + */ +static char * -+vaddr_to_kmem_cache(ulong vaddr, char *buf) ++vaddr_to_kmem_cache(ulong vaddr, char *buf, int verbose) +{ + physaddr_t paddr; + ulong page; + ulong cache; -- if (this_addr == searchpage) { -- found = TRUE; -- break; -- } -- } -- close_tmpfile(); + if (!kvtop(NULL, vaddr, &paddr, 0)) { -+ error(WARNING, -+ "cannot make virtual-to-physical translation: %lx\n", -+ vaddr); + if (verbose) +- fprintf(fp, free_area_hdr2); ++ error(WARNING, ++ "cannot make virtual-to-physical translation: %lx\n", ++ vaddr); + return NULL; + } -- if (found) { -- fprintf(fp, hash_table); -- fprintf(fp, "%lx\n", searchpage); -- hi->retval = TRUE; -- } +- fprintf(fp, "%3d ", i); +- sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); +- fprintf(fp, "%5s ", buf); + if (!phys_to_page(paddr, &page)) { -+ error(WARNING, "cannot find mem_map page for address: %lx\n", -+ vaddr); ++ if (verbose) ++ error(WARNING, ++ "cannot find mem_map page for address: %lx\n", ++ vaddr); + return NULL; - } ++ } + + if (vt->flags & KMALLOC_SLUB) { + readmem(compound_head(page)+OFFSET(page_slab), @@ -32692,76 +33343,35 @@ + error(FATAL, "cannot determine slab cache from page struct\n"); + + return(is_kmem_cache_addr(cache, buf)); - } - - /* -- * dump_free_pages() displays basic data about pages currently resident -- * in the free_area[] memory lists. If the flags contains the VERBOSE -- * bit, each page slab base address is dumped. If an address is specified -- * only the free_area[] data containing that page is displayed, along with -- * the page slab base address. Specified addresses can either be physical -- * address or page structure pointers. ++} ++ ++/* + * Translate an address to its physical page number, verify that the + * page in fact belongs to the slab subsystem, and if so, return the + * address of the slab to which it belongs. 
- */ --char *free_area_hdr1 = \ -- "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; --char *free_area_hdr2 = \ -- "AREA SIZE FREE_AREA_STRUCT\n"; -- --static void --dump_free_pages(struct meminfo *fi) ++ */ +static ulong +vaddr_to_slab(ulong vaddr) - { -- int i; -- int order; -- ulong free_area; -- char *free_area_buf; -- ulong *pp; -- int nr_mem_lists; -- struct list_data list_data, *ld; -- long cnt, total_free, chunk_size; -- int nr_free_pages; -- char buf[BUFSIZE]; -- char last_free[BUFSIZE]; -- char last_free_hdr[BUFSIZE]; -- int verbose, errflag, found; -- physaddr_t searchphys; -- ulong this_addr; -- physaddr_t this_phys; -- int do_search; -- ulong kfp, offset; -- int flen, dimension; ++{ + physaddr_t paddr; + ulong page; + ulong slab; - -- if (vt->flags & (NODES|ZONES)) -- error(FATAL, "dump_free_pages called with (NODES|ZONES)\n"); ++ + if (!kvtop(NULL, vaddr, &paddr, 0)) { + error(WARNING, + "cannot make virtual-to-physical translation: %lx\n", + vaddr); + return 0; + } - -- nr_mem_lists = ARRAY_LENGTH(free_area); -- dimension = ARRAY_LENGTH(free_area_DIMENSION); ++ + if (!phys_to_page(paddr, &page)) { + error(WARNING, "cannot find mem_map page for address: %lx\n", + vaddr); + return 0; + } - -- if (nr_mem_lists == 0) -- error(FATAL, "cannot determine size/dimensions of free_area\n"); ++ + slab = 0; - -- if (dimension) -- error(FATAL, -- "dump_free_pages called with multidimensional free area\n"); ++ + if (vt->flags & KMALLOC_SLUB) + slab = compound_head(page); + else if (VALID_MEMBER(page_prev)) @@ -32778,42 +33388,11 @@ + "page.lru.prev", FAULT_ON_ERROR); + else + error(FATAL, "unknown definition of struct page?\n"); - -- ld = &list_data; -- total_free = 0; -- searchphys = 0; -- do_search = FALSE; -- get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); -- -- switch (fi->flags) -- { -- case GET_FREE_HIGHMEM_PAGES: -- error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); ++ + return slab; +} - -- case GET_FREE_PAGES: -- fi->retval = (ulong)nr_free_pages; -- return; - -- case ADDRESS_SPECIFIED: -- switch (fi->memtype) -- { -- case KVADDR: -- if (!page_to_phys(fi->spec_addr, &searchphys)) { -- if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) -- return; -- } -- break; -- case PHYSADDR: -- searchphys = fi->spec_addr; -- break; -- default: -- error(FATAL, "dump_free_pages: no memtype specified\n"); -- } -- do_search = TRUE; -- break; -- } ++ ++ +/* + * Initialize any data required for scouring the kmalloc subsystem more + * efficiently. @@ -32821,8 +33400,7 @@ +char slab_hdr[100] = { 0 }; +char kmem_cache_hdr[100] = { 0 }; +char free_inuse_hdr[100] = { 0 }; - -- verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; ++ +static void +kmem_cache_init(void) +{ @@ -32830,31 +33408,33 @@ + long cache_count, num_offset, next_offset; + char *cache_buf; -- free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct)); -- kfp = free_area = symbol_value("free_area"); -- flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); -- readmem(free_area, KVADDR, free_area_buf, -- SIZE(free_area_struct) * nr_mem_lists, -- "free_area_struct", FAULT_ON_ERROR); +- fprintf(fp, "%s %s", +- mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), +- verbose ? 
"\n" : ""); + if (vt->flags & KMEM_CACHE_UNAVAIL) + return; -- if (do_search) -- open_tmpfile(); +- if (is_page_ptr(*pp, NULL)) { +- BZERO(ld, sizeof(struct list_data)); +- ld->flags = verbose; +- ld->start = *pp; +- ld->end = free_area; +- cnt = do_list(ld); +- total_free += (cnt * chunk_size); +- } else +- cnt = 0; + if ((vt->flags & KMEM_CACHE_DELAY) && !(pc->flags & RUNTIME)) + return; -- if (!verbose) -- fprintf(fp, free_area_hdr1); +- if (!verbose) +- fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); + if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) + return; -- hq_open(); -- for (i = 0; i < nr_mem_lists; i++) { -- pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i)); +- free_area += SIZE(free_area_struct); +- kfp += SIZE(free_area_struct); + please_wait("gathering kmem slab cache data"); - -- chunk_size = power(2, i); ++ + if (!strlen(slab_hdr)) { + if (vt->flags & KMALLOC_SLUB) + sprintf(slab_hdr, @@ -32866,46 +33446,40 @@ + "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", + space(VADDR_PRLEN > 8 ? 14 : 6), + space(VADDR_PRLEN > 8 ? 12 : 4)); -+ } + } +- hq_close(); -- if (verbose) -- fprintf(fp, free_area_hdr2); +- fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages); +- if (total_free != nr_free_pages) +- fprintf(fp, "(found %ld)\n", total_free); +- else +- fprintf(fp, "(verified)\n"); + if (!strlen(kmem_cache_hdr)) + sprintf(kmem_cache_hdr, + "CACHE%sNAME OBJSIZE ALLOCATED TOTAL SLABS SSIZE\n", + space(VADDR_PRLEN > 8 ? 12 : 4)); -- fprintf(fp, "%3d ", i); -- sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); -- fprintf(fp, "%5s ", buf); +- if (!do_search) + if (!strlen(free_inuse_hdr)) + sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n"); - -- fprintf(fp, "%s %s", -- mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), -- verbose ? "\n" : ""); ++ + if (vt->flags & KMALLOC_SLUB) { + kmem_cache_init_slub(); -+ return; + return; + } -- if (is_page_ptr(*pp, NULL)) { -- BZERO(ld, sizeof(struct list_data)); -- ld->flags = verbose; -- ld->start = *pp; -- ld->end = free_area; -- cnt = do_list(ld); -- total_free += (cnt * chunk_size); -- } else -- cnt = 0; +- found = FALSE; +- rewind(pc->tmpfile); +- order = offset = 0; + num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? + OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num); + next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
+ OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); + max_cnum = max_limit = max_cpus = cache_count = 0; -- if (!verbose) -- fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); +- while (fgets(buf, BUFSIZE, pc->tmpfile)) { +- if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); -- order = offset = 0; +- if (CRASHDEBUG(1) && !hexadecimal(strip_linefeeds(buf), 0)) +- continue; + tmp = (ulong)(UINT(cache_buf + num_offset)); -- while (fgets(buf, BUFSIZE, pc->tmpfile)) { -- if (CRASHDEBUG(1) && STRNEQ(buf, " max_cnum) + max_cnum = tmp; -+ + +- if (!page_to_phys(this_addr, &this_phys)) +- continue; + if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) + max_limit = tmp; + /* @@ -32964,23 +33542,16 @@ + return; + } -- if (strstr(buf, "nr_free_pages") || -- STREQ(buf, "\n")) -- continue; +- if ((searchphys >= this_phys) && +- (searchphys < (this_phys+chunk_size))) { +- if (searchphys > this_phys) +- offset = (searchphys - this_phys)/PAGESIZE(); +- found = TRUE; + if (tmp2 > max_cpus) + max_cpus = tmp2; - -- if (strstr(buf, "AREA")) { -- strcpy(last_free_hdr, buf); -- continue; -- } ++ + cache = ULONG(cache_buf + next_offset); - -- if (strstr(buf, "k")) { -- strcpy(last_free, buf); -- chunk_size = power(2, order) * PAGESIZE(); -- order++; -- continue; ++ + switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) + { + case PERCPU_KMALLOC_V1: @@ -32989,57 +33560,14 @@ + case PERCPU_KMALLOC_V2: + if (cache != cache_end) + cache -= next_offset; -+ break; + break; } - -- if (CRASHDEBUG(1) && !hexadecimal(strip_linefeeds(buf), 0)) -- continue; -+ } while (cache != cache_end); - -- errflag = 0; -- this_addr = htol(strip_linefeeds(buf), -- RETURN_ON_ERROR, &errflag); -- if (errflag) -- continue; -+ FREEBUF(cache_buf); - -- if (!page_to_phys(this_addr, &this_phys)) -- continue; -+ vt->kmem_max_c_num = max_cnum; -+ vt->kmem_max_limit = max_limit; -+ vt->kmem_max_cpus = max_cpus; -+ vt->kmem_cache_count = cache_count; - -- if ((searchphys >= this_phys) && -- (searchphys < (this_phys+chunk_size))) { -- if (searchphys > this_phys) -- offset = (searchphys - this_phys)/PAGESIZE(); -- found = TRUE; -- break; -- } -+ if (CRASHDEBUG(2)) { -+ fprintf(fp, "kmem_cache_init:\n"); -+ fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); -+ fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); -+ fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); -+ fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); - } +- } - close_tmpfile(); - if (found) { - order--; -+ if (!(vt->flags & KMEM_CACHE_INIT)) { -+ if (vt->flags & PERCPU_KMALLOC_V1) -+ ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, -+ kmem_cache_s_name, "kmem_cache_s.name", -+ NULL, sizeof(char)); -+ else if (vt->flags & PERCPU_KMALLOC_V2) -+ vt->kmem_cache_namelen = 0; -+ else -+ ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, -+ kmem_cache_s_c_name, "kmem_cache_s.c_name", -+ NULL, 0); -+ } ++ } while (cache != cache_end); - fprintf(fp, last_free_hdr); - fprintf(fp, last_free); @@ -33058,11 +33586,38 @@ - fprintf(fp, "%s of %ld pages) ", - ordinal(offset+1, buf), power(2, order)); - } -+ please_wait_done(); ++ FREEBUF(cache_buf); - fi->retval = TRUE; - fprintf(fp, "\n"); -- } ++ vt->kmem_max_c_num = max_cnum; ++ vt->kmem_max_limit = max_limit; ++ vt->kmem_max_cpus = max_cpus; ++ vt->kmem_cache_count = cache_count; ++ ++ if (CRASHDEBUG(2)) { ++ fprintf(fp, "kmem_cache_init:\n"); ++ fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); ++ fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); ++ fprintf(fp, " kmem_max_cpus: %ld\n", 
vt->kmem_max_cpus); ++ fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); ++ } ++ ++ if (!(vt->flags & KMEM_CACHE_INIT)) { ++ if (vt->flags & PERCPU_KMALLOC_V1) ++ ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, ++ kmem_cache_s_name, "kmem_cache_s.name", ++ NULL, sizeof(char)); ++ else if (vt->flags & PERCPU_KMALLOC_V2) ++ vt->kmem_cache_namelen = 0; ++ else ++ ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, ++ kmem_cache_s_c_name, "kmem_cache_s.c_name", ++ NULL, 0); + } ++ ++ please_wait_done(); ++ + vt->flags |= KMEM_CACHE_INIT; } @@ -33313,7 +33868,10 @@ - } else - cnt = 0; + strcpy(buf, si->ignore); -+ + +- if (!verbose) +- fprintf(fp, +- "%6ld %6ld\n", cnt, cnt * chunk_size ); + p1 = buf; + while (*p1) { + if (*p1 == ',') @@ -33321,16 +33879,13 @@ + p1++; + } -- if (!verbose) -- fprintf(fp, -- "%6ld %6ld\n", cnt, cnt * chunk_size ); -+ argc = parse_line(buf, arglist); - - pp += (SIZE(free_area_struct)/sizeof(ulong)); - free_area += SIZE(free_area_struct); - kfp += SIZE(free_area_struct); - } - fprintf(fp, "\n"); ++ argc = parse_line(buf, arglist); ++ + for (i = 0; i < argc; i++) { + if (STREQ(name, arglist[i])) + return TRUE; @@ -33470,28 +34025,15 @@ + fprintf(fp, "%-18s %8ld ", si->curname, si->size); + sprintf(b1, "%c%dld ", '%', 9); } --} - ++ + fprintf(fp, b1, vt->flags & (PERCPU_KMALLOC_V2) ? + si->inuse - si->cpucached_cache : si->inuse); - --/* -- * Dump free pages in newer kernels that have zones. This is a work in -- * progress, because although the framework for memory nodes has been laid -- * down, complete support has not been put in place. -- */ --static char *zone_hdr = "ZONE NAME SIZE FREE"; ++ + fprintf(fp, "%8ld %5ld %3ldk\n", + si->num_slabs * si->c_num, + si->num_slabs, si->slabsize/1024); -+} + } --/* -- * From linux/mmzone.h -- */ --#define ZONE_DMA 0 --#define ZONE_NORMAL 1 --#define ZONE_HIGHMEM 2 +#define DUMP_SLAB_INFO() \ + { \ + char b1[BUFSIZE], b2[BUFSIZE]; \ @@ -33510,11 +34052,61 @@ + vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? \ + freeobjs + si->cpucached_slab : freeobjs); \ + } - - static void --dump_free_pages_zones_v1(struct meminfo *fi) ++ ++static void +dump_kmem_cache(struct meminfo *si) - { ++{ ++ char buf[BUFSIZE]; ++ char kbuf[BUFSIZE]; ++ char *reqname; ++ ulong cache_cache; ++ ulong name, magic; ++ int cnt; ++ char *p1; ++ ++ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) ++ error(FATAL, ++ "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); ++ ++ si->found = si->retval = 0; ++ reqname = NULL; ++ ++ if ((!(si->flags & VERBOSE) || si->reqname) && ++ !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) ++ fprintf(fp, kmem_cache_hdr); + +-/* +- * Dump free pages in newer kernels that have zones. This is a work in +- * progress, because although the framework for memory nodes has been laid +- * down, complete support has not been put in place. 
+- */ +-static char *zone_hdr = "ZONE NAME SIZE FREE"; ++ si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); ++ cnt = 0; ++ si->cache = cache_cache = symbol_value("cache_cache"); + +-/* +- * From linux/mmzone.h +- */ +-#define ZONE_DMA 0 +-#define ZONE_NORMAL 1 +-#define ZONE_HIGHMEM 2 ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { ++ error(INFO, ++ "address is not allocated in slab subsystem: %lx\n", ++ si->spec_addr); ++ return; ++ } ++ ++ if (si->reqname && (si->reqname != p1)) ++ error(INFO, ++ "ignoring pre-selected %s cache for address: %lx\n", ++ si->reqname, si->spec_addr, si->reqname); + +-static void +-dump_free_pages_zones_v1(struct meminfo *fi) +-{ - int i, n; - ulong node_zones; - ulong size; @@ -33536,19 +34128,14 @@ - char last_zone[BUFSIZE]; - char last_area[BUFSIZE]; - char last_area_hdr[BUFSIZE]; -+ char buf[BUFSIZE]; -+ char kbuf[BUFSIZE]; -+ char *reqname; -+ ulong cache_cache; -+ ulong name, magic; -+ int cnt; -+ char *p1; ++ reqname = p1; ++ } else ++ reqname = si->reqname; - if (!(vt->flags & (NODES|ZONES))) -+ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) - error(FATAL, +- error(FATAL, - "dump_free_pages_zones_v1 called without (NODES|ZONES)\n"); -+ "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); ++ si->cache_buf = GETBUF(SIZE(kmem_cache_s)); - if (fi->flags & ADDRESS_SPECIFIED) { - switch (fi->memtype) @@ -33572,8 +34159,10 @@ - do_search = FALSE; - } - verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; -+ si->found = si->retval = 0; -+ reqname = NULL; ++ do { ++ if ((si->flags & VERBOSE) && !si->reqname && ++ !(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); - if (VALID_MEMBER(zone_struct_size)) - zone_size_offset = OFFSET(zone_struct_size); @@ -33582,43 +34171,36 @@ - else - error(FATAL, - "zone_struct has neither size nor memsize field\n"); -+ if ((!(si->flags & VERBOSE) || si->reqname) && -+ !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) -+ fprintf(fp, kmem_cache_hdr); ++ readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), ++ "kmem_cache_s buffer", FAULT_ON_ERROR); - if (do_search) - open_tmpfile(); -+ si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); -+ cnt = 0; -+ si->cache = cache_cache = symbol_value("cache_cache"); -+ -+ if (si->flags & ADDRESS_SPECIFIED) { -+ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { -+ error(INFO, -+ "address is not allocated in slab subsystem: %lx\n", -+ si->spec_addr); -+ return; ++ if (vt->kmem_cache_namelen) { ++ BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), ++ buf, vt->kmem_cache_namelen); ++ } else { ++ name = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_name)); ++ if (!read_string(name, buf, BUFSIZE-1)) { ++ error(WARNING, ++ "cannot read kmem_cache_s.c_name string at %lx\n", ++ name); ++ sprintf(buf, "(unknown)"); ++ } + } -+ -+ if (si->reqname && (si->reqname != p1)) -+ error(INFO, -+ "ignoring pre-selected %s cache for address: %lx\n", -+ si->reqname, si->spec_addr, si->reqname); -+ -+ reqname = p1; -+ } else -+ reqname = si->reqname; - hq_open(); -+ si->cache_buf = GETBUF(SIZE(kmem_cache_s)); ++ if (reqname && !STREQ(reqname, buf)) ++ goto next_cache; - for (n = sum = found = 0; n < vt->numnodes; n++) { - nt = &vt->node_table[n]; - node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); -+ do { -+ if ((si->flags & VERBOSE) && !si->reqname && -+ !(si->flags & ADDRESS_SPECIFIED)) -+ fprintf(fp, "%s%s", cnt++ ? 
"\n" : "", kmem_cache_hdr); ++ if (ignore_cache(si, buf)) { ++ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); ++ goto next_cache; ++ } - for (i = 0; i < vt->nr_zones; i++) { - @@ -33654,8 +34236,7 @@ - node_zones += SIZE(zone_struct); - continue; - } -+ readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), -+ "kmem_cache_s buffer", FAULT_ON_ERROR); ++ si->curname = buf; - if ((i == 0) && (vt->flags & NODES)) { - if (n) { @@ -33666,19 +34247,10 @@ - } - fprintf(fp, "%sNODE\n %2d\n", - n ? "\n" : "", nt->node_id); -+ if (vt->kmem_cache_namelen) { -+ BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), -+ buf, vt->kmem_cache_namelen); -+ } else { -+ name = ULONG(si->cache_buf + -+ OFFSET(kmem_cache_s_c_name)); -+ if (!read_string(name, buf, BUFSIZE-1)) { -+ error(WARNING, -+ "cannot read kmem_cache_s.c_name string at %lx\n", -+ name); -+ sprintf(buf, "(unknown)"); - } -+ } +- } ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); ++ console("cache: %lx %s\n", si->cache, si->curname); - fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", - i > 0 ? "\n" : "", @@ -33728,50 +34300,17 @@ - MKSTR(zone_start_mapnr))); - - sum += value; -- ++ magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); + - if (value) - found += dump_zone_free_area(node_zones+ - OFFSET(zone_struct_free_area), - vt->nr_free_areas, verbose); -+ if (reqname && !STREQ(reqname, buf)) -+ goto next_cache; - -- node_zones += SIZE(zone_struct); -+ if (ignore_cache(si, buf)) { -+ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); -+ goto next_cache; - } -- } - -- hq_close(); -+ si->curname = buf; - -- if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){ -- fi->retval = sum; -- return; -- } -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); -+ console("cache: %lx %s\n", si->cache, si->curname); - -- fprintf(fp, "\nnr_free_pages: %ld ", sum); -- if (sum == found) -- fprintf(fp, "(verified)\n"); -- else -- fprintf(fp, "(found %ld)\n", found); -+ magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); - -- if (!do_search) -- return; + if (magic == SLAB_C_MAGIC) { -- found = FALSE; -- rewind(pc->tmpfile); -- order = offset = 0; -- last_node[0] = NULLCHAR; -- last_zone[0] = NULLCHAR; -- last_area[0] = NULLCHAR; -- last_area_hdr[0] = NULLCHAR; +- node_zones += SIZE(zone_struct); +- } +- } + si->size = ULONG(si->cache_buf + + OFFSET(kmem_cache_s_c_org_size)); + if (!si->size) { @@ -33793,19 +34332,80 @@ + si->c_num = ULONG(si->cache_buf + + OFFSET(kmem_cache_s_c_num)); +- hq_close(); + do_slab_chain(SLAB_GET_COUNTS, si); +- if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){ +- fi->retval = sum; +- return; +- } ++ if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) ++ DUMP_KMEM_CACHE_INFO_V1(); + +- fprintf(fp, "\nnr_free_pages: %ld ", sum); +- if (sum == found) +- fprintf(fp, "(verified)\n"); +- else +- fprintf(fp, "(found %ld)\n", found); ++ if (si->flags == GET_SLAB_PAGES) ++ si->retval += (si->num_slabs * ++ (si->slabsize/PAGESIZE())); + +- if (!do_search) +- return; ++ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { ++ si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
++ vaddr_to_slab(si->spec_addr) : 0; ++ ++ do_slab_chain(SLAB_WALKTHROUGH, si); + +- found = FALSE; +- rewind(pc->tmpfile); +- order = offset = 0; +- last_node[0] = NULLCHAR; +- last_zone[0] = NULLCHAR; +- last_area[0] = NULLCHAR; +- last_area_hdr[0] = NULLCHAR; ++ if (si->found) { ++ fprintf(fp, kmem_cache_hdr); ++ DUMP_KMEM_CACHE_INFO_V1(); ++ fprintf(fp, slab_hdr); ++ DUMP_SLAB_INFO(); + ++ switch (si->found) ++ { ++ case KMEM_BUFCTL_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, ++ "(ON-SLAB kmem_bufctl_t)\n"); ++ break; + - while (fgets(buf, BUFSIZE, pc->tmpfile)) { - if (CRASHDEBUG(1) && STRNEQ(buf, "flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) -+ DUMP_KMEM_CACHE_INFO_V1(); ++ case KMEM_SLAB_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, ++ "(ON-SLAB kmem_slab_t)\n"); ++ break; - if (STRNEQ(buf, "nr_free_pages:")) - continue; -+ if (si->flags == GET_SLAB_PAGES) -+ si->retval += (si->num_slabs * -+ (si->slabsize/PAGESIZE())); ++ case KMEM_ON_SLAB: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, ++ "(unused part of slab)\n"); ++ break; ++ ++ case KMEM_OBJECT_ADDR_FREE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " %lx\n", ++ si->container ? si->container : ++ (ulong)si->spec_addr); ++ break; - if (STRNEQ(buf, "NODE")) { - fgets(buf, BUFSIZE, pc->tmpfile); @@ -33832,57 +34432,17 @@ - order++; - continue; - } -+ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { -+ si->slab = (si->flags & ADDRESS_SPECIFIED) ? -+ vaddr_to_slab(si->spec_addr) : 0; -+ -+ do_slab_chain(SLAB_WALKTHROUGH, si); ++ case KMEM_OBJECT_ADDR_INUSE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " [%lx]\n", ++ si->container ? si->container : ++ (ulong)si->spec_addr); ++ break; ++ } - if (CRASHDEBUG(0) && - !hexadecimal(strip_linefeeds(buf), 0)) - continue; -+ if (si->found) { -+ fprintf(fp, kmem_cache_hdr); -+ DUMP_KMEM_CACHE_INFO_V1(); -+ fprintf(fp, slab_hdr); -+ DUMP_SLAB_INFO(); -+ -+ switch (si->found) -+ { -+ case KMEM_BUFCTL_ADDR: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp, -+ "(ON-SLAB kmem_bufctl_t)\n"); -+ break; -+ -+ case KMEM_SLAB_ADDR: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp, -+ "(ON-SLAB kmem_slab_t)\n"); -+ break; -+ -+ case KMEM_ON_SLAB: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp, -+ "(unused part of slab)\n"); -+ break; -+ -+ case KMEM_OBJECT_ADDR_FREE: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, " %lx\n", -+ (ulong)si->spec_addr); -+ break; -+ -+ case KMEM_OBJECT_ADDR_INUSE: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, " [%lx]\n", -+ (ulong)si->spec_addr); -+ break; -+ } -+ + break; + } + } @@ -34044,15 +34604,12 @@ + for (i = 0; i < vt->kmem_max_cpus; i++) + si->cpudata[i] = (ulong *) + GETBUF(vt->kmem_max_limit * sizeof(ulong)); - -- if (do_search) -- open_tmpfile(); ++ + cnt = 0; + si->cache = cache_cache = symbol_value("cache_cache"); - -- hq_open(); ++ + if (si->flags & ADDRESS_SPECIFIED) { -+ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { ++ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { + error(INFO, + "address is not allocated in slab subsystem: %lx\n", + si->spec_addr); @@ -34066,26 +34623,12 @@ + reqname = p1; + } else + reqname = si->reqname; - -- for (n = sum = found = 0; n < vt->numnodes; n++) { -- nt = &vt->node_table[n]; -- node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ + do { + if ((si->flags & VERBOSE) && !si->reqname && + !(si->flags & ADDRESS_SPECIFIED)) + fprintf(fp, "%s%s", cnt++ ? 
"\n" : "", kmem_cache_hdr); - -- for (i = 0; i < vt->nr_zones; i++) { -- -- if (fi->flags == GET_FREE_PAGES) { -- readmem(node_zones+ -- OFFSET(zone_free_pages), -- KVADDR, &value, sizeof(ulong), -- "node_zones free_pages", -- FAULT_ON_ERROR); -- sum += value; -- node_zones += SIZE(zone); -- continue; ++ + if (vt->kmem_cache_namelen) { + readmem(si->cache+OFFSET(kmem_cache_s_name), + KVADDR, buf, vt->kmem_cache_namelen, @@ -34099,7 +34642,37 @@ + "cannot read kmem_cache_s.name string at %lx\n", + name); + sprintf(buf, "(unknown)"); - } ++ } ++ } + +- if (do_search) +- open_tmpfile(); ++ if (reqname && !STREQ(reqname, buf)) ++ goto next_cache; + +- hq_open(); ++ if (ignore_cache(si, buf)) { ++ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); ++ goto next_cache; ++ } + +- for (n = sum = found = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); ++ si->curname = buf; + +- for (i = 0; i < vt->nr_zones; i++) { +- +- if (fi->flags == GET_FREE_PAGES) { +- readmem(node_zones+ +- OFFSET(zone_free_pages), +- KVADDR, &value, sizeof(ulong), +- "node_zones free_pages", +- FAULT_ON_ERROR); +- sum += value; +- node_zones += SIZE(zone); +- continue; +- } - - if (fi->flags == GET_FREE_HIGHMEM_PAGES) { - if (i == ZONE_HIGHMEM) { @@ -34121,55 +34694,11 @@ - sum += size; - node_zones += SIZE(zone); - continue; -+ } -+ -+ if (reqname && !STREQ(reqname, buf)) -+ goto next_cache; -+ -+ if (ignore_cache(si, buf)) { -+ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); -+ goto next_cache; -+ } -+ -+ si->curname = buf; -+ +- } + readmem(si->cache+OFFSET(kmem_cache_s_objsize), + KVADDR, &tmp_val, sizeof(uint), + "objsize", FAULT_ON_ERROR); + si->size = (ulong)tmp_val; -+ -+ if (!si->size) { -+ if (STREQ(si->curname, "kmem_cache")) -+ si->size = SIZE(kmem_cache_s); -+ else { -+ error(INFO, "\"%s\" cache: objsize: %ld\n", -+ si->curname, si->size); -+ si->errors++; - } -+ } -+ -+ readmem(si->cache+OFFSET(kmem_cache_s_flags), -+ KVADDR, &tmp_val, sizeof(uint), -+ "kmem_cache_s flags", FAULT_ON_ERROR); -+ si->c_flags = (ulong)tmp_val; -+ -+ readmem(si->cache+OFFSET(kmem_cache_s_gfporder), -+ KVADDR, &tmp_val, sizeof(uint), -+ "gfporder", FAULT_ON_ERROR); -+ si->order = (ulong)tmp_val; -+ -+ readmem(si->cache+OFFSET(kmem_cache_s_num), -+ KVADDR, &tmp_val, sizeof(uint), -+ "kmem_cache_s num", FAULT_ON_ERROR); -+ si->c_num = (ulong)tmp_val; -+ -+ do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si); -+ -+ if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { -+ DUMP_KMEM_CACHE_INFO_V1(); -+ if (CRASHDEBUG(3)) -+ dump_struct("kmem_cache_s", si->cache, 0); -+ } - if ((i == 0) && (vt->flags & NODES)) { - if (n) { @@ -34180,10 +34709,15 @@ - } - fprintf(fp, "%sNODE\n %2d\n", - n ? "\n" : "", nt->node_id); -- } -+ if (si->flags == GET_SLAB_PAGES) -+ si->retval += (si->num_slabs * -+ (si->slabsize/PAGESIZE())); ++ if (!si->size) { ++ if (STREQ(si->curname, "kmem_cache")) ++ si->size = SIZE(kmem_cache_s); ++ else { ++ error(INFO, "\"%s\" cache: objsize: %ld\n", ++ si->curname, si->size); ++ si->errors++; + } ++ } - fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", - i > 0 ? 
"\n" : "", @@ -34215,13 +34749,19 @@ - readmem(node_zones+OFFSET(zone_zone_mem_map), - KVADDR, &zone_mem_map, sizeof(ulong), - "node_zones zone_mem_map", FAULT_ON_ERROR); -+ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { ++ readmem(si->cache+OFFSET(kmem_cache_s_flags), ++ KVADDR, &tmp_val, sizeof(uint), ++ "kmem_cache_s flags", FAULT_ON_ERROR); ++ si->c_flags = (ulong)tmp_val; - readmem(node_zones+ OFFSET(zone_zone_start_pfn), - KVADDR, &zone_start_pfn, sizeof(ulong), - "node_zones zone_start_pfn", FAULT_ON_ERROR); - zone_start_paddr = PTOB(zone_start_pfn); -+ gather_cpudata_list_v1(si); ++ readmem(si->cache+OFFSET(kmem_cache_s_gfporder), ++ KVADDR, &tmp_val, sizeof(uint), ++ "gfporder", FAULT_ON_ERROR); ++ si->order = (ulong)tmp_val; - if (zone_mem_map) - zone_start_mapnr = @@ -34241,96 +34781,47 @@ - MKSTR(zone_start_mapnr))); - - sum += value; -+ si->slab = (si->flags & ADDRESS_SPECIFIED) ? -+ vaddr_to_slab(si->spec_addr) : 0; ++ readmem(si->cache+OFFSET(kmem_cache_s_num), ++ KVADDR, &tmp_val, sizeof(uint), ++ "kmem_cache_s num", FAULT_ON_ERROR); ++ si->c_num = (ulong)tmp_val; - if (value) - found += dump_zone_free_area(node_zones+ - OFFSET(zone_free_area), - vt->nr_free_areas, verbose); -+ do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si); ++ do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si); - node_zones += SIZE(zone); -+ if (si->found) { -+ fprintf(fp, kmem_cache_hdr); -+ DUMP_KMEM_CACHE_INFO_V1(); -+ fprintf(fp, slab_hdr); -+ gather_slab_cached_count(si); -+ DUMP_SLAB_INFO(); -+ -+ switch (si->found) -+ { -+ case KMEM_BUFCTL_ADDR: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp,"(kmem_bufctl_t)\n"); -+ break; -+ -+ case KMEM_SLAB_ADDR: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp, "(slab_s)\n"); -+ break; -+ -+ case KMEM_ON_SLAB: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp, "(unused part of slab)\n"); -+ break; -+ -+ case KMEM_OBJECT_ADDR_FREE: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, " %lx\n", -+ (ulong)si->spec_addr); -+ break; -+ -+ case KMEM_OBJECT_ADDR_INUSE: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, " [%lx]\n", -+ (ulong)si->spec_addr); -+ break; -+ -+ case KMEM_OBJECT_ADDR_CACHED: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, -+ " %lx (cpu %d cache)\n", -+ (ulong)si->spec_addr, si->cpu); -+ break; -+ } -+ -+ break; -+ } ++ if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { ++ DUMP_KMEM_CACHE_INFO_V1(); ++ if (CRASHDEBUG(3)) ++ dump_struct("kmem_cache_s", si->cache, 0); } - } - hq_close(); -+next_cache: -+ readmem(si->cache+OFFSET(kmem_cache_s_next), -+ KVADDR, &si->cache, sizeof(ulong), -+ "kmem_cache_s next", FAULT_ON_ERROR); ++ if (si->flags == GET_SLAB_PAGES) ++ si->retval += (si->num_slabs * ++ (si->slabsize/PAGESIZE())); - if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)){ - fi->retval = sum; - return; - } -+ si->cache -= OFFSET(kmem_cache_s_next); ++ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { - fprintf(fp, "\nnr_free_pages: %ld ", sum); - if (sum == found) - fprintf(fp, "(verified)\n"); - else - fprintf(fp, "(found %ld)\n", found); -+ } while (si->cache != cache_cache); ++ gather_cpudata_list_v1(si); - if (!do_search) - return; -+ if ((si->flags & ADDRESS_SPECIFIED) && !si->found) -+ error(INFO, "%s: address not found in cache: %lx\n", -+ reqname, si->spec_addr); -+ -+ if (si->errors) -+ error(INFO, "%ld error%s encountered\n", -+ si->errors, si->errors > 1 ? "s" : ""); ++ si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
++ vaddr_to_slab(si->spec_addr) : 0; - found = FALSE; - rewind(pc->tmpfile); @@ -34339,36 +34830,33 @@ - last_zone[0] = NULLCHAR; - last_area[0] = NULLCHAR; - last_area_hdr[0] = NULLCHAR; -+ FREEBUF(si->addrlist); -+ FREEBUF(si->kmem_bufctl); -+ for (i = 0; i < vt->kmem_max_cpus; i++) -+ FREEBUF(si->cpudata[i]); ++ do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si); -+} ++ if (si->found) { ++ fprintf(fp, kmem_cache_hdr); ++ DUMP_KMEM_CACHE_INFO_V1(); ++ fprintf(fp, slab_hdr); ++ gather_slab_cached_count(si); ++ DUMP_SLAB_INFO(); - while (fgets(buf, BUFSIZE, pc->tmpfile)) { - if (CRASHDEBUG(1) && STRNEQ(buf, "found) ++ { ++ case KMEM_BUFCTL_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp,"(kmem_bufctl_t)\n"); ++ break; - if (STRNEQ(buf, "nr_free_pages:")) - continue; -+/* -+ * Updated for 2.6 slab substructure. -+ */ -+static void -+dump_kmem_cache_percpu_v2(struct meminfo *si) -+{ -+ int i; -+ char buf[BUFSIZE]; -+ char kbuf[BUFSIZE]; -+ char *reqname; -+ ulong cache_end; -+ ulong name; -+ int cnt; -+ uint tmp_val; /* Used as temporary variable to read sizeof(int) and -+ assigned to ulong variable. We are doing this to mask -+ the endian issue */ -+ char *p1; ++ case KMEM_SLAB_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, "(slab_s)\n"); ++ break; - if (STRNEQ(buf, "NODE")) { - fgets(buf, BUFSIZE, pc->tmpfile); @@ -34395,39 +34883,55 @@ - order++; - continue; - } -+ if (!(vt->flags & PERCPU_KMALLOC_V2)) -+ error(FATAL, -+ "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n"); ++ case KMEM_ON_SLAB: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, "(unused part of slab)\n"); ++ break; ++ ++ case KMEM_OBJECT_ADDR_FREE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " %lx\n", ++ si->container ? si->container : ++ (ulong)si->spec_addr); ++ break; - if (CRASHDEBUG(0) && - !hexadecimal(strip_linefeeds(buf), 0)) - continue; -+ si->found = si->retval = 0; -+ reqname = NULL; ++ case KMEM_OBJECT_ADDR_INUSE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " [%lx]\n", ++ si->container ? si->container : ++ (ulong)si->spec_addr); ++ break; - errflag = 0; - this_addr = htol(strip_linefeeds(buf), - RETURN_ON_ERROR, &errflag); - if (errflag) - continue; -+ if ((!(si->flags & VERBOSE) || si->reqname) && -+ !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) -+ fprintf(fp, kmem_cache_hdr); ++ case KMEM_OBJECT_ADDR_CACHED: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, ++ " %lx (cpu %d cache)\n", ++ si->container ? 
si->container : ++ (ulong)si->spec_addr, si->cpu); ++ break; ++ } ++ ++ break; ++ } ++ } ++ ++next_cache: ++ readmem(si->cache+OFFSET(kmem_cache_s_next), ++ KVADDR, &si->cache, sizeof(ulong), ++ "kmem_cache_s next", FAULT_ON_ERROR); - if (!page_to_phys(this_addr, &this_phys)) - continue; -+ si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); -+ si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); -+ for (i = 0; i < vt->kmem_max_cpus; i++) -+ si->cpudata[i] = (ulong *) -+ GETBUF(vt->kmem_max_limit * sizeof(ulong)); -+ if(vt->flags & PERCPU_KMALLOC_V2_NODES) -+ si->shared_array_cache = (ulong *) -+ GETBUF(vt->kmem_cache_len_nodes * -+ (vt->kmem_max_limit+1) * sizeof(ulong)); -+ else -+ si->shared_array_cache = (ulong *) -+ GETBUF((vt->kmem_max_limit+1) * sizeof(ulong)); ++ si->cache -= OFFSET(kmem_cache_s_next); - if ((searchphys >= this_phys) && - (searchphys < (this_phys+chunk_size))) { @@ -34436,34 +34940,17 @@ - found = TRUE; - break; - } -+ cnt = 0; -+ -+ get_symbol_data("cache_chain", sizeof(ulong), &si->cache); -+ si->cache -= OFFSET(kmem_cache_s_next); -+ cache_end = symbol_value("cache_chain"); -+ -+ if (si->flags & ADDRESS_SPECIFIED) { -+ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { -+ error(INFO, -+ "address is not allocated in slab subsystem: %lx\n", -+ si->spec_addr); -+ return; -+ } -+ -+ if (si->reqname && (si->reqname != p1)) -+ error(INFO, -+ "ignoring pre-selected %s cache for address: %lx\n", -+ si->reqname, si->spec_addr, si->reqname); -+ reqname = p1; -+ } else -+ reqname = si->reqname; ++ } while (si->cache != cache_cache); - } - close_tmpfile(); -+ do { -+ if ((si->flags & VERBOSE) && !si->reqname && -+ !(si->flags & ADDRESS_SPECIFIED)) -+ fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); ++ if ((si->flags & ADDRESS_SPECIFIED) && !si->found) ++ error(INFO, "%s: address not found in cache: %lx\n", ++ reqname, si->spec_addr); ++ ++ if (si->errors) ++ error(INFO, "%ld error%s encountered\n", ++ si->errors, si->errors > 1 ? "s" : ""); - if (found) { - if (strlen(last_node)) @@ -34485,46 +34972,33 @@ - fprintf(fp, "(%llx is %s", fi->spec_addr, - PAGEOFFSET(fi->spec_addr) ? "in " : ""); - break; -+ if (vt->kmem_cache_namelen) { -+ readmem(si->cache+OFFSET(kmem_cache_s_name), -+ KVADDR, buf, vt->kmem_cache_namelen, -+ "name array", FAULT_ON_ERROR); -+ } else { -+ readmem(si->cache+OFFSET(kmem_cache_s_name), -+ KVADDR, &name, sizeof(ulong), -+ "name", FAULT_ON_ERROR); -+ if (!read_string(name, buf, BUFSIZE-1)) { -+ error(WARNING, -+ "cannot read kmem_cache_s.name string at %lx\n", -+ name); -+ sprintf(buf, "(unknown)"); - } +- } - fprintf(fp, "%s of %ld pages) ", - ordinal(offset+1, buf), power(2, order)); - } -- ++ FREEBUF(si->addrlist); ++ FREEBUF(si->kmem_bufctl); ++ for (i = 0; i < vt->kmem_max_cpus; i++) ++ FREEBUF(si->cpudata[i]); + - fi->retval = TRUE; - fprintf(fp, "\n"); - } --} -+ } + } -+ if (reqname && !STREQ(reqname, buf)) -+ goto next_cache; -static char * -page_usage_hdr = "ZONE NAME FREE ACTIVE INACTIVE_DIRTY INACTIVE_CLEAN MIN/LOW/HIGH"; -+ if (ignore_cache(si, buf)) { -+ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); -+ goto next_cache; -+ } - --/* +- + /* - * Display info about the non-free pages in each zone. -- */ ++ * Updated for 2.6 slab substructure. 
+ */ -static int -dump_zone_page_usage(void) --{ ++static void ++dump_kmem_cache_percpu_v2(struct meminfo *si) + { - int i, n; - ulong value, node_zones; - struct node_table *nt; @@ -34534,7 +35008,17 @@ - char buf1[BUFSIZE]; - char buf2[BUFSIZE]; - char buf3[BUFSIZE]; -+ si->curname = buf; ++ int i; ++ char buf[BUFSIZE]; ++ char kbuf[BUFSIZE]; ++ char *reqname; ++ ulong cache_end; ++ ulong name; ++ int cnt; ++ uint tmp_val; /* Used as temporary variable to read sizeof(int) and ++ assigned to ulong variable. We are doing this to mask ++ the endian issue */ ++ char *p1; - if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) || - !VALID_MEMBER(zone_struct_inactive_clean_pages) || @@ -34543,21 +35027,13 @@ - !VALID_MEMBER(zone_struct_pages_low) || - !VALID_MEMBER(zone_struct_pages_high)) - return FALSE; -+ readmem(si->cache+OFFSET(kmem_cache_s_objsize), -+ KVADDR, &tmp_val, sizeof(uint), -+ "objsize", FAULT_ON_ERROR); -+ si->size = (ulong)tmp_val; ++ if (!(vt->flags & PERCPU_KMALLOC_V2)) ++ error(FATAL, ++ "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n"); - fprintf(fp, "\n"); -+ if (!si->size) { -+ if (STREQ(si->curname, "kmem_cache")) -+ si->size = SIZE(kmem_cache_s); -+ else { -+ error(INFO, "\"%s\" cache: objsize: %ld\n", -+ si->curname, si->size); -+ si->errors++; -+ } -+ } ++ si->found = si->retval = 0; ++ reqname = NULL; - for (n = 0; n < vt->numnodes; n++) { - nt = &vt->node_table[n]; @@ -34568,10 +35044,9 @@ - n ? "\n" : "", nt->node_id); - } - fprintf(fp, "%s\n", page_usage_hdr); -+ readmem(si->cache+OFFSET(kmem_cache_s_flags), -+ KVADDR, &tmp_val, sizeof(uint), -+ "kmem_cache_s flags", FAULT_ON_ERROR); -+ si->c_flags = (ulong)tmp_val; ++ if ((!(si->flags & VERBOSE) || si->reqname) && ++ !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) ++ fprintf(fp, kmem_cache_hdr); - for (i = 0; i < vt->nr_zones; i++) { - readmem(node_zones+OFFSET(zone_struct_free_pages), @@ -34599,10 +35074,18 @@ - readmem(node_zones+OFFSET(zone_struct_pages_high), - KVADDR, &pages_high, sizeof(ulong), - "node_zones pages_high", FAULT_ON_ERROR); -+ readmem(si->cache+OFFSET(kmem_cache_s_gfporder), -+ KVADDR, &tmp_val, sizeof(uint), -+ "gfporder", FAULT_ON_ERROR); -+ si->order = (ulong)tmp_val; ++ si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); ++ si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); ++ for (i = 0; i < vt->kmem_max_cpus; i++) ++ si->cpudata[i] = (ulong *) ++ GETBUF(vt->kmem_max_limit * sizeof(ulong)); ++ if(vt->flags & PERCPU_KMALLOC_V2_NODES) ++ si->shared_array_cache = (ulong *) ++ GETBUF(vt->kmem_cache_len_nodes * ++ (vt->kmem_max_limit+1) * sizeof(ulong)); ++ else ++ si->shared_array_cache = (ulong *) ++ GETBUF((vt->kmem_max_limit+1) * sizeof(ulong)); - readmem(node_zones+OFFSET(zone_struct_name), KVADDR, - &value, sizeof(void *), @@ -34611,10 +35094,7 @@ - sprintf(namebuf, "%-8s", buf1); - else - sprintf(namebuf, "(unknown)"); -+ readmem(si->cache+OFFSET(kmem_cache_s_num), -+ KVADDR, &tmp_val, sizeof(uint), -+ "kmem_cache_s num", FAULT_ON_ERROR); -+ si->c_num = (ulong)tmp_val; ++ cnt = 0; - sprintf(buf2, "%ld/%ld/%ld", - pages_min, pages_low, pages_high); @@ -34627,34 +35107,63 @@ - inactive_clean_pages, - mkstring(buf3, strlen("MIN/LOW/HIGH"), - CENTER, buf2)); -+ if( vt->flags & PERCPU_KMALLOC_V2_NODES ) -+ do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si); -+ else -+ do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); ++ get_symbol_data("cache_chain", sizeof(ulong), &si->cache); ++ si->cache -= OFFSET(kmem_cache_s_next); ++ cache_end = 
symbol_value("cache_chain"); - node_zones += SIZE(zone_struct); -+ if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { -+ DUMP_KMEM_CACHE_INFO_V2(); -+ if (CRASHDEBUG(3)) -+ dump_struct("kmem_cache_s", si->cache, 0); ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { ++ error(INFO, ++ "address is not allocated in slab subsystem: %lx\n", ++ si->spec_addr); ++ return; } - } ++ ++ if (si->reqname && (si->reqname != p1)) ++ error(INFO, ++ "ignoring pre-selected %s cache for address: %lx\n", ++ si->reqname, si->spec_addr, si->reqname); ++ reqname = p1; ++ } else ++ reqname = si->reqname; - return TRUE; -} -+ if (si->flags == GET_SLAB_PAGES) -+ si->retval += (si->num_slabs * -+ (si->slabsize/PAGESIZE())); ++ do { ++ if ((si->flags & VERBOSE) && !si->reqname && ++ !(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); ++ ++ if (vt->kmem_cache_namelen) { ++ readmem(si->cache+OFFSET(kmem_cache_s_name), ++ KVADDR, buf, vt->kmem_cache_namelen, ++ "name array", FAULT_ON_ERROR); ++ } else { ++ readmem(si->cache+OFFSET(kmem_cache_s_name), ++ KVADDR, &name, sizeof(ulong), ++ "name", FAULT_ON_ERROR); ++ if (!read_string(name, buf, BUFSIZE-1)) { ++ error(WARNING, ++ "cannot read kmem_cache_s.name string at %lx\n", ++ name); ++ sprintf(buf, "(unknown)"); ++ } ++ } -+ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { ++ if (reqname && !STREQ(reqname, buf)) ++ goto next_cache; -/* - * Dump the num "order" contents of the zone_t free_area array. - */ -char *free_area_hdr3 = "AREA SIZE FREE_AREA_STRUCT\n"; -char *free_area_hdr4 = "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; -+ if (!(vt->flags & PERCPU_KMALLOC_V2_NODES)) -+ gather_cpudata_list_v2(si); ++ if (ignore_cache(si, buf)) { ++ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); ++ goto next_cache; ++ } -static int -dump_zone_free_area(ulong free_area, int num, ulong verbose) @@ -34665,8 +35174,7 @@ - char buf[BUFSIZE]; - ulong free_area_buf[3]; - struct list_data list_data, *ld; -+ si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
-+ vaddr_to_slab(si->spec_addr) : 0; ++ si->curname = buf; - if (VALID_STRUCT(free_area_struct)) { - if (SIZE(free_area_struct) != (3 * sizeof(ulong))) @@ -34680,36 +35188,57 @@ - SIZE(free_area)); - } else error(FATAL, - "neither free_area_struct or free_area structures exist\n"); -+ if (vt->flags & PERCPU_KMALLOC_V2_NODES) -+ do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si); -+ else -+ do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); ++ readmem(si->cache+OFFSET(kmem_cache_s_objsize), ++ KVADDR, &tmp_val, sizeof(uint), ++ "objsize", FAULT_ON_ERROR); ++ si->size = (ulong)tmp_val; ++ ++ if (!si->size) { ++ if (STREQ(si->curname, "kmem_cache")) ++ si->size = SIZE(kmem_cache_s); ++ else { ++ error(INFO, "\"%s\" cache: objsize: %ld\n", ++ si->curname, si->size); ++ si->errors++; ++ } ++ } ++ ++ readmem(si->cache+OFFSET(kmem_cache_s_flags), ++ KVADDR, &tmp_val, sizeof(uint), ++ "kmem_cache_s flags", FAULT_ON_ERROR); ++ si->c_flags = (ulong)tmp_val; ++ ++ readmem(si->cache+OFFSET(kmem_cache_s_gfporder), ++ KVADDR, &tmp_val, sizeof(uint), ++ "gfporder", FAULT_ON_ERROR); ++ si->order = (ulong)tmp_val; ++ ++ readmem(si->cache+OFFSET(kmem_cache_s_num), ++ KVADDR, &tmp_val, sizeof(uint), ++ "kmem_cache_s num", FAULT_ON_ERROR); ++ si->c_num = (ulong)tmp_val; ++ ++ if( vt->flags & PERCPU_KMALLOC_V2_NODES ) ++ do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si); ++ else ++ do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); - ld = &list_data; -+ if (si->found) { -+ fprintf(fp, kmem_cache_hdr); -+ DUMP_KMEM_CACHE_INFO_V2(); -+ fprintf(fp, slab_hdr); -+ gather_slab_cached_count(si); -+ DUMP_SLAB_INFO(); ++ if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { ++ DUMP_KMEM_CACHE_INFO_V2(); ++ if (CRASHDEBUG(3)) ++ dump_struct("kmem_cache_s", si->cache, 0); ++ } - if (!verbose) - fprintf(fp, free_area_hdr4); -+ switch (si->found) -+ { -+ case KMEM_BUFCTL_ADDR: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp,"(kmem_bufctl_t)\n"); -+ break; ++ if (si->flags == GET_SLAB_PAGES) ++ si->retval += (si->num_slabs * ++ (si->slabsize/PAGESIZE())); - total_free = 0; - flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); -+ case KMEM_SLAB_ADDR: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp, "(slab)\n"); -+ break; ++ if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { - for (i = 0; i < num; i++, - free_area += SIZE_OPTION(free_area_struct, free_area)) { @@ -34719,42 +35248,20 @@ - chunk_size = power(2, i); - sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); - fprintf(fp, " %7s ", buf); -+ case KMEM_ON_SLAB: -+ fprintf(fp, " %lx ", -+ (ulong)si->spec_addr); -+ fprintf(fp, "(unused part of slab)\n"); -+ break; -+ -+ case KMEM_OBJECT_ADDR_FREE: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, " %lx\n", -+ (ulong)si->spec_addr); -+ break; ++ if (!(vt->flags & PERCPU_KMALLOC_V2_NODES)) ++ gather_cpudata_list_v2(si); - readmem(free_area, KVADDR, free_area_buf, - sizeof(ulong) * 3, "free_area_struct", FAULT_ON_ERROR); -+ case KMEM_OBJECT_ADDR_INUSE: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, " [%lx]\n", -+ (ulong)si->spec_addr); -+ break; ++ si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
++ vaddr_to_slab(si->spec_addr) : 0; - fprintf(fp, "%s ", - mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_area))); -+ case KMEM_OBJECT_ADDR_CACHED: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, -+ " %lx (cpu %d cache)\n", -+ (ulong)si->spec_addr, si->cpu); -+ break; -+ -+ case KMEM_OBJECT_ADDR_SHARED: -+ fprintf(fp, free_inuse_hdr); -+ fprintf(fp, -+ " %lx (shared cache)\n", -+ (ulong)si->spec_addr); -+ break; -+ } ++ if (vt->flags & PERCPU_KMALLOC_V2_NODES) ++ do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si); ++ else ++ do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); - if (free_area_buf[0] == free_area) { - if (verbose) @@ -34762,12 +35269,16 @@ - else - fprintf(fp, "%6d %6d\n", 0, 0); - continue; -+ break; -+ } - } +- } - - if (verbose) - fprintf(fp, "\n"); ++ if (si->found) { ++ fprintf(fp, kmem_cache_hdr); ++ DUMP_KMEM_CACHE_INFO_V2(); ++ fprintf(fp, slab_hdr); ++ gather_slab_cached_count(si); ++ DUMP_SLAB_INFO(); - BZERO(ld, sizeof(struct list_data)); - ld->flags = verbose | RETURN_ON_DUPLICATE; @@ -34780,61 +35291,80 @@ - OFFSET(list_head_next); - else error(FATAL, - "neither page.list or page.lru exist?\n"); -+next_cache: -+ readmem(si->cache+OFFSET(kmem_cache_s_next), -+ KVADDR, &si->cache, sizeof(ulong), -+ "kmem_cache_s next", FAULT_ON_ERROR); ++ switch (si->found) ++ { ++ case KMEM_BUFCTL_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp,"(kmem_bufctl_t)\n"); ++ break; - cnt = do_list(ld); - if (cnt < 0) - error(FATAL, - "corrupted free list from free_area_struct: %lx\n", - free_area); -+ if (si->cache != cache_end) -+ si->cache -= OFFSET(kmem_cache_s_next); ++ case KMEM_SLAB_ADDR: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, "(slab)\n"); ++ break; - if (!verbose) - fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size); -+ } while (si->cache != cache_end); ++ case KMEM_ON_SLAB: ++ fprintf(fp, " %lx ", ++ (ulong)si->spec_addr); ++ fprintf(fp, "(unused part of slab)\n"); ++ break; ++ ++ case KMEM_OBJECT_ADDR_FREE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " %lx\n", ++ si->container ? si->container : ++ (ulong)si->spec_addr); ++ break; - total_free += (cnt * chunk_size); - } -+ if ((si->flags & ADDRESS_SPECIFIED) && !si->found) -+ error(INFO, "%s: address not found in cache: %lx\n", -+ reqname, si->spec_addr); -+ -+ if (si->errors) -+ error(INFO, "%ld error%s encountered\n", -+ si->errors, si->errors > 1 ? "s" : ""); -+ -+ FREEBUF(si->addrlist); -+ FREEBUF(si->kmem_bufctl); -+ for (i = 0; i < vt->kmem_max_cpus; i++) -+ FREEBUF(si->cpudata[i]); -+ FREEBUF(si->shared_array_cache); ++ case KMEM_OBJECT_ADDR_INUSE: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, " [%lx]\n", ++ si->container ? si->container : ++ (ulong)si->spec_addr); ++ break; - return total_free; - } +-} ++ case KMEM_OBJECT_ADDR_CACHED: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, ++ " %lx (cpu %d cache)\n", ++ si->container ? si->container : ++ (ulong)si->spec_addr, si->cpu); ++ break; -+ - /* +-/* - * dump_kmeminfo displays basic memory use information typically shown - * by /proc/meminfo, and then some... -+ * Walk through the slab chain hanging off a kmem_cache_s structure, -+ * gathering basic statistics. -+ * -+ * TBD: Given a specified physical address, determine whether it's in this -+ * slab chain, and whether it's in use or not. - */ +- */ ++ case KMEM_OBJECT_ADDR_SHARED: ++ fprintf(fp, free_inuse_hdr); ++ fprintf(fp, ++ " %lx (shared cache)\n", ++ si->container ? 
si->container : ++ (ulong)si->spec_addr); ++ break; ++ } -char *kmeminfo_hdr = " PAGES TOTAL PERCENTAGE\n"; -+#define INSLAB(obj, si) \ -+ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem) ++ break; ++ } ++ } - static void +-static void -dump_kmeminfo(void) -+do_slab_chain(int cmd, struct meminfo *si) - { +-{ - ulong totalram_pages; - ulong freeram_pages; - ulong used_pages; @@ -34857,11 +35387,13 @@ - ulong get_slabs; - struct syment *sp_array[2]; - char buf[BUFSIZE]; -+ ulong tmp, magic; -+ ulong kmem_slab_end; -+ char *kmem_slab_s_buf; ++next_cache: ++ readmem(si->cache+OFFSET(kmem_cache_s_next), ++ KVADDR, &si->cache, sizeof(ulong), ++ "kmem_cache_s next", FAULT_ON_ERROR); -+ si->slabsize = (power(2, si->order) * PAGESIZE()); ++ if (si->cache != cache_end) ++ si->cache -= OFFSET(kmem_cache_s_next); - BZERO(&meminfo, sizeof(struct meminfo)); - meminfo.flags = GET_ALL; @@ -34870,7 +35402,7 @@ - shared_pages = meminfo.get_shared; - get_buffers = meminfo.get_buffers; - get_slabs = meminfo.get_slabs; -+ kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); ++ } while (si->cache != cache_end); - fprintf(fp, kmeminfo_hdr); - /* @@ -34885,15 +35417,21 @@ - vt->totalram_pages : get_totalram; - } else - totalram_pages = get_totalram; -+ switch (cmd) -+ { -+ case SLAB_GET_COUNTS: -+ si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); ++ if ((si->flags & ADDRESS_SPECIFIED) && !si->found) ++ error(INFO, "%s: address not found in cache: %lx\n", ++ reqname, si->spec_addr); ++ ++ if (si->errors) ++ error(INFO, "%ld error%s encountered\n", ++ si->errors, si->errors > 1 ? "s" : ""); - fprintf(fp, "%10s %7ld %11s ----\n", "TOTAL MEM", - totalram_pages, pages_to_size(totalram_pages, buf)); -+ if (slab_data_saved(si)) -+ return; ++ FREEBUF(si->addrlist); ++ FREEBUF(si->kmem_bufctl); ++ for (i = 0; i < vt->kmem_max_cpus; i++) ++ FREEBUF(si->cpudata[i]); ++ FREEBUF(si->shared_array_cache); - /* - * Get free pages from dump_free_pages() or its associates. @@ -34905,14 +35443,12 @@ - pct = (freeram_pages * 100)/totalram_pages; - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", - "FREE", freeram_pages, pages_to_size(freeram_pages, buf), pct); -+ si->num_slabs = si->inuse = 0; ++} - used_pages = totalram_pages - freeram_pages; - pct = (used_pages * 100)/totalram_pages; - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", - "USED", used_pages, pages_to_size(used_pages, buf), pct); -+ if (si->slab == kmem_slab_end) -+ return; - /* - * Get shared pages from dump_mem_map(). Note that this is done @@ -34922,7 +35458,13 @@ - pct = (shared_pages * 100)/totalram_pages; - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", - "SHARED", shared_pages, pages_to_size(shared_pages, buf), pct); -+ kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); ++/* ++ * Walk through the slab chain hanging off a kmem_cache_s structure, ++ * gathering basic statistics. ++ * ++ * TBD: Given a specified physical address, determine whether it's in this ++ * slab chain, and whether it's in use or not. 
++ */ - subtract_buffer_pages = 0; - if (symbol_exists("buffermem_pages")) { @@ -34936,30 +35478,28 @@ - subtract_buffer_pages = buffer_pages = nr_blockdev_pages(); - } else - buffer_pages = 0; -+ do { -+ if (received_SIGINT()) { -+ FREEBUF(kmem_slab_s_buf); -+ restart(0); -+ } ++#define INSLAB(obj, si) \ ++ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem) - pct = (buffer_pages * 100)/totalram_pages; - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", - "BUFFERS", buffer_pages, pages_to_size(buffer_pages, buf), pct); -+ readmem(si->slab, KVADDR, kmem_slab_s_buf, -+ SIZE(kmem_slab_s), "kmem_slab_s buffer", -+ FAULT_ON_ERROR); ++static void ++do_slab_chain(int cmd, struct meminfo *si) ++{ ++ ulong tmp, magic; ++ ulong kmem_slab_end; ++ char *kmem_slab_s_buf; - if (CRASHDEBUG(1)) - error(NOTE, "pages with buffers: %ld\n", get_buffers); -+ magic = ULONG(kmem_slab_s_buf + -+ OFFSET(kmem_slab_s_s_magic)); ++ si->slabsize = (power(2, si->order) * PAGESIZE()); - /* - * page_cache_size has evolved from a long to an atomic_t to - * not existing at all. - */ -+ if (magic == SLAB_MAGIC_ALLOC) { - +- - if (symbol_exists("page_cache_size")) { - get_symbol_type("page_cache_size", NULL, &req); - if (req.length == sizeof(int)) { @@ -34972,23 +35512,35 @@ - get_symbol_data("nr_pagecache", sizeof(int), &tmp); - page_cache_size = (long)tmp; - } -- ++ kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); ++ ++ switch (cmd) ++ { ++ case SLAB_GET_COUNTS: ++ si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); + - page_cache_size -= subtract_buffer_pages; -- ++ if (slab_data_saved(si)) ++ return; + - pct = (page_cache_size * 100)/totalram_pages; - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", - "CACHED", page_cache_size, - pages_to_size(page_cache_size, buf), pct); -- ++ si->num_slabs = si->inuse = 0; + - /* - * Although /proc/meminfo doesn't show it, show how much memory - * the slabs take up. - */ -- ++ if (si->slab == kmem_slab_end) ++ return; + - pct = (get_slabs * 100)/totalram_pages; - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", - "SLAB", get_slabs, pages_to_size(get_slabs, buf), pct); -- ++ kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); + - if (symbol_exists("totalhigh_pages")) { - switch (get_syment_array("totalhigh_pages", sp_array, 2)) - { @@ -35008,13 +35560,21 @@ - totalhigh_pages = MAX(value1, value2); - break; - } -- ++ do { ++ if (received_SIGINT()) { ++ FREEBUF(kmem_slab_s_buf); ++ restart(0); ++ } + - pct = totalhigh_pages ? 
- (totalhigh_pages * 100)/totalram_pages : 0; - fprintf(fp, "\n%10s %7ld %11s %3ld%% of TOTAL MEM\n", - "TOTAL HIGH", totalhigh_pages, - pages_to_size(totalhigh_pages, buf), pct); -- ++ readmem(si->slab, KVADDR, kmem_slab_s_buf, ++ SIZE(kmem_slab_s), "kmem_slab_s buffer", ++ FAULT_ON_ERROR); + - meminfo.flags = GET_FREE_HIGHMEM_PAGES; - vt->dump_free_pages(&meminfo); - freehighmem_pages = meminfo.retval; @@ -35023,19 +35583,16 @@ - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL HIGH\n", - "FREE HIGH", freehighmem_pages, - pages_to_size(freehighmem_pages, buf), pct); -- ++ magic = ULONG(kmem_slab_s_buf + ++ OFFSET(kmem_slab_s_s_magic)); + - totallowmem_pages = totalram_pages - totalhigh_pages; - pct = (totallowmem_pages * 100)/totalram_pages; - fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL MEM\n", - "TOTAL LOW", totallowmem_pages, - pages_to_size(totallowmem_pages, buf), pct); -- -- freelowmem_pages = freeram_pages - freehighmem_pages; -- pct = (freelowmem_pages * 100)/totallowmem_pages; -- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL LOW\n", -- "FREE LOW", freelowmem_pages, -- pages_to_size(freelowmem_pages, buf), pct); -- } ++ if (magic == SLAB_MAGIC_ALLOC) { ++ + tmp = ULONG(kmem_slab_s_buf + + OFFSET(kmem_slab_s_s_inuse)); + @@ -35059,6 +35616,17 @@ + save_slab_data(si); + break; +- freelowmem_pages = freeram_pages - freehighmem_pages; +- pct = (freelowmem_pages * 100)/totallowmem_pages; +- fprintf(fp, "%10s %7ld %11s %3ld%% of TOTAL LOW\n", +- "FREE LOW", freelowmem_pages, +- pages_to_size(freelowmem_pages, buf), pct); +- } ++ case SLAB_WALKTHROUGH: ++ if (!si->slab) ++ si->slab = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_s_c_firstp)); + - /* - * get swap data from dump_swap_info(). - */ @@ -35083,60 +35651,29 @@ - } else - error(INFO, "swap_info[%ld].swap_map at %lx is unaccessible\n", - totalused_pages, totalswap_pages); -+ case SLAB_WALKTHROUGH: -+ if (!si->slab) -+ si->slab = ULONG(si->cache_buf + -+ OFFSET(kmem_cache_s_c_firstp)); - -- dump_zone_page_usage(); --} + if (si->slab == kmem_slab_end) + return; --/* -- * Emulate 2.6 nr_blockdev_pages() function. 
-- */ --static ulong --nr_blockdev_pages(void) --{ -- struct list_data list_data, *ld; -- ulong *bdevlist; -- int i, bdevcnt; -- ulong inode, address_space; -- ulong nrpages; -- char *block_device_buf, *inode_buf, *address_space_buf; +- dump_zone_page_usage(); + if (CRASHDEBUG(1)) { + fprintf(fp, "search cache: [%s] ", si->curname); + if (si->flags & ADDRESS_SPECIFIED) + fprintf(fp, "for %llx", si->spec_addr); + fprintf(fp, "\n"); + } - -- block_device_buf = GETBUF(SIZE(block_device)); -- inode_buf = GETBUF(SIZE(inode)); -- address_space_buf = GETBUF(SIZE(address_space)); ++ + si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); - -- ld = &list_data; -- BZERO(ld, sizeof(struct list_data)); ++ + do { + if (received_SIGINT()) { + FREEBUF(kmem_slab_s_buf); + restart(0); + } - -- get_symbol_data("all_bdevs", sizeof(void *), &ld->start); -- ld->end = symbol_value("all_bdevs"); -- ld->list_head_offset = OFFSET(block_device_bd_list); ++ + readmem(si->slab, KVADDR, kmem_slab_s_buf, + SIZE(kmem_slab_s), "kmem_slab_s buffer", + FAULT_ON_ERROR); - -- hq_open(); -- bdevcnt = do_list(ld); -- bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong)); -- bdevcnt = retrieve_list(bdevlist, bdevcnt); -- hq_close(); ++ + dump_slab(si); + + if (si->found) { @@ -35148,6 +35685,48 @@ + OFFSET(kmem_slab_s_s_nextp)); + + } while (si->slab != kmem_slab_end); ++ ++ FREEBUF(kmem_slab_s_buf); ++ break; ++ } + } + ++ + /* +- * Emulate 2.6 nr_blockdev_pages() function. ++ * do_slab_chain() adapted for newer percpu slab format. + */ +-static ulong +-nr_blockdev_pages(void) +-{ +- struct list_data list_data, *ld; +- ulong *bdevlist; +- int i, bdevcnt; +- ulong inode, address_space; +- ulong nrpages; +- char *block_device_buf, *inode_buf, *address_space_buf; + +- block_device_buf = GETBUF(SIZE(block_device)); +- inode_buf = GETBUF(SIZE(inode)); +- address_space_buf = GETBUF(SIZE(address_space)); ++#define SLAB_BASE(X) (PTOB(BTOP(X))) + +- ld = &list_data; +- BZERO(ld, sizeof(struct list_data)); ++#define INSLAB_PERCPU(obj, si) \ ++ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem)) + +- get_symbol_data("all_bdevs", sizeof(void *), &ld->start); +- ld->end = symbol_value("all_bdevs"); +- ld->list_head_offset = OFFSET(block_device_bd_list); ++#define SLAB_CHAINS (3) + +- hq_open(); +- bdevcnt = do_list(ld); +- bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong)); +- bdevcnt = retrieve_list(bdevlist, bdevcnt); +- hq_close(); ++static char *slab_chain_name_v1[] = {"full", "partial", "free"}; - /* - * go through the block_device list, emulating: @@ -35167,23 +35746,81 @@ - FAULT_ON_ERROR); - nrpages += ULONG(address_space_buf + - OFFSET(address_space_nrpages)); -+ FREEBUF(kmem_slab_s_buf); -+ break; ++static void ++do_slab_chain_percpu_v1(long cmd, struct meminfo *si) ++{ ++ int i, tmp, s; ++ int list_borked; ++ char *slab_s_buf; ++ ulong specified_slab; ++ ulong last; ++ ulong slab_chains[SLAB_CHAINS]; ++ ++ list_borked = 0; ++ si->slabsize = (power(2, si->order) * PAGESIZE()); ++ si->cpucached_slab = 0; ++ ++ if (VALID_MEMBER(kmem_cache_s_slabs)) { ++ slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs); ++ slab_chains[1] = 0; ++ slab_chains[2] = 0; ++ } else { ++ slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full); ++ slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial); ++ slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free); } -+} - FREEBUF(bdevlist); - FREEBUF(block_device_buf); - FREEBUF(inode_buf); - FREEBUF(address_space_buf); -- ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "[ 
%s: %lx ", si->curname, si->cache); ++ fprintf(fp, "full: %lx partial: %lx free: %lx ]\n", ++ slab_chains[0], slab_chains[1], slab_chains[2]); ++ } + - return nrpages; -} ++ switch (cmd) ++ { ++ case SLAB_GET_COUNTS: ++ si->flags |= SLAB_GET_COUNTS; ++ si->flags &= ~SLAB_WALKTHROUGH; ++ si->cpucached_cache = 0; ++ si->num_slabs = si->inuse = 0; ++ gather_cpudata_list_v1(si); ++ ++ slab_s_buf = GETBUF(SIZE(slab_s)); ++ ++ for (s = 0; s < SLAB_CHAINS; s++) { ++ ++ if (!slab_chains[s]) ++ continue; - /* +-/* - * dump_vmlist() displays information from the vmlist. -+ * do_slab_chain() adapted for newer percpu slab format. - */ +- */ ++ if (!readmem(slab_chains[s], ++ KVADDR, &si->slab, sizeof(ulong), ++ "first slab", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, ++ "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, slab_chain_name_v1[s], ++ slab_chains[s]); ++ list_borked = 1; ++ continue; ++ } ++ ++ if (slab_data_saved(si)) { ++ FREEBUF(slab_s_buf); ++ return; ++ } ++ ++ if (si->slab == slab_chains[s]) ++ continue; ++ ++ last = slab_chains[s]; -static void -dump_vmlist(struct meminfo *vi) @@ -35194,12 +35831,29 @@ - ulong vmlist; - ulong addr, size, next, pcheck; - physaddr_t paddr; -+#define SLAB_BASE(X) (PTOB(BTOP(X))) ++ do { ++ if (received_SIGINT()) { ++ FREEBUF(slab_s_buf); ++ restart(0); ++ } - get_symbol_data("vmlist", sizeof(void *), &vmlist); - next = vmlist; -+#define INSLAB_PERCPU(obj, si) \ -+ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem)) ++ if (!verify_slab_v1(si, last, s)) { ++ list_borked = 1; ++ continue; ++ } ++ last = si->slab - OFFSET(slab_s_list); ++ ++ readmem(si->slab, KVADDR, slab_s_buf, ++ SIZE(slab_s), "slab_s buffer", ++ FAULT_ON_ERROR); ++ ++ tmp = INT(slab_s_buf + OFFSET(slab_s_inuse)); ++ si->inuse += tmp; ++ ++ if (ACTIVE()) ++ gather_cpudata_list_v1(si); - while (next) { - if ((next == vmlist) && @@ -35210,8 +35864,32 @@ - fprintf(fp, "%s SIZE\n", - mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "), - CENTER|LJUST, "ADDRESS RANGE")); -- } -+#define SLAB_CHAINS (3) ++ si->s_mem = ULONG(slab_s_buf + ++ OFFSET(slab_s_s_mem)); ++ gather_slab_cached_count(si); ++ ++ si->num_slabs++; ++ ++ si->slab = ULONG(slab_s_buf + ++ OFFSET(slab_s_list)); ++ si->slab -= OFFSET(slab_s_list); ++ ++ /* ++ * Check for slab transition. 
(Tony Dziedzic) ++ */ ++ for (i = 0; i < SLAB_CHAINS; i++) { ++ if ((i != s) && ++ (si->slab == slab_chains[i])) { ++ error(NOTE, ++ "%s: slab chain inconsistency: %s list\n", ++ si->curname, ++ slab_chain_name_v1[s]); ++ list_borked = 1; ++ } ++ } ++ ++ } while (si->slab != slab_chains[s] && !list_borked); + } - readmem(next+OFFSET(vm_struct_addr), KVADDR, - &addr, sizeof(void *), @@ -35219,7 +35897,10 @@ - readmem(next+OFFSET(vm_struct_size), KVADDR, - &size, sizeof(ulong), - "vmlist size", FAULT_ON_ERROR); -+static char *slab_chain_name_v1[] = {"full", "partial", "free"}; ++ FREEBUF(slab_s_buf); ++ if (!list_borked) ++ save_slab_data(si); ++ break; - if (!(vi->flags & ADDRESS_SPECIFIED) || - ((vi->memtype == KVADDR) && @@ -35232,22 +35913,31 @@ - mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST, - MKSTR(addr+size)), - size); -+static void -+do_slab_chain_percpu_v1(long cmd, struct meminfo *si) -+{ -+ int i, tmp, s; -+ int list_borked; -+ char *slab_s_buf; -+ ulong specified_slab; -+ ulong last; -+ ulong slab_chains[SLAB_CHAINS]; ++ case SLAB_WALKTHROUGH: ++ specified_slab = si->slab; ++ si->flags |= SLAB_WALKTHROUGH; ++ si->flags &= ~SLAB_GET_COUNTS; - if ((vi->flags & ADDRESS_SPECIFIED) && - (vi->memtype == PHYSADDR)) { - for (pcheck = addr; pcheck < (addr+size); - pcheck += PAGESIZE()) { - if (!kvtop(NULL, pcheck, &paddr, 0)) -- continue; ++ for (s = 0; s < SLAB_CHAINS; s++) { ++ if (!slab_chains[s]) ++ continue; ++ ++ if (!specified_slab) { ++ if (!readmem(slab_chains[s], ++ KVADDR, &si->slab, sizeof(ulong), ++ "slabs", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, ++ "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, ++ slab_chain_name_v1[s], ++ slab_chains[s]); ++ list_borked = 1; + continue; - if ((vi->spec_addr >= paddr) && - (vi->spec_addr < (paddr+PAGESIZE()))) { - if (vi->flags & GET_PHYS_TO_VMALLOC) { @@ -35266,264 +35956,25 @@ - LONG_HEX|LJUST, - MKSTR(addr+size)), size); - break; -- } -- } -+ list_borked = 0; -+ si->slabsize = (power(2, si->order) * PAGESIZE()); -+ si->cpucached_slab = 0; - -- } -+ if (VALID_MEMBER(kmem_cache_s_slabs)) { -+ slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs); -+ slab_chains[1] = 0; -+ slab_chains[2] = 0; -+ } else { -+ slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full); -+ slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial); -+ slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free); -+ } - -- readmem(next+OFFSET(vm_struct_next), -- KVADDR, &next, sizeof(void *), -- "vmlist next", FAULT_ON_ERROR); -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "[ %s: %lx ", si->curname, si->cache); -+ fprintf(fp, "full: %lx partial: %lx free: %lx ]\n", -+ slab_chains[0], slab_chains[1], slab_chains[2]); - } - -- if (vi->flags & GET_HIGHEST) -- vi->retval = addr+size; --} -+ switch (cmd) -+ { -+ case SLAB_GET_COUNTS: -+ si->flags |= SLAB_GET_COUNTS; -+ si->flags &= ~SLAB_WALKTHROUGH; -+ si->cpucached_cache = 0; -+ si->num_slabs = si->inuse = 0; -+ gather_cpudata_list_v1(si); - --/* -- * dump_page_lists() displays information from the active_list, -- * inactive_dirty_list and inactive_clean_list from each zone. 
-- */ --static int --dump_page_lists(struct meminfo *mi) --{ -- int i, c, n, retval; -- ulong node_zones, pgdat; -- struct node_table *nt; -- struct list_data list_data, *ld; -- char buf[BUFSIZE]; -- ulong value; -- ulong inactive_clean_pages, inactive_clean_list; -- int nr_active_pages, nr_inactive_pages; -- int nr_inactive_dirty_pages; -+ slab_s_buf = GETBUF(SIZE(slab_s)); - -- ld = &list_data; -+ for (s = 0; s < SLAB_CHAINS; s++) { - -- retval = FALSE; -- nr_active_pages = nr_inactive_dirty_pages = -1; -+ if (!slab_chains[s]) -+ continue; - -- BZERO(ld, sizeof(struct list_data)); -- ld->list_head_offset = OFFSET(page_lru); -- if (mi->flags & ADDRESS_SPECIFIED) -- ld->searchfor = mi->spec_addr; -- else if (mi->flags & VERBOSE) -- ld->flags |= VERBOSE; -+ if (!readmem(slab_chains[s], -+ KVADDR, &si->slab, sizeof(ulong), -+ "first slab", QUIET|RETURN_ON_ERROR)) { -+ error(INFO, -+ "%s: %s list: bad slab pointer: %lx\n", -+ si->curname, slab_chain_name_v1[s], -+ slab_chains[s]); -+ list_borked = 1; -+ continue; -+ } - -- if (mi->flags & GET_ACTIVE_LIST) { -- if (!symbol_exists("active_list")) -- error(FATAL, -- "active_list does not exist in this kernel\n"); -+ if (slab_data_saved(si)) { -+ FREEBUF(slab_s_buf); -+ return; -+ } -+ -+ if (si->slab == slab_chains[s]) -+ continue; -+ -+ last = slab_chains[s]; - -- if (symbol_exists("nr_active_pages")) -- get_symbol_data("nr_active_pages", sizeof(int), -- &nr_active_pages); -- else -- error(FATAL, -- "nr_active_pages does not exist in this kernel\n"); -+ do { -+ if (received_SIGINT()) { -+ FREEBUF(slab_s_buf); -+ restart(0); -+ } - -- ld->end = symbol_value("active_list"); -- readmem(ld->end, KVADDR, &ld->start, sizeof(void *), -- "LIST_HEAD contents", FAULT_ON_ERROR); -+ if (!verify_slab_v1(si, last, s)) { -+ list_borked = 1; -+ continue; -+ } -+ last = si->slab - OFFSET(slab_s_list); -+ -+ readmem(si->slab, KVADDR, slab_s_buf, -+ SIZE(slab_s), "slab_s buffer", -+ FAULT_ON_ERROR); -+ -+ tmp = INT(slab_s_buf + OFFSET(slab_s_inuse)); -+ si->inuse += tmp; -+ -+ if (ACTIVE()) -+ gather_cpudata_list_v1(si); -+ -+ si->s_mem = ULONG(slab_s_buf + -+ OFFSET(slab_s_s_mem)); -+ gather_slab_cached_count(si); -+ -+ si->num_slabs++; - -- if (mi->flags & VERBOSE) -- fprintf(fp, "active_list:\n"); -+ si->slab = ULONG(slab_s_buf + -+ OFFSET(slab_s_list)); -+ si->slab -= OFFSET(slab_s_list); - -- if (ld->start == ld->end) { -- c = 0; -- ld->searchfor = 0; -- if (mi->flags & VERBOSE) -- fprintf(fp, "(empty)\n"); -- } else { -- hq_open(); -- c = do_list(ld); -- hq_close(); -+ /* -+ * Check for slab transition. (Tony Dziedzic) -+ */ -+ for (i = 0; i < SLAB_CHAINS; i++) { -+ if ((i != s) && -+ (si->slab == slab_chains[i])) { -+ error(NOTE, -+ "%s: slab chain inconsistency: %s list\n", -+ si->curname, -+ slab_chain_name_v1[s]); -+ list_borked = 1; -+ } -+ } -+ -+ } while (si->slab != slab_chains[s] && !list_borked); - } - -- if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { -- fprintf(fp, "%lx\n", ld->searchfor); -- retval = TRUE; -- } else { -- fprintf(fp, "%snr_active_pages: %d ", -- mi->flags & VERBOSE ? 
"\n" : "", -- nr_active_pages); -- if (c != nr_active_pages) -- fprintf(fp, "(found %d)\n", c); -- else -- fprintf(fp, "(verified)\n"); -- } -- } -+ FREEBUF(slab_s_buf); -+ if (!list_borked) -+ save_slab_data(si); -+ break; - -- if (mi->flags & GET_INACTIVE_LIST) { -- if (!symbol_exists("inactive_list")) -- error(FATAL, -- "inactive_list does not exist in this kernel\n"); -+ case SLAB_WALKTHROUGH: -+ specified_slab = si->slab; -+ si->flags |= SLAB_WALKTHROUGH; -+ si->flags &= ~SLAB_GET_COUNTS; - -- if (symbol_exists("nr_inactive_pages")) -- get_symbol_data("nr_inactive_pages", sizeof(int), -- &nr_inactive_pages); -- else -- error(FATAL, -- "nr_active_pages does not exist in this kernel\n"); -+ for (s = 0; s < SLAB_CHAINS; s++) { -+ if (!slab_chains[s]) -+ continue; - -- ld->end = symbol_value("inactive_list"); -- readmem(ld->end, KVADDR, &ld->start, sizeof(void *), -- "LIST_HEAD contents", FAULT_ON_ERROR); -- -- if (mi->flags & VERBOSE) -- fprintf(fp, "inactive_list:\n"); -+ if (!specified_slab) { -+ if (!readmem(slab_chains[s], -+ KVADDR, &si->slab, sizeof(ulong), -+ "slabs", QUIET|RETURN_ON_ERROR)) { -+ error(INFO, -+ "%s: %s list: bad slab pointer: %lx\n", -+ si->curname, -+ slab_chain_name_v1[s], -+ slab_chains[s]); -+ list_borked = 1; -+ continue; -+ } + } + last = slab_chains[s]; + } else + last = 0; + + if (si->slab == slab_chains[s]) + continue; - -- if (ld->start == ld->end) { -- c = 0; -- ld->searchfor = 0; -- if (mi->flags & VERBOSE) -- fprintf(fp, "(empty)\n"); -- } else { -- hq_open(); -- c = do_list(ld); -- hq_close(); -- } ++ + if (CRASHDEBUG(1)) { + fprintf(fp, "search cache: [%s] ", si->curname); + if (si->flags & ADDRESS_SPECIFIED) + fprintf(fp, "for %llx", si->spec_addr); + fprintf(fp, "\n"); -+ } + } + + do { + if (received_SIGINT()) + restart(0); -- if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { -- fprintf(fp, "%lx\n", ld->searchfor); -- retval = TRUE; -- } else { -- fprintf(fp, "%snr_inactive_pages: %d ", -- mi->flags & VERBOSE ? "\n" : "", -- nr_inactive_pages); -- if (c != nr_inactive_pages) -- fprintf(fp, "(found %d)\n", c); -- else -- fprintf(fp, "(verified)\n"); + if (!verify_slab_v1(si, last, s)) { + list_borked = 1; + continue; @@ -35544,28 +35995,38 @@ + + } while (si->slab != slab_chains[s] && !list_borked); } -+ + +- readmem(next+OFFSET(vm_struct_next), +- KVADDR, &next, sizeof(void *), +- "vmlist next", FAULT_ON_ERROR); + break; } -+} +- +- if (vi->flags & GET_HIGHEST) +- vi->retval = addr+size; + } -- if (mi->flags & GET_INACTIVE_DIRTY) { -- if (!symbol_exists("inactive_dirty_list")) -- error(FATAL, -- "inactive_dirty_list does not exist in this kernel\n"); -+/* + /* +- * dump_page_lists() displays information from the active_list, +- * inactive_dirty_list and inactive_clean_list from each zone. + * Try to preclude any attempt to translate a bogus slab structure. 
-+ */ - -- if (symbol_exists("nr_inactive_dirty_pages")) -- get_symbol_data("nr_inactive_dirty_pages", sizeof(int), -- &nr_inactive_dirty_pages); -- else -- error(FATAL, -- "nr_inactive_dirty_pages does not exist in this kernel\n"); -+static int + */ ++ + static int +-dump_page_lists(struct meminfo *mi) +verify_slab_v1(struct meminfo *si, ulong last, int s) -+{ + { +- int i, c, n, retval; +- ulong node_zones, pgdat; +- struct node_table *nt; +- struct list_data list_data, *ld; +- char buf[BUFSIZE]; +- ulong value; +- ulong inactive_clean_pages, inactive_clean_list; +- int nr_active_pages, nr_inactive_pages; +- int nr_inactive_dirty_pages; +- +- ld = &list_data; + char slab_s_buf[BUFSIZE]; + struct kernel_list_head *list_head; + unsigned int inuse; @@ -35573,15 +36034,191 @@ + char *list; + int errcnt; +- retval = FALSE; +- nr_active_pages = nr_inactive_dirty_pages = -1; ++ list = slab_chain_name_v1[s]; + +- BZERO(ld, sizeof(struct list_data)); +- ld->list_head_offset = OFFSET(page_lru); +- if (mi->flags & ADDRESS_SPECIFIED) +- ld->searchfor = mi->spec_addr; +- else if (mi->flags & VERBOSE) +- ld->flags |= VERBOSE; +- +- if (mi->flags & GET_ACTIVE_LIST) { +- if (!symbol_exists("active_list")) +- error(FATAL, +- "active_list does not exist in this kernel\n"); ++ errcnt = 0; + +- if (symbol_exists("nr_active_pages")) +- get_symbol_data("nr_active_pages", sizeof(int), +- &nr_active_pages); +- else +- error(FATAL, +- "nr_active_pages does not exist in this kernel\n"); ++ if (!readmem(si->slab, KVADDR, slab_s_buf, ++ SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { ++ error(INFO, "%s: %s list: bad slab pointer: %lx\n", ++ si->curname, list, si->slab); ++ return FALSE; ++ } + +- ld->end = symbol_value("active_list"); +- readmem(ld->end, KVADDR, &ld->start, sizeof(void *), +- "LIST_HEAD contents", FAULT_ON_ERROR); +- +- if (mi->flags & VERBOSE) +- fprintf(fp, "active_list:\n"); ++ list_head = (struct kernel_list_head *) ++ (slab_s_buf + OFFSET(slab_s_list)); + +- if (ld->start == ld->end) { +- c = 0; +- ld->searchfor = 0; +- if (mi->flags & VERBOSE) +- fprintf(fp, "(empty)\n"); +- } else { +- hq_open(); +- c = do_list(ld); +- hq_close(); +- } ++ if (!IS_KVADDR((ulong)list_head->next) || ++ !accessible((ulong)list_head->next)) { ++ error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", ++ si->curname, list, si->slab, ++ (ulong)list_head->next); ++ errcnt++; ++ } + +- if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { +- fprintf(fp, "%lx\n", ld->searchfor); +- retval = TRUE; +- } else { +- fprintf(fp, "%snr_active_pages: %d ", +- mi->flags & VERBOSE ? 
"\n" : "", +- nr_active_pages); +- if (c != nr_active_pages) +- fprintf(fp, "(found %d)\n", c); +- else +- fprintf(fp, "(verified)\n"); +- } ++ if (last && (last != (ulong)list_head->prev)) { ++ error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", ++ si->curname, list, si->slab, ++ (ulong)list_head->prev); ++ errcnt++; + } + +- if (mi->flags & GET_INACTIVE_LIST) { +- if (!symbol_exists("inactive_list")) +- error(FATAL, +- "inactive_list does not exist in this kernel\n"); ++ inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); ++ if (inuse > si->c_num) { ++ error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } + +- if (symbol_exists("nr_inactive_pages")) +- get_symbol_data("nr_inactive_pages", sizeof(int), +- &nr_inactive_pages); +- else +- error(FATAL, +- "nr_active_pages does not exist in this kernel\n"); ++ if (!last) ++ goto no_inuse_check_v1; + +- ld->end = symbol_value("inactive_list"); +- readmem(ld->end, KVADDR, &ld->start, sizeof(void *), +- "LIST_HEAD contents", FAULT_ON_ERROR); +- +- if (mi->flags & VERBOSE) +- fprintf(fp, "inactive_list:\n"); ++ switch (s) ++ { ++ case 0: /* full -- but can be one singular list */ ++ if (VALID_MEMBER(kmem_cache_s_slabs_full) && ++ (inuse != si->c_num)) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; + +- if (ld->start == ld->end) { +- c = 0; +- ld->searchfor = 0; +- if (mi->flags & VERBOSE) +- fprintf(fp, "(empty)\n"); +- } else { +- hq_open(); +- c = do_list(ld); +- hq_close(); ++ case 1: /* partial */ ++ if ((inuse == 0) || (inuse == si->c_num)) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; + } ++ break; + +- if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { +- fprintf(fp, "%lx\n", ld->searchfor); +- retval = TRUE; +- } else { +- fprintf(fp, "%snr_inactive_pages: %d ", +- mi->flags & VERBOSE ? "\n" : "", +- nr_inactive_pages); +- if (c != nr_inactive_pages) +- fprintf(fp, "(found %d)\n", c); +- else +- fprintf(fp, "(verified)\n"); ++ case 2: /* free */ ++ if (inuse > 0) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; + } ++ break; + } + +- if (mi->flags & GET_INACTIVE_DIRTY) { +- if (!symbol_exists("inactive_dirty_list")) +- error(FATAL, +- "inactive_dirty_list does not exist in this kernel\n"); ++no_inuse_check_v1: ++ s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); ++ if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { ++ error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", ++ si->curname, list, si->slab, s_mem); ++ errcnt++; ++ } + +- if (symbol_exists("nr_inactive_dirty_pages")) +- get_symbol_data("nr_inactive_dirty_pages", sizeof(int), +- &nr_inactive_dirty_pages); +- else +- error(FATAL, +- "nr_inactive_dirty_pages does not exist in this kernel\n"); ++ return(errcnt ? FALSE : TRUE); ++} + - ld->end = symbol_value("inactive_dirty_list"); - readmem(ld->end, KVADDR, &ld->start, sizeof(void *), - "LIST_HEAD contents", FAULT_ON_ERROR); -+ list = slab_chain_name_v1[s]; ++/* ++ * Updated for 2.6 slab substructure. ++ */ - if (mi->flags & VERBOSE) - fprintf(fp, "%sinactive_dirty_list:\n", - mi->flags & GET_ACTIVE_LIST ? 
"\n" : ""); -+ errcnt = 0; ++static char *slab_chain_name_v2[] = {"partial", "full", "free"}; - if (ld->start == ld->end) { - c = 0; @@ -35593,12 +36230,15 @@ - c = do_list(ld); - hq_close(); - } -+ if (!readmem(si->slab, KVADDR, slab_s_buf, -+ SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { -+ error(INFO, "%s: %s list: bad slab pointer: %lx\n", -+ si->curname, list, si->slab); -+ return FALSE; -+ } ++static void ++do_slab_chain_percpu_v2(long cmd, struct meminfo *si) ++{ ++ int i, tmp, s; ++ int list_borked; ++ char *slab_buf; ++ ulong specified_slab; ++ ulong last; ++ ulong slab_chains[SLAB_CHAINS]; - if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { - fprintf(fp, "%lx\n", ld->searchfor); @@ -35612,169 +36252,10 @@ - else - fprintf(fp, "(verified)\n"); - } -- } -+ list_head = (struct kernel_list_head *) -+ (slab_s_buf + OFFSET(slab_s_list)); - -- if (mi->flags & GET_INACTIVE_CLEAN) { -- if (INVALID_MEMBER(zone_struct_inactive_clean_list)) -- error(FATAL, -- "inactive_clean_list(s) do not exist in this kernel\n"); -+ if (!IS_KVADDR((ulong)list_head->next) || -+ !accessible((ulong)list_head->next)) { -+ error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", -+ si->curname, list, si->slab, -+ (ulong)list_head->next); -+ errcnt++; -+ } - -- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); -+ if (last && (last != (ulong)list_head->prev)) { -+ error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", -+ si->curname, list, si->slab, -+ (ulong)list_head->prev); -+ errcnt++; -+ } - -- if ((mi->flags & VERBOSE) && -- (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY))) -- fprintf(fp, "\n"); -+ inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); -+ if (inuse > si->c_num) { -+ error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", -+ si->curname, list, si->slab, inuse); -+ errcnt++; -+ } - -- for (n = 0; pgdat; n++) { -- nt = &vt->node_table[n]; -+ if (!last) -+ goto no_inuse_check_v1; - -- node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); -+ switch (s) -+ { -+ case 0: /* full -- but can be one singular list */ -+ if (VALID_MEMBER(kmem_cache_s_slabs_full) && -+ (inuse != si->c_num)) { -+ error(INFO, -+ "%s: %s list: slab: %lx bad inuse counter: %ld\n", -+ si->curname, list, si->slab, inuse); -+ errcnt++; -+ } -+ break; - -- for (i = 0; i < vt->nr_zones; i++) { -- readmem(node_zones+OFFSET(zone_struct_name), -- KVADDR, &value, sizeof(void *), -- "zone_struct name", FAULT_ON_ERROR); -- if (!read_string(value, buf, BUFSIZE-1)) -- sprintf(buf, "(unknown) "); -+ case 1: /* partial */ -+ if ((inuse == 0) || (inuse == si->c_num)) { -+ error(INFO, -+ "%s: %s list: slab: %lx bad inuse counter: %ld\n", -+ si->curname, list, si->slab, inuse); -+ errcnt++; -+ } -+ break; - -- if (mi->flags & VERBOSE) { -- if (vt->numnodes > 1) -- fprintf(fp, "NODE %d ", n); -- fprintf(fp, -- "\"%s\" inactive_clean_list:\n", -- buf); -- } -+ case 2: /* free */ -+ if (inuse > 0) { -+ error(INFO, -+ "%s: %s list: slab: %lx bad inuse counter: %ld\n", -+ si->curname, list, si->slab, inuse); -+ errcnt++; -+ } -+ break; -+ } - -- readmem(node_zones + -- OFFSET(zone_struct_inactive_clean_pages), -- KVADDR, &inactive_clean_pages, -- sizeof(ulong), "inactive_clean_pages", -- FAULT_ON_ERROR); -+no_inuse_check_v1: -+ s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); -+ if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { -+ error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", -+ si->curname, list, si->slab, s_mem); -+ errcnt++; -+ } - -- readmem(node_zones + -- 
OFFSET(zone_struct_inactive_clean_list), -- KVADDR, &inactive_clean_list, -- sizeof(ulong), "inactive_clean_list", -- FAULT_ON_ERROR); -+ return(errcnt ? FALSE : TRUE); -+} - -- ld->start = inactive_clean_list; -- ld->end = node_zones + -- OFFSET(zone_struct_inactive_clean_list); -- if (mi->flags & ADDRESS_SPECIFIED) -- ld->searchfor = mi->spec_addr; -+/* -+ * Updated for 2.6 slab substructure. -+ */ - -- if (ld->start == ld->end) { -- c = 0; -- ld->searchfor = 0; -- if (mi->flags & VERBOSE) -- fprintf(fp, "(empty)\n"); -- } else { -- hq_open(); -- c = do_list(ld); -- hq_close(); -- } -+static char *slab_chain_name_v2[] = {"partial", "full", "free"}; - -- if ((mi->flags & ADDRESS_SPECIFIED) && -- ld->searchfor) { -- fprintf(fp, "%lx\n", ld->searchfor); -- retval = TRUE; -- } else { -- if (vt->numnodes > 1) -- fprintf(fp, "NODE %d ", n); -- fprintf(fp, "\"%s\" ", buf); -- fprintf(fp, -- "inactive_clean_pages: %ld ", -- inactive_clean_pages); -- if (c != inactive_clean_pages) -- fprintf(fp, "(found %d)\n", c); -- else -- fprintf(fp, "(verified)\n"); -- } -+static void -+do_slab_chain_percpu_v2(long cmd, struct meminfo *si) -+{ -+ int i, tmp, s; -+ int list_borked; -+ char *slab_buf; -+ ulong specified_slab; -+ ulong last; -+ ulong slab_chains[SLAB_CHAINS]; - -- node_zones += SIZE(zone_struct); -- } + list_borked = 0; + si->slabsize = (power(2, si->order) * PAGESIZE()); + si->cpucached_slab = 0; - -- readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, -- pglist_data_pgdat_next), KVADDR, -- &pgdat, sizeof(void *), "pglist_data node_next", -- FAULT_ON_ERROR); -- } ++ + slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) + + OFFSET(kmem_list3_slabs_partial); + slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) + @@ -35788,8 +36269,10 @@ + slab_chains[0], slab_chains[1], slab_chains[2]); } -- return retval; --} +- if (mi->flags & GET_INACTIVE_CLEAN) { +- if (INVALID_MEMBER(zone_struct_inactive_clean_list)) +- error(FATAL, +- "inactive_clean_list(s) do not exist in this kernel\n"); + switch (cmd) + { + case SLAB_GET_COUNTS: @@ -35799,17 +36282,18 @@ + si->num_slabs = si->inuse = 0; + gather_cpudata_list_v2(si); +- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); + slab_buf = GETBUF(SIZE(slab)); +- if ((mi->flags & VERBOSE) && +- (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY))) +- fprintf(fp, "\n"); + for (s = 0; s < SLAB_CHAINS; s++) { + if (!slab_chains[s]) + continue; --/* -- * Check whether an address is a kmem_cache_t address, and if so, return -- * a pointer to the static buffer containing its name string. Otherwise -- * return NULL on failure. 
-- */ +- for (n = 0; pgdat; n++) { +- nt = &vt->node_table[n]; + if (!readmem(slab_chains[s], + KVADDR, &si->slab, sizeof(ulong), + "first slab", QUIET|RETURN_ON_ERROR)) { @@ -35832,19 +36316,19 @@ + + last = slab_chains[s]; --#define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n" +- node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); + do { + if (received_SIGINT()) { + FREEBUF(slab_buf); + restart(0); + } --static char * --is_kmem_cache_addr(ulong vaddr, char *kbuf) --{ -- ulong cache, cache_cache, name; -- long next_offset, name_offset; -- char *cache_buf; +- for (i = 0; i < vt->nr_zones; i++) { +- readmem(node_zones+OFFSET(zone_struct_name), +- KVADDR, &value, sizeof(void *), +- "zone_struct name", FAULT_ON_ERROR); +- if (!read_string(value, buf, BUFSIZE-1)) +- sprintf(buf, "(unknown) "); + if (!verify_slab_v2(si, last, s)) { + list_borked = 1; + continue; @@ -35861,10 +36345,12 @@ + if (ACTIVE()) + gather_cpudata_list_v2(si); -- if (vt->flags & KMEM_CACHE_UNAVAIL) { -- error(INFO, "kmem cache slab subsystem not available\n"); -- return NULL; -- } +- if (mi->flags & VERBOSE) { +- if (vt->numnodes > 1) +- fprintf(fp, "NODE %d ", n); +- fprintf(fp, +- "\"%s\" inactive_clean_list:\n", +- buf); + si->s_mem = ULONG(slab_buf + + OFFSET(slab_s_mem)); + gather_slab_cached_count(si); @@ -35887,34 +36373,49 @@ + slab_chain_name_v2[s]); + list_borked = 1; + } -+ } + } + + } while (si->slab != slab_chains[s] && !list_borked); + } -- name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? -- OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); -- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? -- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); +- readmem(node_zones + +- OFFSET(zone_struct_inactive_clean_pages), +- KVADDR, &inactive_clean_pages, +- sizeof(ulong), "inactive_clean_pages", +- FAULT_ON_ERROR); + FREEBUF(slab_buf); + if (!list_borked) + save_slab_data(si); + break; -- cache = cache_cache = symbol_value("cache_cache"); +- readmem(node_zones + +- OFFSET(zone_struct_inactive_clean_list), +- KVADDR, &inactive_clean_list, +- sizeof(ulong), "inactive_clean_list", +- FAULT_ON_ERROR); + case SLAB_WALKTHROUGH: + specified_slab = si->slab; + si->flags |= SLAB_WALKTHROUGH; + si->flags &= ~SLAB_GET_COUNTS; -- cache_buf = GETBUF(SIZE(kmem_cache_s)); +- ld->start = inactive_clean_list; +- ld->end = node_zones + +- OFFSET(zone_struct_inactive_clean_list); +- if (mi->flags & ADDRESS_SPECIFIED) +- ld->searchfor = mi->spec_addr; + for (s = 0; s < SLAB_CHAINS; s++) { + if (!slab_chains[s]) + continue; -- do { -- readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), -- "kmem_cache_s buffer", FAULT_ON_ERROR); +- if (ld->start == ld->end) { +- c = 0; +- ld->searchfor = 0; +- if (mi->flags & VERBOSE) +- fprintf(fp, "(empty)\n"); +- } else { +- hq_open(); +- c = do_list(ld); +- hq_close(); + if (!specified_slab) { + if (!readmem(slab_chains[s], + KVADDR, &si->slab, sizeof(ulong), @@ -35926,7 +36427,26 @@ + slab_chains[s]); + list_borked = 1; + continue; -+ } + } +- +- if ((mi->flags & ADDRESS_SPECIFIED) && +- ld->searchfor) { +- fprintf(fp, "%lx\n", ld->searchfor); +- retval = TRUE; +- } else { +- if (vt->numnodes > 1) +- fprintf(fp, "NODE %d ", n); +- fprintf(fp, "\"%s\" ", buf); +- fprintf(fp, +- "inactive_clean_pages: %ld ", +- inactive_clean_pages); +- if (c != inactive_clean_pages) +- fprintf(fp, "(found %d)\n", c); +- else +- fprintf(fp, "(verified)\n"); +- } +- +- node_zones += SIZE(zone_struct); + last = slab_chains[s]; + } else + last = 0; 
@@ -35939,7 +36459,7 @@ + if (si->flags & ADDRESS_SPECIFIED) + fprintf(fp, "for %llx", si->spec_addr); + fprintf(fp, "\n"); -+ } + } + + do { + if (received_SIGINT()) @@ -35951,30 +36471,15 @@ + } + last = si->slab - OFFSET(slab_list); -- if (cache == vaddr) { -- if (vt->kmem_cache_namelen) { -- BCOPY(cache_buf+name_offset, kbuf, -- vt->kmem_cache_namelen); -- } else { -- name = ULONG(cache_buf + name_offset); -- if (!read_string(name, kbuf, BUFSIZE-1)) { -- if (vt->flags & -- (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) -- error(FATAL, -- "cannot read kmem_cache_s.name string at %lx\n", -- name); -- else -- error(FATAL, -- "cannot read kmem_cache_s.c_name string at %lx\n", -- name); +- readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, +- pglist_data_pgdat_next), KVADDR, +- &pgdat, sizeof(void *), "pglist_data node_next", +- FAULT_ON_ERROR); + dump_slab_percpu_v2(si); + + if (si->found) { + return; - } -- } -- FREEBUF(cache_buf); -- return kbuf; ++ } + + readmem(si->slab+OFFSET(slab_list), + KVADDR, &si->slab, sizeof(ulong), @@ -35984,48 +36489,26 @@ + + } while (si->slab != slab_chains[s] && !list_borked); } +- } -- cache = ULONG(cache_buf + next_offset); -- -- if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) -- cache -= next_offset; +- return retval; + break; + } -+} + } -- } while (cache != cache_cache); -- FREEBUF(cache_buf); -- return NULL; --} +/* +* Added To Traverse the Nodelists +*/ -/* -- * Note same functionality as above, but instead it just -- * dumps all slab cache names and their addresses. +- * Check whether an address is a kmem_cache_t address, and if so, return +- * a pointer to the static buffer containing its name string. Otherwise +- * return NULL on failure. - */ - static void --kmem_cache_list(void) ++static void +do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si) - { -- ulong cache, cache_cache, name; -- long next_offset, name_offset; -- char *cache_buf; -- char buf[BUFSIZE]; -- -- if (vt->flags & KMEM_CACHE_UNAVAIL) { -- error(INFO, "kmem cache slab subsystem not available\n"); -- return; -- } -- -- name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? -- OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); -- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
-- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); -- -- cache = cache_cache = symbol_value("cache_cache"); ++{ + int i, tmp, s; + int list_borked; + char *slab_buf; @@ -36035,35 +36518,27 @@ + ulong *start_address; + int index; -- cache_buf = GETBUF(SIZE(kmem_cache_s)); +-#define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n" + list_borked = 0; + si->slabsize = (power(2, si->order) * PAGESIZE()); + si->cpucached_slab = 0; + start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); -- do { -- readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), -- "kmem_cache_s buffer", FAULT_ON_ERROR); +-static char * +-is_kmem_cache_addr(ulong vaddr, char *kbuf) +-{ +- ulong cache, cache_cache, name; +- long next_offset, name_offset; +- char *cache_buf; + if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, + &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, + "array nodelist array", RETURN_ON_ERROR)) + error(INFO, "cannot read kmem_cache nodelists array"); -- if (vt->kmem_cache_namelen) { -- BCOPY(cache_buf+name_offset, buf, -- vt->kmem_cache_namelen); -- } else { -- name = ULONG(cache_buf + name_offset); -- if (!read_string(name, buf, BUFSIZE-1)) { -- if (vt->flags & -- (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) -- error(FATAL, -- "cannot read kmem_cache_s.name string at %lx\n", -- name); -- else -- error(FATAL, -- "cannot read kmem_cache_s.c_name string at %lx\n", -- name); +- if (vt->flags & KMEM_CACHE_UNAVAIL) { +- error(INFO, "kmem cache slab subsystem not available\n"); +- return NULL; +- } + switch (cmd) + { + case SLAB_GET_COUNTS: @@ -36084,10 +36559,12 @@ + fprintf(fp, "[ %s: %lx ", si->curname, si->cache); + fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", + slab_chains[0], slab_chains[1], slab_chains[2]); - } -- } ++ } -- fprintf(fp, "%lx %s\n", cache, buf); +- name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? +- OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); +- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
+- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); + for (s = 0; s < SLAB_CHAINS; s++) { + if (!slab_chains[s]) + continue; @@ -36115,7 +36592,7 @@ + + last = slab_chains[s]; -- cache = ULONG(cache_buf + next_offset); +- cache = cache_cache = symbol_value("cache_cache"); + do { + if (received_SIGINT()) { + FREEBUF(slab_buf); @@ -36123,8 +36600,7 @@ + restart(0); + } -- if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) -- cache -= next_offset; +- cache_buf = GETBUF(SIZE(kmem_cache_s)); + if (!verify_slab_v2(si, last, s)) { + list_borked = 1; + continue; @@ -36141,7 +36617,9 @@ + if (ACTIVE()) + gather_cpudata_list_v2_nodes(si, index); -- } while (cache != cache_cache); +- do { +- readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), +- "kmem_cache_s buffer", FAULT_ON_ERROR); + si->s_mem = ULONG(slab_buf + + OFFSET(slab_s_mem)); + gather_slab_cached_count(si); @@ -36152,8 +36630,26 @@ + OFFSET(slab_list)); + si->slab -= OFFSET(slab_list); -- FREEBUF(cache_buf); --} +- if (cache == vaddr) { +- if (vt->kmem_cache_namelen) { +- BCOPY(cache_buf+name_offset, kbuf, +- vt->kmem_cache_namelen); +- } else { +- name = ULONG(cache_buf + name_offset); +- if (!read_string(name, kbuf, BUFSIZE-1)) { +- if (vt->flags & +- (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- error(FATAL, +- "cannot read kmem_cache_s.name string at %lx\n", +- name); +- else +- error(FATAL, +- "cannot read kmem_cache_s.c_name string at %lx\n", +- name); +- } +- } +- FREEBUF(cache_buf); +- return kbuf; + /* + * Check for slab transition. (Tony Dziedzic) + */ @@ -36170,29 +36666,15 @@ + + } while (si->slab != slab_chains[s] && !list_borked); + } -+ } + } --/* -- * Translate an address to its physical page number, verify that the -- * page in fact belongs to the slab subsystem, and if so, return the -- * name of the cache to which it belongs. 
-- */ --static char * --vaddr_to_kmem_cache(ulong vaddr, char *buf) --{ -- physaddr_t paddr; -- ulong page; -- ulong cache; +- cache = ULONG(cache_buf + next_offset); + if (!list_borked) + save_slab_data(si); + break; -- if (!kvtop(NULL, vaddr, &paddr, 0)) { -- error(WARNING, -- "cannot make virtual-to-physical translation: %lx\n", -- vaddr); -- return NULL; -- } +- if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- cache -= next_offset; + case SLAB_WALKTHROUGH: + specified_slab = si->slab; + si->flags |= SLAB_WALKTHROUGH; @@ -36212,29 +36694,13 @@ + slab_chains[0], slab_chains[1], slab_chains[2]); + } -- if (!phys_to_page(paddr, &page)) { -- error(WARNING, "cannot find mem_map page for address: %lx\n", -- vaddr); -- return NULL; -- } +- } while (cache != cache_cache); + for (s = 0; s < SLAB_CHAINS; s++) { + if (!slab_chains[s]) + continue; -- if (VALID_MEMBER(page_next)) -- readmem(page+OFFSET(page_next), -- KVADDR, &cache, sizeof(void *), -- "page.next", FAULT_ON_ERROR); -- else if (VALID_MEMBER(page_list_next)) -- readmem(page+OFFSET(page_list_next), -- KVADDR, &cache, sizeof(void *), -- "page.list.next", FAULT_ON_ERROR); -- else if (VALID_MEMBER(page_lru)) -- readmem(page+OFFSET(page_lru)+OFFSET(list_head_next), -- KVADDR, &cache, sizeof(void *), -- "page.lru.next", FAULT_ON_ERROR); -- else -- error(FATAL, "cannot determine slab cache from page struct\n"); +- FREEBUF(cache_buf); +- return NULL; + if (!specified_slab) { + if (!readmem(slab_chains[s], + KVADDR, &si->slab, sizeof(ulong), @@ -36260,8 +36726,7 @@ + + si->s_mem = ULONG(slab_buf + + OFFSET(slab_s_mem)); - -- return(is_kmem_cache_addr(cache, buf)); ++ + if (CRASHDEBUG(1)) { + fprintf(fp, "search cache: [%s] ", si->curname); + if (si->flags & ADDRESS_SPECIFIED) @@ -36308,19 +36773,19 @@ } /* -- * Translate an address to its physical page number, verify that the -- * page in fact belongs to the slab subsystem, and if so, return the -- * address of the slab to which it belongs. +- * Note same functionality as above, but instead it just +- * dumps all slab cache names and their addresses. + * Try to preclude any attempt to translate a bogus slab structure. */ --static ulong --vaddr_to_slab(ulong vaddr) +-static void +-kmem_cache_list(void) +static int +verify_slab_v2(struct meminfo *si, ulong last, int s) { -- physaddr_t paddr; -- ulong page; -- ulong slab; +- ulong cache, cache_cache, name; +- long next_offset, name_offset; +- char *cache_buf; +- char buf[BUFSIZE]; + char slab_buf[BUFSIZE]; + struct kernel_list_head *list_head; + unsigned int inuse; @@ -36328,22 +36793,19 @@ + char *list; + int errcnt; -- if (!kvtop(NULL, vaddr, &paddr, 0)) { -- error(WARNING, -- "cannot make virtual-to-physical translation: %lx\n", -- vaddr); -- return 0; -- } +- if (vt->flags & KMEM_CACHE_UNAVAIL) { +- error(INFO, "kmem cache slab subsystem not available\n"); +- return; +- } + list = slab_chain_name_v2[s]; -- if (!phys_to_page(paddr, &page)) { -- error(WARNING, "cannot find mem_map page for address: %lx\n", -- vaddr); -- return 0; -- } +- name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? +- OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); +- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
+- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); + errcnt = 0; -- slab = 0; +- cache = cache_cache = symbol_value("cache_cache"); + if (!readmem(si->slab, KVADDR, slab_buf, + SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { + error(INFO, "%s: %s list: bad slab pointer: %lx\n", @@ -36351,6 +36813,235 @@ + return FALSE; + } +- cache_buf = GETBUF(SIZE(kmem_cache_s)); ++ list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); ++ if (!IS_KVADDR((ulong)list_head->next) || ++ !accessible((ulong)list_head->next)) { ++ error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", ++ si->curname, list, si->slab, ++ (ulong)list_head->next); ++ errcnt++; ++ } + +- do { +- readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), +- "kmem_cache_s buffer", FAULT_ON_ERROR); ++ if (last && (last != (ulong)list_head->prev)) { ++ error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", ++ si->curname, list, si->slab, ++ (ulong)list_head->prev); ++ errcnt++; ++ } + +- if (vt->kmem_cache_namelen) { +- BCOPY(cache_buf+name_offset, buf, +- vt->kmem_cache_namelen); +- } else { +- name = ULONG(cache_buf + name_offset); +- if (!read_string(name, buf, BUFSIZE-1)) { +- if (vt->flags & +- (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- error(FATAL, +- "cannot read kmem_cache_s.name string at %lx\n", +- name); +- else +- error(FATAL, +- "cannot read kmem_cache_s.c_name string at %lx\n", +- name); +- } +- } ++ inuse = UINT(slab_buf + OFFSET(slab_inuse)); ++ if (inuse > si->c_num) { ++ error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } + +- fprintf(fp, "%lx %s\n", cache, buf); ++ if (!last) ++ goto no_inuse_check_v2; + +- cache = ULONG(cache_buf + next_offset); ++ switch (s) ++ { ++ case 0: /* partial */ ++ if ((inuse == 0) || (inuse == si->c_num)) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; + +- if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- cache -= next_offset; ++ case 1: /* full */ ++ if (inuse != si->c_num) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; + +- } while (cache != cache_cache); ++ case 2: /* free */ ++ if (inuse > 0) { ++ error(INFO, ++ "%s: %s list: slab: %lx bad inuse counter: %ld\n", ++ si->curname, list, si->slab, inuse); ++ errcnt++; ++ } ++ break; ++ } + +- FREEBUF(cache_buf); ++no_inuse_check_v2: ++ s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); ++ if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { ++ error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", ++ si->curname, list, si->slab, s_mem); ++ errcnt++; ++ } ++ ++ return(errcnt ? FALSE : TRUE); + } + + /* +- * Translate an address to its physical page number, verify that the +- * page in fact belongs to the slab subsystem, and if so, return the +- * name of the cache to which it belongs. ++ * If it's a dumpfile, save the essential slab data to avoid re-reading ++ * the whole slab chain more than once. This may seem like overkill, but ++ * if the problem is a memory leak, or just the over-use of the buffer_head ++ * cache, it's painful to wait each time subsequent kmem -s or -i commands ++ * simply need the basic slab counts. 
+ */ +-static char * +-vaddr_to_kmem_cache(ulong vaddr, char *buf) ++struct slab_data { ++ ulong cache_addr; ++ int num_slabs; ++ int inuse; ++ ulong cpucached_cache; ++}; ++ ++#define NO_SLAB_DATA ((void *)(-1)) ++ ++static void ++save_slab_data(struct meminfo *si) + { +- physaddr_t paddr; +- ulong page; +- ulong cache; ++ int i; + +- if (!kvtop(NULL, vaddr, &paddr, 0)) { +- error(WARNING, +- "cannot make virtual-to-physical translation: %lx\n", +- vaddr); +- return NULL; ++ if (si->flags & SLAB_DATA_NOSAVE) { ++ si->flags &= ~SLAB_DATA_NOSAVE; ++ return; + } + +- if (!phys_to_page(paddr, &page)) { +- error(WARNING, "cannot find mem_map page for address: %lx\n", +- vaddr); +- return NULL; ++ if (ACTIVE()) ++ return; ++ ++ if (vt->slab_data == NO_SLAB_DATA) ++ return; ++ ++ if (!vt->slab_data) { ++ if (!(vt->slab_data = (struct slab_data *) ++ malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { ++ error(INFO, "cannot malloc slab_data table"); ++ vt->slab_data = NO_SLAB_DATA; ++ return; ++ } ++ for (i = 0; i < vt->kmem_cache_count; i++) { ++ vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; ++ vt->slab_data[i].num_slabs = 0; ++ vt->slab_data[i].inuse = 0; ++ vt->slab_data[i].cpucached_cache = 0; ++ } + } + +- if (VALID_MEMBER(page_next)) +- readmem(page+OFFSET(page_next), +- KVADDR, &cache, sizeof(void *), +- "page.next", FAULT_ON_ERROR); +- else if (VALID_MEMBER(page_list_next)) +- readmem(page+OFFSET(page_list_next), +- KVADDR, &cache, sizeof(void *), +- "page.list.next", FAULT_ON_ERROR); +- else if (VALID_MEMBER(page_lru)) +- readmem(page+OFFSET(page_lru)+OFFSET(list_head_next), +- KVADDR, &cache, sizeof(void *), +- "page.lru.next", FAULT_ON_ERROR); +- else +- error(FATAL, "cannot determine slab cache from page struct\n"); ++ for (i = 0; i < vt->kmem_cache_count; i++) { ++ if (vt->slab_data[i].cache_addr == si->cache) ++ break; + +- return(is_kmem_cache_addr(cache, buf)); ++ if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { ++ vt->slab_data[i].cache_addr = si->cache; ++ vt->slab_data[i].num_slabs = si->num_slabs; ++ vt->slab_data[i].inuse = si->inuse; ++ vt->slab_data[i].cpucached_cache = si->cpucached_cache; ++ break; ++ } ++ } + } + +-/* +- * Translate an address to its physical page number, verify that the +- * page in fact belongs to the slab subsystem, and if so, return the +- * address of the slab to which it belongs. 
+- */ +-static ulong +-vaddr_to_slab(ulong vaddr) ++static int ++slab_data_saved(struct meminfo *si) + { +- physaddr_t paddr; +- ulong page; +- ulong slab; ++ int i; + +- if (!kvtop(NULL, vaddr, &paddr, 0)) { +- error(WARNING, +- "cannot make virtual-to-physical translation: %lx\n", +- vaddr); +- return 0; +- } ++ if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) ++ return FALSE; + +- if (!phys_to_page(paddr, &page)) { +- error(WARNING, "cannot find mem_map page for address: %lx\n", +- vaddr); +- return 0; +- } ++ for (i = 0; i < vt->kmem_cache_count; i++) { ++ if (vt->slab_data[i].cache_addr == si->cache) { ++ si->inuse = vt->slab_data[i].inuse; ++ si->num_slabs = vt->slab_data[i].num_slabs; ++ si->cpucached_cache = vt->slab_data[i].cpucached_cache; ++ return TRUE; ++ } ++ } + +- slab = 0; ++ return FALSE; ++} + - if (VALID_MEMBER(page_prev)) - readmem(page+OFFSET(page_prev), - KVADDR, &slab, sizeof(void *), @@ -36365,296 +37056,54 @@ - "page.lru.prev", FAULT_ON_ERROR); - else - error(FATAL, "unknown definition of struct page?\n"); -+ list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); -+ if (!IS_KVADDR((ulong)list_head->next) || -+ !accessible((ulong)list_head->next)) { -+ error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", -+ si->curname, list, si->slab, -+ (ulong)list_head->next); -+ errcnt++; -+ } - -- return slab; --} -+ if (last && (last != (ulong)list_head->prev)) { -+ error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", -+ si->curname, list, si->slab, -+ (ulong)list_head->prev); -+ errcnt++; -+ } - -+ inuse = UINT(slab_buf + OFFSET(slab_inuse)); -+ if (inuse > si->c_num) { -+ error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", -+ si->curname, list, si->slab, inuse); -+ errcnt++; -+ } - --/* -- * Initialize any data required for scouring the kmalloc subsystem more -- * efficiently. -- */ --char slab_hdr[BUFSIZE] = { 0 }; --char kmem_cache_hdr[BUFSIZE] = { 0 }; --char free_inuse_hdr[BUFSIZE] = { 0 }; -+ if (!last) -+ goto no_inuse_check_v2; - --static void --kmem_cache_init(void) --{ -- ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2; -- long cache_count, num_offset, next_offset; -- char *cache_buf; -+ switch (s) -+ { -+ case 0: /* partial */ -+ if ((inuse == 0) || (inuse == si->c_num)) { -+ error(INFO, -+ "%s: %s list: slab: %lx bad inuse counter: %ld\n", -+ si->curname, list, si->slab, inuse); -+ errcnt++; -+ } -+ break; - -- if (vt->flags & KMEM_CACHE_UNAVAIL) -- return; -+ case 1: /* full */ -+ if (inuse != si->c_num) { -+ error(INFO, -+ "%s: %s list: slab: %lx bad inuse counter: %ld\n", -+ si->curname, list, si->slab, inuse); -+ errcnt++; -+ } -+ break; - -- if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) -- return; -+ case 2: /* free */ -+ if (inuse > 0) { -+ error(INFO, -+ "%s: %s list: slab: %lx bad inuse counter: %ld\n", -+ si->curname, list, si->slab, inuse); -+ errcnt++; -+ } -+ break; -+ } - -- if (!strlen(slab_hdr)) -- sprintf(slab_hdr, -- "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", -- space(VADDR_PRLEN > 8 ? 14 : 6), -- space(VADDR_PRLEN > 8 ? 12 : 4)); -+no_inuse_check_v2: -+ s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); -+ if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { -+ error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", -+ si->curname, list, si->slab, s_mem); -+ errcnt++; -+ } - -- if (!strlen(kmem_cache_hdr)) -- sprintf(kmem_cache_hdr, -- "CACHE%sNAME OBJSIZE ALLOCATED TOTAL SLABS SSIZE\n", -- space(VADDR_PRLEN > 8 ? 12 : 4)); -+ return(errcnt ? 
FALSE : TRUE); -+} - -- if (!strlen(free_inuse_hdr)) -- sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n"); -+/* -+ * If it's a dumpfile, save the essential slab data to avoid re-reading -+ * the whole slab chain more than once. This may seem like overkill, but -+ * if the problem is a memory leak, or just the over-use of the buffer_head -+ * cache, it's painful to wait each time subsequent kmem -s or -i commands -+ * simply need the basic slab counts. -+ */ -+struct slab_data { -+ ulong cache_addr; -+ int num_slabs; -+ int inuse; -+ ulong cpucached_cache; -+}; - -- num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? -- OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num); -- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? -- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); -- max_cnum = max_limit = max_cpus = cache_count = 0; -+#define NO_SLAB_DATA ((void *)(-1)) - -- /* -- * Pre-2.6 versions used the "cache_cache" as the head of the -- * slab chain list. 2.6 uses the "cache_chain" list_head. -- */ -- if (vt->flags & PERCPU_KMALLOC_V2) { -- get_symbol_data("cache_chain", sizeof(ulong), &cache); -- cache -= next_offset; -- cache_end = symbol_value("cache_chain"); -- } else -- cache = cache_end = symbol_value("cache_cache"); -+static void -+save_slab_data(struct meminfo *si) -+{ -+ int i; - -- cache_buf = GETBUF(SIZE(kmem_cache_s)); -+ if (si->flags & SLAB_DATA_NOSAVE) { -+ si->flags &= ~SLAB_DATA_NOSAVE; -+ return; -+ } - -- do { -- cache_count++; -+ if (ACTIVE()) -+ return; - -- if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), -- "kmem_cache_s buffer", RETURN_ON_ERROR)) { -- vt->flags |= KMEM_CACHE_UNAVAIL; -- error(INFO, -- "unable to initialize kmem slab cache subsystem\n\n"); -+ if (vt->slab_data == NO_SLAB_DATA) -+ return; -+ -+ if (!vt->slab_data) { -+ if (!(vt->slab_data = (struct slab_data *) -+ malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { -+ error(INFO, "cannot malloc slab_data table"); -+ vt->slab_data = NO_SLAB_DATA; - return; - } -+ for (i = 0; i < vt->kmem_cache_count; i++) { -+ vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; -+ vt->slab_data[i].num_slabs = 0; -+ vt->slab_data[i].inuse = 0; -+ vt->slab_data[i].cpucached_cache = 0; -+ } -+ } - -- tmp = (ulong)(UINT(cache_buf + num_offset)); -- -- if (tmp > max_cnum) -- max_cnum = tmp; -+ for (i = 0; i < vt->kmem_cache_count; i++) { -+ if (vt->slab_data[i].cache_addr == si->cache) -+ break; - -- if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) -- max_limit = tmp; -+ if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { -+ vt->slab_data[i].cache_addr = si->cache; -+ vt->slab_data[i].num_slabs = si->num_slabs; -+ vt->slab_data[i].inuse = si->inuse; -+ vt->slab_data[i].cpucached_cache = si->cpucached_cache; -+ break; -+ } -+ } -+} - -- if (tmp2 > max_cpus) -- max_cpus = tmp2; -+static int -+slab_data_saved(struct meminfo *si) -+{ -+ int i; - -- cache = ULONG(cache_buf + next_offset); -+ if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) -+ return FALSE; - -- switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) -- { -- case PERCPU_KMALLOC_V1: -- cache -= next_offset; -- break; -- case PERCPU_KMALLOC_V2: -- if (cache != cache_end) -- cache -= next_offset; -- break; -+ for (i = 0; i < vt->kmem_cache_count; i++) { -+ if (vt->slab_data[i].cache_addr == si->cache) { -+ si->inuse = vt->slab_data[i].inuse; -+ si->num_slabs = vt->slab_data[i].num_slabs; -+ si->cpucached_cache = vt->slab_data[i].cpucached_cache; -+ return TRUE; - } -+ } - -- } 
while (cache != cache_end); -+ return FALSE; -+} - -- FREEBUF(cache_buf); +static void +dump_saved_slab_data(void) +{ + int i; -- vt->kmem_max_c_num = max_cnum; -- vt->kmem_max_limit = max_limit; -- vt->kmem_max_cpus = max_cpus; -- vt->kmem_cache_count = cache_count; +- return slab; + if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) + return; - -- if (CRASHDEBUG(2)) { -- fprintf(fp, "kmem_cache_init:\n"); -- fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); -- fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); -- fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); -- fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); -- } ++ + for (i = 0; i < vt->kmem_cache_count; i++) { + if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) + break; - -- if (!(vt->flags & KMEM_CACHE_INIT)) { -- if (vt->flags & PERCPU_KMALLOC_V1) -- ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, -- kmem_cache_s_name, "kmem_cache_s.name", -- NULL, sizeof(char)); -- else if (vt->flags & PERCPU_KMALLOC_V2) -- vt->kmem_cache_namelen = 0; -- else -- ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, -- kmem_cache_s_c_name, "kmem_cache_s.c_name", -- NULL, 0); ++ + fprintf(fp, + " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n", + vt->slab_data[i].cache_addr, + vt->slab_data[i].inuse, + vt->slab_data[i].num_slabs, + vt->slab_data[i].cpucached_cache); - } -- -- vt->flags |= KMEM_CACHE_INIT; ++ } } +- /* -- * Determine the largest cpudata limit for a given cache. +- * Initialize any data required for scouring the kmalloc subsystem more +- * efficiently. + * Dump the contents of a kmem slab. */ --static ulong --max_cpudata_limit(ulong cache, ulong *cpus) -+ -+static void +-char slab_hdr[BUFSIZE] = { 0 }; +-char kmem_cache_hdr[BUFSIZE] = { 0 }; +-char free_inuse_hdr[BUFSIZE] = { 0 }; + + static void +-kmem_cache_init(void) +dump_slab(struct meminfo *si) { -- int i; -- ulong cpudata[NR_CPUS]; -- int limit; -- ulong max_limit; +- ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2; +- long cache_count, num_offset, next_offset; +- char *cache_buf; + uint16_t s_offset; -- if (vt->flags & PERCPU_KMALLOC_V2) -- goto kmem_cache_s_array; +- if (vt->flags & KMEM_CACHE_UNAVAIL) +- return; + si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem)); + si->s_mem = PTOB(BTOP(si->s_mem)); -- if (INVALID_MEMBER(kmem_cache_s_cpudata)) { -- *cpus = 0; -- return 0; +- if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) +- return; + if (si->flags & ADDRESS_SPECIFIED) { + if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) && + (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))) { @@ -36665,64 +37114,64 @@ + si->found = KMEM_ON_SLAB; /* But don't return yet... */ + else + return; - } - -- readmem(cache+OFFSET(kmem_cache_s_cpudata), -- KVADDR, &cpudata[0], -- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), -- "cpudata array", FAULT_ON_ERROR); ++ } ++ + si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep)); + si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse)); + si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index)); + s_offset = USHORT(si->slab_buf + OFFSET(kmem_slab_s_s_offset)); -- for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && -- cpudata[i]; i++) { -- readmem(cpudata[i]+OFFSET(cpucache_s_limit), -- KVADDR, &limit, sizeof(int), -- "cpucache limit", FAULT_ON_ERROR); -- if (limit > max_limit) -- max_limit = limit; +- if (!strlen(slab_hdr)) +- sprintf(slab_hdr, +- "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", +- space(VADDR_PRLEN > 8 ? 
14 : 6), +- space(VADDR_PRLEN > 8 ? 12 : 4)); + if (!(si->flags & ADDRESS_SPECIFIED)) { + fprintf(fp, slab_hdr); + DUMP_SLAB_INFO(); - } ++ } -- *cpus = i; +- if (!strlen(kmem_cache_hdr)) +- sprintf(kmem_cache_hdr, +- "CACHE%sNAME OBJSIZE ALLOCATED TOTAL SLABS SSIZE\n", +- space(VADDR_PRLEN > 8 ? 12 : 4)); + dump_slab_objects(si); +} -- return max_limit; +- if (!strlen(free_inuse_hdr)) +- sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n"); +/* + * dump_slab() adapted for newer percpu slab format. + */ --kmem_cache_s_array: +- num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? +- OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num); +- next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? +- OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); +- max_cnum = max_limit = max_cpus = cache_count = 0; +static void +dump_slab_percpu_v1(struct meminfo *si) +{ + int tmp; - -- readmem(cache+OFFSET(kmem_cache_s_array), -- KVADDR, &cpudata[0], -- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), -- "array cache array", FAULT_ON_ERROR); ++ + readmem(si->slab+OFFSET(slab_s_s_mem), + KVADDR, &si->s_mem, sizeof(ulong), + "s_mem", FAULT_ON_ERROR); -- for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && -- cpudata[i]; i++) { -- readmem(cpudata[i]+OFFSET(array_cache_limit), -- KVADDR, &limit, sizeof(int), -- "array cache limit", FAULT_ON_ERROR); -- if (limit > max_limit) -- max_limit = limit; -+ /* + /* +- * Pre-2.6 versions used the "cache_cache" as the head of the +- * slab chain list. 2.6 uses the "cache_chain" list_head. + * Include the array of kmem_bufctl_t's appended to slab. -+ */ + */ +- if (vt->flags & PERCPU_KMALLOC_V2) { +- get_symbol_data("cache_chain", sizeof(ulong), &cache); +- cache -= next_offset; +- cache_end = symbol_value("cache_chain"); +- } else +- cache = cache_end = symbol_value("cache_cache"); + tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num); -+ + +- cache_buf = GETBUF(SIZE(kmem_cache_s)); + if (si->flags & ADDRESS_SPECIFIED) { + if (INSLAB_PERCPU(si->slab, si) && + (si->spec_addr >= si->slab) && @@ -36735,66 +37184,75 @@ + si->found = KMEM_ON_SLAB; /* But don't return yet... */ + else + return; - } ++ } -- *cpus = i; -- return max_limit; +- do { +- cache_count++; + readmem(si->slab+OFFSET(slab_s_inuse), + KVADDR, &tmp, sizeof(int), + "inuse", FAULT_ON_ERROR); + si->s_inuse = tmp; -+ + +- if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), +- "kmem_cache_s buffer", RETURN_ON_ERROR)) { +- vt->flags |= KMEM_CACHE_UNAVAIL; +- error(INFO, +- "unable to initialize kmem slab cache subsystem\n\n"); +- return; +- } + readmem(si->slab+OFFSET(slab_s_free), + KVADDR, &si->free, SIZE(kmem_bufctl_t), + "kmem_bufctl_t", FAULT_ON_ERROR); -+ + +- tmp = (ulong)(UINT(cache_buf + num_offset)); + gather_slab_free_list_percpu(si); + gather_slab_cached_count(si); -+ + +- if (tmp > max_cnum) +- max_cnum = tmp; + if (!(si->flags & ADDRESS_SPECIFIED)) { + fprintf(fp, slab_hdr); + DUMP_SLAB_INFO(); + } -+ -+ dump_slab_objects_percpu(si); - } -+ - /* -- * Determine whether the current slab cache is contained in -- * the comma-separated list from a "kmem -I list1,list2 ..." -- * command entry. +- if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) +- max_limit = tmp; ++ dump_slab_objects_percpu(si); ++} + +- if (tmp2 > max_cpus) +- max_cpus = tmp2; + +- cache = ULONG(cache_buf + next_offset); ++/* + * Updated for 2.6 slab substructure. 
- */ --static int --ignore_cache(struct meminfo *si, char *name) ++ */ +static void +dump_slab_percpu_v2(struct meminfo *si) - { -- int i, argc; -- char *p1; -- char *arglist[MAXARGS]; -- char buf[BUFSIZE]; ++{ + int tmp; -- if (!si->ignore) -- return FALSE; +- switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) +- { +- case PERCPU_KMALLOC_V1: +- cache -= next_offset; +- break; +- case PERCPU_KMALLOC_V2: +- if (cache != cache_end) +- cache -= next_offset; +- break; +- } + readmem(si->slab+OFFSET(slab_s_mem), + KVADDR, &si->s_mem, sizeof(ulong), + "s_mem", FAULT_ON_ERROR); -- strcpy(buf, si->ignore); +- } while (cache != cache_end); + /* + * Include the array of kmem_bufctl_t's appended to slab. + */ + tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num); -- p1 = buf; -- while (*p1) { -- if (*p1 == ',') -- *p1 = ' '; -- p1++; -- } +- FREEBUF(cache_buf); + if (si->flags & ADDRESS_SPECIFIED) { + if (INSLAB_PERCPU(si->slab, si) && + (si->spec_addr >= si->slab) && @@ -36809,19 +37267,37 @@ + return; + } -- argc = parse_line(buf, arglist); +- vt->kmem_max_c_num = max_cnum; +- vt->kmem_max_limit = max_limit; +- vt->kmem_max_cpus = max_cpus; +- vt->kmem_cache_count = cache_count; + readmem(si->slab+OFFSET(slab_inuse), + KVADDR, &tmp, sizeof(int), + "inuse", FAULT_ON_ERROR); + si->s_inuse = tmp; -- for (i = 0; i < argc; i++) { -- if (STREQ(name, arglist[i])) -- return TRUE; +- if (CRASHDEBUG(2)) { +- fprintf(fp, "kmem_cache_init:\n"); +- fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); +- fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); +- fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); +- fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); +- } + readmem(si->slab+OFFSET(slab_free), + KVADDR, &si->free, SIZE(kmem_bufctl_t), + "kmem_bufctl_t", FAULT_ON_ERROR); -+ + +- if (!(vt->flags & KMEM_CACHE_INIT)) { +- if (vt->flags & PERCPU_KMALLOC_V1) +- ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, +- kmem_cache_s_name, "kmem_cache_s.name", +- NULL, sizeof(char)); +- else if (vt->flags & PERCPU_KMALLOC_V2) +- vt->kmem_cache_namelen = 0; +- else +- ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, +- kmem_cache_s_c_name, "kmem_cache_s.c_name", +- NULL, 0); + gather_slab_free_list_percpu(si); + gather_slab_cached_count(si); + @@ -36830,52 +37306,71 @@ + DUMP_SLAB_INFO(); } -- return FALSE; +- vt->flags |= KMEM_CACHE_INIT; + dump_slab_objects_percpu(si); } - ++ + /* -- * dump_kmem_cache() displays basic information about kmalloc() slabs. -- * At this point, only kmem_cache_s structure data for each slab is dumped. -- * -- * TBD: Given a specified physical address, and determine which slab it came -- * from, and whether it's in use or not. +- * Determine the largest cpudata limit for a given cache. + * Gather the free objects in a slab into the si->addrlist, checking for + * specified addresses that are in-slab kmem_bufctls, and making error checks + * along the way. Object address checks are deferred to dump_slab_objects(). 
*/ +-static ulong +-max_cpudata_limit(ulong cache, ulong *cpus) +-{ +- int i; +- ulong cpudata[NR_CPUS]; +- int limit; +- ulong max_limit; --#define SLAB_C_MAGIC 0x4F17A36DUL --#define SLAB_MAGIC_ALLOC 0xA5C32F2BUL /* slab is alive */ --#define SLAB_MAGIC_DESTROYED 0xB2F23C5AUL /* slab has been destroyed */ +- if (vt->flags & PERCPU_KMALLOC_V2) +- goto kmem_cache_s_array; +- +- if (INVALID_MEMBER(kmem_cache_s_cpudata)) { +- *cpus = 0; +- return 0; +- } +#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size))) --#define SLAB_CFLGS_BUFCTL 0x020000UL /* bufctls in own cache */ +- readmem(cache+OFFSET(kmem_cache_s_cpudata), +- KVADDR, &cpudata[0], +- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), +- "cpudata array", FAULT_ON_ERROR); +static void +gather_slab_free_list(struct meminfo *si) +{ + ulong *next, obj; + ulong expected, cnt; --#define KMEM_SLAB_ADDR (1) --#define KMEM_BUFCTL_ADDR (2) --#define KMEM_OBJECT_ADDR_FREE (3) --#define KMEM_OBJECT_ADDR_INUSE (4) --#define KMEM_OBJECT_ADDR_CACHED (5) --#define KMEM_ON_SLAB (6) +- for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && +- cpudata[i]; i++) { +- readmem(cpudata[i]+OFFSET(cpucache_s_limit), +- KVADDR, &limit, sizeof(int), +- "cpucache limit", FAULT_ON_ERROR); +- if (limit > max_limit) +- max_limit = limit; +- } + BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); -+ + +- *cpus = i; + if (!si->s_freep) + return; -+ + +- return max_limit; + cnt = 0; + expected = si->c_num - si->s_inuse; -+ + +-kmem_cache_s_array: + next = si->s_freep; + do { -+ + +- readmem(cache+OFFSET(kmem_cache_s_array), +- KVADDR, &cpudata[0], +- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), +- "array cache array", FAULT_ON_ERROR); + if (cnt == si->c_num) { + error(INFO, + "\"%s\" cache: too many objects found in slab free list\n", @@ -36883,7 +37378,15 @@ + si->errors++; + return; + } -+ + +- for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && +- cpudata[i]; i++) { +- readmem(cpudata[i]+OFFSET(array_cache_limit), +- KVADDR, &limit, sizeof(int), +- "array cache limit", FAULT_ON_ERROR); +- if (limit > max_limit) +- max_limit = limit; +- } + /* + * Off-slab kmem_bufctls are contained in arrays of object + * pointers that point to: @@ -36895,12 +37398,108 @@ + * 1. next kmem_bufctl (or NULL) if object is free. + * 2. the containing slab if the object is in use. + */ -+ + +- *cpus = i; +- return max_limit; +-} + if (si->c_flags & SLAB_CFLGS_BUFCTL) + obj = si->s_mem + ((next - si->s_index) * si->c_offset); + else + obj = (ulong)next - si->c_offset; +-/* +- * Determine whether the current slab cache is contained in +- * the comma-separated list from a "kmem -I list1,list2 ..." +- * command entry. 
+- */ +-static int +-ignore_cache(struct meminfo *si, char *name) +-{ +- int i, argc; +- char *p1; +- char *arglist[MAXARGS]; +- char buf[BUFSIZE]; ++ si->addrlist[cnt] = obj; + +- if (!si->ignore) +- return FALSE; ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if (INSLAB(next, si) && ++ (si->spec_addr >= (ulong)next) && ++ (si->spec_addr < (ulong)(next + 1))) { ++ si->found = KMEM_BUFCTL_ADDR; ++ return; ++ } ++ } + +- strcpy(buf, si->ignore); ++ cnt++; + +- p1 = buf; +- while (*p1) { +- if (*p1 == ',') +- *p1 = ' '; +- p1++; +- } ++ if (!INSLAB(obj, si)) { ++ error(INFO, ++ "\"%s\" cache: address not contained within slab: %lx\n", ++ si->curname, obj); ++ si->errors++; ++ } + +- argc = parse_line(buf, arglist); ++ readmem((ulong)next, KVADDR, &next, sizeof(void *), ++ "s_freep chain entry", FAULT_ON_ERROR); ++ } while (next); + +- for (i = 0; i < argc; i++) { +- if (STREQ(name, arglist[i])) +- return TRUE; ++ if (cnt != expected) { ++ error(INFO, ++ "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", ++ si->curname, expected, cnt); ++ si->errors++; + } +- +- return FALSE; + } + + + /* +- * dump_kmem_cache() displays basic information about kmalloc() slabs. +- * At this point, only kmem_cache_s structure data for each slab is dumped. +- * +- * TBD: Given a specified physical address, and determine which slab it came +- * from, and whether it's in use or not. ++ * gather_slab_free_list() adapted for newer percpu slab format. + */ + +-#define SLAB_C_MAGIC 0x4F17A36DUL +-#define SLAB_MAGIC_ALLOC 0xA5C32F2BUL /* slab is alive */ +-#define SLAB_MAGIC_DESTROYED 0xB2F23C5AUL /* slab has been destroyed */ ++#define BUFCTL_END 0xffffFFFF + +-#define SLAB_CFLGS_BUFCTL 0x020000UL /* bufctls in own cache */ ++static void ++gather_slab_free_list_percpu(struct meminfo *si) ++{ ++ int i; ++ ulong obj; ++ ulong expected, cnt; ++ int free_index; ++ ulong kmembp; ++ short *kbp; + +-#define KMEM_SLAB_ADDR (1) +-#define KMEM_BUFCTL_ADDR (2) +-#define KMEM_OBJECT_ADDR_FREE (3) +-#define KMEM_OBJECT_ADDR_INUSE (4) +-#define KMEM_OBJECT_ADDR_CACHED (5) +-#define KMEM_ON_SLAB (6) ++ BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); + -#define DUMP_KMEM_CACHE_INFO_V1() \ - { \ - char b1[BUFSIZE]; \ @@ -36913,17 +37512,13 @@ - si->inuse, si->num_slabs * si->c_num, \ - si->num_slabs, si->slabsize/1024); \ - } -+ si->addrlist[cnt] = obj; ++ if (CRASHDEBUG(1)) ++ fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n", ++ si->slab, si->s_inuse, si->c_num); -#define DUMP_KMEM_CACHE_INFO_V2() dump_kmem_cache_info_v2(si) -+ if (si->flags & ADDRESS_SPECIFIED) { -+ if (INSLAB(next, si) && -+ (si->spec_addr >= (ulong)next) && -+ (si->spec_addr < (ulong)(next + 1))) { -+ si->found = KMEM_BUFCTL_ADDR; -+ return; -+ } -+ } ++ if (si->s_inuse == si->c_num ) ++ return; -static void -dump_kmem_cache_info_v2(struct meminfo *si) @@ -36931,24 +37526,26 @@ - char b1[BUFSIZE]; - char b2[BUFSIZE]; - int namelen, sizelen, spacelen; -+ cnt++; ++ kmembp = si->slab + SIZE_OPTION(slab_s, slab); ++ readmem((ulong)kmembp, KVADDR, si->kmem_bufctl, ++ SIZE(kmem_bufctl_t) * si->c_num, ++ "kmem_bufctl array", FAULT_ON_ERROR); - fprintf(fp, "%s ", - mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); -+ if (!INSLAB(obj, si)) { -+ error(INFO, -+ "\"%s\" cache: address not contained within slab: %lx\n", -+ si->curname, obj); -+ si->errors++; -+ } ++ if (CRASHDEBUG(1)) { ++ for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) && ++ (i < si->c_num); i++) ++ fprintf(fp, "%d ", si->kmem_bufctl[i]); - namelen = strlen(si->curname); - 
sprintf(b2, "%ld", si->size); - sizelen = strlen(b2); - spacelen = 0; -+ readmem((ulong)next, KVADDR, &next, sizeof(void *), -+ "s_freep chain entry", FAULT_ON_ERROR); -+ } while (next); ++ for (kbp = (short *)&si->kmem_bufctl[0], i = 0; ++ (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num); ++ i++) ++ fprintf(fp, "%d ", *(kbp + i)); - if (namelen++ > 18) { - spacelen = 29 - namelen - sizelen; @@ -36960,24 +37557,36 @@ - } else { - fprintf(fp, "%-18s %8ld ", si->curname, si->size); - sprintf(b1, "%c%dld ", '%', 9); -+ if (cnt != expected) { -+ error(INFO, -+ "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", -+ si->curname, expected, cnt); -+ si->errors++; ++ fprintf(fp, "\n"); } -+} - fprintf(fp, b1, vt->flags & (PERCPU_KMALLOC_V2) ? - si->inuse - si->cpucached_cache : si->inuse); ++ cnt = 0; ++ expected = si->c_num - si->s_inuse; - fprintf(fp, "%8ld %5ld %3ldk\n", - si->num_slabs * si->c_num, - si->num_slabs, si->slabsize/1024); -} -+/* -+ * gather_slab_free_list() adapted for newer percpu slab format. -+ */ ++ if (SIZE(kmem_bufctl_t) == sizeof(int)) { ++ for (free_index = si->free; free_index != BUFCTL_END; ++ free_index = si->kmem_bufctl[free_index]) { ++ ++ if (cnt == si->c_num) { ++ error(INFO, ++ "\"%s\" cache: too many objects found in slab free list\n", ++ si->curname); ++ si->errors++; ++ return; ++ } ++ ++ obj = si->s_mem + (free_index*si->size); ++ si->addrlist[cnt] = obj; ++ cnt++; ++ } ++ } else if (SIZE(kmem_bufctl_t) == sizeof(short)) { ++ kbp = (short *)&si->kmem_bufctl[0]; -#define DUMP_SLAB_INFO() \ - { \ @@ -36997,12 +37606,12 @@ - vt->flags & PERCPU_KMALLOC_V1 ? freeobjs + si->cpucached_slab :\ - freeobjs); \ - } -+#define BUFCTL_END 0xffffFFFF ++ for (free_index = si->free; free_index != BUFCTL_END; ++ free_index = (int)*(kbp + free_index)) { - static void +-static void -dump_kmem_cache(struct meminfo *si) -+gather_slab_free_list_percpu(struct meminfo *si) - { +-{ - char buf[BUFSIZE]; - char kbuf[BUFSIZE]; - char *reqname; @@ -37010,28 +37619,89 @@ - ulong name, magic; - int cnt; - char *p1; -- ++ if (cnt == si->c_num) { ++ error(INFO, ++ "\"%s\" cache: too many objects found in slab free list\n", si->curname); ++ si->errors++; ++ return; ++ } + - if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) -- error(FATAL, ++ obj = si->s_mem + (free_index*si->size); ++ si->addrlist[cnt] = obj; ++ cnt++; ++ } ++ } else + error(FATAL, - "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); -- ++ "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n", ++ SIZE(kmem_bufctl_t)); + - si->found = si->retval = 0; - reqname = NULL; -- ++ if (cnt != expected) { ++ error(INFO, ++ "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", ++ si->curname, expected, cnt); ++ si->errors++; ++ } ++} ++ ++ ++ ++/* ++ * Dump the FREE, [ALLOCATED] and objects of a slab. 
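For the percpu variant introduced in this hunk, the free list is no longer a pointer chain: it is an index chain stored in the kmem_bufctl array appended to the slab structure, terminated by BUFCTL_END, with the entry width set by the kernel's kmem_bufctl_t. A stripped-down version of that walk, assuming int-sized bufctls and that the caller has already read si->kmem_bufctl (illustrative sketch, invented helper name):

static ulong
count_free_objects_percpu(struct meminfo *si)
{
	int idx;
	ulong nfree = 0;

	for (idx = si->free; idx != BUFCTL_END; idx = si->kmem_bufctl[idx])
		nfree++;	/* each index in the chain names one free object */

	return nfree;
}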
++ */ ++ ++#define DUMP_SLAB_OBJECT() \ ++ for (j = on_free_list = 0; j < si->c_num; j++) { \ ++ if (obj == si->addrlist[j]) { \ ++ on_free_list = TRUE; \ ++ break; \ ++ } \ ++ } \ ++ \ ++ if (on_free_list) { \ ++ if (!(si->flags & ADDRESS_SPECIFIED)) \ ++ fprintf(fp, " %lx\n", obj); \ ++ if (si->flags & ADDRESS_SPECIFIED) { \ ++ if (INOBJECT(si->spec_addr, obj)) { \ ++ si->found = \ ++ KMEM_OBJECT_ADDR_FREE; \ ++ si->container = obj; \ ++ return; \ ++ } \ ++ } \ ++ } else { \ ++ if (!(si->flags & ADDRESS_SPECIFIED)) \ ++ fprintf(fp, " [%lx]\n", obj); \ ++ cnt++; \ ++ if (si->flags & ADDRESS_SPECIFIED) { \ ++ if (INOBJECT(si->spec_addr, obj)) { \ ++ si->found = \ ++ KMEM_OBJECT_ADDR_INUSE; \ ++ si->container = obj; \ ++ return; \ ++ } \ ++ } \ ++ } + - if ((!(si->flags & VERBOSE) || si->reqname) && - !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) - fprintf(fp, kmem_cache_hdr); -+ int i; -+ ulong obj; -+ ulong expected, cnt; -+ int free_index; -+ ulong kmembp; -+ short *kbp; ++static void ++dump_slab_objects(struct meminfo *si) ++{ ++ int i, j; ++ ulong *next; ++ int on_free_list; ++ ulong cnt, expected; ++ ulong bufctl, obj; - si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); - cnt = 0; - si->cache = cache_cache = symbol_value("cache_cache"); -+ BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); ++ gather_slab_free_list(si); - if (si->flags & ADDRESS_SPECIFIED) { - if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { @@ -37045,37 +37715,36 @@ - error(INFO, - "ignoring pre-selected %s cache for address: %lx\n", - si->reqname, si->spec_addr, si->reqname); -+ if (CRASHDEBUG(1)) -+ fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n", -+ si->slab, si->s_inuse, si->c_num); ++ if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) ++ return; - reqname = p1; - } else - reqname = si->reqname; -+ if (si->s_inuse == si->c_num ) -+ return; ++ cnt = 0; ++ expected = si->s_inuse; ++ si->container = 0; - si->cache_buf = GETBUF(SIZE(kmem_cache_s)); -+ kmembp = si->slab + SIZE_OPTION(slab_s, slab); -+ readmem((ulong)kmembp, KVADDR, si->kmem_bufctl, -+ SIZE(kmem_bufctl_t) * si->c_num, -+ "kmem_bufctl array", FAULT_ON_ERROR); ++ if (CRASHDEBUG(1)) ++ for (i = 0; i < si->c_num; i++) { ++ fprintf(fp, "si->addrlist[%d]: %lx\n", ++ i, si->addrlist[i]); ++ } - do { - if ((si->flags & VERBOSE) && !si->reqname && - !(si->flags & ADDRESS_SPECIFIED)) - fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); -+ if (CRASHDEBUG(1)) { -+ for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) && -+ (i < si->c_num); i++) -+ fprintf(fp, "%d ", si->kmem_bufctl[i]); ++ if (!(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, free_inuse_hdr); - readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), - "kmem_cache_s buffer", FAULT_ON_ERROR); -+ for (kbp = (short *)&si->kmem_bufctl[0], i = 0; -+ (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num); -+ i++) -+ fprintf(fp, "%d ", *(kbp + i)); ++ /* For on-slab bufctls, c_offset is the distance between the start of ++ * an obj and its related bufctl. For off-slab bufctls, c_offset is ++ * the distance between objs in the slab. 
++ */ - if (vt->kmem_cache_namelen) { - BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), @@ -37087,69 +37756,58 @@ - error(FATAL, - "cannot read kmem_cache_s.c_name string at %lx\n", - name); -- } -+ fprintf(fp, "\n"); -+ } ++ if (si->c_flags & SLAB_CFLGS_BUFCTL) { ++ for (i = 0, next = si->s_index; i < si->c_num; i++, next++) { ++ obj = si->s_mem + ++ ((next - si->s_index) * si->c_offset); ++ DUMP_SLAB_OBJECT(); + } ++ } else { ++ /* ++ * Get the "real" s_mem, i.e., without the offset stripped off. ++ * It contains the address of the first object. ++ */ ++ readmem(si->slab+OFFSET(kmem_slab_s_s_mem), ++ KVADDR, &obj, sizeof(ulong), ++ "s_mem", FAULT_ON_ERROR); - if (reqname && !STREQ(reqname, buf)) - goto next_cache; -+ cnt = 0; -+ expected = si->c_num - si->s_inuse; - +- - if (ignore_cache(si, buf)) { - fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); - goto next_cache; -+ if (SIZE(kmem_bufctl_t) == sizeof(int)) { -+ for (free_index = si->free; free_index != BUFCTL_END; -+ free_index = si->kmem_bufctl[free_index]) { -+ -+ if (cnt == si->c_num) { -+ error(INFO, -+ "\"%s\" cache: too many objects found in slab free list\n", -+ si->curname); -+ si->errors++; -+ return; -+ } -+ -+ obj = si->s_mem + (free_index*si->size); -+ si->addrlist[cnt] = obj; -+ cnt++; - } -+ } else if (SIZE(kmem_bufctl_t) == sizeof(short)) { -+ kbp = (short *)&si->kmem_bufctl[0]; +- } ++ for (i = 0; i < si->c_num; i++) { ++ DUMP_SLAB_OBJECT(); - si->curname = buf; -+ for (free_index = si->free; free_index != BUFCTL_END; -+ free_index = (int)*(kbp + free_index)) { ++ if (si->flags & ADDRESS_SPECIFIED) { ++ bufctl = obj + si->c_offset; - if (CRASHDEBUG(1)) - fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); - console("cache: %lx %s\n", si->cache, si->curname); -+ if (cnt == si->c_num) { -+ error(INFO, -+ "\"%s\" cache: too many objects found in slab free list\n", si->curname); -+ si->errors++; -+ return; -+ } ++ if ((si->spec_addr >= bufctl) && ++ (si->spec_addr < ++ (bufctl + SIZE(kmem_bufctl_t)))) { ++ si->found = KMEM_BUFCTL_ADDR; ++ return; ++ } ++ } - magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); -+ obj = si->s_mem + (free_index*si->size); -+ si->addrlist[cnt] = obj; -+ cnt++; -+ } -+ } else -+ error(FATAL, -+ "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n", -+ SIZE(kmem_bufctl_t)); ++ obj += (si->c_offset + SIZE(kmem_bufctl_t)); ++ } ++ } - if (magic == SLAB_C_MAGIC) { -+ if (cnt != expected) { -+ error(INFO, -+ "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", -+ si->curname, expected, cnt); -+ si->errors++; -+ } -+} ++ if (cnt != expected) { ++ error(INFO, ++ "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", ++ si->curname, expected, cnt); ++ si->errors++; ++ } - si->size = ULONG(si->cache_buf + - OFFSET(kmem_cache_s_c_org_size)); @@ -37171,69 +37829,43 @@ - OFFSET(kmem_cache_s_c_gfporder)); - si->c_num = ULONG(si->cache_buf + - OFFSET(kmem_cache_s_c_num)); ++} - do_slab_chain(SLAB_GET_COUNTS, si); - if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) - DUMP_KMEM_CACHE_INFO_V1(); +/* -+ * Dump the FREE, [ALLOCATED] and objects of a slab. -+ */ ++ * dump_slab_objects() adapted for newer percpu slab format. 
++ */ - if (si->flags == GET_SLAB_PAGES) - si->retval += (si->num_slabs * - (si->slabsize/PAGESIZE())); -+#define DUMP_SLAB_OBJECT() \ -+ for (j = on_free_list = 0; j < si->c_num; j++) { \ -+ if (obj == si->addrlist[j]) { \ -+ on_free_list = TRUE; \ -+ break; \ -+ } \ -+ } \ -+ \ -+ if (on_free_list) { \ -+ if (!(si->flags & ADDRESS_SPECIFIED)) \ -+ fprintf(fp, " %lx\n", obj); \ -+ if (si->flags & ADDRESS_SPECIFIED) { \ -+ if (INOBJECT(si->spec_addr, obj)) { \ -+ si->found = \ -+ KMEM_OBJECT_ADDR_FREE; \ -+ return; \ -+ } \ -+ } \ -+ } else { \ -+ if (!(si->flags & ADDRESS_SPECIFIED)) \ -+ fprintf(fp, " [%lx]\n", obj); \ -+ cnt++; \ -+ if (si->flags & ADDRESS_SPECIFIED) { \ -+ if (INOBJECT(si->spec_addr, obj)) { \ -+ si->found = \ -+ KMEM_OBJECT_ADDR_INUSE; \ -+ return; \ -+ } \ -+ } \ -+ } ++static void ++dump_slab_objects_percpu(struct meminfo *si) ++{ ++ int i, j; ++ int on_free_list, on_cpudata_list, on_shared_list; ++ ulong cnt, expected; ++ ulong obj; - if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { - si->slab = (si->flags & ADDRESS_SPECIFIED) ? - vaddr_to_slab(si->spec_addr) : 0; - - do_slab_chain(SLAB_WALKTHROUGH, si); -+static void -+dump_slab_objects(struct meminfo *si) -+{ -+ int i, j; -+ ulong *next; -+ int on_free_list; -+ ulong cnt, expected; -+ ulong bufctl, obj; ++ if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) ++ return; - if (si->found) { - fprintf(fp, kmem_cache_hdr); - DUMP_KMEM_CACHE_INFO_V1(); - fprintf(fp, slab_hdr); - DUMP_SLAB_INFO(); -+ gather_slab_free_list(si); ++ cnt = 0; ++ expected = si->s_inuse; ++ si->container = 0; - switch (si->found) - { @@ -37243,8 +37875,11 @@ - fprintf(fp, - "(ON-SLAB kmem_bufctl_t)\n"); - break; -+ if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) -+ return; ++ if (CRASHDEBUG(1)) ++ for (i = 0; i < si->c_num; i++) { ++ fprintf(fp, "si->addrlist[%d]: %lx\n", ++ i, si->addrlist[i]); ++ } - case KMEM_SLAB_ADDR: - fprintf(fp, " %lx ", @@ -37252,8 +37887,8 @@ - fprintf(fp, - "(ON-SLAB kmem_slab_t)\n"); - break; -+ cnt = 0; -+ expected = si->s_inuse; ++ if (!(si->flags & ADDRESS_SPECIFIED)) ++ fprintf(fp, free_inuse_hdr); - case KMEM_ON_SLAB: - fprintf(fp, " %lx ", @@ -37267,11 +37902,10 @@ - fprintf(fp, " %lx\n", - (ulong)si->spec_addr); - break; -+ if (CRASHDEBUG(1)) -+ for (i = 0; i < si->c_num; i++) { -+ fprintf(fp, "si->addrlist[%d]: %lx\n", -+ i, si->addrlist[i]); -+ } ++ for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { ++ on_free_list = FALSE; ++ on_cpudata_list = FALSE; ++ on_shared_list = FALSE; - case KMEM_OBJECT_ADDR_INUSE: - fprintf(fp, free_inuse_hdr); @@ -37279,184 +37913,30 @@ - (ulong)si->spec_addr); - break; - } -+ if (!(si->flags & ADDRESS_SPECIFIED)) -+ fprintf(fp, free_inuse_hdr); - -- break; -- } -- } -+ /* For on-slab bufctls, c_offset is the distance between the start of -+ * an obj and its related bufctl. For off-slab bufctls, c_offset is -+ * the distance between objs in the slab. -+ */ - -- } else { -- error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", -- si->curname, magic); -- si->errors++; -+ if (si->c_flags & SLAB_CFLGS_BUFCTL) { -+ for (i = 0, next = si->s_index; i < si->c_num; i++, next++) { -+ obj = si->s_mem + -+ ((next - si->s_index) * si->c_offset); -+ DUMP_SLAB_OBJECT(); - } -+ } else { -+ /* -+ * Get the "real" s_mem, i.e., without the offset stripped off. -+ * It contains the address of the first object. 
-+ */ -+ readmem(si->slab+OFFSET(kmem_slab_s_s_mem), -+ KVADDR, &obj, sizeof(ulong), -+ "s_mem", FAULT_ON_ERROR); - --next_cache: -- si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp)); -+ for (i = 0; i < si->c_num; i++) { -+ DUMP_SLAB_OBJECT(); - -- } while (si->cache != cache_cache); -+ if (si->flags & ADDRESS_SPECIFIED) { -+ bufctl = obj + si->c_offset; - -- FREEBUF(si->cache_buf); -+ if ((si->spec_addr >= bufctl) && -+ (si->spec_addr < -+ (bufctl + SIZE(kmem_bufctl_t)))) { -+ si->found = KMEM_BUFCTL_ADDR; -+ return; -+ } -+ } - -- if ((si->flags & ADDRESS_SPECIFIED) && !si->found) -- error(INFO, "%s: address not found in cache: %lx\n", -- reqname, si->spec_addr); -- -- if (si->errors) -- error(INFO, "%ld error%s encountered\n", -- si->errors, si->errors > 1 ? "s" : ""); -+ obj += (si->c_offset + SIZE(kmem_bufctl_t)); -+ } -+ } -+ -+ if (cnt != expected) { -+ error(INFO, -+ "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", -+ si->curname, expected, cnt); -+ si->errors++; -+ } - -- FREEBUF(si->addrlist); - } - -+ - /* -- * dump_kmem_cache() adapted for newer percpu slab format. -+ * dump_slab_objects() adapted for newer percpu slab format. - */ - - static void --dump_kmem_cache_percpu_v1(struct meminfo *si) -+dump_slab_objects_percpu(struct meminfo *si) - { -- int i; -- char buf[BUFSIZE]; -- char kbuf[BUFSIZE]; -- char *reqname; -- ulong cache_cache; -- ulong name; -- int cnt; -- uint tmp_val; /* Used as temporary variable to read sizeof(int) and -- assigned to ulong variable. We are doing this to mask -- the endian issue */ -- char *p1; -+ int i, j; -+ int on_free_list, on_cpudata_list, on_shared_list; -+ ulong cnt, expected; -+ ulong obj; - -- if (!(vt->flags & PERCPU_KMALLOC_V1)) -- error(FATAL, -- "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n"); -+ if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) -+ return; - -- si->found = si->retval = 0; -- reqname = NULL; -+ cnt = 0; -+ expected = si->s_inuse; - -- if ((!(si->flags & VERBOSE) || si->reqname) && -- !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) -- fprintf(fp, kmem_cache_hdr); -+ if (CRASHDEBUG(1)) -+ for (i = 0; i < si->c_num; i++) { -+ fprintf(fp, "si->addrlist[%d]: %lx\n", -+ i, si->addrlist[i]); -+ } - -- si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); -- si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); -- for (i = 0; i < vt->kmem_max_cpus; i++) -- si->cpudata[i] = (ulong *) -- GETBUF(vt->kmem_max_limit * sizeof(ulong)); -+ if (!(si->flags & ADDRESS_SPECIFIED)) -+ fprintf(fp, free_inuse_hdr); - -- cnt = 0; -- si->cache = cache_cache = symbol_value("cache_cache"); -+ for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { -+ on_free_list = FALSE; -+ on_cpudata_list = FALSE; -+ on_shared_list = FALSE; - -- if (si->flags & ADDRESS_SPECIFIED) { -- if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { + for (j = 0; j < si->c_num; j++) { + if (obj == si->addrlist[j]) { + on_free_list = TRUE; + break; + } + } -+ + +- break; +- } +- } + on_cpudata_list = check_cpudata_list(si, obj); + on_shared_list = check_shared_list(si, obj); -+ + +- } else { +- error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", +- si->curname, magic); + if (on_free_list && on_cpudata_list) { - error(INFO, -- "address is not allocated in slab subsystem: %lx\n", -- si->spec_addr); -- return; ++ error(INFO, + "\"%s\" cache: object %lx on both free and cpu %d lists\n", + si->curname, obj, si->cpu); -+ si->errors++; + si->errors++; } -- -- if 
(si->reqname && (si->reqname != p1)) + if (on_free_list && on_shared_list) { - error(INFO, -- "ignoring pre-selected %s cache for address: %lx\n", -- si->reqname, si->spec_addr, si->reqname); -- reqname = p1; -- } else -- reqname = si->reqname; -- -- do { -- if ((si->flags & VERBOSE) && !si->reqname && -- !(si->flags & ADDRESS_SPECIFIED)) -- fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); -- -- if (vt->kmem_cache_namelen) { -- readmem(si->cache+OFFSET(kmem_cache_s_name), -- KVADDR, buf, vt->kmem_cache_namelen, -- "name array", FAULT_ON_ERROR); -- } else { -- readmem(si->cache+OFFSET(kmem_cache_s_name), -- KVADDR, &name, sizeof(ulong), -- "name", FAULT_ON_ERROR); -- if (!read_string(name, buf, BUFSIZE-1)) -- error(FATAL, -- "cannot read kmem_cache_s.name string at %lx\n", -- name); ++ error(INFO, + "\"%s\" cache: object %lx on both free and shared lists\n", + si->curname, obj); + si->errors++; @@ -37466,7 +37946,7 @@ + "\"%s\" cache: object %lx on both cpu %d and shared lists\n", + si->curname, obj, si->cpu); + si->errors++; - } ++ } + + if (on_free_list) { + if (!(si->flags & ADDRESS_SPECIFIED)) @@ -37475,6 +37955,7 @@ + if (INOBJECT(si->spec_addr, obj)) { + si->found = + KMEM_OBJECT_ADDR_FREE; ++ si->container = obj; + return; + } + } @@ -37487,6 +37968,7 @@ + if (INOBJECT(si->spec_addr, obj)) { + si->found = + KMEM_OBJECT_ADDR_CACHED; ++ si->container = obj; + return; + } + } @@ -37498,6 +37980,7 @@ + if (INOBJECT(si->spec_addr, obj)) { + si->found = + KMEM_OBJECT_ADDR_SHARED; ++ si->container = obj; + return; + } + } @@ -37509,56 +37992,119 @@ + if (INOBJECT(si->spec_addr, obj)) { + si->found = + KMEM_OBJECT_ADDR_INUSE; ++ si->container = obj; + return; + } + } + } + } -- if (reqname && !STREQ(reqname, buf)) -- goto next_cache; +-next_cache: +- si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp)); +- +- } while (si->cache != cache_cache); +- +- FREEBUF(si->cache_buf); +- +- if ((si->flags & ADDRESS_SPECIFIED) && !si->found) +- error(INFO, "%s: address not found in cache: %lx\n", +- reqname, si->spec_addr); +- +- if (si->errors) +- error(INFO, "%ld error%s encountered\n", +- si->errors, si->errors > 1 ? "s" : ""); +- +- FREEBUF(si->addrlist); + if (cnt != expected) { + error(INFO, + "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", + si->curname, expected, cnt); + si->errors++; + } -+} + } -- if (ignore_cache(si, buf)) { -- fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); -- goto next_cache; -- } -+/* + /* +- * dump_kmem_cache() adapted for newer percpu slab format. + * Determine how many of the "inuse" slab objects are actually cached + * in the kmem_cache_s header. Set the per-slab count and update the + * cumulative per-cache count. With the addition of the shared list + * check, the terms "cpucached_cache" and "cpucached_slab" are somewhat + * misleading. But they both are types of objects that are cached + * in the kmem_cache_s header, just not necessarily per-cpu. -+ */ + */ -- si->curname = buf; -+static void + static void +-dump_kmem_cache_percpu_v1(struct meminfo *si) +gather_slab_cached_count(struct meminfo *si) -+{ -+ int i; + { + int i; +- char buf[BUFSIZE]; +- char kbuf[BUFSIZE]; +- char *reqname; +- ulong cache_cache; +- ulong name; +- int cnt; +- uint tmp_val; /* Used as temporary variable to read sizeof(int) and +- assigned to ulong variable. 
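The tmp_val declaration being removed around this point carries a comment worth glossing: reading a 4-byte kernel member straight into an 8-byte ulong would leave the value in the wrong half of the destination on big-endian 64-bit targets, so the code reads into a uint of the member's true size and only then widens. The same pattern as a tiny helper (name is illustrative, not from the patch):

static ulong
read_uint_as_ulong(ulong addr, char *label)
{
	uint tmp_val;

	readmem(addr, KVADDR, &tmp_val, sizeof(uint), label, FAULT_ON_ERROR);
	return (ulong)tmp_val;	/* widen only after the correctly-sized read */
}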
We are doing this to mask +- the endian issue */ +- char *p1; +- +- if (!(vt->flags & PERCPU_KMALLOC_V1)) +- error(FATAL, +- "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n"); +- +- si->found = si->retval = 0; +- reqname = NULL; +- +- if ((!(si->flags & VERBOSE) || si->reqname) && +- !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) +- fprintf(fp, kmem_cache_hdr); +- +- si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); +- si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); +- for (i = 0; i < vt->kmem_max_cpus; i++) +- si->cpudata[i] = (ulong *) +- GETBUF(vt->kmem_max_limit * sizeof(ulong)); +- +- cnt = 0; +- si->cache = cache_cache = symbol_value("cache_cache"); +- +- if (si->flags & ADDRESS_SPECIFIED) { +- if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { +- error(INFO, +- "address is not allocated in slab subsystem: %lx\n", +- si->spec_addr); +- return; +- } +- +- if (si->reqname && (si->reqname != p1)) +- error(INFO, +- "ignoring pre-selected %s cache for address: %lx\n", +- si->reqname, si->spec_addr, si->reqname); +- reqname = p1; +- } else +- reqname = si->reqname; + ulong obj; + int in_cpudata, in_shared; -- readmem(si->cache+OFFSET(kmem_cache_s_objsize), -- KVADDR, &tmp_val, sizeof(uint), -- "objsize", FAULT_ON_ERROR); -- si->size = (ulong)tmp_val; +- do { +- if ((si->flags & VERBOSE) && !si->reqname && +- !(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); + si->cpucached_slab = 0; -- if (!si->size) { -- if (STREQ(si->curname, "kmem_cache")) -- si->size = SIZE(kmem_cache_s); -- else { -- error(INFO, "\"%s\" cache: objsize: %ld\n", -- si->curname, si->size); -- si->errors++; +- if (vt->kmem_cache_namelen) { +- readmem(si->cache+OFFSET(kmem_cache_s_name), +- KVADDR, buf, vt->kmem_cache_namelen, +- "name array", FAULT_ON_ERROR); +- } else { +- readmem(si->cache+OFFSET(kmem_cache_s_name), +- KVADDR, &name, sizeof(ulong), +- "name", FAULT_ON_ERROR); +- if (!read_string(name, buf, BUFSIZE-1)) +- error(FATAL, +- "cannot read kmem_cache_s.name string at %lx\n", +- name); + for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { + in_cpudata = in_shared = 0; + if (check_cpudata_list(si, obj)) { @@ -37567,7 +38113,30 @@ + if (si->flags & SLAB_GET_COUNTS) { + si->cpucached_cache++; + } -+ } + } +- +- if (reqname && !STREQ(reqname, buf)) +- goto next_cache; +- +- if (ignore_cache(si, buf)) { +- fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); +- goto next_cache; +- } +- +- si->curname = buf; +- +- readmem(si->cache+OFFSET(kmem_cache_s_objsize), +- KVADDR, &tmp_val, sizeof(uint), +- "objsize", FAULT_ON_ERROR); +- si->size = (ulong)tmp_val; +- +- if (!si->size) { +- if (STREQ(si->curname, "kmem_cache")) +- si->size = SIZE(kmem_cache_s); +- else { +- error(INFO, "\"%s\" cache: objsize: %ld\n", +- si->curname, si->size); +- si->errors++; + if (check_shared_list(si, obj)) { + in_shared = TRUE; + if (!in_cpudata) { @@ -37747,14 +38316,22 @@ - readmem(si->cache+OFFSET(kmem_cache_s_next), - KVADDR, &si->cache, sizeof(ulong), - "kmem_cache_s next", FAULT_ON_ERROR); -- -- si->cache -= OFFSET(kmem_cache_s_next); -- -- } while (si->cache != cache_cache); + if (CRASHDEBUG(2)) + fprintf(fp, "%s: cpu[%d] avail: %d\n", + si->curname, i, avail); +- si->cache -= OFFSET(kmem_cache_s_next); ++ readmem(cpudata[i]+SIZE(array_cache), ++ KVADDR, si->cpudata[i], ++ sizeof(void *) * avail, ++ "array_cache avail", FAULT_ON_ERROR); + +- } while (si->cache != cache_cache); ++ if (CRASHDEBUG(2)) ++ for (j = 
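The gather_cpudata_list code in this region depends on the array_cache layout: the avail count lives inside the structure, and the cached object pointers follow it immediately in memory, which is why a single readmem() at acache + SIZE(array_cache) can pull the whole set. Roughly (hypothetical helper; the clamp stands in for the patch's fuller error reporting):

static int
read_array_cache(ulong acache, ulong *objs, long max)
{
	int avail;

	readmem(acache + OFFSET(array_cache_avail), KVADDR, &avail,
		sizeof(int), "array_cache avail", FAULT_ON_ERROR);

	if (avail <= 0)
		return 0;
	if (avail > max)
		avail = max;	/* never overrun the caller's buffer */

	readmem(acache + SIZE(array_cache), KVADDR, objs,
		sizeof(void *) * avail, "array_cache entries", FAULT_ON_ERROR);

	return avail;
}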
0; j < avail; j++) ++ fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); ++ } + - if ((si->flags & ADDRESS_SPECIFIED) && !si->found) - error(INFO, "%s: address not found in cache: %lx\n", - reqname, si->spec_addr); @@ -37762,26 +38339,15 @@ - if (si->errors) - error(INFO, "%ld error%s encountered\n", - si->errors, si->errors > 1 ? "s" : ""); -+ readmem(cpudata[i]+SIZE(array_cache), -+ KVADDR, si->cpudata[i], -+ sizeof(void *) * avail, -+ "array_cache avail", FAULT_ON_ERROR); - -- FREEBUF(si->addrlist); -- FREEBUF(si->kmem_bufctl); -- for (i = 0; i < vt->kmem_max_cpus; i++) -- FREEBUF(si->cpudata[i]); -+ if (CRASHDEBUG(2)) -+ for (j = 0; j < avail; j++) -+ fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); -+ } - --} + /* + * If the shared list contains anything, gather them as well. + */ + BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit); +- FREEBUF(si->addrlist); +- FREEBUF(si->kmem_bufctl); +- for (i = 0; i < vt->kmem_max_cpus; i++) +- FREEBUF(si->cpudata[i]); + if (!VALID_MEMBER(kmem_list3_shared) || + !VALID_MEMBER(kmem_cache_s_lists) || + !readmem(si->cache+OFFSET(kmem_cache_s_lists)+ @@ -37791,13 +38357,38 @@ + KVADDR, &avail, sizeof(int), "shared array_cache avail", + RETURN_ON_ERROR|QUIET) || !avail) + return; ++ ++ if (avail > vt->kmem_max_limit) { ++ error(INFO, ++ "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", ++ si->curname, avail, vt->kmem_max_limit); ++ si->errors++; ++ return; ++ } --/* ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: shared avail: %d\n", ++ si->curname, avail); ++ ++ readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache, ++ sizeof(void *) * avail, "shared array_cache avail", ++ FAULT_ON_ERROR); ++ ++ if (CRASHDEBUG(2)) ++ for (j = 0; j < avail; j++) ++ fprintf(fp, " %lx (shared list)\n", si->shared_array_cache[j]); + } + + ++ + /* - * Updated for 2.6 slab substructure. -- */ --static void ++ * Updated gather_cpudata_list_v2 for per-node kmem_list3's in kmem_cache + */ + static void -dump_kmem_cache_percpu_v2(struct meminfo *si) --{ ++gather_cpudata_list_v2_nodes(struct meminfo *si, int index) + { - int i; - char buf[BUFSIZE]; - char kbuf[BUFSIZE]; @@ -37809,77 +38400,50 @@ - assigned to ulong variable. 
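Besides the per-cpu arrays, the 2.6 slab hangs a per-node shared array_cache off kmem_list3, and the hunks here fold those objects into the same cached-object bookkeeping. A sketch of locating one cache's shared pointer, using the same member offsets the patch uses (invented helper name; returns 0 when the kernel has no shared cache or the read fails):

static ulong
shared_cache_pointer(ulong cache)
{
	ulong shared;

	if (INVALID_MEMBER(kmem_list3_shared) ||
	    INVALID_MEMBER(kmem_cache_s_lists))
		return 0;	/* older slab layout: no shared array_cache */

	if (!readmem(cache + OFFSET(kmem_cache_s_lists) +
	    OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
	    "kmem_list3 shared", RETURN_ON_ERROR|QUIET))
		return 0;

	return shared;
}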
We are doing this to mask - the endian issue */ - char *p1; -+ if (avail > vt->kmem_max_limit) { -+ error(INFO, -+ "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", -+ si->curname, avail, vt->kmem_max_limit); -+ si->errors++; -+ return; -+ } - +- - if (!(vt->flags & PERCPU_KMALLOC_V2)) - error(FATAL, - "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n"); -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%s: shared avail: %d\n", -+ si->curname, avail); - +- - si->found = si->retval = 0; - reqname = NULL; -+ readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache, -+ sizeof(void *) * avail, "shared array_cache avail", -+ FAULT_ON_ERROR); - -- if ((!(si->flags & VERBOSE) || si->reqname) && -- !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) -- fprintf(fp, kmem_cache_hdr); -+ if (CRASHDEBUG(2)) -+ for (j = 0; j < avail; j++) -+ fprintf(fp, " %lx (shared list)\n", si->shared_array_cache[j]); -+} - -- si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); -- si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); -- for (i = 0; i < vt->kmem_max_cpus; i++) -- si->cpudata[i] = (ulong *) -- GETBUF(vt->kmem_max_limit * sizeof(ulong)); - -- cnt = 0; - -- get_symbol_data("cache_chain", sizeof(ulong), &si->cache); -- si->cache -= OFFSET(kmem_cache_s_next); -- cache_end = symbol_value("cache_chain"); -+/* -+ * Updated gather_cpudata_list_v2 for per-node kmem_list3's in kmem_cache -+ */ -+static void -+gather_cpudata_list_v2_nodes(struct meminfo *si, int index) -+{ + int i, j; + int avail; + ulong cpudata[NR_CPUS]; + ulong shared; + ulong *start_address; -- if (si->flags & ADDRESS_SPECIFIED) { -- if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { +- if ((!(si->flags & VERBOSE) || si->reqname) && +- !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) +- fprintf(fp, kmem_cache_hdr); + start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); + readmem(si->cache+OFFSET(kmem_cache_s_array), + KVADDR, &cpudata[0], + sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), + "array_cache array", FAULT_ON_ERROR); -+ + +- si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); +- si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); +- for (i = 0; i < vt->kmem_max_cpus; i++) +- si->cpudata[i] = (ulong *) +- GETBUF(vt->kmem_max_limit * sizeof(ulong)); + for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && + (cpudata[i]) && !(index); i++) { + BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); -+ + +- cnt = 0; + readmem(cpudata[i]+OFFSET(array_cache_avail), + KVADDR, &avail, sizeof(int), + "array cache avail", FAULT_ON_ERROR); -+ + +- get_symbol_data("cache_chain", sizeof(ulong), &si->cache); +- si->cache -= OFFSET(kmem_cache_s_next); +- cache_end = symbol_value("cache_chain"); + if (!avail) + continue; -+ + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { + if (avail > vt->kmem_max_limit) { error(INFO, - "address is not allocated in slab subsystem: %lx\n", @@ -37889,11 +38453,7 @@ + si->curname, avail, vt->kmem_max_limit); + si->errors++; } -+ -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%s: cpu[%d] avail: %d\n", -+ si->curname, i, avail); - +- - if (si->reqname && (si->reqname != p1)) - error(INFO, - "ignoring pre-selected %s cache for address: %lx\n", @@ -37901,19 +38461,11 @@ - reqname = p1; - } else - reqname = si->reqname; -+ readmem(cpudata[i]+SIZE(array_cache), -+ KVADDR, si->cpudata[i], -+ sizeof(void *) * avail, -+ "array_cache avail", 
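The per-cpu loop in this hunk iterates kmem_cache_s.array[], which is dimensioned by the compile-time cpu limit and only sparsely populated, so a NULL slot simply means that cpu has no array_cache. The iteration skeleton, reduced to a count (illustrative sketch):

static int
count_cpu_caches(ulong cache)
{
	int i, cnt;
	ulong cpudata[NR_CPUS];

	readmem(cache + OFFSET(kmem_cache_s_array), KVADDR, &cpudata[0],
		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
		"array_cache array", FAULT_ON_ERROR);

	for (i = cnt = 0; i < ARRAY_LENGTH(kmem_cache_s_array); i++)
		if (cpudata[i])
			cnt++;	/* this cpu slot owns an array_cache */

	return cnt;
}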
FAULT_ON_ERROR); - +- - do { - if ((si->flags & VERBOSE) && !si->reqname && - !(si->flags & ADDRESS_SPECIFIED)) - fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); -+ if (CRASHDEBUG(2)) -+ for (j = 0; j < avail; j++) -+ fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); -+ } - if (vt->kmem_cache_namelen) { - readmem(si->cache+OFFSET(kmem_cache_s_name), @@ -37928,6 +38480,26 @@ - "cannot read kmem_cache_s.name string at %lx\n", - name); - } ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: cpu[%d] avail: %d\n", ++ si->curname, i, avail); ++ ++ readmem(cpudata[i]+SIZE(array_cache), ++ KVADDR, si->cpudata[i], ++ sizeof(void *) * avail, ++ "array_cache avail", FAULT_ON_ERROR); + +- if (reqname && !STREQ(reqname, buf)) +- goto next_cache; ++ if (CRASHDEBUG(2)) ++ for (j = 0; j < avail; j++) ++ fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); ++ } + +- if (ignore_cache(si, buf)) { +- fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); +- goto next_cache; +- } + /* + * If the shared list contains anything, gather them as well. + */ @@ -37937,8 +38509,7 @@ + si->current_cache_index = 0; + } -- if (reqname && !STREQ(reqname, buf)) -- goto next_cache; +- si->curname = buf; + if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], + sizeof(ulong) * vt->kmem_cache_len_nodes , "array nodelist array", + RETURN_ON_ERROR) || @@ -37950,10 +38521,10 @@ + return; + } -- if (ignore_cache(si, buf)) { -- fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); -- goto next_cache; -- } +- readmem(si->cache+OFFSET(kmem_cache_s_objsize), +- KVADDR, &tmp_val, sizeof(uint), +- "objsize", FAULT_ON_ERROR); +- si->size = (ulong)tmp_val; + if (avail > vt->kmem_max_limit) { + error(INFO, + "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", @@ -37963,19 +38534,6 @@ + return; + } -- si->curname = buf; -+ if (CRASHDEBUG(2)) -+ fprintf(fp, "%s: shared avail: %d\n", -+ si->curname, avail); - -- readmem(si->cache+OFFSET(kmem_cache_s_objsize), -- KVADDR, &tmp_val, sizeof(uint), -- "objsize", FAULT_ON_ERROR); -- si->size = (ulong)tmp_val; -+ readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache + si->current_cache_index, -+ sizeof(void *) * avail, "shared array_cache avail", -+ FAULT_ON_ERROR); - - if (!si->size) { - if (STREQ(si->curname, "kmem_cache")) - si->size = SIZE(kmem_cache_s); @@ -37985,6 +38543,22 @@ - si->errors++; - } - } ++ if (CRASHDEBUG(2)) ++ fprintf(fp, "%s: shared avail: %d\n", ++ si->curname, avail); + +- readmem(si->cache+OFFSET(kmem_cache_s_flags), +- KVADDR, &tmp_val, sizeof(uint), +- "kmem_cache_s flags", FAULT_ON_ERROR); +- si->c_flags = (ulong)tmp_val; ++ readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache + si->current_cache_index, ++ sizeof(void *) * avail, "shared array_cache avail", ++ FAULT_ON_ERROR); + +- readmem(si->cache+OFFSET(kmem_cache_s_gfporder), +- KVADDR, &tmp_val, sizeof(uint), +- "gfporder", FAULT_ON_ERROR); +- si->order = (ulong)tmp_val; + if ((si->current_cache_index + avail) > + (vt->kmem_max_limit * vt->kmem_cache_len_nodes)) { + error(INFO, @@ -37997,10 +38571,10 @@ + return; + } -- readmem(si->cache+OFFSET(kmem_cache_s_flags), -- KVADDR, &tmp_val, sizeof(uint), -- "kmem_cache_s flags", FAULT_ON_ERROR); -- si->c_flags = (ulong)tmp_val; +- readmem(si->cache+OFFSET(kmem_cache_s_num), +- KVADDR, &tmp_val, sizeof(uint), +- "kmem_cache_s num", FAULT_ON_ERROR); +- si->c_num = (ulong)tmp_val; + if (CRASHDEBUG(2)) + for (j = si->current_cache_index; j < (si->current_cache_index + avail); j++) + fprintf(fp, " %lx (shared 
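A recurring pattern in these hunks is to sanity-check any count read from the dumpfile (here array_cache.avail) against the precomputed maximum before using it to size a read, so a corrupted dump cannot drive an oversized readmem(). In miniature (illustrative helper; the fields and globals are the ones the patch already uses):

static int
checked_avail(struct meminfo *si, int avail)
{
	if (avail > vt->kmem_max_limit) {
		error(INFO,
		    "\"%s\" cache: avail %d greater than limit %ld\n",
		    si->curname, avail, vt->kmem_max_limit);
		si->errors++;
		return 0;	/* caller skips this array_cache */
	}

	return avail;
}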
list)\n", si->shared_array_cache[j]); @@ -38009,25 +38583,25 @@ + FREEBUF(start_address); +} -- readmem(si->cache+OFFSET(kmem_cache_s_gfporder), -- KVADDR, &tmp_val, sizeof(uint), -- "gfporder", FAULT_ON_ERROR); -- si->order = (ulong)tmp_val; +- do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); +/* + * Check whether a given address is contained in the previously-gathered + * percpu object cache. + */ -- readmem(si->cache+OFFSET(kmem_cache_s_num), -- KVADDR, &tmp_val, sizeof(uint), -- "kmem_cache_s num", FAULT_ON_ERROR); -- si->c_num = (ulong)tmp_val; +- if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { +- DUMP_KMEM_CACHE_INFO_V2(); +- if (CRASHDEBUG(3)) +- dump_struct("kmem_cache_s", si->cache, 0); +- } +static int +check_cpudata_list(struct meminfo *si, ulong obj) +{ + int i, j; -- do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); +- if (si->flags == GET_SLAB_PAGES) +- si->retval += (si->num_slabs * +- (si->slabsize/PAGESIZE())); + for (i = 0; i < vt->kmem_max_cpus; i++) { + for (j = 0; si->cpudata[i][j]; j++) + if (si->cpudata[i][j] == obj) { @@ -38036,50 +38610,54 @@ + } + } -- if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { -- DUMP_KMEM_CACHE_INFO_V2(); -- if (CRASHDEBUG(3)) -- dump_struct("kmem_cache_s", si->cache, 0); -- } +- if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { + return FALSE; +} -- if (si->flags == GET_SLAB_PAGES) -- si->retval += (si->num_slabs * -- (si->slabsize/PAGESIZE())); +- gather_cpudata_list_v2(si); +/* + * Check whether a given address is contained in the previously-gathered + * shared object cache. + */ -- if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { +- si->slab = (si->flags & ADDRESS_SPECIFIED) ? +- vaddr_to_slab(si->spec_addr) : 0; +static int +check_shared_list(struct meminfo *si, ulong obj) +{ + int i; -- gather_cpudata_list_v2(si); +- do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); + if (INVALID_MEMBER(kmem_list3_shared) || + !si->shared_array_cache) + return FALSE; -- si->slab = (si->flags & ADDRESS_SPECIFIED) ? -- vaddr_to_slab(si->spec_addr) : 0; -+ for (i = 0; si->shared_array_cache[i]; i++) { -+ if (si->shared_array_cache[i] == obj) -+ return TRUE; -+ } - -- do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); -+ return FALSE; -+} - - if (si->found) { - fprintf(fp, kmem_cache_hdr); - DUMP_KMEM_CACHE_INFO_V2(); - fprintf(fp, slab_hdr); - gather_slab_cached_count(si); - DUMP_SLAB_INFO(); ++ for (i = 0; si->shared_array_cache[i]; i++) { ++ if (si->shared_array_cache[i] == obj) ++ return TRUE; ++ } + +- switch (si->found) +- { +- case KMEM_BUFCTL_ADDR: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp,"(kmem_bufctl_t)\n"); +- break; ++ return FALSE; ++} + +- case KMEM_SLAB_ADDR: +- fprintf(fp, " %lx ", +- (ulong)si->spec_addr); +- fprintf(fp, "(slab)\n"); +- break; +/* + * Search the various memory subsystems for instances of this address. 
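check_cpudata_list() and check_shared_list() above are plain linear scans over the pointer arrays gathered earlier; the arrays are BZERO'd before being filled, so the scans stop at the first zero entry just as the patch's loops do. The same membership test written once for any such list (hypothetical helper):

static int
obj_on_gathered_list(ulong *list, ulong obj)
{
	int i;

	for (i = 0; list[i]; i++)	/* gathered lists end at the first 0 */
		if (list[i] == obj)
			return TRUE;

	return FALSE;
}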
+ * Start with the most specific areas, ending up with at least the @@ -38097,26 +38675,6 @@ + ulong task; + struct task_context *tc; -- switch (si->found) -- { -- case KMEM_BUFCTL_ADDR: -- fprintf(fp, " %lx ", -- (ulong)si->spec_addr); -- fprintf(fp,"(kmem_bufctl_t)\n"); -- break; -+ pc->curcmd_flags &= ~HEADER_PRINTED; - -- case KMEM_SLAB_ADDR: -- fprintf(fp, " %lx ", -- (ulong)si->spec_addr); -- fprintf(fp, "(slab)\n"); -- break; -+ switch (mi->memtype) -+ { -+ case KVADDR: -+ vaddr = mi->spec_addr; -+ break; - - case KMEM_ON_SLAB: - fprintf(fp, " %lx ", - (ulong)si->spec_addr); @@ -38128,19 +38686,18 @@ - fprintf(fp, " %lx\n", - (ulong)si->spec_addr); - break; -+ case PHYSADDR: -+ vaddr = mi->spec_addr < VTOP(vt->high_memory) ? -+ PTOV(mi->spec_addr) : BADADDR; -+ break; -+ } ++ pc->curcmd_flags &= ~HEADER_PRINTED; - case KMEM_OBJECT_ADDR_INUSE: - fprintf(fp, free_inuse_hdr); - fprintf(fp, " [%lx]\n", - (ulong)si->spec_addr); - break; -+ orig_flags = mi->flags; -+ mi->retval = 0; ++ switch (mi->memtype) ++ { ++ case KVADDR: ++ vaddr = mi->spec_addr; ++ break; - case KMEM_OBJECT_ADDR_CACHED: - fprintf(fp, free_inuse_hdr); @@ -38149,6 +38706,16 @@ - (ulong)si->spec_addr, si->cpu); - break; - } ++ case PHYSADDR: ++ vaddr = mi->spec_addr < VTOP(vt->high_memory) ? ++ PTOV(mi->spec_addr) : BADADDR; ++ break; ++ } + +- break; ++ orig_flags = mi->flags; ++ mi->retval = 0; ++ + /* + * Check first for a possible symbolic display of the virtual + * address associated with mi->spec_addr or PTOV(mi->spec_addr). @@ -38160,8 +38727,7 @@ + fprintf(fp, "\n"); + } + } - -- break; ++ + /* + * Check for a valid mapped address. + */ @@ -38214,7 +38780,7 @@ + */ + mi->flags = orig_flags; + mi->retval = 0; -+ if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf)) { ++ if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf, VERBOSE)) { + BZERO(&tmp_meminfo, sizeof(struct meminfo)); + tmp_meminfo.spec_addr = vaddr; + tmp_meminfo.memtype = KVADDR; @@ -38307,25 +38873,6 @@ - ulong tmp, magic; - ulong kmem_slab_end; - char *kmem_slab_s_buf; -- -- si->slabsize = (power(2, si->order) * PAGESIZE()); -- -- kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); -- -- switch (cmd) -- { -- case SLAB_GET_COUNTS: -- si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); -- -- if (slab_data_saved(si)) -- return; -- -- si->num_slabs = si->inuse = 0; -- -- if (si->slab == kmem_slab_end) -- return; -- -- kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); + int n; + ulong ppstart, ppend; + struct node_table *nt; @@ -38358,11 +38905,7 @@ + return FALSE; + } -- do { -- if (received_SIGINT()) { -- FREEBUF(kmem_slab_s_buf); -- restart(0); -- } +- si->slabsize = (power(2, si->order) * PAGESIZE()); + for (n = 0; n < vt->numnodes; n++) { + nt = &vt->node_table[n]; + if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) @@ -38370,17 +38913,59 @@ + else + node_size = nt->size; -- readmem(si->slab, KVADDR, kmem_slab_s_buf, -- SIZE(kmem_slab_s), "kmem_slab_s buffer", -- FAULT_ON_ERROR); +- kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); + ppstart = nt->mem_map; + ppend = ppstart + (node_size * SIZE(page)); -- magic = ULONG(kmem_slab_s_buf + -- OFFSET(kmem_slab_s_s_magic)); +- switch (cmd) +- { +- case SLAB_GET_COUNTS: +- si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); + if ((addr < ppstart) || (addr >= ppend)) + continue; +- if (slab_data_saved(si)) +- return; ++ /* ++ * We're in the mem_map range -- but it is a page pointer? 
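The is_page_ptr() logic reworked here decides whether an arbitrary kernel address really is a struct page: it must fall inside some node's mem_map[] and sit on a SIZE(page) boundary, and from the page index the physical address follows directly. Condensed to a single-node check (illustrative sketch; the nt fields are the ones the hunk reads):

static int
addr_is_page_ptr(ulong addr, struct node_table *nt, ulong node_size,
		 physaddr_t *phys)
{
	ulong ppstart = nt->mem_map;
	ulong ppend = ppstart + (node_size * SIZE(page));
	ulong pgnum;

	if ((addr < ppstart) || (addr >= ppend))
		return FALSE;
	if ((addr - ppstart) % SIZE(page))
		return FALSE;	/* inside mem_map, but not a page boundary */

	if (phys) {
		pgnum = (addr - nt->mem_map) / SIZE(page);
		*phys = ((physaddr_t)pgnum * PAGESIZE()) + nt->start_paddr;
	}

	return TRUE;
}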
++ */ ++ if ((addr - ppstart) % SIZE(page)) ++ return FALSE; + +- si->num_slabs = si->inuse = 0; ++ if (phys) { ++ pgnum = (addr - nt->mem_map) / SIZE(page); ++ *phys = ((physaddr_t)pgnum * PAGESIZE()) + nt->start_paddr; ++ } + +- if (si->slab == kmem_slab_end) +- return; ++ return TRUE; ++ } + +- kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); ++ return FALSE; + +- do { +- if (received_SIGINT()) { +- FREEBUF(kmem_slab_s_buf); +- restart(0); +- } ++#ifdef PRE_NODES ++ ppstart = vt->mem_map; ++ ppend = ppstart + (vt->total_pages * vt->page_struct_len); + +- readmem(si->slab, KVADDR, kmem_slab_s_buf, +- SIZE(kmem_slab_s), "kmem_slab_s buffer", +- FAULT_ON_ERROR); ++ if ((addr < ppstart) || (addr >= ppend)) ++ return FALSE; + +- magic = ULONG(kmem_slab_s_buf + +- OFFSET(kmem_slab_s_s_magic)); ++ if ((addr - ppstart) % vt->page_struct_len) ++ return FALSE; + - if (magic == SLAB_MAGIC_ALLOC) { - - tmp = ULONG(kmem_slab_s_buf + @@ -38405,52 +38990,79 @@ - FREEBUF(kmem_slab_s_buf); - save_slab_data(si); - break; -+ /* -+ * We're in the mem_map range -- but it is a page pointer? -+ */ -+ if ((addr - ppstart) % SIZE(page)) -+ return FALSE; ++ return TRUE; ++#endif ++} ++ ++/* ++ * Return the physical address associated with this page pointer. ++ */ ++static int ++page_to_phys(ulong pp, physaddr_t *phys) ++{ ++ return(is_page_ptr(pp, phys)); ++} - case SLAB_WALKTHROUGH: - if (!si->slab) - si->slab = ULONG(si->cache_buf + - OFFSET(kmem_cache_s_c_firstp)); -+ if (phys) { -+ pgnum = (addr - nt->mem_map) / SIZE(page); -+ *phys = ((physaddr_t)pgnum * PAGESIZE()) + nt->start_paddr; -+ } - if (si->slab == kmem_slab_end) - return; -+ return TRUE; -+ } ++/* ++ * Return the page pointer associated with this physical address. ++ */ ++static int ++phys_to_page(physaddr_t phys, ulong *pp) ++{ ++ int n; ++ ulong pgnum; ++ struct node_table *nt; ++ physaddr_t pstart, pend; ++ ulong node_size; - if (CRASHDEBUG(1)) { - fprintf(fp, "search cache: [%s] ", si->curname); - if (si->flags & ADDRESS_SPECIFIED) - fprintf(fp, "for %llx", si->spec_addr); - fprintf(fp, "\n"); -- } -+ return FALSE; ++ if (IS_SPARSEMEM()) { ++ ulong map; ++ map = pfn_to_map(phys >> PAGESHIFT()); ++ if (map) { ++ *pp = map; ++ return TRUE; + } ++ return FALSE; ++ } - si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); -+#ifdef PRE_NODES -+ ppstart = vt->mem_map; -+ ppend = ppstart + (vt->total_pages * vt->page_struct_len); ++ for (n = 0; n < vt->numnodes; n++) { ++ nt = &vt->node_table[n]; ++ if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) ++ node_size = vt->max_mapnr; ++ else ++ node_size = nt->size; - do { - if (received_SIGINT()) { - FREEBUF(kmem_slab_s_buf); - restart(0); - } -+ if ((addr < ppstart) || (addr >= ppend)) -+ return FALSE; ++ pstart = nt->start_paddr; ++ pend = pstart + ((ulonglong)node_size * PAGESIZE()); - readmem(si->slab, KVADDR, kmem_slab_s_buf, - SIZE(kmem_slab_s), "kmem_slab_s buffer", - FAULT_ON_ERROR); -+ if ((addr - ppstart) % vt->page_struct_len) -+ return FALSE; ++ if ((phys < pstart) || (phys >= pend)) ++ continue; ++ /* ++ * We're in the physical range -- calculate the page. ++ */ ++ pgnum = BTOP(phys - pstart); ++ *pp = nt->mem_map + (pgnum * SIZE(page)); - dump_slab(si); - @@ -38463,61 +39075,57 @@ - OFFSET(kmem_slab_s_s_nextp)); - - } while (si->slab != kmem_slab_end); -+ return TRUE; -+#endif -+} ++ return TRUE; ++ } - FREEBUF(kmem_slab_s_buf); - break; - } -+/* -+ * Return the physical address associated with this page pointer. 
-+ */ -+static int -+page_to_phys(ulong pp, physaddr_t *phys) -+{ -+ return(is_page_ptr(pp, phys)); ++ return FALSE; ++ ++#ifdef PRE_NODES ++ if (phys >= (vt->total_pages * PAGESIZE())) ++ return FALSE; ++ ++ pgnum = PTOB(BTOP(phys)) / PAGESIZE(); ++ *pp = vt->mem_map + (pgnum * vt->page_struct_len); ++ ++ return TRUE; ++#endif } /* - * do_slab_chain() adapted for newer percpu slab format. -+ * Return the page pointer associated with this physical address. ++ * Try to read a string of non-NULL characters from a memory location, ++ * returning the number of characters read. */ -+static int -+phys_to_page(physaddr_t phys, ulong *pp) ++int ++read_string(ulong kvaddr, char *buf, int maxlen) +{ -+ int n; -+ ulong pgnum; -+ struct node_table *nt; -+ physaddr_t pstart, pend; -+ ulong node_size; ++ char strbuf[MIN_PAGE_SIZE]; ++ ulong kp; ++ char *bufptr; ++ long cnt, size; -#define SLAB_BASE(X) (PTOB(BTOP(X))) - -#define INSLAB_PERCPU(obj, si) \ - ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem)) -+ if (IS_SPARSEMEM()) { -+ ulong map; -+ map = pfn_to_map(phys >> PAGESHIFT()); -+ if (map) { -+ *pp = map; -+ return TRUE; -+ } -+ return FALSE; -+ } ++ BZERO(buf, maxlen); ++ BZERO(strbuf, MIN_PAGE_SIZE); -#define SLAB_CHAINS (3) -+ for (n = 0; n < vt->numnodes; n++) { -+ nt = &vt->node_table[n]; -+ if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) -+ node_size = vt->max_mapnr; -+ else -+ node_size = nt->size; ++ kp = kvaddr; ++ bufptr = strbuf; ++ size = maxlen; -static char *slab_chain_name_v1[] = {"full", "partial", "free"}; -+ pstart = nt->start_paddr; -+ pend = pstart + ((ulonglong)node_size * PAGESIZE()); ++ while (size > 0) { ++ cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); ++ ++ if (cnt > size) ++ cnt = size; -static void -do_slab_chain_percpu_v1(long cmd, struct meminfo *si) @@ -38528,19 +39136,15 @@ - ulong specified_slab; - ulong last; - ulong slab_chains[SLAB_CHAINS]; -+ if ((phys < pstart) || (phys >= pend)) -+ continue; -+ /* -+ * We're in the physical range -- calculate the page. 
-+ */ -+ pgnum = BTOP(phys - pstart); -+ *pp = nt->mem_map + (pgnum * SIZE(page)); ++ if (!readmem(kp, KVADDR, bufptr, cnt, ++ "readstring characters", QUIET|RETURN_ON_ERROR)) ++ break; - list_borked = 0; - si->slabsize = (power(2, si->order) * PAGESIZE()); - si->cpucached_slab = 0; -+ return TRUE; -+ } ++ if (count_buffer_chars(bufptr, NULLCHAR, cnt)) ++ break; - if (VALID_MEMBER(kmem_cache_s_slabs)) { - slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs); @@ -38550,17 +39154,19 @@ - slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full); - slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial); - slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free); -- } -+ return FALSE; ++ kp += cnt; ++ bufptr += cnt; ++ size -= cnt; + } - if (CRASHDEBUG(1)) { - fprintf(fp, "[ %s: %lx ", si->curname, si->cache); - fprintf(fp, "full: %lx partial: %lx free: %lx ]\n", - slab_chains[0], slab_chains[1], slab_chains[2]); - } -+#ifdef PRE_NODES -+ if (phys >= (vt->total_pages * PAGESIZE())) -+ return FALSE; ++ strcpy(buf, strbuf); ++ return (strlen(buf)); ++} - switch (cmd) - { @@ -38570,132 +39176,6 @@ - si->cpucached_cache = 0; - si->num_slabs = si->inuse = 0; - gather_cpudata_list_v1(si); -+ pgnum = PTOB(BTOP(phys)) / PAGESIZE(); -+ *pp = vt->mem_map + (pgnum * vt->page_struct_len); -+ -+ return TRUE; -+#endif -+} - -- slab_s_buf = GETBUF(SIZE(slab_s)); - -- for (s = 0; s < SLAB_CHAINS; s++) { -+/* -+ * Try to read a string of non-NULL characters from a memory location, -+ * returning the number of characters read. -+ */ -+int -+read_string(ulong kvaddr, char *buf, int maxlen) -+{ -+ char strbuf[MIN_PAGE_SIZE]; -+ ulong kp; -+ char *bufptr; -+ long cnt, size; - -- if (!slab_chains[s]) -- continue; -+ BZERO(buf, maxlen); -+ BZERO(strbuf, MIN_PAGE_SIZE); - -- if (!readmem(slab_chains[s], -- KVADDR, &si->slab, sizeof(ulong), -- "first slab", QUIET|RETURN_ON_ERROR)) { -- error(INFO, -- "%s: %s list: bad slab pointer: %lx\n", -- si->curname, slab_chain_name_v1[s], -- slab_chains[s]); -- list_borked = 1; -- continue; -- } -- -- if (slab_data_saved(si)) { -- FREEBUF(slab_s_buf); -- return; -- } -- -- if (si->slab == slab_chains[s]) -- continue; -- -- last = slab_chains[s]; -+ kp = kvaddr; -+ bufptr = strbuf; -+ size = maxlen; - -- do { -- if (received_SIGINT()) { -- FREEBUF(slab_s_buf); -- restart(0); -- } -+ while (size > 0) { -+ cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); -+ -+ if (cnt > size) -+ cnt = size; - -- if (!verify_slab_v1(si, last, s)) { -- list_borked = 1; -- continue; -- } -- last = si->slab - OFFSET(slab_s_list); -- -- readmem(si->slab, KVADDR, slab_s_buf, -- SIZE(slab_s), "slab_s buffer", -- FAULT_ON_ERROR); -- -- tmp = INT(slab_s_buf + OFFSET(slab_s_inuse)); -- si->inuse += tmp; -- -- if (ACTIVE()) -- gather_cpudata_list_v1(si); -+ if (!readmem(kp, KVADDR, bufptr, cnt, -+ "readstring characters", QUIET|RETURN_ON_ERROR)) -+ break; - -- si->s_mem = ULONG(slab_s_buf + -- OFFSET(slab_s_s_mem)); -- gather_slab_cached_count(si); -- -- si->num_slabs++; -- -- si->slab = ULONG(slab_s_buf + -- OFFSET(slab_s_list)); -- si->slab -= OFFSET(slab_s_list); -+ if (count_buffer_chars(bufptr, NULLCHAR, cnt)) -+ break; - -- /* -- * Check for slab transition. 
(Tony Dziedzic) -- */ -- for (i = 0; i < SLAB_CHAINS; i++) { -- if ((i != s) && -- (si->slab == slab_chains[i])) { -- error(NOTE, -- "%s: slab chain inconsistency: %s list\n", -- si->curname, -- slab_chain_name_v1[s]); -- list_borked = 1; -- } -- } -- -- } while (si->slab != slab_chains[s] && !list_borked); -- } -+ kp += cnt; -+ bufptr += cnt; -+ size -= cnt; -+ } - -- FREEBUF(slab_s_buf); -- if (!list_borked) -- save_slab_data(si); -- break; -+ strcpy(buf, strbuf); -+ return (strlen(buf)); -+} - -- case SLAB_WALKTHROUGH: -- specified_slab = si->slab; -- si->flags |= SLAB_WALKTHROUGH; -- si->flags &= ~SLAB_GET_COUNTS; +/* + * "help -v" output + */ @@ -38707,9 +39187,7 @@ + int others; + ulong *up; -- for (s = 0; s < SLAB_CHAINS; s++) { -- if (!slab_chains[s]) -- continue; +- slab_s_buf = GETBUF(SIZE(slab_s)); + others = 0; + fprintf(fp, " flags: %lx %s(", + vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : ""); @@ -38752,24 +39230,7 @@ + if (vt->flags & VM_EVENT) + fprintf(fp, "%sVM_EVENT", others++ ? "|" : "");\ -- if (!specified_slab) { -- if (!readmem(slab_chains[s], -- KVADDR, &si->slab, sizeof(ulong), -- "slabs", QUIET|RETURN_ON_ERROR)) { -- error(INFO, -- "%s: %s list: bad slab pointer: %lx\n", -- si->curname, -- slab_chain_name_v1[s], -- slab_chains[s]); -- list_borked = 1; -- continue; -- } -- last = slab_chains[s]; -- } else -- last = 0; -- -- if (si->slab == slab_chains[s]) -- continue; +- for (s = 0; s < SLAB_CHAINS; s++) { + fprintf(fp, ")\n"); + if (vt->kernel_pgd[0] == vt->kernel_pgd[1]) + fprintf(fp, " kernel_pgd[NR_CPUS]: %lx ...\n", @@ -38819,16 +39280,8 @@ + fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); + } -- if (CRASHDEBUG(1)) { -- fprintf(fp, "search cache: [%s] ", si->curname); -- if (si->flags & ADDRESS_SPECIFIED) -- fprintf(fp, "for %llx", si->spec_addr); -- fprintf(fp, "\n"); -- } -- -- do { -- if (received_SIGINT()) -- restart(0); +- if (!slab_chains[s]) +- continue; + fprintf(fp, " dump_free_pages: "); + if (vt->dump_free_pages == dump_free_pages) + fprintf(fp, "dump_free_pages()\n"); @@ -38841,25 +39294,26 @@ + else + fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages); -- if (!verify_slab_v1(si, last, s)) { -- list_borked = 1; -- continue; -- } -- last = si->slab - OFFSET(slab_s_list); +- if (!readmem(slab_chains[s], +- KVADDR, &si->slab, sizeof(ulong), +- "first slab", QUIET|RETURN_ON_ERROR)) { +- error(INFO, +- "%s: %s list: bad slab pointer: %lx\n", +- si->curname, slab_chain_name_v1[s], +- slab_chains[s]); +- list_borked = 1; +- continue; +- } - -- dump_slab_percpu_v1(si); -- -- if (si->found) { -- return; -- } -- -- readmem(si->slab+OFFSET(slab_s_list), -- KVADDR, &si->slab, sizeof(ulong), -- "slab list", FAULT_ON_ERROR); -- -- si->slab -= OFFSET(slab_s_list); +- if (slab_data_saved(si)) { +- FREEBUF(slab_s_buf); +- return; +- } - -- } while (si->slab != slab_chains[s] && !list_borked); +- if (si->slab == slab_chains[s]) +- continue; +- +- last = slab_chains[s]; + fprintf(fp, " dump_kmem_cache: "); + if (vt->dump_kmem_cache == dump_kmem_cache) + fprintf(fp, "dump_kmem_cache()\n"); @@ -38888,13 +39342,11 @@ + for (i = 0; i < vt->node_online_map_len; i++) { + fprintf(fp, "%s%lx", i ? ", " : "[", *up); + up++; - } -- -- break; ++ } + fprintf(fp, "]\n"); + } else { + fprintf(fp, " node_online_map: (unused)\n"); - } ++ } + fprintf(fp, " nr_vm_stat_items: %d\n", vt->nr_vm_stat_items); + fprintf(fp, " vm_stat_items: %s", (vt->flags & VM_STAT) ? 
+ "\n" : "(not used)\n"); @@ -38906,60 +39358,67 @@ + "\n" : "(not used)\n"); + for (i = 0; i < vt->nr_vm_event_items; i++) + fprintf(fp, " [%d] %s\n", i, vt->vm_event_items[i]); -+ -+ dump_vma_cache(VERBOSE); - } - /* -- * Try to preclude any attempt to translate a bogus slab structure. +- do { +- if (received_SIGINT()) { +- FREEBUF(slab_s_buf); +- restart(0); +- } ++ dump_vma_cache(VERBOSE); ++} + +- if (!verify_slab_v1(si, last, s)) { +- list_borked = 1; +- continue; +- } +- last = si->slab - OFFSET(slab_s_list); +- +- readmem(si->slab, KVADDR, slab_s_buf, +- SIZE(slab_s), "slab_s buffer", +- FAULT_ON_ERROR); +- +- tmp = INT(slab_s_buf + OFFSET(slab_s_inuse)); +- si->inuse += tmp; +- +- if (ACTIVE()) +- gather_cpudata_list_v1(si); ++/* + * Calculate the amount of memory referenced in the kernel-specific "nodes". - */ -- --static int --verify_slab_v1(struct meminfo *si, ulong last, int s) ++ */ +uint64_t +total_node_memory() - { -- char slab_s_buf[BUFSIZE]; -- struct kernel_list_head *list_head; -- unsigned int inuse; -- ulong s_mem; -- char *list; -- int errcnt; -- -- list = slab_chain_name_v1[s]; -- -- errcnt = 0; -- -- if (!readmem(si->slab, KVADDR, slab_s_buf, -- SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { -- error(INFO, "%s: %s list: bad slab pointer: %lx\n", -- si->curname, list, si->slab); -- return FALSE; -- } -- -- list_head = (struct kernel_list_head *) -- (slab_s_buf + OFFSET(slab_s_list)); ++{ + int i; + struct node_table *nt; + uint64_t total; -- if (!IS_KVADDR((ulong)list_head->next) || -- !accessible((ulong)list_head->next)) { -- error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", -- si->curname, list, si->slab, -- (ulong)list_head->next); -- errcnt++; -- } +- si->s_mem = ULONG(slab_s_buf + +- OFFSET(slab_s_s_mem)); +- gather_slab_cached_count(si); +- +- si->num_slabs++; +- +- si->slab = ULONG(slab_s_buf + +- OFFSET(slab_s_list)); +- si->slab -= OFFSET(slab_s_list); + for (i = total = 0; i < vt->numnodes; i++) { + nt = &vt->node_table[i]; -- if (last && (last != (ulong)list_head->prev)) { -- error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", -- si->curname, list, si->slab, -- (ulong)list_head->prev); -- errcnt++; -- } +- /* +- * Check for slab transition. (Tony Dziedzic) +- */ +- for (i = 0; i < SLAB_CHAINS; i++) { +- if ((i != s) && +- (si->slab == slab_chains[i])) { +- error(NOTE, +- "%s: slab chain inconsistency: %s list\n", +- si->curname, +- slab_chain_name_v1[s]); +- list_borked = 1; +- } +- } +- +- } while (si->slab != slab_chains[s] && !list_borked); + if (CRASHDEBUG(1)) { + console("node_table[%d]: \n", i); + console(" id: %d\n", nt->node_id); @@ -38969,7 +39428,176 @@ + console(" mem_map: %lx\n", nt->mem_map); + console(" start_paddr: %lx\n", nt->start_paddr); + console(" start_mapnr: %ld\n", nt->start_mapnr); -+ } + } + +- FREEBUF(slab_s_buf); +- if (!list_borked) +- save_slab_data(si); +- break; ++ if (nt->present) ++ total += (uint64_t)((uint64_t)nt->present * (uint64_t)PAGESIZE()); ++ else ++ total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); ++ } + +- case SLAB_WALKTHROUGH: +- specified_slab = si->slab; +- si->flags |= SLAB_WALKTHROUGH; +- si->flags &= ~SLAB_GET_COUNTS; ++ return total; ++} + +- for (s = 0; s < SLAB_CHAINS; s++) { +- if (!slab_chains[s]) +- continue; ++/* ++ * Dump just the vm_area_struct cache table data so that it can be ++ * called from above or for debug purposes. 
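total_node_memory() above prefers a node's present page count over its spanned size when the kernel provides one, and keeps the multiplication in 64 bits so large nodes cannot overflow a 32-bit ulong. The per-node term reduces to (sketch; helper name invented):

static uint64_t
node_bytes(struct node_table *nt)
{
	uint64_t pages = nt->present ? nt->present : nt->size;

	return pages * (uint64_t)PAGESIZE();	/* 64-bit multiply throughout */
}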
++ */ ++void ++dump_vma_cache(ulong verbose) ++{ ++ int i; ++ ulong vhits; + +- if (!specified_slab) { +- if (!readmem(slab_chains[s], +- KVADDR, &si->slab, sizeof(ulong), +- "slabs", QUIET|RETURN_ON_ERROR)) { +- error(INFO, +- "%s: %s list: bad slab pointer: %lx\n", +- si->curname, +- slab_chain_name_v1[s], +- slab_chains[s]); +- list_borked = 1; +- continue; +- } +- last = slab_chains[s]; +- } else +- last = 0; +- +- if (si->slab == slab_chains[s]) +- continue; ++ if (!verbose) ++ goto show_hits; + +- if (CRASHDEBUG(1)) { +- fprintf(fp, "search cache: [%s] ", si->curname); +- if (si->flags & ADDRESS_SPECIFIED) +- fprintf(fp, "for %llx", si->spec_addr); +- fprintf(fp, "\n"); +- } +- +- do { +- if (received_SIGINT()) +- restart(0); ++ for (i = 0; i < VMA_CACHE; i++) ++ fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", ++ i, vt->cached_vma[i], ++ vt->cached_vma_hits[i]); ++ fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); ++ fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); ++ fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); ++ fflush(fp); + +- if (!verify_slab_v1(si, last, s)) { +- list_borked = 1; +- continue; +- } +- last = si->slab - OFFSET(slab_s_list); +- +- dump_slab_percpu_v1(si); +- +- if (si->found) { +- return; +- } +- +- readmem(si->slab+OFFSET(slab_s_list), +- KVADDR, &si->slab, sizeof(ulong), +- "slab list", FAULT_ON_ERROR); +- +- si->slab -= OFFSET(slab_s_list); +- +- } while (si->slab != slab_chains[s] && !list_borked); +- } ++show_hits: ++ if (vt->vma_cache_fills) { ++ for (i = vhits = 0; i < VMA_CACHE; i++) ++ vhits += vt->cached_vma_hits[i]; + +- break; +- } ++ fprintf(stderr, "%s vma hit rate: %2ld%% (%ld of %ld)\n", ++ verbose ? "" : " ", ++ (vhits * 100)/vt->vma_cache_fills, ++ vhits, vt->vma_cache_fills); ++ } + } + + /* +- * Try to preclude any attempt to translate a bogus slab structure. ++ * Guess at the "real" amount of physical memory installed, formatting ++ * it in a MB or GB based string. 
+ */ +- +-static int +-verify_slab_v1(struct meminfo *si, ulong last, int s) ++char * ++get_memory_size(char *buf) + { +- char slab_s_buf[BUFSIZE]; +- struct kernel_list_head *list_head; +- unsigned int inuse; +- ulong s_mem; +- char *list; +- int errcnt; +- +- list = slab_chain_name_v1[s]; ++ uint64_t total; ++ ulong next_gig; ++#ifdef OLDWAY ++ ulong mbs, gbs; ++#endif + +- errcnt = 0; ++ total = machdep->memory_size(); + +- if (!readmem(si->slab, KVADDR, slab_s_buf, +- SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { +- error(INFO, "%s: %s list: bad slab pointer: %lx\n", +- si->curname, list, si->slab); +- return FALSE; +- } ++ if ((next_gig = roundup(total, GIGABYTES(1)))) { ++ if ((next_gig - total) <= MEGABYTES(64)) ++ total = next_gig; ++ } + +- list_head = (struct kernel_list_head *) +- (slab_s_buf + OFFSET(slab_s_list)); ++ return (pages_to_size((ulong)(total/PAGESIZE()), buf)); + +- if (!IS_KVADDR((ulong)list_head->next) || +- !accessible((ulong)list_head->next)) { +- error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", +- si->curname, list, si->slab, +- (ulong)list_head->next); +- errcnt++; +- } ++#ifdef OLDWAY ++ gbs = (ulong)(total/GIGABYTES(1)); ++ mbs = (ulong)(total/MEGABYTES(1)); ++ if (gbs) ++ mbs = (total % GIGABYTES(1))/MEGABYTES(1); + +- if (last && (last != (ulong)list_head->prev)) { +- error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", +- si->curname, list, si->slab, +- (ulong)list_head->prev); +- errcnt++; +- } ++ if (total%MEGABYTES(1)) ++ mbs++; - inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); - if (inuse > si->c_num) { @@ -38977,15 +39605,15 @@ - si->curname, list, si->slab, inuse); - errcnt++; - } -+ if (nt->present) -+ total += (uint64_t)((uint64_t)nt->present * (uint64_t)PAGESIZE()); -+ else -+ total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); -+ } ++ if (gbs) ++ sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs); ++ else ++ sprintf(buf, "%ld MB", mbs); - if (!last) - goto no_inuse_check_v1; -+ return total; ++ return buf; ++#endif +} - switch (s) @@ -39000,14 +39628,14 @@ - } - break; +/* -+ * Dump just the vm_area_struct cache table data so that it can be -+ * called from above or for debug purposes. -+ */ -+void -+dump_vma_cache(ulong verbose) ++ * For use by architectures not having machine-specific manners for ++ * best determining physical memory size. ++ */ ++uint64_t ++generic_memory_size(void) +{ -+ int i; -+ ulong vhits; ++ if (machdep->memsize) ++ return machdep->memsize; - case 1: /* partial */ - if ((inuse == 0) || (inuse == si->c_num)) { @@ -39017,8 +39645,8 @@ - errcnt++; - } - break; -+ if (!verbose) -+ goto show_hits; ++ return (machdep->memsize = total_node_memory()); ++} - case 2: /* free */ - if (inuse > 0) { @@ -39028,15 +39656,22 @@ - errcnt++; - } - break; -- } -+ for (i = 0; i < VMA_CACHE; i++) -+ fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", -+ i, vt->cached_vma[i], -+ vt->cached_vma_hits[i]); -+ fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); -+ fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); -+ fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); -+ fflush(fp); ++/* ++ * Determine whether a virtual address is user or kernel or ambiguous. 
++ */ ++int ++vaddr_type(ulong vaddr, struct task_context *tc) ++{ ++ int memtype, found; ++ ++ if (!tc) ++ tc = CURRENT_CONTEXT(); ++ memtype = found = 0; ++ ++ if (machdep->is_uvaddr(vaddr, tc)) { ++ memtype |= UVADDR; ++ found++; + } -no_inuse_check_v1: - s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); @@ -39044,32 +39679,53 @@ - error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", - si->curname, list, si->slab, s_mem); - errcnt++; -- } -+show_hits: -+ if (vt->vma_cache_fills) { -+ for (i = vhits = 0; i < VMA_CACHE; i++) -+ vhits += vt->cached_vma_hits[i]; ++ if (machdep->is_kvaddr(vaddr)) { ++ memtype |= KVADDR; ++ found++; + } - return(errcnt ? FALSE : TRUE); -+ fprintf(stderr, "%s vma hit rate: %2ld%% (%ld of %ld)\n", -+ verbose ? "" : " ", -+ (vhits * 100)/vt->vma_cache_fills, -+ vhits, vt->vma_cache_fills); -+ } ++ if (found == 1) ++ return memtype; ++ else ++ return AMBIGUOUS; } /* - * Updated for 2.6 slab substructure. -+ * Guess at the "real" amount of physical memory installed, formatting -+ * it in a MB or GB based string. ++ * Determine the first valid user space address */ -- ++static int ++address_space_start(struct task_context *tc, ulong *addr) ++{ ++ ulong vma; ++ char *vma_buf; + -static char *slab_chain_name_v2[] = {"partial", "full", "free"}; -- ++ if (!tc->mm_struct) ++ return FALSE; + -static void -do_slab_chain_percpu_v2(long cmd, struct meminfo *si) -+char * -+get_memory_size(char *buf) ++ fill_mm_struct(tc->mm_struct); ++ vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); ++ if (!vma) ++ return FALSE; ++ vma_buf = fill_vma_cache(vma); ++ *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); ++ ++ return TRUE; ++} ++ ++/* ++ * Search for a given value between a starting and ending address range, ++ * applying an optional mask for "don't care" bits. As an alternative ++ * to entering the starting address value, -k means "start of kernel address ++ * space". For processors with ambiguous user/kernel address spaces, ++ * -u or -k must be used (with or without -s) as a differentiator. ++ */ ++void ++cmd_search(void) { - int i, tmp, s; - int list_borked; @@ -39077,16 +39733,20 @@ - ulong specified_slab; - ulong last; - ulong slab_chains[SLAB_CHAINS]; -+ uint64_t total; -+ ulong next_gig; -+#ifdef OLDWAY -+ ulong mbs, gbs; -+#endif ++ int c; ++ ulong start, end, mask, memtype, len; ++ ulong uvaddr_end; ++ int sflag; ++ struct meminfo meminfo; ++ ulong value_array[MAXARGS]; ++ struct syment *sp; - list_borked = 0; - si->slabsize = (power(2, si->order) * PAGESIZE()); - si->cpucached_slab = 0; -+ total = machdep->memory_size(); ++ start = end = mask = sflag = memtype = len = 0; ++ uvaddr_end = COMMON_VADDR_SPACE() ? 
(ulong)(-1) : machdep->kvbase; ++ BZERO(value_array, sizeof(ulong) * MAXARGS); - slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) + - OFFSET(kmem_list3_slabs_partial); @@ -39094,47 +39754,105 @@ - OFFSET(kmem_list3_slabs_full); - slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) + - OFFSET(kmem_list3_slabs_free); -+ if ((next_gig = roundup(total, GIGABYTES(1)))) { -+ if ((next_gig - total) <= MEGABYTES(64)) -+ total = next_gig; -+ } ++ while ((c = getopt(argcnt, args, "l:uks:e:v:m:")) != EOF) { ++ switch(c) ++ { ++ case 'u': ++ if (!sflag) { ++ address_space_start(CURRENT_CONTEXT(),&start); ++ sflag++; ++ } ++ memtype = UVADDR; ++ sflag++; ++ break; - if (CRASHDEBUG(1)) { - fprintf(fp, "[ %s: %lx ", si->curname, si->cache); - fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", - slab_chains[0], slab_chains[1], slab_chains[2]); -- } -+ return (pages_to_size((ulong)(total/PAGESIZE()), buf)); ++ case 'k': ++ if (!sflag) { ++ start = machdep->kvbase; ++ if (machine_type("IA64") && ++ (start < machdep->identity_map_base) && ++ (kt->stext > start)) ++ start = kt->stext; ++ sflag++; ++ } ++ memtype = KVADDR; ++ sflag++; ++ break; ++ ++ case 's': ++ if ((sp = symbol_search(optarg))) ++ start = sp->value; ++ else ++ start = htol(optarg, FAULT_ON_ERROR, NULL); ++ sflag++; ++ break; ++ ++ case 'e': ++ if ((sp = symbol_search(optarg))) ++ end = sp->value; ++ else ++ end = htol(optarg, FAULT_ON_ERROR, NULL); ++ break; ++ ++ case 'l': ++ len = stol(optarg, FAULT_ON_ERROR, NULL); ++ break; ++ ++ case 'm': ++ mask = htol(optarg, FAULT_ON_ERROR, NULL); ++ break; ++ ++ default: ++ argerrs++; ++ break; ++ } + } - switch (cmd) -- { ++ if (argerrs || !sflag || !args[optind] || (len && end)) ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ ++ if (!memtype) ++ memtype = vaddr_type(start, CURRENT_CONTEXT()); ++ ++ switch (memtype) + { - case SLAB_GET_COUNTS: - si->flags |= SLAB_GET_COUNTS; - si->flags &= ~SLAB_WALKTHROUGH; - si->cpucached_cache = 0; - si->num_slabs = si->inuse = 0; - gather_cpudata_list_v2(si); -+#ifdef OLDWAY -+ gbs = (ulong)(total/GIGABYTES(1)); -+ mbs = (ulong)(total/MEGABYTES(1)); -+ if (gbs) -+ mbs = (total % GIGABYTES(1))/MEGABYTES(1); -+ -+ if (total%MEGABYTES(1)) -+ mbs++; ++ case UVADDR: ++ if (!IS_UVADDR(start, CURRENT_CONTEXT())) { ++ error(INFO, "invalid user virtual address: %lx\n", ++ start); ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ } ++ break; - slab_buf = GETBUF(SIZE(slab)); -+ if (gbs) -+ sprintf(buf, mbs ? 
"%ld GB %ld MB" : "%ld GB", gbs, mbs); -+ else -+ sprintf(buf, "%ld MB", mbs); ++ case KVADDR: ++ if (!IS_KVADDR(start)) { ++ error(INFO, "invalid kernel virtual address: %lx\n", ++ start); ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ } ++ break; - for (s = 0; s < SLAB_CHAINS; s++) { - if (!slab_chains[s]) - continue; -+ return buf; -+#endif -+} ++ case AMBIGUOUS: ++ error(INFO, ++ "ambiguous virtual address: %lx (requires -u or -k)\n", ++ start); ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ } - if (!readmem(slab_chains[s], - KVADDR, &si->slab, sizeof(ulong), @@ -39151,29 +39869,65 @@ - if (slab_data_saved(si)) { - FREEBUF(slab_buf); - return; -- } ++ if (!end && !len) { ++ switch (memtype) ++ { ++ case UVADDR: ++ end = uvaddr_end; ++ break; ++ ++ case KVADDR: ++ if (vt->vmalloc_start < machdep->identity_map_base) ++ end = (ulong)(-1); ++ else { ++ meminfo.memtype = KVADDR; ++ meminfo.spec_addr = 0; ++ meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); ++ dump_vmlist(&meminfo); ++ end = meminfo.retval; ++ if (end < start) ++ end = (ulong)(-1); + } - - if (si->slab == slab_chains[s]) - continue; - - last = slab_chains[s]; -+/* -+ * For use by architectures not having machine-specific manners for -+ * best determining physical memory size. -+ */ -+uint64_t -+generic_memory_size(void) -+{ -+ if (machdep->memsize) -+ return machdep->memsize; ++ break; ++ } ++ } else if (len) ++ end = start + len; ++ ++ switch (memtype) ++ { ++ case UVADDR: ++ if (end > uvaddr_end) { ++ error(INFO, ++ "address range starts in user space and ends kernel space\n"); ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ } ++ /* FALLTHROUGH */ ++ case KVADDR: ++ if (end < start) { ++ error(INFO, ++ "ending address %lx is below starting address %lx\n", ++ end, start); ++ cmd_usage(pc->curcmd, SYNOPSIS); ++ } ++ break; ++ } - do { - if (received_SIGINT()) { - FREEBUF(slab_buf); - restart(0); - } -+ return (machdep->memsize = total_node_memory()); -+} ++ c = 0; ++ while (args[optind]) { ++ value_array[c] = htol(args[optind], FAULT_ON_ERROR, NULL); ++ c++; ++ optind++; ++ } - if (!verify_slab_v2(si, last, s)) { - list_borked = 1; @@ -39190,13 +39944,8 @@ - - if (ACTIVE()) - gather_cpudata_list_v2(si); -+/* -+ * Determine whether a virtual address is user or kernel or ambiguous. -+ */ -+int -+vaddr_type(ulong vaddr, struct task_context *tc) -+{ -+ int memtype, found; ++ search(start, end, mask, memtype, value_array, c); ++} - si->s_mem = ULONG(slab_buf + - OFFSET(slab_s_mem)); @@ -39207,9 +39956,9 @@ - si->slab = ULONG(slab_buf + - OFFSET(slab_list)); - si->slab -= OFFSET(slab_list); -+ if (!tc) -+ tc = CURRENT_CONTEXT(); -+ memtype = found = 0; ++/* ++ * Do the work for cmd_search(). ++ */ - /* - * Check for slab transition. 
(Tony Dziedzic) @@ -39227,41 +39976,36 @@ - - } while (si->slab != slab_chains[s] && !list_borked); - } -+ if (machdep->is_uvaddr(vaddr, tc)) { -+ memtype |= UVADDR; -+ found++; -+ } ++#define SEARCHMASK(X) ((X) | mask) - FREEBUF(slab_buf); - if (!list_borked) - save_slab_data(si); - break; -+ if (machdep->is_kvaddr(vaddr)) { -+ memtype |= KVADDR; -+ found++; -+ } ++static void ++search(ulong start, ulong end, ulong mask, int memtype, ulong *value, int vcnt) ++{ ++ int i, j; ++ ulong pp, next, *ubp; ++ int wordcnt, lastpage; ++ ulong page; ++ physaddr_t paddr; ++ char *pagebuf; - case SLAB_WALKTHROUGH: - specified_slab = si->slab; - si->flags |= SLAB_WALKTHROUGH; - si->flags &= ~SLAB_GET_COUNTS; -+ if (found == 1) -+ return memtype; -+ else -+ return AMBIGUOUS; -+} ++ if (start & (sizeof(long)-1)) { ++ start &= ~(sizeof(long)-1); ++ error(INFO, "rounding down start address to: %lx\n", start); ++ } - for (s = 0; s < SLAB_CHAINS; s++) { - if (!slab_chains[s]) - continue; -+/* -+ * Determine the first valid user space address -+ */ -+static int -+address_space_start(struct task_context *tc, ulong *addr) -+{ -+ ulong vma; -+ char *vma_buf; ++ pagebuf = GETBUF(PAGESIZE()); ++ next = start; - if (!specified_slab) { - if (!readmem(slab_chains[s], @@ -39287,320 +40031,6 @@ - if (si->flags & ADDRESS_SPECIFIED) - fprintf(fp, "for %llx", si->spec_addr); - fprintf(fp, "\n"); -- } -- -- do { -- if (received_SIGINT()) -- restart(0); -- -- if (!verify_slab_v2(si, last, s)) { -- list_borked = 1; -- continue; -- } -- last = si->slab - OFFSET(slab_list); -+ if (!tc->mm_struct) -+ return FALSE; - -- dump_slab_percpu_v2(si); -- -- if (si->found) { -- return; -- } -- -- readmem(si->slab+OFFSET(slab_list), -- KVADDR, &si->slab, sizeof(ulong), -- "slab list", FAULT_ON_ERROR); -- -- si->slab -= OFFSET(slab_list); -+ fill_mm_struct(tc->mm_struct); -+ vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); -+ if (!vma) -+ return FALSE; -+ vma_buf = fill_vma_cache(vma); -+ *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); - -- } while (si->slab != slab_chains[s] && !list_borked); -- } -- -- break; -- } -+ return TRUE; - } - - /* -- * Try to preclude any attempt to translate a bogus slab structure. -+ * Search for a given value between a starting and ending address range, -+ * applying an optional mask for "don't care" bits. As an alternative -+ * to entering the starting address value, -k means "start of kernel address -+ * space". For processors with ambiguous user/kernel address spaces, -+ * -u or -k must be used (with or without -s) as a differentiator. - */ --static int --verify_slab_v2(struct meminfo *si, ulong last, int s) -+void -+cmd_search(void) - { -- char slab_buf[BUFSIZE]; -- struct kernel_list_head *list_head; -- unsigned int inuse; -- ulong s_mem; -- char *list; -- int errcnt; -+ int c; -+ ulong start, end, mask, memtype, len; -+ ulong uvaddr_end; -+ int sflag; -+ struct meminfo meminfo; -+ ulong value_array[MAXARGS]; -+ struct syment *sp; - -- list = slab_chain_name_v2[s]; -+ start = end = mask = sflag = memtype = len = 0; -+ uvaddr_end = COMMON_VADDR_SPACE() ? 
(ulong)(-1) : machdep->kvbase; -+ BZERO(value_array, sizeof(ulong) * MAXARGS); - -- errcnt = 0; -+ while ((c = getopt(argcnt, args, "l:uks:e:v:m:")) != EOF) { -+ switch(c) -+ { -+ case 'u': -+ if (!sflag) { -+ address_space_start(CURRENT_CONTEXT(),&start); -+ sflag++; -+ } -+ memtype = UVADDR; -+ sflag++; -+ break; - -- if (!readmem(si->slab, KVADDR, slab_buf, -- SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { -- error(INFO, "%s: %s list: bad slab pointer: %lx\n", -- si->curname, list, si->slab); -- return FALSE; -- } -+ case 'k': -+ if (!sflag) { -+ start = machdep->kvbase; -+ sflag++; -+ } -+ memtype = KVADDR; -+ sflag++; -+ break; -+ -+ case 's': -+ if ((sp = symbol_search(optarg))) -+ start = sp->value; -+ else -+ start = htol(optarg, FAULT_ON_ERROR, NULL); -+ sflag++; -+ break; -+ -+ case 'e': -+ if ((sp = symbol_search(optarg))) -+ end = sp->value; -+ else -+ end = htol(optarg, FAULT_ON_ERROR, NULL); -+ break; - -- list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); -- if (!IS_KVADDR((ulong)list_head->next) || -- !accessible((ulong)list_head->next)) { -- error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", -- si->curname, list, si->slab, -- (ulong)list_head->next); -- errcnt++; -- } -+ case 'l': -+ len = stol(optarg, FAULT_ON_ERROR, NULL); -+ break; - -- if (last && (last != (ulong)list_head->prev)) { -- error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", -- si->curname, list, si->slab, -- (ulong)list_head->prev); -- errcnt++; -- } -+ case 'm': -+ mask = htol(optarg, FAULT_ON_ERROR, NULL); -+ break; - -- inuse = UINT(slab_buf + OFFSET(slab_inuse)); -- if (inuse > si->c_num) { -- error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", -- si->curname, list, si->slab, inuse); -- errcnt++; -- } -+ default: -+ argerrs++; -+ break; -+ } -+ } - -- if (!last) -- goto no_inuse_check_v2; -+ if (argerrs || !sflag || !args[optind] || (len && end)) -+ cmd_usage(pc->curcmd, SYNOPSIS); - -- switch (s) -+ if (!memtype) -+ memtype = vaddr_type(start, CURRENT_CONTEXT()); -+ -+ switch (memtype) - { -- case 0: /* partial */ -- if ((inuse == 0) || (inuse == si->c_num)) { -- error(INFO, -- "%s: %s list: slab: %lx bad inuse counter: %ld\n", -- si->curname, list, si->slab, inuse); -- errcnt++; -+ case UVADDR: -+ if (!IS_UVADDR(start, CURRENT_CONTEXT())) { -+ error(INFO, "invalid user virtual address: %lx\n", -+ start); -+ cmd_usage(pc->curcmd, SYNOPSIS); - } - break; - -- case 1: /* full */ -- if (inuse != si->c_num) { -- error(INFO, -- "%s: %s list: slab: %lx bad inuse counter: %ld\n", -- si->curname, list, si->slab, inuse); -- errcnt++; -+ case KVADDR: -+ if (!IS_KVADDR(start)) { -+ error(INFO, "invalid kernel virtual address: %lx\n", -+ start); -+ cmd_usage(pc->curcmd, SYNOPSIS); - } - break; - -- case 2: /* free */ -- if (inuse > 0) { -- error(INFO, -- "%s: %s list: slab: %lx bad inuse counter: %ld\n", -- si->curname, list, si->slab, inuse); -- errcnt++; -+ case AMBIGUOUS: -+ error(INFO, -+ "ambiguous virtual address: %lx (requires -u or -k)\n", -+ start); -+ cmd_usage(pc->curcmd, SYNOPSIS); -+ } -+ -+ if (!end && !len) { -+ switch (memtype) -+ { -+ case UVADDR: -+ end = uvaddr_end; -+ break; -+ -+ case KVADDR: -+ if (vt->vmalloc_start < machdep->identity_map_base) -+ end = (ulong)(-1); -+ else { -+ meminfo.memtype = KVADDR; -+ meminfo.spec_addr = 0; -+ meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); -+ dump_vmlist(&meminfo); -+ end = meminfo.retval; -+ } -+ break; -+ } -+ } else if (len) -+ end = start + len; -+ -+ switch (memtype) -+ { -+ case UVADDR: 
-+ if (end > uvaddr_end) { -+ error(INFO, -+ "address range starts in user space and ends kernel space\n"); -+ cmd_usage(pc->curcmd, SYNOPSIS); -+ } -+ /* FALLTHROUGH */ -+ case KVADDR: -+ if (end < start) { -+ error(INFO, -+ "ending address %lx is below starting address %lx\n", -+ end, start); -+ cmd_usage(pc->curcmd, SYNOPSIS); - } - break; - } - --no_inuse_check_v2: -- s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); -- if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { -- error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", -- si->curname, list, si->slab, s_mem); -- errcnt++; -+ c = 0; -+ while (args[optind]) { -+ value_array[c] = htol(args[optind], FAULT_ON_ERROR, NULL); -+ c++; -+ optind++; - } - -- return(errcnt ? FALSE : TRUE); -+ search(start, end, mask, memtype, value_array, c); - } - - /* -- * If it's a dumpfile, save the essential slab data to avoid re-reading -- * the whole slab chain more than once. This may seem like overkill, but -- * if the problem is a memory leak, or just the over-use of the buffer_head -- * cache, it's painful to wait each time subsequent kmem -s or -i commands -- * simply need the basic slab counts. -+ * Do the work for cmd_search(). - */ --struct slab_data { -- ulong cache_addr; -- int num_slabs; -- int inuse; -- ulong cpucached_cache; --}; - --#define NO_SLAB_DATA ((void *)(-1)) -+#define SEARCHMASK(X) ((X) | mask) - --static void --save_slab_data(struct meminfo *si) -+static void -+search(ulong start, ulong end, ulong mask, int memtype, ulong *value, int vcnt) - { -- int i; -+ int i, j; -+ ulong pp, next, *ubp; -+ int wordcnt, lastpage; -+ ulong page; -+ physaddr_t paddr; -+ char *pagebuf; - -- if (ACTIVE()) -- return; -+ if (start & (sizeof(long)-1)) { -+ start &= ~(sizeof(long)-1); -+ error(INFO, "rounding down start address to: %lx\n", start); -+ } - -- if (vt->slab_data == NO_SLAB_DATA) -- return; -+ pagebuf = GETBUF(PAGESIZE()); -+ next = start; - -- if (!vt->slab_data) { -- if (!(vt->slab_data = (struct slab_data *) -- malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { -- error(INFO, "cannot malloc slab_data table"); -- vt->slab_data = NO_SLAB_DATA; -- return; -- } -- for (i = 0; i < vt->kmem_cache_count; i++) { -- vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; -- vt->slab_data[i].num_slabs = 0; -- vt->slab_data[i].inuse = 0; -- vt->slab_data[i].cpucached_cache = 0; + for (pp = VIRTPAGEBASE(start); next < end; next = pp) { + lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end)); + if (LKCD_DUMPFILE()) @@ -39614,14 +40044,36 @@ + if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) + return; + continue; -+ } + } +- +- do { +- if (received_SIGINT()) +- restart(0); +- +- if (!verify_slab_v2(si, last, s)) { +- list_borked = 1; +- continue; +- } +- last = si->slab - OFFSET(slab_list); + break; -+ + +- dump_slab_percpu_v2(si); +- +- if (si->found) { + case KVADDR: + if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || + !phys_to_page(paddr, &page)) { + if (!next_kpage(pp, &pp)) -+ return; + return; +- } +- +- readmem(si->slab+OFFSET(slab_list), +- KVADDR, &si->slab, sizeof(ulong), +- "slab list", FAULT_ON_ERROR); +- +- si->slab -= OFFSET(slab_list); +- +- } while (si->slab != slab_chains[s] && !list_borked); + continue; + } + break; @@ -39632,11 +40084,8 @@ + pp += PAGESIZE(); + continue; } -- } -- for (i = 0; i < vt->kmem_cache_count; i++) { -- if (vt->slab_data[i].cache_addr == si->cache) -- break; +- break; + ubp = (ulong *)&pagebuf[next - pp]; + if (lastpage) { + if (end == (ulong)(-1)) @@ -39645,19 +40094,13 @@ + wordcnt = (end - 
next)/sizeof(long); + } else + wordcnt = (PAGESIZE() - (next - pp))/sizeof(long); - -- if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { -- vt->slab_data[i].cache_addr = si->cache; -- vt->slab_data[i].num_slabs = si->num_slabs; -- vt->slab_data[i].inuse = si->inuse; -- vt->slab_data[i].cpucached_cache = si->cpucached_cache; -- break; ++ + for (i = 0; i < wordcnt; i++, ubp++, next += sizeof(long)) { + for (j = 0; j < vcnt; j++) { + if (SEARCHMASK(*ubp) == SEARCHMASK(value[j])) + fprintf(fp, "%lx: %lx\n", next, *ubp); + } - } ++ } + + if (CRASHDEBUG(1)) + if ((pp % (1024*1024)) == 0) @@ -39667,271 +40110,273 @@ } } --static int --slab_data_saved(struct meminfo *si) + -+/* + /* +- * Try to preclude any attempt to translate a bogus slab structure. + * Return the next mapped user virtual address page that comes after + * the passed-in address. -+ */ -+static int + */ + static int +-verify_slab_v2(struct meminfo *si, ulong last, int s) +next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr) { -- int i; +- char slab_buf[BUFSIZE]; +- struct kernel_list_head *list_head; +- unsigned int inuse; +- ulong s_mem; +- char *list; +- int errcnt; + ulong vma, total_vm; + int found; + char *vma_buf; + ulong vm_start, vm_end; + void *vm_next; -+ + +- list = slab_chain_name_v2[s]; + if (!tc->mm_struct) + return FALSE; -+ + +- errcnt = 0; + fill_mm_struct(tc->mm_struct); + vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); + total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); -- if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) +- if (!readmem(si->slab, KVADDR, slab_buf, +- SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { +- error(INFO, "%s: %s list: bad slab pointer: %lx\n", +- si->curname, list, si->slab); + if (!vma || (total_vm == 0)) return FALSE; +- } +- +- list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); +- if (!IS_KVADDR((ulong)list_head->next) || +- !accessible((ulong)list_head->next)) { +- error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", +- si->curname, list, si->slab, +- (ulong)list_head->next); +- errcnt++; +- } +- +- if (last && (last != (ulong)list_head->prev)) { +- error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", +- si->curname, list, si->slab, +- (ulong)list_head->prev); +- errcnt++; +- } + +- inuse = UINT(slab_buf + OFFSET(slab_inuse)); +- if (inuse > si->c_num) { +- error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; +- } ++ vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ + +- if (!last) +- goto no_inuse_check_v2; ++ for (found = FALSE; vma; vma = (ulong)vm_next) { ++ vma_buf = fill_vma_cache(vma); + +- switch (s) +- { +- case 0: /* partial */ +- if ((inuse == 0) || (inuse == si->c_num)) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; +- } +- break; ++ vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); ++ vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); ++ vm_next = VOID_PTR(vma_buf + OFFSET(vm_area_struct_vm_next)); + +- case 1: /* full */ +- if (inuse != si->c_num) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, inuse); +- errcnt++; ++ if (vaddr <= vm_start) { ++ *nextvaddr = vm_start; ++ return TRUE; + } +- break; + +- case 2: /* free */ +- if (inuse > 0) { +- error(INFO, +- "%s: %s list: slab: %lx bad inuse counter: %ld\n", +- si->curname, list, si->slab, 
inuse); +- errcnt++; ++ if ((vaddr > vm_start) && (vaddr < vm_end)) { ++ *nextvaddr = vaddr; ++ return TRUE; + } +- break; +- } +- +-no_inuse_check_v2: +- s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); +- if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { +- error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", +- si->curname, list, si->slab, s_mem); +- errcnt++; + } + +- return(errcnt ? FALSE : TRUE); ++ return FALSE; + } + + /* +- * If it's a dumpfile, save the essential slab data to avoid re-reading +- * the whole slab chain more than once. This may seem like overkill, but +- * if the problem is a memory leak, or just the over-use of the buffer_head +- * cache, it's painful to wait each time subsequent kmem -s or -i commands +- * simply need the basic slab counts. ++ * Return the next mapped kernel virtual address in the vmlist ++ * that is equal to or comes after the passed-in address. + */ +-struct slab_data { +- ulong cache_addr; +- int num_slabs; +- int inuse; +- ulong cpucached_cache; +-}; +- +-#define NO_SLAB_DATA ((void *)(-1)) +- +-static void +-save_slab_data(struct meminfo *si) ++static ulong ++next_vmlist_vaddr(ulong vaddr) + { +- int i; ++ ulong i, count; ++ struct meminfo meminfo, *mi; + +- if (ACTIVE()) +- return; ++ mi = &meminfo; ++ BZERO(mi, sizeof(struct meminfo)); + +- if (vt->slab_data == NO_SLAB_DATA) +- return; ++ mi->flags = GET_VMLIST_COUNT; ++ dump_vmlist(mi); ++ count = mi->retval; + +- if (!vt->slab_data) { +- if (!(vt->slab_data = (struct slab_data *) +- malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { +- error(INFO, "cannot malloc slab_data table"); +- vt->slab_data = NO_SLAB_DATA; +- return; +- } +- for (i = 0; i < vt->kmem_cache_count; i++) { +- vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; +- vt->slab_data[i].num_slabs = 0; +- vt->slab_data[i].inuse = 0; +- vt->slab_data[i].cpucached_cache = 0; +- } +- } ++ if (!count) ++ return vaddr; + +- for (i = 0; i < vt->kmem_cache_count; i++) { +- if (vt->slab_data[i].cache_addr == si->cache) +- break; ++ mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*count); ++ mi->flags = GET_VMLIST; ++ dump_vmlist(mi); + +- if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { +- vt->slab_data[i].cache_addr = si->cache; +- vt->slab_data[i].num_slabs = si->num_slabs; +- vt->slab_data[i].inuse = si->inuse; +- vt->slab_data[i].cpucached_cache = si->cpucached_cache; ++ for (i = 0; i < count; i++) { ++ if (vaddr <= mi->vmlist[i].addr) { ++ vaddr = mi->vmlist[i].addr; + break; + } ++ if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size)) ++ break; + } ++ ++ FREEBUF(mi->vmlist); ++ ++ return vaddr; + } + +-static int +-slab_data_saved(struct meminfo *si) ++ ++/* ++ * Return the next kernel virtual address page that comes after ++ * the passed-in, untranslatable, address. 
++ */ ++static int ++next_kpage(ulong vaddr, ulong *nextvaddr) + { +- int i; ++ ulong vaddr_orig; + +- if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) +- return FALSE; ++ vaddr_orig = vaddr; ++ vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ - for (i = 0; i < vt->kmem_cache_count; i++) { - if (vt->slab_data[i].cache_addr == si->cache) { - si->inuse = vt->slab_data[i].inuse; - si->num_slabs = vt->slab_data[i].num_slabs; - si->cpucached_cache = vt->slab_data[i].cpucached_cache; -- return TRUE; -- } -- } -- -- return FALSE; --} -+ vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ - --static void --dump_saved_slab_data(void) --{ -- int i; -+ for (found = FALSE; vma; vma = (ulong)vm_next) { -+ vma_buf = fill_vma_cache(vma); - -- if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) -- return; -+ vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); -+ vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); -+ vm_next = VOID_PTR(vma_buf + OFFSET(vm_area_struct_vm_next)); - -- for (i = 0; i < vt->kmem_cache_count; i++) { -- if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) -- break; -+ if (vaddr <= vm_start) { -+ *nextvaddr = vm_start; -+ return TRUE; -+ } - -- fprintf(fp, -- " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n", -- vt->slab_data[i].cache_addr, -- vt->slab_data[i].inuse, -- vt->slab_data[i].num_slabs, -- vt->slab_data[i].cpucached_cache); -+ if ((vaddr > vm_start) && (vaddr < vm_end)) { -+ *nextvaddr = vaddr; -+ return TRUE; -+ } - } -+ -+ return FALSE; - } - - /* -- * Dump the contents of a kmem slab. -+ * Return the next mapped kernel virtual address in the vmlist -+ * that is equal to or comes after the passed-in address. - */ -- --static void --dump_slab(struct meminfo *si) -+static ulong -+next_vmlist_vaddr(struct meminfo *mi, ulong vaddr) - { -- uint16_t s_offset; -+ ulong i, count; - -- si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem)); -- si->s_mem = PTOB(BTOP(si->s_mem)); -+ BZERO(mi, sizeof(struct meminfo)); - -- if (si->flags & ADDRESS_SPECIFIED) { -- if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) && -- (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))){ -- si->found = KMEM_SLAB_ADDR; -- return; -- } -- if (INSLAB(si->spec_addr, si)) -- si->found = KMEM_ON_SLAB; /* But don't return yet... */ -- else -- return; -- } -+ mi->flags = GET_VMLIST_COUNT; -+ dump_vmlist(mi); -+ count = mi->retval; - -- si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep)); -- si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse)); -- si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index)); -- s_offset = USHORT(si->slab_buf + OFFSET(kmem_slab_s_s_offset)); -+ if (!count) -+ return vaddr; - -- if (!(si->flags & ADDRESS_SPECIFIED)) { -- fprintf(fp, slab_hdr); -- DUMP_SLAB_INFO(); -+ mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*count); -+ mi->flags = GET_VMLIST; -+ dump_vmlist(mi); -+ -+ for (i = 0; i < count; i++) { -+ if (vaddr <= mi->vmlist[i].addr) { -+ vaddr = mi->vmlist[i].addr; -+ break; -+ } -+ if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size)) -+ break; - } - -- dump_slab_objects(si); -+ FREEBUF(mi->vmlist); -+ -+ return vaddr; - } - -+ - /* -- * dump_slab() adapted for newer percpu slab format. -+ * Return the next kernel virtual address page that comes after -+ * the passed-in address. 
- */ -- --static void --dump_slab_percpu_v1(struct meminfo *si) -+static int -+next_kpage(ulong vaddr, ulong *nextvaddr) - { -- int tmp; -- -- readmem(si->slab+OFFSET(slab_s_s_mem), -- KVADDR, &si->s_mem, sizeof(ulong), -- "s_mem", FAULT_ON_ERROR); -+ int n; -+ ulong paddr, vaddr_orig, node_size; -+ struct node_table *nt; -+ ulonglong pstart, pend; -+ ulong vmalloc_limit; -+ struct meminfo meminfo; - -- /* -- * Include the array of kmem_bufctl_t's appended to slab. -- */ -- tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num); -+ vaddr_orig = vaddr; -+ vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ - -- if (si->flags & ADDRESS_SPECIFIED) { -- if (INSLAB_PERCPU(si->slab, si) && -- (si->spec_addr >= si->slab) && -- (si->spec_addr < (si->slab+tmp))) { -- if (si->spec_addr >= (si->slab + SIZE(slab_s))) -- si->found = KMEM_BUFCTL_ADDR; -- else -- si->found = KMEM_SLAB_ADDR; -- } else if (INSLAB_PERCPU(si->spec_addr, si)) -- si->found = KMEM_ON_SLAB; /* But don't return yet... */ -- else -- return; -- } + if (vaddr < vaddr_orig) /* wrapped back to zero? */ + return FALSE; - -- readmem(si->slab+OFFSET(slab_s_inuse), -- KVADDR, &tmp, sizeof(int), -- "inuse", FAULT_ON_ERROR); -- si->s_inuse = tmp; -+ meminfo.memtype = KVADDR; -+ meminfo.spec_addr = 0; -+ meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); -+ dump_vmlist(&meminfo); -+ vmalloc_limit = meminfo.retval; - -- readmem(si->slab+OFFSET(slab_s_free), -- KVADDR, &si->free, SIZE(kmem_bufctl_t), -- "kmem_bufctl_t", FAULT_ON_ERROR); ++ + if (IS_VMALLOC_ADDR(vaddr_orig)) { -+ if (IS_VMALLOC_ADDR(vaddr) && (vaddr < vmalloc_limit)) { ++ if (IS_VMALLOC_ADDR(vaddr) && ++ (vaddr < last_vmalloc_address())) { + if (machine_type("X86_64")) -+ vaddr = next_vmlist_vaddr(&meminfo, vaddr); ++ vaddr = next_vmlist_vaddr(vaddr); + *nextvaddr = vaddr; + return TRUE; + } - -- gather_slab_free_list_percpu(si); -- gather_slab_cached_count(si); ++ + if (vt->vmalloc_start < machdep->identity_map_base) { + *nextvaddr = machdep->identity_map_base; -+ return TRUE; -+ } - -- if (!(si->flags & ADDRESS_SPECIFIED)) { -- fprintf(fp, slab_hdr); -- DUMP_SLAB_INFO(); + return TRUE; + } ++ + return FALSE; } -- dump_slab_objects_percpu(si); --} -+ paddr = VTOP(vaddr); -+ -+ for (n = 0; n < vt->numnodes; n++) { -+ nt = &vt->node_table[n]; -+ if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) -+ node_size = vt->max_mapnr; -+ else -+ node_size = nt->size; -+ -+ pstart = nt->start_paddr; -+ pend = pstart + ((ulonglong)node_size * PAGESIZE()); -+ -+ if ((paddr < pstart) || (paddr >= pend)) -+ continue; -+ /* -+ * We're in the physical range. -+ */ -+ *nextvaddr = vaddr; +- return FALSE; ++ if (next_identity_mapping(vaddr, nextvaddr)) + return TRUE; -+ } - ++ + if (vt->vmalloc_start > vaddr) { + *nextvaddr = vt->vmalloc_start; + return TRUE; + } else + return FALSE; -+} + } - /* -- * Updated for 2.6 slab substructure. -+ * Display swap statistics. - */ -static void --dump_slab_percpu_v2(struct meminfo *si) +-dump_saved_slab_data(void) ++/* ++ * Display swap statistics. ++ */ +void +cmd_swap(void) { -- int tmp; +- int i; + int c; -- readmem(si->slab+OFFSET(slab_s_mem), -- KVADDR, &si->s_mem, sizeof(ulong), -- "s_mem", FAULT_ON_ERROR); +- if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) +- return; + while ((c = getopt(argcnt, args, "")) != EOF) { + switch(c) + { @@ -39941,55 +40386,39 @@ + } + } -- /* -- * Include the array of kmem_bufctl_t's appended to slab. 
-- */ -- tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num); +- for (i = 0; i < vt->kmem_cache_count; i++) { +- if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) +- break; + if (argerrs) + cmd_usage(pc->curcmd, SYNOPSIS); -- if (si->flags & ADDRESS_SPECIFIED) { -- if (INSLAB_PERCPU(si->slab, si) && -- (si->spec_addr >= si->slab) && -- (si->spec_addr < (si->slab+tmp))) { -- if (si->spec_addr >= (si->slab + SIZE(slab))) -- si->found = KMEM_BUFCTL_ADDR; -- else -- si->found = KMEM_SLAB_ADDR; -- } else if (INSLAB_PERCPU(si->spec_addr, si)) -- si->found = KMEM_ON_SLAB; /* But don't return yet... */ -- else -- return; -- } +- fprintf(fp, +- " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n", +- vt->slab_data[i].cache_addr, +- vt->slab_data[i].inuse, +- vt->slab_data[i].num_slabs, +- vt->slab_data[i].cpucached_cache); +- } + dump_swap_info(VERBOSE, NULL, NULL); -+} + } -- readmem(si->slab+OFFSET(slab_inuse), -- KVADDR, &tmp, sizeof(int), -- "inuse", FAULT_ON_ERROR); -- si->s_inuse = tmp; -+/* + /* +- * Dump the contents of a kmem slab. + * Do the work for cmd_swap(). -+ */ + */ -- readmem(si->slab+OFFSET(slab_free), -- KVADDR, &si->free, SIZE(kmem_bufctl_t), -- "kmem_bufctl_t", FAULT_ON_ERROR); +-static void +-dump_slab(struct meminfo *si) +#define SWP_USED 1 +#define SWAP_MAP_BAD 0x8000 - -- gather_slab_free_list_percpu(si); -- gather_slab_cached_count(si); ++ +char *swap_info_hdr = \ +"FILENAME TYPE SIZE USED PCT PRIORITY\n"; - -- if (!(si->flags & ADDRESS_SPECIFIED)) { -- fprintf(fp, slab_hdr); -- DUMP_SLAB_INFO(); -- } ++ +static int +dump_swap_info(ulong swapflags, ulong *totalswap_pages, ulong *totalused_pages) -+{ + { +- uint16_t s_offset; + int i, j; + int flags, swap_device, pages, prio, usedswap; + ulong swap_file, max, swap_map, pct; @@ -39999,101 +40428,64 @@ + ulong totalswap, totalused; + char buf[BUFSIZE]; -- dump_slab_objects_percpu(si); --} +- si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem)); +- si->s_mem = PTOB(BTOP(si->s_mem)); + if (!symbol_exists("nr_swapfiles")) + error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) && +- (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))){ +- si->found = KMEM_SLAB_ADDR; +- return; +- } +- if (INSLAB(si->spec_addr, si)) +- si->found = KMEM_ON_SLAB; /* But don't return yet... */ + if (!symbol_exists("swap_info")) + error(FATAL, "swap_info doesn't exist in this kernel!\n"); - ++ + swap_info = symbol_value("swap_info"); - --/* -- * Gather the free objects in a slab into the si->addrlist, checking for -- * specified addresses that are in-slab kmem_bufctls, and making error checks -- * along the way. Object address checks are deferred to dump_slab_objects(). 
-- */ ++ + if (swapflags & VERBOSE) + fprintf(fp, swap_info_hdr); - --#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size))) ++ + totalswap = totalused = 0; - --static void --gather_slab_free_list(struct meminfo *si) --{ -- ulong *next, obj; -- ulong expected, cnt; ++ + for (i = 0; i < vt->nr_swapfiles; i++, + swap_info += SIZE(swap_info_struct)) { + fill_swap_info(swap_info); - -- BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); ++ + flags = INT(vt->swap_info_struct + + OFFSET(swap_info_struct_flags)); - -- if (!si->s_freep) -- return; ++ + if (!(flags & SWP_USED)) + continue; - -- cnt = 0; -- expected = si->c_num - si->s_inuse; ++ + swap_file = ULONG(vt->swap_info_struct + + OFFSET(swap_info_struct_swap_file)); - -- next = si->s_freep; -- do { ++ + swap_device = INT(vt->swap_info_struct + + OFFSET_OPTION(swap_info_struct_swap_device, + swap_info_struct_old_block_size)); - -- if (cnt == si->c_num) { -- error(INFO, -- "\"%s\" cache: too many objects found in slab free list\n", -- si->curname); -- si->errors++; -- return; -- } ++ + pages = INT(vt->swap_info_struct + + OFFSET(swap_info_struct_pages)); - -- /* -- * Off-slab kmem_bufctls are contained in arrays of object -- * pointers that point to: -- * 1. next kmem_bufctl (or NULL) if the object is free. -- * 2. to the object if it the object is in use. -- * -- * On-slab kmem_bufctls resides just after the object itself, -- * and point to: -- * 1. next kmem_bufctl (or NULL) if object is free. -- * 2. the containing slab if the object is in use. -- */ ++ + totalswap += pages; + pages <<= (PAGESHIFT() - 10); - -- if (si->c_flags & SLAB_CFLGS_BUFCTL) -- obj = si->s_mem + ((next - si->s_index) * si->c_offset); -- else -- obj = (ulong)next - si->c_offset; ++ + prio = INT(vt->swap_info_struct + + OFFSET(swap_info_struct_prio)); - -- si->addrlist[cnt] = obj; ++ + if (MEMBER_SIZE("swap_info_struct", "max") == sizeof(int)) + max = UINT(vt->swap_info_struct + + OFFSET(swap_info_struct_max)); -+ else + else +- return; +- } + max = ULONG(vt->swap_info_struct + + OFFSET(swap_info_struct_max)); - -- if (si->flags & ADDRESS_SPECIFIED) { -- if (INSLAB(next, si) && -- (si->spec_addr >= (ulong)next) && -- (si->spec_addr < (ulong)(next + 1))) { -- si->found = KMEM_BUFCTL_ADDR; -- return; ++ + swap_map = ULONG(vt->swap_info_struct + + OFFSET(swap_info_struct_swap_map)); + @@ -40109,19 +40501,12 @@ + buf, BUFSIZE, 1, file_to_vfsmnt(swap_file)); + } else { + get_pathname(swap_file, buf, BUFSIZE, 1, 0); - } -- } ++ } + } else + sprintf(buf, "(unknown)"); - -- cnt++; ++ + map = (ushort *)GETBUF(sizeof(ushort) * max); - -- if (!INSLAB(obj, si)) { -- error(INFO, -- "\"%s\" cache: address not contained within slab: %lx\n", -- si->curname, obj); -- si->errors++; ++ + if (!readmem(swap_map, KVADDR, map, + sizeof(ushort) * max, "swap_info swap_map data", + RETURN_ON_ERROR|QUIET)) { @@ -40133,11 +40518,8 @@ + error(FATAL, + "swap_info[%d].swap_map at %lx is unaccessible\n", + i, swap_map); - } - -- readmem((ulong)next, KVADDR, &next, sizeof(void *), -- "s_freep chain entry", FAULT_ON_ERROR); -- } while (next); ++ } ++ + usedswap = 0; + for (j = 0; j < max; j++) { + switch (map[j]) @@ -40149,45 +40531,314 @@ + usedswap++; + } + } - -- if (cnt != expected) { -- error(INFO, -- "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", -- si->curname, expected, cnt); -- si->errors++; -+ FREEBUF(map); + ++ FREEBUF(map); + +- si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep)); +- si->s_inuse = ULONG(si->slab_buf + 
OFFSET(kmem_slab_s_s_inuse)); +- si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index)); +- s_offset = USHORT(si->slab_buf + OFFSET(kmem_slab_s_s_offset)); + totalused += usedswap; + usedswap <<= (PAGESHIFT() - 10); + pct = (usedswap * 100)/pages; -+ + +- if (!(si->flags & ADDRESS_SPECIFIED)) { +- fprintf(fp, slab_hdr); +- DUMP_SLAB_INFO(); + if (swapflags & VERBOSE) + fprintf(fp, "%-15s %s %7dk %7dk %2ld%% %d\n", + buf, swap_device ? "PARTITION" : " FILE ", + pages, usedswap, pct, prio); } --} +- dump_slab_objects(si); + if (totalswap_pages) + *totalswap_pages = totalswap; + if (totalused_pages) + *totalused_pages = totalused; + + return TRUE; + } + + /* +- * dump_slab() adapted for newer percpu slab format. ++ * Translate a PTE into a swap device and offset string. + */ +- +-static void +-dump_slab_percpu_v1(struct meminfo *si) ++char * ++swap_location(ulonglong pte, char *buf) + { +- int tmp; +- +- readmem(si->slab+OFFSET(slab_s_s_mem), +- KVADDR, &si->s_mem, sizeof(ulong), +- "s_mem", FAULT_ON_ERROR); +- +- /* +- * Include the array of kmem_bufctl_t's appended to slab. +- */ +- tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num); +- +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INSLAB_PERCPU(si->slab, si) && +- (si->spec_addr >= si->slab) && +- (si->spec_addr < (si->slab+tmp))) { +- if (si->spec_addr >= (si->slab + SIZE(slab_s))) +- si->found = KMEM_BUFCTL_ADDR; +- else +- si->found = KMEM_SLAB_ADDR; +- } else if (INSLAB_PERCPU(si->spec_addr, si)) +- si->found = KMEM_ON_SLAB; /* But don't return yet... */ +- else +- return; +- } +- +- readmem(si->slab+OFFSET(slab_s_inuse), +- KVADDR, &tmp, sizeof(int), +- "inuse", FAULT_ON_ERROR); +- si->s_inuse = tmp; +- +- readmem(si->slab+OFFSET(slab_s_free), +- KVADDR, &si->free, SIZE(kmem_bufctl_t), +- "kmem_bufctl_t", FAULT_ON_ERROR); ++ char swapdev[BUFSIZE]; + +- gather_slab_free_list_percpu(si); +- gather_slab_cached_count(si); ++ if (!pte) ++ return NULL; + +- if (!(si->flags & ADDRESS_SPECIFIED)) { +- fprintf(fp, slab_hdr); +- DUMP_SLAB_INFO(); +- } ++ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) ++ sprintf(buf, "%s OFFSET: %lld", ++ get_swapdev(__swp_type(pte), swapdev), __swp_offset(pte)); ++ else ++ sprintf(buf, "%s OFFSET: %llx", ++ get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte)); + +- dump_slab_objects_percpu(si); ++ return buf; + } + +- + /* +- * Updated for 2.6 slab substructure. ++ * Given the type field from a PTE, return the name of the swap device. + */ +-static void +-dump_slab_percpu_v2(struct meminfo *si) ++static char * ++get_swapdev(ulong type, char *buf) + { +- int tmp; +- +- readmem(si->slab+OFFSET(slab_s_mem), +- KVADDR, &si->s_mem, sizeof(ulong), +- "s_mem", FAULT_ON_ERROR); ++ unsigned int i, swap_info_len; ++ ulong swap_info, swap_file; ++ ulong vfsmnt; + +- /* +- * Include the array of kmem_bufctl_t's appended to slab. +- */ +- tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num); ++ if (!symbol_exists("nr_swapfiles")) ++ error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INSLAB_PERCPU(si->slab, si) && +- (si->spec_addr >= si->slab) && +- (si->spec_addr < (si->slab+tmp))) { +- if (si->spec_addr >= (si->slab + SIZE(slab))) +- si->found = KMEM_BUFCTL_ADDR; +- else +- si->found = KMEM_SLAB_ADDR; +- } else if (INSLAB_PERCPU(si->spec_addr, si)) +- si->found = KMEM_ON_SLAB; /* But don't return yet... 
*/ +- else +- return; +- } ++ if (!symbol_exists("swap_info")) ++ error(FATAL, "swap_info doesn't exist in this kernel!\n"); + +- readmem(si->slab+OFFSET(slab_inuse), +- KVADDR, &tmp, sizeof(int), +- "inuse", FAULT_ON_ERROR); +- si->s_inuse = tmp; ++ swap_info = symbol_value("swap_info"); + +- readmem(si->slab+OFFSET(slab_free), +- KVADDR, &si->free, SIZE(kmem_bufctl_t), +- "kmem_bufctl_t", FAULT_ON_ERROR); ++ swap_info_len = (i = ARRAY_LENGTH(swap_info)) ? ++ i : get_array_length("swap_info", NULL, 0); + +- gather_slab_free_list_percpu(si); +- gather_slab_cached_count(si); ++ sprintf(buf, "(unknown swap location)"); + +- if (!(si->flags & ADDRESS_SPECIFIED)) { +- fprintf(fp, slab_hdr); +- DUMP_SLAB_INFO(); +- } ++ if (type >= swap_info_len) ++ return buf; + +- dump_slab_objects_percpu(si); +-} ++ swap_info += (SIZE(swap_info_struct) * type); ++ fill_swap_info(swap_info); ++ swap_file = ULONG(vt->swap_info_struct + ++ OFFSET(swap_info_struct_swap_file)); + ++ if (swap_file) { ++ if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { ++ vfsmnt = ULONG(vt->swap_info_struct + ++ OFFSET(swap_info_struct_swap_vfsmnt)); ++ get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt); ++ } else if (VALID_MEMBER (swap_info_struct_old_block_size)) { ++ get_pathname(file_to_dentry(swap_file), ++ buf, BUFSIZE, 1, 0); ++ } else { ++ get_pathname(swap_file, buf, BUFSIZE, 1, 0); ++ } ++ } + ++ return buf; +} /* -- * gather_slab_free_list() adapted for newer percpu slab format. -+ * Translate a PTE into a swap device and offset string. +- * Gather the free objects in a slab into the si->addrlist, checking for +- * specified addresses that are in-slab kmem_bufctls, and making error checks +- * along the way. Object address checks are deferred to dump_slab_objects(). ++ * If not currently stashed, cache the passed-in swap_info_struct. */ -+char * -+swap_location(ulonglong pte, char *buf) +- +-#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size))) +- + static void +-gather_slab_free_list(struct meminfo *si) ++fill_swap_info(ulong swap_info) + { +- ulong *next, obj; +- ulong expected, cnt; +- +- BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); +- +- if (!si->s_freep) ++ if (vt->last_swap_read == swap_info) + return; + +- cnt = 0; +- expected = si->c_num - si->s_inuse; +- +- next = si->s_freep; +- do { +- +- if (cnt == si->c_num) { +- error(INFO, +- "\"%s\" cache: too many objects found in slab free list\n", +- si->curname); +- si->errors++; +- return; +- } +- +- /* +- * Off-slab kmem_bufctls are contained in arrays of object +- * pointers that point to: +- * 1. next kmem_bufctl (or NULL) if the object is free. +- * 2. to the object if it the object is in use. +- * +- * On-slab kmem_bufctls resides just after the object itself, +- * and point to: +- * 1. next kmem_bufctl (or NULL) if object is free. +- * 2. the containing slab if the object is in use. +- */ ++ if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *) ++ malloc(SIZE(swap_info_struct)))) ++ error(FATAL, "cannot malloc swap_info_struct space\n"); ++ ++ readmem(swap_info, KVADDR, vt->swap_info_struct, SIZE(swap_info_struct), ++ "fill_swap_info", FAULT_ON_ERROR); + +- if (si->c_flags & SLAB_CFLGS_BUFCTL) +- obj = si->s_mem + ((next - si->s_index) * si->c_offset); +- else +- obj = (ulong)next - si->c_offset; ++ vt->last_swap_read = swap_info; ++} + +- si->addrlist[cnt] = obj; ++/* ++ * If active, clear references to the swap_info references. 
++ */ ++void ++clear_swap_info_cache(void) +{ -+ char swapdev[BUFSIZE]; ++ if (ACTIVE()) ++ vt->last_swap_read = 0; ++} + +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INSLAB(next, si) && +- (si->spec_addr >= (ulong)next) && +- (si->spec_addr < (ulong)(next + 1))) { +- si->found = KMEM_BUFCTL_ADDR; +- return; +- } +- } + +- cnt++; ++/* ++ * Translage a vm_area_struct and virtual address into a filename ++ * and offset string. ++ */ + +- if (!INSLAB(obj, si)) { +- error(INFO, +- "\"%s\" cache: address not contained within slab: %lx\n", +- si->curname, obj); +- si->errors++; +- } ++#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change! */ + +- readmem((ulong)next, KVADDR, &next, sizeof(void *), +- "s_freep chain entry", FAULT_ON_ERROR); +- } while (next); ++static char * ++vma_file_offset(ulong vma, ulong vaddr, char *buf) ++{ ++ ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset; ++ ulong vfsmnt; ++ char file[BUFSIZE]; ++ char *vma_buf, *file_buf; + +- if (cnt != expected) { +- error(INFO, +- "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", +- si->curname, expected, cnt); +- si->errors++; +- } +-} ++ if (!vma) ++ return NULL; + ++ vma_buf = fill_vma_cache(vma); + +-/* +- * gather_slab_free_list() adapted for newer percpu slab format. +- */ ++ vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); -#define BUFCTL_END 0xffffFFFF -+ if (!pte) -+ return NULL; ++ if (!vm_file) ++ goto no_file_offset; -static void -gather_slab_free_list_percpu(struct meminfo *si) @@ -40198,63 +40849,64 @@ - int free_index; - ulong kmembp; - short *kbp; -+ if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) -+ sprintf(buf, "%s OFFSET: %lld", -+ get_swapdev(__swp_type(pte), swapdev), __swp_offset(pte)); -+ else -+ sprintf(buf, "%s OFFSET: %llx", -+ get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte)); ++ file_buf = fill_file_cache(vm_file); ++ dentry = ULONG(file_buf + OFFSET(file_f_dentry)); - BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); -+ return buf; -+} ++ if (!dentry) ++ goto no_file_offset; - if (CRASHDEBUG(1)) - fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n", - si->slab, si->s_inuse, si->c_num); -+/* -+ * Given the type field from a PTE, return the name of the swap device. 
-+ */ -+static char * -+get_swapdev(ulong type, char *buf) -+{ -+ unsigned int i, swap_info_len; -+ ulong swap_info, swap_file; -+ ulong vfsmnt; ++ file[0] = NULLCHAR; ++ if (VALID_MEMBER(file_f_vfsmnt)) { ++ vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); ++ get_pathname(dentry, file, BUFSIZE, 1, vfsmnt); ++ } else ++ get_pathname(dentry, file, BUFSIZE, 1, 0); - if (si->s_inuse == si->c_num ) - return; -+ if (!symbol_exists("nr_swapfiles")) -+ error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); ++ if (!strlen(file)) ++ goto no_file_offset; - kmembp = si->slab + SIZE_OPTION(slab_s, slab); - readmem((ulong)kmembp, KVADDR, si->kmem_bufctl, - SIZE(kmem_bufctl_t) * si->c_num, - "kmem_bufctl array", FAULT_ON_ERROR); -+ if (!symbol_exists("swap_info")) -+ error(FATAL, "swap_info doesn't exist in this kernel!\n"); ++ vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); - if (CRASHDEBUG(1)) { - for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) && - (i < si->c_num); i++) - fprintf(fp, "%d ", si->kmem_bufctl[i]); -+ swap_info = symbol_value("swap_info"); ++ vm_offset = vm_pgoff = 0xdeadbeef; - for (kbp = (short *)&si->kmem_bufctl[0], i = 0; - (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num); - i++) - fprintf(fp, "%d ", *(kbp + i)); -+ swap_info_len = (i = ARRAY_LENGTH(swap_info)) ? -+ i : get_array_length("swap_info", NULL, 0); ++ if (VALID_MEMBER(vm_area_struct_vm_offset)) ++ vm_offset = ULONG(vma_buf + ++ OFFSET(vm_area_struct_vm_offset)); ++ else if (VALID_MEMBER(vm_area_struct_vm_pgoff)) ++ vm_pgoff = ULONG(vma_buf + ++ OFFSET(vm_area_struct_vm_pgoff)); ++ else ++ goto no_file_offset; - fprintf(fp, "\n"); -- } -+ sprintf(buf, "(unknown swap location)"); ++ if (vm_offset != 0xdeadbeef) ++ offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset; ++ else if (vm_pgoff != 0xdeadbeef) { ++ offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff; ++ offset <<= PAGE_CACHE_SHIFT; + } - cnt = 0; - expected = si->c_num - si->s_inuse; -+ if (type >= swap_info_len) -+ return buf; ++ sprintf(buf, "%s OFFSET: %lx", file, offset); - if (SIZE(kmem_bufctl_t) == sizeof(int)) { - for (free_index = si->free; free_index != BUFCTL_END; @@ -40274,25 +40926,13 @@ - } - } else if (SIZE(kmem_bufctl_t) == sizeof(short)) { - kbp = (short *)&si->kmem_bufctl[0]; -+ swap_info += (SIZE(swap_info_struct) * type); -+ fill_swap_info(swap_info); -+ swap_file = ULONG(vt->swap_info_struct + -+ OFFSET(swap_info_struct_swap_file)); ++ return buf; - for (free_index = si->free; free_index != BUFCTL_END; - free_index = (int)*(kbp + free_index)) { -+ if (swap_file) { -+ if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { -+ vfsmnt = ULONG(vt->swap_info_struct + -+ OFFSET(swap_info_struct_swap_vfsmnt)); -+ get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt); -+ } else if (VALID_MEMBER (swap_info_struct_old_block_size)) { -+ get_pathname(file_to_dentry(swap_file), -+ buf, BUFSIZE, 1, 0); -+ } else { -+ get_pathname(swap_file, buf, BUFSIZE, 1, 0); -+ } -+ } ++no_file_offset: ++ return NULL; ++} - if (cnt == si->c_num) { - error(INFO, @@ -40300,60 +40940,54 @@ - si->errors++; - return; - } -+ return buf; -+} ++/* ++ * Translate a PTE into its physical address and flags. 
++ */ ++void ++cmd_pte(void) ++{ ++ int c; ++ ulonglong pte; - obj = si->s_mem + (free_index*si->size); - si->addrlist[cnt] = obj; - cnt++; -- } ++ while ((c = getopt(argcnt, args, "")) != EOF) { ++ switch(c) ++ { ++ default: ++ argerrs++; ++ break; + } - } else - error(FATAL, - "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n", - SIZE(kmem_bufctl_t)); -+/* -+ * If not currently stashed, cache the passed-in swap_info_struct. -+ */ -+static void -+fill_swap_info(ulong swap_info) -+{ -+ if (vt->last_swap_read == swap_info) -+ return; ++ } - if (cnt != expected) { - error(INFO, - "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", - si->curname, expected, cnt); - si->errors++; -- } -+ if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *) -+ malloc(SIZE(swap_info_struct)))) -+ error(FATAL, "cannot malloc swap_info_struct space\n"); -+ -+ readmem(swap_info, KVADDR, vt->swap_info_struct, SIZE(swap_info_struct), -+ "fill_swap_info", FAULT_ON_ERROR); ++ if (argerrs) ++ cmd_usage(pc->curcmd, SYNOPSIS); + -+ vt->last_swap_read = swap_info; - } ++ while (args[optind]) { ++ pte = htoll(args[optind], FAULT_ON_ERROR, NULL); ++ machdep->translate_pte((ulong)pte, NULL, pte); ++ optind++; + } +-} -+/* -+ * If active, clear references to the swap_info references. -+ */ -+void -+clear_swap_info_cache(void) -+{ -+ if (ACTIVE()) -+ vt->last_swap_read = 0; +} ++static char *node_zone_hdr = "ZONE NAME SIZE"; /* - * Dump the FREE, [ALLOCATED] and objects of a slab. - */ -+ * Translage a vm_area_struct and virtual address into a filename -+ * and offset string. -+ */ - +- -#define DUMP_SLAB_OBJECT() \ - for (j = on_free_list = 0; j < si->c_num; j++) { \ - if (obj == si->addrlist[j]) { \ @@ -40384,252 +41018,20 @@ - } \ - } \ - } -+#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change! */ - --static void +- ++ * On systems supporting memory nodes, display the basic per-node data. ++ */ + static void -dump_slab_objects(struct meminfo *si) -+static char * -+vma_file_offset(ulong vma, ulong vaddr, char *buf) ++dump_memory_nodes(int initialize) { -- int i, j; + int i, j; - ulong *next; - int on_free_list; - ulong cnt, expected; - ulong bufctl, obj; -+ ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset; -+ ulong vfsmnt; -+ char file[BUFSIZE]; -+ char *vma_buf, *file_buf; - +- - gather_slab_free_list(si); -+ if (!vma) -+ return NULL; - -- if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) -- return; -+ vma_buf = fill_vma_cache(vma); - -- cnt = 0; -- expected = si->s_inuse; -+ vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); - -- if (CRASHDEBUG(1)) -- for (i = 0; i < si->c_num; i++) { -- fprintf(fp, "si->addrlist[%d]: %lx\n", -- i, si->addrlist[i]); -- } -+ if (!vm_file) -+ goto no_file_offset; - -- if (!(si->flags & ADDRESS_SPECIFIED)) -- fprintf(fp, free_inuse_hdr); -+ file_buf = fill_file_cache(vm_file); -+ dentry = ULONG(file_buf + OFFSET(file_f_dentry)); - -- /* For on-slab bufctls, c_offset is the distance between the start of -- * an obj and its related bufctl. For off-slab bufctls, c_offset is -- * the distance between objs in the slab. -- */ -+ if (!dentry) -+ goto no_file_offset; - -- if (si->c_flags & SLAB_CFLGS_BUFCTL) { -- for (i = 0, next = si->s_index; i < si->c_num; i++, next++){ -- obj = si->s_mem + -- ((next - si->s_index) * si->c_offset); -- DUMP_SLAB_OBJECT(); -- } -- } else { -- /* -- * Get the "real" s_mem, i.e., without the offset stripped off. -- * It contains the address of the first object. 
-- */ -- readmem(si->slab+OFFSET(kmem_slab_s_s_mem), -- KVADDR, &obj, sizeof(ulong), -- "s_mem", FAULT_ON_ERROR); -+ file[0] = NULLCHAR; -+ if (VALID_MEMBER(file_f_vfsmnt)) { -+ vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); -+ get_pathname(dentry, file, BUFSIZE, 1, vfsmnt); -+ } else -+ get_pathname(dentry, file, BUFSIZE, 1, 0); - -- for (i = 0; i < si->c_num; i++) { -- DUMP_SLAB_OBJECT(); -+ if (!strlen(file)) -+ goto no_file_offset; - -- if (si->flags & ADDRESS_SPECIFIED) { -- bufctl = obj + si->c_offset; -+ vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); - -- if ((si->spec_addr >= bufctl) && -- (si->spec_addr < -- (bufctl + SIZE(kmem_bufctl_t)))) { -- si->found = KMEM_BUFCTL_ADDR; -- return; -- } -- } -+ vm_offset = vm_pgoff = 0xdeadbeef; - -- obj += (si->c_offset + SIZE(kmem_bufctl_t)); -- } -+ if (VALID_MEMBER(vm_area_struct_vm_offset)) -+ vm_offset = ULONG(vma_buf + -+ OFFSET(vm_area_struct_vm_offset)); -+ else if (VALID_MEMBER(vm_area_struct_vm_pgoff)) -+ vm_pgoff = ULONG(vma_buf + -+ OFFSET(vm_area_struct_vm_pgoff)); -+ else -+ goto no_file_offset; -+ -+ if (vm_offset != 0xdeadbeef) -+ offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset; -+ else if (vm_pgoff != 0xdeadbeef) { -+ offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff; -+ offset <<= PAGE_CACHE_SHIFT; - } - -- if (cnt != expected) { -- error(INFO, -- "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", -- si->curname, expected, cnt); -- si->errors++; -- } -+ sprintf(buf, "%s OFFSET: %lx", file, offset); - --} -+ return buf; - -+no_file_offset: -+ return NULL; -+} - - /* -- * dump_slab_objects() adapted for newer percpu slab format. -+ * Translate a PTE into its physical address and flags. - */ -- --static void --dump_slab_objects_percpu(struct meminfo *si) -+void -+cmd_pte(void) - { -- int i, j; -- int on_free_list, on_cpudata_list; -- ulong cnt, expected; -- ulong obj; -- -- if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) -- return; -- -- cnt = 0; -- expected = si->s_inuse; -+ int c; -+ ulonglong pte; - -- if (CRASHDEBUG(1)) -- for (i = 0; i < si->c_num; i++) { -- fprintf(fp, "si->addrlist[%d]: %lx\n", -- i, si->addrlist[i]); -+ while ((c = getopt(argcnt, args, "")) != EOF) { -+ switch(c) -+ { -+ default: -+ argerrs++; -+ break; - } -+ } - -- if (!(si->flags & ADDRESS_SPECIFIED)) -- fprintf(fp, free_inuse_hdr); -- -- for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { -- on_free_list = FALSE; -- on_cpudata_list = FALSE; -- -- for (j = 0; j < si->c_num; j++) { -- if (obj == si->addrlist[j]) { -- on_free_list = TRUE; -- break; -- } -- } -- -- on_cpudata_list = check_cpudata_list(si, obj); -+ if (argerrs) -+ cmd_usage(pc->curcmd, SYNOPSIS); - -- if (on_free_list && on_cpudata_list) { -- error(INFO, -- "\"%s\" cache: object %lx on both free and cpudata lists\n", -- si->curname, obj); -- si->errors++; -- } -- -- if (on_free_list) { -- if (!(si->flags & ADDRESS_SPECIFIED)) -- fprintf(fp, " %lx\n", obj); -- if (si->flags & ADDRESS_SPECIFIED) { -- if (INOBJECT(si->spec_addr, obj)) { -- si->found = -- KMEM_OBJECT_ADDR_FREE; -- return; -- } -- } -- } else if (on_cpudata_list) { -- if (!(si->flags & ADDRESS_SPECIFIED)) -- fprintf(fp, " %lx (cpu %d cache)\n", obj, -- si->cpu); -- cnt++; -- if (si->flags & ADDRESS_SPECIFIED) { -- if (INOBJECT(si->spec_addr, obj)) { -- si->found = -- KMEM_OBJECT_ADDR_CACHED; -- return; -- } -- } -- } else { -- if (!(si->flags & ADDRESS_SPECIFIED)) -- fprintf(fp, " [%lx]\n", obj); -- cnt++; -- if (si->flags & ADDRESS_SPECIFIED) 
{ -- if (INOBJECT(si->spec_addr, obj)) { -- si->found = -- KMEM_OBJECT_ADDR_INUSE; -- return; -- } -- } -- } -+ while (args[optind]) { -+ pte = htoll(args[optind], FAULT_ON_ERROR, NULL); -+ machdep->translate_pte((ulong)pte, NULL, pte); -+ optind++; - } - -- if (cnt != expected) { -- error(INFO, -- "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", -- si->curname, expected, cnt); -- si->errors++; -- } - } - -+static char *node_zone_hdr = "ZONE NAME SIZE"; -+ - /* -- * Determine how many of the "inuse" slab objects are actually cached -- * in the kmem_cache_s header. Set the per-slab count and update the -- * cumulative per-cache count. -+ * On systems supporting memory nodes, display the basic per-node data. - */ -- - static void --gather_slab_cached_count(struct meminfo *si) -+dump_memory_nodes(int initialize) - { -- int i; -- ulong obj; -+ int i, j; + int n, id, node, flen, slen, badaddr; + ulong node_mem_map; + ulong node_start_paddr; @@ -40652,7 +41054,7 @@ + char buf5[BUFSIZE]; + struct node_table *nt; -- si->cpucached_slab = 0; +- if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) + if (!(vt->flags & (NODES|NODES_ONLINE)) && initialize) { + nt = &vt->node_table[0]; + nt->node_id = 0; @@ -40674,14 +41076,11 @@ + fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); + fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); + } -+ return; + return; + } -- for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { -- if (check_cpudata_list(si, obj)) { -- si->cpucached_slab++; -- if (si->flags & SLAB_GET_COUNTS) { -- si->cpucached_cache++; +- cnt = 0; +- expected = si->s_inuse; + if (initialize) { + pgdat = UNINITIALIZED; + /* @@ -40702,42 +41101,41 @@ + error(WARNING, + "cannot determine pgdat list for this kernel/architecture\n\n"); + return; - } -- } ++ } + } + } else + pgdat = vt->node_table[0].pgdat; -+ + +- if (CRASHDEBUG(1)) +- for (i = 0; i < si->c_num; i++) { +- fprintf(fp, "si->addrlist[%d]: %lx\n", +- i, si->addrlist[i]); +- } + if (initialize && (pgdat == UNINITIALIZED)) { + error(WARNING, "cannot initialize pgdat list\n\n"); + return; - } --} ++ } --/* -- * Populate the percpu object list for a given slab. -- */ +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, free_inuse_hdr); + for (n = 0, badaddr = FALSE; pgdat; n++) { + if (n >= vt->numnodes) + error(FATAL, "numnodes out of sync with pgdat_list?\n"); --static void --gather_cpudata_list_v1(struct meminfo *si) --{ -- int i, j; -- int avail; -- ulong cpudata[NR_CPUS]; +- /* For on-slab bufctls, c_offset is the distance between the start of +- * an obj and its related bufctl. For off-slab bufctls, c_offset is +- * the distance between objs in the slab. 
+- */ + nt = &vt->node_table[n]; -- if (INVALID_MEMBER(kmem_cache_s_cpudata)) -- return; +- if (si->c_flags & SLAB_CFLGS_BUFCTL) { +- for (i = 0, next = si->s_index; i < si->c_num; i++, next++){ +- obj = si->s_mem + +- ((next - si->s_index) * si->c_offset); +- DUMP_SLAB_OBJECT(); + readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, + sizeof(int), "pglist node_id", FAULT_ON_ERROR); - -- readmem(si->cache+OFFSET(kmem_cache_s_cpudata), -- KVADDR, &cpudata[0], -- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), -- "cpudata array", FAULT_ON_ERROR); ++ + if (VALID_MEMBER(pglist_data_node_mem_map)) { + readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, + &node_mem_map, sizeof(ulong), @@ -40745,11 +41143,18 @@ + } else { + node_mem_map = BADADDR; + badaddr = TRUE; -+ } + } +- } else { +- /* +- * Get the "real" s_mem, i.e., without the offset stripped off. +- * It contains the address of the first object. +- */ +- readmem(si->slab+OFFSET(kmem_slab_s_s_mem), +- KVADDR, &obj, sizeof(ulong), +- "s_mem", FAULT_ON_ERROR); -- for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && -- cpudata[i]; i++) { -- BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); +- for (i = 0; i < si->c_num; i++) { +- DUMP_SLAB_OBJECT(); + if (VALID_MEMBER(pglist_data_node_start_paddr)) + readmem(pgdat+OFFSET(pglist_data_node_start_paddr), + KVADDR, &node_start_paddr, sizeof(ulong), @@ -40767,17 +41172,12 @@ + } + } else error(INFO, + "cannot determine zone starting physical address\n"); - -- readmem(cpudata[i]+OFFSET(cpucache_s_avail), -- KVADDR, &avail, sizeof(int), -- "cpucache avail", FAULT_ON_ERROR); ++ + if (VALID_MEMBER(pglist_data_node_start_mapnr)) + readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), + KVADDR, &node_start_mapnr, sizeof(ulong), + "pglist node_start_mapnr", FAULT_ON_ERROR); - -- if (!avail) -- continue; ++ + if (VALID_MEMBER(pglist_data_node_size)) + readmem(pgdat+OFFSET(pglist_data_node_size), + KVADDR, &node_size, sizeof(ulong), @@ -40788,22 +41188,26 @@ + "pglist node_spanned_pages", FAULT_ON_ERROR); + node_size = node_spanned_pages; + } else error(INFO, "cannot determine zone size\n"); - -- if (avail > vt->kmem_max_limit) { -- error(INFO, -- "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n", -- si->curname, avail, vt->kmem_max_limit); -- si->errors++; ++ + if (VALID_MEMBER(pglist_data_node_present_pages)) + readmem(pgdat+OFFSET(pglist_data_node_present_pages), + KVADDR, &node_present_pages, sizeof(ulong), + "pglist node_present_pages", FAULT_ON_ERROR); + else + node_present_pages = 0; -+ + +- if (si->flags & ADDRESS_SPECIFIED) { +- bufctl = obj + si->c_offset; + readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata, + sizeof(ulong), "pglist bdata", FAULT_ON_ERROR); -+ + +- if ((si->spec_addr >= bufctl) && +- (si->spec_addr < +- (bufctl + SIZE(kmem_bufctl_t)))) { +- si->found = KMEM_BUFCTL_ADDR; +- return; +- } +- } + if (initialize) { + nt->node_id = id; + nt->pgdat = pgdat; @@ -40815,7 +41219,8 @@ + nt->mem_map = node_mem_map; + nt->start_paddr = node_start_paddr; + nt->start_mapnr = node_start_mapnr; -+ + +- obj += (si->c_offset + SIZE(kmem_bufctl_t)); + if (CRASHDEBUG(1)) { + fprintf(fp, "node_table[%d]: \n", n); + fprintf(fp, " id: %d\n", nt->node_id); @@ -40827,10 +41232,14 @@ + fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); + } } +- } -- if (CRASHDEBUG(2)) -- fprintf(fp, "%s: cpu[%d] avail: %d\n", -- si->curname, i, avail); +- if (cnt != expected) { +- error(INFO, +- "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", +- si->curname, 
expected, cnt); +- si->errors++; +- } + if (!initialize) { + if (n) { + fprintf(fp, "\n"); @@ -40844,10 +41253,7 @@ + mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"), + mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES")); -- readmem(cpudata[i]+SIZE(cpucache_s), -- KVADDR, si->cpudata[i], -- sizeof(void *) * avail, -- "cpucache avail", FAULT_ON_ERROR); +-} + node_zones = pgdat + OFFSET(pglist_data_node_zones); + sprintf(buf5, " %2d %s %s %s %s\n", id, + mkstring(buf1, 8, CENTER|LJUST|LONG_DEC, @@ -40860,11 +41266,6 @@ + MKSTR(node_zones))); + fprintf(fp, "%s", buf5); -- if (CRASHDEBUG(2)) -- for (j = 0; j < avail; j++) -- fprintf(fp, " %lx\n", si->cpudata[i][j]); -- } --} + j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) + + count_leading_spaces(buf4); + for (i = 1; i < vt->nr_zones; i++) { @@ -40894,20 +41295,21 @@ + } -/* -- * Updated for 2.6 slab percpu data structure. +- * dump_slab_objects() adapted for newer percpu slab format. - */ --static void --gather_cpudata_list_v2(struct meminfo *si) --{ -- int i, j; -- int avail; -- ulong cpudata[NR_CPUS]; + node_zones = pgdat + OFFSET(pglist_data_node_zones); + cum_zone_size = 0; + for (i = 0; i < vt->nr_zones; i++) { + if (CRASHDEBUG(7)) + fprintf(fp, "zone %d at %lx\n", i, node_zones); -+ + +-static void +-dump_slab_objects_percpu(struct meminfo *si) +-{ +- int i, j; +- int on_free_list, on_cpudata_list; +- ulong cnt, expected; +- ulong obj; + if (VALID_MEMBER(zone_struct_size)) + readmem(node_zones+OFFSET(zone_struct_size), + KVADDR, &zone_size, sizeof(ulong), @@ -40923,7 +41325,9 @@ + "zone spanned_pages", FAULT_ON_ERROR); + } else error(FATAL, + "zone_struct has neither size nor memsize field\n"); -+ + +- if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) +- return; + readmem(node_zones+ + OFFSET_OPTION(zone_struct_free_pages, + zone_free_pages), KVADDR, &free_pages, @@ -40967,7 +41371,9 @@ + "node_zones zone_start_pfn", + FAULT_ON_ERROR); + zone_start_paddr = PTOB(zone_start_pfn); -+ + +- cnt = 0; +- expected = si->s_inuse; + if (IS_SPARSEMEM()) { + zone_mem_map = 0; + zone_start_mapnr = 0; @@ -40975,7 +41381,12 @@ + phys = PTOB(zone_start_pfn); + zone_start_mapnr = phys/PAGESIZE(); + } -+ + +- if (CRASHDEBUG(1)) +- for (i = 0; i < si->c_num; i++) { +- fprintf(fp, "si->addrlist[%d]: %lx\n", +- i, si->addrlist[i]); +- } + } else if (!(vt->flags & NODES) && + INVALID_MEMBER(zone_zone_mem_map)) { + readmem(pgdat+OFFSET(pglist_data_node_mem_map), @@ -40990,10 +41401,8 @@ + "node_zones zone_mem_map", + FAULT_ON_ERROR); -- readmem(si->cache+OFFSET(kmem_cache_s_array), -- KVADDR, &cpudata[0], -- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), -- "array_cache array", FAULT_ON_ERROR); +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, free_inuse_hdr); + if (zone_mem_map) + zone_start_mapnr = + (zone_mem_map - node_mem_map) / @@ -41002,9 +41411,9 @@ + zone_start_mapnr = 0; + } -- for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && -- cpudata[i]; i++) { -- BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); +- for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { +- on_free_list = FALSE; +- on_cpudata_list = FALSE; + if (IS_SPARSEMEM()) { + zone_mem_map = 0; + if (zone_size) { @@ -41029,9 +41438,12 @@ + sizeof(ulong), "node_zones zone_mem_map", + FAULT_ON_ERROR); -- readmem(cpudata[i]+OFFSET(array_cache_avail), -- KVADDR, &avail, sizeof(int), -- "array cache avail", FAULT_ON_ERROR); +- for (j = 0; j < si->c_num; j++) { +- if (obj == si->addrlist[j]) { +- on_free_list = TRUE; +- break; +- } +- } + 
if (!initialize) { + fprintf(fp, " %2d %-9s %7ld ", + i, buf1, zone_size); @@ -41046,15 +41458,14 @@ + MKSTR(zone_start_mapnr))); + } -- if (!avail) -- continue; +- on_cpudata_list = check_cpudata_list(si, obj); + node_zones += SIZE_OPTION(zone_struct, zone); + } -- if (avail > vt->kmem_max_limit) { +- if (on_free_list && on_cpudata_list) { - error(INFO, -- "\"%s\" cache: array_cache.avail %d greater than limit %ld\n", -- si->curname, avail, vt->kmem_max_limit); +- "\"%s\" cache: object %lx on both free and cpudata lists\n", +- si->curname, obj); - si->errors++; + if (initialize) { + if (vt->flags & NODES_ONLINE) { @@ -41077,49 +41488,74 @@ + else + pgdat = 0; } +- +- if (on_free_list) { +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, " %lx\n", obj); +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INOBJECT(si->spec_addr, obj)) { +- si->found = +- KMEM_OBJECT_ADDR_FREE; +- return; +- } +- } +- } else if (on_cpudata_list) { +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, " %lx (cpu %d cache)\n", obj, +- si->cpu); +- cnt++; +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INOBJECT(si->spec_addr, obj)) { +- si->found = +- KMEM_OBJECT_ADDR_CACHED; +- return; +- } +- } +- } else { +- if (!(si->flags & ADDRESS_SPECIFIED)) +- fprintf(fp, " [%lx]\n", obj); +- cnt++; +- if (si->flags & ADDRESS_SPECIFIED) { +- if (INOBJECT(si->spec_addr, obj)) { +- si->found = +- KMEM_OBJECT_ADDR_INUSE; +- return; +- } +- } +- } + } - ++ + if (n != vt->numnodes) { - if (CRASHDEBUG(2)) -- fprintf(fp, "%s: cpu[%d] avail: %d\n", -- si->curname, i, avail); -- -- readmem(cpudata[i]+SIZE(array_cache), -- KVADDR, si->cpudata[i], -- sizeof(void *) * avail, -- "array_cache avail", FAULT_ON_ERROR); ++ if (CRASHDEBUG(2)) + error(NOTE, "changing numnodes from %d to %d\n", + vt->numnodes, n); + vt->numnodes = n; -+ } + } -- if (CRASHDEBUG(2)) -- for (j = 0; j < avail; j++) -- fprintf(fp, " %lx\n", si->cpudata[i][j]); +- if (cnt != expected) { +- error(INFO, +- "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", +- si->curname, expected, cnt); +- si->errors++; - } + if (!initialize && IS_SPARSEMEM()) + dump_mem_sections(); } -/* -- * Check whether a given address is contained in the previously-gathered -- * percpu object cache. +- * Determine how many of the "inuse" slab objects are actually cached +- * in the kmem_cache_s header. Set the per-slab count and update the +- * cumulative per-cache count. - */ - --static int --check_cpudata_list(struct meminfo *si, ulong obj) -+static void + static void +-gather_slab_cached_count(struct meminfo *si) +dump_zone_stats(void) { -- int i, j; +- int i; +- ulong obj; - -- for (i = 0; i < vt->kmem_max_cpus; i++) { -- for (j = 0; si->cpudata[i][j]; j++) -- if (si->cpudata[i][j] == obj) { -- si->cpu = i; -- return TRUE; -- } -- } +- si->cpucached_slab = 0; + int i, n; + ulong pgdat, node_zones; + char *zonebuf; @@ -41136,50 +41572,49 @@ + zonebuf = GETBUF(SIZE_OPTION(zone_struct, zone)); + vm_stat_init(); -- return FALSE; +- for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { +- if (check_cpudata_list(si, obj)) { +- si->cpucached_slab++; +- if (si->flags & SLAB_GET_COUNTS) { +- si->cpucached_cache++; +- } +- } +- } -} + for (n = 0; pgdat; n++) { + node_zones = pgdat + OFFSET(pglist_data_node_zones); +-/* +- * Populate the percpu object list for a given slab. +- */ + for (i = 0; i < vt->nr_zones; i++) { --/* -- * Search the various memory subsystems for instances of this address. 
-- * Start with the most specific areas, ending up with at least the -- * mem_map page data. -- */ -static void --kmem_search(struct meminfo *mi) +-gather_cpudata_list_v1(struct meminfo *si) -{ -- struct syment *sp; -- struct meminfo tmp_meminfo; -- char buf[BUFSIZE]; -- ulong vaddr, orig_flags; -- physaddr_t paddr; -- ulong offset; +- int i, j; +- int avail; +- ulong cpudata[NR_CPUS]; + if (!readmem(node_zones, KVADDR, zonebuf, + SIZE_OPTION(zone_struct, zone), + "zone buffer", FAULT_ON_ERROR)) + break; -- switch (mi->memtype) -- { -- case KVADDR: -- vaddr = mi->spec_addr; -- break; +- if (INVALID_MEMBER(kmem_cache_s_cpudata)) +- return; + value1 = ULONG(zonebuf + + OFFSET_OPTION(zone_struct_name, zone_name)); -- case PHYSADDR: -- vaddr = mi->spec_addr < VTOP(vt->high_memory) ? -- PTOV(mi->spec_addr) : BADADDR; -- break; -- } +- readmem(si->cache+OFFSET(kmem_cache_s_cpudata), +- KVADDR, &cpudata[0], +- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), +- "cpudata array", FAULT_ON_ERROR); + if (!read_string(value1, buf1, BUFSIZE-1)) + sprintf(buf1, "(unknown) "); -- orig_flags = mi->flags; -- mi->retval = 0; +- for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && +- cpudata[i]; i++) { +- BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); + if (VALID_MEMBER(zone_struct_size)) + value1 = value6 = ULONG(zonebuf + + OFFSET(zone_struct_size)); @@ -41194,17 +41629,9 @@ + } else error(FATAL, + "zone struct has unknown size field\n"); -- /* -- * Check first for a possible symbolic display of the virtual -- * address associated with mi->spec_addr or PTOV(mi->spec_addr). -- */ -- if (((vaddr >= kt->stext) && (vaddr <= kt->end)) || -- IS_MODULE_VADDR(mi->spec_addr)) { -- if ((sp = value_search(vaddr, &offset))) { -- show_symbol(sp, offset, SHOW_LINENUM | SHOW_RADIX()); -- fprintf(fp, "\n"); -- } -- } +- readmem(cpudata[i]+OFFSET(cpucache_s_avail), +- KVADDR, &avail, sizeof(int), +- "cpucache avail", FAULT_ON_ERROR); + value2 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_min, + zone_struct_pages_min)); + value3 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_low, @@ -41259,13 +41686,8 @@ + goto next_zone; + } -- /* -- * Check for a valid mapped address. -- */ -- if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) { -- if (kvtop(NULL, mi->spec_addr, &paddr, 0)) { -- mi->flags = orig_flags; -- dump_vmlist(mi); +- if (!avail) +- continue; + if (VALID_MEMBER(zone_all_unreclaimable)) { + ivalue = UINT(zonebuf + + OFFSET(zone_all_unreclaimable)); @@ -41285,119 +41707,61 @@ + OFFSET(zone_pages_scanned)); + fprintf(fp, "PAGES_SCANNED: %ld ", value1); + } - fprintf(fp, "\n"); -- mi->spec_addr = paddr; -- mi->memtype = PHYSADDR; -- } -- goto mem_map; -- } -- /* -- * If the address is physical, check whether it's in vmalloc space. -- */ -- -- if (mi->memtype == PHYSADDR) { -- mi->flags = orig_flags; -- mi->flags |= GET_PHYS_TO_VMALLOC; -- mi->retval = 0; -- dump_vmlist(mi); -- mi->flags &= ~GET_PHYS_TO_VMALLOC; ++ fprintf(fp, "\n"); -- if (mi->retval) { -- if ((sp = value_search(mi->retval, &offset))) { -- show_symbol(sp, offset, -- SHOW_LINENUM | SHOW_RADIX()); -- fprintf(fp, "\n"); -- } -- dump_vmlist(mi); +- if (avail > vt->kmem_max_limit) { +- error(INFO, +- "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n", +- si->curname, avail, vt->kmem_max_limit); +- si->errors++; +next_zone: - fprintf(fp, "\n"); -- goto mem_map; ++ fprintf(fp, "\n"); + node_zones += SIZE_OPTION(zone_struct, zone); } -- } -- -- /* -- * Check whether the containing page belongs to the slab subsystem. 
-- */ -- mi->flags = orig_flags; -- mi->retval = 0; -- if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf)) { -- BZERO(&tmp_meminfo, sizeof(struct meminfo)); -- tmp_meminfo.spec_addr = vaddr; -- tmp_meminfo.memtype = KVADDR; -- tmp_meminfo.flags = mi->flags; -- vt->dump_kmem_cache(&tmp_meminfo); -- fprintf(fp, "\n"); -- } -- -- /* -- * Check free list. -- */ -- mi->flags = orig_flags; -- mi->retval = 0; -- vt->dump_free_pages(mi); -- if (mi->retval) -- fprintf(fp, "\n"); -- if (vt->page_hash_table) { -- /* -- * Check the page cache. -- */ -- mi->flags = orig_flags; -- mi->retval = 0; -- dump_page_hash_table(mi); -- if (mi->retval) -- fprintf(fp, "\n"); +- if (CRASHDEBUG(2)) +- fprintf(fp, "%s: cpu[%d] avail: %d\n", +- si->curname, i, avail); + if ((n+1) < vt->numnodes) + pgdat = vt->node_table[n+1].pgdat; + else + pgdat = 0; - } ++ } --mem_map: -- mi->flags = orig_flags; -- dump_mem_map(mi); -- -- if (!mi->retval) -- fprintf(fp, "%llx: address not found\n", mi->spec_addr); +- readmem(cpudata[i]+SIZE(cpucache_s), +- KVADDR, si->cpudata[i], +- sizeof(void *) * avail, +- "cpucache avail", FAULT_ON_ERROR); + FREEBUF(zonebuf); +- if (CRASHDEBUG(2)) +- for (j = 0; j < avail; j++) +- fprintf(fp, " %lx\n", si->cpudata[i][j]); +- } } /* -- * Determine whether an address is a page pointer from the mem_map[] array. -- * If the caller requests it, return the associated physical address. +- * Updated for 2.6 slab percpu data structure. + * Gather essential information regarding each memory node. */ --int --is_page_ptr(ulong addr, physaddr_t *phys) -+static void + static void +-gather_cpudata_list_v2(struct meminfo *si) +node_table_init(void) { - int n; -- ulong ppstart, ppend; -- struct node_table *nt; -- ulong pgnum, node_size; +- int i, j; +- int avail; +- ulong cpudata[NR_CPUS]; - -- for (n = 0; n < vt->numnodes; n++) { -- nt = &vt->node_table[n]; -- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) -- node_size = vt->max_mapnr; -- else -- node_size = nt->size; -- -- ppstart = nt->mem_map; -- ppend = ppstart + (node_size * SIZE(page)); -- -- if ((addr < ppstart) || (addr >= ppend)) -- continue; +- readmem(si->cache+OFFSET(kmem_cache_s_array), +- KVADDR, &cpudata[0], +- sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), +- "array_cache array", FAULT_ON_ERROR); ++ int n; + ulong pgdat; -- /* -- * We're in the mem_map range -- but it is a page pointer? -- */ -- if ((addr - ppstart) % SIZE(page)) -- return FALSE; +- for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && +- cpudata[i]; i++) { +- BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); + /* + * Override numnodes -- some kernels may leave it at 1 on a system + * with multiple memory nodes. 
@@ -41405,9 +41769,9 @@ + if ((vt->flags & NODES) && (VALID_MEMBER(pglist_data_node_next) || + VALID_MEMBER(pglist_data_pgdat_next))) { -- if (phys) { -- pgnum = (addr - nt->mem_map) / SIZE(page); -- *phys = (pgnum * PAGESIZE()) + nt->start_paddr; +- readmem(cpudata[i]+OFFSET(array_cache_avail), +- KVADDR, &avail, sizeof(int), +- "array cache avail", FAULT_ON_ERROR); + get_symbol_data("pgdat_list", sizeof(void *), &pgdat); + + for (n = 0; pgdat; n++) { @@ -41421,161 +41785,204 @@ + error(NOTE, "changing numnodes from %d to %d\n", + vt->numnodes, n); + vt->numnodes = n; - } ++ } + } else + vt->flags &= ~NODES; -- return TRUE; -- } -- -- return FALSE; +- if (!avail) +- continue; + if (!(vt->node_table = (struct node_table *) + malloc(sizeof(struct node_table) * vt->numnodes))) + error(FATAL, "cannot malloc node_table %s(%d nodes)", + vt->numnodes > 1 ? "array " : "", vt->numnodes); --#ifdef PRE_NODES -- ppstart = vt->mem_map; -- ppend = ppstart + (vt->total_pages * vt->page_struct_len); +- if (avail > vt->kmem_max_limit) { +- error(INFO, +- "\"%s\" cache: array_cache.avail %d greater than limit %ld\n", +- si->curname, avail, vt->kmem_max_limit); +- si->errors++; +- } + BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes); -- if ((addr < ppstart) || (addr >= ppend)) -- return FALSE; +- if (CRASHDEBUG(2)) +- fprintf(fp, "%s: cpu[%d] avail: %d\n", +- si->curname, i, avail); + dump_memory_nodes(MEMORY_NODES_INITIALIZE); -- if ((addr - ppstart) % vt->page_struct_len) -- return FALSE; +- readmem(cpudata[i]+SIZE(array_cache), +- KVADDR, si->cpudata[i], +- sizeof(void *) * avail, +- "array_cache avail", FAULT_ON_ERROR); + qsort((void *)vt->node_table, (size_t)vt->numnodes, + sizeof(struct node_table), compare_node_data); -- return TRUE; --#endif +- if (CRASHDEBUG(2)) +- for (j = 0; j < avail; j++) +- fprintf(fp, " %lx\n", si->cpudata[i][j]); +- } + if (CRASHDEBUG(2)) + dump_memory_nodes(MEMORY_NODES_DUMP); } /* -- * Return the physical address associated with this page pointer. +- * Check whether a given address is contained in the previously-gathered +- * percpu object cache. + * The comparison function must return an integer less than, + * equal to, or greater than zero if the first argument is + * considered to be respectively less than, equal to, or + * greater than the second. If two members compare as equal, + * their order in the sorted array is undefined. */ --static int --page_to_phys(ulong pp, physaddr_t *phys) -+ -+static int + + static int +-check_cpudata_list(struct meminfo *si, ulong obj) +compare_node_data(const void *v1, const void *v2) { -- return(is_page_ptr(pp, phys)); +- int i, j; + struct node_table *t1, *t2; -+ + +- for (i = 0; i < vt->kmem_max_cpus; i++) { +- for (j = 0; si->cpudata[i][j]; j++) +- if (si->cpudata[i][j] == obj) { +- si->cpu = i; +- return TRUE; +- } +- } + t1 = (struct node_table *)v1; + t2 = (struct node_table *)v2; -+ + +- return FALSE; + return (t1->node_id < t2->node_id ? -1 : + t1->node_id == t2->node_id ? 0 : 1); } /* -- * Return the page pointer associated with this physical address. +- * Search the various memory subsystems for instances of this address. +- * Start with the most specific areas, ending up with at least the +- * mem_map page data. + * Depending upon the processor, and whether we're running live or on a + * dumpfile, get the system page size. 
*/ --static int --phys_to_page(physaddr_t phys, ulong *pp) +-static void +-kmem_search(struct meminfo *mi) +uint +memory_page_size(void) { -- int n; -- ulong pgnum; -- struct node_table *nt; -- physaddr_t pstart, pend; -- ulong node_size; +- struct syment *sp; +- struct meminfo tmp_meminfo; +- char buf[BUFSIZE]; +- ulong vaddr, orig_flags; +- physaddr_t paddr; +- ulong offset; - -- for (n = 0; n < vt->numnodes; n++) { -- nt = &vt->node_table[n]; -- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) -- node_size = vt->max_mapnr; -- else -- node_size = nt->size; -- -- pstart = nt->start_paddr; -- pend = pstart + ((ulonglong)node_size * PAGESIZE()); -- -- if ((phys < pstart) || (phys >= pend)) -- continue; -- /* -- * We're in the physical range -- calculate the page. -- */ -- pgnum = BTOP(phys - pstart); -- *pp = nt->mem_map + (pgnum * SIZE(page)); -- -- return TRUE; -- } +- switch (mi->memtype) +- { +- case KVADDR: +- vaddr = mi->spec_addr; +- break; + uint psz; -- return FALSE; +- case PHYSADDR: +- vaddr = mi->spec_addr < VTOP(vt->high_memory) ? +- PTOV(mi->spec_addr) : BADADDR; +- break; +- } + if (machdep->pagesize) + return machdep->pagesize; --#ifdef PRE_NODES -- if (phys >= (vt->total_pages * PAGESIZE())) -- return FALSE; +- orig_flags = mi->flags; +- mi->retval = 0; + if (REMOTE_MEMSRC()) + return remote_page_size(); -- pgnum = PTOB(BTOP(phys)) / PAGESIZE(); -- *pp = vt->mem_map + (pgnum * vt->page_struct_len); -- -- return TRUE; --#endif --} +- /* +- * Check first for a possible symbolic display of the virtual +- * address associated with mi->spec_addr or PTOV(mi->spec_addr). +- */ +- if (((vaddr >= kt->stext) && (vaddr <= kt->end)) || +- IS_MODULE_VADDR(mi->spec_addr)) { +- if ((sp = value_search(vaddr, &offset))) { +- show_symbol(sp, offset, SHOW_LINENUM | SHOW_RADIX()); +- fprintf(fp, "\n"); +- } +- } + switch (pc->flags & MEMORY_SOURCES) + { + case DISKDUMP: + psz = diskdump_page_size(); + break; +- /* +- * Check for a valid mapped address. +- */ +- if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) { +- if (kvtop(NULL, mi->spec_addr, &paddr, 0)) { +- mi->flags = orig_flags; +- dump_vmlist(mi); +- fprintf(fp, "\n"); +- mi->spec_addr = paddr; +- mi->memtype = PHYSADDR; +- } +- goto mem_map; +- } +- /* +- * If the address is physical, check whether it's in vmalloc space. +- */ + case XENDUMP: + psz = xendump_page_size(); + break; --/* -- * Try to read a string of non-NULL characters from a memory location, -- * returning the number of characters read. -- */ --int --read_string(ulong kvaddr, char *buf, int maxlen) --{ -- char strbuf[MIN_PAGE_SIZE]; -- ulong kp; -- char *bufptr; -- long cnt, size; +- if (mi->memtype == PHYSADDR) { +- mi->flags = orig_flags; +- mi->flags |= GET_PHYS_TO_VMALLOC; +- mi->retval = 0; +- dump_vmlist(mi); +- mi->flags &= ~GET_PHYS_TO_VMALLOC; + case KDUMP: + psz = kdump_page_size(); + break; -- BZERO(buf, maxlen); -- BZERO(strbuf, MIN_PAGE_SIZE); +- if (mi->retval) { +- if ((sp = value_search(mi->retval, &offset))) { +- show_symbol(sp, offset, +- SHOW_LINENUM | SHOW_RADIX()); +- fprintf(fp, "\n"); +- } +- dump_vmlist(mi); +- fprintf(fp, "\n"); +- goto mem_map; +- } +- } + case NETDUMP: + psz = netdump_page_size(); + break; -- kp = kvaddr; -- bufptr = strbuf; -- size = maxlen; +- /* +- * Check whether the containing page belongs to the slab subsystem. 
+- */ +- mi->flags = orig_flags; +- mi->retval = 0; +- if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf)) { +- BZERO(&tmp_meminfo, sizeof(struct meminfo)); +- tmp_meminfo.spec_addr = vaddr; +- tmp_meminfo.memtype = KVADDR; +- tmp_meminfo.flags = mi->flags; +- vt->dump_kmem_cache(&tmp_meminfo); +- fprintf(fp, "\n"); +- } + case MCLXCD: + psz = (uint)mclx_page_size(); + break; -- while (size > 0) { -- cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); -- -- if (cnt > size) -- cnt = size; +- /* +- * Check free list. +- */ +- mi->flags = orig_flags; +- mi->retval = 0; +- vt->dump_free_pages(mi); +- if (mi->retval) +- fprintf(fp, "\n"); + case LKCD: +#if 0 /* REMIND: */ + psz = lkcd_page_size(); /* dh_dump_page_size is HW page size; should add dh_page_size */ @@ -41584,48 +41991,405 @@ +#endif + break; -- if (!readmem(kp, KVADDR, bufptr, cnt, -- "readstring characters", QUIET|RETURN_ON_ERROR)) -- break; +- if (vt->page_hash_table) { +- /* +- * Check the page cache. +- */ +- mi->flags = orig_flags; +- mi->retval = 0; +- dump_page_hash_table(mi); +- if (mi->retval) +- fprintf(fp, "\n"); +- } + case DEVMEM: + case MEMMOD: + psz = (uint)getpagesize(); + break; -- if (count_buffer_chars(bufptr, NULLCHAR, cnt)) -- break; +-mem_map: +- mi->flags = orig_flags; +- dump_mem_map(mi); + case S390D: + psz = s390_page_size(); + break; -- kp += cnt; -- bufptr += cnt; -- size -= cnt; +- if (!mi->retval) +- fprintf(fp, "%llx: address not found\n", mi->spec_addr); + default: + error(FATAL, "memory_page_size: invalid pc->flags: %lx\n", + pc->flags & MEMORY_SOURCES); - } ++ } -- strcpy(buf, strbuf); -- return (strlen(buf)); + return psz; } /* -- * "help -v" output +- * Determine whether an address is a page pointer from the mem_map[] array. +- * If the caller requests it, return the associated physical address. + * If the page size cannot be determined by the dumpfile (like kdump), + * and the processor default cannot be used, allow the force-feeding + * of a crash command-line page size option. */ +-int +-is_page_ptr(ulong addr, physaddr_t *phys) ++void ++force_page_size(char *s) + { +- int n; +- ulong ppstart, ppend; +- struct node_table *nt; +- ulong pgnum, node_size; ++ int k, err; ++ ulong psize; + +- for (n = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) +- node_size = vt->max_mapnr; +- else +- node_size = nt->size; ++ k = 1; ++ err = FALSE; + +- ppstart = nt->mem_map; +- ppend = ppstart + (node_size * SIZE(page)); ++ switch (LASTCHAR(s)) ++ { ++ case 'k': ++ case 'K': ++ LASTCHAR(s) = NULLCHAR; ++ if (!decimal(s, 0)) { ++ err = TRUE; ++ break; ++ } ++ k = 1024; + +- if ((addr < ppstart) || (addr >= ppend)) +- continue; ++ /* FALLTHROUGH */ + +- /* +- * We're in the mem_map range -- but it is a page pointer? +- */ +- if ((addr - ppstart) % SIZE(page)) +- return FALSE; ++ default: ++ if (decimal(s, 0)) ++ psize = dtol(s, QUIET|RETURN_ON_ERROR, &err); ++ else if (hexadecimal(s, 0)) ++ psize = htol(s, QUIET|RETURN_ON_ERROR, &err); ++ else ++ err = TRUE; ++ break; ++ } + +- if (phys) { +- pgnum = (addr - nt->mem_map) / SIZE(page); +- *phys = (pgnum * PAGESIZE()) + nt->start_paddr; +- } ++ if (err) ++ error(INFO, "invalid page size: %s\n", s); ++ else ++ machdep->pagesize = psize * k; ++} + +- return TRUE; +- } + +- return FALSE; ++/* ++ * Return the vmalloc address referenced by the first vm_struct ++ * on the vmlist. This can normally be used by the machine-specific ++ * xxx_vmalloc_start() routines. 
++ */ + +-#ifdef PRE_NODES +- ppstart = vt->mem_map; +- ppend = ppstart + (vt->total_pages * vt->page_struct_len); ++ulong ++first_vmalloc_address(void) ++{ ++ ulong vmlist, addr; + +- if ((addr < ppstart) || (addr >= ppend)) +- return FALSE; ++ get_symbol_data("vmlist", sizeof(void *), &vmlist); + +- if ((addr - ppstart) % vt->page_struct_len) +- return FALSE; ++ if (!vmlist) ++ return 0; + +- return TRUE; +-#endif ++ if (!readmem(vmlist+OFFSET(vm_struct_addr), KVADDR, &addr, ++ sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) ++ non_matching_kernel(); ++ ++ return addr; + } + + /* +- * Return the physical address associated with this page pointer. ++ * Return the current vmalloc address limit, storing it ++ * if it's a dumpfile. + */ +-static int +-page_to_phys(ulong pp, physaddr_t *phys) ++ ++static ulong ++last_vmalloc_address(void) + { +- return(is_page_ptr(pp, phys)); +-} ++ struct meminfo meminfo; ++ static ulong vmalloc_limit = 0; ++ ++ if (!vmalloc_limit) { ++ BZERO(&meminfo, sizeof(struct meminfo)); ++ meminfo.memtype = KVADDR; ++ meminfo.spec_addr = 0; ++ meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); ++ dump_vmlist(&meminfo); ++ vmalloc_limit = meminfo.retval; ++ } + ++ return vmalloc_limit; ++} + + /* +- * Return the page pointer associated with this physical address. ++ * Determine whether an identity-mapped virtual address ++ * refers to an existant physical page, and if not bump ++ * it up to the next node. + */ +-static int +-phys_to_page(physaddr_t phys, ulong *pp) ++static int ++next_identity_mapping(ulong vaddr, ulong *nextvaddr) + { + int n; +- ulong pgnum; + struct node_table *nt; +- physaddr_t pstart, pend; ++ ulonglong paddr, pstart, pend; + ulong node_size; + ++ paddr = VTOP(vaddr); ++ + for (n = 0; n < vt->numnodes; n++) { + nt = &vt->node_table[n]; + if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) + node_size = vt->max_mapnr; + else +- node_size = nt->size; ++ node_size = nt->size; + + pstart = nt->start_paddr; + pend = pstart + ((ulonglong)node_size * PAGESIZE()); + +- if ((phys < pstart) || (phys >= pend)) ++ /* ++ * Check the next node. ++ */ ++ if (paddr >= pend) ++ continue; ++ /* ++ * Bump up to the next node. ++ */ ++ if (paddr < pstart) { ++ *nextvaddr = PTOV(paddr); + continue; ++ } + /* +- * We're in the physical range -- calculate the page. ++ * We're in the physical range. + */ +- pgnum = BTOP(phys - pstart); +- *pp = nt->mem_map + (pgnum * SIZE(page)); +- ++ *nextvaddr = vaddr; + return TRUE; + } + + return FALSE; +- +-#ifdef PRE_NODES +- if (phys >= (vt->total_pages * PAGESIZE())) +- return FALSE; +- +- pgnum = PTOB(BTOP(phys)) / PAGESIZE(); +- *pp = vt->mem_map + (pgnum * vt->page_struct_len); +- +- return TRUE; +-#endif + } + + + /* +- * Try to read a string of non-NULL characters from a memory location, +- * returning the number of characters read. ++ * Return the L1 cache size in bytes, which can be found stored in the ++ * cache_cache. 
+ */ ++ + int +-read_string(ulong kvaddr, char *buf, int maxlen) ++l1_cache_size(void) + { +- char strbuf[MIN_PAGE_SIZE]; +- ulong kp; +- char *bufptr; +- long cnt, size; ++ ulong cache; ++ ulong c_align; ++ int colour_off; ++ int retval; + +- BZERO(buf, maxlen); +- BZERO(strbuf, MIN_PAGE_SIZE); ++ retval = -1; + +- kp = kvaddr; +- bufptr = strbuf; +- size = maxlen; ++ if (VALID_MEMBER(kmem_cache_s_c_align)) { ++ cache = symbol_value("cache_cache"); ++ readmem(cache+OFFSET(kmem_cache_s_c_align), ++ KVADDR, &c_align, sizeof(ulong), ++ "c_align", FAULT_ON_ERROR); ++ retval = (int)c_align; ++ } else if (VALID_MEMBER(kmem_cache_s_colour_off)) { ++ cache = symbol_value("cache_cache"); ++ readmem(cache+OFFSET(kmem_cache_s_colour_off), ++ KVADDR, &colour_off, sizeof(int), ++ "colour_off", FAULT_ON_ERROR); ++ retval = colour_off; ++ } + +- while (size > 0) { +- cnt = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); +- +- if (cnt > size) +- cnt = size; ++ return retval; ++} + +- if (!readmem(kp, KVADDR, bufptr, cnt, +- "readstring characters", QUIET|RETURN_ON_ERROR)) +- break; ++/* ++ * Multi-purpose routine used to query/control dumpfile memory usage. ++ */ ++int ++dumpfile_memory(int cmd) ++{ ++ int retval; + +- if (count_buffer_chars(bufptr, NULLCHAR, cnt)) +- break; ++ retval = 0; + +- kp += cnt; +- bufptr += cnt; +- size -= cnt; ++ if (!DUMPFILE()) ++ return retval; ++ ++ switch (cmd) ++ { ++ case DUMPFILE_MEM_USED: ++ if (REMOTE_DUMPFILE()) ++ retval = remote_memory_used(); ++ else if (pc->flags & NETDUMP) ++ retval = netdump_memory_used(); ++ else if (pc->flags & KDUMP) ++ retval = kdump_memory_used(); ++ else if (pc->flags & XENDUMP) ++ retval = xendump_memory_used(); ++ else if (pc->flags & DISKDUMP) ++ retval = diskdump_memory_used(); ++ else if (pc->flags & LKCD) ++ retval = lkcd_memory_used(); ++ else if (pc->flags & MCLXCD) ++ retval = vas_memory_used(); ++ else if (pc->flags & S390D) ++ retval = s390_memory_used(); ++ break; ++ ++ case DUMPFILE_FREE_MEM: ++ if (REMOTE_DUMPFILE()) ++ retval = remote_free_memory(); ++ else if (pc->flags & NETDUMP) ++ retval = netdump_free_memory(); ++ else if (pc->flags & KDUMP) ++ retval = kdump_free_memory(); ++ else if (pc->flags & XENDUMP) ++ retval = xendump_free_memory(); ++ else if (pc->flags & DISKDUMP) ++ retval = diskdump_free_memory(); ++ else if (pc->flags & LKCD) ++ retval = lkcd_free_memory(); ++ else if (pc->flags & MCLXCD) ++ retval = vas_free_memory(NULL); ++ else if (pc->flags & S390D) ++ retval = s390_free_memory(); ++ break; ++ ++ case DUMPFILE_MEM_DUMP: ++ if (REMOTE_DUMPFILE()) ++ retval = remote_memory_dump(0); ++ else if (pc->flags & NETDUMP) ++ retval = netdump_memory_dump(fp); ++ else if (pc->flags & KDUMP) ++ retval = kdump_memory_dump(fp); ++ else if (pc->flags & XENDUMP) ++ retval = xendump_memory_dump(fp); ++ else if (pc->flags & DISKDUMP) ++ retval = diskdump_memory_dump(fp); ++ else if (pc->flags & LKCD) ++ retval = lkcd_memory_dump(set_lkcd_fp(fp)); ++ else if (pc->flags & MCLXCD) ++ retval = vas_memory_dump(fp); ++ else if (pc->flags & S390D) ++ retval = s390_memory_dump(fp); ++ break; ++ ++ case DUMPFILE_ENVIRONMENT: ++ if (pc->flags & LKCD) { ++ set_lkcd_fp(fp); ++ dump_lkcd_environment(0); ++ } else if (pc->flags & REM_LKCD) ++ retval = remote_memory_dump(VERBOSE); ++ break; + } + +- strcpy(buf, strbuf); +- return (strlen(buf)); ++ return retval; + } + +-/* +- * "help -v" output ++/* ++ * Functions for sparse mem support + */ ++ulong ++sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr) ++{ ++ return coded_mem_map 
+ ++ (section_nr_to_pfn(section_nr) * SIZE(page)); ++} ++ void -dump_vm_table(int verbose) -+force_page_size(char *s) ++sparse_mem_init(void) { - int i; - struct node_table *nt; - int others; -+ int k, err; -+ ulong psize; ++ ulong addr; ++ ulong mem_section_size; - others = 0; - fprintf(fp, " flags: %lx %s(", @@ -41658,18 +42422,7 @@ - if ((i % 4) == 0) - fprintf(fp, "\n "); - fprintf(fp, "%lx ", vt->kernel_pgd[i]); -+ k = 1; -+ err = FALSE; -+ -+ switch (LASTCHAR(s)) -+ { -+ case 'k': -+ case 'K': -+ LASTCHAR(s) = NULLCHAR; -+ if (!decimal(s, 0)) { -+ err = TRUE; -+ break; - } +- } - fprintf(fp, "\n"); - } - fprintf(fp, " high_memory: %lx\n", vt->high_memory); @@ -41702,320 +42455,13 @@ - fprintf(fp, " mem_map: %lx\n", nt->mem_map); - fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); - fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); -- } -+ k = 1024; - -- fprintf(fp, " dump_free_pages: "); -- if (vt->dump_free_pages == dump_free_pages) -- fprintf(fp, "dump_free_pages()\n"); -- else if (vt->dump_free_pages == dump_free_pages_zones_v1) -- fprintf(fp, "dump_free_pages_zones_v1()\n"); -- else if (vt->dump_free_pages == dump_free_pages_zones_v2) -- fprintf(fp, "dump_free_pages_zones_v2()\n"); -- else if (vt->dump_free_pages == dump_multidimensional_free_pages) -- fprintf(fp, "dump_multidimensional_free_pages()\n"); -- else -- fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages); -+ /* FALLTHROUGH */ - -- fprintf(fp, " dump_kmem_cache: "); -- if (vt->dump_kmem_cache == dump_kmem_cache) -- fprintf(fp, "dump_kmem_cache()\n"); -- else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1) -- fprintf(fp, "dump_kmem_cache_percpu_v1()\n"); -- else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2) -- fprintf(fp, "dump_kmem_cache_percpu_v2()\n"); -- else -- fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache); -- fprintf(fp, " slab_data: %lx\n", (ulong)vt->slab_data); -- if (verbose) -- dump_saved_slab_data(); -- fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles); -- fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read); -- fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct); -+ default: -+ if (decimal(s, 0)) -+ psize = dtol(s, QUIET|RETURN_ON_ERROR, &err); -+ else if (hexadecimal(s, 0)) -+ psize = htol(s, QUIET|RETURN_ON_ERROR, &err); -+ else -+ err = TRUE; -+ break; -+ } - -- dump_vma_cache(VERBOSE); -+ if (err) -+ error(INFO, "invalid page size: %s\n", s); -+ else -+ machdep->pagesize = psize * k; - } - -+ - /* -- * Calculate the amount of memory referenced in the kernel-specific "nodes". -+ * Return the vmalloc address referenced by the first vm_struct -+ * on the vmlist. This can normally be used by the machine-specific -+ * xxx_vmalloc_start() routines. 
- */ --uint64_t --total_node_memory() --{ -- int i; -- struct node_table *nt; -- uint64_t total; -- -- for (i = total = 0; i < vt->numnodes; i++) { -- nt = &vt->node_table[i]; - -- if (CRASHDEBUG(1)) { -- console("node_table[%d]: \n", i); -- console(" id: %d\n", nt->node_id); -- console(" pgdat: %lx\n", nt->pgdat); -- console(" size: %ld\n", nt->size); -- console(" mem_map: %lx\n", nt->mem_map); -- console(" start_paddr: %lx\n", nt->start_paddr); -- console(" start_mapnr: %ld\n", nt->start_mapnr); -- } -+ulong -+first_vmalloc_address(void) -+{ -+ ulong vmlist, addr; - -- total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); -- } -+ get_symbol_data("vmlist", sizeof(void *), &vmlist); - -- return total; -+ if (!vmlist) -+ return 0; -+ -+ if (!readmem(vmlist+OFFSET(vm_struct_addr), KVADDR, &addr, -+ sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) -+ non_matching_kernel(); -+ -+ return addr; - } - - /* -- * Dump just the vm_area_struct cache table data so that it can be -- * called from above or for debug purposes. -+ * Return the L1 cache size in bytes, which can be found stored in the -+ * cache_cache. - */ --void --dump_vma_cache(ulong verbose) --{ -- int i; -- ulong vhits; - -- if (!verbose) -- goto show_hits; -+int -+l1_cache_size(void) -+{ -+ ulong cache; -+ ulong c_align; -+ int colour_off; -+ int retval; - -- for (i = 0; i < VMA_CACHE; i++) -- fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", -- i, vt->cached_vma[i], -- vt->cached_vma_hits[i]); -- fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); -- fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); -- fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); -- fflush(fp); -+ retval = -1; - --show_hits: -- if (vt->vma_cache_fills) { -- for (i = vhits = 0; i < VMA_CACHE; i++) -- vhits += vt->cached_vma_hits[i]; -+ if (VALID_MEMBER(kmem_cache_s_c_align)) { -+ cache = symbol_value("cache_cache"); -+ readmem(cache+OFFSET(kmem_cache_s_c_align), -+ KVADDR, &c_align, sizeof(ulong), -+ "c_align", FAULT_ON_ERROR); -+ retval = (int)c_align; -+ } else if (VALID_MEMBER(kmem_cache_s_colour_off)) { -+ cache = symbol_value("cache_cache"); -+ readmem(cache+OFFSET(kmem_cache_s_colour_off), -+ KVADDR, &colour_off, sizeof(int), -+ "colour_off", FAULT_ON_ERROR); -+ retval = colour_off; -+ } - -- fprintf(stderr, "%s vma hit rate: %2ld%% (%ld of %ld)\n", -- verbose ? "" : " ", -- (vhits * 100)/vt->vma_cache_fills, -- vhits, vt->vma_cache_fills); -- } -+ return retval; - } - - /* -- * Guess at the "real" amount of physical memory installed, formatting -- * it in a MB or GB based string. -+ * Multi-purpose routine used to query/control dumpfile memory usage. 
- */ --char * --get_memory_size(char *buf) -+int -+dumpfile_memory(int cmd) - { -- uint64_t total; -- ulong next_gig; --#ifdef OLDWAY -- ulong mbs, gbs; --#endif -- -- total = machdep->memory_size(); -+ int retval; - -- if ((next_gig = roundup(total, GIGABYTES(1)))) { -- if ((next_gig - total) <= MEGABYTES(64)) -- total = next_gig; -- } -+ retval = 0; - -- return (pages_to_size((ulong)(total/PAGESIZE()), buf)); -+ if (!DUMPFILE()) -+ return retval; - --#ifdef OLDWAY -- gbs = (ulong)(total/GIGABYTES(1)); -- mbs = (ulong)(total/MEGABYTES(1)); -- if (gbs) -- mbs = (total % GIGABYTES(1))/MEGABYTES(1); -+ switch (cmd) -+ { -+ case DUMPFILE_MEM_USED: -+ if (REMOTE_DUMPFILE()) -+ retval = remote_memory_used(); -+ else if (pc->flags & NETDUMP) -+ retval = netdump_memory_used(); -+ else if (pc->flags & KDUMP) -+ retval = kdump_memory_used(); -+ else if (pc->flags & XENDUMP) -+ retval = xendump_memory_used(); -+ else if (pc->flags & DISKDUMP) -+ retval = diskdump_memory_used(); -+ else if (pc->flags & LKCD) -+ retval = lkcd_memory_used(); -+ else if (pc->flags & MCLXCD) -+ retval = vas_memory_used(); -+ else if (pc->flags & S390D) -+ retval = s390_memory_used(); -+ break; - -- if (total%MEGABYTES(1)) -- mbs++; -+ case DUMPFILE_FREE_MEM: -+ if (REMOTE_DUMPFILE()) -+ retval = remote_free_memory(); -+ else if (pc->flags & NETDUMP) -+ retval = netdump_free_memory(); -+ else if (pc->flags & KDUMP) -+ retval = kdump_free_memory(); -+ else if (pc->flags & XENDUMP) -+ retval = xendump_free_memory(); -+ else if (pc->flags & DISKDUMP) -+ retval = diskdump_free_memory(); -+ else if (pc->flags & LKCD) -+ retval = lkcd_free_memory(); -+ else if (pc->flags & MCLXCD) -+ retval = vas_free_memory(NULL); -+ else if (pc->flags & S390D) -+ retval = s390_free_memory(); -+ break; - -- if (gbs) -- sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs); -- else -- sprintf(buf, "%ld MB", mbs); -+ case DUMPFILE_MEM_DUMP: -+ if (REMOTE_DUMPFILE()) -+ retval = remote_memory_dump(0); -+ else if (pc->flags & NETDUMP) -+ retval = netdump_memory_dump(fp); -+ else if (pc->flags & KDUMP) -+ retval = kdump_memory_dump(fp); -+ else if (pc->flags & XENDUMP) -+ retval = xendump_memory_dump(fp); -+ else if (pc->flags & DISKDUMP) -+ retval = diskdump_memory_dump(fp); -+ else if (pc->flags & LKCD) -+ retval = lkcd_memory_dump(set_lkcd_fp(fp)); -+ else if (pc->flags & MCLXCD) -+ retval = vas_memory_dump(fp); -+ else if (pc->flags & S390D) -+ retval = s390_memory_dump(fp); -+ break; -+ -+ case DUMPFILE_ENVIRONMENT: -+ if (pc->flags & LKCD) { -+ set_lkcd_fp(fp); -+ dump_lkcd_environment(0); -+ } else if (pc->flags & REM_LKCD) -+ retval = remote_memory_dump(VERBOSE); -+ break; -+ } - -- return buf; --#endif -+ return retval; - } - --/* -- * For use by architectures not having machine-specific manners for -- * best determining physical memory size. -- */ --uint64_t --generic_memory_size(void) -+/* -+ * Functions for sparse mem support -+ */ -+ulong -+sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr) - { -- if (machdep->memsize) -- return machdep->memsize; -- -- return (machdep->memsize = total_node_memory()); -+ return coded_mem_map + -+ (section_nr_to_pfn(section_nr) * SIZE(page)); - } - --/* -- * Determine whether a virtual address is user or kernel or ambiguous. 
-- */ --int --vaddr_type(ulong vaddr, struct task_context *tc) -+void -+sparse_mem_init(void) - { -- int memtype, found; -+ ulong addr; -+ ulong mem_section_size; - -- if (!tc) -- tc = CURRENT_CONTEXT(); -- memtype = found = 0; + if (!IS_SPARSEMEM()) + return; - -- if (machdep->is_uvaddr(vaddr, tc)) { -- memtype |= UVADDR; -- found++; -- } ++ + MEMBER_OFFSET_INIT(mem_section_section_mem_map, "mem_section", + "section_mem_map"); + STRUCT_SIZE_INIT(mem_section, "mem_section"); - -- if (machdep->is_kvaddr(vaddr)) { -- memtype |= KVADDR; -- found++; ++ + if (!MAX_PHYSMEM_BITS()) + error(FATAL, + "CONFIG_SPARSEMEM kernels not supported for this architecture\n"); @@ -42032,10 +42478,17 @@ + mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS(); } -- if (found == 1) -- return memtype; +- fprintf(fp, " dump_free_pages: "); +- if (vt->dump_free_pages == dump_free_pages) +- fprintf(fp, "dump_free_pages()\n"); +- else if (vt->dump_free_pages == dump_free_pages_zones_v1) +- fprintf(fp, "dump_free_pages_zones_v1()\n"); +- else if (vt->dump_free_pages == dump_free_pages_zones_v2) +- fprintf(fp, "dump_free_pages_zones_v2()\n"); +- else if (vt->dump_free_pages == dump_multidimensional_free_pages) +- fprintf(fp, "dump_multidimensional_free_pages()\n"); - else -- return AMBIGUOUS; +- fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages); + if (CRASHDEBUG(1)) { + fprintf(fp, "PAGESIZE=%d\n",PAGESIZE()); + fprintf(fp,"mem_section_size = %ld\n", mem_section_size); @@ -42054,46 +42507,64 @@ + addr = symbol_value("mem_section"); + readmem(addr, KVADDR,vt->mem_sec ,mem_section_size, + "memory section root table", FAULT_ON_ERROR); - } ++} --/* -- * Determine the first valid user space address -- */ --static int --address_space_start(struct task_context *tc, ulong *addr) +- fprintf(fp, " dump_kmem_cache: "); +- if (vt->dump_kmem_cache == dump_kmem_cache) +- fprintf(fp, "dump_kmem_cache()\n"); +- else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1) +- fprintf(fp, "dump_kmem_cache_percpu_v1()\n"); +- else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2) +- fprintf(fp, "dump_kmem_cache_percpu_v2()\n"); +- else +- fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache); +- fprintf(fp, " slab_data: %lx\n", (ulong)vt->slab_data); +- if (verbose) +- dump_saved_slab_data(); +- fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles); +- fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read); +- fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct); +char * +read_mem_section(ulong addr) - { -- ulong vma; -- char *vma_buf; ++{ + if (!IS_KVADDR(addr)) + return 0; + + readmem(addr, KVADDR, vt->mem_section, SIZE(mem_section), + "memory section", FAULT_ON_ERROR); -- if (!tc->mm_struct) -- return FALSE; +- dump_vma_cache(VERBOSE); + return vt->mem_section; -+} + } -- fill_mm_struct(tc->mm_struct); -- vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); -- if (!vma) -- return FALSE; -- vma_buf = fill_vma_cache(vma); -- *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); -- -- return TRUE; +-/* +- * Calculate the amount of memory referenced in the kernel-specific "nodes". 
+- */ +-uint64_t +-total_node_memory() +ulong +nr_to_section(ulong nr) -+{ + { +- int i; +- struct node_table *nt; +- uint64_t total; + ulong addr; + ulong *mem_sec = vt->mem_sec; -+ + +- for (i = total = 0; i < vt->numnodes; i++) { +- nt = &vt->node_table[i]; + if (!IS_KVADDR(mem_sec[SECTION_NR_TO_ROOT(nr)])) + return 0; -+ + +- if (CRASHDEBUG(1)) { +- console("node_table[%d]: \n", i); +- console(" id: %d\n", nt->node_id); +- console(" pgdat: %lx\n", nt->pgdat); +- console(" size: %ld\n", nt->size); +- console(" mem_map: %lx\n", nt->mem_map); +- console(" start_paddr: %lx\n", nt->start_paddr); +- console(" start_mapnr: %ld\n", nt->start_mapnr); +- } + if (IS_SPARSEMEM_EX()) + addr = mem_sec[SECTION_NR_TO_ROOT(nr)] + + (nr & SECTION_ROOT_MASK()) * SIZE(mem_section); @@ -42101,74 +42572,40 @@ + addr = symbol_value("mem_section") + + (SECTIONS_PER_ROOT() * SECTION_NR_TO_ROOT(nr) + + (nr & SECTION_ROOT_MASK())) * SIZE(mem_section); -+ + +- total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); +- } + if (!IS_KVADDR(addr)) + return 0; -+ + +- return total; + return addr; } /* -- * Search for a given value between a starting and ending address range, -- * applying an optional mask for "don't care" bits. As an alternative -- * to entering the starting address value, -k means "start of kernel address -- * space". For processors with ambiguous user/kernel address spaces, -- * -u or -k must be used (with or without -s) as a differentiator. +- * Dump just the vm_area_struct cache table data so that it can be +- * called from above or for debug purposes. + * We use the lower bits of the mem_map pointer to store + * a little bit of information. There should be at least + * 3 bits here due to 32-bit alignment. */ -void --cmd_search(void) --{ -- int c; -- ulong start, end, mask, memtype, len; -- ulong uvaddr_end; -- int sflag; -- struct meminfo meminfo; -- ulong value_array[MAXARGS]; -- struct syment *sp; -- -- start = end = mask = sflag = memtype = len = 0; -- uvaddr_end = COMMON_VADDR_SPACE() ? 
(ulong)(-1) : machdep->kvbase; -- BZERO(value_array, sizeof(ulong) * MAXARGS); +-dump_vma_cache(ulong verbose) +#define SECTION_MARKED_PRESENT (1UL<<0) +#define SECTION_HAS_MEM_MAP (1UL<<1) +#define SECTION_MAP_LAST_BIT (1UL<<2) +#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) - -- while ((c = getopt(argcnt, args, "l:uks:e:v:m:")) != EOF) { -- switch(c) -- { -- case 'u': -- if (!sflag) { -- address_space_start(CURRENT_CONTEXT(),&start); -- sflag++; -- } -- memtype = UVADDR; -- sflag++; -- break; - -- case 'k': -- if (!sflag) { -- start = machdep->kvbase; -- sflag++; -- } -- memtype = KVADDR; -- sflag++; -- break; ++ ++ +int +valid_section(ulong addr) -+{ + { +- int i; +- ulong vhits; + char *mem_section; -- case 's': -- if ((sp = symbol_search(optarg))) -- start = sp->value; -- else -- start = htol(optarg, FAULT_ON_ERROR, NULL); -- sflag++; -- break; +- if (!verbose) +- goto show_hits; + if ((mem_section = read_mem_section(addr))) + return (ULONG(mem_section + + OFFSET(mem_section_section_mem_map)) && @@ -42176,20 +42613,23 @@ + return 0; +} -- case 'e': -- if ((sp = symbol_search(optarg))) -- end = sp->value; -- else -- end = htol(optarg, FAULT_ON_ERROR, NULL); -- break; +- for (i = 0; i < VMA_CACHE; i++) +- fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", +- i, vt->cached_vma[i], +- vt->cached_vma_hits[i]); +- fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); +- fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); +- fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); +- fflush(fp); +int +section_has_mem_map(ulong addr) +{ + char *mem_section; -- case 'l': -- len = stol(optarg, FAULT_ON_ERROR, NULL); -- break; +-show_hits: +- if (vt->vma_cache_fills) { +- for (i = vhits = 0; i < VMA_CACHE; i++) +- vhits += vt->cached_vma_hits[i]; + if ((mem_section = read_mem_section(addr))) + return (ULONG(mem_section + + OFFSET(mem_section_section_mem_map)) @@ -42197,20 +42637,17 @@ + return 0; +} -- case 'm': -- mask = htol(optarg, FAULT_ON_ERROR, NULL); -- break; +- fprintf(stderr, "%s vma hit rate: %2ld%% (%ld of %ld)\n", +- verbose ? "" : " ", +- (vhits * 100)/vt->vma_cache_fills, +- vhits, vt->vma_cache_fills); +- } +ulong +section_mem_map_addr(ulong addr) +{ + char *mem_section; + ulong map; - -- default: -- argerrs++; -- break; -- } -- } ++ + if ((mem_section = read_mem_section(addr))) { + map = ULONG(mem_section + + OFFSET(mem_section_section_mem_map)); @@ -42218,45 +42655,35 @@ + return map; + } + return 0; -+} + } -- if (argerrs || !sflag || !args[optind] || (len && end)) -- cmd_usage(pc->curcmd, SYNOPSIS); - -- if (!memtype) -- memtype = vaddr_type(start, CURRENT_CONTEXT()); +-/* +- * Guess at the "real" amount of physical memory installed, formatting +- * it in a MB or GB based string. 
+- */ +-char * +-get_memory_size(char *buf) ++ +ulong +valid_section_nr(ulong nr) -+{ + { +- uint64_t total; +- ulong next_gig; +-#ifdef OLDWAY +- ulong mbs, gbs; +-#endif + ulong addr = nr_to_section(nr); -- switch (memtype) -- { -- case UVADDR: -- if (!IS_UVADDR(start, CURRENT_CONTEXT())) { -- error(INFO, "invalid user virtual address: %lx\n", -- start); -- cmd_usage(pc->curcmd, SYNOPSIS); -- } -- break; +- total = machdep->memory_size(); + if (valid_section(addr)) + return addr; -- case KVADDR: -- if (!IS_KVADDR(start)) { -- error(INFO, "invalid kernel virtual address: %lx\n", -- start); -- cmd_usage(pc->curcmd, SYNOPSIS); -- } -- break; +- if ((next_gig = roundup(total, GIGABYTES(1)))) { +- if ((next_gig - total) <= MEGABYTES(64)) +- total = next_gig; + return 0; +} - -- case AMBIGUOUS: -- error(INFO, -- "ambiguous virtual address: %lx (requires -u or -k)\n", -- start); -- cmd_usage(pc->curcmd, SYNOPSIS); ++ +ulong +pfn_to_map(ulong pfn) +{ @@ -42276,29 +42703,15 @@ + return mem_map; } -- if (!end && !len) { -- switch (memtype) -- { -- case UVADDR: -- end = uvaddr_end; -- break; +- return (pages_to_size((ulong)(total/PAGESIZE()), buf)); + return 0; +} -- case KVADDR: -- if (vt->vmalloc_start < machdep->identity_map_base) -- end = (ulong)(-1); -- else { -- meminfo.memtype = KVADDR; -- meminfo.spec_addr = 0; -- meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); -- dump_vmlist(&meminfo); -- end = meminfo.retval; -- } -- break; -- } -- } else if (len) -- end = start + len; +-#ifdef OLDWAY +- gbs = (ulong)(total/GIGABYTES(1)); +- mbs = (ulong)(total/MEGABYTES(1)); +- if (gbs) +- mbs = (total % GIGABYTES(1))/MEGABYTES(1); +void +dump_mem_sections(void) +{ @@ -42310,23 +42723,14 @@ + char buf3[BUFSIZE]; + char buf4[BUFSIZE]; -- switch (memtype) -- { -- case UVADDR: -- if (end > uvaddr_end) { -- error(INFO, -- "address range starts in user space and ends kernel space\n"); -- cmd_usage(pc->curcmd, SYNOPSIS); -- } -- /* FALLTHROUGH */ -- case KVADDR: -- if (end < start) { -- error(INFO, -- "ending address %lx is below starting address %lx\n", -- end, start); -- cmd_usage(pc->curcmd, SYNOPSIS); +- if (total%MEGABYTES(1)) +- mbs++; + nr_mem_sections = NR_MEM_SECTIONS(); -+ + +- if (gbs) +- sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs); +- else +- sprintf(buf, "%ld MB", mbs); + fprintf(fp, "\n"); + pad_line(fp, BITS32() ? 59 : 67, '-'); + fprintf(fp, "\n\nNR %s %s %s PFN\n", @@ -42353,16 +42757,12 @@ + LONG_DEC|LJUST, MKSTR(pfn)) : + mkstring(buf4, VADDR_PRLEN, + LONG_HEX|LJUST, MKSTR(pfn))); - } -- break; - } ++ } ++ } +} -- c = 0; -- while (args[optind]) { -- value_array[c] = htol(args[optind], FAULT_ON_ERROR, NULL); -- c++; -- optind++; +- return buf; +-#endif +void +list_mem_sections(void) +{ @@ -42381,39 +42781,34 @@ + section_nr_to_pfn(nr), + sparse_decode_mem_map(coded_mem_map,nr)); + } - } -- -- search(start, end, mask, memtype, value_array, c); ++ } } /* -- * Do the work for cmd_search(). +- * For use by architectures not having machine-specific manners for +- * best determining physical memory size. +- */ +-uint64_t +-generic_memory_size(void) + * For kernels containing the node_online_map or node_states[], + * return the number of online node bits set. 
- */ ++ */ +static int +get_nodes_online(void) -+{ + { +- if (machdep->memsize) +- return machdep->memsize; + int i, len, online; + struct gnu_request req; + ulong *maskptr; + long N_ONLINE; + ulong mapaddr; --#define SEARCHMASK(X) ((X) | mask) +- return (machdep->memsize = total_node_memory()); + if (!symbol_exists("node_online_map") && + !symbol_exists("node_states")) + return 0; - --static void --search(ulong start, ulong end, ulong mask, int memtype, ulong *value, int vcnt) --{ -- int i, j; -- ulong pp, next, *ubp; -- int wordcnt, lastpage; -- ulong page; -- physaddr_t paddr; -- char *pagebuf; ++ + if (LKCD_KERNTYPES()) { + if ((len = STRUCT_SIZE("nodemask_t")) < 0) + error(FATAL, "cannot determine type nodemask_t\n"); @@ -42446,59 +42841,49 @@ + maskptr = (ulong *)vt->node_online_map; + for (i = 0; i < vt->node_online_map_len; i++, maskptr++) + online += count_bits_long(*maskptr); - -- if (start & (sizeof(long)-1)) { -- start &= ~(sizeof(long)-1); -- error(INFO, "rounding down start address to: %lx\n", start); ++ + if (CRASHDEBUG(1)) { + fprintf(fp, "node_online_map: ["); + for (i = 0; i < vt->node_online_map_len; i++) + fprintf(fp, "%s%lx", i ? ", " : "", vt->node_online_map[i]); + fprintf(fp, "] -> nodes online: %d\n", online); - } - -- pagebuf = GETBUF(PAGESIZE()); -- next = start; ++ } ++ + if (online) + vt->numnodes = online; - -- for (pp = VIRTPAGEBASE(start); next < end; next = pp) { -- lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end)); -- if (LKCD_DUMPFILE()) -- set_lkcd_nohash(); ++ + return online; -+} + } -- switch (memtype) -- { -- case UVADDR: -- if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || -- !phys_to_page(paddr, &page)) { -- if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) -- return; -- continue; -- } -- break; -+/* + /* +- * Determine whether a virtual address is user or kernel or ambiguous. +- */ +-int +-vaddr_type(ulong vaddr, struct task_context *tc) + * Return the next node index, with "first" being the first acceptable node. + */ +static int +next_online_node(int first) -+{ + { +- int memtype, found; +- +- if (!tc) +- tc = CURRENT_CONTEXT(); +- memtype = found = 0; + int i, j, node; + ulong mask, *maskptr; -- case KVADDR: -- if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || -- !phys_to_page(paddr, &page)) { -- if (!next_kpage(pp, &pp)) -- return; -- continue; +- if (machdep->is_uvaddr(vaddr, tc)) { +- memtype |= UVADDR; +- found++; + if ((first/BITS_PER_LONG) >= vt->node_online_map_len) { + error(INFO, "next_online_node: %d is too large!\n", first); + return -1; -+ } -+ + } + +- if (machdep->is_kvaddr(vaddr)) { +- memtype |= KVADDR; +- found++; + maskptr = (ulong *)vt->node_online_map; + for (i = node = 0; i < vt->node_online_map_len; i++, maskptr++) { + mask = *maskptr; @@ -42506,44 +42891,34 @@ + if (mask & 1) { + if (node >= first) + return node; - } -- break; -- } ++ } + mask >>= 1; + } -+ } + } -- if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(), -- "search page", RETURN_ON_ERROR|QUIET)) { -- pp += PAGESIZE(); -- continue; -- } +- if (found == 1) +- return memtype; +- else +- return AMBIGUOUS; + return -1; -+} + } -- ubp = (ulong *)&pagebuf[next - pp]; -- if (lastpage) { -- if (end == (ulong)(-1)) -- wordcnt = PAGESIZE()/sizeof(long); -- else -- wordcnt = (end - next)/sizeof(long); -- } else -- wordcnt = (PAGESIZE() - (next - pp))/sizeof(long); -+/* + /* +- * Determine the first valid user space address + * Modify appropriately for architecture/kernel nuances. 
-+ */ + */ +-static int +-address_space_start(struct task_context *tc, ulong *addr) +static ulong +next_online_pgdat(int node) -+{ + { +- ulong vma; +- char *vma_buf; + char buf[BUFSIZE]; + ulong pgdat; -- for (i = 0; i < wordcnt; i++, ubp++, next += sizeof(long)) { -- for (j = 0; j < vcnt; j++) { -- if (SEARCHMASK(*ubp) == SEARCHMASK(value[j])) -- fprintf(fp, "%lx: %lx\n", next, *ubp); -- } -- } +- if (!tc->mm_struct) +- return FALSE; + /* + * Default -- look for type: struct pglist_data node_data[] + */ @@ -42558,9 +42933,14 @@ + if (get_symbol_type("node_data", NULL, NULL) != TYPE_CODE_ARRAY) + goto pgdat2; -- if (CRASHDEBUG(1)) -- if ((pp % (1024*1024)) == 0) -- console("%lx\n", pp); +- fill_mm_struct(tc->mm_struct); +- vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); +- if (!vma) +- return FALSE; +- vma_buf = fill_vma_cache(vma); +- *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); +- +- return TRUE; + open_tmpfile(); + sprintf(buf, "whatis node_data"); + if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { @@ -42573,8 +42953,7 @@ + break; + } + close_tmpfile(); - -- pp += PAGESIZE(); ++ + if ((!strstr(buf, "struct pglist_data *") && + !strstr(buf, "pg_data_t *")) || + (count_chars(buf, '[') != 1) || @@ -42615,9 +42994,8 @@ + (count_chars(buf, '[') != 1) || + (count_chars(buf, ']') != 1)) + goto pgdat3; - } --} - ++ } ++ + if (!readmem(symbol_value("pgdat_list") + (node * sizeof(void *)), + KVADDR, &pgdat, sizeof(void *), "pgdat_list", RETURN_ON_ERROR) || + !IS_KVADDR(pgdat)) @@ -42630,36 +43008,62 @@ + return symbol_value("contig_page_data"); + + return 0; -+} + } /* -- * Return the next mapped user virtual address page that comes after -- * the passed-in address. +- * Search for a given value between a starting and ending address range, +- * applying an optional mask for "don't care" bits. As an alternative +- * to entering the starting address value, -k means "start of kernel address +- * space". For processors with ambiguous user/kernel address spaces, +- * -u or -k must be used (with or without -s) as a differentiator. + * Make the vm_stat[] array contents easily accessible. */ - static int --next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr) +-void +-cmd_search(void) ++static int +vm_stat_init(void) { -- ulong vma, total_vm; -- int found; -- char *vma_buf; -- ulong vm_start, vm_end; -- void *vm_next; +- int c; +- ulong start, end, mask, memtype, len; +- ulong uvaddr_end; +- int sflag; +- struct meminfo meminfo; +- ulong value_array[MAXARGS]; +- struct syment *sp; + char buf[BUFSIZE]; + char *arglist[MAXARGS]; + int i, c, stringlen, total; + struct gnu_request *req; + char *start; -- if (!tc->mm_struct) -- return FALSE; +- start = end = mask = sflag = memtype = len = 0; +- uvaddr_end = COMMON_VADDR_SPACE() ? 
(ulong)(-1) : machdep->kvbase; +- BZERO(value_array, sizeof(ulong) * MAXARGS); + if (vt->flags & VM_STAT) + return TRUE; -+ + +- while ((c = getopt(argcnt, args, "l:uks:e:v:m:")) != EOF) { +- switch(c) +- { +- case 'u': +- if (!sflag) { +- address_space_start(CURRENT_CONTEXT(),&start); +- sflag++; +- } +- memtype = UVADDR; +- sflag++; +- break; + if ((vt->nr_vm_stat_items == -1) || !symbol_exists("vm_stat")) + goto bailout; -+ + +- case 'k': +- if (!sflag) { +- start = machdep->kvbase; +- sflag++; +- } +- memtype = KVADDR; +- sflag++; +- break; + /* + * look for type: type = atomic_long_t [] + */ @@ -42687,7 +43091,14 @@ + break; + } + close_tmpfile(); -+ + +- case 's': +- if ((sp = symbol_search(optarg))) +- start = sp->value; +- else +- start = htol(optarg, FAULT_ON_ERROR, NULL); +- sflag++; +- break; + if (!strstr(buf, "atomic_long_t") || + (count_chars(buf, '[') != 1) || + (count_chars(buf, ']') != 1)) @@ -42701,9 +43112,17 @@ + req->flags = GNU_PRINT_ENUMERATORS; + gdb_interface(req); + FREEBUF(req); -+ + +- case 'e': +- if ((sp = symbol_search(optarg))) +- end = sp->value; +- else +- end = htol(optarg, FAULT_ON_ERROR, NULL); +- break; + stringlen = 1; -+ + +- case 'l': +- len = stol(optarg, FAULT_ON_ERROR, NULL); + rewind(pc->tmpfile); + while (fgets(buf, BUFSIZE, pc->tmpfile)) { + if (strstr(buf, "{") || strstr(buf, "}")) @@ -42712,20 +43131,40 @@ + c = parse_line(buf, arglist); + if (STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) { + vt->nr_vm_stat_items = atoi(arglist[2]); -+ break; + break; +- +- case 'm': +- mask = htol(optarg, FAULT_ON_ERROR, NULL); +- break; +- +- default: +- argerrs++; +- break; +- } + } else + stringlen += strlen(arglist[0]); -+ } -+ + } + +- if (argerrs || !sflag || !args[optind] || (len && end)) +- cmd_usage(pc->curcmd, SYNOPSIS); + total = stringlen + vt->nr_vm_stat_items + + (sizeof(void *) * vt->nr_vm_stat_items); + if (!(vt->vm_stat_items = (char **)malloc(total))) { + close_tmpfile(); + error(FATAL, "cannot malloc vm_stat_items cache\n"); + } -+ + +- if (!memtype) +- memtype = vaddr_type(start, CURRENT_CONTEXT()); + start = (char *)&vt->vm_stat_items[vt->nr_vm_stat_items]; -+ + +- switch (memtype) +- { +- case UVADDR: +- if (!IS_UVADDR(start, CURRENT_CONTEXT())) { +- error(INFO, "invalid user virtual address: %lx\n", +- start); +- cmd_usage(pc->curcmd, SYNOPSIS); + rewind(pc->tmpfile); + while (fgets(buf, BUFSIZE, pc->tmpfile)) { + if (strstr(buf, "{") || strstr(buf, "}")) @@ -42736,21 +43175,31 @@ + vt->vm_stat_items[i] = start; + strcpy(start, arglist[0]); + start += strlen(arglist[0]) + 1; -+ } + } +- break; + } + close_tmpfile(); -+ + +- case KVADDR: +- if (!IS_KVADDR(start)) { +- error(INFO, "invalid kernel virtual address: %lx\n", +- start); +- cmd_usage(pc->curcmd, SYNOPSIS); +- } +- break; + vt->flags |= VM_STAT; + return TRUE; -+ + +- case AMBIGUOUS: +- error(INFO, +- "ambiguous virtual address: %lx (requires -u or -k)\n", +- start); +- cmd_usage(pc->curcmd, SYNOPSIS); +bailout: + vt->nr_vm_stat_items = -1; + return FALSE; +} - -- fill_mm_struct(tc->mm_struct); -- vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); -- total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); ++ +/* + * Either dump all vm_stat entries, or return the value of + * the specified vm_stat item. 
Use the global counter unless @@ -42763,31 +43212,56 @@ + ulong *vp; + ulong location; + int i; - -- if (!vma || (total_vm == 0)) ++ + if (!vm_stat_init()) { + if (!item) + if (CRASHDEBUG(1)) + error(INFO, + "vm_stat not available in this kernel\n"); - return FALSE; -+ } ++ return FALSE; + } -- vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ -- -- for (found = FALSE; vma; vma = (ulong)vm_next) { -- vma_buf = fill_vma_cache(vma); +- if (!end && !len) { +- switch (memtype) +- { +- case UVADDR: +- end = uvaddr_end; +- break; + buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items); -- vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); -- vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); -- vm_next = VOID_PTR(vma_buf + OFFSET(vm_area_struct_vm_next)); +- case KVADDR: +- if (vt->vmalloc_start < machdep->identity_map_base) +- end = (ulong)(-1); +- else { +- meminfo.memtype = KVADDR; +- meminfo.spec_addr = 0; +- meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); +- dump_vmlist(&meminfo); +- end = meminfo.retval; +- } +- break; +- } +- } else if (len) +- end = start + len; + location = zone ? zone : symbol_value("vm_stat"); -- if (vaddr <= vm_start) { -- *nextvaddr = vm_start; -- return TRUE; +- switch (memtype) +- { +- case UVADDR: +- if (end > uvaddr_end) { +- error(INFO, +- "address range starts in user space and ends kernel space\n"); +- cmd_usage(pc->curcmd, SYNOPSIS); - } +- /* FALLTHROUGH */ +- case KVADDR: +- if (end < start) { +- error(INFO, +- "ending address %lx is below starting address %lx\n", +- end, start); +- cmd_usage(pc->curcmd, SYNOPSIS); +- } +- break; + readmem(location, KVADDR, buf, + sizeof(ulong) * vt->nr_vm_stat_items, + "vm_stat", FAULT_ON_ERROR); @@ -42799,36 +43273,43 @@ + for (i = 0; i < vt->nr_vm_stat_items; i++) + fprintf(fp, "%23s: %ld\n", vt->vm_stat_items[i], vp[i]); + return TRUE; -+ } + } -- if ((vaddr > vm_start) && (vaddr < vm_end)) { -- *nextvaddr = vaddr; +- c = 0; +- while (args[optind]) { +- value_array[c] = htol(args[optind], FAULT_ON_ERROR, NULL); +- c++; +- optind++; + vp = (ulong *)buf; + for (i = 0; i < vt->nr_vm_stat_items; i++) { + if (STREQ(vt->vm_stat_items[i], item)) { + *retval = vp[i]; - return TRUE; - } ++ return TRUE; ++ } } -@@ -9321,920 +13024,1044 @@ + +- search(start, end, mask, memtype, value_array, c); ++ return FALSE; } /* -- * Return the next kernel virtual address page that comes after -- * the passed-in address. +- * Do the work for cmd_search(). + * Dump the cumulative totals of the per_cpu__page_states counters. 
*/ --static int --next_kpage(ulong vaddr, ulong *nextvaddr) +- +-#define SEARCHMASK(X) ((X) | mask) +- +-static void +-search(ulong start, ulong end, ulong mask, int memtype, ulong *value, int vcnt) +int +dump_page_states(void) { -- int n; -- ulong paddr, vaddr_orig, node_size; -- struct node_table *nt; -- ulonglong pstart, pend; -- ulong vmalloc_limit; -- struct meminfo meminfo; +- int i, j; +- ulong pp, next, *ubp; +- int wordcnt, lastpage; +- ulong page; +- physaddr_t paddr; +- char *pagebuf; + struct syment *sp; + ulong addr, value; + int i, c, fd, len, instance, members; @@ -42841,72 +43322,78 @@ + struct stat stat; + char *namebuf, *nameptr; -- vaddr_orig = vaddr; -- vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ +- if (start & (sizeof(long)-1)) { +- start &= ~(sizeof(long)-1); +- error(INFO, "rounding down start address to: %lx\n", start); + if (!(sp = symbol_search("per_cpu__page_states"))) { + if (CRASHDEBUG(1)) + error(INFO, "per_cpu__page_states" + "not available in this kernel\n"); + return FALSE; -+ } + } -- if (vaddr < vaddr_orig) /* wrapped back to zero? */ -- return FALSE; +- pagebuf = GETBUF(PAGESIZE()); +- next = start; + instance = members = len = 0; -- meminfo.memtype = KVADDR; -- meminfo.spec_addr = 0; -- meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); -- dump_vmlist(&meminfo); -- vmalloc_limit = meminfo.retval; +- for (pp = VIRTPAGEBASE(start); next < end; next = pp) { +- lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end)); +- if (LKCD_DUMPFILE()) +- set_lkcd_nohash(); + sprintf(buf, "ptype struct page_state"); -- if (IS_VMALLOC_ADDR(vaddr_orig)) { -- if (IS_VMALLOC_ADDR(vaddr) && (vaddr < vmalloc_limit)) { -- *nextvaddr = vaddr; -- return TRUE; -- } +- switch (memtype) +- { +- case UVADDR: +- if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || +- !phys_to_page(paddr, &page)) { +- if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) +- return; +- continue; +- } +- break; + open_tmpfile(); + if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { + close_tmpfile(); + return FALSE; + } -- if (vt->vmalloc_start < machdep->identity_map_base) { -- *nextvaddr = machdep->identity_map_base; -- return TRUE; -- } +- case KVADDR: +- if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || +- !phys_to_page(paddr, &page)) { +- if (!next_kpage(pp, &pp)) +- return; +- continue; +- } +- break; +- } + fflush(pc->tmpfile); + fd = fileno(pc->tmpfile); + fstat(fd, &stat); + namebuf = GETBUF(stat.st_size); + nameptr = namebuf; - -- return FALSE; ++ + rewind(pc->tmpfile); + while (fgets(buf, BUFSIZE, pc->tmpfile)) { + if (strstr(buf, "struct page_state") || + strstr(buf, "}")) + continue; + members++; - } ++ } -- paddr = VTOP(vaddr); +- if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(), +- "search page", RETURN_ON_ERROR|QUIET)) { +- pp += PAGESIZE(); + entry_list = (struct entry *) + GETBUF(sizeof(struct entry) * members); - -- for (n = 0; n < vt->numnodes; n++) { -- nt = &vt->node_table[n]; -- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) -- node_size = vt->max_mapnr; -- else -- node_size = nt->size; ++ + rewind(pc->tmpfile); + i = 0; + while (fgets(buf, BUFSIZE, pc->tmpfile)) { + if (strstr(buf, "struct page_state") || + strstr(buf, "}")) -+ continue; + continue; +- } + strip_ending_char(strip_linefeeds(buf), ';'); + c = parse_line(buf, arglist); + strcpy(nameptr, arglist[c-1]); @@ -42918,26 +43405,26 @@ + } + close_tmpfile(); -- pstart = nt->start_paddr; -- pend = pstart + ((ulonglong)node_size * PAGESIZE()); +- ubp = (ulong *)&pagebuf[next - pp]; +- if (lastpage) { +- if (end == 
(ulong)(-1)) +- wordcnt = PAGESIZE()/sizeof(long); +- else +- wordcnt = (end - next)/sizeof(long); +- } else +- wordcnt = (PAGESIZE() - (next - pp))/sizeof(long); + open_tmpfile(); -- if ((paddr < pstart) || (paddr >= pend)) -- continue; -- /* -- * We're in the physical range. -- */ -- return TRUE; +- for (i = 0; i < wordcnt; i++, ubp++, next += sizeof(long)) { +- for (j = 0; j < vcnt; j++) { +- if (SEARCHMASK(*ubp) == SEARCHMASK(value[j])) +- fprintf(fp, "%lx: %lx\n", next, *ubp); +- } + for (c = 0; c < kt->cpus; c++) { + addr = sp->value + kt->__per_cpu_offset[c]; + dump_struct("page_state", addr, RADIX(16)); - } - -- if (vt->vmalloc_start > vaddr) { -- *nextvaddr = vt->vmalloc_start; -- return TRUE; -- } else -- return FALSE; ++ } ++ + i = 0; + rewind(pc->tmpfile); + while (fgets(buf, BUFSIZE, pc->tmpfile)) { @@ -42945,7 +43432,7 @@ + instance++; + i = 0; + continue; -+ } + } + if (strstr(buf, "}")) + continue; + strip_linefeeds(buf); @@ -42953,15 +43440,19 @@ + entry_list[i].value += value; + i++; + } -+ + +- if (CRASHDEBUG(1)) +- if ((pp % (1024*1024)) == 0) +- console("%lx\n", pp); + close_tmpfile(); -+ + +- pp += PAGESIZE(); + fprintf(fp, " PAGE_STATES:\n"); + for (i = 0; i < members; i++) { + sprintf(buf, "%s", entry_list[i].name); + fprintf(fp, "%s", mkstring(buf, len+2, RJUST, 0)); + fprintf(fp, ": %ld\n", entry_list[i].value); -+ } + } + + FREEBUF(namebuf); + FREEBUF(entry_list); @@ -42969,24 +43460,225 @@ + return TRUE; } + -/* -- * Display swap statistics. -+ +- * Return the next mapped user virtual address page that comes after +- * the passed-in address. +/* + * Dump the cumulative totals of the per_cpu__vm_event_state + * counters. */ --void --cmd_swap(void) +-static int +-next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr) +static int +dump_vm_event_state(void) { -- int c; +- ulong vma, total_vm; +- int found; +- char *vma_buf; +- ulong vm_start, vm_end; +- void *vm_next; +- +- if (!tc->mm_struct) +- return FALSE; +- +- fill_mm_struct(tc->mm_struct); +- vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); +- total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); + int i, c; + struct syment *sp; + ulong addr; + ulong *events, *cumulative; +- if (!vma || (total_vm == 0)) ++ if (!vm_event_state_init()) + return FALSE; + +- vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ +- +- for (found = FALSE; vma; vma = (ulong)vm_next) { +- vma_buf = fill_vma_cache(vma); ++ events = (ulong *)GETBUF((sizeof(ulong) * vt->nr_vm_event_items) * 2); ++ cumulative = &events[vt->nr_vm_event_items]; + +- vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); +- vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); +- vm_next = VOID_PTR(vma_buf + OFFSET(vm_area_struct_vm_next)); ++ sp = symbol_search("per_cpu__vm_event_states"); + +- if (vaddr <= vm_start) { +- *nextvaddr = vm_start; +- return TRUE; ++ for (c = 0; c < kt->cpus; c++) { ++ addr = sp->value + kt->__per_cpu_offset[c]; ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "[%d]: %lx\n", c, addr); ++ dump_struct("vm_event_state", addr, RADIX(16)); + } ++ readmem(addr, KVADDR, events, ++ sizeof(ulong) * vt->nr_vm_event_items, ++ "vm_event_states buffer", FAULT_ON_ERROR); ++ for (i = 0; i < vt->nr_vm_event_items; i++) ++ cumulative[i] += events[i]; ++ } + +- if ((vaddr > vm_start) && (vaddr < vm_end)) { +- *nextvaddr = vaddr; +- return TRUE; +- } +- } ++ fprintf(fp, "\n VM_EVENT_STATES:\n"); ++ for (i = 0; i < vt->nr_vm_event_items; i++) ++ fprintf(fp, "%23s: %ld\n", vt->vm_event_items[i], cumulative[i]); 
+ +- return FALSE; ++ FREEBUF(events); ++ ++ return TRUE; + } + +-/* +- * Return the next kernel virtual address page that comes after +- * the passed-in address. +- */ + static int +-next_kpage(ulong vaddr, ulong *nextvaddr) ++vm_event_state_init(void) + { +- int n; +- ulong paddr, vaddr_orig, node_size; +- struct node_table *nt; +- ulonglong pstart, pend; +- ulong vmalloc_limit; +- struct meminfo meminfo; ++ int i, c, stringlen, total; ++ long count; ++ struct gnu_request *req; ++ char *arglist[MAXARGS]; ++ char buf[BUFSIZE]; ++ char *start; + +- vaddr_orig = vaddr; +- vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ ++ if (vt->flags & VM_EVENT) ++ return TRUE; + +- if (vaddr < vaddr_orig) /* wrapped back to zero? */ +- return FALSE; ++ if ((vt->nr_vm_event_items == -1) || ++ !symbol_exists("per_cpu__vm_event_states")) ++ goto bailout; + +- meminfo.memtype = KVADDR; +- meminfo.spec_addr = 0; +- meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); +- dump_vmlist(&meminfo); +- vmalloc_limit = meminfo.retval; ++ if (!enumerator_value("NR_VM_EVENT_ITEMS", &count)) ++ return FALSE; + +- if (IS_VMALLOC_ADDR(vaddr_orig)) { +- if (IS_VMALLOC_ADDR(vaddr) && (vaddr < vmalloc_limit)) { +- *nextvaddr = vaddr; +- return TRUE; +- } ++ vt->nr_vm_event_items = count; + +- if (vt->vmalloc_start < machdep->identity_map_base) { +- *nextvaddr = machdep->identity_map_base; +- return TRUE; +- } ++ open_tmpfile(); ++ req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); ++ req->command = GNU_GET_DATATYPE; ++ req->name = "vm_event_item"; ++ req->flags = GNU_PRINT_ENUMERATORS; ++ gdb_interface(req); ++ FREEBUF(req); + +- return FALSE; +- } ++ stringlen = 1; + +- paddr = VTOP(vaddr); ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "{") || strstr(buf, "}")) ++ continue; ++ clean_line(buf); ++ c = parse_line(buf, arglist); ++ if (STREQ(arglist[0], "NR_VM_EVENT_ITEMS")) ++ break; ++ else ++ stringlen += strlen(arglist[0]); ++ } + +- for (n = 0; n < vt->numnodes; n++) { +- nt = &vt->node_table[n]; +- if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) +- node_size = vt->max_mapnr; +- else +- node_size = nt->size; ++ total = stringlen + vt->nr_vm_event_items + ++ (sizeof(void *) * vt->nr_vm_event_items); ++ if (!(vt->vm_event_items = (char **)malloc(total))) { ++ close_tmpfile(); ++ error(FATAL, "cannot malloc vm_event_items cache\n"); ++ } + +- pstart = nt->start_paddr; +- pend = pstart + ((ulonglong)node_size * PAGESIZE()); ++ start = (char *)&vt->vm_event_items[vt->nr_vm_event_items]; + +- if ((paddr < pstart) || (paddr >= pend)) ++ rewind(pc->tmpfile); ++ while (fgets(buf, BUFSIZE, pc->tmpfile)) { ++ if (strstr(buf, "{") || strstr(buf, "}")) + continue; +- /* +- * We're in the physical range. +- */ +- return TRUE; ++ c = parse_line(buf, arglist); ++ i = atoi(arglist[2]); ++ if (i < vt->nr_vm_event_items) { ++ vt->vm_event_items[i] = start; ++ strcpy(start, arglist[0]); ++ start += strlen(arglist[0]) + 1; ++ } + } ++ close_tmpfile(); + +- if (vt->vmalloc_start > vaddr) { +- *nextvaddr = vt->vmalloc_start; +- return TRUE; +- } else +- return FALSE; ++ vt->flags |= VM_EVENT; ++ return TRUE; ++ ++bailout: ++ vt->nr_vm_event_items = -1; ++ return FALSE; + } + ++ + /* +- * Display swap statistics. ++ * Support for slub.c slab cache. 
+ */ +-void +-cmd_swap(void) ++static void ++kmem_cache_init_slub(void) + { +- int c; ++ if (CRASHDEBUG(1) && ++ !(vt->flags & CONFIG_NUMA) && (vt->numnodes > 1)) ++ error(WARNING, ++ "kmem_cache_init_slub: numnodes: %d without CONFIG_NUMA\n", ++ vt->numnodes); + - while ((c = getopt(argcnt, args, "")) != EOF) { - switch(c) - { @@ -42995,193 +43687,14 @@ - break; - } - } -+ if (!vm_event_state_init()) -+ return FALSE; ++ vt->cpu_slab_type = MEMBER_TYPE("kmem_cache", "cpu_slab"); - if (argerrs) - cmd_usage(pc->curcmd, SYNOPSIS); -+ events = (ulong *)GETBUF((sizeof(ulong) * vt->nr_vm_event_items) * 2); -+ cumulative = &events[vt->nr_vm_event_items]; - -- dump_swap_info(VERBOSE, NULL, NULL); --} -+ sp = symbol_search("per_cpu__vm_event_states"); - --/* -- * Do the work for cmd_swap(). -- */ -+ for (c = 0; c < kt->cpus; c++) { -+ addr = sp->value + kt->__per_cpu_offset[c]; -+ if (CRASHDEBUG(1)) { -+ fprintf(fp, "[%d]: %lx\n", c, addr); -+ dump_struct("vm_event_state", addr, RADIX(16)); -+ } -+ readmem(addr, KVADDR, events, -+ sizeof(ulong) * vt->nr_vm_event_items, -+ "vm_event_states buffer", FAULT_ON_ERROR); -+ for (i = 0; i < vt->nr_vm_event_items; i++) -+ cumulative[i] += events[i]; -+ } - --#define SWP_USED 1 --#define SWAP_MAP_BAD 0x8000 -+ fprintf(fp, "\n VM_EVENT_STATES:\n"); -+ for (i = 0; i < vt->nr_vm_event_items; i++) -+ fprintf(fp, "%23s: %ld\n", vt->vm_event_items[i], cumulative[i]); - --char *swap_info_hdr = \ --"FILENAME TYPE SIZE USED PCT PRIORITY\n"; -+ FREEBUF(events); -+ -+ return TRUE; -+} - - static int --dump_swap_info(ulong swapflags, ulong *totalswap_pages, ulong *totalused_pages) -+vm_event_state_init(void) - { -- int i, j; -- int flags, swap_device, pages, prio, usedswap; -- ulong swap_file, max, swap_map, pct; -- ulong vfsmnt; -- ulong swap_info; -- ushort *map; -- ulong totalswap, totalused; -+ int i, c, stringlen, total; -+ long count; -+ struct gnu_request *req; -+ char *arglist[MAXARGS]; - char buf[BUFSIZE]; -+ char *start; - -- if (!symbol_exists("nr_swapfiles")) -- error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); -- -- if (!symbol_exists("swap_info")) -- error(FATAL, "swap_info doesn't exist in this kernel!\n"); -+ if (vt->flags & VM_EVENT) -+ return TRUE; - -- swap_info = symbol_value("swap_info"); -+ if ((vt->nr_vm_event_items == -1) || -+ !symbol_exists("per_cpu__vm_event_states")) -+ goto bailout; - -- if (swapflags & VERBOSE) -- fprintf(fp, swap_info_hdr); -+ if (!enumerator_value("NR_VM_EVENT_ITEMS", &count)) -+ return FALSE; - -- totalswap = totalused = 0; -+ vt->nr_vm_event_items = count; - -- for (i = 0; i < vt->nr_swapfiles; i++, -- swap_info += SIZE(swap_info_struct)){ -- fill_swap_info(swap_info); -+ open_tmpfile(); -+ req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); -+ req->command = GNU_GET_DATATYPE; -+ req->name = "vm_event_item"; -+ req->flags = GNU_PRINT_ENUMERATORS; -+ gdb_interface(req); -+ FREEBUF(req); - -- flags = INT(vt->swap_info_struct + -- OFFSET(swap_info_struct_flags)); -+ stringlen = 1; - -- if (!(flags & SWP_USED)) -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "{") || strstr(buf, "}")) - continue; -+ clean_line(buf); -+ c = parse_line(buf, arglist); -+ if (STREQ(arglist[0], "NR_VM_EVENT_ITEMS")) -+ break; -+ else -+ stringlen += strlen(arglist[0]); -+ } - -- swap_file = ULONG(vt->swap_info_struct + -- OFFSET(swap_info_struct_swap_file)); -+ total = stringlen + vt->nr_vm_event_items + -+ (sizeof(void *) * vt->nr_vm_event_items); -+ if (!(vt->vm_event_items = 
(char **)malloc(total))) { -+ close_tmpfile(); -+ error(FATAL, "cannot malloc vm_event_items cache\n"); -+ } - -- swap_device = INT(vt->swap_info_struct + -- OFFSET_OPTION(swap_info_struct_swap_device, -- swap_info_struct_old_block_size)); -+ start = (char *)&vt->vm_event_items[vt->nr_vm_event_items]; - -- pages = INT(vt->swap_info_struct + -- OFFSET(swap_info_struct_pages)); -+ rewind(pc->tmpfile); -+ while (fgets(buf, BUFSIZE, pc->tmpfile)) { -+ if (strstr(buf, "{") || strstr(buf, "}")) -+ continue; -+ c = parse_line(buf, arglist); -+ i = atoi(arglist[2]); -+ if (i < vt->nr_vm_event_items) { -+ vt->vm_event_items[i] = start; -+ strcpy(start, arglist[0]); -+ start += strlen(arglist[0]) + 1; -+ } -+ } -+ close_tmpfile(); - -- totalswap += pages; -- pages <<= (PAGESHIFT() - 10); -+ vt->flags |= VM_EVENT; -+ return TRUE; - -- prio = INT(vt->swap_info_struct + -- OFFSET(swap_info_struct_prio)); -+bailout: -+ vt->nr_vm_event_items = -1; -+ return FALSE; -+} - -- max = ULONG(vt->swap_info_struct + -- OFFSET(swap_info_struct_max)); - -- swap_map = ULONG(vt->swap_info_struct + -- OFFSET(swap_info_struct_swap_map)); -+/* -+ * Support for slub.c slab cache. -+ */ -+static void -+kmem_cache_init_slub(void) -+{ -+ if (CRASHDEBUG(1) && -+ !(vt->flags & CONFIG_NUMA) && (vt->numnodes > 1)) -+ error(WARNING, -+ "kmem_cache_init_slub: numnodes: %d without CONFIG_NUMA\n", -+ vt->numnodes); - -- if (swap_file) { -- if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { -- vfsmnt = ULONG(vt->swap_info_struct + -- OFFSET(swap_info_struct_swap_vfsmnt)); -- get_pathname(swap_file, buf, BUFSIZE, -- 1, vfsmnt); -- } else if (VALID_MEMBER -- (swap_info_struct_old_block_size)) { -- get_pathname(file_to_dentry(swap_file), -- buf, BUFSIZE, 1, 0); -- } else { -- get_pathname(swap_file, buf, BUFSIZE, 1, 0); -- } -- } else -- sprintf(buf, "(unknown)"); -+ vt->cpu_slab_type = MEMBER_TYPE("kmem_cache", "cpu_slab"); -+ + vt->flags |= KMEM_CACHE_INIT; +} -+ + +- dump_swap_info(VERBOSE, NULL, NULL); +static void +kmem_cache_list_slub(void) +{ @@ -43210,20 +43723,29 @@ + + FREEBUF(cache_list); + FREEBUF(cache_buf); -+} -+ + } + +-/* +- * Do the work for cmd_swap(). 
+- */ +#define DUMP_KMEM_CACHE_INFO_SLUB() dump_kmem_cache_info_slub(si) -+ + +-#define SWP_USED 1 +-#define SWAP_MAP_BAD 0x8000 +static void +dump_kmem_cache_info_slub(struct meminfo *si) +{ + char b1[BUFSIZE]; + char b2[BUFSIZE]; + int namelen, sizelen, spacelen; -+ + +-char *swap_info_hdr = \ +-"FILENAME TYPE SIZE USED PCT PRIORITY\n"; + fprintf(fp, "%s ", + mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); -+ + +-static int +-dump_swap_info(ulong swapflags, ulong *totalswap_pages, ulong *totalused_pages) + namelen = strlen(si->curname); + sprintf(b2, "%ld", si->objsize); + sizelen = strlen(b2); @@ -43242,51 +43764,52 @@ + } + + fprintf(fp, b1, si->inuse); - -- map = (ushort *)GETBUF(sizeof(ushort) * max); ++ + fprintf(fp, "%8ld %5ld %4ldk\n", + si->num_slabs * si->objects, + si->num_slabs, si->slabsize/1024); +} - -- if (!readmem(swap_map, KVADDR, map, -- sizeof(ushort) * max, "swap_info swap_map data", -- RETURN_ON_ERROR|QUIET)) { -- if (swapflags & RETURN_ON_ERROR) { -- *totalswap_pages = swap_map; -- *totalused_pages = i; -- return FALSE; -- } else -- error(FATAL, -- "swap_info[%d].swap_map at %lx is unaccessible\n", -- i, swap_map); ++ +static void +dump_kmem_cache_slub(struct meminfo *si) -+{ + { +- int i, j; +- int flags, swap_device, pages, prio, usedswap; +- ulong swap_file, max, swap_map, pct; +- ulong vfsmnt; +- ulong swap_info; +- ushort *map; +- ulong totalswap, totalused; + int i; + ulong name; + unsigned int size, objsize, objects, order, offset; + char *reqname, *p1; + char kbuf[BUFSIZE]; -+ char buf[BUFSIZE]; -+ + char buf[BUFSIZE]; + +- if (!symbol_exists("nr_swapfiles")) +- error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); + si->cache_count = get_kmem_cache_list(&si->cache_list); + si->cache_buf = GETBUF(SIZE(kmem_cache)); -+ + +- if (!symbol_exists("swap_info")) +- error(FATAL, "swap_info doesn't exist in this kernel!\n"); + if (!si->reqname && + !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) + fprintf(fp, kmem_cache_hdr); -+ + +- swap_info = symbol_value("swap_info"); + if (si->flags & ADDRESS_SPECIFIED) { + if ((p1 = is_slab_page(si, kbuf))) { + si->flags |= VERBOSE; + si->slab = (ulong)si->spec_addr; -+ } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf))) { ++ } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, ++ VERBOSE))) { + error(INFO, + "address is not allocated in slab subsystem: %lx\n", + si->spec_addr); + goto bailout; - } ++ } + + if (si->reqname && (si->reqname != p1)) + error(INFO, @@ -43296,21 +43819,14 @@ + } else + reqname = si->reqname; -- usedswap = 0; -- for (j = 0; j < max; j++) { -- switch (map[j]) -- { -- case SWAP_MAP_BAD: -- case 0: -- continue; -- default: -- usedswap++; -- } +- if (swapflags & VERBOSE) +- fprintf(fp, swap_info_hdr); + for (i = 0; i < si->cache_count; i++) { + if (!readmem(si->cache_list[i], KVADDR, si->cache_buf, + SIZE(kmem_cache), "kmem_cache buffer", RETURN_ON_ERROR)) + goto next_cache; -+ + +- totalswap = totalused = 0; + name = ULONG(si->cache_buf + OFFSET(kmem_cache_name)); + if (!read_string(name, buf, BUFSIZE-1)) + sprintf(buf, "(unknown)"); @@ -43323,18 +43839,19 @@ + fprintf(fp, "%lx %-18s [IGNORED]\n", + si->cache_list[i], buf); + goto next_cache; - } ++ } -- FREEBUF(map); +- for (i = 0; i < vt->nr_swapfiles; i++, +- swap_info += SIZE(swap_info_struct)){ +- fill_swap_info(swap_info); + objsize = UINT(si->cache_buf + OFFSET(kmem_cache_objsize)); + size = UINT(si->cache_buf + OFFSET(kmem_cache_size)); + objects = UINT(si->cache_buf + OFFSET(kmem_cache_objects)); + order = 
UINT(si->cache_buf + OFFSET(kmem_cache_order)); + offset = UINT(si->cache_buf + OFFSET(kmem_cache_offset)); -- totalused += usedswap; -- usedswap <<= (PAGESHIFT() - 10); -- pct = (usedswap * 100)/pages; +- flags = INT(vt->swap_info_struct + +- OFFSET(swap_info_struct_flags)); + si->cache = si->cache_list[i]; + si->curname = buf; + si->objsize = objsize; @@ -43346,18 +43863,9 @@ + if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, si) || + !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, si)) + goto next_cache; - -- if (swapflags & VERBOSE) -- fprintf(fp, "%-15s %s %7dk %7dk %2ld%% %d\n", -- buf, swap_device ? "PARTITION" : " FILE ", -- pages, usedswap, pct, prio); -- } ++ + DUMP_KMEM_CACHE_INFO_SLUB(); - -- if (totalswap_pages) -- *totalswap_pages = totalswap; -- if (totalused_pages) -- *totalused_pages = totalused; ++ + if (si->flags & ADDRESS_SPECIFIED) { + if (!si->slab) + si->slab = vaddr_to_slab(si->spec_addr); @@ -43367,30 +43875,28 @@ + if (!reqname && ((i+1) < si->cache_count)) + fprintf(fp, kmem_cache_hdr); + } - -- return TRUE; ++ +next_cache: + if (reqname) + break; + } -+ + +- if (!(flags & SWP_USED)) +- continue; +bailout: + FREEBUF(si->cache_list); + FREEBUF(si->cache_buf); - } ++} - /* -- * Translate a PTE into a swap device and offset string. -- */ --char * --swap_location(ulonglong pte, char *buf) +- swap_file = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_swap_file)); ++/* + * Emulate the total count calculation done by the + * slab_objects() sysfs function in slub.c. + */ +static int +get_kmem_cache_slub_data(long cmd, struct meminfo *si) - { -- char swapdev[BUFSIZE]; ++{ + int i, n, node; + ulong total_objects, total_slabs; + ulong cpu_slab_ptr, node_ptr; @@ -43400,8 +43906,9 @@ + short inuse; + ulong *nodes, *per_cpu; -- if (!pte) -- return NULL; +- swap_device = INT(vt->swap_info_struct + +- OFFSET_OPTION(swap_info_struct_swap_device, +- swap_info_struct_old_block_size)); + /* + * nodes[n] is not being used (for now) + * per_cpu[n] is a count of cpu_slab pages per node. @@ -43409,34 +43916,27 @@ + nodes = (ulong *)GETBUF(2 * sizeof(ulong) * vt->numnodes); + per_cpu = nodes + vt->numnodes; -- sprintf(buf, "%s OFFSET: %lld", -- get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte)); +- pages = INT(vt->swap_info_struct + +- OFFSET(swap_info_struct_pages)); + total_slabs = total_objects = 0; -- return buf; --} +- totalswap += pages; +- pages <<= (PAGESHIFT() - 10); + for (i = 0; i < kt->cpus; i++) { -+ cpu_slab_ptr = get_cpu_slab_ptr(si, i); ++ cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL); --/* -- * Given the type field from a PTE, return the name of the swap device. 
-- */ --static char * --get_swapdev(ulong type, char *buf) --{ -- unsigned int i, swap_info_len; -- ulong swap_info, swap_file; -- ulong vfsmnt; +- prio = INT(vt->swap_info_struct + +- OFFSET(swap_info_struct_prio)); + if (!cpu_slab_ptr) + continue; -- if (!symbol_exists("nr_swapfiles")) -- error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); +- max = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_max)); + if ((node = page_to_nid(cpu_slab_ptr)) < 0) + goto bailout; -- if (!symbol_exists("swap_info")) -- error(FATAL, "swap_info doesn't exist in this kernel!\n"); +- swap_map = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_swap_map)); + switch (cmd) + { + case GET_SLUB_OBJECTS: @@ -43447,7 +43947,21 @@ + total_objects += inuse; + break; -- swap_info = symbol_value("swap_info"); +- if (swap_file) { +- if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { +- vfsmnt = ULONG(vt->swap_info_struct + +- OFFSET(swap_info_struct_swap_vfsmnt)); +- get_pathname(swap_file, buf, BUFSIZE, +- 1, vfsmnt); +- } else if (VALID_MEMBER +- (swap_info_struct_old_block_size)) { +- get_pathname(file_to_dentry(swap_file), +- buf, BUFSIZE, 1, 0); +- } else { +- get_pathname(swap_file, buf, BUFSIZE, 1, 0); +- } +- } else +- sprintf(buf, "(unknown)"); + case GET_SLUB_SLABS: + total_slabs++; + break; @@ -43464,8 +43978,7 @@ + node_ptr = si->cache + + OFFSET(kmem_cache_local_node); -- swap_info_len = (i = ARRAY_LENGTH(swap_info)) ? -- i : get_array_length("swap_info", NULL, 0); +- map = (ushort *)GETBUF(sizeof(ushort) * max); + if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), + KVADDR, &node_nr_partial, sizeof(ulong), + "kmem_cache_node nr_partial", RETURN_ON_ERROR)) @@ -43475,28 +43988,156 @@ + "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) + goto bailout; -- sprintf(buf, "(unknown swap location)"); +- if (!readmem(swap_map, KVADDR, map, +- sizeof(ushort) * max, "swap_info swap_map data", +- RETURN_ON_ERROR|QUIET)) { +- if (swapflags & RETURN_ON_ERROR) { +- *totalswap_pages = swap_map; +- *totalused_pages = i; + switch (cmd) + { + case GET_SLUB_OBJECTS: + if ((p = count_partial(node_ptr)) < 0) -+ return FALSE; + return FALSE; +- } else +- error(FATAL, +- "swap_info[%d].swap_map at %lx is unaccessible\n", +- i, swap_map); +- } + total_objects += p; + break; -- if (type >= swap_info_len) -- return buf; +- usedswap = 0; +- for (j = 0; j < max; j++) { +- switch (map[j]) +- { +- case SWAP_MAP_BAD: +- case 0: +- continue; +- default: +- usedswap++; +- } + case GET_SLUB_SLABS: + total_slabs += node_nr_partial; ++ break; + } + +- FREEBUF(map); +- +- totalused += usedswap; +- usedswap <<= (PAGESHIFT() - 10); +- pct = (usedswap * 100)/pages; ++ full_slabs = node_nr_slabs - per_cpu[n] - node_nr_partial; ++ objects = INT(si->cache_buf + OFFSET(kmem_cache_objects)); + +- if (swapflags & VERBOSE) +- fprintf(fp, "%-15s %s %7dk %7dk %2ld%% %d\n", +- buf, swap_device ? "PARTITION" : " FILE ", +- pages, usedswap, pct, prio); +- } ++ switch (cmd) ++ { ++ case GET_SLUB_OBJECTS: ++ total_objects += (full_slabs * objects); ++ break; + +- if (totalswap_pages) +- *totalswap_pages = totalswap; +- if (totalused_pages) +- *totalused_pages = totalused; ++ case GET_SLUB_SLABS: ++ total_slabs += full_slabs; + break; + } +- return TRUE; +-} ++ if (!(vt->flags & CONFIG_NUMA)) ++ break; ++ } + +-/* +- * Translate a PTE into a swap device and offset string. 
+- */ +-char * +-swap_location(ulonglong pte, char *buf) +-{ +- char swapdev[BUFSIZE]; ++ switch (cmd) ++ { ++ case GET_SLUB_OBJECTS: ++ si->inuse = total_objects; ++ break; + +- if (!pte) +- return NULL; ++ case GET_SLUB_SLABS: ++ si->num_slabs = total_slabs; ++ break; ++ } + +- sprintf(buf, "%s OFFSET: %lld", +- get_swapdev(SWP_TYPE(pte), swapdev), SWP_OFFSET(pte)); ++ FREEBUF(nodes); ++ return TRUE; + +- return buf; ++bailout: ++ FREEBUF(nodes); ++ return FALSE; + } + +-/* +- * Given the type field from a PTE, return the name of the swap device. +- */ +-static char * +-get_swapdev(ulong type, char *buf) ++ ++static void ++do_kmem_cache_slub(struct meminfo *si) + { +- unsigned int i, swap_info_len; +- ulong swap_info, swap_file; +- ulong vfsmnt; ++ int i, n; ++ ulong cpu_slab_ptr, node_ptr; ++ ulong node_nr_partial, node_nr_slabs; ++ ulong *per_cpu; + +- if (!symbol_exists("nr_swapfiles")) +- error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); ++ per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes); + +- if (!symbol_exists("swap_info")) +- error(FATAL, "swap_info doesn't exist in this kernel!\n"); ++ for (i = 0; i < kt->cpus; i++) { ++ cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL); + +- swap_info = symbol_value("swap_info"); ++ fprintf(fp, "CPU %d SLAB:\n%s", i, ++ cpu_slab_ptr ? "" : " (empty)\n"); + +- swap_info_len = (i = ARRAY_LENGTH(swap_info)) ? +- i : get_array_length("swap_info", NULL, 0); ++ if (!cpu_slab_ptr) ++ continue; + +- sprintf(buf, "(unknown swap location)"); ++ if ((n = page_to_nid(cpu_slab_ptr)) >= 0) ++ per_cpu[n]++; + +- if (type >= swap_info_len) +- return buf; ++ si->slab = cpu_slab_ptr; ++ do_slab_slub(si, VERBOSE); + - swap_info += (SIZE(swap_info_struct) * type); - fill_swap_info(swap_info); - swap_file = ULONG(vt->swap_info_struct + - OFFSET(swap_info_struct_swap_file)); -+ full_slabs = node_nr_slabs - per_cpu[n] - node_nr_partial; -+ objects = INT(si->cache_buf + OFFSET(kmem_cache_objects)); ++ if (received_SIGINT()) ++ restart(0); ++ } - if (swap_file) { - if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { @@ -43508,114 +44149,8 @@ - buf, BUFSIZE, 1, 0); - } else { - get_pathname(swap_file, buf, BUFSIZE, 1, 0); -+ switch (cmd) -+ { -+ case GET_SLUB_OBJECTS: -+ total_objects += (full_slabs * objects); -+ break; -+ -+ case GET_SLUB_SLABS: -+ total_slabs += full_slabs; -+ break; - } +- } - } - -- return buf; --} -+ if (!(vt->flags & CONFIG_NUMA)) -+ break; -+ } - --/* -- * If not currently stashed, cache the passed-in swap_info_struct. -- */ --static void --fill_swap_info(ulong swap_info) --{ -- if (vt->last_swap_read == swap_info) -- return; -+ switch (cmd) -+ { -+ case GET_SLUB_OBJECTS: -+ si->inuse = total_objects; -+ break; - -- if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *) -- malloc(SIZE(swap_info_struct)))) -- error(FATAL, "cannot malloc swap_info_struct space\n"); -- -- readmem(swap_info, KVADDR, vt->swap_info_struct, SIZE(swap_info_struct), -- "fill_swap_info", FAULT_ON_ERROR); -+ case GET_SLUB_SLABS: -+ si->num_slabs = total_slabs; -+ break; -+ } - -- vt->last_swap_read = swap_info; -+ FREEBUF(nodes); -+ return TRUE; -+ -+bailout: -+ FREEBUF(nodes); -+ return FALSE; - } - --/* -- * If active, clear references to the swap_info references. 
-- */ --void --clear_swap_info_cache(void) -+ -+static void -+do_kmem_cache_slub(struct meminfo *si) - { -- if (ACTIVE()) -- vt->last_swap_read = 0; --} -+ int i, n; -+ ulong cpu_slab_ptr, node_ptr; -+ ulong node_nr_partial, node_nr_slabs; -+ ulong *per_cpu; - -+ per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes); - --/* -- * Translage a vm_area_struct and virtual address into a filename -- * and offset string. -- */ -+ for (i = 0; i < kt->cpus; i++) { -+ cpu_slab_ptr = get_cpu_slab_ptr(si, i); - --#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change! */ -+ fprintf(fp, "CPU %d SLAB:\n%s", i, -+ cpu_slab_ptr ? "" : " (empty)\n"); - --static char * --vma_file_offset(ulong vma, ulong vaddr, char *buf) --{ -- ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset; -- ulong vfsmnt; -- char file[BUFSIZE]; -- char *vma_buf, *file_buf; -+ if (!cpu_slab_ptr) -+ continue; - -- if (!vma) -- return NULL; -+ if ((n = page_to_nid(cpu_slab_ptr)) >= 0) -+ per_cpu[n]++; - -- vma_buf = fill_vma_cache(vma); -+ si->slab = cpu_slab_ptr; -+ do_slab_slub(si, VERBOSE); - -- vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); -+ if (received_SIGINT()) -+ restart(0); -+ } - -- if (!vm_file) -- goto no_file_offset; + for (n = 0; n < vt->numnodes; n++) { + if (vt->flags & CONFIG_NUMA) + node_ptr = ULONG(si->cache_buf + @@ -43625,8 +44160,8 @@ + node_ptr = si->cache + + OFFSET(kmem_cache_local_node); -- file_buf = fill_file_cache(vm_file); -- dentry = ULONG(file_buf + OFFSET(file_f_dentry)); +- return buf; +-} + if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), + KVADDR, &node_nr_partial, sizeof(ulong), + "kmem_cache_node nr_partial", RETURN_ON_ERROR)) @@ -43636,9 +44171,114 @@ + "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) + break; +-/* +- * If not currently stashed, cache the passed-in swap_info_struct. +- */ +-static void +-fill_swap_info(ulong swap_info) +-{ +- if (vt->last_swap_read == swap_info) +- return; ++ fprintf(fp, "KMEM_CACHE_NODE NODE SLABS PARTIAL PER-CPU\n"); + +- if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *) +- malloc(SIZE(swap_info_struct)))) +- error(FATAL, "cannot malloc swap_info_struct space\n"); +- +- readmem(swap_info, KVADDR, vt->swap_info_struct, SIZE(swap_info_struct), +- "fill_swap_info", FAULT_ON_ERROR); ++ fprintf(fp, "%lx%s", node_ptr, space(VADDR_PRLEN > 8 ? 2 : 10)); ++ fprintf(fp, "%4d %5ld %7ld %7ld\n", ++ n, node_nr_slabs, node_nr_partial, per_cpu[n]); + +- vt->last_swap_read = swap_info; +-} ++ do_node_lists_slub(si, node_ptr, n); + +-/* +- * If active, clear references to the swap_info references. +- */ +-void +-clear_swap_info_cache(void) +-{ +- if (ACTIVE()) +- vt->last_swap_read = 0; +-} ++ if (!(vt->flags & CONFIG_NUMA)) ++ break; ++ } + ++ fprintf(fp, "\n"); + +-/* +- * Translage a vm_area_struct and virtual address into a filename +- * and offset string. +- */ ++ FREEBUF(per_cpu); ++} + +-#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change! 
*/ ++#define DUMP_SLAB_INFO_SLUB() \ ++ { \ ++ char b1[BUFSIZE], b2[BUFSIZE]; \ ++ fprintf(fp, " %s %s %4d %5ld %9d %4ld\n", \ ++ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ ++ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), \ ++ node, si->objects, inuse, si->objects - inuse); \ ++ } + +-static char * +-vma_file_offset(ulong vma, ulong vaddr, char *buf) ++static void ++do_slab_slub(struct meminfo *si, int verbose) + { +- ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset; +- ulong vfsmnt; +- char file[BUFSIZE]; +- char *vma_buf, *file_buf; ++ physaddr_t paddr; ++ ulong vaddr; ++ ushort inuse; ++ ulong freelist, cpu_freelist, cpu_slab_ptr; ++ int i, cpu_slab, is_free, node; ++ ulong p, q; + +- if (!vma) +- return NULL; ++ if (!si->slab) { ++ if (CRASHDEBUG(1)) ++ error(INFO, "-S option not supported for CONFIG_SLUB\n"); ++ return; ++ } + +- vma_buf = fill_vma_cache(vma); ++ if (!page_to_phys(si->slab, &paddr)) { ++ error(WARNING, ++ "%lx: cannot tranlate slab page to physical address\n", ++ si->slab); ++ return; ++ } + +- vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); ++ node = page_to_nid(si->slab); + +- if (!vm_file) +- goto no_file_offset; ++ vaddr = PTOV(paddr); + +- file_buf = fill_file_cache(vm_file); +- dentry = ULONG(file_buf + OFFSET(file_f_dentry)); ++ if (verbose) ++ fprintf(fp, " %s", slab_hdr); + - if (!dentry) - goto no_file_offset; -+ fprintf(fp, "KMEM_CACHE_NODE NODE SLABS PARTIAL PER-CPU\n"); ++ if (!readmem(si->slab + OFFSET(page_inuse), KVADDR, &inuse, ++ sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) ++ return; ++ if (!readmem(si->slab + OFFSET(page_freelist), KVADDR, &freelist, ++ sizeof(void *), "page.freelist", RETURN_ON_ERROR)) ++ return; - file[0] = NULLCHAR; - if (VALID_MEMBER(file_f_vfsmnt)) { @@ -43646,21 +44286,37 @@ - get_pathname(dentry, file, BUFSIZE, 1, vfsmnt); - } else - get_pathname(dentry, file, BUFSIZE, 1, 0); -+ fprintf(fp, "%lx%s", node_ptr, space(VADDR_PRLEN > 8 ? 2 : 10)); -+ fprintf(fp, "%4d %5ld %7ld %7ld\n", -+ n, node_nr_slabs, node_nr_partial, per_cpu[n]); ++ if (!verbose) { ++ DUMP_SLAB_INFO_SLUB(); ++ return; ++ } - if (!strlen(file)) - goto no_file_offset; -+ do_node_lists_slub(si, node_ptr, n); ++ for (i = 0, cpu_slab = -1; i < kt->cpus; i++) { ++ cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist); - vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); -+ if (!(vt->flags & CONFIG_NUMA)) ++ if (!cpu_slab_ptr) ++ continue; ++ if (cpu_slab_ptr == si->slab) { ++ cpu_slab = i; ++ /* ++ * Later slub scheme uses the per-cpu freelist ++ * and keeps page->inuse maxed out, so count ++ * the free objects by hand. 
++ */ ++ if (cpu_freelist) ++ freelist = cpu_freelist; ++ if ((si->objects - inuse) == 0) ++ inuse = si->objects - ++ count_free_objects(si, freelist); + break; ++ } + } - vm_offset = vm_pgoff = 0xdeadbeef; -+ fprintf(fp, "\n"); ++ DUMP_SLAB_INFO_SLUB(); - if (VALID_MEMBER(vm_area_struct_vm_offset)) - vm_offset = ULONG(vma_buf + @@ -43670,75 +44326,79 @@ - OFFSET(vm_area_struct_vm_pgoff)); - else - goto no_file_offset; -+ FREEBUF(per_cpu); -+} ++ fprintf(fp, " %s", free_inuse_hdr); - if (vm_offset != 0xdeadbeef) - offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset; - else if (vm_pgoff != 0xdeadbeef) { - offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff; - offset <<= PAGE_CACHE_SHIFT; -+#define DUMP_SLAB_INFO_SLUB() \ -+ { \ -+ char b1[BUFSIZE], b2[BUFSIZE]; \ -+ fprintf(fp, " %s %s %4d %5ld %9d %4ld\n", \ -+ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ -+ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), \ -+ node, si->objects, inuse, si->objects - inuse); \ -+ } ++#define PAGE_MAPPING_ANON 1 + -+static void -+do_slab_slub(struct meminfo *si, int verbose) -+{ -+ physaddr_t paddr; -+ ulong vaddr; -+ ushort inuse; -+ ulong freelist, cpu_slab_ptr; -+ int i, cpu_slab, is_free, node; -+ ulong p, q; -+ -+ if (!si->slab) { -+ if (CRASHDEBUG(1)) -+ error(INFO, "-S option not supported for CONFIG_SLUB\n"); -+ return; ++ if (CRASHDEBUG(1)) { ++ fprintf(fp, "< SLUB: free list START: >\n"); ++ i = 0; ++ for (q = freelist; q; q = get_freepointer(si, (void *)q)) { ++ if (q & PAGE_MAPPING_ANON) { ++ fprintf(fp, ++ "< SLUB: free list END: %lx (%d found) >\n", ++ q, i); ++ break; ++ } ++ fprintf(fp, " %lx\n", q); ++ i++; ++ } ++ if (!q) ++ fprintf(fp, "< SLUB: free list END (%d found) >\n", i); } - sprintf(buf, "%s OFFSET: %lx", file, offset); -+ if (!page_to_phys(si->slab, &paddr)) { -+ error(WARNING, -+ "%lx: cannot tranlate slab page to physical address\n", -+ si->slab); -+ return; -+ } -+ -+ node = page_to_nid(si->slab); -+ -+ vaddr = PTOV(paddr); ++ for (p = vaddr; p < vaddr + si->objects * si->size; p += si->size) { ++ is_free = FALSE; ++ for (is_free = 0, q = freelist; q; ++ q = get_freepointer(si, (void *)q)) { ++ if (q == BADADDR) ++ return; ++ if (q & PAGE_MAPPING_ANON) ++ break; ++ if (p == q) { ++ is_free = TRUE; ++ break; ++ } ++ } - return buf; -+ if (verbose) -+ fprintf(fp, " %s", slab_hdr); ++ if (si->flags & ADDRESS_SPECIFIED) { ++ if ((si->spec_addr < p) || ++ (si->spec_addr >= (p + si->size))) { ++ if (!(si->flags & VERBOSE)) ++ continue; ++ } ++ } -no_file_offset: - return NULL; --} -+ if (!readmem(si->slab + OFFSET(page_inuse), KVADDR, &inuse, -+ sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) -+ return; -+ if (!readmem(si->slab + OFFSET(page_freelist), KVADDR, &freelist, -+ sizeof(void *), "page.freelist", RETURN_ON_ERROR)) -+ return; ++ fprintf(fp, " %s%lx%s", ++ is_free ? " " : "[", ++ p, is_free ? " " : "]"); ++ if (is_free && (cpu_slab >= 0)) ++ fprintf(fp, "(cpu %d cache)", cpu_slab); ++ fprintf(fp, "\n"); ++ ++ } + } -/* - * Translate a PTE into its physical address and flags. 
- */ -void -cmd_pte(void) --{ ++static int ++count_free_objects(struct meminfo *si, ulong freelist) + { - int c; - ulonglong pte; -+ DUMP_SLAB_INFO_SLUB(); - +- - while ((c = getopt(argcnt, args, "")) != EOF) { - switch(c) - { @@ -43747,37 +44407,36 @@ - break; - } - } -+ if (!verbose) -+ return; - +- - if (argerrs) - cmd_usage(pc->curcmd, SYNOPSIS); -+ for (i = 0, cpu_slab = -1; i < kt->cpus; i++) { -+ cpu_slab_ptr = get_cpu_slab_ptr(si, i); ++ int c; ++ ulong q; - while (args[optind]) { - pte = htoll(args[optind], FAULT_ON_ERROR, NULL); - machdep->translate_pte((ulong)pte, NULL, pte); - optind++; -+ if (!cpu_slab_ptr) -+ continue; -+ if (cpu_slab_ptr == si->slab) { -+ cpu_slab = i; ++ c = 0; ++ for (q = freelist; q; q = get_freepointer(si, (void *)q)) { ++ if (q & PAGE_MAPPING_ANON) + break; -+ } ++ c++; } --} -- ++ return c; + } + -static char *node_zone_hdr = "ZONE NAME SIZE"; -+ fprintf(fp, " %s", free_inuse_hdr); -/* - * On systems supporting memory nodes, display the basic per-node data. - */ -static void -dump_memory_nodes(int initialize) --{ ++static ulong ++get_freepointer(struct meminfo *si, void *object) + { - int i, j; - int n, id, flen, slen; - ulong node_mem_map; @@ -43798,18 +44457,7 @@ - char buf4[BUFSIZE]; - char buf5[BUFSIZE]; - struct node_table *nt; -+ for (p = vaddr; p < vaddr + si->objects * si->size; p += si->size) { -+ is_free = FALSE; -+ for (is_free = 0, q = freelist; q; -+ q = get_freepointer(si, (void *)q)) { -+ if (q == BADADDR) -+ return; -+ if (p == q) { -+ is_free = TRUE; -+ break; -+ } -+ } - +- - if (!(vt->flags & NODES)) { - if (!initialize) - error(FATAL, @@ -43826,36 +44474,8 @@ - nt->start_paddr = 0; - nt->start_mapnr = 0; - return; -+ if (si->flags & ADDRESS_SPECIFIED) { -+ if ((si->spec_addr < p) || -+ (si->spec_addr >= (p + si->size))) { -+ if (!(si->flags & VERBOSE)) -+ continue; -+ } - } +- } - } - -- if (initialize) -- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); -- else -- pgdat = vt->node_table[0].pgdat; -+ fprintf(fp, " %s%lx%s", -+ is_free ? " " : "[", -+ p, is_free ? " " : "]"); -+ if (is_free && (cpu_slab >= 0)) -+ fprintf(fp, "(cpu %d cache)", cpu_slab); -+ fprintf(fp, "\n"); - -- for (n = 0; pgdat; n++) { -- if (n >= vt->numnodes) -- error(FATAL, "numnodes out of sync with pgdat_list?\n"); -+ } -+} - -- nt = &vt->node_table[n]; -+static ulong -+get_freepointer(struct meminfo *si, void *object) -+{ + ulong vaddr, nextfree; + + vaddr = (ulong)(object + si->slab_offset); @@ -43863,19 +44483,49 @@ + sizeof(void *), "get_freepointer", RETURN_ON_ERROR)) + return BADADDR; -- readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, -- sizeof(int), "pglist node_id", FAULT_ON_ERROR); +- if (initialize) +- get_symbol_data("pgdat_list", sizeof(void *), &pgdat); +- else +- pgdat = vt->node_table[0].pgdat; + return nextfree; +} +- for (n = 0; pgdat; n++) { +- if (n >= vt->numnodes) +- error(FATAL, "numnodes out of sync with pgdat_list?\n"); ++static void ++do_node_lists_slub(struct meminfo *si, ulong node_ptr, int node) ++{ ++ ulong next, list_head, flags; ++ int first; + +- nt = &vt->node_table[n]; ++ list_head = node_ptr + OFFSET(kmem_cache_node_partial); ++ if (!readmem(list_head, KVADDR, &next, sizeof(ulong), ++ "kmem_cache_node partial", RETURN_ON_ERROR)) ++ return; + +- readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, +- sizeof(int), "pglist node_id", FAULT_ON_ERROR); ++ fprintf(fp, "NODE %d PARTIAL:\n%s", node, ++ next == list_head ? 
" (empty)\n" : ""); ++ first = 0; ++ while (next != list_head) { ++ si->slab = next - OFFSET(page_lru); ++ if (first++ == 0) ++ fprintf(fp, " %s", slab_hdr); ++ do_slab_slub(si, !VERBOSE); ++ ++ if (received_SIGINT()) ++ restart(0); + - readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, - &node_mem_map, sizeof(ulong), - "node_mem_map", FAULT_ON_ERROR); -+static void -+do_node_lists_slub(struct meminfo *si, ulong node_ptr, int node) -+{ -+ ulong next, list_head; -+ int first; ++ if (!readmem(next, KVADDR, &next, sizeof(ulong), ++ "page.lru.next", RETURN_ON_ERROR)) ++ return; ++ } - if (VALID_MEMBER(pglist_data_node_start_paddr)) - readmem(pgdat+OFFSET(pglist_data_node_start_paddr), @@ -43889,26 +44539,23 @@ - node_start_paddr = PTOB(node_start_pfn); - } else error(INFO, - "cannot determine zone starting physical address\n"); -+ list_head = node_ptr + OFFSET(kmem_cache_node_partial); -+ if (!readmem(list_head, KVADDR, &next, sizeof(ulong), -+ "kmem_cache_node partial", RETURN_ON_ERROR)) ++#define SLAB_STORE_USER (0x00010000UL) ++ flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags)); ++ ++ if (INVALID_MEMBER(kmem_cache_node_full) || ++ !(flags & SLAB_STORE_USER)) { ++ fprintf(fp, "NODE %d FULL:\n (not tracked)\n", node); + return; ++ } - if (VALID_MEMBER(pglist_data_node_start_mapnr)) - readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), - KVADDR, &node_start_mapnr, sizeof(ulong), - "pglist node_start_mapnr", FAULT_ON_ERROR); -+ fprintf(fp, "NODE %d PARTIAL:\n%s", node, -+ next == list_head ? " (empty)\n" : ""); -+ first = 0; -+ while (next != list_head) { -+ si->slab = next - OFFSET(page_lru); -+ if (first++ == 0) -+ fprintf(fp, " %s", slab_hdr); -+ do_slab_slub(si, !VERBOSE); -+ -+ if (received_SIGINT()) -+ restart(0); ++ list_head = node_ptr + OFFSET(kmem_cache_node_full); ++ if (!readmem(list_head, KVADDR, &next, sizeof(ulong), ++ "kmem_cache_node full", RETURN_ON_ERROR)) ++ return; - if (VALID_MEMBER(pglist_data_node_size)) - readmem(pgdat+OFFSET(pglist_data_node_size), @@ -43920,17 +44567,19 @@ - "pglist node_spanned_pages", FAULT_ON_ERROR); - node_size = node_spanned_pages; - } else error(INFO, "cannot determine zone size\n"); -+ if (!readmem(next, KVADDR, &next, sizeof(ulong), -+ "page.lru.next", RETURN_ON_ERROR)) -+ return; -+ } ++ fprintf(fp, "NODE %d FULL:\n%s", node, ++ next == list_head ? " (empty)\n" : ""); ++ first = 0; ++ while (next != list_head) { ++ si->slab = next - OFFSET(page_lru); ++ if (first++ == 0) ++ fprintf(fp, " %s", slab_hdr); ++ do_slab_slub(si, !VERBOSE); - readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata, - sizeof(ulong), "pglist bdata", FAULT_ON_ERROR); -+ if (INVALID_MEMBER(kmem_cache_node_full)) { -+ fprintf(fp, "NODE %d FULL:\n (not tracked)\n", node); -+ return; -+ } ++ if (received_SIGINT()) ++ restart(0); - if (initialize) { - nt->node_id = id; @@ -43943,10 +44592,11 @@ - nt->start_paddr = node_start_paddr; - nt->start_mapnr = node_start_mapnr; - } -+ list_head = node_ptr + OFFSET(kmem_cache_node_full); -+ if (!readmem(list_head, KVADDR, &next, sizeof(ulong), -+ "kmem_cache_node full", RETURN_ON_ERROR)) -+ return; ++ if (!readmem(next, KVADDR, &next, sizeof(ulong), ++ "page.lru.next", RETURN_ON_ERROR)) ++ return; ++ } ++} - if (!initialize) { - if (n) { @@ -43960,14 +44610,6 @@ - mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"), - mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"), - mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES")); -+ fprintf(fp, "NODE %d FULL:\n%s", node, -+ next == list_head ? 
" (empty)\n" : ""); -+ first = 0; -+ while (next != list_head) { -+ si->slab = next - OFFSET(page_lru); -+ if (first++ == 0) -+ fprintf(fp, " %s", slab_hdr); -+ do_slab_slub(si, !VERBOSE); - node_zones = pgdat + OFFSET(pglist_data_node_zones); - sprintf(buf5, " %2d %s %s %s %s\n", id, @@ -43980,8 +44622,14 @@ - mkstring(buf4, flen, CENTER|LJUST|LONG_HEX, - MKSTR(node_zones))); - fprintf(fp, "%s", buf5); -+ if (received_SIGINT()) -+ restart(0); ++static char * ++is_kmem_cache_addr_slub(ulong vaddr, char *kbuf) ++{ ++ int i, cnt; ++ ulong *cache_list; ++ ulong name; ++ char *cache_buf; ++ int found; - j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) + - count_leading_spaces(buf4); @@ -44002,7 +44650,9 @@ - mkstring(buf3, strlen("START_MAPNR"), - CENTER|LONG_DEC|RJUST, - MKSTR(node_start_mapnr))); -- ++ cnt = get_kmem_cache_list(&cache_list); ++ cache_buf = GETBUF(SIZE(kmem_cache)); + - sprintf(buf2, "%s %s START_PADDR START_MAPNR", - node_zone_hdr, - mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, @@ -44010,17 +44660,12 @@ - slen = strlen(buf2); - fprintf(fp, "\n%s\n", buf2); - } -+ if (!readmem(next, KVADDR, &next, sizeof(ulong), -+ "page.lru.next", RETURN_ON_ERROR)) -+ return; -+ } -+} - +- - node_zones = pgdat + OFFSET(pglist_data_node_zones); - for (i = 0; i < vt->nr_zones; i++) { - if (CRASHDEBUG(7)) - fprintf(fp, "zone at %lx\n", node_zones); - +- - if (VALID_MEMBER(zone_struct_size)) - readmem(node_zones+OFFSET(zone_struct_size), - KVADDR, &zone_size, sizeof(ulong), @@ -44085,18 +44730,6 @@ - zone_zone_mem_map), KVADDR, &zone_mem_map, - sizeof(ulong), "node_zones zone_mem_map", - FAULT_ON_ERROR); -+static char * -+is_kmem_cache_addr_slub(ulong vaddr, char *kbuf) -+{ -+ int i, cnt; -+ ulong *cache_list; -+ ulong name; -+ char *cache_buf; -+ int found; -+ -+ cnt = get_kmem_cache_list(&cache_list); -+ cache_buf = GETBUF(SIZE(kmem_cache)); -+ + for (i = 0, found = FALSE; i < cnt; i++) { + if (cache_list[i] != vaddr) + continue; @@ -44139,12 +44772,12 @@ + found = TRUE; + break; + } ++ ++ FREEBUF(cache_list); ++ FREEBUF(cache_buf); - if (n != vt->numnodes) - error(FATAL, "numnodes out of sync with pgdat_list?\n"); -+ FREEBUF(cache_list); -+ FREEBUF(cache_buf); -+ + return (found ? kbuf : NULL); } @@ -44359,7 +44992,7 @@ - return psz; -} -+ if (!(page_flags & vt->PG_slab)) ++ if (!(page_flags & (1 << vt->PG_slab))) + return NULL; -/* @@ -44413,33 +45046,49 @@ - * cache_cache. + * Figure out which of the kmem_cache.cpu_slab declarations + * is used by this kernel, and return a pointer to the slab -+ * page being used. ++ * page being used. Return the kmem_cache_cpu.freelist pointer ++ * if requested. 
*/ - -int -l1_cache_size(void) +static ulong -+get_cpu_slab_ptr(struct meminfo *si, int cpu) ++get_cpu_slab_ptr(struct meminfo *si, int cpu, ulong *cpu_freelist) { - ulong cache_cache; - ulong c_align; - int colour_off; - int retval; -+ ulong cpu_slab_ptr, page; ++ ulong cpu_slab_ptr, page, freelist; - cache_cache = symbol_value("cache_cache"); ++ if (cpu_freelist) ++ *cpu_freelist = 0; + +- retval = -1; + switch (vt->cpu_slab_type) + { + case TYPE_CODE_STRUCT: + cpu_slab_ptr = ULONG(si->cache_buf + + OFFSET(kmem_cache_cpu_slab) + + OFFSET(kmem_cache_cpu_page)); ++ if (cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist)) ++ *cpu_freelist = ULONG(si->cache_buf + ++ OFFSET(kmem_cache_cpu_slab) + ++ OFFSET(kmem_cache_cpu_freelist)); + break; - -- retval = -1; ++ + case TYPE_CODE_ARRAY: + cpu_slab_ptr = ULONG(si->cache_buf + + OFFSET(kmem_cache_cpu_slab) + (sizeof(void *)*cpu)); ++ ++ if (cpu_slab_ptr && cpu_freelist && ++ VALID_MEMBER(kmem_cache_cpu_freelist)) { ++ if (readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_freelist), ++ KVADDR, &freelist, sizeof(void *), ++ "kmem_cache_cpu.freelist", RETURN_ON_ERROR)) ++ *cpu_freelist = freelist; ++ } + + if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) { + if (!readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_page), @@ -44586,12 +45235,12 @@ + return found; +} +#endif /* NOT_USED */ ---- crash/unwind_x86.h.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/unwind_x86.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/unwind_x86.h 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,2 @@ + + ---- crash/extensions/Makefile.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/Makefile.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/Makefile 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,47 @@ +# @@ -44641,7 +45290,7 @@ + @for MAKEFILE in `grep -sl "^clean:" *.mk`; \ + do make --no-print-directory -f $$MAKEFILE clean; \ + done ---- crash/extensions/libsial/sial_input.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_input.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_input.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,802 @@ +/* @@ -45446,7 +46095,7 @@ + sial_msg("Include file not found: '%s' [include path is '%s']", name, sial_getipath()); + } +} ---- crash/extensions/libsial/Makefile.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/Makefile.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/Makefile 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,94 @@ +# @@ -45543,7 +46192,7 @@ + -/bin/rm -f *.o $(TARGETS) $(LDIRT) + +clobber: clean ---- crash/extensions/libsial/sial_str.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_str.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_str.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,185 @@ +/* @@ -45731,7 +46380,7 @@ + sial_error("String too long at %d", iline); + return NULLNODE; +} ---- crash/extensions/libsial/sial_func.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_func.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_func.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1191 @@ +/* @@ -46925,7 +47574,7 @@ + return 0; +} + ---- crash/extensions/libsial/sial_op.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_op.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_op.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,904 
@@ +/* @@ -47832,7 +48481,7 @@ + nn->data=pto; + return nn; +} ---- crash/extensions/libsial/sial_member.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_member.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_member.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,321 @@ +/* @@ -48156,7 +48805,7 @@ + n->free=(ffct_t)sial_freemem; + return n; +} ---- crash/extensions/libsial/sialpp-lsed.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sialpp-lsed.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sialpp-lsed 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,32 @@ +s/yyback/sialppback/g @@ -48191,7 +48840,7 @@ +s/yyvstop/sialppvstop/g +s/yywrap/sialppwrap/g +s/yydebug/sialdebug/g ---- crash/extensions/libsial/sial-lsed.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial-lsed.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial-lsed 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,32 @@ +s/yyback/sialback/g @@ -48226,7 +48875,7 @@ +s/yyvstop/sialvstop/g +s/yywrap/sialwrap/g +s/yydebug/sialdebug/g ---- crash/extensions/libsial/sial_type.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_type.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_type.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1172 @@ +/* @@ -49401,7 +50050,7 @@ + sial_dupval(vto, vfrm); +} + ---- crash/extensions/libsial/sialpp.l.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sialpp.l.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sialpp.l 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,85 @@ +%{ @@ -49489,7 +50138,7 @@ +#undef unput +#define input() sial_input() +#define unput(c) sial_unput(c) ---- crash/extensions/libsial/sial.l.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial.l.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial.l 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,206 @@ +%{ @@ -49698,7 +50347,7 @@ +#undef unput +#define input() sial_input() +#define unput(c) sial_unput(c) ---- crash/extensions/libsial/sial_var.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_var.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_var.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1320 @@ +/* @@ -51021,7 +51670,7 @@ + } +} + ---- crash/extensions/libsial/sial_api.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_api.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_api.h 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,267 @@ +/* @@ -51291,7 +51940,7 @@ + +/* parsers debug flags */ +extern int sialdebug, sialppdebug; ---- crash/extensions/libsial/README.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/README.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/README 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1024 @@ + @@ -52318,7 +52967,7 @@ + +Questions/Comments +Luc Chouinard, lucchouina@yahoo.com ---- crash/extensions/libsial/README.sial.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/README.sial.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/README.sial 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,97 @@ + @@ -52418,7 +53067,7 @@ + Refer to the README file on writing a user level command. 
+ Also look at the 'sial.c' example in the scripts directory + ---- crash/extensions/libsial/sial_num.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_num.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_num.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,233 @@ +/* @@ -52654,7 +53303,7 @@ + sial_error("Oops! NUMBER"); + return 0; +} ---- crash/extensions/libsial/sial_alloc.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_alloc.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_alloc.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,430 @@ +/* @@ -53087,7 +53736,7 @@ + sial_setsvlev(jmps[njmps].svlev); +} + ---- crash/extensions/libsial/sial.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial.h 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,465 @@ +/* @@ -53555,7 +54204,7 @@ +#define S_MAXFILES 200 /* maximum number of macro files */ + +#define S_VARARG "__VARARG" /* name of the special var for ... */ ---- crash/extensions/libsial/sial_stat.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_stat.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_stat.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,435 @@ +/* @@ -53993,7 +54642,7 @@ + } +} + ---- crash/extensions/libsial/sial_util.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_util.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_util.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,922 @@ +/* @@ -54918,7 +55567,7 @@ + return n; +} + ---- crash/extensions/libsial/sial.y.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial.y.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial.y 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,436 @@ +%{ @@ -55357,7 +56006,7 @@ + +int sialerror(char *p) { sial_error(p); return 0; } + ---- crash/extensions/libsial/sial_define.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_define.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_define.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,519 @@ +/* @@ -55879,7 +56528,7 @@ + + sial_error("Syntax error on macro definition"); +} ---- crash/extensions/libsial/sial_builtin.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_builtin.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_builtin.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,434 @@ +/* @@ -56316,7 +56965,7 @@ + sial_error("Oops. 
sial_exebfunc()"); + return 0; +} ---- crash/extensions/libsial/sialpp.y.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sialpp.y.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sialpp.y 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,88 @@ +%{ @@ -56407,7 +57056,7 @@ + return 1; +} + ---- crash/extensions/libsial/sial_node.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_node.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_node.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,69 @@ +/* @@ -56479,7 +57128,7 @@ + sial_curpos(&p, 0); + return v; +} ---- crash/extensions/libsial/sial_api.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_api.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_api.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1516 @@ +/* @@ -57998,7 +58647,7 @@ + return 1; +} + ---- crash/extensions/libsial/sial_print.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_print.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_print.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,398 @@ +/* @@ -58399,7 +59048,7 @@ + sial_setpos(&n->pos); + return n; +} ---- crash/extensions/libsial/mkbaseop.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/mkbaseop.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/mkbaseop.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,169 @@ +/* @@ -58571,7 +59220,7 @@ +}\n", NOPS, NOPS); + exit(0); +} ---- crash/extensions/libsial/sial_case.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/libsial/sial_case.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/libsial/sial_case.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,125 @@ +/* @@ -58699,7 +59348,7 @@ + } + return 1; +} ---- crash/extensions/echo.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/echo.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/echo.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,114 @@ +/* echo.c - simple example of a crash extension @@ -58816,11 +59465,11 @@ +}; + + ---- crash/extensions/sial.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/sial.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/sial.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1021 @@ +/* -+ * $Id: crash.patch,v 1.19 2008/02/19 21:22:10 crash Exp $ ++ * $Id: crash.patch,v 1.20 2008/04/29 17:57:32 crash Exp $ + * + * This file is part of lcrash, an analysis tool for Linux memory dumps. 
+ * @@ -59840,7 +60489,7 @@ + } + return 1; +} ---- crash/extensions/dminfo.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/dminfo.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/dminfo.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1534 @@ +/* dminfo.c - crash extension module for device-mapper analysis @@ -61377,7 +62026,7 @@ +{ + return 0; +} ---- crash/extensions/sial.mk.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/extensions/sial.mk.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions/sial.mk 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,17 @@ +# @@ -61397,7 +62046,7 @@ + +clean: + cd libsial && make clean ---- crash/tools.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/tools.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/tools.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* tools.c - core analysis suite @@ -61997,7 +62646,7 @@ + else + return val; +} ---- crash/xen_hyper_command.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/xen_hyper_command.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/xen_hyper_command.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1856 @@ +/* @@ -63856,8 +64505,8 @@ +} + +#endif ---- crash/main.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/main.c 2008-01-04 09:42:08.000000000 -0500 +--- crash/main.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/main.c 2008-04-23 14:38:07.000000000 -0400 @@ -1,8 +1,8 @@ /* main.c - core analysis suite * @@ -63877,7 +64526,7 @@ #include #include -@@ -23,23 +24,38 @@ +@@ -23,23 +24,39 @@ static int is_external_command(void); static int is_builtin_command(void); static int is_input_file(void); @@ -63909,6 +64558,7 @@ + {"no_ikconfig", 0, 0, 0}, + {"hyper", 0, 0, 0}, + {"p2m_mfn", required_argument, 0, 0}, ++ {"xen_phys_start", required_argument, 0, 0}, + {"zero_excluded", 0, 0, 0}, + {"no_panic", 0, 0, 0}, + {"more", 0, 0, 0}, @@ -63920,7 +64570,7 @@ {0, 0, 0, 0} }; -@@ -55,7 +71,7 @@ +@@ -55,7 +72,7 @@ */ opterr = 0; optind = 0; @@ -63929,7 +64579,7 @@ long_options, &option_index)) != -1) { switch (c) { -@@ -64,52 +80,55 @@ +@@ -64,52 +81,55 @@ "memory_module")) pc->memory_module = optarg; @@ -64001,7 +64651,7 @@ "version")) { pc->flags |= VERSION_QUERY; display_version(); -@@ -117,12 +136,72 @@ +@@ -117,12 +137,75 @@ clean_exit(0); } @@ -64025,6 +64675,9 @@ + else if (STREQ(long_options[option_index].name, "p2m_mfn")) + xen_kdump_p2m_mfn(optarg); + ++ else if (STREQ(long_options[option_index].name, "xen_phys_start")) ++ set_xen_phys_start(optarg); ++ + else if (STREQ(long_options[option_index].name, "zero_excluded")) + *diskdump_flags |= ZERO_EXCLUDED; + @@ -64075,7 +64728,7 @@ break; case 'f': -@@ -133,14 +212,25 @@ +@@ -133,14 +216,25 @@ pc->flags |= KERNEL_DEBUG_QUERY; break; @@ -64106,7 +64759,7 @@ case 'e': if (STREQ(optarg, "vi")) pc->editing_mode = "vi"; -@@ -168,7 +258,7 @@ +@@ -168,7 +262,7 @@ case 's': pc->flags |= SILENT; pc->flags &= ~SCROLL; @@ -64115,7 +64768,7 @@ break; case 'L': -@@ -193,14 +283,18 @@ +@@ -193,14 +287,18 @@ set_vas_debug(pc->debug); break; @@ -64141,7 +64794,7 @@ } } opterr = 1; -@@ -229,7 +323,7 @@ +@@ -229,7 +327,7 @@ } else if (!is_readable(argv[optind])) program_usage(SHORT_FORM); @@ -64150,7 +64803,7 @@ if (pc->namelist || pc->server_namelist) { if (!select_namelist(argv[optind])) { error(INFO, -@@ -261,8 +355,36 @@ +@@ -261,8 +359,36 @@ } pc->flags |= NETDUMP; pc->dumpfile = argv[optind]; @@ -64189,7 +64842,7 @@ } else if (is_diskdump(argv[optind])) { if (pc->flags & MEMORY_SOURCES) { -@@ 
-322,6 +444,8 @@ +@@ -322,6 +448,8 @@ optind++; } @@ -64198,7 +64851,7 @@ if (setjmp(pc->main_loop_env)) clean_exit(1); -@@ -332,11 +456,10 @@ +@@ -332,11 +460,10 @@ buf_init(); cmdline_init(); mem_init(); @@ -64211,7 +64864,7 @@ datatype_init(); /* -@@ -361,17 +484,28 @@ +@@ -361,17 +488,28 @@ { if (!(pc->flags & GDB_INIT)) { gdb_session_init(); @@ -64251,7 +64904,7 @@ } else SIGACTION(SIGINT, restart, &pc->sigaction, NULL); -@@ -379,8 +513,17 @@ +@@ -379,8 +517,17 @@ * Display system statistics and current context. */ if (!(pc->flags & SILENT) && !(pc->flags & RUNTIME)) { @@ -64271,7 +64924,7 @@ fprintf(fp, "\n"); } -@@ -426,8 +569,17 @@ +@@ -426,8 +573,17 @@ if ((ct = get_command_table_entry(args[0]))) { if (ct->flags & REFRESH_TASK_TABLE) { @@ -64291,7 +64944,7 @@ } if (!STREQ(pc->curcmd, pc->program_name)) pc->lastcmd = pc->curcmd; -@@ -459,6 +611,9 @@ +@@ -459,6 +615,9 @@ pc->curcmd = pc->program_name; error(INFO, "command not found: %s\n", args[0]); @@ -64301,7 +64954,7 @@ } -@@ -471,7 +626,7 @@ +@@ -471,7 +630,7 @@ struct command_table_entry *cp; struct extension_table *ext; @@ -64310,7 +64963,7 @@ if (STREQ(cp->name, name)) return cp; } -@@ -591,6 +746,8 @@ +@@ -591,6 +750,8 @@ int i; char *p1; char buf[BUFSIZE]; @@ -64319,7 +64972,7 @@ FILE *afp; char *program; -@@ -625,7 +782,8 @@ +@@ -625,7 +786,8 @@ machdep->verify_paddr = generic_verify_paddr; pc->redhat_debug_loc = DEFAULT_REDHAT_DEBUG_LOCATION; pc->cmdgencur = 0; @@ -64329,7 +64982,7 @@ /* * Get gdb version before initializing it since this might be one -@@ -637,7 +795,10 @@ +@@ -637,7 +799,10 @@ * Set up the default scrolling behavior for terminal output. */ if (isatty(fileno(stdout))) { @@ -64341,7 +64994,7 @@ pc->flags |= SCROLL; pc->scroll_command = SCROLL_LESS; } else if (file_exists("/bin/more", NULL)) { -@@ -685,11 +846,11 @@ +@@ -685,11 +850,11 @@ pc->home = "(unknown)"; } else strcpy(pc->home, p1); @@ -64357,7 +65010,7 @@ else { while (fgets(buf, BUFSIZE, afp)) resolve_rc_cmd(buf, ALIAS_RCHOME); -@@ -698,11 +859,12 @@ +@@ -698,11 +863,12 @@ } } @@ -64374,7 +65027,7 @@ else { while (fgets(buf, BUFSIZE, afp)) resolve_rc_cmd(buf, ALIAS_RCLOCAL); -@@ -712,6 +874,8 @@ +@@ -712,6 +878,8 @@ if (STREQ(pc->editing_mode, "no_mode")) pc->editing_mode = "vi"; @@ -64383,7 +65036,7 @@ } -@@ -840,13 +1004,22 @@ +@@ -840,13 +1008,22 @@ if (pc->flags & REM_S390D) sprintf(&buf[strlen(buf)], "%sREM_S390D", others++ ? "|" : ""); @@ -64408,7 +65061,7 @@ sprintf(&buf[strlen(buf)], "%sDISKDUMP", others++ ? "|" : ""); if (pc->flags & SYSMAP) -@@ -855,21 +1028,36 @@ +@@ -855,21 +1032,36 @@ if (pc->flags & SYSMAP_ARG) sprintf(&buf[strlen(buf)], "%sSYSMAP_ARG", others++ ? 
"|" : ""); @@ -64450,7 +65103,7 @@ if (pc->flags) strcat(buf, ")"); -@@ -933,10 +1121,36 @@ +@@ -933,10 +1125,36 @@ fprintf(fp, " ifile_pipe: %lx\n", (ulong)pc->ifile_pipe); fprintf(fp, " ifile_ofile: %lx\n", (ulong)pc->ifile_ofile); fprintf(fp, " input_file: %s\n", pc->input_file); @@ -64491,7 +65144,7 @@ buf[0] = NULLCHAR; fprintf(fp, " redirect: %lx ", pc->redirect); -@@ -1008,6 +1222,8 @@ +@@ -1008,6 +1226,8 @@ fprintf(fp, " tmp_fp: %lx\n", (ulong)pc->tmp_fp); fprintf(fp, " tmpfile2: %lx\n", (ulong)pc->tmpfile2); @@ -64500,7 +65153,7 @@ fprintf(fp, " curcmd: %s\n", pc->curcmd); fprintf(fp, " lastcmd: %s\n", pc->lastcmd); fprintf(fp, " cur_gdb_cmd: %d %s\n", pc->cur_gdb_cmd, -@@ -1016,7 +1232,30 @@ +@@ -1016,7 +1236,30 @@ gdb_command_string(pc->last_gdb_cmd, buf, FALSE)); fprintf(fp, " cur_req: %lx\n", (ulong)pc->cur_req); fprintf(fp, " cmdgencur: %ld\n", pc->cmdgencur); @@ -64532,7 +65185,7 @@ fprintf(fp, " sigint_cnt: %d\n", pc->sigint_cnt); fprintf(fp, " sigaction: %lx\n", (ulong)&pc->sigaction); fprintf(fp, " gdb_sigaction: %lx\n", (ulong)&pc->gdb_sigaction); -@@ -1051,8 +1290,16 @@ +@@ -1051,8 +1294,16 @@ fprintf(fp, " readmem: read_daemon()\n"); else if (pc->readmem == read_netdump) fprintf(fp, " readmem: read_netdump()\n"); @@ -64549,7 +65202,7 @@ else fprintf(fp, " readmem: %lx\n", (ulong)pc->readmem); if (pc->writemem == write_dev_mem) -@@ -1065,8 +1312,14 @@ +@@ -1065,8 +1316,14 @@ fprintf(fp, " writemem: write_daemon()\n"); else if (pc->writemem == write_netdump) fprintf(fp, " writemem: write_netdump()\n"); @@ -64564,7 +65217,7 @@ else fprintf(fp, " writemem: %lx\n", (ulong)pc->writemem); -@@ -1100,3 +1353,28 @@ +@@ -1100,3 +1357,28 @@ exit(status); } @@ -64593,7 +65246,7 @@ + error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); +#endif +} ---- crash/s390dbf.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/s390dbf.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/s390dbf.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1340 @@ +/* @@ -65936,7 +66589,7 @@ + +#endif + ---- crash/s390.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/s390.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/s390.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,9 +1,9 @@ /* s390.c - core analysis suite @@ -66399,7 +67052,7 @@ } /* ---- crash/unwind_x86_64.h.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/unwind_x86_64.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/unwind_x86_64.h 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,92 @@ +#define CONFIG_64BIT 1 @@ -66494,8 +67147,8 @@ + PTREGS_INFO(r15), \ + PTREGS_INFO(rip) + ---- crash/crash.8.orig 2008-02-19 16:16:13.000000000 -0500 -+++ crash/crash.8 2008-01-04 09:42:08.000000000 -0500 +--- crash/crash.8.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/crash.8 2008-04-16 08:31:53.000000000 -0400 @@ -5,7 +5,7 @@ .TH CRASH 8 .SH NAME @@ -66559,7 +67212,11 @@ .I gdb "print" command for evaluation and display. .TP -@@ -361,11 +365,85 @@ +@@ -358,14 +362,89 @@ + .I whatis + displays the definition of structures, unions, typedefs or + text/data symbols. ++.TP .I wr modifies the contents of memory. When writing to memory on a live system, this command should obviously be used with great care. @@ -66648,7 +67305,7 @@ +.PP +.BR netdump (8), +.BR gdb (1) ---- crash/lkcd_common.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/lkcd_common.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_common.c 2008-01-04 09:42:08.000000000 -0500 @@ -3,8 +3,8 @@ * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. 
@@ -66934,7 +67591,7 @@ + } +} + ---- crash/ppc.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/ppc.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/ppc.c 2008-01-04 09:42:08.000000000 -0500 @@ -51,6 +51,9 @@ void @@ -67018,14 +67675,14 @@ if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, ---- crash/README.orig 2008-02-19 16:16:13.000000000 -0500 -+++ crash/README 2008-02-19 16:16:11.000000000 -0500 +--- crash/README.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/README 2008-04-29 13:51:47.000000000 -0400 @@ -69,7 +69,7 @@ After the kernel is re-compiled, the uncompressed "vmlinux" kernel that is created in the top-level kernel build directory must be saved. - To build this utility, simply uncompress the tar file, enter the crash-4.0 -+ To build this utility, simply uncompress the tar file, enter the crash-4.0-6.0 ++ To build this utility, simply uncompress the tar file, enter the crash-4.0-6.3 subdirectory, and type "make". The initial build will take several minutes because the gdb module must be configured and and built. Alternatively, the crash source RPM file may be installed and built, and the resultant crash @@ -67038,7 +67695,7 @@ - Copyright (C) 2004, 2005 IBM Corporation - Copyright (C) 1999-2005 Hewlett-Packard Co - Copyright (C) 1999, 2002 Silicon Graphics, Inc. -+ crash 4.0-6.0 ++ crash 4.0-6.3 + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. + Copyright (C) 2004, 2005, 2006 IBM Corporation + Copyright (C) 1999-2006 Hewlett-Packard Co @@ -67054,7 +67711,7 @@ DUMPFILE: /dev/mem CPUS: 1 - DATE: Wed Jul 13 13:26:00 2005 -+ DATE: Tue Feb 19 16:16:11 2008 ++ DATE: Tue Apr 29 13:51:47 2008 UPTIME: 10 days, 22:55:18 LOAD AVERAGE: 0.08, 0.03, 0.01 TASKS: 42 @@ -67063,7 +67720,7 @@ extend mach repeat timer - crash version: 4.0 gdb version: 6.1 -+ crash version: 4.0-6.0 gdb version: 6.1 ++ crash version: 4.0-6.3 gdb version: 6.1 For help on any command above, enter "help ". For help on input options, enter "help input". For help on output options, enter "help output". @@ -67076,7 +67733,7 @@ - Copyright (C) 2004, 2005 IBM Corporation - Copyright (C) 1999-2005 Hewlett-Packard Co - Copyright (C) 1999, 2002 Silicon Graphics, Inc. -+ crash 4.0-6.0 ++ crash 4.0-6.3 + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. + Copyright (C) 2004, 2005, 2006 IBM Corporation + Copyright (C) 1999-2006 Hewlett-Packard Co @@ -67096,7 +67753,7 @@ - Copyright (C) 2004, 2005 IBM Corporation - Copyright (C) 1999-2005 Hewlett-Packard Co - Copyright (C) 1999, 2002 Silicon Graphics, Inc. -+ crash 4.0-6.0 ++ crash 4.0-6.3 + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. + Copyright (C) 2004, 2005, 2006 IBM Corporation + Copyright (C) 1999-2006 Hewlett-Packard Co @@ -67107,7 +67764,7 @@ Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. 
This program is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under ---- crash/gdb-6.1/gdb/symtab.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/gdb-6.1/gdb/symtab.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/gdb-6.1/gdb/symtab.c 2008-01-04 09:42:08.000000000 -0500 @@ -4,7 +4,7 @@ 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 @@ -67176,7 +67833,7 @@ execute_command(req->buf, FALSE); ---- crash/gdb-6.1/gdb/symfile.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/gdb-6.1/gdb/symfile.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/gdb-6.1/gdb/symfile.c 2008-01-04 09:42:08.000000000 -0500 @@ -3,7 +3,7 @@ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, @@ -67225,7 +67882,7 @@ /* We're only interested in debugging sections with relocation information. */ if ((sectp->flags & SEC_RELOC) == 0) ---- crash/gdb-6.1/gdb/ppc-linux-tdep.c.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/gdb-6.1/gdb/ppc-linux-tdep.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/gdb-6.1/gdb/ppc-linux-tdep.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1116 @@ +/* Target-dependent code for GDB, the GNU debugger. @@ -68344,16 +69001,16 @@ + ppc_linux_init_abi); + add_core_fns (&ppc_linux_regset_core_fns); +} ---- crash/lkcd_x86_trace.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/lkcd_x86_trace.c 2008-01-04 09:42:08.000000000 -0500 +--- crash/lkcd_x86_trace.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/lkcd_x86_trace.c 2008-04-23 13:56:05.000000000 -0400 @@ -5,8 +5,8 @@ /* * lkcd_x86_trace.c * - * Copyright (C) 2002, 2003, 2004, 2005 David Anderson - * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. 
* * Adapted as noted from the following LKCD files: * @@ -68554,7 +69211,7 @@ #endif sbp = trace->stack[curstkidx].ptr; sbase = trace->stack[curstkidx].addr; -@@ -1322,7 +1418,17 @@ +@@ -1322,7 +1418,18 @@ } } asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); @@ -68563,6 +69220,7 @@ + if (XEN_HYPER_MODE()) { + func_name = kl_funcname(pc); + if (STREQ(func_name, "idle_loop") || STREQ(func_name, "hypercall") ++ || STREQ(func_name, "tracing_off") + || STREQ(func_name, "handle_exception")) { + UPDATE_FRAME(func_name, pc, 0, sp, bp, asp, 0, 0, bp - sp, 0); + return(trace->nframes); @@ -68572,7 +69230,7 @@ ra = GET_STACK_ULONG(bp + 4); /* * HACK: The get_framesize() function can return the proper -@@ -1447,7 +1553,8 @@ +@@ -1447,7 +1554,8 @@ bp = curframe->fp + frame_size; } #endif @@ -68582,7 +69240,7 @@ if (strstr(func_name, "kernel_thread")) { ra = 0; bp = saddr - 4; -@@ -1503,25 +1610,26 @@ +@@ -1503,25 +1611,26 @@ return(trace->nframes); #ifdef REDHAT } else if (strstr(func_name, "error_code") @@ -68613,7 +69271,7 @@ sp = curframe->fp+4; #ifdef REDHAT bp = sp + get_framesize(pc, bt); -@@ -1540,20 +1648,20 @@ +@@ -1540,20 +1649,20 @@ sp = curframe->fp + 4; asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); @@ -68637,13 +69295,14 @@ #ifdef REDHAT bp = sp + get_framesize(pc, bt); #else -@@ -1571,6 +1679,46 @@ +@@ -1571,6 +1680,47 @@ } } } + if (func_name && XEN_HYPER_MODE()) { + if (STREQ(func_name, "continue_nmi") || + STREQ(func_name, "vmx_asm_vmexit_handler") || ++ STREQ(func_name, "handle_nmi_mce") || + STREQ(func_name, "deferred_nmi")) { + /* Interrupt frame */ + sp = curframe->fp + 4; @@ -68684,7 +69343,7 @@ /* Make sure our next frame pointer is valid (in the stack). */ -@@ -1653,7 +1801,7 @@ +@@ -1653,7 +1803,7 @@ #ifdef REDHAT kaddr_t fp = 0; kaddr_t last_fp, last_pc, next_fp, next_pc; @@ -68693,7 +69352,7 @@ struct bt_info *bt; bt = trace->bt; -@@ -1684,8 +1832,15 @@ +@@ -1684,8 +1834,15 @@ (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ))) return; @@ -68711,7 +69370,7 @@ frmp->level + 1 : frmp->level, fp ? (ulong)fp : trace->bt->stkptr, (ulong)frmp->pc, frmp->funcname, frmp, ofp); -@@ -1707,7 +1862,11 @@ +@@ -1707,7 +1864,11 @@ fprintf(ofp, " [0x%x]\n", frmp->pc); #endif if (frmp->flag & EX_FRAME) { @@ -68724,7 +69383,7 @@ print_eframe(ofp, pt); } #ifdef REDHAT -@@ -1789,6 +1948,114 @@ +@@ -1789,6 +1950,114 @@ if (kt->flags & RA_SEEK) bt->flags |= BT_SPECULATE; @@ -68839,7 +69498,7 @@ if (!verify_back_trace(bt) && !recoverable(bt, ofp) && !BT_REFERENCE_CHECK(bt)) error(INFO, "cannot resolve stack trace:\n"); -@@ -1797,12 +2064,14 @@ +@@ -1797,12 +2066,14 @@ return(0); #endif @@ -68860,7 +69519,7 @@ } trace = (trace_t *)alloc_trace_rec(C_TEMP); if (!trace) { -@@ -1874,7 +2143,9 @@ +@@ -1874,7 +2145,9 @@ #endif print_trace(trace, flags, ofp); } @@ -68871,7 +69530,7 @@ free_trace_rec(trace); #ifdef REDHAT if (KL_ERROR == KLE_PRINT_TRACE_ERROR) { -@@ -1901,13 +2172,15 @@ +@@ -1901,13 +2174,15 @@ errcnt = 0; KL_ERROR = 0; @@ -68894,7 +69553,7 @@ trace = (trace_t *)alloc_trace_rec(C_TEMP); if (!trace) -@@ -1952,7 +2225,9 @@ +@@ -1952,7 +2227,9 @@ } while (frmp != trace->frame); } @@ -68905,7 +69564,7 @@ free_trace_rec(trace); return (errcnt ? 
FALSE : TRUE); } -@@ -1982,7 +2257,7 @@ +@@ -1982,7 +2259,7 @@ (sp && (bt->ref->hexval == sp->value))) bt->ref->cmdflags |= BT_REF_FOUND; if (frmp->flag & EX_FRAME) { @@ -68914,7 +69573,7 @@ x86_dump_eframe_common(bt, (ulong *)frmp->asp, (type == KERNEL_EFRAME)); } -@@ -2192,11 +2467,12 @@ +@@ -2192,11 +2469,12 @@ else buf[0] = NULLCHAR; @@ -68929,7 +69588,7 @@ strlen(buf) ? buf : "", eip); if (bt->flags & BT_LINE_NUMBERS) { -@@ -2236,6 +2512,9 @@ +@@ -2236,6 +2514,9 @@ struct eframe_labels *efp; struct syment *sp; @@ -68939,7 +69598,17 @@ efp = &eframe_labels; if (!efp->init) { -@@ -2325,6 +2604,25 @@ +@@ -2255,7 +2536,8 @@ + efp->tracesys_exit = symbol_search("tracesys_exit"); + } + +- if ((efp->sysenter = symbol_search("sysenter_entry"))) { ++ if ((efp->sysenter = symbol_search("sysenter_entry")) || ++ (efp->sysenter = symbol_search("ia32_sysenter_target"))) { + if ((sp = symbol_search("sysexit_ret_end_marker"))) + efp->sysenter_end = sp; + else if ((sp = symbol_search("system_call"))) +@@ -2325,6 +2607,25 @@ } /* @@ -68965,7 +69634,7 @@ * Cache 2k starting from the passed-in text address. This sits on top * of the instrbuf 256-byte cache, but we don't want to extend its size * because we can run off the end of a module segment -- if this routine -@@ -4858,6 +5156,8 @@ +@@ -4858,6 +5159,8 @@ } else { codeptr++; } @@ -68974,7 +69643,7 @@ } else { opcode = *codeptr; op = &op_386[*codeptr]; ---- crash/ia64.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/ia64.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/ia64.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* ia64.c - core analysis suite @@ -70531,7 +71200,7 @@ + } +} #endif ---- crash/gdb_interface.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/gdb_interface.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/gdb_interface.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* gdb_interface.c - core analysis suite @@ -70665,7 +71334,7 @@ switch (len) { case SIZEOF_8BIT: ---- crash/lkcd_x86_trace.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/lkcd_x86_trace.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_x86_trace.h 2008-01-04 09:42:08.000000000 -0500 @@ -35,6 +35,25 @@ @@ -70693,7 +71362,7 @@ #endif /* REDHAT */ ---- crash/lkcd_fix_mem.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/lkcd_fix_mem.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_fix_mem.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,3 +1,5 @@ +/* OBSOLETE */ @@ -70701,16 +71370,16 @@ #ifdef IA64 #define UTSNAME_ENTRY_SZ 65 ---- crash/ppc64.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/ppc64.c 2008-01-04 09:42:08.000000000 -0500 +--- crash/ppc64.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/ppc64.c 2008-04-08 17:06:40.000000000 -0400 @@ -1,8 +1,8 @@ /* ppc64.c -- core analysis suite * - * Copyright (C) 2004, 2005 David Anderson - * Copyright (C) 2004, 2005 Red Hat, Inc. All rights reserved. - * Copyright (C) 2004 Haren Myneni, IBM Corporation -+ * Copyright (C) 2004, 2005, 2006 David Anderson -+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc. All rights reserved. ++ * Copyright (C) 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2004, 2006 Haren Myneni, IBM Corporation * * This program is free software; you can redistribute it and/or modify @@ -71322,17 +71991,21 @@ +static void +ppc64_paca_init(void) +{ -+#define BITS_FOR_LONG sizeof(ulong)*8 + int i, cpus, nr_paca; + char *cpu_paca_buf; + ulong data_offset; -+ ulong cpu_online_map[NR_CPUS/BITS_FOR_LONG]; ++ int map; + + if (!symbol_exists("paca")) + error(FATAL, "PPC64: Could not find 'paca' symbol\n"); + -+ if (!symbol_exists("cpu_online_map")) -+ error(FATAL, "PPC64: Could not find 'cpu_online_map' symbol\n"); ++ if (symbol_exists("cpu_present_map")) ++ map = PRESENT; ++ else if (symbol_exists("cpu_online_map")) ++ map = ONLINE; ++ else ++ error(FATAL, ++ "PPC64: cannot find 'cpu_present_map' or 'cpu_online_map' symbols\n"); + + if (!MEMBER_EXISTS("paca_struct", "data_offset")) + return; @@ -71352,15 +72025,11 @@ + error(FATAL, "Recompile crash with larger NR_CPUS\n"); + } + -+ readmem(symbol_value("cpu_online_map"), KVADDR, &cpu_online_map[0], -+ nr_paca/8, "cpu_online_map", FAULT_ON_ERROR); -+ + for (i = cpus = 0; i < nr_paca; i++) { -+ div_t val = div(i, BITS_FOR_LONG); + /* -+ * CPU online? ++ * CPU present (or online)? + */ -+ if (!(cpu_online_map[val.quot] & (0x1UL << val.rem))) ++ if (!in_cpu_map(map, i)) + continue; + + readmem(symbol_value("paca") + (i * SIZE(ppc64_paca)), @@ -71383,7 +72052,7 @@ + machdep->machspec->last_level4_read = 0; +} #endif /* PPC64 */ ---- crash/xendump.h.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/xendump.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/xendump.h 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,177 @@ +/* @@ -71563,7 +72232,7 @@ + uint64_t pfn; + uint64_t gmfn; +}; ---- crash/diskdump.h.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/diskdump.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/diskdump.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,10 @@ /* @@ -71646,8 +72315,8 @@ +#define PAGE_VALID (0x1) /* flags */ +#define DISKDUMP_VALID_PAGE(flags) ((flags) & PAGE_VALID) + ---- crash/help.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/help.c 2008-01-04 11:54:24.000000000 -0500 +--- crash/help.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/help.c 2008-03-18 14:47:57.000000000 -0400 @@ -1,8 +1,8 @@ /* help.c - core analysis suite * @@ -71984,11 +72653,11 @@ "rd", "read memory", -"[-dDsup][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]", -+"[-dDsupxmf][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]", ++"[-dDsSupxmf][-8|-16|-32|-64][-o offs][-e addr] [address|symbol] [count]", " This command displays the contents of memory, with the output formatted", " in several different manners. The starting address may be entered either", " symbolically or by address. 
The default output size is the size of a long", -@@ -1046,9 +1125,12 @@ +@@ -1046,9 +1125,15 @@ " -p address argument is a physical address.", " -u address argument is a user virtual address; only required on", " processors with common user and kernel virtual address spaces.", @@ -71997,11 +72666,14 @@ " -d display output in signed decimal format (default is hexadecimal).", " -D display output in unsigned decimal format (default is hexadecimal).", " -s displays output symbolically when appropriate.", ++" -S displays output symbolically when appropriate; if the address", ++" references a slab cache object, the name of the slab cache will", ++" be displayed in brackets.", +" -x do not display ASCII translation at end of each line.", #ifdef NOTDEF " -o Shows offset value from the starting address.", #endif -@@ -1064,7 +1146,8 @@ +@@ -1064,40 +1149,44 @@ " 3. -u specifies a user virtual address, but is only necessary on", " processors with common user and kernel virtual address spaces.", " symbol symbol of starting address to read.", @@ -72011,7 +72683,72 @@ "\nEXAMPLES", " Display the kernel_version string:\n", " %s> rd kernel_version 4 ", -@@ -1155,7 +1238,7 @@ + " c0226a6c: 2e322e32 35312d35 00000000 00000001 2.2.5-15........\n", +-" Display the same block of memory, with and without symbols:\n", +-" %s> rd c1157f00 52 ", +-" c1157f00: c0131f7a 00000400 00000015 c013206e z...........n ..", +-" c1157f10: 00000100 c3d4c140 00000100 00000246 ....@.......F...", +-" c1157f20: 019b2065 c2a5bb90 080ac618 c02a83d0 e ............*.", +-" c1157f30: 40000025 01a45067 c1156000 00000000 %..@gP...`......", +-" c1157f40: c011b4f7 c1156000 c2a5bb90 080ac618 .....`..........", +-" c1157f50: 00000001 00000000 c1a45000 c19b2000 .........P... ..", +-" c1157f60: c1157f84 0000003b c022c000 c1156000 ....;.....\"..`..", +-" c1157f70: 00000000 fffffe00 bffff6fc 0000002e ................", +-" c1157f80: c022c000 ffffffff c01178ba c1156000 ..\"......x...`..", +-" c1157f90: 00000000 080ac618 bffff6ac 00000001 ................", +-" c1157fa0: c1156000 c1156000 c1157fb8 c1156000 .`...`.......`..", +-" c1157fb0: c1157fb8 c1156000 c1156000 c115608c .....`...`...`..", +-" c1157fc0: c01096c8 ffffffff bffff6fc 00000002 ................\n", +-" %s> rd -s c1157f00 52", +-" c1157f00: alloc_fd_array+0x1a 00000400 00000015 expand_fd_array+0x72 ", +-" c1157f10: 00000100 c3d4c140 00000100 00000246 ", +-" c1157f20: 019b2065 c2a5bb90 080ac618 c02a83d0 ", +-" c1157f30: 40000025 01a45067 c1156000 00000000 ", +-" c1157f40: do_wp_page+0x17f c1156000 c2a5bb90 080ac618 ", +-" c1157f50: 00000001 00000000 c1a45000 c19b2000 ", +-" c1157f60: c1157f84 0000003b init_task_union c1156000 ", +-" c1157f70: 00000000 fffffe00 bffff6fc 0000002e ", +-" c1157f80: init_task_union ffffffff sys_wait4+0x2be c1156000 ", +-" c1157f90: 00000000 080ac618 bffff6ac 00000001 ", +-" c1157fa0: c1156000 c1156000 c1157fb8 c1156000 ", +-" c1157fb0: c1157fb8 c1156000 c1156000 c115608c ", +-" c1157fc0: system_call+0x34 ffffffff bffff6fc 00000002\n", ++" Display the same block of memory, first without symbols, again", ++" with symbols, and then with symbols and slab cache references:\n", ++" %s> rd dff12e80 36", ++" dff12e80: dff12e94 00000000 c05a363a dff12ed0 ........:6Z.....", ++" dff12e90: 00000001 dff12e98 0041fe3f ffffffff ........?.A.....", ++" dff12ea0: 00000001 d5147800 00000000 def8abc0 .....x..........", ++" dff12eb0: dff12ebc c05a4aa0 00000000 dff12ed0 .....JZ.........", ++" dff12ec0: 00000001 00000000 00000000 00000000 ................", ++" dff12ed0: 0808b353 
00000000 dff12efc c0698220 S........... .i.", ++" dff12ee0: dff12efc df7c6480 00000001 c046f99b .....d|.......F.", ++" dff12ef0: 00000000 00000000 0808b352 dff12f68 ........R...h/..", ++" dff12f00: c155a128 00000000 00000001 ffffffff (.U.............", ++" %s> rd -s dff12e80 36", ++" dff12e80: dff12e94 00000000 sock_aio_write+83 dff12ed0 ", ++" dff12e90: 00000001 dff12e98 0041fe3f ffffffff ", ++" dff12ea0: 00000001 d5147800 00000000 def8abc0 ", ++" dff12eb0: dff12ebc sys_recvfrom+207 00000000 dff12ed0 ", ++" dff12ec0: 00000001 00000000 00000000 00000000 ", ++" dff12ed0: 0808b353 00000000 dff12efc socket_file_ops ", ++" dff12ee0: dff12efc df7c6480 00000001 do_sync_write+182 ", ++" dff12ef0: 00000000 00000000 0808b352 dff12f68 ", ++" dff12f00: c155a128 00000000 00000001 ffffffff ", ++" %s> rd -S dff12e80 36", ++" dff12e80: [size-4096] 00000000 sock_aio_write+83 [size-4096] ", ++" dff12e90: 00000001 [size-4096] 0041fe3f ffffffff ", ++" dff12ea0: 00000001 [sock_inode_cache] 00000000 [filp] ", ++" dff12eb0: [size-4096] sys_recvfrom+207 00000000 [size-4096] ", ++" dff12ec0: 00000001 00000000 00000000 00000000 ", ++" dff12ed0: 0808b353 00000000 [size-4096] socket_file_ops ", ++" dff12ee0: [size-4096] [filp] 00000001 do_sync_write+182 ", ++" dff12ef0: 00000000 00000000 0808b352 [size-4096] ", ++" dff12f00: [vm_area_struct] 00000000 00000001 ffffffff\n", + " Read jiffies in hexadecimal and decimal format:\n", + " %s> rd jiffies", + " c0213ae0: 0008cc3a :...\n", +@@ -1155,7 +1244,7 @@ "bt", "backtrace", #if defined(GDB_6_0) || defined(GDB_6_1) @@ -72020,7 +72757,7 @@ #else "[-a|-r|-t|-l|-e|-f|-g] [-R ref] [ -I ip ] [-S sp] [pid | taskp]", #endif -@@ -1167,14 +1250,26 @@ +@@ -1167,14 +1256,26 @@ " pages of memory containing the task_union structure.", " -t display all text symbols found from the last known stack location", " to the top of the stack. (helpful if the back trace fails)", @@ -72050,7 +72787,7 @@ #if !defined(GDB_6_0) && !defined(GDB_6_1) " -g use gdb stack trace code. (alpha only)", #endif -@@ -1189,11 +1284,8 @@ +@@ -1189,11 +1290,8 @@ " Note that all examples below are for x86 only. The output format will differ", " for other architectures. x86 backtraces from kernels that were compiled", " with the --fomit-frame-pointer CFLAG occasionally will drop stack frames,", @@ -72064,7 +72801,7 @@ "EXAMPLES", " Display the stack trace of the active task(s) when the kernel panicked:\n", " %s> bt -a", -@@ -1437,14 +1529,22 @@ +@@ -1437,14 +1535,22 @@ " ", " Below is an example shared object file consisting of just one command, ", " called \"echo\", which simply echoes back all arguments passed to it.", @@ -72093,7 +72830,7 @@ " located in the current directory, or in the user's $HOME directory.", " ", "---------------------------------- cut here ----------------------------------", -@@ -1556,7 +1656,7 @@ +@@ -1556,7 +1662,7 @@ " PROCESSOR SPEED: 1993 Mhz", " HZ: 100", " PAGE SIZE: 4096", @@ -72102,7 +72839,7 @@ " KERNEL VIRTUAL BASE: c0000000", " KERNEL VMALLOC BASE: e0800000", " KERNEL STACK SIZE: 8192", -@@ -1583,7 +1683,8 @@ +@@ -1583,7 +1689,8 @@ " This command displays the timer queue entries, both old- and new-style,", " in chronological order. 
In the case of the old-style timers, the", " timer_table array index is shown; in the case of the new-style timers, ", @@ -72112,7 +72849,7 @@ "\nEXAMPLES", " %s> timer", " JIFFIES", -@@ -1610,6 +1711,37 @@ +@@ -1610,6 +1717,37 @@ " 372010 c2323f7c c0112d6c ", " 372138 c2191f10 c0112d6c ", " 8653052 c1f13f10 c0112d6c ", @@ -72150,7 +72887,7 @@ NULL }; -@@ -1905,7 +2037,7 @@ +@@ -1905,7 +2043,7 @@ char *help_irq[] = { "irq", "IRQ data", @@ -72159,7 +72896,7 @@ " This command collaborates the data in an irq_desc_t, along with its", " associated hw_interrupt_type and irqaction structure data, into a", " consolidated per-IRQ display. Alternatively, the intel interrupt", -@@ -1913,6 +2045,7 @@ +@@ -1913,6 +2051,7 @@ " If no index value argument(s) nor any options are entered, the IRQ", " data for all IRQs will be displayed.\n", " index a valid IRQ index.", @@ -72167,7 +72904,7 @@ " -d dump the intel interrupt descriptor table.", " -b dump bottom half data.", "\nEXAMPLES", -@@ -2013,7 +2146,7 @@ +@@ -2013,7 +2152,7 @@ char *help_sys[] = { "sys", "system data", @@ -72176,7 +72913,7 @@ " This command displays system-specific data. If no arguments are entered,\n" " the same system data shown during %s invocation is shown.\n", " -c [name|number] If no name or number argument is entered, dump all", -@@ -2023,6 +2156,8 @@ +@@ -2023,6 +2162,8 @@ " that number is displayed. If the current output radix", " has been set to 16, the system call numbers will be ", " displayed in hexadecimal.", @@ -72185,7 +72922,7 @@ " -panic Panic a live system. Requires write permission to", " /dev/mem. Results in the %s context causing an", " \"Attempted to kill the idle task!\" panic. (The dump", -@@ -2043,6 +2178,27 @@ +@@ -2043,6 +2184,27 @@ " VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999", " MACHINE: i686 (500 MHz)", " MEMORY: 1 GB", @@ -72213,7 +72950,7 @@ "\n Dump the system call table:\n", " %s> sys -c", " NUM SYSTEM CALL FILE AND LINE NUMBER", -@@ -2191,13 +2347,18 @@ +@@ -2191,13 +2353,18 @@ char *help_mount[] = { "mount", "mounted filesystem data", @@ -72234,7 +72971,7 @@ " vfsmount hexadecimal address of filesystem vfsmount structure.", " superblock hexadecimal address of filesystem super_block structure.", " devname device name of filesystem.", -@@ -2721,22 +2882,22 @@ +@@ -2721,22 +2888,22 @@ char *help_sig[] = { "sig", "task signal handling", @@ -72265,7 +73002,7 @@ " ", " The -l option lists the signal numbers and their name(s). The -s option", " translates a 64-bit hexadecimal value representing the contents of a", -@@ -2744,56 +2905,105 @@ +@@ -2744,56 +2911,105 @@ " ", " pid a process PID.", " taskp a hexadecimal task_struct pointer.", @@ -72412,7 +73149,7 @@ " ", " List the signal numbers and their names:", " ", -@@ -2829,6 +3039,40 @@ +@@ -2829,6 +3045,40 @@ " [29] SIGIO/SIGPOLL", " [30] SIGPWR", " [31] SIGSYS", @@ -72453,7 +73190,7 @@ NULL }; -@@ -2836,8 +3080,8 @@ +@@ -2836,8 +3086,8 @@ char *help_struct[] = { "struct", "structure contents", @@ -72464,7 +73201,7 @@ " This command displays either a structure definition, or a formatted display", " of the contents of a structure at a specified address. 
When no address is", " specified, the structure definition is shown along with the structure size.", -@@ -2845,7 +3089,8 @@ +@@ -2845,7 +3095,8 @@ " the scope of the data displayed to that particular member; when no address", " is specified, the member's offset and definition are shown.\n", " struct_name name of a C-code structure used by the kernel.", @@ -72474,7 +73211,7 @@ " -o show member offsets when displaying structure definitions.", " -l offset if the address argument is a pointer to a list_head structure", " that is embedded in the target data structure, the offset", -@@ -2854,6 +3099,9 @@ +@@ -2854,6 +3105,9 @@ " 1. in \"structure.member\" format.", " 2. a number of bytes. ", " -r raw dump of structure data.", @@ -72484,7 +73221,7 @@ " address hexadecimal address of a structure; if the address points", " to an embedded list_head structure contained within the", " target data structure, then the \"-l\" option must be used.", -@@ -2944,6 +3192,21 @@ +@@ -2944,6 +3198,21 @@ " struct mm_struct {", " [12] pgd_t *pgd;", " }\n", @@ -72506,7 +73243,7 @@ " Display the array of tcp_sl_timer structures declared by tcp_slt_array[]:\n", " %s> struct tcp_sl_timer tcp_slt_array 4", " struct tcp_sl_timer {", -@@ -3052,8 +3315,8 @@ +@@ -3052,8 +3321,8 @@ char *help_union[] = { "union", "union contents", @@ -72517,7 +73254,7 @@ " This command displays either a union definition, or a formatted display", " of the contents of a union at a specified address. When no address is", " specified, the union definition is shown along with the union size.", -@@ -3061,7 +3324,8 @@ +@@ -3061,7 +3330,8 @@ " the scope of the data displayed to that particular member; when no address", " is specified, the member's offset (always 0) and definition are shown.\n", " union_name name of a C-code union used by the kernel.", @@ -72527,7 +73264,7 @@ " -o show member offsets when displaying union definitions.", " (always 0)", " -l offset if the address argument is a pointer to a list_head structure", -@@ -3071,6 +3335,9 @@ +@@ -3071,6 +3341,9 @@ " 1. in \"structure.member\" format.", " 2. a number of bytes. ", " -r raw dump of union data.", @@ -72537,7 +73274,7 @@ " address hexadecimal address of a union; if the address points", " to an embedded list_head structure contained within the", " target union structure, then the \"-l\" option must be used.", -@@ -3152,7 +3419,7 @@ +@@ -3152,7 +3425,7 @@ char *help_mod[] = { "mod", "module information and loading of symbols and debugging data", @@ -72546,7 +73283,7 @@ " With no arguments, this command displays basic information of the currently", " installed modules, consisting of the module address, name, size, the", " object file name (if known), and whether the module was compiled with", -@@ -3203,6 +3470,7 @@ +@@ -3203,6 +3476,7 @@ " -r Reinitialize module data. All currently-loaded symbolic", " and debugging data will be deleted, and the installed", " module list will be updated (live system only).", @@ -72554,7 +73291,7 @@ " ", " After symbolic and debugging data have been loaded, backtraces and text", " disassembly will be displayed appropriately. Depending upon the processor", -@@ -3322,9 +3590,10 @@ +@@ -3322,9 +3596,10 @@ char *help__list[] = { "list", "linked list", @@ -72567,7 +73304,7 @@ " ", " 1. A starting address points to a data structure; that structure contains", " a member that is a pointer to the next structure, and so on. The list", -@@ -3335,7 +3604,7 @@ +@@ -3335,7 +3610,7 @@ " c. a pointer to the first item pointed to by the start address.", " d. 
a pointer to its containing structure.", " ", @@ -72576,7 +73313,7 @@ " within the data structures in the list. The linked list is headed by an", " external LIST_HEAD, which is simply a list_head structure initialized to", " point to itself, signifying that the list is empty:", -@@ -3370,15 +3639,17 @@ +@@ -3370,15 +3645,17 @@ " entered.", " -s struct For each address in list, format and print as this type of", " structure; use the \"struct.member\" format in order to display", @@ -72597,7 +73334,7 @@ "\nEXAMPLES", " Note that each task_struct is linked to its parent's task_struct via the", " p_pptr member:", -@@ -3416,31 +3687,66 @@ +@@ -3416,31 +3693,66 @@ " The list of currently-registered file system types are headed up by a", " struct file_system_type pointer named \"file_systems\", and linked by", " the \"next\" field in each file_system_type structure. The following", @@ -72687,7 +73424,7 @@ " ", " In some kernels, the system run queue is a linked list headed up by the", " \"runqueue_head\", which is defined like so:", -@@ -3555,7 +3861,7 @@ +@@ -3555,7 +3867,7 @@ char *help_kmem[] = { "kmem", "kernel memory", @@ -72696,7 +73433,7 @@ " This command displays information about the use of kernel memory.\n", " -f displays the contents of the system free memory headers.", " also verifies that the page count equals nr_free_pages.", -@@ -3567,23 +3873,33 @@ +@@ -3567,23 +3879,33 @@ " -i displays general memory usage information", " -s displays basic kmalloc() slab data.", " -S displays all kmalloc() slab data, including all slab objects,", @@ -72736,7 +73473,7 @@ " address when used with -s or -S, searches the kmalloc() slab subsystem", " for the slab containing of this virtual address, showing whether", " it is in use or free.", -@@ -3781,6 +4097,24 @@ +@@ -3781,6 +4103,24 @@ " c2f8ab60 c8095000 - c8097000 8192", " c2f519e0 c8097000 - c8099000 8192", " ", @@ -72761,7 +73498,7 @@ " Determine (and verify) the page cache size:\n", " %s> kmem -c", " page_cache_size: 18431 (verified)", -@@ -3979,18 +4313,21 @@ +@@ -3979,18 +4319,21 @@ char *help_dis[] = { "dis", "disassemble", @@ -72788,7 +73525,7 @@ " preceded by '.' 
is used.", " (expression) expression evaluating to a starting text address.", " count the number of instructions to be disassembled (default is 1).", -@@ -4419,10 +4756,11 @@ +@@ -4419,10 +4762,11 @@ " Display various network related data:\n", " -a display the ARP cache.", " -s display open network socket/sock addresses, their family and type,", @@ -72802,7 +73539,7 @@ " value into a standard numbers-and-dots notation.", " -R ref socket or sock address, or file descriptor.", " pid a process PID.", -@@ -4450,8 +4788,8 @@ +@@ -4450,8 +4794,8 @@ " Display the sockets for PID 2517, using both -s and -S output formats:\n", " %s> net -s 2517", " PID: 2517 TASK: c1598000 CPU: 1 COMMAND: \"rlogin\"", @@ -72813,7 +73550,7 @@ " ", " %s> net -S 2517", " PID: 2517 TASK: c1598000 CPU: 1 COMMAND: \"rlogin\"", -@@ -4497,52 +4835,52 @@ +@@ -4497,52 +4841,52 @@ " From \"foreach\", find all tasks with references to socket c08ea3cc:\n", " %s> foreach net -s -R c08ea3cc", " PID: 2184 TASK: c7026000 CPU: 1 COMMAND: \"klines.kss\"", @@ -72890,7 +73627,7 @@ " ", NULL }; -@@ -4584,21 +4922,22 @@ +@@ -4584,21 +4928,22 @@ void cmd_usage(char *cmd, int helpflag) { @@ -72925,7 +73662,7 @@ if (STREQ(cmd, "copying")) { display_copying_info(); -@@ -4641,46 +4980,50 @@ +@@ -4641,46 +4986,50 @@ goto done_usage; } @@ -73000,7 +73737,7 @@ p++; fprintf(fp, "Usage: %s ", cmd); fprintf(fp, *p, pc->program_name, pc->program_name); -@@ -4711,10 +5054,12 @@ +@@ -4711,10 +5060,12 @@ done_usage: @@ -73016,7 +73753,7 @@ } -@@ -4812,7 +5157,9 @@ +@@ -4812,7 +5163,9 @@ "The default output radix for gdb output and certain %s commands is", "hexadecimal. This can be changed to decimal by entering \"set radix 10\"", "or the alias \"dec\". It can be reverted back to hexadecimal by entering", @@ -73027,7 +73764,7 @@ " ", NULL }; -@@ -4854,10 +5201,13 @@ +@@ -4854,10 +5207,13 @@ static char *version_info[] = { @@ -73045,7 +73782,7 @@ "Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.", "This program is free software, covered by the GNU General Public License,", "and you are welcome to change it and/or distribute copies of it under", ---- crash/gdb-6.1.patch.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/gdb-6.1.patch.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/gdb-6.1.patch 2008-02-19 15:31:45.000000000 -0500 @@ -0,0 +1,97 @@ +--- gdb-6.1.orig/bfd/coff-alpha.c @@ -73145,7 +73882,7 @@ + #include + #include + #include ---- crash/diskdump.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/diskdump.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/diskdump.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,16 +1,16 @@ /* @@ -73991,7 +74728,7 @@ +{ + return (get_dump_level() > 0 ? 
TRUE : FALSE); +} ---- crash/unwind_x86_32_64.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/unwind_x86_32_64.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/unwind_x86_32_64.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,1220 @@ +#if defined(X86_64) @@ -75214,8 +75951,8 @@ + + +#endif ---- crash/s390x.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/s390x.c 2008-01-04 09:42:08.000000000 -0500 +--- crash/s390x.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/s390x.c 2008-04-11 11:26:36.000000000 -0400 @@ -1,9 +1,9 @@ /* s390.c - core analysis suite * @@ -75360,7 +76097,7 @@ if(THIS_KERNEL_VERSION >= LINUX(2,6,0)){ return !((x) & S390X_PAGE_INVALID) || ((x) & S390X_PAGE_INVALID_MASK) == S390X_PAGE_INVALID_NONE; -@@ -317,81 +257,97 @@ +@@ -317,81 +257,102 @@ } } @@ -75449,6 +76186,14 @@ - if(s390x_pmd_bad(pte_base) || - s390x_pmd_none(pte_base)) { - *paddr = 0; +- return FALSE; +- } +- /* get the pte */ +- pte_base = pte_base & S390X_PT_BASE_MASK; +- pte_val = s390x_pte_offset(pte_base,kvaddr); +- if (S390X_PTE_INVALID(pte_val) || +- s390x_pte_none(pte_val)){ +- *paddr = 0; + offset = ((vaddr >> 12) & 0xffULL) * 8; + readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", + FAULT_ON_ERROR); @@ -75486,13 +76231,9 @@ + /* Address too big for the number of page table levels. */ return FALSE; } -- /* get the pte */ -- pte_base = pte_base & S390X_PT_BASE_MASK; -- pte_val = s390x_pte_offset(pte_base,kvaddr); -- if (S390X_PTE_INVALID(pte_val) || -- s390x_pte_none(pte_val)){ -- *paddr = 0; -- return FALSE; +- if(!s390x_pte_present(pte_val)){ +- /* swapped out */ +- *paddr = pte_val; + while (level >= 0) { + entry = _kl_rsg_table_deref_s390x(vaddr, table, len, level); + if (!entry) @@ -75500,10 +76241,12 @@ + table = entry & ~0xfffULL; + len = entry & 0x3ULL; + level--; - } -- if(!s390x_pte_present(pte_val)){ -- /* swapped out */ -- *paddr = pte_val; ++ } ++ ++ /* Check if this is a large page. */ ++ if (entry & 0x400ULL) ++ /* Add the 1MB page offset and return the final value. 
*/ ++ return table + (vaddr & 0xfffffULL); + + /* Get the page table entry */ + entry = _kl_pg_table_deref_s390x(vaddr, entry & ~0x7ffULL); @@ -75522,7 +76265,7 @@ return TRUE; } -@@ -514,7 +470,7 @@ +@@ -514,7 +475,7 @@ return FALSE; } fprintf(fp,"PTE PHYSICAL FLAGS\n"); @@ -75531,7 +76274,7 @@ fprintf(fp," ("); if(pte & S390X_PAGE_INVALID) fprintf(fp,"INVALID "); -@@ -541,7 +497,7 @@ +@@ -541,7 +502,7 @@ /* * returns cpu number of task */ @@ -75540,7 +76283,7 @@ s390x_cpu_of_task(unsigned long task) { unsigned int cpu; -@@ -583,12 +539,13 @@ +@@ -583,12 +544,13 @@ return FALSE; } else { /* Linux 2.6 */ @@ -75556,7 +76299,7 @@ runqueue_offset=symbol_value("per_cpu__runqueues"); per_cpu_offset_addr=symbol_value("__per_cpu_offset"); -@@ -596,10 +553,10 @@ +@@ -596,10 +558,10 @@ &cpu_offset, sizeof(long),"per_cpu_offset", FAULT_ON_ERROR); runqueue_addr=runqueue_offset + cpu_offset; @@ -75570,7 +76313,7 @@ if(running_task == task) return TRUE; else -@@ -733,7 +690,7 @@ +@@ -733,7 +695,7 @@ } else if(skip_first_frame){ skip_first_frame=0; } else { @@ -75579,7 +76322,7 @@ fprintf(fp,"%s at %x\n", closest_symbol(r14), r14); if (bt->flags & BT_LINE_NUMBERS) s390x_dump_line_number(r14); -@@ -743,22 +700,25 @@ +@@ -743,22 +705,25 @@ backchain = ULONG(&stack[backchain - stack_base + bc_offset]); /* print stack content if -f is specified */ @@ -75613,7 +76356,7 @@ } /* Check for interrupt stackframe */ -@@ -804,26 +764,26 @@ +@@ -804,26 +769,26 @@ return; } fprintf(fp," LOWCORE INFO:\n"); @@ -75645,7 +76388,7 @@ fprintf(fp," -general registers:\n"); ptr = lc + MEMBER_OFFSET("_lowcore","gpregs_save_area"); -@@ -831,26 +791,26 @@ +@@ -831,26 +796,26 @@ tmp[1]=ULONG(ptr + S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); @@ -75680,7 +76423,7 @@ fprintf(fp," -access registers:\n"); ptr = lc + MEMBER_OFFSET("_lowcore","access_regs_save_area"); -@@ -858,25 +818,25 @@ +@@ -858,25 +823,25 @@ tmp[1]=ULONG(ptr + 4); tmp[2]=ULONG(ptr + 2 * 4); tmp[3]=ULONG(ptr + 3 * 4); @@ -75710,7 +76453,7 @@ tmp[0], tmp[1], tmp[2], tmp[3]); fprintf(fp," -control registers:\n"); -@@ -885,26 +845,26 @@ +@@ -885,26 +850,26 @@ tmp[1]=ULONG(ptr + S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); @@ -75745,7 +76488,7 @@ ptr = lc + MEMBER_OFFSET("_lowcore","floating_pt_save_area"); fprintf(fp," -floating point registers 0,2,4,6:\n"); -@@ -912,26 +872,26 @@ +@@ -912,26 +877,26 @@ tmp[1]=ULONG(ptr + S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); @@ -75780,7 +76523,7 @@ } /* ---- crash/lkcd_dump_v8.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/lkcd_dump_v8.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_dump_v8.h 2008-01-04 09:42:08.000000000 -0500 @@ -235,4 +235,304 @@ int stack_offset; @@ -76087,16 +76830,16 @@ +#endif + #endif /* _DUMP_H */ ---- crash/x86.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/x86.c 2008-01-04 09:42:08.000000000 -0500 +--- crash/x86.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/x86.c 2008-04-23 13:56:05.000000000 -0400 @@ -1,8 +1,8 @@ /* x86.c - core analysis suite * * Portions Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. - * Copyright (C) 2002, 2003, 2004, 2005 David Anderson - * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 David Anderson -+ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc. 
All rights reserved. ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 David Anderson ++ * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -76358,7 +77101,7 @@ } else { PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL; PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL; -@@ -1696,14 +1759,19 @@ +@@ -1696,19 +1759,45 @@ machdep->cmd_mach = x86_cmd_mach; machdep->get_smp_cpus = x86_get_smp_cpus; machdep->line_number_hooks = x86_line_number_hooks; @@ -76381,7 +77124,33 @@ STRUCT_SIZE_INIT(user_regs_struct, "user_regs_struct"); MEMBER_OFFSET_INIT(user_regs_struct_ebp, "user_regs_struct", "ebp"); -@@ -1723,9 +1791,37 @@ + MEMBER_OFFSET_INIT(user_regs_struct_esp, + "user_regs_struct", "esp"); ++ if (!VALID_STRUCT(user_regs_struct)) { ++ /* Use this hardwired version -- sometimes the ++ * debuginfo doesn't pick this up even though ++ * it exists in the kernel; it shouldn't change. ++ */ ++ struct x86_user_regs_struct { ++ long ebx, ecx, edx, esi, edi, ebp, eax; ++ unsigned short ds, __ds, es, __es; ++ unsigned short fs, __fs, gs, __gs; ++ long orig_eax, eip; ++ unsigned short cs, __cs; ++ long eflags, esp; ++ unsigned short ss, __ss; ++ }; ++ ASSIGN_SIZE(user_regs_struct) = ++ sizeof(struct x86_user_regs_struct); ++ ASSIGN_OFFSET(user_regs_struct_ebp) = ++ offsetof(struct x86_user_regs_struct, ebp); ++ ASSIGN_OFFSET(user_regs_struct_esp) = ++ offsetof(struct x86_user_regs_struct, esp); ++ } + MEMBER_OFFSET_INIT(thread_struct_cr3, "thread_struct", "cr3"); + STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); + STRUCT_SIZE_INIT(e820map, "e820map"); +@@ -1723,9 +1812,37 @@ "irq_desc", NULL, 0); else machdep->nr_irqs = 224; /* NR_IRQS */ @@ -76422,7 +77191,7 @@ break; case POST_INIT: -@@ -1735,6 +1831,47 @@ +@@ -1735,6 +1852,67 @@ } /* @@ -76447,30 +77216,50 @@ + return; + } + -+ INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "xss") / 4; -+ INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "esp") / 4; -+ INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "eflags") / 4; -+ INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "xcs") / 4; -+ INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "eip") / 4; -+ INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_eax") / 4; -+ if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "xgs")) != -1) -+ INT_EFRAME_GS /= 4; -+ INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "xes") / 4; -+ INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "xds") / 4; -+ INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "eax") / 4; -+ INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "ebp") / 4; -+ INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "edi") / 4; -+ INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "esi") / 4; -+ INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "edx") / 4; -+ INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "ecx") / 4; -+ INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "ebx") / 4; ++ if (MEMBER_EXISTS("pt_regs", "esp")) { ++ INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "xss") / 4; ++ INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "esp") / 4; ++ INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "eflags") / 4; ++ INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "xcs") / 4; ++ INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "eip") / 4; ++ INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_eax") / 4; ++ if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "xgs")) != -1) ++ INT_EFRAME_GS /= 4; ++ INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "xes") / 4; ++ INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "xds") / 4; ++ INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "eax") / 
4; ++ INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "ebp") / 4; ++ INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "edi") / 4; ++ INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "esi") / 4; ++ INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "edx") / 4; ++ INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "ecx") / 4; ++ INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "ebx") / 4; ++ } else { ++ INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "ss") / 4; ++ INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "sp") / 4; ++ INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "flags") / 4; ++ INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "cs") / 4; ++ INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "ip") / 4; ++ INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_ax") / 4; ++ if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "gs")) != -1) ++ INT_EFRAME_GS /= 4; ++ INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "es") / 4; ++ INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "ds") / 4; ++ INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "ax") / 4; ++ INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "bp") / 4; ++ INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "di") / 4; ++ INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "si") / 4; ++ INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "dx") / 4; ++ INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "cx") / 4; ++ INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "bx") / 4; ++ } +} + +/* * Needs to be done this way because of potential 4G/4G split. */ static int -@@ -1825,7 +1962,7 @@ +@@ -1825,7 +2003,7 @@ fprintf(fp, " PAGE: %s (4MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pgd_pte)))); @@ -76479,7 +77268,7 @@ } *paddr = NONPAE_PAGEBASE(pgd_pte) + (vaddr & ~_4MB_PAGE_MASK); -@@ -1892,7 +2029,170 @@ +@@ -1892,7 +2070,170 @@ } static int @@ -76651,7 +77440,7 @@ { ulong mm, active_mm; ulonglong *pgd; -@@ -1962,7 +2262,7 @@ +@@ -1962,7 +2303,7 @@ page_middle = PAE_PAGEBASE(page_dir_entry); @@ -76660,7 +77449,7 @@ offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); -@@ -1998,7 +2298,7 @@ +@@ -1998,7 +2339,7 @@ page_table = PAE_PAGEBASE(page_middle_entry); @@ -76669,7 +77458,7 @@ offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong); -@@ -2028,9 +2328,10 @@ +@@ -2028,9 +2369,10 @@ *paddr = physpage; if (verbose) { @@ -76683,7 +77472,7 @@ x86_translate_pte(0, 0, page_table_entry); } -@@ -2040,62 +2341,259 @@ +@@ -2040,54 +2382,251 @@ return FALSE; } @@ -76780,13 +77569,10 @@ + fprintf(fp, " PGD: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, - MKSTR((ulong)page_dir)), pgd_pte); -- -- if (!pgd_pte) -- goto no_kpage; + MKSTR((ulong)pgd + offset)), + page_dir_entry); -- if (pgd_pte & _PAGE_4M) { +- if (!pgd_pte) + if (!(page_dir_entry & _PAGE_PRESENT)) { + goto no_upage; + } @@ -76967,10 +77753,10 @@ + MKSTR((ulong)page_dir)), pgd_pte); + + if (!pgd_pte) -+ goto no_kpage; -+ -+ if (pgd_pte & _PAGE_4M) { - if (verbose) { + goto no_kpage; + + if (pgd_pte & _PAGE_4M) { +@@ -2095,7 +2634,7 @@ fprintf(fp, " PAGE: %s (4MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pgd_pte)))); @@ -76979,7 +77765,7 @@ } *paddr = NONPAE_PAGEBASE(pgd_pte) + (kvaddr & ~_4MB_PAGE_MASK); -@@ -2158,9 +2656,134 @@ +@@ -2158,9 +2697,134 @@ return FALSE; } @@ -77115,7 +77901,7 @@ { ulonglong *pgd; ulonglong page_dir_entry; -@@ -2177,18 +2800,29 @@ +@@ -2177,18 +2841,32 @@ if (!IS_KVADDR(kvaddr)) return FALSE; @@ -77133,20 +77919,23 @@ return TRUE; - } + } -+ pgd = (ulonglong *)symbol_value("idle_pg_table_l3"); ++ if (symbol_exists("idle_pg_table_l3")) ++ pgd = (ulonglong *)symbol_value("idle_pg_table_l3"); ++ else ++ 
pgd = (ulonglong *)symbol_value("idle_pg_table"); + } else { + if (!vt->vmalloc_start) { + *paddr = VTOP(kvaddr); + return TRUE; + } -+ + +- pgd = (ulonglong *)vt->kernel_pgd[0]; + if (!IS_VMALLOC_ADDR(kvaddr)) { + *paddr = VTOP(kvaddr); + if (!verbose) + return TRUE; + } - -- pgd = (ulonglong *)vt->kernel_pgd[0]; ++ + if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) + return (x86_kvtop_xen_wpt_PAE(tc, kvaddr, paddr, verbose)); + @@ -77155,7 +77944,7 @@ if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); -@@ -2212,7 +2846,7 @@ +@@ -2212,7 +2890,7 @@ page_middle = PAE_PAGEBASE(page_dir_entry); @@ -77164,7 +77953,7 @@ offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); -@@ -2249,7 +2883,7 @@ +@@ -2249,7 +2927,7 @@ page_table = PAE_PAGEBASE(page_middle_entry); @@ -77173,7 +77962,7 @@ offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong); -@@ -2277,9 +2911,10 @@ +@@ -2277,9 +2955,10 @@ *paddr = physpage; if (verbose) { @@ -77185,7 +77974,7 @@ x86_translate_pte(0, 0, page_table_entry); } -@@ -2289,11 +2924,170 @@ +@@ -2289,13 +2968,172 @@ return FALSE; } @@ -77196,7 +77985,8 @@ -x86_get_task_pgd(ulong task) +static int +x86_kvtop_xen_wpt_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) -+{ + { +- long offset; + ulonglong *pgd; + ulonglong page_dir_entry; + ulonglong page_middle, pseudo_page_middle; @@ -77358,18 +78148,21 @@ + */ +static ulong +x86_get_task_pgd(ulong task) - { - long offset; ++{ ++ long offset; ulong cr3; -@@ -2341,6 +3135,7 @@ + + offset = OFFSET_OPTION(task_struct_thread, task_struct_tss); +@@ -2341,6 +3179,8 @@ x86_dump_machdep_table(ulong arg) { int others; + ulong xen_wpt; ++ char buf[BUFSIZE]; switch (arg) { default: -@@ -2355,8 +3150,6 @@ +@@ -2355,8 +3195,6 @@ fprintf(fp, "%sPAE", others++ ? "|" : ""); if (machdep->flags & OMIT_FRAME_PTR) fprintf(fp, "%sOMIT_FRAME_PTR", others++ ? "|" : ""); @@ -77378,7 +78171,7 @@ if (machdep->flags & FRAMESIZE_DEBUG) fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : ""); fprintf(fp, ")\n"); -@@ -2376,12 +3169,17 @@ +@@ -2376,12 +3214,17 @@ fprintf(fp, " eframe_search: x86_eframe_search()\n"); fprintf(fp, " back_trace: x86_back_trace_cmd()\n"); fprintf(fp, "get_processor_speed: x86_processor_speed()\n"); @@ -77400,7 +78193,7 @@ } fprintf(fp, " get_task_pgd: x86_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); -@@ -2399,7 +3197,7 @@ +@@ -2399,7 +3242,7 @@ fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); @@ -77409,7 +78202,7 @@ fprintf(fp, " value_to_symbol: %s\n", machdep->value_to_symbol == generic_machdep_value_to_symbol ? 
"generic_machdep_value_to_symbol()" : -@@ -2412,6 +3210,14 @@ +@@ -2412,6 +3255,48 @@ fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); @@ -77421,10 +78214,44 @@ + fprintf(fp, " get_xendump_regs: x86_get_xendump_regs()\n"); + fprintf(fp, "xen_kdump_p2m_create: x86_xen_kdump_p2m_create()\n"); + fprintf(fp, "clear_machdep_cache: x86_clear_machdep_cache()\n"); ++ fprintf(fp, " INT_EFRAME_[reg]:\n"); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "SS: "), INT_EFRAME_SS); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "ESP: "), INT_EFRAME_ESP); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "EFLAGS: "), INT_EFRAME_EFLAGS); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "CS: "), INT_EFRAME_CS); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "IP: "), INT_EFRAME_EIP); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "ERR: "), INT_EFRAME_ERR); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "ES: "), INT_EFRAME_ES); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "DS: "), INT_EFRAME_DS); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "EAX: "), INT_EFRAME_EAX); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "EBP: "), INT_EFRAME_EBP); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "EDI: "), INT_EFRAME_EDI); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "ESI: "), INT_EFRAME_ESI); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "EDX: "), INT_EFRAME_EDX); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "ECX: "), INT_EFRAME_ECX); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "EBX: "), INT_EFRAME_EBX); ++ fprintf(fp, "%s %d\n", ++ mkstring(buf, 21, RJUST, "GS: "), INT_EFRAME_GS); ++ fprintf(fp, " machspec: x86_machine_specific\n"); fprintf(fp, " idt_table: %lx\n", (ulong)machdep->machspec->idt_table); -@@ -2421,6 +3227,11 @@ +@@ -2421,6 +3306,11 @@ machdep->machspec->entry_tramp_end); fprintf(fp, " entry_tramp_start_phys: %llx\n", machdep->machspec->entry_tramp_start_phys); @@ -77436,7 +78263,7 @@ } /* -@@ -2732,6 +3543,9 @@ +@@ -2732,6 +3622,9 @@ switch (flag) { case READ_IDT_INIT: @@ -77446,7 +78273,7 @@ if (!(idt = (ulong *)malloc(desc_struct_size))) { error(WARNING, "cannot malloc idt_table\n\n"); return NULL; -@@ -2779,6 +3593,10 @@ +@@ -2779,6 +3672,10 @@ break; case READ_IDT_RUNTIME: @@ -77457,7 +78284,7 @@ idt = (ulong *)GETBUF(desc_struct_size); readmem(symbol_value("idt_table"), KVADDR, idt, desc_struct_size, "idt_table", FAULT_ON_ERROR); -@@ -2942,7 +3760,11 @@ +@@ -2942,7 +3839,11 @@ !strstr(buf2, "+")) sprintf(p1, buf1); } @@ -77470,7 +78297,7 @@ if (CRASHDEBUG(1)) console(" %s", inbuf); -@@ -2969,6 +3791,16 @@ +@@ -2969,6 +3870,16 @@ } } @@ -77487,7 +78314,7 @@ return count; } -@@ -3026,7 +3858,7 @@ +@@ -3026,7 +3937,7 @@ fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); @@ -77496,7 +78323,7 @@ fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); -@@ -3092,31 +3924,31 @@ +@@ -3092,31 +4003,31 @@ * with the -fomit-frame-pointer flag. 
*/ #define PUSH_BP_MOV_ESP_BP 0xe58955 @@ -77544,7 +78371,7 @@ } /* -@@ -3207,4 +4039,922 @@ +@@ -3207,4 +4118,923 @@ return ((sp = value_search(value, offset))); } @@ -78398,7 +79225,8 @@ + break; + + case PRE_GDB: -+ if (symbol_exists("idle_pg_table_l3")) { ++ if (symbol_exists("create_pae_xen_mappings") || ++ symbol_exists("idle_pg_table_l3")) { + machdep->flags |= PAE; + PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL; + PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL; @@ -78467,9 +79295,9 @@ +} + #endif /* X86 */ ---- crash/netdump.h.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/netdump.h 2008-01-04 09:42:08.000000000 -0500 -@@ -24,3 +24,95 @@ +--- crash/netdump.h.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/netdump.h 2008-04-23 14:39:05.000000000 -0400 +@@ -24,3 +24,96 @@ #define NT_TASKSTRUCT 4 #define NT_DISKDUMP 0x70000001 @@ -78558,6 +79386,7 @@ + ulong accesses; + int p2m_frames; + ulong *p2m_mfn_frame_list; ++ ulong xen_phys_start; +}; + +#define KDUMP_P2M_INIT (0x1) @@ -78565,7 +79394,7 @@ +#define KDUMP_MFN_LIST (0x4) + +#define P2M_FAILURE ((physaddr_t)(0xffffffffffffffffLL)) ---- crash/xen_hyper_global_data.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/xen_hyper_global_data.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/xen_hyper_global_data.c 2008-01-04 09:42:08.000000000 -0500 @@ -0,0 +1,400 @@ +/* @@ -78968,7 +79797,7 @@ +struct task_context fake_tc = { 0 }; + +#endif ---- crash/lkcd_fix_mem.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/lkcd_fix_mem.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_fix_mem.c 2008-01-04 09:42:08.000000000 -0500 @@ -20,21 +20,13 @@ @@ -79033,7 +79862,7 @@ +} + #endif // IA64 ---- crash/dev.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/dev.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/dev.c 2008-01-04 09:42:08.000000000 -0500 @@ -91,13 +91,13 @@ switch(c) @@ -79182,7 +80011,7 @@ for (i = 0; i < devcnt; i++) { /* ---- crash/vas_crash.h.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/vas_crash.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/vas_crash.h 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* vas_crash.h - kernel crash dump file format (on swap) @@ -79204,7 +80033,7 @@ void save_core(void); ---- crash/global_data.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/global_data.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/global_data.c 2008-01-04 09:42:08.000000000 -0500 @@ -68,7 +68,7 @@ * To add a new command, declare it in defs.h and enter it in this table. 
@@ -79225,7 +80054,7 @@ {(char *)NULL} }; ---- crash/unwind.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/unwind.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/unwind.c 2008-01-04 09:42:08.000000000 -0500 @@ -6,8 +6,8 @@ /* @@ -79471,7 +80300,7 @@ static void unw_init_frame_info (struct unw_frame_info *info, struct bt_info *bt, ulong sw) { ---- crash/extensions.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/extensions.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/extensions.c 2008-01-04 09:42:08.000000000 -0500 @@ -18,9 +18,6 @@ #include "defs.h" @@ -79592,7 +80421,7 @@ +{ + return pc->cmd_table; +} ---- crash/lkcd_vmdump_v2_v3.h.orig 2008-02-19 16:16:13.000000000 -0500 +--- crash/lkcd_vmdump_v2_v3.h.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/lkcd_vmdump_v2_v3.h 2008-02-19 16:14:53.000000000 -0500 @@ -1,8 +1,8 @@ /* lkcd_vmdump_v2_v3.h - core analysis suite @@ -79633,8 +80462,8 @@ #include /* for pt_regs */ #endif ---- crash/net.c.orig 2008-02-19 16:16:12.000000000 -0500 -+++ crash/net.c 2008-01-04 09:42:08.000000000 -0500 +--- crash/net.c.orig 2008-04-29 13:51:49.000000000 -0400 ++++ crash/net.c 2008-03-17 16:58:26.000000000 -0400 @@ -1,8 +1,8 @@ /* net.c - core analysis suite * @@ -79654,7 +80483,16 @@ #define DEV_NAME_MAX 100 struct devinfo { -@@ -75,6 +76,7 @@ +@@ -64,6 +65,8 @@ + #define BYTES_IP_TUPLE (BYTES_IP_ADDR + BYTES_PORT_NUM + 1) + + static void show_net_devices(void); ++static void show_net_devices_v2(void); ++static void show_net_devices_v3(void); + static void print_neighbour_q(ulong, int); + static void get_netdev_info(ulong, struct devinfo *); + static void get_device_name(ulong, char *); +@@ -75,6 +78,7 @@ static void dump_sockets(ulong, struct reference *); static int sym_socket_dump(ulong, int, int, ulong, struct reference *); static void dump_hw_addr(unsigned char *, int); @@ -79662,7 +80500,16 @@ #define MK_TYPE_T(f,s,m) \ -@@ -158,13 +160,6 @@ +@@ -109,6 +113,8 @@ + "net_device", "addr_len"); + net->dev_ip_ptr = MEMBER_OFFSET_INIT(net_device_ip_ptr, + "net_device", "ip_ptr"); ++ MEMBER_OFFSET_INIT(net_device_dev_list, "net_device", "dev_list"); ++ MEMBER_OFFSET_INIT(net_dev_base_head, "net", "dev_base_head"); + ARRAY_LENGTH_INIT(net->net_device_name_index, + net_device_name, "net_device.name", NULL, sizeof(char)); + net->flags |= (NETDEV_INIT|STRUCT_NET_DEVICE); +@@ -158,13 +164,6 @@ "in_ifaddr", "ifa_address"); STRUCT_SIZE_INIT(sock, "sock"); @@ -79676,7 +80523,7 @@ MEMBER_OFFSET_INIT(sock_family, "sock", "family"); if (VALID_MEMBER(sock_family)) { -@@ -195,7 +190,23 @@ +@@ -195,7 +194,23 @@ */ STRUCT_SIZE_INIT(inet_sock, "inet_sock"); STRUCT_SIZE_INIT(socket, "socket"); @@ -79701,7 +80548,7 @@ if (VALID_STRUCT(inet_sock) && INVALID_MEMBER(inet_sock_inet)) { /* -@@ -210,15 +221,36 @@ +@@ -210,15 +225,36 @@ * to subtract the size of the inet_opt struct * from the size of the containing inet_sock. 
*/ @@ -79744,7 +80591,140 @@ net->flags |= SOCK_V2; } } -@@ -378,6 +410,24 @@ +@@ -323,8 +359,16 @@ + long flen; + char buf[BUFSIZE]; + ++ if (symbol_exists("dev_base_head")) { ++ show_net_devices_v2(); ++ return; ++ } else if (symbol_exists("init_net")) { ++ show_net_devices_v3(); ++ return; ++ } ++ + if (!symbol_exists("dev_base")) +- error(FATAL, "dev_base does not exist!\n"); ++ error(FATAL, "dev_base, dev_base_head or init_net do not exist!\n"); + + get_symbol_data("dev_base", sizeof(void *), &next); + +@@ -352,6 +396,114 @@ + } while (next); + } + ++static void ++show_net_devices_v2(void) ++{ ++ struct list_data list_data, *ld; ++ char *net_device_buf; ++ char buf[BUFSIZE]; ++ ulong *ndevlist; ++ int ndevcnt, i; ++ long flen; ++ ++ if (!net->netdevice) /* initialized in net_init() */ ++ return; ++ ++ flen = MAX(VADDR_PRLEN, strlen(net->netdevice)); ++ ++ fprintf(fp, "%s NAME IP ADDRESS(ES)\n", ++ mkstring(upper_case(net->netdevice, buf), ++ flen, CENTER|LJUST, NULL)); ++ ++ net_device_buf = GETBUF(SIZE(net_device)); ++ ++ ld = &list_data; ++ BZERO(ld, sizeof(struct list_data)); ++ get_symbol_data("dev_base_head", sizeof(void *), &ld->start); ++ ld->end = symbol_value("dev_base_head"); ++ ld->list_head_offset = OFFSET(net_device_dev_list); ++ ++ hq_open(); ++ ndevcnt = do_list(ld); ++ ndevlist = (ulong *)GETBUF(ndevcnt * sizeof(ulong)); ++ ndevcnt = retrieve_list(ndevlist, ndevcnt); ++ hq_close(); ++ ++ for (i = 0; i < ndevcnt; ++i) { ++ readmem(ndevlist[i], KVADDR, net_device_buf, ++ SIZE(net_device), "net_device buffer", ++ FAULT_ON_ERROR); ++ ++ fprintf(fp, "%s ", ++ mkstring(buf, flen, CENTER|RJUST|LONG_HEX, ++ MKSTR(ndevlist[i]))); ++ ++ get_device_name(ndevlist[i], buf); ++ fprintf(fp, "%-6s ", buf); ++ ++ get_device_address(ndevlist[i], buf); ++ fprintf(fp, "%s\n", buf); ++ } ++ ++ FREEBUF(ndevlist); ++ FREEBUF(net_device_buf); ++} ++ ++static void ++show_net_devices_v3(void) ++{ ++ struct list_data list_data, *ld; ++ char *net_device_buf; ++ char buf[BUFSIZE]; ++ ulong *ndevlist; ++ int ndevcnt, i; ++ long flen; ++ ++ if (!net->netdevice) /* initialized in net_init() */ ++ return; ++ ++ flen = MAX(VADDR_PRLEN, strlen(net->netdevice)); ++ ++ fprintf(fp, "%s NAME IP ADDRESS(ES)\n", ++ mkstring(upper_case(net->netdevice, buf), ++ flen, CENTER|LJUST, NULL)); ++ ++ net_device_buf = GETBUF(SIZE(net_device)); ++ ++ ld = &list_data; ++ BZERO(ld, sizeof(struct list_data)); ++ ld->start = ld->end = ++ symbol_value("init_net") + OFFSET(net_dev_base_head); ++ ld->list_head_offset = OFFSET(net_device_dev_list); ++ ++ hq_open(); ++ ndevcnt = do_list(ld); ++ ndevlist = (ulong *)GETBUF(ndevcnt * sizeof(ulong)); ++ ndevcnt = retrieve_list(ndevlist, ndevcnt); ++ hq_close(); ++ ++ /* ++ * Skip the first entry (init_net). ++ */ ++ for (i = 1; i < ndevcnt; ++i) { ++ readmem(ndevlist[i], KVADDR, net_device_buf, ++ SIZE(net_device), "net_device buffer", ++ FAULT_ON_ERROR); ++ ++ fprintf(fp, "%s ", ++ mkstring(buf, flen, CENTER|RJUST|LONG_HEX, ++ MKSTR(ndevlist[i]))); ++ ++ get_device_name(ndevlist[i], buf); ++ fprintf(fp, "%-6s ", buf); ++ ++ get_device_address(ndevlist[i], buf); ++ fprintf(fp, "%s\n", buf); ++ } ++ ++ FREEBUF(ndevlist); ++ FREEBUF(net_device_buf); ++} + + /* + * Perform the actual work of dumping the ARP table... +@@ -378,6 +530,24 @@ nhash_buckets = (i = ARRAY_LENGTH(neigh_table_hash_buckets)) ? 
i : get_array_length("neigh_table.hash_buckets", NULL, sizeof(void *)); @@ -79769,7 +80749,7 @@ hash_bytes = nhash_buckets * sizeof(*hash_buckets); hash_buckets = (ulong *)GETBUF(hash_bytes); -@@ -609,8 +659,14 @@ +@@ -609,8 +779,14 @@ uint16_t dport, sport; ushort num, family, type; char *sockbuf, *inet_sockbuf; @@ -79784,7 +80764,7 @@ sockbuf = inet_sockbuf = NULL; switch (net->flags & (SOCK_V1|SOCK_V2)) -@@ -646,6 +702,7 @@ +@@ -646,6 +822,7 @@ OFFSET(inet_opt_num)); family = USHORT(inet_sockbuf + OFFSET(sock_common_skc_family)); type = USHORT(inet_sockbuf + OFFSET(sock_sk_type)); @@ -79792,7 +80772,7 @@ break; } -@@ -723,27 +780,28 @@ +@@ -723,27 +900,28 @@ } /* make sure we have room at the end... */ @@ -79826,7 +80806,7 @@ inet_ntoa(*((struct in_addr *)&daddr)), ntohs(dport)); } -@@ -753,6 +811,60 @@ +@@ -753,6 +931,60 @@ FREEBUF(sockbuf); if (inet_sockbuf) FREEBUF(inet_sockbuf); @@ -79887,7 +80867,7 @@ } -@@ -899,6 +1011,8 @@ +@@ -899,6 +1131,8 @@ fprintf(fp, "%sSTRUCT_DEVICE", others++ ? "|" : ""); if (net->flags & STRUCT_NET_DEVICE) fprintf(fp, "%sSTRUCT_NET_DEVICE", others++ ? "|" : ""); @@ -79896,7 +80876,7 @@ if (net->flags & SOCK_V1) fprintf(fp, "%sSOCK_V1", others++ ? "|" : ""); if (net->flags & SOCK_V2) -@@ -972,7 +1086,7 @@ +@@ -972,7 +1206,7 @@ void dump_sockets_workhorse(ulong task, ulong flag, struct reference *ref) { @@ -79905,7 +80885,7 @@ int max_fdset = 0; int max_fds = 0; ulong open_fds_addr = 0; -@@ -1004,32 +1118,54 @@ +@@ -1004,32 +1238,54 @@ sizeof(void *), "task files contents", FAULT_ON_ERROR); if (files_struct_addr) { @@ -79978,7 +80958,7 @@ if (!open_fds_addr || !fd) { if (!NET_REFERENCE_CHECK(ref)) fprintf(fp, "No open sockets.\n"); -@@ -1061,7 +1197,7 @@ +@@ -1061,7 +1317,7 @@ for (;;) { unsigned long set; i = j * __NFDBITS; @@ -79987,7 +80967,7 @@ break; set = open_fds.__fds_bits[j++]; while (set) { -@@ -1096,9 +1232,9 @@ +@@ -1096,9 +1352,9 @@ */ static char *socket_hdr_32 = @@ -79999,7 +80979,7 @@ static int sym_socket_dump(ulong file, -@@ -1223,7 +1359,12 @@ +@@ -1223,7 +1479,12 @@ dump_struct("sock", sock, 0); break; case SOCK_V2: @@ -80013,7 +80993,7 @@ break; } break; ---- crash/s390_dump.c.orig 2008-02-19 16:16:12.000000000 -0500 +--- crash/s390_dump.c.orig 2008-04-29 13:51:49.000000000 -0400 +++ crash/s390_dump.c 2008-01-04 09:42:08.000000000 -0500 @@ -1,8 +1,8 @@ /* s390_dump.c - core analysis suite diff --git a/crash.spec b/crash.spec index 1082237..13d3594 100644 --- a/crash.spec +++ b/crash.spec @@ -4,7 +4,7 @@ Summary: crash utility for live systems; netdump, diskdump, kdump, LKCD or mcore dumpfiles Name: crash Version: 4.0 -Release: 6.0.5 +Release: 6.3 License: GPL Group: Development/Debuggers Source: %{name}-%{version}.tar.gz @@ -64,6 +64,10 @@ rm -rf %{buildroot} %{_includedir}/* %changelog +* Tue Aug 29 2008 Dave Anderson - 4.0-6.3 +- Added crash-devel subpackage +- Updated crash.patch to match upstream version 4.0-6.3 + * Wed Feb 20 2008 Dave Anderson - 4.0-6.0.5 - Second attempt at addressing the GCC 4.3 build, which failed due to additional ptrace.h includes in the lkcd vmdump header files.