diff --git a/.crash.metadata b/.crash.metadata
index 10ef077..c8d8321 100644
--- a/.crash.metadata
+++ b/.crash.metadata
@@ -1,2 +1,2 @@
-334bce71a69ccf8abefaf8c4bc5eec67c9b43c9e SOURCES/crash-7.2.8.tar.gz
+20865107a4a2ffcb31d9b2f390f72e1dcc3a5dbc SOURCES/crash-7.2.9.tar.gz
 026f4c9e1c8152a2773354551c523acd32d7f00e SOURCES/gdb-7.6.tar.gz
diff --git a/.gitignore b/.gitignore
index c11877e..989bab0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,2 @@
-SOURCES/crash-7.2.8.tar.gz
+SOURCES/crash-7.2.9.tar.gz
 SOURCES/gdb-7.6.tar.gz
diff --git a/SOURCES/github_b80b16549e24.patch b/SOURCES/github_b80b16549e24.patch
deleted file mode 100644
index 975ea2e..0000000
--- a/SOURCES/github_b80b16549e24.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-commit b80b16549e24769c7d5fe3a10c4b1a1c4b5161f3
-Author: Dave Anderson
-Date:   Mon Mar 23 15:52:11 2020 -0400
-
-    Fix for reading compressed kdump dumpfiles from systems with physical
-    memory located at extraordinarily high addresses.  In a system with
-    a physical address range from 0x602770ecf000 to 0x6027ffffffff, the
-    crash utility fails during session initialization due to an integer
-    overflow, ending with the error message "crash: vmlinux and vmcore
-    do not match!".
-    (chenjialong@huawei.com)
-
-diff --git crash-7.2.8/diskdump.c crash-7.2.8/diskdump.c
-index e88243e616cc..328c932dad5a 100644
---- crash-7.2.8/diskdump.c
-+++ crash-7.2.8/diskdump.c
-@@ -233,7 +233,7 @@ clean_diskdump_data(void)
- }
- 
- static inline int
--get_bit(char *map, int byte, int bit)
-+get_bit(char *map, unsigned long byte, int bit)
- {
- 	return map[byte] & (1<<bit);
- }
-@@ ... @@
- 	dd->max_mapnr = header->max_mapnr;
- 
- 	/* read memory bitmap */
--	bitmap_len = block_size * header->bitmap_blocks;
-+	bitmap_len = (off_t)block_size * header->bitmap_blocks;
- 	dd->bitmap_len = bitmap_len;
- 
- 	offset = (off_t)block_size * (1 + header->sub_hdr_size);
-@@ -744,7 +744,7 @@ restart:
- 	memcpy(dd->dumpable_bitmap, dd->bitmap, bitmap_len);
- 
- 	dd->data_offset
--	  = (1 + header->sub_hdr_size + header->bitmap_blocks)
-+	  = (1UL + header->sub_hdr_size + header->bitmap_blocks)
- 	  * header->block_size;
- 
- 	dd->header = header;
diff --git a/SOURCES/rhel8_arm64_52_bit_fix.patch b/SOURCES/rhel8_arm64_52_bit_fix.patch
new file mode 100644
index 0000000..8efc331
--- /dev/null
+++ b/SOURCES/rhel8_arm64_52_bit_fix.patch
@@ -0,0 +1,1037 @@
+commit fe84f31c36bff278f68553d521c983836c5670e2
+Author: Bhupesh Sharma
+Date:   Tue Dec 1 02:23:53 2020 +0530
+
+    arm64 crash fix for older kernels
+
+    Signed-off-by: Bhupesh Sharma
+
+diff --git a/arm64.c b/arm64.c
+index fdf77bd5e0c1..6d825c9e67de 100644
+--- a/arm64.c
++++ b/arm64.c
+@@ -27,13 +27,9 @@
+ static struct machine_specific arm64_machine_specific = { 0 };
+ static int arm64_verify_symbol(const char *, ulong, char);
+ static void arm64_parse_cmdline_args(void);
+-static int arm64_search_for_kimage_voffset(ulong);
+-static int verify_kimage_voffset(void);
+ static void arm64_calc_kimage_voffset(void);
+ static void arm64_calc_phys_offset(void);
+-static void arm64_calc_physvirt_offset(void);
+ static void arm64_calc_virtual_memory_ranges(void);
+-static void arm64_get_section_size_bits(void);
+ static int arm64_kdump_phys_base(ulong *);
+ static ulong arm64_processor_speed(void);
+ static void arm64_init_kernel_pgd(void);
+@@ -82,10 +78,9 @@ static int arm64_on_irq_stack(int, ulong);
+ static void arm64_set_irq_stack(struct bt_info *);
+ static void arm64_set_process_stack(struct bt_info *);
+ static int arm64_get_kvaddr_ranges(struct vaddr_range *);
+-static void
arm64_get_crash_notes(void); ++static int arm64_get_crash_notes(void); + static void arm64_calc_VA_BITS(void); + static int arm64_is_uvaddr(ulong, struct task_context *); +-static void arm64_calc_KERNELPACMASK(void); + + + /* +@@ -149,12 +144,6 @@ arm64_init(int when) + if (kernel_symbol_exists("kimage_voffset")) + machdep->flags |= NEW_VMEMMAP; + +- if (!machdep->pagesize && +- (string = pc->read_vmcoreinfo("PAGESIZE"))) { +- machdep->pagesize = atoi(string); +- free(string); +- } +- + if (!machdep->pagesize) { + /* + * Kerneldoc Documentation/arm64/booting.txt describes +@@ -181,16 +170,17 @@ arm64_init(int when) + + } + +- /* +- * This code section will only be executed if the kernel is +- * earlier than Linux 4.4 (if there is no vmcoreinfo) +- */ + if (!machdep->pagesize && + kernel_symbol_exists("swapper_pg_dir") && + kernel_symbol_exists("idmap_pg_dir")) { +- value = symbol_value("swapper_pg_dir") - +- symbol_value("idmap_pg_dir"); ++ if (kernel_symbol_exists("tramp_pg_dir")) ++ value = symbol_value("tramp_pg_dir"); ++ else if (kernel_symbol_exists("reserved_ttbr0")) ++ value = symbol_value("reserved_ttbr0"); ++ else ++ value = symbol_value("swapper_pg_dir"); + ++ value -= symbol_value("idmap_pg_dir"); + /* + * idmap_pg_dir is 2 pages prior to 4.1, + * and 3 pages thereafter. Only 4K and 64K +@@ -215,21 +205,13 @@ arm64_init(int when) + machdep->pagemask = ~((ulonglong)machdep->pageoffset); + + arm64_calc_VA_BITS(); +- arm64_calc_KERNELPACMASK(); + ms = machdep->machspec; +- if (ms->VA_BITS_ACTUAL) { +- ms->page_offset = ARM64_PAGE_OFFSET_ACTUAL; +- machdep->identity_map_base = ARM64_PAGE_OFFSET_ACTUAL; +- machdep->kvbase = ARM64_PAGE_OFFSET_ACTUAL; +- ms->userspace_top = ARM64_USERSPACE_TOP_ACTUAL; +- } else { +- ms->page_offset = ARM64_PAGE_OFFSET; +- machdep->identity_map_base = ARM64_PAGE_OFFSET; +- machdep->kvbase = ARM64_VA_START; +- ms->userspace_top = ARM64_USERSPACE_TOP; +- } ++ ms->page_offset = ARM64_PAGE_OFFSET; ++ machdep->identity_map_base = ARM64_PAGE_OFFSET; ++ machdep->kvbase = ARM64_VA_START; + machdep->is_kvaddr = generic_is_kvaddr; + machdep->kvtop = arm64_kvtop; ++ ms->userspace_top = ARM64_USERSPACE_TOP; + if (machdep->flags & NEW_VMEMMAP) { + struct syment *sp; + +@@ -238,15 +220,11 @@ arm64_init(int when) + sp = kernel_symbol_search("_end"); + ms->kimage_end = (sp ? 
sp->value : 0); + +- if (ms->VA_BITS_ACTUAL) { +- ms->modules_vaddr = (st->_stext_vmlinux & TEXT_OFFSET_MASK) - ARM64_MODULES_VSIZE; +- ms->modules_end = ms->modules_vaddr + ARM64_MODULES_VSIZE -1; +- } else { +- ms->modules_vaddr = ARM64_VA_START; +- if (kernel_symbol_exists("kasan_init")) +- ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE; +- ms->modules_end = ms->modules_vaddr + ARM64_MODULES_VSIZE -1; +- } ++ ms->modules_vaddr = ARM64_VA_START; ++ if (kernel_symbol_exists("kasan_init")) ++ ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE; ++ ms->modules_end = ms->modules_vaddr ++ + ARM64_MODULES_VSIZE -1; + + ms->vmalloc_start_addr = ms->modules_end + 1; + +@@ -338,6 +316,10 @@ arm64_init(int when) + + machdep->uvtop = arm64_uvtop; + machdep->is_uvaddr = arm64_is_uvaddr; ++ if (kernel_symbol_exists("vabits_user") && ++ readmem(symbol_value("vabits_user"), KVADDR, ++ &value, sizeof(ulong), "vabits_user", RETURN_ON_ERROR)) ++ machdep->machspec->vabits_user = value; + machdep->eframe_search = arm64_eframe_search; + machdep->back_trace = arm64_back_trace_cmd; + machdep->in_alternate_stack = arm64_in_alternate_stack; +@@ -365,7 +347,6 @@ arm64_init(int when) + + /* use machdep parameters */ + arm64_calc_phys_offset(); +- arm64_calc_physvirt_offset(); + + if (CRASHDEBUG(1)) { + if (machdep->flags & NEW_VMEMMAP) +@@ -373,15 +354,13 @@ arm64_init(int when) + machdep->machspec->kimage_voffset); + fprintf(fp, "phys_offset: %lx\n", + machdep->machspec->phys_offset); +- fprintf(fp, "physvirt_offset: %lx\n", machdep->machspec->physvirt_offset); + } + + break; + + case POST_GDB: + arm64_calc_virtual_memory_ranges(); +- arm64_get_section_size_bits(); +- ++ machdep->section_size_bits = _SECTION_SIZE_BITS; + if (!machdep->max_physmem_bits) { + if ((string = pc->read_vmcoreinfo("NUMBER(MAX_PHYSMEM_BITS)"))) { + machdep->max_physmem_bits = atol(string); +@@ -393,24 +372,8 @@ arm64_init(int when) + else + machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; + } +- + ms = machdep->machspec; + +- if (CRASHDEBUG(1)) { +- if (ms->VA_BITS_ACTUAL) { +- fprintf(fp, "CONFIG_ARM64_VA_BITS: %ld\n", ms->CONFIG_ARM64_VA_BITS); +- fprintf(fp, " VA_BITS_ACTUAL: %ld\n", ms->VA_BITS_ACTUAL); +- fprintf(fp, "(calculated) VA_BITS: %ld\n", ms->VA_BITS); +- fprintf(fp, " PAGE_OFFSET: %lx\n", ARM64_PAGE_OFFSET_ACTUAL); +- fprintf(fp, " VA_START: %lx\n", ms->VA_START); +- fprintf(fp, " modules: %lx - %lx\n", ms->modules_vaddr, ms->modules_end); +- fprintf(fp, " vmalloc: %lx - %lx\n", ms->vmalloc_start_addr, ms->vmalloc_end); +- fprintf(fp, "kernel image: %lx - %lx\n", ms->kimage_text, ms->kimage_end); +- fprintf(fp, " vmemmap: %lx - %lx\n\n", ms->vmemmap_vaddr, ms->vmemmap_end); +- } +- } +- +- + if (THIS_KERNEL_VERSION >= LINUX(4,0,0)) { + ms->__SWP_TYPE_BITS = 6; + ms->__SWP_TYPE_SHIFT = 2; +@@ -470,17 +433,18 @@ arm64_init(int when) + * of the crash. We need this information to extract correct + * backtraces from the panic task. + */ +- if (!LIVE()) +- arm64_get_crash_notes(); ++ if (!LIVE() && !arm64_get_crash_notes()) ++ error(WARNING, ++ "cannot retrieve registers for active task%s\n\n", ++ kt->cpus > 1 ? 
"s" : ""); ++ + break; + + case LOG_ONLY: + machdep->machspec = &arm64_machine_specific; + arm64_calc_VA_BITS(); +- arm64_calc_KERNELPACMASK(); + arm64_calc_phys_offset(); + machdep->machspec->page_offset = ARM64_PAGE_OFFSET; +- arm64_calc_physvirt_offset(); + break; + } + } +@@ -655,20 +619,9 @@ arm64_dump_machdep_table(ulong arg) + + fprintf(fp, " machspec: %lx\n", (ulong)ms); + fprintf(fp, " VA_BITS: %ld\n", ms->VA_BITS); +- fprintf(fp, " CONFIG_ARM64_VA_BITS: %ld\n", ms->CONFIG_ARM64_VA_BITS); +- fprintf(fp, " VA_START: "); +- if (ms->VA_START) +- fprintf(fp, "%lx\n", ms->VA_START); +- else +- fprintf(fp, "(unused)\n"); +- fprintf(fp, " VA_BITS_ACTUAL: "); +- if (ms->VA_BITS_ACTUAL) +- fprintf(fp, "%ld\n", ms->VA_BITS_ACTUAL); +- else +- fprintf(fp, "(unused)\n"); +- fprintf(fp, "CONFIG_ARM64_KERNELPACMASK: "); +- if (ms->CONFIG_ARM64_KERNELPACMASK) +- fprintf(fp, "%lx\n", ms->CONFIG_ARM64_KERNELPACMASK); ++ fprintf(fp, " vabits_user: "); ++ if (ms->vabits_user) ++ fprintf(fp, "%ld\n", ms->vabits_user); + else + fprintf(fp, "(unused)\n"); + fprintf(fp, " userspace_top: %016lx\n", ms->userspace_top); +@@ -757,14 +710,12 @@ arm64_parse_machdep_arg_l(char *argstring, char *param, ulong *value) + int flags = RETURN_ON_ERROR | QUIET; + int err = 0; + +- if (STRNEQ(argstring, "max_physmem_bits")) { +- *value = dtol(p, flags, &err); +- } else if (STRNEQ(argstring, "vabits_actual")) { +- *value = dtol(p, flags, &err); +- } else if (megabytes) { ++ if (megabytes) { + *value = dtol(p, flags, &err); + if (!err) + *value = MEGABYTES(*value); ++ } else if (STRNEQ(argstring, "max_physmem_bits")) { ++ *value = dtol(p, flags, &err); + } else { + *value = htol(p, flags, &err); + } +@@ -830,12 +781,6 @@ arm64_parse_cmdline_args(void) + "setting max_physmem_bits to: %ld\n\n", + machdep->max_physmem_bits); + continue; +- } else if (arm64_parse_machdep_arg_l(arglist[i], "vabits_actual", +- &machdep->machspec->VA_BITS_ACTUAL)) { +- error(NOTE, +- "setting vabits_actual to: %ld\n\n", +- machdep->machspec->VA_BITS_ACTUAL); +- continue; + } + + error(WARNING, "ignoring --machdep option: %s\n", +@@ -844,60 +789,11 @@ arm64_parse_cmdline_args(void) + } + } + +-#define MIN_KIMG_ALIGN (0x00200000) /* kimage load address must be aligned 2M */ +-/* +- * Traverse the entire dumpfile to find/verify kimage_voffset. 
+- */ +-static int +-arm64_search_for_kimage_voffset(ulong phys_base) +-{ +- ulong kimage_load_addr; +- ulong phys_end; +- struct machine_specific *ms = machdep->machspec; +- +- if (!arm_kdump_phys_end(&phys_end)) +- return FALSE; +- +- for (kimage_load_addr = phys_base; +- kimage_load_addr <= phys_end; kimage_load_addr += MIN_KIMG_ALIGN) { +- ms->kimage_voffset = ms->vmalloc_start_addr - kimage_load_addr; +- +- if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) +- ms->kimage_voffset += (kt->relocate * - 1); +- +- if (verify_kimage_voffset()) { +- if (CRASHDEBUG(1)) +- error(INFO, +- "dumpfile searched for kimage_voffset: %lx\n\n", +- ms->kimage_voffset); +- break; +- } +- } +- +- if (kimage_load_addr > phys_end) +- return FALSE; +- +- return TRUE; +-} +- +-static int +-verify_kimage_voffset(void) +-{ +- ulong kimage_voffset; +- +- if (!readmem(symbol_value("kimage_voffset"), KVADDR, &kimage_voffset, +- sizeof(kimage_voffset), "verify kimage_voffset", QUIET|RETURN_ON_ERROR)) +- return FALSE; +- +- return (machdep->machspec->kimage_voffset == kimage_voffset); +-} +- + static void + arm64_calc_kimage_voffset(void) + { + struct machine_specific *ms = machdep->machspec; +- ulong phys_addr = 0; +- int errflag; ++ ulong phys_addr; + + if (ms->kimage_voffset) /* vmcoreinfo, ioctl, or --machdep override */ + return; +@@ -905,6 +801,7 @@ arm64_calc_kimage_voffset(void) + if (ACTIVE()) { + char buf[BUFSIZE]; + char *p1; ++ int errflag; + FILE *iomem; + ulong kimage_voffset, vaddr; + +@@ -945,24 +842,9 @@ arm64_calc_kimage_voffset(void) + if (errflag) + return; + +- } else if (KDUMP_DUMPFILE()) { +- errflag = 1; +- if (arm_kdump_phys_base(&phys_addr)) { /* Get start address of first memory block */ +- ms->kimage_voffset = ms->vmalloc_start_addr - phys_addr; +- if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) +- ms->kimage_voffset += (kt->relocate * -1); +- if (verify_kimage_voffset() || arm64_search_for_kimage_voffset(phys_addr)) +- errflag = 0; +- } +- +- if (errflag) { +- error(WARNING, +- "kimage_voffset cannot be determined from the dumpfile.\n"); +- error(CONT, +- "Try using the command line option: --machdep kimage_voffset=\n"); +- } +- return; +- } else { ++ } else if (KDUMP_DUMPFILE()) ++ arm_kdump_phys_base(&phys_addr); /* Get start address of first memory block */ ++ else { + error(WARNING, + "kimage_voffset cannot be determined from the dumpfile.\n"); + error(CONT, +@@ -978,25 +860,6 @@ arm64_calc_kimage_voffset(void) + ms->kimage_voffset += (kt->relocate * -1); + } + +-static void +-arm64_calc_physvirt_offset(void) +-{ +- struct machine_specific *ms = machdep->machspec; +- ulong physvirt_offset; +- struct syment *sp; +- +- ms->physvirt_offset = ms->phys_offset - ms->page_offset; +- +- if ((sp = kernel_symbol_search("physvirt_offset")) && +- machdep->machspec->kimage_voffset) { +- if (READMEM(pc->mfd, &physvirt_offset, sizeof(physvirt_offset), +- sp->value, sp->value - +- machdep->machspec->kimage_voffset) > 0) { +- ms->physvirt_offset = physvirt_offset; +- } +- } +-} +- + static void + arm64_calc_phys_offset(void) + { +@@ -1020,16 +883,10 @@ arm64_calc_phys_offset(void) + physaddr_t paddr; + ulong vaddr; + struct syment *sp; +- char *string; + + if ((machdep->flags & NEW_VMEMMAP) && + ms->kimage_voffset && (sp = kernel_symbol_search("memstart_addr"))) { + if (pc->flags & PROC_KCORE) { +- if ((string = pc->read_vmcoreinfo("NUMBER(PHYS_OFFSET)"))) { +- ms->phys_offset = htol(string, QUIET, NULL); +- free(string); +- return; +- } + vaddr = 
symbol_value_from_proc_kallsyms("memstart_addr"); + if (vaddr == BADVAL) + vaddr = sp->value; +@@ -1092,31 +949,6 @@ arm64_calc_phys_offset(void) + fprintf(fp, "using %lx as phys_offset\n", ms->phys_offset); + } + +-/* +- * Determine SECTION_SIZE_BITS either by reading VMCOREINFO or the kernel +- * config, otherwise use the 64-bit ARM default definiton. +- */ +-static void +-arm64_get_section_size_bits(void) +-{ +- int ret; +- char *string; +- +- machdep->section_size_bits = _SECTION_SIZE_BITS; +- +- if ((string = pc->read_vmcoreinfo("NUMBER(SECTION_SIZE_BITS)"))) { +- machdep->section_size_bits = atol(string); +- free(string); +- } else if (kt->ikconfig_flags & IKCONFIG_AVAIL) { +- if ((ret = get_kernel_config("CONFIG_MEMORY_HOTPLUG", NULL)) == IKCONFIG_Y) { +- if ((ret = get_kernel_config("CONFIG_HOTPLUG_SIZE_BITS", &string)) == IKCONFIG_STR) +- machdep->section_size_bits = atol(string); +- } +- } +- +- if (CRASHDEBUG(1)) +- fprintf(fp, "SECTION_SIZE_BITS: %ld\n", machdep->section_size_bits); +-} + + /* + * Determine PHYS_OFFSET either by reading VMCOREINFO or the kernel +@@ -1172,14 +1004,9 @@ ulong + arm64_VTOP(ulong addr) + { + if (machdep->flags & NEW_VMEMMAP) { +- if (machdep->machspec->VA_START && +- (addr >= machdep->machspec->kimage_text) && +- (addr <= machdep->machspec->kimage_end)) { +- return addr - machdep->machspec->kimage_voffset; +- } +- + if (addr >= machdep->machspec->page_offset) +- return addr + machdep->machspec->physvirt_offset; ++ return machdep->machspec->phys_offset ++ + (addr - machdep->machspec->page_offset); + else if (machdep->machspec->kimage_voffset) + return addr - machdep->machspec->kimage_voffset; + else /* no randomness */ +@@ -1253,11 +1080,6 @@ arm64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbos + } + } + +-#define PTE_ADDR_LOW ((((1UL) << (48 - machdep->pageshift)) - 1) << machdep->pageshift) +-#define PTE_ADDR_HIGH ((0xfUL) << 12) +-#define PTE_TO_PHYS(pteval) (machdep->max_physmem_bits == 52 ? 
\ +- (((pteval & PTE_ADDR_LOW) | ((pteval & PTE_ADDR_HIGH) << 36))) : (pteval & PTE_ADDR_LOW)) +- + #define PMD_TYPE_MASK 3 + #define PMD_TYPE_SECT 1 + #define PMD_TYPE_TABLE 2 +@@ -1350,7 +1172,7 @@ arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) + * #define __PAGETABLE_PUD_FOLDED + */ + +- pmd_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val)); ++ pmd_base = (ulong *)PTOV(pgd_val & PHYS_MASK & (s32)machdep->pagemask); + FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L3_64K * sizeof(ulong)); + pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L3_64K) & (PTRS_PER_PMD_L3_64K - 1)); + pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr)); +@@ -1360,7 +1182,7 @@ arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) + goto no_page; + + if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) { +- ulong sectionbase = PTE_TO_PHYS(pmd_val) & SECTION_PAGE_MASK_512MB; ++ ulong sectionbase = (pmd_val & SECTION_PAGE_MASK_512MB) & PHYS_MASK; + if (verbose) { + fprintf(fp, " PAGE: %lx (512MB)\n\n", sectionbase); + arm64_translate_pte(pmd_val, 0, 0); +@@ -1369,7 +1191,7 @@ arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) + return TRUE; + } + +- pte_base = (ulong *)PTOV(PTE_TO_PHYS(pmd_val)); ++ pte_base = (ulong *)PTOV(pmd_val & PHYS_MASK & (s32)machdep->pagemask); + FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L3_64K * sizeof(ulong)); + pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L3_64K - 1)); + pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); +@@ -1379,7 +1201,7 @@ arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) + goto no_page; + + if (pte_val & PTE_VALID) { +- *paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr); ++ *paddr = (PAGEBASE(pte_val) & PHYS_MASK) + PAGEOFFSET(vaddr); + if (verbose) { + fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); + arm64_translate_pte(pte_val, 0, 0); +@@ -1682,11 +1504,10 @@ arm64_stackframe_init(void) + machdep->machspec->kern_eframe_offset = SIZE(pt_regs); + } + +- if ((sp1 = kernel_symbol_search("__exception_text_start")) && +- (sp2 = kernel_symbol_search("__exception_text_end"))) { +- machdep->machspec->__exception_text_start = sp1->value; +- machdep->machspec->__exception_text_end = sp2->value; +- } ++ machdep->machspec->__exception_text_start = ++ symbol_value("__exception_text_start"); ++ machdep->machspec->__exception_text_end = ++ symbol_value("__exception_text_end"); + if ((sp1 = kernel_symbol_search("__irqentry_text_start")) && + (sp2 = kernel_symbol_search("__irqentry_text_end"))) { + machdep->machspec->__irqentry_text_start = sp1->value; +@@ -1812,14 +1633,13 @@ static int + arm64_is_kernel_exception_frame(struct bt_info *bt, ulong stkptr) + { + struct arm64_pt_regs *regs; +- struct machine_specific *ms = machdep->machspec; + + regs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(stkptr))]; + + if (INSTACK(regs->sp, bt) && INSTACK(regs->regs[29], bt) && + !(regs->pstate & (0xffffffff00000000ULL | PSR_MODE32_BIT)) && + is_kernel_text(regs->pc) && +- is_kernel_text(regs->regs[30] | ms->CONFIG_ARM64_KERNELPACMASK)) { ++ is_kernel_text(regs->regs[30])) { + switch (regs->pstate & PSR_MODE_MASK) + { + case PSR_MODE_EL1t: +@@ -1896,38 +1716,20 @@ arm64_eframe_search(struct bt_info *bt) + return count; + } + +-static char *arm64_exception_functions[] = { +- "do_undefinstr", +- "do_sysinstr", +- "do_debug_exception", +- "do_mem_abort", +- "do_el0_irq_bp_hardening", +- "do_sp_pc_abort", +- NULL +-}; +- + static int + arm64_in_exception_text(ulong 
ptr) + { + struct machine_specific *ms = machdep->machspec; +- char *name, **func; ++ ++ if ((ptr >= ms->__exception_text_start) && ++ (ptr < ms->__exception_text_end)) ++ return TRUE; + + if (ms->__irqentry_text_start && ms->__irqentry_text_end && + ((ptr >= ms->__irqentry_text_start) && + (ptr < ms->__irqentry_text_end))) + return TRUE; + +- if (ms->__exception_text_start && ms->__exception_text_end) { +- if ((ptr >= ms->__exception_text_start) && +- (ptr < ms->__exception_text_end)) +- return TRUE; +- } else if ((name = closest_symbol(ptr))) { /* Linux 5.5 and later */ +- for (func = &arm64_exception_functions[0]; *func; func++) { +- if (STREQ(name, *func)) +- return TRUE; +- } +- } +- + return FALSE; + } + +@@ -1963,7 +1765,6 @@ arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackfr + * See, for example, "bl schedule" before ret_to_user(). + */ + branch_pc = frame->pc - 4; +- + name = closest_symbol(branch_pc); + name_plus_offset = NULL; + +@@ -2175,7 +1976,7 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame) + unsigned long stack_mask; + unsigned long irq_stack_ptr, orig_sp; + struct arm64_pt_regs *ptregs; +- struct machine_specific *ms = machdep->machspec; ++ struct machine_specific *ms; + + stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1; + fp = frame->fp; +@@ -2189,8 +1990,6 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame) + frame->sp = fp + 0x10; + frame->fp = GET_STACK_ULONG(fp); + frame->pc = GET_STACK_ULONG(fp + 8); +- if (is_kernel_text(frame->pc | ms->CONFIG_ARM64_KERNELPACMASK)) +- frame->pc |= ms->CONFIG_ARM64_KERNELPACMASK; + + if ((frame->fp == 0) && (frame->pc == 0)) + return FALSE; +@@ -2242,6 +2041,7 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame) + * irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id()); + * orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr); (pt_regs pointer on process stack) + */ ++ ms = machdep->machspec; + irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 16; + + if (frame->sp == irq_stack_ptr) { +@@ -2843,8 +2643,6 @@ arm64_print_text_symbols(struct bt_info *bt, struct arm64_stackframe *frame, FIL + char buf2[BUFSIZE]; + char *name; + ulong start; +- ulong val; +- struct machine_specific *ms = machdep->machspec; + + if (bt->flags & BT_TEXT_SYMBOLS_ALL) + start = bt->stackbase; +@@ -2859,10 +2657,8 @@ arm64_print_text_symbols(struct bt_info *bt, struct arm64_stackframe *frame, FIL + + for (i = (start - bt->stackbase)/sizeof(ulong); i < LONGS_PER_STACK; i++) { + up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); +- val = *up; +- if (is_kernel_text(val | ms->CONFIG_ARM64_KERNELPACMASK)) { +- val |= ms->CONFIG_ARM64_KERNELPACMASK; +- name = closest_symbol(val); ++ if (is_kernel_text(*up)) { ++ name = closest_symbol(*up); + fprintf(ofp, " %s[%s] %s at %lx", + bt->flags & BT_ERROR_MASK ? + " " : "", +@@ -2871,13 +2667,13 @@ arm64_print_text_symbols(struct bt_info *bt, struct arm64_stackframe *frame, FIL + MKSTR(bt->stackbase + + (i * sizeof(long)))), + bt->flags & BT_SYMBOL_OFFSET ? 
+- value_to_symstr(val, buf2, bt->radix) : +- name, val); +- if (module_symbol(val, NULL, &lm, NULL, 0)) ++ value_to_symstr(*up, buf2, bt->radix) : ++ name, *up); ++ if (module_symbol(*up, NULL, &lm, NULL, 0)) + fprintf(ofp, " [%s]", lm->mod_name); + fprintf(ofp, "\n"); + if (BT_REFERENCE_CHECK(bt)) +- arm64_do_bt_reference_check(bt, val, name); ++ arm64_do_bt_reference_check(bt, *up, name); + } + } + } +@@ -3180,7 +2976,6 @@ arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *o + struct syment *sp; + ulong LR, SP, offset; + char buf[BUFSIZE]; +- struct machine_specific *ms = machdep->machspec; + + if (CRASHDEBUG(1)) + fprintf(ofp, "pt_regs: %lx\n", pt_regs); +@@ -3196,8 +2991,6 @@ arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *o + rows = 4; + } else { + LR = regs->regs[30]; +- if (is_kernel_text (LR | ms->CONFIG_ARM64_KERNELPACMASK)) +- LR |= ms->CONFIG_ARM64_KERNELPACMASK; + SP = regs->sp; + top_reg = 29; + is_64_bit = TRUE; +@@ -3345,7 +3138,7 @@ arm64_translate_pte(ulong pte, void *physaddr, ulonglong unused) + char *arglist[MAXARGS]; + int page_present; + +- paddr = PTE_TO_PHYS(pte); ++ paddr = pte & PHYS_MASK & (s32)machdep->pagemask; + page_present = pte & (PTE_VALID | machdep->machspec->PTE_PROT_NONE); + + if (physaddr) { +@@ -3611,8 +3404,8 @@ arm64_display_machine_stats(void) + fprintf(fp, " HZ: %d\n", machdep->hz); + fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); + fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->machspec->page_offset); +- fprintf(fp, "KERNEL MODULES BASE: %lx\n", machdep->machspec->modules_vaddr); + fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", machdep->machspec->vmalloc_start_addr); ++ fprintf(fp, "KERNEL MODULES BASE: %lx\n", machdep->machspec->modules_vaddr); + fprintf(fp, "KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr); + fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); + if (machdep->machspec->irq_stack_size) { +@@ -3642,7 +3435,7 @@ arm64_get_smp_cpus(void) + /* + * Retrieve task registers for the time of the crash. 
+ */ +-static void ++static int + arm64_get_crash_notes(void) + { + struct machine_specific *ms = machdep->machspec; +@@ -3651,10 +3444,10 @@ arm64_get_crash_notes(void) + ulong offset; + char *buf, *p; + ulong *notes_ptrs; +- ulong i, found; ++ ulong i; + + if (!symbol_exists("crash_notes")) +- return; ++ return FALSE; + + crash_notes = symbol_value("crash_notes"); + +@@ -3666,9 +3459,9 @@ arm64_get_crash_notes(void) + */ + if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], + sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { +- error(WARNING, "cannot read \"crash_notes\"\n"); ++ error(WARNING, "cannot read crash_notes\n"); + FREEBUF(notes_ptrs); +- return; ++ return FALSE; + } + + if (symbol_exists("__per_cpu_offset")) { +@@ -3684,11 +3477,12 @@ arm64_get_crash_notes(void) + if (!(ms->panic_task_regs = calloc((size_t)kt->cpus, sizeof(struct arm64_pt_regs)))) + error(FATAL, "cannot calloc panic_task_regs space\n"); + +- for (i = found = 0; i < kt->cpus; i++) { ++ for (i = 0; i < kt->cpus; i++) { ++ + if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), + "note_buf_t", RETURN_ON_ERROR)) { +- error(WARNING, "cpu %d: cannot read NT_PRSTATUS note\n", i); +- continue; ++ error(WARNING, "failed to read note_buf_t\n"); ++ goto fail; + } + + /* +@@ -3718,24 +3512,19 @@ arm64_get_crash_notes(void) + note->n_descsz == notesz) + BCOPY((char *)note, buf, notesz); + } else { +- error(WARNING, "cpu %d: cannot find NT_PRSTATUS note\n", i); ++ error(WARNING, ++ "cannot find NT_PRSTATUS note for cpu: %d\n", i); + continue; + } + } + +- /* +- * Check the sanity of NT_PRSTATUS note only for each online cpu. +- * If this cpu has invalid note, continue to find the crash notes +- * for other online cpus. +- */ + if (note->n_type != NT_PRSTATUS) { +- error(WARNING, "cpu %d: invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n", i); +- continue; ++ error(WARNING, "invalid note (n_type != NT_PRSTATUS)\n"); ++ goto fail; + } +- +- if (!STRNEQ(p, "CORE")) { +- error(WARNING, "cpu %d: invalid NT_PRSTATUS note (name != \"CORE\")\n", i); +- continue; ++ if (p[0] != 'C' || p[1] != 'O' || p[2] != 'R' || p[3] != 'E') { ++ error(WARNING, "invalid note (name != \"CORE\"\n"); ++ goto fail; + } + + /* +@@ -3748,17 +3537,18 @@ arm64_get_crash_notes(void) + + BCOPY(p + OFFSET(elf_prstatus_pr_reg), &ms->panic_task_regs[i], + sizeof(struct arm64_pt_regs)); +- +- found++; + } + + FREEBUF(buf); + FREEBUF(notes_ptrs); ++ return TRUE; + +- if (!found) { +- free(ms->panic_task_regs); +- ms->panic_task_regs = NULL; +- } ++fail: ++ FREEBUF(buf); ++ FREEBUF(notes_ptrs); ++ free(ms->panic_task_regs); ++ ms->panic_task_regs = NULL; ++ return FALSE; + } + + static void +@@ -3882,13 +3672,9 @@ arm64_IS_VMALLOC_ADDR(ulong vaddr) + (vaddr <= machdep->machspec->kimage_end)) + return FALSE; + +- if (ms->VA_START && (vaddr >= ms->VA_START)) +- return TRUE; +- + return ((vaddr >= ms->vmalloc_start_addr && vaddr <= ms->vmalloc_end) || + ((machdep->flags & VMEMMAP) && +- ((vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end) || +- (vaddr >= ms->vmalloc_end && vaddr <= ms->vmemmap_vaddr))) || ++ (vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end)) || + (vaddr >= ms->modules_vaddr && vaddr <= ms->modules_end)); + } + +@@ -3897,57 +3683,9 @@ arm64_calc_VA_BITS(void) + { + int bitval; + struct syment *sp; +- ulong vabits_actual, value; ++ ulong value; + char *string; + +- if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { +- value = atol(string); +- free(string); +- machdep->machspec->CONFIG_ARM64_VA_BITS = value; 
+- } +- +- if (kernel_symbol_exists("vabits_actual")) { +- if (pc->flags & PROC_KCORE) { +- vabits_actual = symbol_value_from_proc_kallsyms("vabits_actual"); +- if ((vabits_actual != BADVAL) && (READMEM(pc->mfd, &value, sizeof(ulong), +- vabits_actual, KCORE_USE_VADDR) > 0)) { +- if (CRASHDEBUG(1)) +- fprintf(fp, +- "/proc/kcore: vabits_actual: %ld\n", value); +- machdep->machspec->VA_BITS_ACTUAL = value; +- machdep->machspec->VA_BITS = value; +- machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); +- } else +- error(FATAL, "/proc/kcore: cannot read vabits_actual\n"); +- } else if (ACTIVE()) +- error(FATAL, "cannot determine VA_BITS_ACTUAL: please use /proc/kcore\n"); +- else { +- if ((string = pc->read_vmcoreinfo("NUMBER(TCR_EL1_T1SZ)"))) { +- /* See ARMv8 ARM for the description of +- * TCR_EL1.T1SZ and how it can be used +- * to calculate the vabits_actual +- * supported by underlying kernel. +- * +- * Basically: +- * vabits_actual = 64 - T1SZ; +- */ +- value = 64 - strtoll(string, NULL, 0); +- if (CRASHDEBUG(1)) +- fprintf(fp, "vmcoreinfo : vabits_actual: %ld\n", value); +- free(string); +- machdep->machspec->VA_BITS_ACTUAL = value; +- machdep->machspec->VA_BITS = value; +- machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); +- } else if (machdep->machspec->VA_BITS_ACTUAL) { +- machdep->machspec->VA_BITS = machdep->machspec->VA_BITS_ACTUAL; +- machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); +- } else +- error(FATAL, "cannot determine VA_BITS_ACTUAL\n"); +- } +- +- return; +- } +- + if (!(sp = symbol_search("swapper_pg_dir")) && + !(sp = symbol_search("idmap_pg_dir")) && + !(sp = symbol_search("_text")) && +@@ -3976,12 +3714,14 @@ arm64_calc_VA_BITS(void) + /* + * Verify against dumpfiles that export VA_BITS in vmcoreinfo + */ +- if (machdep->machspec->CONFIG_ARM64_VA_BITS && +- (machdep->machspec->VA_BITS != machdep->machspec->CONFIG_ARM64_VA_BITS)) { +- error(WARNING, "VA_BITS: calculated: %ld vmcoreinfo: %ld\n", +- machdep->machspec->VA_BITS, machdep->machspec->CONFIG_ARM64_VA_BITS); +- machdep->machspec->VA_BITS = machdep->machspec->CONFIG_ARM64_VA_BITS; +- } ++ if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { ++ value = atol(string); ++ free(string); ++ if (machdep->machspec->VA_BITS != value) ++ error(WARNING, "VA_BITS: calculated: %ld vmcoreinfo: %ld\n", ++ machdep->machspec->VA_BITS, value); ++ } ++ + + if (CRASHDEBUG(1)) + fprintf(fp, "VA_BITS: %ld\n", machdep->machspec->VA_BITS); +@@ -3999,15 +3739,6 @@ arm64_calc_VA_BITS(void) + * + * Since VMEMMAP_SIZE is dependent upon the size of a struct page, + * the two ranges cannot be determined until POST_GDB. 
+- * +- * Since 52-bit VA was introduced: +- * +- * #define STRUCT_PAGE_MAX_SHIFT 6 +- * #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) +- * #define VMEMMAP_START (-VMEMMAP_SIZE) +- * #define VMALLOC_START (MODULES_END) +- * #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) +- * #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) + */ + + #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +@@ -4019,23 +3750,10 @@ static void + arm64_calc_virtual_memory_ranges(void) + { + struct machine_specific *ms = machdep->machspec; +- ulong value, vmemmap_start, vmemmap_end, vmemmap_size, vmalloc_end; +- char *string; +- int ret; ++ ulong vmemmap_start, vmemmap_end, vmemmap_size; ++ ulong vmalloc_end; + ulong PUD_SIZE = UNINITIALIZED; + +- if (!machdep->machspec->CONFIG_ARM64_VA_BITS) { +- if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { +- value = atol(string); +- free(string); +- machdep->machspec->CONFIG_ARM64_VA_BITS = value; +- } else if (kt->ikconfig_flags & IKCONFIG_AVAIL) { +- if ((ret = get_kernel_config("CONFIG_ARM64_VA_BITS", +- &string)) == IKCONFIG_STR) +- machdep->machspec->CONFIG_ARM64_VA_BITS = atol(string); +- } +- } +- + if (THIS_KERNEL_VERSION < LINUX(3,17,0)) /* use original hardwired values */ + return; + +@@ -4054,24 +3772,8 @@ arm64_calc_virtual_memory_ranges(void) + break; + } + +-#define STRUCT_PAGE_MAX_SHIFT 6 +- +- if (ms->VA_BITS_ACTUAL) { +- ulong va_bits_min = 48; +- +- if (machdep->machspec->CONFIG_ARM64_VA_BITS < 48) +- va_bits_min = ms->CONFIG_ARM64_VA_BITS; +- +- vmemmap_size = (1UL) << (va_bits_min - machdep->pageshift - 1 + STRUCT_PAGE_MAX_SHIFT); +- vmalloc_end = (- PUD_SIZE - vmemmap_size - KILOBYTES(64)); +- vmemmap_start = (-vmemmap_size - MEGABYTES(2)); +- ms->vmalloc_end = vmalloc_end - 1; +- ms->vmemmap_vaddr = vmemmap_start; +- ms->vmemmap_end = -1; +- return; +- } +- + if (machdep->flags & NEW_VMEMMAP) ++#define STRUCT_PAGE_MAX_SHIFT 6 + vmemmap_size = 1UL << (ms->VA_BITS - machdep->pageshift - 1 + + STRUCT_PAGE_MAX_SHIFT); + else +@@ -4095,7 +3797,7 @@ arm64_calc_virtual_memory_ranges(void) + static int + arm64_is_uvaddr(ulong addr, struct task_context *tc) + { +- return (addr < machdep->machspec->userspace_top); ++ return (addr < ARM64_USERSPACE_TOP); + } + + +@@ -4120,20 +3822,6 @@ arm64_swp_offset(ulong pte) + return pte; + } + +-static void arm64_calc_KERNELPACMASK(void) +-{ +- ulong value; +- char *string; +- +- if ((string = pc->read_vmcoreinfo("NUMBER(KERNELPACMASK)"))) { +- value = htol(string, QUIET, NULL); +- free(string); +- machdep->machspec->CONFIG_ARM64_KERNELPACMASK = value; +- if (CRASHDEBUG(1)) +- fprintf(fp, "CONFIG_ARM64_KERNELPACMASK: %lx\n", value); +- } +-} +- + #endif /* ARM64 */ + + +diff --git a/defs.h b/defs.h +index 95949507cae4..48f5e05e32f3 100644 +--- a/defs.h ++++ b/defs.h +@@ -3052,7 +3052,7 @@ typedef u64 pte_t; + #define MACHINE_TYPE "ARM64" + + #define PTOV(X) \ +- ((unsigned long)(X) - (machdep->machspec->physvirt_offset)) ++ ((unsigned long)(X)-(machdep->machspec->phys_offset)+(machdep->machspec->page_offset)) + + #define VTOP(X) arm64_VTOP((ulong)(X)) + +@@ -3186,17 +3186,9 @@ typedef signed int s32; + */ + #define ARM64_VA_START ((0xffffffffffffffffUL) \ + << machdep->machspec->VA_BITS) +-#define _VA_START(va) ((0xffffffffffffffffUL) - \ +- ((1UL) << ((va) - 1)) + 1) +-#define TEXT_OFFSET_MASK (~((MEGABYTES(2UL))-1)) +- + #define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) \ + << (machdep->machspec->VA_BITS - 1)) +-#define ARM64_PAGE_OFFSET_ACTUAL 
((0xffffffffffffffffUL) \
+-		- ((1UL) << machdep->machspec->VA_BITS_ACTUAL) + 1)
+-
+ #define ARM64_USERSPACE_TOP  ((1UL) << machdep->machspec->VA_BITS)
+-#define ARM64_USERSPACE_TOP_ACTUAL ((1UL) << machdep->machspec->VA_BITS_ACTUAL)
+ 
+ /* only used for v4.6 or later */
+ #define ARM64_MODULES_VSIZE     MEGABYTES(128)
+@@ -3299,11 +3291,7 @@ struct machine_specific {
+ 	ulong kern_eframe_offset;
+ 	ulong machine_kexec_start;
+ 	ulong machine_kexec_end;
+-	ulong VA_BITS_ACTUAL;
+-	ulong CONFIG_ARM64_VA_BITS;
+-	ulong VA_START;
+-	ulong CONFIG_ARM64_KERNELPACMASK;
+-	ulong physvirt_offset;
++	ulong vabits_user;
+ };
+ 
+ struct arm64_stackframe {
+@@ -3552,8 +3540,6 @@ struct arm64_stackframe {
+ 	machdep->machspec->last_p4d_read = (ulong)(P4D); \
+ }
+ 
+-#define MAX_POSSIBLE_PHYSMEM_BITS 52
+-
+ /*
+  * PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so
+  * for safety, use the 2.6 values to generate it.
diff --git a/SPECS/crash.spec b/SPECS/crash.spec
index c7faa81..fe2b870 100644
--- a/SPECS/crash.spec
+++ b/SPECS/crash.spec
@@ -3,8 +3,8 @@
 #
 Summary: Kernel analysis utility for live systems, netdump, diskdump, kdump, LKCD or mcore dumpfiles
 Name: crash
-Version: 7.2.8
-Release: 3%{?dist}
+Version: 7.2.9
+Release: 2%{?dist}
 License: GPLv3
 Group: Development/Debuggers
 Source0: https://github.com/crash-utility/crash/archive/crash-%{version}.tar.gz
@@ -18,7 +18,7 @@ Requires: binutils
 Provides: bundled(gdb) = 7.6
 Patch0: lzo_snappy.patch
 Patch1: rhel8_build.patch
-Patch2: github_b80b16549e24.patch
+Patch2: rhel8_arm64_52_bit_fix.patch
 
 %description
 The core analysis suite is a self-contained tool that can be used to
@@ -41,7 +41,7 @@ offered by Mission Critical Linux, or the LKCD kernel patch.
 %setup -n %{name}-%{version} -q
 %patch0 -p1 -b lzo_snappy.patch
 %patch1 -p1 -b rhel8_build.patch
-%patch2 -p1 -b github_b80b16549e24.patch
+%patch2 -p1 -b rhel8_arm64_52_bit_fix.patch
 
 %build
 cp %{SOURCE1} .
@@ -72,6 +72,37 @@ rm -rf %{buildroot}
 %{_includedir}/*
 
 %changelog
+* Tue Dec 1 2020 Bhupesh Sharma - 7.2.9-2
+- Fix the sources file to add gdb-7.6 tarball
+  [The line was somehow removed when using rhpkg new-sources to
+   update the crash tarball location]
+  Resolves: rhbz#1881854
+
+* Tue Dec 1 2020 Bhupesh Sharma - 7.2.9-1
+- Rebase to upstream crash version 7.2.9
+- Also minimize the rhel-only patches to the bare minimum.
+  Resolves: rhbz#1881854
+
+* Wed Nov 5 2020 Bhupesh Sharma - 7.2.8-8
+- crash/arm64: Fix arm64 read error with 'idmap_ptrs_per_pgd' symbol with debug kernel
+  Resolves: rhbz#1876039
+
+* Mon Aug 17 2020 Bhupesh Sharma - 7.2.8-7
+- crash/sadump, kaslr: fix failure of calculating kaslr_offset due to an sadump format restriction
+  Resolves: rhbz#1855527
+
+* Fri Aug 7 2020 Bhupesh Sharma - 7.2.8-6
+- aarch64: Revert to reading CONFIG_ARM64_USER_VA_BITS_52 and CONFIG_ARM64_PA_BITS=52 for 52-bit VA/PA space.
+  Resolves: rhbz#1861086
+
+* Mon Jul 27 2020 Bhupesh Sharma - 7.2.8-5
+- aarch64: Support reading extended 52-bit address space via crash-utility
+  Resolves: rhbz#1861086
+
+* Fri Jul 10 2020 Bhupesh Sharma - 7.2.8-4
+- Replace people.redhat.com references with github equivalents.
+  Resolves: rhbz#1851745
+
 * Mon Jun 22 2020 Bhupesh Sharma - 7.2.8-3
 - Fix for reading compressed kdump dumpfiles from systems with physical memory
   Resolves: rhbz#1819606