diff --git a/SOURCES/gcc-RHEL-105072-1.patch b/SOURCES/gcc-RHEL-105072-1.patch new file mode 100644 index 0000000..c37c018 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-1.patch @@ -0,0 +1,63 @@ +commit 90986c5f0aa61cd22a9132486304ba5d12aae6c4 +Author: Florian Weimer +Date: Mon Nov 22 13:30:23 2021 +0100 + + libgcc: Remove tbase member from struct unw_eh_callback_data + + It is always a null pointer. + + libgcc/ChangeLog + + * unwind-dw2-fde-dip.c (struct unw_eh_callback_data): Remove + tbase member. + (base_from_cb_data): Adjust. + (_Unwind_IteratePhdrCallback): Likewise. + (_Unwind_Find_FDE): Likewise. + +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index 5095b6830bf79e2e..4a4d990f455e5c11 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -104,7 +104,6 @@ static const fde * _Unwind_Find_registered_FDE (void *pc, struct dwarf_eh_bases + struct unw_eh_callback_data + { + _Unwind_Ptr pc; +- void *tbase; + void *dbase; + void *func; + const fde *ret; +@@ -154,7 +153,7 @@ base_from_cb_data (unsigned char encoding, struct unw_eh_callback_data *data) + return 0; + + case DW_EH_PE_textrel: +- return (_Unwind_Ptr) data->tbase; ++ return 0; + case DW_EH_PE_datarel: + return (_Unwind_Ptr) data->dbase; + default: +@@ -431,7 +430,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + As soon as GLIBC will provide API so to notify that a library has been + removed, we could cache this (and thus use search_object). */ + ob.pc_begin = NULL; +- ob.tbase = data->tbase; ++ ob.tbase = NULL; + ob.dbase = data->dbase; + ob.u.single = (fde *) eh_frame; + ob.s.i = 0; +@@ -461,7 +460,6 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + return ret; + + data.pc = (_Unwind_Ptr) pc; +- data.tbase = NULL; + data.dbase = NULL; + data.func = NULL; + data.ret = NULL; +@@ -472,7 +470,7 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + + if (data.ret) + { +- bases->tbase = data.tbase; ++ bases->tbase = NULL; + bases->dbase = data.dbase; + bases->func = data.func; + } diff --git a/SOURCES/gcc-RHEL-105072-10.patch b/SOURCES/gcc-RHEL-105072-10.patch new file mode 100644 index 0000000..938486b --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-10.patch @@ -0,0 +1,51 @@ +commit 94ccaf62c378c3737f7e4b6a80e1160157119171 +Author: Thomas Neumann +Date: Mon Sep 19 18:10:02 2022 +0200 + + Avoid depending on destructor order + + In some scenarios (e.g., when mixing gcc and clang code), it can + happen that frames are deregistered after the lookup structure + has already been destroyed. That in itself would be fine, but + it triggers an assert in __deregister_frame_info_bases that + expects to find the frame. + + To avoid that, we now remember that the btree as already been + destroyed and disable the assert in that case. + + libgcc/ChangeLog: + + * unwind-dw2-fde.c: (release_register_frames) Remember + when the btree has been destroyed. + (__deregister_frame_info_bases) Disable the assert when + shutting down. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index f38efd3c09efc3e9..b0d07ccd53b30f4c 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -48,6 +48,7 @@ typedef __UINTPTR_TYPE__ uintptr_type; + #include "unwind-dw2-btree.h" + + static struct btree registered_frames; ++static bool in_shutdown; + + static void + release_registered_frames (void) __attribute__ ((destructor (110))); +@@ -57,6 +58,7 @@ release_registered_frames (void) + /* Release the b-tree and all frames. 
Frame releases that happen later are + * silently ignored */ + btree_destroy (®istered_frames); ++ in_shutdown = true; + } + + static void +@@ -282,7 +284,7 @@ __deregister_frame_info_bases (const void *begin) + __gthread_mutex_unlock (&object_mutex); + #endif + +- gcc_assert (ob); ++ gcc_assert (in_shutdown || ob); + return (void *) ob; + } + diff --git a/SOURCES/gcc-RHEL-105072-11.patch b/SOURCES/gcc-RHEL-105072-11.patch new file mode 100644 index 0000000..bb807c9 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-11.patch @@ -0,0 +1,42 @@ +commit 386ebf75f4c0342b1f823f4e4aba07abda3288d1 +Author: Thomas Neumann +Date: Fri Sep 23 15:57:13 2022 +0200 + + fix assert in __deregister_frame_info_bases + + When using the atomic fast path deregistering can fail during + program shutdown if the lookup structures are already destroyed. + The assert in __deregister_frame_info_bases takes that into + account. In the non-fast-path case however is not aware of + program shutdown, which caused a compiler error on such platforms. + We fix that by introducing a constant for in_shutdown in + non-fast-path builds. + We also drop the destructor priority, as it is not supported on + all platforms and we no longer rely upon the priority anyway. + + libgcc/ChangeLog: + * unwind-dw2-fde.c: Introduce a constant for in_shutdown + for the non-fast-path case. Drop destructor priority. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index b0d07ccd53b30f4c..27fea89dc314ccd0 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -51,7 +51,7 @@ static struct btree registered_frames; + static bool in_shutdown; + + static void +-release_registered_frames (void) __attribute__ ((destructor (110))); ++release_registered_frames (void) __attribute__ ((destructor)); + static void + release_registered_frames (void) + { +@@ -67,6 +67,8 @@ static void + init_object (struct object *ob); + + #else ++/* Without fast path frame deregistration must always succeed. */ ++static const int in_shutdown = 0; + + /* The unseen_objects list contains objects that have been registered + but not yet categorized in any way. The seen_objects list has had diff --git a/SOURCES/gcc-RHEL-105072-12.patch b/SOURCES/gcc-RHEL-105072-12.patch new file mode 100644 index 0000000..886d24b --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-12.patch @@ -0,0 +1,1811 @@ +commit 146e4591403239d662f36cab2d8b78a47cd01bd2 +Author: Florian Weimer +Date: Wed Jul 30 12:03:43 2025 +0200 + + libgcc: Decrease size of _Unwind_FrameState and even more size of cleared area in uw_frame_state_for + + The following patch implements something that has Florian found as + low hanging fruit in our unwinder and has been discussed in the + https://gcc.gnu.org/wiki/cauldron2022#cauldron2022talks.inprocess_unwinding_bof + talk. + _Unwind_FrameState type seems to be (unlike the pre-GCC 3 frame_state + which has been part of ABI) private to unwind-dw2.c + unwind.inc it + includes, it is always defined on the stack of some entrypoints, initialized + by static uw_frame_state_for and the address of it is also passed to other + static functions or the static inlines handling machine dependent unwinding, + but it isn't fortunately passed to any callbacks or public functions, so I + think we can safely change it any time we want. 
+ Florian mentioned that the structure is large even on x86_64, 384 bytes + there, starts with 328 bytes long element with frame_state_reg_info type + which then starts with an array with __LIBGCC_DWARF_FRAME_REGISTERS__ + 1 + elements, each of them is 16 bytes long, on x86_64 + __LIBGCC_DWARF_FRAME_REGISTERS__ is just 17 but even that is big, on say + riscv __LIBGCC_DWARF_FRAME_REGISTERS__ is I think 128, on powerpc 111, + on sh 153 etc. And, we memset to zero the whole fs variable with the + _Unwind_FrameState type at the start of the unwinding. + The reason why each element is 16 byte (on 64-bit arches) is that it + contains some pointer or pointer sized integer and then an enum (with just + 7 different enumerators) + padding. + + The following patch decreases it by moving the enum into a separate + array and using just one byte for each register in that second array. + We could compress it even more, say 4 bits per register, but I don't + want to uglify the code for it too much and make the accesses slower. + Furthermore, the clearing of the object can clear only thos how array + and members after it, because REG_UNSAVED enumerator (0) doesn't actually + need any pointer or pointer sized integer, it is just the other kinds + that need to have there something. + By doing this, on x86_64 the above numbers change to _Unwind_FrameState + type being now 264 bytes long, frame_state_reg_info 208 bytes and we + don't clear the first 144 bytes of the object, so the memset is 120 bytes, + so ~ 31% of the old clearing size. On riscv 64-bit assuming it has same + structure layout rules for the few types used there that would be + ~ 2160 bytes of _Unwind_FrameState type before and ~ 1264 bytes after, + with the memset previously ~ 2160 bytes and after ~ 232 bytes after. + + We've also talked about possibly adding a number of initially initialized + regs and initializing the rest lazily, but at least for x86_64 with + 18 elements in the array that doesn't seem to be worth it anymore, + especially because return address column is 16 there and that is usually the + first thing to be touched. It might theory help with lots of registers if + they are usually untouched, but would uglify and complicate any stores to + how by having to check there for the not initialized yet cases and lazy + initialization, and similarly for all reads of how to do there if below + last initialized one, use how, otherwise imply REG_UNSAVED. + + The disadvantage of the patch is that touching reg[x].loc and how[x] + now means 2 cachelines rather than one as before, and I admit beyond + bootstrap/regtest I haven't benchmarked it in any way. + + 2022-10-06 Jakub Jelinek + + * unwind-dw2.h (REG_UNSAVED, REG_SAVED_OFFSET, REG_SAVED_REG, + REG_SAVED_EXP, REG_SAVED_VAL_OFFSET, REG_SAVED_VAL_EXP, + REG_UNDEFINED): New anonymous enum, moved from inside of + struct frame_state_reg_info. + (struct frame_state_reg_info): Remove reg[].how element and the + anonymous enum there. Add how element. + * unwind-dw2.c: Include stddef.h. + (uw_frame_state_for): Don't clear first + offsetof (_Unwind_FrameState, regs.how[0]) bytes of *fs. + (execute_cfa_program, __frame_state_for, uw_update_context_1, + uw_update_context): Use fs->regs.how[X] instead of fs->regs.reg[X].how + or fs.regs.how[X] instead of fs.regs.reg[X].how. + * config/sh/linux-unwind.h (sh_fallback_frame_state): Likewise. + * config/bfin/linux-unwind.h (bfin_fallback_frame_state): Likewise. + * config/pa/linux-unwind.h (pa32_fallback_frame_state): Likewise. 
+ * config/pa/hpux-unwind.h (UPDATE_FS_FOR_SAR, UPDATE_FS_FOR_GR, + UPDATE_FS_FOR_FR, UPDATE_FS_FOR_PC, pa_fallback_frame_state): + Likewise. + * config/alpha/vms-unwind.h (alpha_vms_fallback_frame_state): + Likewise. + * config/alpha/linux-unwind.h (alpha_fallback_frame_state): Likewise. + * config/arc/linux-unwind.h (arc_fallback_frame_state, + arc_frob_update_context): Likewise. + * config/riscv/linux-unwind.h (riscv_fallback_frame_state): Likewise. + * config/nios2/linux-unwind.h (NIOS2_REG): Likewise. + * config/nds32/linux-unwind.h (NDS32_PUT_FS_REG): Likewise. + * config/s390/tpf-unwind.h (s390_fallback_frame_state): Likewise. + * config/s390/linux-unwind.h (s390_fallback_frame_state): Likewise. + * config/sparc/sol2-unwind.h (sparc64_frob_update_context, + MD_FALLBACK_FRAME_STATE_FOR): Likewise. + * config/sparc/linux-unwind.h (sparc64_fallback_frame_state, + sparc64_frob_update_context, sparc_fallback_frame_state): Likewise. + * config/i386/sol2-unwind.h (x86_64_fallback_frame_state, + x86_fallback_frame_state): Likewise. + * config/i386/w32-unwind.h (i386_w32_fallback_frame_state): Likewise. + * config/i386/linux-unwind.h (x86_64_fallback_frame_state, + x86_fallback_frame_state): Likewise. + * config/i386/freebsd-unwind.h (x86_64_freebsd_fallback_frame_state): + Likewise. + * config/i386/dragonfly-unwind.h + (x86_64_dragonfly_fallback_frame_state): Likewise. + * config/i386/gnu-unwind.h (x86_gnu_fallback_frame_state): Likewise. + * config/csky/linux-unwind.h (csky_fallback_frame_state): Likewise. + * config/aarch64/linux-unwind.h (aarch64_fallback_frame_state): + Likewise. + * config/aarch64/freebsd-unwind.h + (aarch64_freebsd_fallback_frame_state): Likewise. + * config/aarch64/aarch64-unwind.h (aarch64_frob_update_context): + Likewise. + * config/or1k/linux-unwind.h (or1k_fallback_frame_state): Likewise. + * config/mips/linux-unwind.h (mips_fallback_frame_state): Likewise. + * config/loongarch/linux-unwind.h (loongarch_fallback_frame_state): + Likewise. + * config/m68k/linux-unwind.h (m68k_fallback_frame_state): Likewise. + * config/xtensa/linux-unwind.h (xtensa_fallback_frame_state): + Likewise. + * config/rs6000/darwin-fallback.c (set_offset): Likewise. + * config/rs6000/aix-unwind.h (MD_FROB_UPDATE_CONTEXT): Likewise. + * config/rs6000/linux-unwind.h (ppc_fallback_frame_state): Likewise. + * config/rs6000/freebsd-unwind.h (frob_update_context): Likewise. 
+ +Conflicts: + libgcc/config/arc/linux-unwind.h + (missing commit 68a650ba57a446fef31722cc2d5ac0752dc1b531 downstream) + libgcc/config/loongarch/linux-unwind.h + (loongarch missing downstream) + libgcc/config/tilepro/linux-unwind.h + (tile backend still exists downstream) + +diff --git a/libgcc/config/aarch64/aarch64-unwind.h b/libgcc/config/aarch64/aarch64-unwind.h +index 3158af4c8c371fac..466e4235991485ea 100644 +--- a/libgcc/config/aarch64/aarch64-unwind.h ++++ b/libgcc/config/aarch64/aarch64-unwind.h +@@ -80,7 +80,7 @@ aarch64_frob_update_context (struct _Unwind_Context *context, + { + const int reg = DWARF_REGNUM_AARCH64_RA_STATE; + int ra_signed; +- if (fs->regs.reg[reg].how == REG_UNSAVED) ++ if (fs->regs.how[reg] == REG_UNSAVED) + ra_signed = fs->regs.reg[reg].loc.offset & 0x1; + else + ra_signed = _Unwind_GetGR (context, reg) & 0x1; +diff --git a/libgcc/config/aarch64/freebsd-unwind.h b/libgcc/config/aarch64/freebsd-unwind.h +index 99f1cb729a3c3f02..6e4b390a654eb96e 100644 +--- a/libgcc/config/aarch64/freebsd-unwind.h ++++ b/libgcc/config/aarch64/freebsd-unwind.h +@@ -90,7 +90,7 @@ aarch64_freebsd_fallback_frame_state + fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa; + + for (n = 0; n < 32; n++) +- fs->regs.reg[n].how = REG_SAVED_OFFSET; ++ fs->regs.how[n] = REG_SAVED_OFFSET; + + for (n = 0; n < 30; n++) + fs->regs.reg[n].loc.offset = (_Unwind_Ptr) &(sc->XREG(n)) - new_cfa; +@@ -98,7 +98,7 @@ aarch64_freebsd_fallback_frame_state + fs->regs.reg[30].loc.offset = (_Unwind_Ptr) &(sc->REG_NAME(lr)) - new_cfa; + fs->regs.reg[31].loc.offset = (_Unwind_Ptr) &(sc->REG_NAME(sp)) - new_cfa; + +- fs->regs.reg[DARC].how = REG_SAVED_OFFSET; ++ fs->regs.how[DARC] = REG_SAVED_OFFSET; + fs->regs.reg[DARC].loc.offset = (_Unwind_Ptr) &(sc->REG_NAME(elr)) - new_cfa; + + fs->retaddr_column = DARC; +diff --git a/libgcc/config/aarch64/linux-unwind.h b/libgcc/config/aarch64/linux-unwind.h +index 39894b7abb4d6415..f2cd2dbac8b7621c 100644 +--- a/libgcc/config/aarch64/linux-unwind.h ++++ b/libgcc/config/aarch64/linux-unwind.h +@@ -89,7 +89,7 @@ aarch64_fallback_frame_state (struct _Unwind_Context *context, + + for (i = 0; i < AARCH64_DWARF_NUMBER_R; i++) + { +- fs->regs.reg[AARCH64_DWARF_R0 + i].how = REG_SAVED_OFFSET; ++ fs->regs.how[AARCH64_DWARF_R0 + i] = REG_SAVED_OFFSET; + fs->regs.reg[AARCH64_DWARF_R0 + i].loc.offset = + (_Unwind_Ptr) & (sc->regs[i]) - new_cfa; + } +@@ -115,7 +115,7 @@ aarch64_fallback_frame_state (struct _Unwind_Context *context, + { + _Unwind_Sword offset; + +- fs->regs.reg[AARCH64_DWARF_V0 + i].how = REG_SAVED_OFFSET; ++ fs->regs.how[AARCH64_DWARF_V0 + i] = REG_SAVED_OFFSET; + + /* sigcontext contains 32 128bit registers for V0 to + V31. 
The kernel will have saved the contents of the +@@ -142,12 +142,12 @@ aarch64_fallback_frame_state (struct _Unwind_Context *context, + } + } + +- fs->regs.reg[31].how = REG_SAVED_OFFSET; ++ fs->regs.how[31] = REG_SAVED_OFFSET; + fs->regs.reg[31].loc.offset = (_Unwind_Ptr) & (sc->sp) - new_cfa; + + fs->signal_frame = 1; + +- fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].how = ++ fs->regs.how[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__] = + REG_SAVED_VAL_OFFSET; + fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].loc.offset = + (_Unwind_Ptr) (sc->pc) - new_cfa; +diff --git a/libgcc/config/alpha/linux-unwind.h b/libgcc/config/alpha/linux-unwind.h +index 93bedd68fde71c6a..90666e1c196489d7 100644 +--- a/libgcc/config/alpha/linux-unwind.h ++++ b/libgcc/config/alpha/linux-unwind.h +@@ -67,17 +67,17 @@ alpha_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + for (i = 0; i < 30; ++i) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset + = (long) &sc->sc_regs[i] - new_cfa; + } + for (i = 0; i < 31; ++i) + { +- fs->regs.reg[i+32].how = REG_SAVED_OFFSET; ++ fs->regs.how[i+32] = REG_SAVED_OFFSET; + fs->regs.reg[i+32].loc.offset + = (long) &sc->sc_fpregs[i] - new_cfa; + } +- fs->regs.reg[64].how = REG_SAVED_OFFSET; ++ fs->regs.how[64] = REG_SAVED_OFFSET; + fs->regs.reg[64].loc.offset = (long)&sc->sc_pc - new_cfa; + fs->retaddr_column = 64; + fs->signal_frame = 1; +diff --git a/libgcc/config/alpha/vms-unwind.h b/libgcc/config/alpha/vms-unwind.h +index 0f461a226bc93614..7c30792adf4925b9 100644 +--- a/libgcc/config/alpha/vms-unwind.h ++++ b/libgcc/config/alpha/vms-unwind.h +@@ -50,7 +50,7 @@ extern int SYS$GL_CALL_HANDL; + + #define UPDATE_FS_FOR_CFA_GR(FS, GRN, LOC, CFA) \ + do { \ +-(FS)->regs.reg[GRN].how = REG_SAVED_OFFSET; \ ++(FS)->regs.how[GRN] = REG_SAVED_OFFSET; \ + (FS)->regs.reg[GRN].loc.offset = (_Unwind_Sword) ((REG) (LOC) - (REG) (CFA)); \ + } while (0); + +@@ -212,10 +212,10 @@ alpha_vms_fallback_frame_state (struct _Unwind_Context *context, + if (eh_debug) + printf ("FALLBACK: REGISTER frame procedure\n"); + +- fs->regs.reg[RA_COLUMN].how = REG_SAVED_REG; ++ fs->regs.how[RA_COLUMN] = REG_SAVED_REG; + fs->regs.reg[RA_COLUMN].loc.reg = pv->pdsc$b_save_ra; + +- fs->regs.reg[29].how = REG_SAVED_REG; ++ fs->regs.how[29] = REG_SAVED_REG; + fs->regs.reg[29].loc.reg = pv->pdsc$b_save_fp; + + break; +diff --git a/libgcc/config/arc/linux-unwind.h b/libgcc/config/arc/linux-unwind.h +index af2084f354bc0998..8d3ca46da68d1e04 100644 +--- a/libgcc/config/arc/linux-unwind.h ++++ b/libgcc/config/arc/linux-unwind.h +@@ -115,12 +115,12 @@ arc_fallback_frame_state (struct _Unwind_Context *context, + { + if (register_id_for_index[i] == -1) + continue; +- fs->regs.reg[register_id_for_index[i]].how = REG_SAVED_OFFSET; ++ fs->regs.how[register_id_for_index[i]] = REG_SAVED_OFFSET; + fs->regs.reg[register_id_for_index[i]].loc.offset + = ((_Unwind_Ptr) &(regs[i])) - new_cfa; + } + +- fs->regs.reg[31].how = REG_SAVED_VAL_OFFSET; ++ fs->regs.how[31] = REG_SAVED_VAL_OFFSET; + fs->regs.reg[31].loc.offset = ((_Unwind_Ptr) (regs[ret])) - new_cfa; + + fs->retaddr_column = 31; +@@ -139,7 +139,7 @@ arc_frob_update_context (struct _Unwind_Context *context, + _Unwind_Word fp_val; + asm ("mov %0,fp" : "=r" (fp_val)); + +- switch (fs->regs.reg[27].how) ++ switch (fs->regs.how[27]) + { + case REG_UNSAVED: + case REG_UNDEFINED: +diff --git a/libgcc/config/bfin/linux-unwind.h 
b/libgcc/config/bfin/linux-unwind.h +index b07693a9fd5a80cc..42f80d75a2dfc0d1 100644 +--- a/libgcc/config/bfin/linux-unwind.h ++++ b/libgcc/config/bfin/linux-unwind.h +@@ -68,93 +68,93 @@ bfin_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_reg = 14; + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&sc->sc_r0 - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&sc->sc_r1 - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&sc->sc_r2 - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&sc->sc_r3 - new_cfa; +- fs->regs.reg[4].how = REG_SAVED_OFFSET; ++ fs->regs.how[4] = REG_SAVED_OFFSET; + fs->regs.reg[4].loc.offset = (long)&sc->sc_r4 - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&sc->sc_r5 - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&sc->sc_r6 - new_cfa; +- fs->regs.reg[7].how = REG_SAVED_OFFSET; ++ fs->regs.how[7] = REG_SAVED_OFFSET; + fs->regs.reg[7].loc.offset = (long)&sc->sc_r7 - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&sc->sc_p0 - new_cfa; +- fs->regs.reg[9].how = REG_SAVED_OFFSET; ++ fs->regs.how[9] = REG_SAVED_OFFSET; + fs->regs.reg[9].loc.offset = (long)&sc->sc_p1 - new_cfa; +- fs->regs.reg[10].how = REG_SAVED_OFFSET; ++ fs->regs.how[10] = REG_SAVED_OFFSET; + fs->regs.reg[10].loc.offset = (long)&sc->sc_p2 - new_cfa; +- fs->regs.reg[11].how = REG_SAVED_OFFSET; ++ fs->regs.how[11] = REG_SAVED_OFFSET; + fs->regs.reg[11].loc.offset = (long)&sc->sc_p3 - new_cfa; +- fs->regs.reg[12].how = REG_SAVED_OFFSET; ++ fs->regs.how[12] = REG_SAVED_OFFSET; + fs->regs.reg[12].loc.offset = (long)&sc->sc_p4 - new_cfa; +- fs->regs.reg[13].how = REG_SAVED_OFFSET; ++ fs->regs.how[13] = REG_SAVED_OFFSET; + fs->regs.reg[13].loc.offset = (long)&sc->sc_p5 - new_cfa; + +- fs->regs.reg[15].how = REG_SAVED_OFFSET; ++ fs->regs.how[15] = REG_SAVED_OFFSET; + fs->regs.reg[15].loc.offset = (long)&sc->sc_fp - new_cfa; +- fs->regs.reg[16].how = REG_SAVED_OFFSET; ++ fs->regs.how[16] = REG_SAVED_OFFSET; + fs->regs.reg[16].loc.offset = (long)&sc->sc_i0 - new_cfa; +- fs->regs.reg[17].how = REG_SAVED_OFFSET; ++ fs->regs.how[17] = REG_SAVED_OFFSET; + fs->regs.reg[17].loc.offset = (long)&sc->sc_i1 - new_cfa; +- fs->regs.reg[18].how = REG_SAVED_OFFSET; ++ fs->regs.how[18] = REG_SAVED_OFFSET; + fs->regs.reg[18].loc.offset = (long)&sc->sc_i2 - new_cfa; +- fs->regs.reg[19].how = REG_SAVED_OFFSET; ++ fs->regs.how[19] = REG_SAVED_OFFSET; + fs->regs.reg[19].loc.offset = (long)&sc->sc_i3 - new_cfa; +- fs->regs.reg[20].how = REG_SAVED_OFFSET; ++ fs->regs.how[20] = REG_SAVED_OFFSET; + fs->regs.reg[20].loc.offset = (long)&sc->sc_b0 - new_cfa; +- fs->regs.reg[21].how = REG_SAVED_OFFSET; ++ fs->regs.how[21] = REG_SAVED_OFFSET; + fs->regs.reg[21].loc.offset = (long)&sc->sc_b1 - new_cfa; +- fs->regs.reg[22].how = REG_SAVED_OFFSET; ++ fs->regs.how[22] = REG_SAVED_OFFSET; + fs->regs.reg[22].loc.offset = (long)&sc->sc_b2 - new_cfa; +- fs->regs.reg[23].how = REG_SAVED_OFFSET; ++ fs->regs.how[23] = REG_SAVED_OFFSET; 
+ fs->regs.reg[23].loc.offset = (long)&sc->sc_b3 - new_cfa; +- fs->regs.reg[24].how = REG_SAVED_OFFSET; ++ fs->regs.how[24] = REG_SAVED_OFFSET; + fs->regs.reg[24].loc.offset = (long)&sc->sc_l0 - new_cfa; +- fs->regs.reg[25].how = REG_SAVED_OFFSET; ++ fs->regs.how[25] = REG_SAVED_OFFSET; + fs->regs.reg[25].loc.offset = (long)&sc->sc_l1 - new_cfa; +- fs->regs.reg[26].how = REG_SAVED_OFFSET; ++ fs->regs.how[26] = REG_SAVED_OFFSET; + fs->regs.reg[26].loc.offset = (long)&sc->sc_l2 - new_cfa; +- fs->regs.reg[27].how = REG_SAVED_OFFSET; ++ fs->regs.how[27] = REG_SAVED_OFFSET; + fs->regs.reg[27].loc.offset = (long)&sc->sc_l3 - new_cfa; +- fs->regs.reg[28].how = REG_SAVED_OFFSET; ++ fs->regs.how[28] = REG_SAVED_OFFSET; + fs->regs.reg[28].loc.offset = (long)&sc->sc_m0 - new_cfa; +- fs->regs.reg[29].how = REG_SAVED_OFFSET; ++ fs->regs.how[29] = REG_SAVED_OFFSET; + fs->regs.reg[29].loc.offset = (long)&sc->sc_m1 - new_cfa; +- fs->regs.reg[30].how = REG_SAVED_OFFSET; ++ fs->regs.how[30] = REG_SAVED_OFFSET; + fs->regs.reg[30].loc.offset = (long)&sc->sc_m2 - new_cfa; +- fs->regs.reg[31].how = REG_SAVED_OFFSET; ++ fs->regs.how[31] = REG_SAVED_OFFSET; + fs->regs.reg[31].loc.offset = (long)&sc->sc_m3 - new_cfa; + /* FIXME: Handle A0, A1, CC. */ +- fs->regs.reg[35].how = REG_SAVED_OFFSET; ++ fs->regs.how[35] = REG_SAVED_OFFSET; + fs->regs.reg[35].loc.offset = (long)&sc->sc_rets - new_cfa; +- fs->regs.reg[36].how = REG_SAVED_OFFSET; ++ fs->regs.how[36] = REG_SAVED_OFFSET; + fs->regs.reg[36].loc.offset = (long)&sc->sc_pc - new_cfa; +- fs->regs.reg[37].how = REG_SAVED_OFFSET; ++ fs->regs.how[37] = REG_SAVED_OFFSET; + fs->regs.reg[37].loc.offset = (long)&sc->sc_retx - new_cfa; + +- fs->regs.reg[40].how = REG_SAVED_OFFSET; ++ fs->regs.how[40] = REG_SAVED_OFFSET; + fs->regs.reg[40].loc.offset = (long)&sc->sc_astat - new_cfa; +- fs->regs.reg[41].how = REG_SAVED_OFFSET; ++ fs->regs.how[41] = REG_SAVED_OFFSET; + fs->regs.reg[41].loc.offset = (long)&sc->sc_seqstat - new_cfa; + +- fs->regs.reg[44].how = REG_SAVED_OFFSET; ++ fs->regs.how[44] = REG_SAVED_OFFSET; + fs->regs.reg[44].loc.offset = (long)&sc->sc_lt0 - new_cfa; +- fs->regs.reg[45].how = REG_SAVED_OFFSET; ++ fs->regs.how[45] = REG_SAVED_OFFSET; + fs->regs.reg[45].loc.offset = (long)&sc->sc_lt1 - new_cfa; +- fs->regs.reg[46].how = REG_SAVED_OFFSET; ++ fs->regs.how[46] = REG_SAVED_OFFSET; + fs->regs.reg[46].loc.offset = (long)&sc->sc_lc0 - new_cfa; +- fs->regs.reg[47].how = REG_SAVED_OFFSET; ++ fs->regs.how[47] = REG_SAVED_OFFSET; + fs->regs.reg[47].loc.offset = (long)&sc->sc_lc1 - new_cfa; +- fs->regs.reg[48].how = REG_SAVED_OFFSET; ++ fs->regs.how[48] = REG_SAVED_OFFSET; + fs->regs.reg[48].loc.offset = (long)&sc->sc_lb0 - new_cfa; +- fs->regs.reg[49].how = REG_SAVED_OFFSET; ++ fs->regs.how[49] = REG_SAVED_OFFSET; + fs->regs.reg[49].loc.offset = (long)&sc->sc_lb1 - new_cfa; + fs->retaddr_column = 35; + +diff --git a/libgcc/config/csky/linux-unwind.h b/libgcc/config/csky/linux-unwind.h +index e8eaf4c2aabfc5f4..ae9319126dc1c5e0 100644 +--- a/libgcc/config/csky/linux-unwind.h ++++ b/libgcc/config/csky/linux-unwind.h +@@ -85,28 +85,28 @@ csky_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_reg = STACK_POINTER_REGNUM; + fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa; + +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (_Unwind_Ptr) & sc_pt_regs (a0) - new_cfa; + +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = 
(_Unwind_Ptr) & sc_pt_regs (a1) - new_cfa; + +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (_Unwind_Ptr) & sc_pt_regs (a2) - new_cfa; + +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (_Unwind_Ptr) & sc_pt_regs (a3) - new_cfa; + + for (i = 4; i < 14; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = + (_Unwind_Ptr) & sc_pt_regs (regs[i - 4]) - new_cfa; + } + + for (i = 16; i < 31; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = + (_Unwind_Ptr) & sc_pt_regs (exregs[i - 16]) - new_cfa; + } +@@ -114,10 +114,10 @@ csky_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.reg[31].loc.offset = + (_Unwind_Ptr) & sc_pt_regs_tls (tls) - new_cfa; + /* FIXME : hi lo ? */ +- fs->regs.reg[15].how = REG_SAVED_OFFSET; ++ fs->regs.how[15] = REG_SAVED_OFFSET; + fs->regs.reg[15].loc.offset = (_Unwind_Ptr) & sc_pt_regs_lr - new_cfa; + +- fs->regs.reg[32].how = REG_SAVED_OFFSET; ++ fs->regs.how[32] = REG_SAVED_OFFSET; + fs->regs.reg[32].loc.offset = (_Unwind_Ptr) & sc_pt_regs (pc) - new_cfa; + fs->retaddr_column = 32; + fs->signal_frame = 1; +diff --git a/libgcc/config/i386/dragonfly-unwind.h b/libgcc/config/i386/dragonfly-unwind.h +index 86899a0274178ea8..724895a1e6f5bfa4 100644 +--- a/libgcc/config/i386/dragonfly-unwind.h ++++ b/libgcc/config/i386/dragonfly-unwind.h +@@ -102,37 +102,37 @@ x86_64_dragonfly_fallback_frame_state + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + /* The SVR4 register numbering macros aren't usable in libgcc. */ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&sf->REG_NAME(rax) - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&sf->REG_NAME(rdx) - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&sf->REG_NAME(rcx) - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&sf->REG_NAME(rbx) - new_cfa; +- fs->regs.reg[4].how = REG_SAVED_OFFSET; ++ fs->regs.how[4] = REG_SAVED_OFFSET; + fs->regs.reg[4].loc.offset = (long)&sf->REG_NAME(rsi) - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&sf->REG_NAME(rdi) - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&sf->REG_NAME(rbp) - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&sf->REG_NAME(r8) - new_cfa; +- fs->regs.reg[9].how = REG_SAVED_OFFSET; ++ fs->regs.how[9] = REG_SAVED_OFFSET; + fs->regs.reg[9].loc.offset = (long)&sf->REG_NAME(r9) - new_cfa; +- fs->regs.reg[10].how = REG_SAVED_OFFSET; ++ fs->regs.how[10] = REG_SAVED_OFFSET; + fs->regs.reg[10].loc.offset = (long)&sf->REG_NAME(r10) - new_cfa; +- fs->regs.reg[11].how = REG_SAVED_OFFSET; ++ fs->regs.how[11] = REG_SAVED_OFFSET; + fs->regs.reg[11].loc.offset = (long)&sf->REG_NAME(r11) - new_cfa; +- fs->regs.reg[12].how = REG_SAVED_OFFSET; ++ fs->regs.how[12] = REG_SAVED_OFFSET; + fs->regs.reg[12].loc.offset = (long)&sf->REG_NAME(r12) - new_cfa; +- 
fs->regs.reg[13].how = REG_SAVED_OFFSET; ++ fs->regs.how[13] = REG_SAVED_OFFSET; + fs->regs.reg[13].loc.offset = (long)&sf->REG_NAME(r13) - new_cfa; +- fs->regs.reg[14].how = REG_SAVED_OFFSET; ++ fs->regs.how[14] = REG_SAVED_OFFSET; + fs->regs.reg[14].loc.offset = (long)&sf->REG_NAME(r14) - new_cfa; +- fs->regs.reg[15].how = REG_SAVED_OFFSET; ++ fs->regs.how[15] = REG_SAVED_OFFSET; + fs->regs.reg[15].loc.offset = (long)&sf->REG_NAME(r15) - new_cfa; +- fs->regs.reg[16].how = REG_SAVED_OFFSET; ++ fs->regs.how[16] = REG_SAVED_OFFSET; + fs->regs.reg[16].loc.offset = (long)&sf->REG_NAME(rip) - new_cfa; + fs->retaddr_column = 16; + fs->signal_frame = 1; +@@ -182,21 +182,21 @@ x86_dragonfly_fallback_frame_state + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + /* The SVR4 register numbering macros aren't usable in libgcc. */ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&sf->REG_NAME(eax) - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&sf->REG_NAME(ebx) - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&sf->REG_NAME(ecx) - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&sf->REG_NAME(edx) - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&sf->REG_NAME(esi) - new_cfa; +- fs->regs.reg[7].how = REG_SAVED_OFFSET; ++ fs->regs.how[7] = REG_SAVED_OFFSET; + fs->regs.reg[7].loc.offset = (long)&sf->REG_NAME(edi) - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&sf->REG_NAME(ebp) - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&sf->REG_NAME(eip) - new_cfa; + fs->retaddr_column = 8; + fs->signal_frame = 1; +diff --git a/libgcc/config/i386/freebsd-unwind.h b/libgcc/config/i386/freebsd-unwind.h +index efebadce7151f2cd..74058222ca5e1c6d 100644 +--- a/libgcc/config/i386/freebsd-unwind.h ++++ b/libgcc/config/i386/freebsd-unwind.h +@@ -110,37 +110,37 @@ x86_64_freebsd_fallback_frame_state + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + /* The SVR4 register numbering macros aren't usable in libgcc. 
*/ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&sf->REG_NAME(rax) - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&sf->REG_NAME(rdx) - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&sf->REG_NAME(rcx) - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&sf->REG_NAME(rbx) - new_cfa; +- fs->regs.reg[4].how = REG_SAVED_OFFSET; ++ fs->regs.how[4] = REG_SAVED_OFFSET; + fs->regs.reg[4].loc.offset = (long)&sf->REG_NAME(rsi) - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&sf->REG_NAME(rdi) - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&sf->REG_NAME(rbp) - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&sf->REG_NAME(r8) - new_cfa; +- fs->regs.reg[9].how = REG_SAVED_OFFSET; ++ fs->regs.how[9] = REG_SAVED_OFFSET; + fs->regs.reg[9].loc.offset = (long)&sf->REG_NAME(r9) - new_cfa; +- fs->regs.reg[10].how = REG_SAVED_OFFSET; ++ fs->regs.how[10] = REG_SAVED_OFFSET; + fs->regs.reg[10].loc.offset = (long)&sf->REG_NAME(r10) - new_cfa; +- fs->regs.reg[11].how = REG_SAVED_OFFSET; ++ fs->regs.how[11] = REG_SAVED_OFFSET; + fs->regs.reg[11].loc.offset = (long)&sf->REG_NAME(r11) - new_cfa; +- fs->regs.reg[12].how = REG_SAVED_OFFSET; ++ fs->regs.how[12] = REG_SAVED_OFFSET; + fs->regs.reg[12].loc.offset = (long)&sf->REG_NAME(r12) - new_cfa; +- fs->regs.reg[13].how = REG_SAVED_OFFSET; ++ fs->regs.how[13] = REG_SAVED_OFFSET; + fs->regs.reg[13].loc.offset = (long)&sf->REG_NAME(r13) - new_cfa; +- fs->regs.reg[14].how = REG_SAVED_OFFSET; ++ fs->regs.how[14] = REG_SAVED_OFFSET; + fs->regs.reg[14].loc.offset = (long)&sf->REG_NAME(r14) - new_cfa; +- fs->regs.reg[15].how = REG_SAVED_OFFSET; ++ fs->regs.how[15] = REG_SAVED_OFFSET; + fs->regs.reg[15].loc.offset = (long)&sf->REG_NAME(r15) - new_cfa; +- fs->regs.reg[16].how = REG_SAVED_OFFSET; ++ fs->regs.how[16] = REG_SAVED_OFFSET; + fs->regs.reg[16].loc.offset = (long)&sf->REG_NAME(rip) - new_cfa; + fs->retaddr_column = 16; + fs->signal_frame = 1; +@@ -189,21 +189,21 @@ x86_freebsd_fallback_frame_state + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + /* The SVR4 register numbering macros aren't usable in libgcc. 
*/ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&sf->REG_NAME(eax) - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&sf->REG_NAME(ebx) - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&sf->REG_NAME(ecx) - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&sf->REG_NAME(edx) - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&sf->REG_NAME(esi) - new_cfa; +- fs->regs.reg[7].how = REG_SAVED_OFFSET; ++ fs->regs.how[7] = REG_SAVED_OFFSET; + fs->regs.reg[7].loc.offset = (long)&sf->REG_NAME(edi) - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&sf->REG_NAME(ebp) - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&sf->REG_NAME(eip) - new_cfa; + fs->retaddr_column = 8; + fs->signal_frame = 1; +diff --git a/libgcc/config/i386/gnu-unwind.h b/libgcc/config/i386/gnu-unwind.h +index 0632348d4cd79b8f..72ffd450340f170a 100644 +--- a/libgcc/config/i386/gnu-unwind.h ++++ b/libgcc/config/i386/gnu-unwind.h +@@ -123,14 +123,14 @@ x86_gnu_fallback_frame_state + fs->regs.cfa_reg = 4; + fs->regs.cfa_offset = usp - (unsigned long) context->cfa; + +- fs->regs.reg[0].how = REG_SAVED_OFFSET; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; +- fs->regs.reg[7].how = REG_SAVED_OFFSET; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; ++ fs->regs.how[7] = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + + fs->retaddr_column = 8; + fs->signal_frame = 1; +diff --git a/libgcc/config/i386/linux-unwind.h b/libgcc/config/i386/linux-unwind.h +index 6170a773f5f6602b..91ba2d5c6ba750d1 100644 +--- a/libgcc/config/i386/linux-unwind.h ++++ b/libgcc/config/i386/linux-unwind.h +@@ -79,37 +79,37 @@ x86_64_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + /* The SVR4 register numbering macros aren't usable in libgcc. 
*/ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&sc->rax - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&sc->rdx - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&sc->rcx - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&sc->rbx - new_cfa; +- fs->regs.reg[4].how = REG_SAVED_OFFSET; ++ fs->regs.how[4] = REG_SAVED_OFFSET; + fs->regs.reg[4].loc.offset = (long)&sc->rsi - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&sc->rdi - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&sc->rbp - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&sc->r8 - new_cfa; +- fs->regs.reg[9].how = REG_SAVED_OFFSET; ++ fs->regs.how[9] = REG_SAVED_OFFSET; + fs->regs.reg[9].loc.offset = (long)&sc->r9 - new_cfa; +- fs->regs.reg[10].how = REG_SAVED_OFFSET; ++ fs->regs.how[10] = REG_SAVED_OFFSET; + fs->regs.reg[10].loc.offset = (long)&sc->r10 - new_cfa; +- fs->regs.reg[11].how = REG_SAVED_OFFSET; ++ fs->regs.how[11] = REG_SAVED_OFFSET; + fs->regs.reg[11].loc.offset = (long)&sc->r11 - new_cfa; +- fs->regs.reg[12].how = REG_SAVED_OFFSET; ++ fs->regs.how[12] = REG_SAVED_OFFSET; + fs->regs.reg[12].loc.offset = (long)&sc->r12 - new_cfa; +- fs->regs.reg[13].how = REG_SAVED_OFFSET; ++ fs->regs.how[13] = REG_SAVED_OFFSET; + fs->regs.reg[13].loc.offset = (long)&sc->r13 - new_cfa; +- fs->regs.reg[14].how = REG_SAVED_OFFSET; ++ fs->regs.how[14] = REG_SAVED_OFFSET; + fs->regs.reg[14].loc.offset = (long)&sc->r14 - new_cfa; +- fs->regs.reg[15].how = REG_SAVED_OFFSET; ++ fs->regs.how[15] = REG_SAVED_OFFSET; + fs->regs.reg[15].loc.offset = (long)&sc->r15 - new_cfa; +- fs->regs.reg[16].how = REG_SAVED_OFFSET; ++ fs->regs.how[16] = REG_SAVED_OFFSET; + fs->regs.reg[16].loc.offset = (long)&sc->rip - new_cfa; + fs->retaddr_column = 16; + fs->signal_frame = 1; +@@ -159,21 +159,21 @@ x86_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + /* The SVR4 register numbering macros aren't usable in libgcc. 
*/ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&sc->eax - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&sc->ebx - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&sc->ecx - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&sc->edx - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&sc->esi - new_cfa; +- fs->regs.reg[7].how = REG_SAVED_OFFSET; ++ fs->regs.how[7] = REG_SAVED_OFFSET; + fs->regs.reg[7].loc.offset = (long)&sc->edi - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&sc->ebp - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&sc->eip - new_cfa; + fs->retaddr_column = 8; + fs->signal_frame = 1; +diff --git a/libgcc/config/i386/sol2-unwind.h b/libgcc/config/i386/sol2-unwind.h +index ee06d88319ddda0e..634f76815d6fea52 100644 +--- a/libgcc/config/i386/sol2-unwind.h ++++ b/libgcc/config/i386/sol2-unwind.h +@@ -94,37 +94,37 @@ x86_64_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + /* The SVR4 register numbering macros aren't usable in libgcc. */ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&mctx->gregs[REG_RAX] - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&mctx->gregs[REG_RDX] - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&mctx->gregs[REG_RCX] - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&mctx->gregs[REG_RBX] - new_cfa; +- fs->regs.reg[4].how = REG_SAVED_OFFSET; ++ fs->regs.how[4] = REG_SAVED_OFFSET; + fs->regs.reg[4].loc.offset = (long)&mctx->gregs[REG_RSI] - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&mctx->gregs[REG_RDI] - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&mctx->gregs[REG_RBP] - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&mctx->gregs[REG_R8] - new_cfa; +- fs->regs.reg[9].how = REG_SAVED_OFFSET; ++ fs->regs.how[9] = REG_SAVED_OFFSET; + fs->regs.reg[9].loc.offset = (long)&mctx->gregs[REG_R9] - new_cfa; +- fs->regs.reg[10].how = REG_SAVED_OFFSET; ++ fs->regs.how[10] = REG_SAVED_OFFSET; + fs->regs.reg[10].loc.offset = (long)&mctx->gregs[REG_R10] - new_cfa; +- fs->regs.reg[11].how = REG_SAVED_OFFSET; ++ fs->regs.how[11] = REG_SAVED_OFFSET; + fs->regs.reg[11].loc.offset = (long)&mctx->gregs[REG_R11] - new_cfa; +- fs->regs.reg[12].how = REG_SAVED_OFFSET; ++ fs->regs.how[12] = REG_SAVED_OFFSET; + fs->regs.reg[12].loc.offset = (long)&mctx->gregs[REG_R12] - new_cfa; +- fs->regs.reg[13].how = REG_SAVED_OFFSET; ++ fs->regs.how[13] = REG_SAVED_OFFSET; + fs->regs.reg[13].loc.offset = 
(long)&mctx->gregs[REG_R13] - new_cfa; +- fs->regs.reg[14].how = REG_SAVED_OFFSET; ++ fs->regs.how[14] = REG_SAVED_OFFSET; + fs->regs.reg[14].loc.offset = (long)&mctx->gregs[REG_R14] - new_cfa; +- fs->regs.reg[15].how = REG_SAVED_OFFSET; ++ fs->regs.how[15] = REG_SAVED_OFFSET; + fs->regs.reg[15].loc.offset = (long)&mctx->gregs[REG_R15] - new_cfa; +- fs->regs.reg[16].how = REG_SAVED_OFFSET; ++ fs->regs.how[16] = REG_SAVED_OFFSET; + fs->regs.reg[16].loc.offset = (long)&mctx->gregs[REG_RIP] - new_cfa; + fs->retaddr_column = 16; + fs->signal_frame = 1; +@@ -180,21 +180,21 @@ x86_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + /* The SVR4 register numbering macros aren't usable in libgcc. */ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&mctx->gregs[EAX] - new_cfa; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&mctx->gregs[EBX] - new_cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&mctx->gregs[ECX] - new_cfa; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&mctx->gregs[EDX] - new_cfa; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&mctx->gregs[ESI] - new_cfa; +- fs->regs.reg[7].how = REG_SAVED_OFFSET; ++ fs->regs.how[7] = REG_SAVED_OFFSET; + fs->regs.reg[7].loc.offset = (long)&mctx->gregs[EDI] - new_cfa; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&mctx->gregs[EBP] - new_cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&mctx->gregs[EIP] - new_cfa; + fs->retaddr_column = 8; + +diff --git a/libgcc/config/i386/w32-unwind.h b/libgcc/config/i386/w32-unwind.h +index 0e053c78d80f4e7c..a264d6752f54fb23 100644 +--- a/libgcc/config/i386/w32-unwind.h ++++ b/libgcc/config/i386/w32-unwind.h +@@ -153,21 +153,21 @@ i386_w32_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa_ - (long) ctx_cfa_; + + /* Restore registers. 
*/ +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)&proc_ctx_->Eax - new_cfa_; +- fs->regs.reg[3].how = REG_SAVED_OFFSET; ++ fs->regs.how[3] = REG_SAVED_OFFSET; + fs->regs.reg[3].loc.offset = (long)&proc_ctx_->Ebx - new_cfa_; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long)&proc_ctx_->Ecx - new_cfa_; +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long)&proc_ctx_->Edx - new_cfa_; +- fs->regs.reg[6].how = REG_SAVED_OFFSET; ++ fs->regs.how[6] = REG_SAVED_OFFSET; + fs->regs.reg[6].loc.offset = (long)&proc_ctx_->Esi - new_cfa_; +- fs->regs.reg[7].how = REG_SAVED_OFFSET; ++ fs->regs.how[7] = REG_SAVED_OFFSET; + fs->regs.reg[7].loc.offset = (long)&proc_ctx_->Edi - new_cfa_; +- fs->regs.reg[5].how = REG_SAVED_OFFSET; ++ fs->regs.how[5] = REG_SAVED_OFFSET; + fs->regs.reg[5].loc.offset = (long)&proc_ctx_->Ebp - new_cfa_; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long)&proc_ctx_->Eip - new_cfa_; + fs->retaddr_column = 8; + fs->signal_frame = 1; +@@ -189,12 +189,12 @@ i386_w32_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa_ - (long) ctx_cfa_; + + /* The saved value of %ecx is at CFA - 4 */ +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = -4; + + /* and what is stored at the CFA is the return address. */ + fs->retaddr_column = 8; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = 0; + fs->signal_frame = 1; + +diff --git a/libgcc/config/m68k/linux-unwind.h b/libgcc/config/m68k/linux-unwind.h +index d9642cdc6e641582..9d8a62daad130bb6 100644 +--- a/libgcc/config/m68k/linux-unwind.h ++++ b/libgcc/config/m68k/linux-unwind.h +@@ -68,21 +68,21 @@ m68k_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_reg = 15; + fs->regs.cfa_offset = cfa - (long) context->cfa; + +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long) &sc->sc_d0 - cfa; +- fs->regs.reg[1].how = REG_SAVED_OFFSET; ++ fs->regs.how[1] = REG_SAVED_OFFSET; + fs->regs.reg[1].loc.offset = (long) &sc->sc_d1 - cfa; +- fs->regs.reg[8].how = REG_SAVED_OFFSET; ++ fs->regs.how[8] = REG_SAVED_OFFSET; + fs->regs.reg[8].loc.offset = (long) &sc->sc_a0 - cfa; +- fs->regs.reg[9].how = REG_SAVED_OFFSET; ++ fs->regs.how[9] = REG_SAVED_OFFSET; + fs->regs.reg[9].loc.offset = (long) &sc->sc_a1 - cfa; + + #ifdef __uClinux__ +- fs->regs.reg[13].how = REG_SAVED_OFFSET; ++ fs->regs.how[13] = REG_SAVED_OFFSET; + fs->regs.reg[13].loc.offset = (long) &sc->sc_a5 - cfa; + #endif + +- fs->regs.reg[24].how = REG_SAVED_OFFSET; ++ fs->regs.how[24] = REG_SAVED_OFFSET; + fs->regs.reg[24].loc.offset = (long) &sc->sc_pc - cfa; + + #ifndef __uClinux__ +@@ -90,9 +90,9 @@ m68k_fallback_frame_state (struct _Unwind_Context *context, + { + int *fpregs = (int *) sc->sc_fpregs; + +- fs->regs.reg[16].how = REG_SAVED_OFFSET; ++ fs->regs.how[16] = REG_SAVED_OFFSET; + fs->regs.reg[16].loc.offset = (long) &fpregs[0] - cfa; +- fs->regs.reg[17].how = REG_SAVED_OFFSET; ++ fs->regs.how[17] = REG_SAVED_OFFSET; + fs->regs.reg[17].loc.offset = (long) &fpregs[M68K_FP_SIZE/4] - cfa; + } + #elif defined __mcffpu__ +@@ -124,12 +124,12 @@ m68k_fallback_frame_state (struct _Unwind_Context 
*context, + /* register %d0-%d7/%a0-%a6 */ + for (i = 0; i <= 14; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = (long) &gregs[i] - cfa; + } + + /* return address */ +- fs->regs.reg[24].how = REG_SAVED_OFFSET; ++ fs->regs.how[24] = REG_SAVED_OFFSET; + fs->regs.reg[24].loc.offset = (long) &gregs[16] - cfa; + + #define uc_fpstate uc_filler[0] +@@ -141,7 +141,7 @@ m68k_fallback_frame_state (struct _Unwind_Context *context, + /* register %fp0-%fp7 */ + for (i = 16; i <= 23; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = fpregs - cfa; + fpregs += M68K_FP_SIZE; + } +diff --git a/libgcc/config/mips/linux-unwind.h b/libgcc/config/mips/linux-unwind.h +index 2c201e0834847fb7..400038643d4ce139 100644 +--- a/libgcc/config/mips/linux-unwind.h ++++ b/libgcc/config/mips/linux-unwind.h +@@ -103,7 +103,7 @@ mips_fallback_frame_state (struct _Unwind_Context *context, + #endif + + for (i = 0; i < 32; i++) { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset + = (_Unwind_Ptr)&(sc->sc_regs[i]) + reg_offset - new_cfa; + } +@@ -115,7 +115,7 @@ mips_fallback_frame_state (struct _Unwind_Context *context, + Note that setting fs->signal_frame would not work. As the comment + above MASK_RETURN_ADDR explains, MIPS unwinders must earch for an + odd-valued address. */ +- fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].how ++ fs->regs.how[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__] + = REG_SAVED_VAL_OFFSET; + fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].loc.offset + = (_Unwind_Ptr)(sc->sc_pc) + 2 - new_cfa; +diff --git a/libgcc/config/nds32/linux-unwind.h b/libgcc/config/nds32/linux-unwind.h +index 69b7709d875e4e53..d9fce6be0302ba05 100644 +--- a/libgcc/config/nds32/linux-unwind.h ++++ b/libgcc/config/nds32/linux-unwind.h +@@ -92,7 +92,7 @@ nds32_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa; + + #define NDS32_PUT_FS_REG(NUM, NAME) \ +- (fs->regs.reg[NUM].how = REG_SAVED_OFFSET, \ ++ (fs->regs.how[NUM] = REG_SAVED_OFFSET, \ + fs->regs.reg[NUM].loc.offset = (_Unwind_Ptr) &(sc_->NAME) - new_cfa) + + /* Restore all registers value. 
*/ +diff --git a/libgcc/config/nios2/linux-unwind.h b/libgcc/config/nios2/linux-unwind.h +index da321e222fe36b26..ebf7cdb878778887 100644 +--- a/libgcc/config/nios2/linux-unwind.h ++++ b/libgcc/config/nios2/linux-unwind.h +@@ -61,7 +61,7 @@ nios2_fallback_frame_state (struct _Unwind_Context *context, + return _URC_END_OF_STACK; + + #define NIOS2_REG(NUM,NAME) \ +- (fs->regs.reg[NUM].how = REG_SAVED_OFFSET, \ ++ (fs->regs.how[NUM] = REG_SAVED_OFFSET, \ + fs->regs.reg[NUM].loc.offset = (_Unwind_Ptr)&(regs->NAME) - new_cfa) + + if (pc[0] == (0x00800004 | (__NR_rt_sigreturn << 6))) +diff --git a/libgcc/config/or1k/linux-unwind.h b/libgcc/config/or1k/linux-unwind.h +index ea55234bcc078456..e5df904fd242662c 100644 +--- a/libgcc/config/or1k/linux-unwind.h ++++ b/libgcc/config/or1k/linux-unwind.h +@@ -59,10 +59,10 @@ or1k_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + for (i = 2; i < 32; ++i) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = (long) &sc->regs.gpr[i] - new_cfa; + } +- fs->regs.reg[32].how = REG_SAVED_OFFSET; ++ fs->regs.how[32] = REG_SAVED_OFFSET; + fs->regs.reg[32].loc.offset = (long)&sc->regs.pc - new_cfa; + fs->retaddr_column = 32; + fs->signal_frame = 1; +diff --git a/libgcc/config/pa/hpux-unwind.h b/libgcc/config/pa/hpux-unwind.h +index f72df85828fc75b9..78ca446f44a57a9e 100644 +--- a/libgcc/config/pa/hpux-unwind.h ++++ b/libgcc/config/pa/hpux-unwind.h +@@ -57,19 +57,19 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + #endif + + #define UPDATE_FS_FOR_SAR(FS, N) \ +- (FS)->regs.reg[N].how = REG_SAVED_OFFSET; \ ++ (FS)->regs.how[N] = REG_SAVED_OFFSET; \ + (FS)->regs.reg[N].loc.offset = GetSSRegAddr (mc, ss_cr11) - new_cfa + + #define UPDATE_FS_FOR_GR(FS, GRN, N) \ +- (FS)->regs.reg[N].how = REG_SAVED_OFFSET; \ ++ (FS)->regs.how[N] = REG_SAVED_OFFSET; \ + (FS)->regs.reg[N].loc.offset = GetSSRegAddr (mc, ss_gr##GRN) - new_cfa + + #define UPDATE_FS_FOR_FR(FS, FRN, N) \ +- (FS)->regs.reg[N].how = REG_SAVED_OFFSET; \ ++ (FS)->regs.how[N] = REG_SAVED_OFFSET; \ + (FS)->regs.reg[N].loc.offset = (long) &(mc->ss_fr##FRN) - new_cfa; + + #define UPDATE_FS_FOR_PC(FS, N) \ +- (FS)->regs.reg[N].how = REG_SAVED_OFFSET; \ ++ (FS)->regs.how[N] = REG_SAVED_OFFSET; \ + (FS)->regs.reg[N].loc.offset = GetSSRegAddr (mc, ss_pcoq_head) - new_cfa + + /* Extract bit field from word using HP's numbering (MSB = 0). */ +@@ -151,7 +151,7 @@ pa_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = 0; + + fs->retaddr_column = 0; +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = -24; + + /* Update context to describe the stub frame. */ +@@ -171,7 +171,7 @@ pa_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = 0; + + fs->retaddr_column = 0; +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = -8; + + /* Update context to describe the stub frame. 
*/ +diff --git a/libgcc/config/pa/linux-unwind.h b/libgcc/config/pa/linux-unwind.h +index dcc914c9c62989fa..4b60a9979a65b6ef 100644 +--- a/libgcc/config/pa/linux-unwind.h ++++ b/libgcc/config/pa/linux-unwind.h +@@ -138,22 +138,22 @@ pa32_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + for (i = 1; i <= 31; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = (long)&sc->sc_gr[i] - new_cfa; + } + for (i = 4; i <= 31; i++) + { + /* FP regs have left and right halves */ +- fs->regs.reg[2*i+24].how = REG_SAVED_OFFSET; ++ fs->regs.how[2*i+24] = REG_SAVED_OFFSET; + fs->regs.reg[2*i+24].loc.offset + = (long)&sc->sc_fr[i] - new_cfa; +- fs->regs.reg[2*i+24+1].how = REG_SAVED_OFFSET; ++ fs->regs.how[2*i+24+1] = REG_SAVED_OFFSET; + fs->regs.reg[2*i+24+1].loc.offset + = (long)&sc->sc_fr[i] + 4 - new_cfa; + } +- fs->regs.reg[88].how = REG_SAVED_OFFSET; ++ fs->regs.how[88] = REG_SAVED_OFFSET; + fs->regs.reg[88].loc.offset = (long) &sc->sc_sar - new_cfa; +- fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].how ++ fs->regs.how[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__] + = REG_SAVED_OFFSET; + fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].loc.offset + = (long) &sc->sc_iaoq[0] - new_cfa; +diff --git a/libgcc/config/riscv/linux-unwind.h b/libgcc/config/riscv/linux-unwind.h +index ab3f2a42ddf6d9ce..17511115db2e78e9 100644 +--- a/libgcc/config/riscv/linux-unwind.h ++++ b/libgcc/config/riscv/linux-unwind.h +@@ -73,13 +73,13 @@ riscv_fallback_frame_state (struct _Unwind_Context *context, + + for (i = 0; i < 32; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = (_Unwind_Ptr) &sc->gregs[i] - new_cfa; + } + + fs->signal_frame = 1; + fs->retaddr_column = __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__; +- fs->regs.reg[fs->retaddr_column].how = REG_SAVED_VAL_OFFSET; ++ fs->regs.how[fs->retaddr_column] = REG_SAVED_VAL_OFFSET; + fs->regs.reg[fs->retaddr_column].loc.offset = + (_Unwind_Ptr) sc->gregs[0] - new_cfa; + +diff --git a/libgcc/config/rs6000/aix-unwind.h b/libgcc/config/rs6000/aix-unwind.h +index b8d82765e55dc56c..2dcf5342bc997ce1 100644 +--- a/libgcc/config/rs6000/aix-unwind.h ++++ b/libgcc/config/rs6000/aix-unwind.h +@@ -40,7 +40,7 @@ + #ifdef __64BIT__ + #define MD_FROB_UPDATE_CONTEXT(CTX, FS) \ + do { \ +- if ((FS)->regs.reg[2].how == REG_UNSAVED) \ ++ if ((FS)->regs.how[2] == REG_UNSAVED) \ + { \ + unsigned int *insn \ + = (unsigned int *) \ +@@ -52,7 +52,7 @@ + #else + #define MD_FROB_UPDATE_CONTEXT(CTX, FS) \ + do { \ +- if ((FS)->regs.reg[2].how == REG_UNSAVED) \ ++ if ((FS)->regs.how[2] == REG_UNSAVED) \ + { \ + unsigned int *insn \ + = (unsigned int *) \ +@@ -207,7 +207,7 @@ ucontext_for (struct _Unwind_Context *context) + + #define REGISTER_CFA_OFFSET_FOR(FS,REGNO,ADDR,CFA)\ + do { \ +-(FS)->regs.reg[REGNO].how = REG_SAVED_OFFSET; \ ++(FS)->regs.how[REGNO] = REG_SAVED_OFFSET; \ + (FS)->regs.reg[REGNO].loc.offset = (long) (ADDR) - (CFA); \ + } while (0) + +diff --git a/libgcc/config/rs6000/darwin-fallback.c b/libgcc/config/rs6000/darwin-fallback.c +index aa484c021342dd59..27ac466dd5e51adf 100644 +--- a/libgcc/config/rs6000/darwin-fallback.c ++++ b/libgcc/config/rs6000/darwin-fallback.c +@@ -368,7 +368,7 @@ handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32], + return false; + + #define set_offset(r, addr) \ +- (fs->regs.reg[r].how = REG_SAVED_OFFSET, \ ++ (fs->regs.how[r] = 
REG_SAVED_OFFSET, \ + fs->regs.reg[r].loc.offset = (_Unwind_Ptr)(addr) - new_cfa) + + /* Restore even the registers that are not call-saved, since they +diff --git a/libgcc/config/rs6000/freebsd-unwind.h b/libgcc/config/rs6000/freebsd-unwind.h +index e035f63fc2d4421f..aba8b65c0b1c08e3 100644 +--- a/libgcc/config/rs6000/freebsd-unwind.h ++++ b/libgcc/config/rs6000/freebsd-unwind.h +@@ -33,7 +33,7 @@ frob_update_context (struct _Unwind_Context *context, + const unsigned int *pc = (const unsigned int *) context->ra; + + #ifdef __powerpc64__ +- if (fs->regs.reg[2].how == REG_UNSAVED) ++ if (fs->regs.how[2] == REG_UNSAVED) + { + /* If the current unwind info (FS) does not contain explicit info + saving R2, then we have to do a minor amount of code reading to +diff --git a/libgcc/config/rs6000/linux-unwind.h b/libgcc/config/rs6000/linux-unwind.h +index acdc948f85dc3c4a..aef196a046c85c99 100644 +--- a/libgcc/config/rs6000/linux-unwind.h ++++ b/libgcc/config/rs6000/linux-unwind.h +@@ -211,12 +211,12 @@ ppc_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + #ifdef __powerpc64__ +- fs->regs.reg[2].how = REG_SAVED_OFFSET; ++ fs->regs.how[2] = REG_SAVED_OFFSET; + fs->regs.reg[2].loc.offset = (long) ®s->gpr[2] - new_cfa; + #endif + for (i = 14; i < 32; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = (long) ®s->gpr[i] - new_cfa; + } + +@@ -226,20 +226,20 @@ ppc_fallback_frame_state (struct _Unwind_Context *context, + cr_offset += sizeof (long) - 4; + #endif + /* In the ELFv1 ABI, CR2 stands in for the whole CR. */ +- fs->regs.reg[R_CR2].how = REG_SAVED_OFFSET; ++ fs->regs.how[R_CR2] = REG_SAVED_OFFSET; + fs->regs.reg[R_CR2].loc.offset = cr_offset; + #if _CALL_ELF == 2 + /* In the ELFv2 ABI, every CR field has a separate CFI entry. */ +- fs->regs.reg[R_CR3].how = REG_SAVED_OFFSET; ++ fs->regs.how[R_CR3] = REG_SAVED_OFFSET; + fs->regs.reg[R_CR3].loc.offset = cr_offset; +- fs->regs.reg[R_CR4].how = REG_SAVED_OFFSET; ++ fs->regs.how[R_CR4] = REG_SAVED_OFFSET; + fs->regs.reg[R_CR4].loc.offset = cr_offset; + #endif + +- fs->regs.reg[R_LR].how = REG_SAVED_OFFSET; ++ fs->regs.how[R_LR] = REG_SAVED_OFFSET; + fs->regs.reg[R_LR].loc.offset = (long) ®s->link - new_cfa; + +- fs->regs.reg[ARG_POINTER_REGNUM].how = REG_SAVED_OFFSET; ++ fs->regs.how[ARG_POINTER_REGNUM] = REG_SAVED_OFFSET; + fs->regs.reg[ARG_POINTER_REGNUM].loc.offset = (long) ®s->nip - new_cfa; + fs->retaddr_column = ARG_POINTER_REGNUM; + fs->signal_frame = 1; +@@ -247,7 +247,7 @@ ppc_fallback_frame_state (struct _Unwind_Context *context, + /* If we have a FPU... */ + for (i = 14; i < 32; i++) + { +- fs->regs.reg[i + 32].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + 32] = REG_SAVED_OFFSET; + fs->regs.reg[i + 32].loc.offset = (long) ®s->fpr[i] - new_cfa; + } + +@@ -261,12 +261,12 @@ ppc_fallback_frame_state (struct _Unwind_Context *context, + { + for (i = 20; i < 32; i++) + { +- fs->regs.reg[i + R_VR0].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + R_VR0] = REG_SAVED_OFFSET; + fs->regs.reg[i + R_VR0].loc.offset = (long) &vregs->vr[i] - new_cfa; + } + } + +- fs->regs.reg[R_VRSAVE].how = REG_SAVED_OFFSET; ++ fs->regs.how[R_VRSAVE] = REG_SAVED_OFFSET; + fs->regs.reg[R_VRSAVE].loc.offset = (long) &vregs->vsave - new_cfa; + + /* If we have SPE register high-parts... 
we check at compile-time to +@@ -274,7 +274,7 @@ ppc_fallback_frame_state (struct _Unwind_Context *context, + #ifdef __SPE__ + for (i = 14; i < 32; i++) + { +- fs->regs.reg[i + FIRST_SPE_HIGH_REGNO - 4].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + FIRST_SPE_HIGH_REGNO - 4] = REG_SAVED_OFFSET; + fs->regs.reg[i + FIRST_SPE_HIGH_REGNO - 4].loc.offset + = (long) ®s->vregs - new_cfa + 4 * i; + } +@@ -311,7 +311,7 @@ frob_update_context (struct _Unwind_Context *context, _Unwind_FrameState *fs ATT + #endif + + #ifdef __powerpc64__ +- if (fs->regs.reg[2].how == REG_UNSAVED) ++ if (fs->regs.how[2] == REG_UNSAVED) + { + /* If the current unwind info (FS) does not contain explicit info + saving R2, then we have to do a minor amount of code reading to +diff --git a/libgcc/config/s390/linux-unwind.h b/libgcc/config/s390/linux-unwind.h +index 277b1e2044865815..4524aeb64a715fd6 100644 +--- a/libgcc/config/s390/linux-unwind.h ++++ b/libgcc/config/s390/linux-unwind.h +@@ -106,20 +106,20 @@ s390_fallback_frame_state (struct _Unwind_Context *context, + + for (i = 0; i < 16; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = + (long)®s->gprs[i] - new_cfa; + } + for (i = 0; i < 16; i++) + { +- fs->regs.reg[16+i].how = REG_SAVED_OFFSET; ++ fs->regs.how[16+i] = REG_SAVED_OFFSET; + fs->regs.reg[16+i].loc.offset = + (long)®s->fprs[dwarf_to_fpr_map[i]] - new_cfa; + } + + /* Load return addr from PSW into dummy register 32. */ + +- fs->regs.reg[32].how = REG_SAVED_OFFSET; ++ fs->regs.how[32] = REG_SAVED_OFFSET; + fs->regs.reg[32].loc.offset = (long)®s->psw_addr - new_cfa; + fs->retaddr_column = 32; + /* SIGILL, SIGFPE and SIGTRAP are delivered with psw_addr +diff --git a/libgcc/config/s390/tpf-unwind.h b/libgcc/config/s390/tpf-unwind.h +index 4a54d9bd2a27f2fd..3429c5720726d279 100644 +--- a/libgcc/config/s390/tpf-unwind.h ++++ b/libgcc/config/s390/tpf-unwind.h +@@ -88,18 +88,18 @@ s390_fallback_frame_state (struct _Unwind_Context *context, + /* All registers remain unchanged ... */ + for (i = 0; i < 32; i++) + { +- fs->regs.reg[i].how = REG_SAVED_REG; ++ fs->regs.how[i] = REG_SAVED_REG; + fs->regs.reg[i].loc.reg = i; + } + + /* ... 
except for %r14, which is stored at CFA+offset where offset + is displacment of ICST_CRET or ICST_SRET from CFA */ + if ( __isPATrange(context->ra) ) { +- fs->regs.reg[14].how = REG_SAVED_OFFSET; ++ fs->regs.how[14] = REG_SAVED_OFFSET; + fs->regs.reg[14].loc.offset = ICST_CRET - STACK_POINTER_OFFSET; + fs->retaddr_column = 14; + } else { +- fs->regs.reg[14].how = REG_SAVED_OFFSET; ++ fs->regs.how[14] = REG_SAVED_OFFSET; + fs->regs.reg[14].loc.offset = ICST_SRET - STACK_POINTER_OFFSET; + fs->retaddr_column = 14; + +@@ -119,13 +119,13 @@ s390_fallback_frame_state (struct _Unwind_Context *context, + + for (i = 0; i < 16; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = regs + i*8 - new_cfa; + } + + for (i = 0; i < 4; i++) + { +- fs->regs.reg[16 + i].how = REG_SAVED_OFFSET; ++ fs->regs.how[16 + i] = REG_SAVED_OFFSET; + fs->regs.reg[16 + i].loc.offset = regs + 16*8 + i*8 - new_cfa; + } + +diff --git a/libgcc/config/sh/linux-unwind.h b/libgcc/config/sh/linux-unwind.h +index af25791ec89d2b45..eca2f6fec7e94fda 100644 +--- a/libgcc/config/sh/linux-unwind.h ++++ b/libgcc/config/sh/linux-unwind.h +@@ -99,24 +99,24 @@ sh_fallback_frame_state (struct _Unwind_Context *context, + + for (i = 0; i < 15; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset + = (long)&(sc->sc_regs[i]) - new_cfa; + } + +- fs->regs.reg[SH_DWARF_FRAME_PR].how = REG_SAVED_OFFSET; ++ fs->regs.how[SH_DWARF_FRAME_PR] = REG_SAVED_OFFSET; + fs->regs.reg[SH_DWARF_FRAME_PR].loc.offset + = (long)&(sc->sc_pr) - new_cfa; +- fs->regs.reg[SH_DWARF_FRAME_SR].how = REG_SAVED_OFFSET; ++ fs->regs.how[SH_DWARF_FRAME_SR] = REG_SAVED_OFFSET; + fs->regs.reg[SH_DWARF_FRAME_SR].loc.offset + = (long)&(sc->sc_sr) - new_cfa; +- fs->regs.reg[SH_DWARF_FRAME_GBR].how = REG_SAVED_OFFSET; ++ fs->regs.how[SH_DWARF_FRAME_GBR] = REG_SAVED_OFFSET; + fs->regs.reg[SH_DWARF_FRAME_GBR].loc.offset + = (long)&(sc->sc_gbr) - new_cfa; +- fs->regs.reg[SH_DWARF_FRAME_MACH].how = REG_SAVED_OFFSET; ++ fs->regs.how[SH_DWARF_FRAME_MACH] = REG_SAVED_OFFSET; + fs->regs.reg[SH_DWARF_FRAME_MACH].loc.offset + = (long)&(sc->sc_mach) - new_cfa; +- fs->regs.reg[SH_DWARF_FRAME_MACL].how = REG_SAVED_OFFSET; ++ fs->regs.how[SH_DWARF_FRAME_MACL] = REG_SAVED_OFFSET; + fs->regs.reg[SH_DWARF_FRAME_MACL].loc.offset + = (long)&(sc->sc_macl) - new_cfa; + +@@ -124,7 +124,7 @@ sh_fallback_frame_state (struct _Unwind_Context *context, + r = SH_DWARF_FRAME_FP0; + for (i = 0; i < 16; i++) + { +- fs->regs.reg[r+i].how = REG_SAVED_OFFSET; ++ fs->regs.how[r+i] = REG_SAVED_OFFSET; + fs->regs.reg[r+i].loc.offset + = (long)&(sc->sc_fpregs[i]) - new_cfa; + } +@@ -132,20 +132,20 @@ sh_fallback_frame_state (struct _Unwind_Context *context, + r = SH_DWARF_FRAME_XD0; + for (i = 0; i < 8; i++) + { +- fs->regs.reg[r+i].how = REG_SAVED_OFFSET; ++ fs->regs.how[r+i] = REG_SAVED_OFFSET; + fs->regs.reg[r+i].loc.offset + = (long)&(sc->sc_xfpregs[2*i]) - new_cfa; + } + +- fs->regs.reg[SH_DWARF_FRAME_FPUL].how = REG_SAVED_OFFSET; ++ fs->regs.how[SH_DWARF_FRAME_FPUL] = REG_SAVED_OFFSET; + fs->regs.reg[SH_DWARF_FRAME_FPUL].loc.offset + = (long)&(sc->sc_fpul) - new_cfa; +- fs->regs.reg[SH_DWARF_FRAME_FPSCR].how = REG_SAVED_OFFSET; ++ fs->regs.how[SH_DWARF_FRAME_FPSCR] = REG_SAVED_OFFSET; + fs->regs.reg[SH_DWARF_FRAME_FPSCR].loc.offset + = (long)&(sc->sc_fpscr) - new_cfa; + #endif + +- fs->regs.reg[SH_DWARF_FRAME_PC].how = REG_SAVED_OFFSET; ++ fs->regs.how[SH_DWARF_FRAME_PC] = 
REG_SAVED_OFFSET; + fs->regs.reg[SH_DWARF_FRAME_PC].loc.offset + = (long)&(sc->sc_pc) - new_cfa; + fs->retaddr_column = SH_DWARF_FRAME_PC; +diff --git a/libgcc/config/sparc/linux-unwind.h b/libgcc/config/sparc/linux-unwind.h +index da0215a729ea5ceb..cc5204c605fe6dd5 100644 +--- a/libgcc/config/sparc/linux-unwind.h ++++ b/libgcc/config/sparc/linux-unwind.h +@@ -65,13 +65,13 @@ sparc64_fallback_frame_state (struct _Unwind_Context *context, + if ((unsigned int) i == __builtin_dwarf_sp_column ()) + continue; + +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset + = this_cfa + regs_off + (i * 8) - new_cfa; + } + for (i = 0; i < 16; i++) + { +- fs->regs.reg[i + 16].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + 16] = REG_SAVED_OFFSET; + fs->regs.reg[i + 16].loc.offset + = this_cfa + (i * 8) - new_cfa; + } +@@ -81,7 +81,7 @@ sparc64_fallback_frame_state (struct _Unwind_Context *context, + { + if (i > 32 && (i & 0x1)) + continue; +- fs->regs.reg[i + 32].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + 32] = REG_SAVED_OFFSET; + fs->regs.reg[i + 32].loc.offset + = fpu_save + (i * 4) - new_cfa; + } +@@ -95,7 +95,7 @@ sparc64_fallback_frame_state (struct _Unwind_Context *context, + shifted_ra_location = this_cfa + regs_off + 19 * 8; /* Y register */ + *(long *)shifted_ra_location = *(long *)ra_location - 8; + fs->retaddr_column = 0; +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = shifted_ra_location - new_cfa; + fs->signal_frame = 1; + +@@ -122,7 +122,7 @@ sparc64_frob_update_context (struct _Unwind_Context *context, + context->cfa -= STACK_BIAS; + + for (i = 0; i < __LIBGCC_DWARF_FRAME_REGISTERS__ + 1; ++i) +- if (fs->regs.reg[i].how == REG_SAVED_OFFSET) ++ if (fs->regs.how[i] == REG_SAVED_OFFSET) + _Unwind_SetGRPtr (context, i, + _Unwind_GetGRPtr (context, i) - STACK_BIAS); + } +@@ -177,13 +177,13 @@ sparc_fallback_frame_state (struct _Unwind_Context *context, + if ((unsigned int) i == __builtin_dwarf_sp_column ()) + continue; + +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset + = this_cfa + regs_off + (4 * 4) + (i * 4) - new_cfa; + } + for (i = 0; i < 16; i++) + { +- fs->regs.reg[i + 16].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + 16] = REG_SAVED_OFFSET; + fs->regs.reg[i + 16].loc.offset + = this_cfa + (i * 4) - new_cfa; + } +@@ -191,7 +191,7 @@ sparc_fallback_frame_state (struct _Unwind_Context *context, + { + for (i = 0; i < 32; i++) + { +- fs->regs.reg[i + 32].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + 32] = REG_SAVED_OFFSET; + fs->regs.reg[i + 32].loc.offset + = fpu_save + (i * 4) - new_cfa; + } +@@ -205,7 +205,7 @@ sparc_fallback_frame_state (struct _Unwind_Context *context, + shifted_ra_location = this_cfa + regs_off + 3 * 4; /* Y register */ + *(int *)shifted_ra_location = *(int *)ra_location - 8; + fs->retaddr_column = 0; +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = shifted_ra_location - new_cfa; + fs->signal_frame = 1; + +diff --git a/libgcc/config/sparc/sol2-unwind.h b/libgcc/config/sparc/sol2-unwind.h +index 480ab5dc9ee8752e..e6408bef59ffbef2 100644 +--- a/libgcc/config/sparc/sol2-unwind.h ++++ b/libgcc/config/sparc/sol2-unwind.h +@@ -96,7 +96,7 @@ sparc64_frob_update_context (struct _Unwind_Context *context, + context->cfa -= STACK_BIAS; + + for (i = 0; i < __LIBGCC_DWARF_FRAME_REGISTERS__ + 1; ++i) +- if (fs->regs.reg[i].how == 
REG_SAVED_OFFSET) ++ if (fs->regs.how[i] == REG_SAVED_OFFSET) + _Unwind_SetGRPtr (context, i, + _Unwind_GetGRPtr (context, i) - STACK_BIAS); + } +@@ -221,7 +221,7 @@ MD_FALLBACK_FRAME_STATE_FOR (struct _Unwind_Context *context, + continue; + + /* First the global registers and then the out registers. */ +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = (long)&mctx->gregs[REG_Y + i] - new_cfa; + } + +@@ -229,7 +229,7 @@ MD_FALLBACK_FRAME_STATE_FOR (struct _Unwind_Context *context, + the register window (in and local registers) was saved. */ + for (i = 0; i < 16; i++) + { +- fs->regs.reg[i + 16].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + 16] = REG_SAVED_OFFSET; + fs->regs.reg[i + 16].loc.offset = i * sizeof(long); + } + +@@ -238,7 +238,7 @@ MD_FALLBACK_FRAME_STATE_FOR (struct _Unwind_Context *context, + { + for (i = 0; i < 32; i++) + { +- fs->regs.reg[i + 32].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + 32] = REG_SAVED_OFFSET; + fs->regs.reg[i + 32].loc.offset + = (long)&mctx->fpregs.fpu_fr.fpu_regs[i] - new_cfa; + } +@@ -250,7 +250,7 @@ MD_FALLBACK_FRAME_STATE_FOR (struct _Unwind_Context *context, + if (i > 32 && (i & 1)) + continue; + +- fs->regs.reg[i + 32].how = REG_SAVED_OFFSET; ++ fs->regs.how[i + 32] = REG_SAVED_OFFSET; + fs->regs.reg[i + 32].loc.offset + = (long)&mctx->fpregs.fpu_fr.fpu_dregs[i/2] - new_cfa; + } +@@ -265,7 +265,7 @@ MD_FALLBACK_FRAME_STATE_FOR (struct _Unwind_Context *context, + shifted_ra_location = &mctx->gregs[REG_Y]; + *(void **)shifted_ra_location = *(void **)ra_location - 8; + fs->retaddr_column = 0; +- fs->regs.reg[0].how = REG_SAVED_OFFSET; ++ fs->regs.how[0] = REG_SAVED_OFFSET; + fs->regs.reg[0].loc.offset = (long)shifted_ra_location - new_cfa; + + /* SIGFPE for IEEE-754 exceptions is delivered after the faulting insn +diff --git a/libgcc/config/tilepro/linux-unwind.h b/libgcc/config/tilepro/linux-unwind.h +index cbb433d9a0556a40..d7eff58a156c990a 100644 +--- a/libgcc/config/tilepro/linux-unwind.h ++++ b/libgcc/config/tilepro/linux-unwind.h +@@ -83,12 +83,12 @@ tile_fallback_frame_state (struct _Unwind_Context *context, + + for (i = 0; i < 56; ++i) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset + = (long)&sc->gregs[i] - new_cfa; + } + +- fs->regs.reg[56].how = REG_SAVED_OFFSET; ++ fs->regs.how[56] = REG_SAVED_OFFSET; + fs->regs.reg[56].loc.offset = (long)&sc->pc - new_cfa; + fs->retaddr_column = 56; + fs->signal_frame = 1; +diff --git a/libgcc/config/xtensa/linux-unwind.h b/libgcc/config/xtensa/linux-unwind.h +index 9252ed9d1d0a194b..1ab300ee601a39f5 100644 +--- a/libgcc/config/xtensa/linux-unwind.h ++++ b/libgcc/config/xtensa/linux-unwind.h +@@ -105,11 +105,11 @@ xtensa_fallback_frame_state (struct _Unwind_Context *context, + + for (i = 0; i < 16; i++) + { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.how[i] = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = (_Unwind_Ptr) &(sc->sc_a[i]) - new_cfa; + } + +- fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].how = ++ fs->regs.how[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__] = + REG_SAVED_VAL_OFFSET; + fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].loc.offset = + (_Unwind_Ptr) (sc->sc_pc) - new_cfa; +diff --git a/libgcc/unwind-dw2.c b/libgcc/unwind-dw2.c +index 43d06531fce9bed1..daebcb8bf7d215fe 100644 +--- a/libgcc/unwind-dw2.c ++++ b/libgcc/unwind-dw2.c +@@ -36,6 +36,7 @@ + #include "unwind-dw2-fde.h" + #include "gthr.h" + #include "unwind-dw2.h" ++#include + + 
#ifdef HAVE_SYS_SDT_H + #include +@@ -983,7 +984,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_OFFSET; ++ fs->regs.how[reg] = REG_SAVED_OFFSET; + fs->regs.reg[reg].loc.offset = offset; + } + } +@@ -992,7 +993,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = insn & 0x3f; + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) +- fs->regs.reg[reg].how = REG_UNSAVED; ++ fs->regs.how[reg] = REG_UNSAVED; + } + else switch (insn) + { +@@ -1026,7 +1027,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_OFFSET; ++ fs->regs.how[reg] = REG_SAVED_OFFSET; + fs->regs.reg[reg].loc.offset = offset; + } + break; +@@ -1037,21 +1038,21 @@ execute_cfa_program (const unsigned char *insn_ptr, + register was saved somewhere. */ + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) +- fs->regs.reg[reg].how = REG_UNSAVED; ++ fs->regs.how[reg] = REG_UNSAVED; + break; + + case DW_CFA_same_value: + insn_ptr = read_uleb128 (insn_ptr, ®); + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) +- fs->regs.reg[reg].how = REG_UNSAVED; ++ fs->regs.how[reg] = REG_UNSAVED; + break; + + case DW_CFA_undefined: + insn_ptr = read_uleb128 (insn_ptr, ®); + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) +- fs->regs.reg[reg].how = REG_UNDEFINED; ++ fs->regs.how[reg] = REG_UNDEFINED; + break; + + case DW_CFA_nop: +@@ -1065,7 +1066,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_REG; ++ fs->regs.how[reg] = REG_SAVED_REG; + fs->regs.reg[reg].loc.reg = (_Unwind_Word)reg2; + } + } +@@ -1128,7 +1129,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_EXP; ++ fs->regs.how[reg] = REG_SAVED_EXP; + fs->regs.reg[reg].loc.exp = insn_ptr; + } + insn_ptr = read_uleb128 (insn_ptr, &utmp); +@@ -1143,7 +1144,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_OFFSET; ++ fs->regs.how[reg] = REG_SAVED_OFFSET; + fs->regs.reg[reg].loc.offset = offset; + } + break; +@@ -1171,7 +1172,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_VAL_OFFSET; ++ fs->regs.how[reg] = REG_SAVED_VAL_OFFSET; + fs->regs.reg[reg].loc.offset = offset; + } + break; +@@ -1183,7 +1184,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_VAL_OFFSET; ++ fs->regs.how[reg] = REG_SAVED_VAL_OFFSET; + fs->regs.reg[reg].loc.offset = offset; + } + break; +@@ -1193,7 +1194,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_VAL_EXP; ++ fs->regs.how[reg] = REG_SAVED_VAL_EXP; + fs->regs.reg[reg].loc.exp = insn_ptr; + } + insn_ptr = read_uleb128 (insn_ptr, &utmp); +@@ -1205,14 +1206,14 @@ 
execute_cfa_program (const unsigned char *insn_ptr, + /* This CFA is multiplexed with Sparc. On AArch64 it's used to toggle + return address signing status. */ + reg = DWARF_REGNUM_AARCH64_RA_STATE; +- gcc_assert (fs->regs.reg[reg].how == REG_UNSAVED); ++ gcc_assert (fs->regs.how[reg] == REG_UNSAVED); + fs->regs.reg[reg].loc.offset ^= 1; + #else + /* ??? Hardcoded for SPARC register window configuration. */ + if (__LIBGCC_DWARF_FRAME_REGISTERS__ >= 32) + for (reg = 16; reg < 32; ++reg) + { +- fs->regs.reg[reg].how = REG_SAVED_OFFSET; ++ fs->regs.how[reg] = REG_SAVED_OFFSET; + fs->regs.reg[reg].loc.offset = (reg - 16) * sizeof (void *); + } + #endif +@@ -1232,7 +1233,7 @@ execute_cfa_program (const unsigned char *insn_ptr, + reg = DWARF_REG_TO_UNWIND_COLUMN (reg); + if (UNWIND_COLUMN_IN_RANGE (reg)) + { +- fs->regs.reg[reg].how = REG_SAVED_OFFSET; ++ fs->regs.how[reg] = REG_SAVED_OFFSET; + fs->regs.reg[reg].loc.offset = -offset; + } + break; +@@ -1255,7 +1256,8 @@ uw_frame_state_for (struct _Unwind_Context *context, _Unwind_FrameState *fs) + const struct dwarf_cie *cie; + const unsigned char *aug, *insn, *end; + +- memset (fs, 0, sizeof (*fs)); ++ memset (&fs->regs.how[0], 0, ++ sizeof (*fs) - offsetof (_Unwind_FrameState, regs.how[0])); + context->args_size = 0; + context->lsda = 0; + +@@ -1355,7 +1357,7 @@ __frame_state_for (void *pc_target, struct frame_state *state_in) + + for (reg = 0; reg < PRE_GCC3_DWARF_FRAME_REGISTERS + 1; reg++) + { +- state_in->saved[reg] = fs.regs.reg[reg].how; ++ state_in->saved[reg] = fs.regs.how[reg]; + switch (state_in->saved[reg]) + { + case REG_SAVED_REG: +@@ -1453,7 +1455,7 @@ uw_update_context_1 (struct _Unwind_Context *context, _Unwind_FrameState *fs) + + /* Compute the addresses of all registers saved in this frame. */ + for (i = 0; i < __LIBGCC_DWARF_FRAME_REGISTERS__ + 1; ++i) +- switch (fs->regs.reg[i].how) ++ switch (fs->regs.how[i]) + { + case REG_UNSAVED: + case REG_UNDEFINED: +@@ -1531,7 +1533,7 @@ uw_update_context (struct _Unwind_Context *context, _Unwind_FrameState *fs) + rule is handled like same_value. The only exception is + DW_CFA_undefined on retaddr_column which is supposed to + mark outermost frame in DWARF 3. */ +- if (fs->regs.reg[DWARF_REG_TO_UNWIND_COLUMN (fs->retaddr_column)].how ++ if (fs->regs.how[DWARF_REG_TO_UNWIND_COLUMN (fs->retaddr_column)] + == REG_UNDEFINED) + /* uw_frame_state_for uses context->ra == 0 check to find outermost + stack frame. */ +diff --git a/libgcc/unwind-dw2.h b/libgcc/unwind-dw2.h +index 896e3b1968bbfe04..22241b1f0d14cffc 100644 +--- a/libgcc/unwind-dw2.h ++++ b/libgcc/unwind-dw2.h +@@ -22,6 +22,16 @@ + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + ++enum { ++ REG_UNSAVED, ++ REG_SAVED_OFFSET, ++ REG_SAVED_REG, ++ REG_SAVED_EXP, ++ REG_SAVED_VAL_OFFSET, ++ REG_SAVED_VAL_EXP, ++ REG_UNDEFINED ++}; ++ + /* The result of interpreting the frame unwind info for a frame. + This is all symbolic at this point, as none of the values can + be resolved until the target pc is located. */ +@@ -37,16 +47,8 @@ typedef struct + _Unwind_Sword offset; + const unsigned char *exp; + } loc; +- enum { +- REG_UNSAVED, +- REG_SAVED_OFFSET, +- REG_SAVED_REG, +- REG_SAVED_EXP, +- REG_SAVED_VAL_OFFSET, +- REG_SAVED_VAL_EXP, +- REG_UNDEFINED +- } how; + } reg[__LIBGCC_DWARF_FRAME_REGISTERS__+1]; ++ unsigned char how[__LIBGCC_DWARF_FRAME_REGISTERS__+1]; + + /* Used to implement DW_CFA_remember_state. 
*/ + struct frame_state_reg_info *prev; diff --git a/SOURCES/gcc-RHEL-105072-13.patch b/SOURCES/gcc-RHEL-105072-13.patch new file mode 100644 index 0000000..4b38934 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-13.patch @@ -0,0 +1,48 @@ +commit acdb24166d13d87c374e578d2ad5d58249171930 +Author: Florian Weimer +Date: Mon Oct 17 11:09:17 2022 +0200 + + libgcc: Move cfa_how into potential padding in struct frame_state_reg_info + + On many architectures, there is a padding gap after the how array + member, and cfa_how can be moved there. This reduces the size of the + struct and the amount of memory that uw_frame_state_for has to clear. + + There is no measurable performance benefit from this on x86-64 (even + though the memset goes from 120 to 112 bytes), but it seems to be a + good idea to do anyway. + + libgcc/ + + * unwind-dw2.h (struct frame_state_reg_info): Move cfa_how member + and reduce its size. + +diff --git a/libgcc/unwind-dw2.h b/libgcc/unwind-dw2.h +index 22241b1f0d14cffc..437c785efa4f297d 100644 +--- a/libgcc/unwind-dw2.h ++++ b/libgcc/unwind-dw2.h +@@ -50,6 +50,12 @@ typedef struct + } reg[__LIBGCC_DWARF_FRAME_REGISTERS__+1]; + unsigned char how[__LIBGCC_DWARF_FRAME_REGISTERS__+1]; + ++ enum { ++ CFA_UNSET, ++ CFA_REG_OFFSET, ++ CFA_EXP ++ } cfa_how : 8; ++ + /* Used to implement DW_CFA_remember_state. */ + struct frame_state_reg_info *prev; + +@@ -58,11 +64,6 @@ typedef struct + _Unwind_Sword cfa_offset; + _Unwind_Word cfa_reg; + const unsigned char *cfa_exp; +- enum { +- CFA_UNSET, +- CFA_REG_OFFSET, +- CFA_EXP +- } cfa_how; + } regs; + + /* The PC described by the current frame state. */ diff --git a/SOURCES/gcc-RHEL-105072-14.patch b/SOURCES/gcc-RHEL-105072-14.patch new file mode 100644 index 0000000..42b9ba4 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-14.patch @@ -0,0 +1,99 @@ +commit e724b0480bfa5ec04f39be8c7290330b495c59de +Author: Florian Weimer +Date: Fri Nov 4 10:18:03 2022 +0100 + + libgcc: Special-case BFD ld unwind table encodings in find_fde_tail + + BFD ld (and the other linkers) only produce one encoding of these + values. It is not necessary to use the general + read_encoded_value_with_base decoding routine. This avoids the + data-dependent branches in its implementation. + + libgcc/ + + * unwind-dw2-fde-dip.c (find_fde_tail): Special-case encoding + values actually used by BFD ld. + +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index 25f2e44c5823cf64..d4821d7d19950f15 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -396,10 +396,21 @@ find_fde_tail (_Unwind_Ptr pc, + if (hdr->version != 1) + return NULL; + +- p = read_encoded_value_with_base (hdr->eh_frame_ptr_enc, +- base_from_cb_data (hdr->eh_frame_ptr_enc, +- dbase), +- p, &eh_frame); ++ if (__builtin_expect (hdr->eh_frame_ptr_enc == (DW_EH_PE_sdata4 ++ | DW_EH_PE_pcrel), 1)) ++ { ++ /* Specialized version of read_encoded_value_with_base, based on what ++ BFD ld generates. */ ++ signed value __attribute__ ((mode (SI))); ++ memcpy (&value, p, sizeof (value)); ++ p += sizeof (value); ++ dbase = value; /* No adjustment because pcrel has base 0. */ ++ } ++ else ++ p = read_encoded_value_with_base (hdr->eh_frame_ptr_enc, ++ base_from_cb_data (hdr->eh_frame_ptr_enc, ++ dbase), ++ p, &eh_frame); + + /* We require here specific table encoding to speed things up. 
+ Also, DW_EH_PE_datarel here means using PT_GNU_EH_FRAME start +@@ -409,10 +420,20 @@ find_fde_tail (_Unwind_Ptr pc, + { + _Unwind_Ptr fde_count; + +- p = read_encoded_value_with_base (hdr->fde_count_enc, +- base_from_cb_data (hdr->fde_count_enc, +- dbase), +- p, &fde_count); ++ if (__builtin_expect (hdr->fde_count_enc == DW_EH_PE_udata4, 1)) ++ { ++ /* Specialized version of read_encoded_value_with_base, based on ++ what BFD ld generates. */ ++ unsigned value __attribute__ ((mode (SI))); ++ memcpy (&value, p, sizeof (value)); ++ p += sizeof (value); ++ fde_count = value; ++ } ++ else ++ p = read_encoded_value_with_base (hdr->fde_count_enc, ++ base_from_cb_data (hdr->fde_count_enc, ++ dbase), ++ p, &fde_count); + /* Shouldn't happen. */ + if (fde_count == 0) + return NULL; +@@ -454,8 +475,25 @@ find_fde_tail (_Unwind_Ptr pc, + f = (fde *) (table[mid].fde + data_base); + f_enc = get_fde_encoding (f); + f_enc_size = size_of_encoded_value (f_enc); +- read_encoded_value_with_base (f_enc & 0x0f, 0, +- &f->pc_begin[f_enc_size], &range); ++ ++ /* BFD ld uses DW_EH_PE_sdata4 | DW_EH_PE_pcrel on non-FDPIC targets, ++ so optimize for that. ++ ++ This optimization is not valid for FDPIC targets. f_enc & 0x0f as ++ passed to read_encoded_value_with_base masks away the base flags, ++ but they are implicit for FDPIC. */ ++#ifndef __FDPIC__ ++ if (__builtin_expect (f_enc == (DW_EH_PE_sdata4 | DW_EH_PE_pcrel), ++ 1)) ++ { ++ signed value __attribute__ ((mode (SI))); ++ memcpy (&value, &f->pc_begin[f_enc_size], sizeof (value)); ++ range = value; ++ } ++ else ++#endif ++ read_encoded_value_with_base (f_enc & 0x0f, 0, ++ &f->pc_begin[f_enc_size], &range); + _Unwind_Ptr func = table[mid].initial_loc + data_base; + if (pc < table[mid].initial_loc + data_base + range) + { diff --git a/SOURCES/gcc-RHEL-105072-15.patch b/SOURCES/gcc-RHEL-105072-15.patch new file mode 100644 index 0000000..b2d3341 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-15.patch @@ -0,0 +1,337 @@ +commit 1c118c9970600117700cc12284587e0238de6bbe +Author: Thomas Neumann +Date: Tue Nov 22 08:41:54 2022 +0100 + + speed up end_fde_sort using radix sort + + When registering a dynamic unwinding frame the fde list is sorted. + Previously, we split the list into a sorted and an unsorted part, + sorted the later using heap sort, and merged both. That can be + quite slow due to the large number of (expensive) comparisons. + + This patch replaces that logic with a radix sort instead. The + radix sort uses the same amount of memory as the old logic, + using the second list as auxiliary space, and it includes two + techniques to speed up sorting: First, it computes the pointer + addresses for blocks of values, reducing the decoding overhead. + And it recognizes when the data has reached a sorted state, + allowing for early termination. When running out of memory + we fall back to pure heap sort, as before. + + For this test program + + \#include + int main(int argc, char** argv) { + return 0; + } + + compiled with g++ -O -o hello -static hello.c we get with + perf stat -r 200 on a 5950X the following performance numbers: + + old logic: + + 0,20 msec task-clock + 930.834 cycles + 3.079.765 instructions + 0,00030478 +- 0,00000237 seconds time elapsed + + new logic: + + 0,10 msec task-clock + 473.269 cycles + 1.239.077 instructions + 0,00021119 +- 0,00000168 seconds time elapsed + + libgcc/ChangeLog: + * unwind-dw2-fde.c: Use radix sort instead of split+sort+merge. 
+ +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index 27fea89dc314ccd0..a0d9bfb9f7d34ec1 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -456,22 +456,52 @@ fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y) + + typedef int (*fde_compare_t) (struct object *, const fde *, const fde *); + ++// The extractor functions compute the pointer values for a block of ++// fdes. The block processing hides the call overhead. + +-/* This is a special mix of insertion sort and heap sort, optimized for +- the data sets that actually occur. They look like +- 101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130. +- I.e. a linearly increasing sequence (coming from functions in the text +- section), with additionally a few unordered elements (coming from functions +- in gnu_linkonce sections) whose values are higher than the values in the +- surrounding linear sequence (but not necessarily higher than the values +- at the end of the linear sequence!). +- The worst-case total run time is O(N) + O(n log (n)), where N is the +- total number of FDEs and n is the number of erratic ones. */ ++static void ++fde_unencoded_extract (struct object *ob __attribute__ ((unused)), ++ _Unwind_Ptr *target, const fde **x, int count) ++{ ++ for (int index = 0; index < count; ++index) ++ memcpy (target + index, x[index]->pc_begin, sizeof (_Unwind_Ptr)); ++} ++ ++static void ++fde_single_encoding_extract (struct object *ob, _Unwind_Ptr *target, ++ const fde **x, int count) ++{ ++ _Unwind_Ptr base; ++ ++ base = base_from_object (ob->s.b.encoding, ob); ++ for (int index = 0; index < count; ++index) ++ read_encoded_value_with_base (ob->s.b.encoding, base, x[index]->pc_begin, ++ target + index); ++} ++ ++static void ++fde_mixed_encoding_extract (struct object *ob, _Unwind_Ptr *target, ++ const fde **x, int count) ++{ ++ for (int index = 0; index < count; ++index) ++ { ++ int encoding = get_fde_encoding (x[index]); ++ read_encoded_value_with_base (encoding, base_from_object (encoding, ob), ++ x[index]->pc_begin, target + index); ++ } ++} ++ ++typedef void (*fde_extractor_t) (struct object *, _Unwind_Ptr *, const fde **, ++ int); ++ ++// Data is is sorted using radix sort if possible, using an temporary ++// auxiliary data structure of the same size as the input. When running ++// out of memory do in-place heap sort. + + struct fde_accumulator + { + struct fde_vector *linear; +- struct fde_vector *erratic; ++ struct fde_vector *aux; + }; + + static inline int +@@ -485,8 +515,8 @@ start_fde_sort (struct fde_accumulator *accu, size_t count) + if ((accu->linear = malloc (size))) + { + accu->linear->count = 0; +- if ((accu->erratic = malloc (size))) +- accu->erratic->count = 0; ++ if ((accu->aux = malloc (size))) ++ accu->aux->count = 0; + return 1; + } + else +@@ -500,59 +530,6 @@ fde_insert (struct fde_accumulator *accu, const fde *this_fde) + accu->linear->array[accu->linear->count++] = this_fde; + } + +-/* Split LINEAR into a linear sequence with low values and an erratic +- sequence with high values, put the linear one (of longest possible +- length) into LINEAR and the erratic one into ERRATIC. This is O(N). +- +- Because the longest linear sequence we are trying to locate within the +- incoming LINEAR array can be interspersed with (high valued) erratic +- entries. We construct a chain indicating the sequenced entries. +- To avoid having to allocate this chain, we overlay it onto the space of +- the ERRATIC array during construction. 
A final pass iterates over the +- chain to determine what should be placed in the ERRATIC array, and +- what is the linear sequence. This overlay is safe from aliasing. */ +- +-static inline void +-fde_split (struct object *ob, fde_compare_t fde_compare, +- struct fde_vector *linear, struct fde_vector *erratic) +-{ +- static const fde *marker; +- size_t count = linear->count; +- const fde *const *chain_end = ▮ +- size_t i, j, k; +- +- /* This should optimize out, but it is wise to make sure this assumption +- is correct. Should these have different sizes, we cannot cast between +- them and the overlaying onto ERRATIC will not work. */ +- gcc_assert (sizeof (const fde *) == sizeof (const fde **)); +- +- for (i = 0; i < count; i++) +- { +- const fde *const *probe; +- +- for (probe = chain_end; +- probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0; +- probe = chain_end) +- { +- chain_end = (const fde *const*) erratic->array[probe - linear->array]; +- erratic->array[probe - linear->array] = NULL; +- } +- erratic->array[i] = (const fde *) chain_end; +- chain_end = &linear->array[i]; +- } +- +- /* Each entry in LINEAR which is part of the linear sequence we have +- discovered will correspond to a non-NULL entry in the chain we built in +- the ERRATIC array. */ +- for (i = j = k = 0; i < count; i++) +- if (erratic->array[i]) +- linear->array[j++] = linear->array[i]; +- else +- erratic->array[k++] = linear->array[i]; +- linear->count = j; +- erratic->count = k; +-} +- + #define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0) + + /* Convert a semi-heap to a heap. A semi-heap is a heap except possibly +@@ -615,59 +592,116 @@ frame_heapsort (struct object *ob, fde_compare_t fde_compare, + #undef SWAP + } + +-/* Merge V1 and V2, both sorted, and put the result into V1. */ ++// Radix sort data in V1 using V2 as aux memory. Runtime O(n). + static inline void +-fde_merge (struct object *ob, fde_compare_t fde_compare, +- struct fde_vector *v1, struct fde_vector *v2) ++fde_radixsort (struct object *ob, fde_extractor_t fde_extractor, ++ struct fde_vector *v1, struct fde_vector *v2) + { +- size_t i1, i2; +- const fde * fde2; +- +- i2 = v2->count; +- if (i2 > 0) ++#define FANOUTBITS 8 ++#define FANOUT (1 << FANOUTBITS) ++#define BLOCKSIZE 128 ++ const unsigned rounds ++ = (__CHAR_BIT__ * sizeof (_Unwind_Ptr) + FANOUTBITS - 1) / FANOUTBITS; ++ const fde **a1 = v1->array, **a2 = v2->array; ++ _Unwind_Ptr ptrs[BLOCKSIZE + 1]; ++ unsigned n = v1->count; ++ for (unsigned round = 0; round != rounds; ++round) + { +- i1 = v1->count; +- do ++ unsigned counts[FANOUT] = {0}; ++ unsigned violations = 0; ++ ++ // Count the number of elements per bucket and check if we are already ++ // sorted. ++ _Unwind_Ptr last = 0; ++ for (unsigned i = 0; i < n;) ++ { ++ unsigned chunk = ((n - i) <= BLOCKSIZE) ? (n - i) : BLOCKSIZE; ++ fde_extractor (ob, ptrs + 1, a1 + i, chunk); ++ ptrs[0] = last; ++ for (unsigned j = 0; j < chunk; ++j) ++ { ++ unsigned b = (ptrs[j + 1] >> (round * FANOUTBITS)) & (FANOUT - 1); ++ counts[b]++; ++ // Use summation instead of an if to eliminate branches. ++ violations += ptrs[j + 1] < ptrs[j]; ++ } ++ i += chunk; ++ last = ptrs[chunk]; ++ } ++ ++ // Stop if we are already sorted. ++ if (!violations) ++ { ++ // The sorted data is in a1 now. ++ a2 = a1; ++ break; ++ } ++ ++ // Compute the prefix sum. ++ unsigned sum = 0; ++ for (unsigned i = 0; i != FANOUT; ++i) ++ { ++ unsigned s = sum; ++ sum += counts[i]; ++ counts[i] = s; ++ } ++ ++ // Place all elements. 
++ for (unsigned i = 0; i < n;) + { +- i2--; +- fde2 = v2->array[i2]; +- while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0) ++ unsigned chunk = ((n - i) <= BLOCKSIZE) ? (n - i) : BLOCKSIZE; ++ fde_extractor (ob, ptrs, a1 + i, chunk); ++ for (unsigned j = 0; j < chunk; ++j) + { +- v1->array[i1+i2] = v1->array[i1-1]; +- i1--; ++ unsigned b = (ptrs[j] >> (round * FANOUTBITS)) & (FANOUT - 1); ++ a2[counts[b]++] = a1[i + j]; + } +- v1->array[i1+i2] = fde2; ++ i += chunk; + } +- while (i2 > 0); +- v1->count += v2->count; ++ ++ // Swap a1 and a2. ++ const fde **tmp = a1; ++ a1 = a2; ++ a2 = tmp; + } ++#undef BLOCKSIZE ++#undef FANOUT ++#undef FANOUTBITS ++ ++ // The data is in a2 now, move in place if needed. ++ if (a2 != v1->array) ++ memcpy (v1->array, a2, sizeof (const fde *) * n); + } + + static inline void + end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count) + { +- fde_compare_t fde_compare; +- + gcc_assert (!accu->linear || accu->linear->count == count); + +- if (ob->s.b.mixed_encoding) +- fde_compare = fde_mixed_encoding_compare; +- else if (ob->s.b.encoding == DW_EH_PE_absptr) +- fde_compare = fde_unencoded_compare; +- else +- fde_compare = fde_single_encoding_compare; +- +- if (accu->erratic) ++ if (accu->aux) + { +- fde_split (ob, fde_compare, accu->linear, accu->erratic); +- gcc_assert (accu->linear->count + accu->erratic->count == count); +- frame_heapsort (ob, fde_compare, accu->erratic); +- fde_merge (ob, fde_compare, accu->linear, accu->erratic); +- free (accu->erratic); ++ fde_extractor_t fde_extractor; ++ if (ob->s.b.mixed_encoding) ++ fde_extractor = fde_mixed_encoding_extract; ++ else if (ob->s.b.encoding == DW_EH_PE_absptr) ++ fde_extractor = fde_unencoded_extract; ++ else ++ fde_extractor = fde_single_encoding_extract; ++ ++ fde_radixsort (ob, fde_extractor, accu->linear, accu->aux); ++ free (accu->aux); + } + else + { +- /* We've not managed to malloc an erratic array, ++ fde_compare_t fde_compare; ++ if (ob->s.b.mixed_encoding) ++ fde_compare = fde_mixed_encoding_compare; ++ else if (ob->s.b.encoding == DW_EH_PE_absptr) ++ fde_compare = fde_unencoded_compare; ++ else ++ fde_compare = fde_single_encoding_compare; ++ ++ /* We've not managed to malloc an aux array, + so heap sort in the linear one. */ + frame_heapsort (ob, fde_compare, accu->linear); + } diff --git a/SOURCES/gcc-RHEL-105072-16.patch b/SOURCES/gcc-RHEL-105072-16.patch new file mode 100644 index 0000000..ae455df --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-16.patch @@ -0,0 +1,143 @@ +commit 6e56633daae79f514b0e71f4d9849bcd8d9ce71f +Author: Thomas Neumann +Date: Fri Dec 9 18:23:44 2022 +0100 + + initialize fde objects lazily + + When registering an unwind frame with __register_frame_info_bases + we currently initialize that fde object eagerly. This has the + advantage that it is immutable afterwards and we can safely + access it from multiple threads, but it has the disadvantage + that we pay the initialization cost even if the application + never throws an exception. + + This commit changes the logic to initialize the objects lazily. + The objects themselves are inserted into the b-tree when + registering the frame, but the sorted fde_vector is + not constructed yet. Only on the first time that an + exception tries to pass through the registered code the + object is initialized. We notice that with a double checking, + first doing a relaxed load of the sorted bit and then re-checking + under a mutex when the object was not initialized yet. 
+ + Note that the check must implicitly be safe concering a concurrent + frame deregistration, as trying the deregister a frame that is + on the unwinding path of a concurrent exception is inherently racy. + + libgcc/ChangeLog: + * unwind-dw2-fde.c: Initialize fde object lazily when + the first exception tries to pass through. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index a0d9bfb9f7d34ec1..efcf9490469ad1a0 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -63,8 +63,6 @@ release_registered_frames (void) + + static void + get_pc_range (const struct object *ob, uintptr_type *range); +-static void +-init_object (struct object *ob); + + #else + /* Without fast path frame deregistration must always succeed. */ +@@ -76,6 +74,7 @@ static const int in_shutdown = 0; + by decreasing value of pc_begin. */ + static struct object *unseen_objects; + static struct object *seen_objects; ++#endif + + #ifdef __GTHREAD_MUTEX_INIT + static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT; +@@ -103,7 +102,6 @@ init_object_mutex_once (void) + static __gthread_mutex_t object_mutex; + #endif + #endif +-#endif + + /* Called from crtbegin.o to register the unwind info for an object. */ + +@@ -126,10 +124,7 @@ __register_frame_info_bases (const void *begin, struct object *ob, + #endif + + #ifdef ATOMIC_FDE_FAST_PATH +- // Initialize eagerly to avoid locking later +- init_object (ob); +- +- // And register the frame ++ // Register the frame in the b-tree + uintptr_type range[2]; + get_pc_range (ob, range); + btree_insert (®istered_frames, range[0], range[1] - range[0], ob); +@@ -180,10 +175,7 @@ __register_frame_info_table_bases (void *begin, struct object *ob, + ob->s.b.encoding = DW_EH_PE_omit; + + #ifdef ATOMIC_FDE_FAST_PATH +- // Initialize eagerly to avoid locking later +- init_object (ob); +- +- // And register the frame ++ // Register the frame in the b-tree + uintptr_type range[2]; + get_pc_range (ob, range); + btree_insert (®istered_frames, range[0], range[1] - range[0], ob); +@@ -926,7 +918,15 @@ init_object (struct object* ob) + accu.linear->orig_data = ob->u.single; + ob->u.sort = accu.linear; + ++#ifdef ATOMIC_FDE_FAST_PATH ++ // We must update the sorted bit with an atomic operation ++ struct object tmp; ++ tmp.s.b = ob->s.b; ++ tmp.s.b.sorted = 1; ++ __atomic_store (&(ob->s.b), &(tmp.s.b), __ATOMIC_RELEASE); ++#else + ob->s.b.sorted = 1; ++#endif + } + + #ifdef ATOMIC_FDE_FAST_PATH +@@ -1164,6 +1164,21 @@ search_object (struct object* ob, void *pc) + } + } + ++#ifdef ATOMIC_FDE_FAST_PATH ++ ++// Check if the object was already initialized ++static inline bool ++is_object_initialized (struct object *ob) ++{ ++ // We have to use acquire atomics for the read, which ++ // is a bit involved as we read from a bitfield ++ struct object tmp; ++ __atomic_load (&(ob->s.b), &(tmp.s.b), __ATOMIC_ACQUIRE); ++ return tmp.s.b.sorted; ++} ++ ++#endif ++ + const fde * + _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + { +@@ -1175,6 +1190,21 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + if (!ob) + return NULL; + ++ // Initialize the object lazily ++ if (!is_object_initialized (ob)) ++ { ++ // Check again under mutex ++ init_object_mutex_once (); ++ __gthread_mutex_lock (&object_mutex); ++ ++ if (!ob->s.b.sorted) ++ { ++ init_object (ob); ++ } ++ ++ __gthread_mutex_unlock (&object_mutex); ++ } ++ + f = search_object (ob, pc); + #else + diff --git a/SOURCES/gcc-RHEL-105072-17.patch b/SOURCES/gcc-RHEL-105072-17.patch new file mode 100644 
index 0000000..31964d4 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-17.patch @@ -0,0 +1,703 @@ +commit 8fdef16cd5d1b89359db3cd9a9768ab2d1b5081f +Author: Florian Weimer +Date: Tue Jan 3 16:47:32 2023 +0100 + + libgcc: Specialize execute_cfa_program in DWARF unwinder for alignments [redo] + + The parameters fs->data_align and fs->code_align always have fixed + values for a particular target in GCC-generated code. Specialize + execute_cfa_program for these values, to avoid multiplications. + + gcc/c-family/ + + * c-cppbuiltin.cc (c_cpp_builtins): Define + __LIBGCC_DWARF_CIE_DATA_ALIGNMENT__. + + libgcc/ + + * unwind-dw2-execute_cfa.h: New file. Extracted from + the execute_cfa_program function in unwind-dw2.c. + * unwind-dw2.c (execute_cfa_program_generic): New function. + (execute_cfa_program_specialized): Likewise. + (execute_cfa_program): Call execute_cfa_program_specialized + or execute_cfa_program_generic, as appropriate. + +diff --git a/gcc/c-family/c-cppbuiltin.c b/gcc/c-family/c-cppbuiltin.c +index 11e015bdb87b0f9b..a5369eb51b07f0ab 100644 +--- a/gcc/c-family/c-cppbuiltin.c ++++ b/gcc/c-family/c-cppbuiltin.c +@@ -1408,6 +1408,9 @@ c_cpp_builtins (cpp_reader *pfile) + #endif + builtin_define_with_int_value ("__LIBGCC_DWARF_FRAME_REGISTERS__", + DWARF_FRAME_REGISTERS); ++ builtin_define_with_int_value ("__LIBGCC_DWARF_CIE_DATA_ALIGNMENT__", ++ DWARF_CIE_DATA_ALIGNMENT); ++ + #ifdef EH_RETURN_STACKADJ_RTX + cpp_define (pfile, "__LIBGCC_EH_RETURN_STACKADJ_RTX__"); + #endif +diff --git a/libgcc/unwind-dw2-execute_cfa.h b/libgcc/unwind-dw2-execute_cfa.h +new file mode 100644 +index 0000000000000000..dd97b7866686a361 +--- /dev/null ++++ b/libgcc/unwind-dw2-execute_cfa.h +@@ -0,0 +1,322 @@ ++/* DWARF2 exception handling CFA execution engine. ++ Copyright (C) 1997-2022 Free Software Foundation, Inc. ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3, or (at your option) ++ any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++/* This file is included from unwind-dw2.c to specialize the code for certain ++ values of DATA_ALIGN and CODE_ALIGN. These macros must be defined prior to ++ including this file. */ ++ ++{ ++ struct frame_state_reg_info *unused_rs = NULL; ++ ++ /* Don't allow remember/restore between CIE and FDE programs. */ ++ fs->regs.prev = NULL; ++ ++ /* The comparison with the return address uses < rather than <= because ++ we are only interested in the effects of code before the call; for a ++ noreturn function, the return address may point to unrelated code with ++ a different stack configuration that we are not interested in. 
We ++ assume that the call itself is unwind info-neutral; if not, or if ++ there are delay instructions that adjust the stack, these must be ++ reflected at the point immediately before the call insn. ++ In signal frames, return address is after last completed instruction, ++ so we add 1 to return address to make the comparison <=. */ ++ while (insn_ptr < insn_end ++ && fs->pc < context->ra + _Unwind_IsSignalFrame (context)) ++ { ++ unsigned char insn = *insn_ptr++; ++ _uleb128_t reg, utmp; ++ _sleb128_t offset, stmp; ++ ++ if ((insn & 0xc0) == DW_CFA_advance_loc) ++ fs->pc += (insn & 0x3f) * CODE_ALIGN; ++ else if ((insn & 0xc0) == DW_CFA_offset) ++ { ++ reg = insn & 0x3f; ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ offset = (_Unwind_Sword) utmp * DATA_ALIGN; ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_OFFSET; ++ fs->regs.reg[reg].loc.offset = offset; ++ } ++ } ++ else if ((insn & 0xc0) == DW_CFA_restore) ++ { ++ reg = insn & 0x3f; ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ fs->regs.how[reg] = REG_UNSAVED; ++ } ++ else switch (insn) ++ { ++ case DW_CFA_set_loc: ++ { ++ _Unwind_Ptr pc; ++ ++ insn_ptr = read_encoded_value (context, fs->fde_encoding, ++ insn_ptr, &pc); ++ fs->pc = (void *) pc; ++ } ++ break; ++ ++ case DW_CFA_advance_loc1: ++ fs->pc += read_1u (insn_ptr) * CODE_ALIGN; ++ insn_ptr += 1; ++ break; ++ case DW_CFA_advance_loc2: ++ fs->pc += read_2u (insn_ptr) * CODE_ALIGN; ++ insn_ptr += 2; ++ break; ++ case DW_CFA_advance_loc4: ++ fs->pc += read_4u (insn_ptr) * CODE_ALIGN; ++ insn_ptr += 4; ++ break; ++ ++ case DW_CFA_offset_extended: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ offset = (_Unwind_Sword) utmp * DATA_ALIGN; ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_OFFSET; ++ fs->regs.reg[reg].loc.offset = offset; ++ } ++ break; ++ ++ case DW_CFA_restore_extended: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ /* FIXME, this is wrong; the CIE might have said that the ++ register was saved somewhere. 
*/ ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ fs->regs.how[reg] = REG_UNSAVED; ++ break; ++ ++ case DW_CFA_same_value: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ fs->regs.how[reg] = REG_UNSAVED; ++ break; ++ ++ case DW_CFA_undefined: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ fs->regs.how[reg] = REG_UNDEFINED; ++ break; ++ ++ case DW_CFA_nop: ++ break; ++ ++ case DW_CFA_register: ++ { ++ _uleb128_t reg2; ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ insn_ptr = read_uleb128 (insn_ptr, ®2); ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_REG; ++ fs->regs.reg[reg].loc.reg = (_Unwind_Word)reg2; ++ } ++ } ++ break; ++ ++ case DW_CFA_remember_state: ++ { ++ struct frame_state_reg_info *new_rs; ++ if (unused_rs) ++ { ++ new_rs = unused_rs; ++ unused_rs = unused_rs->prev; ++ } ++ else ++ new_rs = alloca (sizeof (struct frame_state_reg_info)); ++ ++ *new_rs = fs->regs; ++ fs->regs.prev = new_rs; ++ } ++ break; ++ ++ case DW_CFA_restore_state: ++ { ++ struct frame_state_reg_info *old_rs = fs->regs.prev; ++ fs->regs = *old_rs; ++ old_rs->prev = unused_rs; ++ unused_rs = old_rs; ++ } ++ break; ++ ++ case DW_CFA_def_cfa: ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ fs->regs.cfa_reg = (_Unwind_Word)utmp; ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ fs->regs.cfa_offset = (_Unwind_Word)utmp; ++ fs->regs.cfa_how = CFA_REG_OFFSET; ++ break; ++ ++ case DW_CFA_def_cfa_register: ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ fs->regs.cfa_reg = (_Unwind_Word)utmp; ++ fs->regs.cfa_how = CFA_REG_OFFSET; ++ break; ++ ++ case DW_CFA_def_cfa_offset: ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ fs->regs.cfa_offset = utmp; ++ /* cfa_how deliberately not set. */ ++ break; ++ ++ case DW_CFA_def_cfa_expression: ++ fs->regs.cfa_exp = insn_ptr; ++ fs->regs.cfa_how = CFA_EXP; ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ insn_ptr += utmp; ++ break; ++ ++ case DW_CFA_expression: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_EXP; ++ fs->regs.reg[reg].loc.exp = insn_ptr; ++ } ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ insn_ptr += utmp; ++ break; ++ ++ /* Dwarf3. */ ++ case DW_CFA_offset_extended_sf: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ insn_ptr = read_sleb128 (insn_ptr, &stmp); ++ offset = stmp * DATA_ALIGN; ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_OFFSET; ++ fs->regs.reg[reg].loc.offset = offset; ++ } ++ break; ++ ++ case DW_CFA_def_cfa_sf: ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ fs->regs.cfa_reg = (_Unwind_Word)utmp; ++ insn_ptr = read_sleb128 (insn_ptr, &stmp); ++ fs->regs.cfa_offset = (_Unwind_Sword)stmp; ++ fs->regs.cfa_how = CFA_REG_OFFSET; ++ fs->regs.cfa_offset *= DATA_ALIGN; ++ break; ++ ++ case DW_CFA_def_cfa_offset_sf: ++ insn_ptr = read_sleb128 (insn_ptr, &stmp); ++ fs->regs.cfa_offset = (_Unwind_Sword)stmp; ++ fs->regs.cfa_offset *= DATA_ALIGN; ++ /* cfa_how deliberately not set. 
*/ ++ break; ++ ++ case DW_CFA_val_offset: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ offset = (_Unwind_Sword) utmp * DATA_ALIGN; ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_VAL_OFFSET; ++ fs->regs.reg[reg].loc.offset = offset; ++ } ++ break; ++ ++ case DW_CFA_val_offset_sf: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ insn_ptr = read_sleb128 (insn_ptr, &stmp); ++ offset = stmp * DATA_ALIGN; ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_VAL_OFFSET; ++ fs->regs.reg[reg].loc.offset = offset; ++ } ++ break; ++ ++ case DW_CFA_val_expression: ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_VAL_EXP; ++ fs->regs.reg[reg].loc.exp = insn_ptr; ++ } ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ insn_ptr += utmp; ++ break; ++ ++ case DW_CFA_GNU_window_save: ++#if defined (__aarch64__) && !defined (__ILP32__) ++ /* This CFA is multiplexed with Sparc. On AArch64 it's used to toggle ++ return address signing status. */ ++ reg = DWARF_REGNUM_AARCH64_RA_STATE; ++ gcc_assert (fs->regs.how[reg] == REG_UNSAVED); ++ fs->regs.reg[reg].loc.offset ^= 1; ++#else ++ /* ??? Hardcoded for SPARC register window configuration. */ ++ if (__LIBGCC_DWARF_FRAME_REGISTERS__ >= 32) ++ for (reg = 16; reg < 32; ++reg) ++ { ++ fs->regs.how[reg] = REG_SAVED_OFFSET; ++ fs->regs.reg[reg].loc.offset = (reg - 16) * sizeof (void *); ++ } ++#endif ++ break; ++ ++ case DW_CFA_GNU_args_size: ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ context->args_size = (_Unwind_Word)utmp; ++ break; ++ ++ case DW_CFA_GNU_negative_offset_extended: ++ /* Obsoleted by DW_CFA_offset_extended_sf, but used by ++ older PowerPC code. */ ++ insn_ptr = read_uleb128 (insn_ptr, ®); ++ insn_ptr = read_uleb128 (insn_ptr, &utmp); ++ offset = (_Unwind_Word) utmp * DATA_ALIGN; ++ reg = DWARF_REG_TO_UNWIND_COLUMN (reg); ++ if (UNWIND_COLUMN_IN_RANGE (reg)) ++ { ++ fs->regs.how[reg] = REG_SAVED_OFFSET; ++ fs->regs.reg[reg].loc.offset = -offset; ++ } ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++} ++ ++#undef DATA_ALIGN ++#undef CODE_ALIGN +diff --git a/libgcc/unwind-dw2.c b/libgcc/unwind-dw2.c +index daebcb8bf7d215fe..701552634c6e87c5 100644 +--- a/libgcc/unwind-dw2.c ++++ b/libgcc/unwind-dw2.c +@@ -947,302 +947,43 @@ execute_stack_op (const unsigned char *op_ptr, const unsigned char *op_end, + instruction sequence to decode, current register information and + CIE info, and the PC range to evaluate. */ + ++static void __attribute__ ((__noinline__)) ++execute_cfa_program_generic (const unsigned char *insn_ptr, ++ const unsigned char *insn_end, ++ struct _Unwind_Context *context, ++ _Unwind_FrameState *fs) ++{ ++#define DATA_ALIGN fs->data_align ++#define CODE_ALIGN fs->code_align ++#include "unwind-dw2-execute_cfa.h" ++} ++ ++static inline void ++execute_cfa_program_specialized (const unsigned char *insn_ptr, ++ const unsigned char *insn_end, ++ struct _Unwind_Context *context, ++ _Unwind_FrameState *fs) ++{ ++#define DATA_ALIGN __LIBGCC_DWARF_CIE_DATA_ALIGNMENT__ ++ /* GCC always uses 1 even on architectures with a fixed instruction ++ width. 
*/ ++#define CODE_ALIGN 1 ++#include "unwind-dw2-execute_cfa.h" ++} ++ + static void + execute_cfa_program (const unsigned char *insn_ptr, + const unsigned char *insn_end, + struct _Unwind_Context *context, + _Unwind_FrameState *fs) + { +- struct frame_state_reg_info *unused_rs = NULL; +- +- /* Don't allow remember/restore between CIE and FDE programs. */ +- fs->regs.prev = NULL; +- +- /* The comparison with the return address uses < rather than <= because +- we are only interested in the effects of code before the call; for a +- noreturn function, the return address may point to unrelated code with +- a different stack configuration that we are not interested in. We +- assume that the call itself is unwind info-neutral; if not, or if +- there are delay instructions that adjust the stack, these must be +- reflected at the point immediately before the call insn. +- In signal frames, return address is after last completed instruction, +- so we add 1 to return address to make the comparison <=. */ +- while (insn_ptr < insn_end +- && fs->pc < context->ra + _Unwind_IsSignalFrame (context)) +- { +- unsigned char insn = *insn_ptr++; +- _uleb128_t reg, utmp; +- _sleb128_t offset, stmp; +- +- if ((insn & 0xc0) == DW_CFA_advance_loc) +- fs->pc += (insn & 0x3f) * fs->code_align; +- else if ((insn & 0xc0) == DW_CFA_offset) +- { +- reg = insn & 0x3f; +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- offset = (_Unwind_Sword) utmp * fs->data_align; +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_OFFSET; +- fs->regs.reg[reg].loc.offset = offset; +- } +- } +- else if ((insn & 0xc0) == DW_CFA_restore) +- { +- reg = insn & 0x3f; +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- fs->regs.how[reg] = REG_UNSAVED; +- } +- else switch (insn) +- { +- case DW_CFA_set_loc: +- { +- _Unwind_Ptr pc; +- +- insn_ptr = read_encoded_value (context, fs->fde_encoding, +- insn_ptr, &pc); +- fs->pc = (void *) pc; +- } +- break; +- +- case DW_CFA_advance_loc1: +- fs->pc += read_1u (insn_ptr) * fs->code_align; +- insn_ptr += 1; +- break; +- case DW_CFA_advance_loc2: +- fs->pc += read_2u (insn_ptr) * fs->code_align; +- insn_ptr += 2; +- break; +- case DW_CFA_advance_loc4: +- fs->pc += read_4u (insn_ptr) * fs->code_align; +- insn_ptr += 4; +- break; +- +- case DW_CFA_offset_extended: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- offset = (_Unwind_Sword) utmp * fs->data_align; +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_OFFSET; +- fs->regs.reg[reg].loc.offset = offset; +- } +- break; +- +- case DW_CFA_restore_extended: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- /* FIXME, this is wrong; the CIE might have said that the +- register was saved somewhere. 
*/ +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- fs->regs.how[reg] = REG_UNSAVED; +- break; +- +- case DW_CFA_same_value: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- fs->regs.how[reg] = REG_UNSAVED; +- break; +- +- case DW_CFA_undefined: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- fs->regs.how[reg] = REG_UNDEFINED; +- break; +- +- case DW_CFA_nop: +- break; +- +- case DW_CFA_register: +- { +- _uleb128_t reg2; +- insn_ptr = read_uleb128 (insn_ptr, ®); +- insn_ptr = read_uleb128 (insn_ptr, ®2); +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_REG; +- fs->regs.reg[reg].loc.reg = (_Unwind_Word)reg2; +- } +- } +- break; +- +- case DW_CFA_remember_state: +- { +- struct frame_state_reg_info *new_rs; +- if (unused_rs) +- { +- new_rs = unused_rs; +- unused_rs = unused_rs->prev; +- } +- else +- new_rs = alloca (sizeof (struct frame_state_reg_info)); +- +- *new_rs = fs->regs; +- fs->regs.prev = new_rs; +- } +- break; +- +- case DW_CFA_restore_state: +- { +- struct frame_state_reg_info *old_rs = fs->regs.prev; +- fs->regs = *old_rs; +- old_rs->prev = unused_rs; +- unused_rs = old_rs; +- } +- break; +- +- case DW_CFA_def_cfa: +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- fs->regs.cfa_reg = (_Unwind_Word)utmp; +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- fs->regs.cfa_offset = (_Unwind_Word)utmp; +- fs->regs.cfa_how = CFA_REG_OFFSET; +- break; +- +- case DW_CFA_def_cfa_register: +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- fs->regs.cfa_reg = (_Unwind_Word)utmp; +- fs->regs.cfa_how = CFA_REG_OFFSET; +- break; +- +- case DW_CFA_def_cfa_offset: +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- fs->regs.cfa_offset = utmp; +- /* cfa_how deliberately not set. */ +- break; +- +- case DW_CFA_def_cfa_expression: +- fs->regs.cfa_exp = insn_ptr; +- fs->regs.cfa_how = CFA_EXP; +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- insn_ptr += utmp; +- break; +- +- case DW_CFA_expression: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_EXP; +- fs->regs.reg[reg].loc.exp = insn_ptr; +- } +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- insn_ptr += utmp; +- break; +- +- /* Dwarf3. */ +- case DW_CFA_offset_extended_sf: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- insn_ptr = read_sleb128 (insn_ptr, &stmp); +- offset = stmp * fs->data_align; +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_OFFSET; +- fs->regs.reg[reg].loc.offset = offset; +- } +- break; +- +- case DW_CFA_def_cfa_sf: +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- fs->regs.cfa_reg = (_Unwind_Word)utmp; +- insn_ptr = read_sleb128 (insn_ptr, &stmp); +- fs->regs.cfa_offset = (_Unwind_Sword)stmp; +- fs->regs.cfa_how = CFA_REG_OFFSET; +- fs->regs.cfa_offset *= fs->data_align; +- break; +- +- case DW_CFA_def_cfa_offset_sf: +- insn_ptr = read_sleb128 (insn_ptr, &stmp); +- fs->regs.cfa_offset = (_Unwind_Sword)stmp; +- fs->regs.cfa_offset *= fs->data_align; +- /* cfa_how deliberately not set. 
*/ +- break; +- +- case DW_CFA_val_offset: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- offset = (_Unwind_Sword) utmp * fs->data_align; +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_VAL_OFFSET; +- fs->regs.reg[reg].loc.offset = offset; +- } +- break; +- +- case DW_CFA_val_offset_sf: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- insn_ptr = read_sleb128 (insn_ptr, &stmp); +- offset = stmp * fs->data_align; +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_VAL_OFFSET; +- fs->regs.reg[reg].loc.offset = offset; +- } +- break; +- +- case DW_CFA_val_expression: +- insn_ptr = read_uleb128 (insn_ptr, ®); +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_VAL_EXP; +- fs->regs.reg[reg].loc.exp = insn_ptr; +- } +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- insn_ptr += utmp; +- break; +- +- case DW_CFA_GNU_window_save: +-#if defined (__aarch64__) && !defined (__ILP32__) +- /* This CFA is multiplexed with Sparc. On AArch64 it's used to toggle +- return address signing status. */ +- reg = DWARF_REGNUM_AARCH64_RA_STATE; +- gcc_assert (fs->regs.how[reg] == REG_UNSAVED); +- fs->regs.reg[reg].loc.offset ^= 1; +-#else +- /* ??? Hardcoded for SPARC register window configuration. */ +- if (__LIBGCC_DWARF_FRAME_REGISTERS__ >= 32) +- for (reg = 16; reg < 32; ++reg) +- { +- fs->regs.how[reg] = REG_SAVED_OFFSET; +- fs->regs.reg[reg].loc.offset = (reg - 16) * sizeof (void *); +- } +-#endif +- break; +- +- case DW_CFA_GNU_args_size: +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- context->args_size = (_Unwind_Word)utmp; +- break; +- +- case DW_CFA_GNU_negative_offset_extended: +- /* Obsoleted by DW_CFA_offset_extended_sf, but used by +- older PowerPC code. */ +- insn_ptr = read_uleb128 (insn_ptr, ®); +- insn_ptr = read_uleb128 (insn_ptr, &utmp); +- offset = (_Unwind_Word) utmp * fs->data_align; +- reg = DWARF_REG_TO_UNWIND_COLUMN (reg); +- if (UNWIND_COLUMN_IN_RANGE (reg)) +- { +- fs->regs.how[reg] = REG_SAVED_OFFSET; +- fs->regs.reg[reg].loc.offset = -offset; +- } +- break; +- +- default: +- gcc_unreachable (); +- } +- } ++ if (fs->data_align == __LIBGCC_DWARF_CIE_DATA_ALIGNMENT__ ++ && fs->code_align == 1) ++ execute_cfa_program_specialized (insn_ptr, insn_end, context, fs); ++ else ++ execute_cfa_program_generic (insn_ptr, insn_end, context, fs); + } ++ + + /* Given the _Unwind_Context CONTEXT for a stack frame, look up the FDE for + its caller and decode it into FS. This function also sets the diff --git a/SOURCES/gcc-RHEL-105072-18.patch b/SOURCES/gcc-RHEL-105072-18.patch new file mode 100644 index 0000000..3061c32 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-18.patch @@ -0,0 +1,146 @@ +commit c98cd1df22fbe0829149e346a1ba9bf1f0be8a40 +Author: Wilco Dijkstra +Date: Tue Jan 3 15:57:46 2023 +0000 + + libgcc: Fix uninitialized RA signing on AArch64 [PR107678] + + A recent change only initializes the regs.how[] during Dwarf unwinding + which resulted in an uninitialized offset used in return address signing + and random failures during unwinding. The fix is to encode the return + address signing state in REG_UNSAVED and a new state REG_UNSAVED_ARCHEXT. + + libgcc/ + PR target/107678 + * unwind-dw2.h (REG_UNSAVED_ARCHEXT): Add new enum. + * unwind-dw2.c (uw_update_context_1): Add REG_UNSAVED_ARCHEXT case. 
+ * unwind-dw2-execute_cfa.h: Use REG_UNSAVED_ARCHEXT/REG_UNSAVED to + encode the return address signing state. + * config/aarch64/aarch64-unwind.h (aarch64_demangle_return_addr) + Check current return address signing state. + (aarch64_frob_update_contex): Remove. + +diff --git a/libgcc/config/aarch64/aarch64-unwind.h b/libgcc/config/aarch64/aarch64-unwind.h +index 466e4235991485ea..2fddadc57564348f 100644 +--- a/libgcc/config/aarch64/aarch64-unwind.h ++++ b/libgcc/config/aarch64/aarch64-unwind.h +@@ -29,8 +29,6 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + + #define MD_DEMANGLE_RETURN_ADDR(context, fs, addr) \ + aarch64_demangle_return_addr (context, fs, addr) +-#define MD_FROB_UPDATE_CONTEXT(context, fs) \ +- aarch64_frob_update_context (context, fs) + + static inline int + aarch64_cie_signed_with_b_key (struct _Unwind_Context *context) +@@ -55,42 +53,28 @@ aarch64_cie_signed_with_b_key (struct _Unwind_Context *context) + + static inline void * + aarch64_demangle_return_addr (struct _Unwind_Context *context, +- _Unwind_FrameState *fs ATTRIBUTE_UNUSED, ++ _Unwind_FrameState *fs, + _Unwind_Word addr_word) + { + void *addr = (void *)addr_word; +- if (context->flags & RA_SIGNED_BIT) ++ const int reg = DWARF_REGNUM_AARCH64_RA_STATE; ++ ++ if (fs->regs.how[reg] == REG_UNSAVED) ++ return addr; ++ ++ /* Return-address signing state is toggled by DW_CFA_GNU_window_save (where ++ REG_UNSAVED/REG_UNSAVED_ARCHEXT means RA signing is disabled/enabled), ++ or set by a DW_CFA_expression. */ ++ if (fs->regs.how[reg] == REG_UNSAVED_ARCHEXT ++ || (_Unwind_GetGR (context, reg) & 0x1) != 0) + { + _Unwind_Word salt = (_Unwind_Word) context->cfa; + if (aarch64_cie_signed_with_b_key (context) != 0) + return __builtin_aarch64_autib1716 (addr, salt); + return __builtin_aarch64_autia1716 (addr, salt); + } +- else +- return addr; +-} +- +-/* Do AArch64 private initialization on CONTEXT based on frame info FS. Mark +- CONTEXT as return address signed if bit 0 of DWARF_REGNUM_AARCH64_RA_STATE is +- set. */ +- +-static inline void +-aarch64_frob_update_context (struct _Unwind_Context *context, +- _Unwind_FrameState *fs) +-{ +- const int reg = DWARF_REGNUM_AARCH64_RA_STATE; +- int ra_signed; +- if (fs->regs.how[reg] == REG_UNSAVED) +- ra_signed = fs->regs.reg[reg].loc.offset & 0x1; +- else +- ra_signed = _Unwind_GetGR (context, reg) & 0x1; +- if (ra_signed) +- /* The flag is used for re-authenticating EH handler's address. */ +- context->flags |= RA_SIGNED_BIT; +- else +- context->flags &= ~RA_SIGNED_BIT; + +- return; ++ return addr; + } + + #endif /* defined AARCH64_UNWIND_H && defined __ILP32__ */ +diff --git a/libgcc/unwind-dw2-execute_cfa.h b/libgcc/unwind-dw2-execute_cfa.h +index dd97b7866686a361..0166f85965d4a5ee 100644 +--- a/libgcc/unwind-dw2-execute_cfa.h ++++ b/libgcc/unwind-dw2-execute_cfa.h +@@ -278,10 +278,15 @@ + case DW_CFA_GNU_window_save: + #if defined (__aarch64__) && !defined (__ILP32__) + /* This CFA is multiplexed with Sparc. On AArch64 it's used to toggle +- return address signing status. */ ++ return address signing status. REG_UNSAVED/REG_UNSAVED_ARCHEXT ++ mean RA signing is disabled/enabled. */ + reg = DWARF_REGNUM_AARCH64_RA_STATE; +- gcc_assert (fs->regs.how[reg] == REG_UNSAVED); +- fs->regs.reg[reg].loc.offset ^= 1; ++ gcc_assert (fs->regs.how[reg] == REG_UNSAVED ++ || fs->regs.how[reg] == REG_UNSAVED_ARCHEXT); ++ if (fs->regs.how[reg] == REG_UNSAVED) ++ fs->regs.how[reg] = REG_UNSAVED_ARCHEXT; ++ else ++ fs->regs.how[reg] = REG_UNSAVED; + #else + /* ??? 
Hardcoded for SPARC register window configuration. */ + if (__LIBGCC_DWARF_FRAME_REGISTERS__ >= 32) +diff --git a/libgcc/unwind-dw2.c b/libgcc/unwind-dw2.c +index 701552634c6e87c5..280ec2eb4df3d2a2 100644 +--- a/libgcc/unwind-dw2.c ++++ b/libgcc/unwind-dw2.c +@@ -137,9 +137,6 @@ struct _Unwind_Context + #define SIGNAL_FRAME_BIT ((~(_Unwind_Word) 0 >> 1) + 1) + /* Context which has version/args_size/by_value fields. */ + #define EXTENDED_CONTEXT_BIT ((~(_Unwind_Word) 0 >> 2) + 1) +- /* Bit reserved on AArch64, return address has been signed with A or B +- key. */ +-#define RA_SIGNED_BIT ((~(_Unwind_Word) 0 >> 3) + 1) + _Unwind_Word flags; + /* 0 for now, can be increased when further fields are added to + struct _Unwind_Context. */ +@@ -1200,6 +1197,7 @@ uw_update_context_1 (struct _Unwind_Context *context, _Unwind_FrameState *fs) + { + case REG_UNSAVED: + case REG_UNDEFINED: ++ case REG_UNSAVED_ARCHEXT: + break; + + case REG_SAVED_OFFSET: +diff --git a/libgcc/unwind-dw2.h b/libgcc/unwind-dw2.h +index 437c785efa4f297d..44f63e2eb31298d8 100644 +--- a/libgcc/unwind-dw2.h ++++ b/libgcc/unwind-dw2.h +@@ -29,6 +29,7 @@ enum { + REG_SAVED_EXP, + REG_SAVED_VAL_OFFSET, + REG_SAVED_VAL_EXP, ++ REG_UNSAVED_ARCHEXT, /* Target specific extension. */ + REG_UNDEFINED + }; + diff --git a/SOURCES/gcc-RHEL-105072-19.patch b/SOURCES/gcc-RHEL-105072-19.patch new file mode 100644 index 0000000..395dc48 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-19.patch @@ -0,0 +1,34 @@ +commit 9be9be828dc9020735bc7eacddd1ceae1aeedb1b +Author: Sören Tempel +Date: Sun May 14 19:30:21 2023 +0200 + + fix assert in __deregister_frame_info_bases + + The assertion in __deregister_frame_info_bases assumes that for every + frame something was inserted into the lookup data structure by + __register_frame_info_bases. Unfortunately, this does not necessarily + hold true as the btree_insert call in __register_frame_info_bases will + not insert anything for empty ranges. Therefore, we need to explicitly + account for such empty ranges in the assertion as `ob` will be a null + pointer for such ranges, hence causing the assertion to fail. + + Signed-off-by: Sören Tempel + + libgcc/ChangeLog: + * unwind-dw2-fde.c: Accept empty ranges when deregistering frames. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index efcf9490469ad1a0..fdf52396e8576b79 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -278,7 +278,9 @@ __deregister_frame_info_bases (const void *begin) + __gthread_mutex_unlock (&object_mutex); + #endif + +- gcc_assert (in_shutdown || ob); ++ // If we didn't find anything in the lookup data structures then they ++ // were either already destroyed or we tried to remove an empty range. ++ gcc_assert (in_shutdown || ((range[1] - range[0]) == 0 || ob)); + return (void *) ob; + } + diff --git a/SOURCES/gcc-RHEL-105072-2.patch b/SOURCES/gcc-RHEL-105072-2.patch new file mode 100644 index 0000000..7de21c7 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-2.patch @@ -0,0 +1,147 @@ +commit f58bf16f672cda3ac55f92f12e258c817ece6e3c +Author: Florian Weimer +Date: Mon Nov 22 13:30:23 2021 +0100 + + libgcc: Remove dbase member from struct unw_eh_callback_data if NULL + + Only bfin, frv, i386 and nios2 need this member at present. + + libgcc/ChangeLog + + * unwind-dw2-fde-dip.c (NEED_DBASE_MEMBER): Define. + (struct unw_eh_callback_data): Make dbase member conditional. + (unw_eh_callback_data_dbase): New function. + (base_from_cb_data): Simplify for the non-dbase case. + (_Unwind_IteratePhdrCallback): Adjust. 
+ (_Unwind_Find_FDE): Likewise. + +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index 4a4d990f455e5c11..3f302826d2d49074 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -101,15 +101,35 @@ static const fde * _Unwind_Find_registered_FDE (void *pc, struct dwarf_eh_bases + #define PT_GNU_EH_FRAME (PT_LOOS + 0x474e550) + #endif + ++#ifdef CRT_GET_RFIB_DATA ++#define NEED_DBASE_MEMBER 1 ++#else ++#define NEED_DBASE_MEMBER 0 ++#endif ++ + struct unw_eh_callback_data + { + _Unwind_Ptr pc; ++#if NEED_DBASE_MEMBER + void *dbase; ++#endif + void *func; + const fde *ret; + int check_cache; + }; + ++/* Returns DATA->dbase if available, else NULL. */ ++static inline _Unwind_Ptr ++unw_eh_callback_data_dbase (const struct unw_eh_callback_data *data ++ __attribute__ ((unused))) ++{ ++#if NEED_DBASE_MEMBER ++ return (_Unwind_Ptr) data->dbase; ++#else ++ return 0; ++#endif ++} ++ + struct unw_eh_frame_hdr + { + unsigned char version; +@@ -139,9 +159,11 @@ static struct frame_hdr_cache_element *frame_hdr_cache_head; + /* Like base_of_encoded_value, but take the base from a struct + unw_eh_callback_data instead of an _Unwind_Context. */ + +-static _Unwind_Ptr +-base_from_cb_data (unsigned char encoding, struct unw_eh_callback_data *data) ++static inline _Unwind_Ptr ++base_from_cb_data (unsigned char encoding __attribute__ ((unused)), ++ _Unwind_Ptr dbase __attribute__ ((unused))) + { ++#if NEED_DBASE_MEMBER + if (encoding == DW_EH_PE_omit) + return 0; + +@@ -155,10 +177,13 @@ base_from_cb_data (unsigned char encoding, struct unw_eh_callback_data *data) + case DW_EH_PE_textrel: + return 0; + case DW_EH_PE_datarel: +- return (_Unwind_Ptr) data->dbase; ++ return dbase; + default: + gcc_unreachable (); + } ++#else /* !NEED_DBASE_MEMBER */ ++ return 0; ++#endif + } + + static int +@@ -358,9 +383,10 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + # endif + #endif + ++ _Unwind_Ptr dbase = unw_eh_callback_data_dbase (data); + p = read_encoded_value_with_base (hdr->eh_frame_ptr_enc, + base_from_cb_data (hdr->eh_frame_ptr_enc, +- data), ++ dbase), + (const unsigned char *) (hdr + 1), + &eh_frame); + +@@ -374,7 +400,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + + p = read_encoded_value_with_base (hdr->fde_count_enc, + base_from_cb_data (hdr->fde_count_enc, +- data), ++ dbase), + p, &fde_count); + /* Shouldn't happen. */ + if (fde_count == 0) +@@ -431,7 +457,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + removed, we could cache this (and thus use search_object). */ + ob.pc_begin = NULL; + ob.tbase = NULL; +- ob.dbase = data->dbase; ++ ob.dbase = (void *) dbase; + ob.u.single = (fde *) eh_frame; + ob.s.i = 0; + ob.s.b.mixed_encoding = 1; /* Need to assume worst case. 
*/ +@@ -442,7 +468,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + unsigned int encoding = get_fde_encoding (data->ret); + + read_encoded_value_with_base (encoding, +- base_from_cb_data (encoding, data), ++ base_from_cb_data (encoding, dbase), + data->ret->pc_begin, &func); + data->func = (void *) func; + } +@@ -460,7 +486,9 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + return ret; + + data.pc = (_Unwind_Ptr) pc; ++#if NEED_DBASE_MEMBER + data.dbase = NULL; ++#endif + data.func = NULL; + data.ret = NULL; + data.check_cache = 1; +@@ -471,7 +499,11 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + if (data.ret) + { + bases->tbase = NULL; ++#if NEED_DBASE_MEMBER + bases->dbase = data.dbase; ++#else ++ bases->dbase = NULL; ++#endif + bases->func = data.func; + } + return data.ret; diff --git a/SOURCES/gcc-RHEL-105072-20.patch b/SOURCES/gcc-RHEL-105072-20.patch new file mode 100644 index 0000000..e6a5b70 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-20.patch @@ -0,0 +1,38 @@ +commit 30adfb85ff994c0faa0cc556ba46838b218263f5 +Author: Thomas Neumann +Date: Mon May 15 14:59:22 2023 +0200 + + fix assert in non-atomic path + + The non-atomic path does not have range information, + we have to adjust the assert handle that case, too. + + libgcc/ChangeLog: + * unwind-dw2-fde.c: Fix assert in non-atomic path. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index fdf52396e8576b79..9b0c229efa5427a9 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -240,6 +240,7 @@ __deregister_frame_info_bases (const void *begin) + + // And remove + ob = btree_remove (®istered_frames, range[0]); ++ bool empty_table = (range[1] - range[0]) == 0; + #else + init_object_mutex_once (); + __gthread_mutex_lock (&object_mutex); +@@ -276,11 +277,12 @@ __deregister_frame_info_bases (const void *begin) + + out: + __gthread_mutex_unlock (&object_mutex); ++ const int empty_table = 0; // The non-atomic path stores all tables. + #endif + + // If we didn't find anything in the lookup data structures then they + // were either already destroyed or we tried to remove an empty range. +- gcc_assert (in_shutdown || ((range[1] - range[0]) == 0 || ob)); ++ gcc_assert (in_shutdown || (empty_table || ob)); + return (void *) ob; + } + diff --git a/SOURCES/gcc-RHEL-105072-21.patch b/SOURCES/gcc-RHEL-105072-21.patch new file mode 100644 index 0000000..1fab924 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-21.patch @@ -0,0 +1,31 @@ +commit 5cf60b6ba111f4169305c7832b063b000e9ec36a +Author: Thomas Neumann +Date: Tue May 2 16:21:09 2023 +0200 + + release the sorted FDE array when deregistering a frame [PR109685] + + The atomic fastpath bypasses the code that releases the sort + array which was lazily allocated during unwinding. We now + check after deregistering if there is an array to free. + + libgcc/ChangeLog: + PR libgcc/109685 + * unwind-dw2-fde.c: Free sort array in atomic fast path. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index 9b0c229efa5427a9..0fd2fc54aa651350 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -241,6 +241,12 @@ __deregister_frame_info_bases (const void *begin) + // And remove + ob = btree_remove (®istered_frames, range[0]); + bool empty_table = (range[1] - range[0]) == 0; ++ ++ // Deallocate the sort array if any. 
++ if (ob && ob->s.b.sorted) ++ { ++ free (ob->u.sort); ++ } + #else + init_object_mutex_once (); + __gthread_mutex_lock (&object_mutex); diff --git a/SOURCES/gcc-RHEL-105072-22.patch b/SOURCES/gcc-RHEL-105072-22.patch new file mode 100644 index 0000000..cd7f38e --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-22.patch @@ -0,0 +1,51 @@ +commit 38e88d41f50d844f1404172657ef7e8372014ef6 +Author: Thomas Neumann +Date: Wed May 10 12:33:49 2023 +0200 + + fix radix sort on 32bit platforms [PR109670] + + The radix sort uses two buffers, a1 for input and a2 for output. + After every digit the role of the two buffers is swapped. + When terminating the sort early the code made sure the output + was in a2. However, when we run out of bits, as can happen on + 32bit platforms, the sorted result was in a1, as we had just + swapped a1 and a2. + This patch fixes the problem by unconditionally having a1 as + output after every loop iteration. + + This bug manifested itself only on 32bit platforms and even then + only in some circumstances, as it needs frames where a swap + is required due to differences in the top-most byte, which is + affected by ASLR. The new logic was validated by exhaustive + search over 32bit input values. + + libgcc/ChangeLog: + PR libgcc/109670 + * unwind-dw2-fde.c: Fix radix sort buffer management. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index 0fd2fc54aa651350..41b8c2e9380bc45b 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -634,8 +634,6 @@ fde_radixsort (struct object *ob, fde_extractor_t fde_extractor, + // Stop if we are already sorted. + if (!violations) + { +- // The sorted data is in a1 now. +- a2 = a1; + break; + } + +@@ -670,9 +668,9 @@ fde_radixsort (struct object *ob, fde_extractor_t fde_extractor, + #undef FANOUT + #undef FANOUTBITS + +- // The data is in a2 now, move in place if needed. +- if (a2 != v1->array) +- memcpy (v1->array, a2, sizeof (const fde *) * n); ++ // The data is in a1 now, move in place if needed. ++ if (a1 != v1->array) ++ memcpy (v1->array, a1, sizeof (const fde *) * n); + } + + static inline void diff --git a/SOURCES/gcc-RHEL-105072-23.patch b/SOURCES/gcc-RHEL-105072-23.patch new file mode 100644 index 0000000..cdc90df --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-23.patch @@ -0,0 +1,33 @@ +commit 49310a993308492348119f4033e4db0bda4fe46a +Author: Florian Weimer +Date: Tue Jun 6 11:01:07 2023 +0200 + + libgcc: Fix eh_frame fast path in find_fde_tail + + The eh_frame value is only used by linear_search_fdes, not the binary + search directly in find_fde_tail, so the bug is not immediately + apparent with most programs. + + Fixes commit e724b0480bfa5ec04f39be8c7290330b495c59de ("libgcc: + Special-case BFD ld unwind table encodings in find_fde_tail"). + + libgcc/ + + PR libgcc/109712 + * unwind-dw2-fde-dip.c (find_fde_tail): Correct fast path for + parsing eh_frame. + +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index d4821d7d19950f15..b46e95dc8f88ac5c 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -403,8 +403,8 @@ find_fde_tail (_Unwind_Ptr pc, + BFD ld generates. */ + signed value __attribute__ ((mode (SI))); + memcpy (&value, p, sizeof (value)); ++ eh_frame = p + value; + p += sizeof (value); +- dbase = value; /* No adjustment because pcrel has base 0. 
*/ + } + else + p = read_encoded_value_with_base (hdr->eh_frame_ptr_enc, diff --git a/SOURCES/gcc-RHEL-105072-24.patch b/SOURCES/gcc-RHEL-105072-24.patch new file mode 100644 index 0000000..df1b5af --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-24.patch @@ -0,0 +1,28 @@ +commit 104b09005229ef48a79a33511ea192bb3ec3c415 +Author: Florian Weimer +Date: Tue Jul 11 06:19:39 2023 +0200 + + libgcc: Fix -Wint-conversion warning in find_fde_tail + + Fixes commit r14-1614-g49310a99330849 ("libgcc: Fix eh_frame fast path + in find_fde_tail"). + + libgcc/ + + PR libgcc/110179 + * unwind-dw2-fde-dip.c (find_fde_tail): Add cast to avoid + implicit conversion of pointer value to integer. + +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index b46e95dc8f88ac5c..e08154c1442d748f 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -403,7 +403,7 @@ find_fde_tail (_Unwind_Ptr pc, + BFD ld generates. */ + signed value __attribute__ ((mode (SI))); + memcpy (&value, p, sizeof (value)); +- eh_frame = p + value; ++ eh_frame = (_Unwind_Ptr) (p + value); + p += sizeof (value); + } + else diff --git a/SOURCES/gcc-RHEL-105072-25.patch b/SOURCES/gcc-RHEL-105072-25.patch new file mode 100644 index 0000000..70d2634 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-25.patch @@ -0,0 +1,97 @@ +commit c46bded78f3733ad1312d141ebf1ae541032a48b +Author: Thomas Neumann +Date: Fri Aug 11 09:20:27 2023 -0600 + + preserve base pointer for __deregister_frame [PR110956] + + Original bug report: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110956 + Rainer Orth successfully tested the patch on Solaris with a full bootstrap. + + Some uncommon unwinding table encodings need to access the base pointer + for address computations. We do not have that information in calls to + __deregister_frame_info_bases, and previously simply used nullptr as + base pointer. That is usually fine, but for some Solaris i386 shared + libraries that results in wrong address computations. + + To fix this problem we now associate the unwinding object with + the table pointer itself, which is always known, in addition to + the PC range. When deregistering a frame, we first locate the object + using the table pointer, and then use the base pointer stored within + the object to compute the PC range. + + libgcc/ChangeLog: + PR libgcc/110956 + * unwind-dw2-fde.c: Associate object with address of unwinding + table. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index 41b8c2e9380bc45b..5d6fb29acd440563 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -124,6 +124,9 @@ __register_frame_info_bases (const void *begin, struct object *ob, + #endif + + #ifdef ATOMIC_FDE_FAST_PATH ++ // Register the object itself to know the base pointer on deregistration. ++ btree_insert (®istered_frames, (uintptr_type) begin, 1, ob); ++ + // Register the frame in the b-tree + uintptr_type range[2]; + get_pc_range (ob, range); +@@ -175,6 +178,9 @@ __register_frame_info_table_bases (void *begin, struct object *ob, + ob->s.b.encoding = DW_EH_PE_omit; + + #ifdef ATOMIC_FDE_FAST_PATH ++ // Register the object itself to know the base pointer on deregistration. 
++ btree_insert (®istered_frames, (uintptr_type) begin, 1, ob); ++ + // Register the frame in the b-tree + uintptr_type range[2]; + get_pc_range (ob, range); +@@ -225,22 +231,17 @@ __deregister_frame_info_bases (const void *begin) + return ob; + + #ifdef ATOMIC_FDE_FAST_PATH +- // Find the corresponding PC range +- struct object lookupob; +- lookupob.tbase = 0; +- lookupob.dbase = 0; +- lookupob.u.single = begin; +- lookupob.s.i = 0; +- lookupob.s.b.encoding = DW_EH_PE_omit; +-#ifdef DWARF2_OBJECT_END_PTR_EXTENSION +- lookupob.fde_end = NULL; +-#endif +- uintptr_type range[2]; +- get_pc_range (&lookupob, range); ++ // Find the originally registered object to get the base pointer. ++ ob = btree_remove (®istered_frames, (uintptr_type) begin); + +- // And remove +- ob = btree_remove (®istered_frames, range[0]); +- bool empty_table = (range[1] - range[0]) == 0; ++ // Remove the corresponding PC range. ++ if (ob) ++ { ++ uintptr_type range[2]; ++ get_pc_range (ob, range); ++ if (range[0] != range[1]) ++ btree_remove (®istered_frames, range[0]); ++ } + + // Deallocate the sort array if any. + if (ob && ob->s.b.sorted) +@@ -283,12 +284,11 @@ __deregister_frame_info_bases (const void *begin) + + out: + __gthread_mutex_unlock (&object_mutex); +- const int empty_table = 0; // The non-atomic path stores all tables. + #endif + + // If we didn't find anything in the lookup data structures then they + // were either already destroyed or we tried to remove an empty range. +- gcc_assert (in_shutdown || (empty_table || ob)); ++ gcc_assert (in_shutdown || ob); + return (void *) ob; + } + diff --git a/SOURCES/gcc-RHEL-105072-26.patch b/SOURCES/gcc-RHEL-105072-26.patch new file mode 100644 index 0000000..d9c173c --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-26.patch @@ -0,0 +1,112 @@ +commit a364148530c28645ce87adbc58a66c9f32a325ab +Author: Thomas Neumann +Date: Mon Mar 11 14:35:20 2024 +0100 + + handle unwind tables that are embedded within unwinding code [PR111731] + + Original bug report: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111731 + + The unwinding mechanism registers both the code range and the unwind + table itself within a b-tree lookup structure. That data structure + assumes that is consists of non-overlappping intervals. This + becomes a problem if the unwinding table is embedded within the + code itself, as now the intervals do overlap. + + To fix this problem we now keep the unwind tables in a separate + b-tree, which prevents the overlap. + + libgcc/ChangeLog: + PR libgcc/111731 + * unwind-dw2-fde.c: Split unwind ranges if they contain the + unwind table. + +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index 5d6fb29acd440563..421068b538abc66d 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -48,6 +48,7 @@ typedef __UINTPTR_TYPE__ uintptr_type; + #include "unwind-dw2-btree.h" + + static struct btree registered_frames; ++static struct btree registered_objects; + static bool in_shutdown; + + static void +@@ -58,6 +59,7 @@ release_registered_frames (void) + /* Release the b-tree and all frames. Frame releases that happen later are + * silently ignored */ + btree_destroy (®istered_frames); ++ btree_destroy (®istered_objects); + in_shutdown = true; + } + +@@ -103,6 +105,21 @@ static __gthread_mutex_t object_mutex; + #endif + #endif + ++#ifdef ATOMIC_FDE_FAST_PATH ++// Register the pc range for a given object in the lookup structure. 
++static void ++register_pc_range_for_object (uintptr_type begin, struct object *ob) ++{ ++ // Register the object itself to know the base pointer on deregistration. ++ btree_insert (®istered_objects, begin, 1, ob); ++ ++ // Register the frame in the b-tree ++ uintptr_type range[2]; ++ get_pc_range (ob, range); ++ btree_insert (®istered_frames, range[0], range[1] - range[0], ob); ++} ++#endif ++ + /* Called from crtbegin.o to register the unwind info for an object. */ + + void +@@ -124,13 +141,7 @@ __register_frame_info_bases (const void *begin, struct object *ob, + #endif + + #ifdef ATOMIC_FDE_FAST_PATH +- // Register the object itself to know the base pointer on deregistration. +- btree_insert (®istered_frames, (uintptr_type) begin, 1, ob); +- +- // Register the frame in the b-tree +- uintptr_type range[2]; +- get_pc_range (ob, range); +- btree_insert (®istered_frames, range[0], range[1] - range[0], ob); ++ register_pc_range_for_object ((uintptr_type) begin, ob); + #else + init_object_mutex_once (); + __gthread_mutex_lock (&object_mutex); +@@ -178,13 +189,7 @@ __register_frame_info_table_bases (void *begin, struct object *ob, + ob->s.b.encoding = DW_EH_PE_omit; + + #ifdef ATOMIC_FDE_FAST_PATH +- // Register the object itself to know the base pointer on deregistration. +- btree_insert (®istered_frames, (uintptr_type) begin, 1, ob); +- +- // Register the frame in the b-tree +- uintptr_type range[2]; +- get_pc_range (ob, range); +- btree_insert (®istered_frames, range[0], range[1] - range[0], ob); ++ register_pc_range_for_object ((uintptr_type) begin, ob); + #else + init_object_mutex_once (); + __gthread_mutex_lock (&object_mutex); +@@ -232,7 +237,7 @@ __deregister_frame_info_bases (const void *begin) + + #ifdef ATOMIC_FDE_FAST_PATH + // Find the originally registered object to get the base pointer. +- ob = btree_remove (®istered_frames, (uintptr_type) begin); ++ ob = btree_remove (®istered_objects, (uintptr_type) begin); + + // Remove the corresponding PC range. + if (ob) +@@ -240,7 +245,7 @@ __deregister_frame_info_bases (const void *begin) + uintptr_type range[2]; + get_pc_range (ob, range); + if (range[0] != range[1]) +- btree_remove (®istered_frames, range[0]); ++ btree_remove (®istered_frames, range[0]); + } + + // Deallocate the sort array if any. diff --git a/SOURCES/gcc-RHEL-105072-27.patch b/SOURCES/gcc-RHEL-105072-27.patch new file mode 100644 index 0000000..b15b78d --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-27.patch @@ -0,0 +1,299 @@ +commit 21109b37e8585a7a1b27650fcbf1749380016108 +Author: Jakub Jelinek +Date: Mon Mar 10 10:34:00 2025 +0100 + + libgcc: Fix up unwind-dw2-btree.h [PR119151] + + The following testcase shows a bug in unwind-dw2-btree.h. + In short, the header provides lock-free btree data structure (so no parent + link on nodes, both insertion and deletion are done in top-down walks + with some locking of just a few nodes at a time so that lookups can notice + concurrent modifications and retry, non-leaf (inner) nodes contain keys + which are initially the base address of the left-most leaf entry of the + following child (or all ones if there is none) minus one, insertion ensures + balancing of the tree to ensure [d/2, d] entries filled through aggressive + splitting if it sees a full tree while walking, deletion performs various + operations like merging neighbour trees, merging into parent or moving some + nodes from neighbour to the current one). 
+ What differs from the textbook implementations is mostly that the leaf nodes + don't include just address as a key, but address range, address + size + (where we don't insert any ranges with zero size) and the lookups can be + performed for any address in the [address, address + size) range. The keys + on inner nodes are still just address-1, so the child covers all nodes + where addr <= key unless it is covered already in children to the left. + The user (static executables or JIT) should always ensure there is no + overlap in between any of the ranges. + + In the testcase a bunch of insertions are done, always followed by one + removal, followed by one insertion of a range slightly different from the + removed one. E.g. in the first case [&code[0x50], &code[0x59]] range + is removed and then we insert [&code[0x4c], &code[0x53]] range instead. + This is valid, it doesn't overlap anything. But the problem is that some + non-leaf (inner) one used the &code[0x4f] key (after the 11 insertions + completely correctly). On removal, nothing adjusts the keys on the parent + nodes (it really can't in the top-down only walk, the keys could be many nodes + above it and unlike insertion, removal only knows the start address, doesn't + know the removed size and so will discover it only when reaching the leaf + node which contains it; plus even if it knew the address and size, it still + doesn't know what the second left-most leaf node will be (i.e. the one after + removal)). And on insertion, if nodes aren't split at a level, nothing + adjusts the inner keys either. If a range is inserted and is either fully + bellow key (keys are - 1, so having address + size - 1 being equal to key is + fine) or fully after key (i.e. address > key), it works just fine, but if + the key is in a middle of the range like in this case, &code[0x4f] is in the + middle of the [&code[0x4c], &code[0x53]] range, then insertion works fine + (we only use size on the leaf nodes), and lookup of the addresses below + the key work fine too (i.e. [&code[0x4c], &code[0x4f]] will succeed). + The problem is with lookups after the key (i.e. [&code[0x50, &code[0x53]]), + the lookup looks for them in different children of the btree and doesn't + find an entry and returns NULL. + + As users need to ensure non-overlapping entries at any time, the following + patch fixes it by adjusting keys during insertion where we know not just + the address but also size; if we find during the top-down walk a key + which is in the middle of the range being inserted, we simply increase the + key to be equal to address + size - 1 of the range being inserted. + There can't be any existing leaf nodes overlapping the range in correct + programs and the btree rebalancing done on deletion ensures we don't have + any empty nodes which would also cause problems. + + The patch adjusts the keys in two spots, once for the current node being + walked (the last hunk in the header, with large comment trying to explain + it) and once during inner node splitting in a parent node if we'd otherwise + try to add that key in the middle of the range being inserted into the + parent node (in that case it would be missed in the last hunk). 
+ The testcase covers both of those spots, so succeeds with GCC 12 (which + didn't have btrees) and fails with vanilla GCC trunk and also fails if + either the + if (fence < base + size - 1) + fence = iter->content.children[slot].separator = base + size - 1; + or + if (left_fence >= target && left_fence < target + size - 1) + left_fence = target + size - 1; + hunk is removed (of course, only with the current node sizes, i.e. up to + 15 children of inner nodes and up to 10 entries in leaf nodes). + + 2025-03-10 Jakub Jelinek + Michael Leuchtenburg + + PR libgcc/119151 + * unwind-dw2-btree.h (btree_split_inner): Add size argument. If + left_fence is in the middle of [target,target + size - 1] range, + increase it to target + size - 1. + (btree_insert): Adjust btree_split_inner caller. If fence is smaller + than base + size - 1, increase it and separator of the slot to + base + size - 1. + + * gcc.dg/pr119151.c: New test. + +diff --git a/gcc/testsuite/gcc.dg/pr119151.c b/gcc/testsuite/gcc.dg/pr119151.c +new file mode 100644 +index 0000000000000000..6ef0f12ce9ae6c06 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/pr119151.c +@@ -0,0 +1,151 @@ ++/* PR libgcc/119151 */ ++/* Should be run just on targets which don't have _Unwind_Find_FDE in libc.so. */ ++/* { dg-do run { target { { x86_64-*-linux* aarch64*-*-linux* powerpc64*-*-linux* riscv*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O2" } */ ++ ++struct object ++{ ++ void *pc_begin, *tbase, *dbase, *single; ++ __SIZE_TYPE__ i; ++ void *fde_end, *next; ++}; ++struct dwarf_eh_bases ++{ ++ void *tbase, *dbase, *func; ++}; ++extern void __register_frame_info (const void *, struct object *); ++extern void *__deregister_frame_info (const void *); ++extern const void *_Unwind_Find_FDE (void *, struct dwarf_eh_bases *); ++#define DW_EH_PE_sdata8 0x0c ++#define DW_EH_PE_pcrel 0x10 ++#define DW_CFA_def_cfa 0x0c ++#define DW_CFA_offset 0x80 ++ ++struct __attribute__((aligned (8))) eh_frame_cie { ++ unsigned len; ++ unsigned tag; ++ unsigned char version; ++ unsigned char augmentation[3]; ++ unsigned char code_align_factor; ++ unsigned char data_align_factor; ++ unsigned char ra_column; ++ unsigned char augmentation_size; ++ unsigned char encoding; ++ unsigned char def_cfa; ++ unsigned char def_cfa_op1, def_cfa_op2; ++ unsigned char offset; ++ unsigned char offset_op; ++}; ++struct __attribute__((aligned (8))) eh_frame_fde { ++ unsigned len; ++ unsigned cie_offset; ++ unsigned long long begin, size; ++ unsigned char augmentation; ++}; ++struct eh_frame_cie_fde { ++ struct eh_frame_cie cie; ++ struct eh_frame_fde fde; ++ unsigned int zero; ++ struct object obj; ++} eh_frame[256]; ++unsigned ehidx; ++unsigned char code[0x800] __attribute__((aligned (8))); ++ ++void * ++register_range (void *addr, unsigned size) ++{ ++ /* Fills in empty-ish CIE and FDE with pcrel sdata8 encoding so that ++ we don't need to worry about lp64 large code models. ++ We don't actually execute anything in code and only _Unwind_Find_FDE, ++ don't actually try to unwind anything. 
*/ ++ eh_frame[ehidx].cie.len ++ = (unsigned) ((char *) &eh_frame[ehidx].fde ++ - (char *) &eh_frame[ehidx].cie.tag); ++ eh_frame[ehidx].cie.tag = 0; ++ eh_frame[ehidx].cie.version = 3; ++ __builtin_memcpy (eh_frame[ehidx].cie.augmentation, "zR", 3); ++ eh_frame[ehidx].cie.code_align_factor = 1; ++ eh_frame[ehidx].cie.data_align_factor = 0x78; /* sleb128 -8 */ ++ eh_frame[ehidx].cie.ra_column = 0x10; ++ eh_frame[ehidx].cie.augmentation_size = 1; ++ eh_frame[ehidx].cie.encoding = DW_EH_PE_pcrel | DW_EH_PE_sdata8; ++ eh_frame[ehidx].cie.def_cfa = DW_CFA_def_cfa; ++ eh_frame[ehidx].cie.def_cfa_op1 = 7; ++ eh_frame[ehidx].cie.def_cfa_op2 = 8; ++ eh_frame[ehidx].cie.offset = DW_CFA_offset + 0x10; ++ eh_frame[ehidx].cie.offset_op = 1; ++ eh_frame[ehidx].fde.len ++ = (unsigned) ((char *) &eh_frame[ehidx].zero ++ - (char *) &eh_frame[ehidx].fde.cie_offset); ++ eh_frame[ehidx].fde.cie_offset ++ = (unsigned) ((char *) &eh_frame[ehidx].fde.cie_offset ++ - (char *) &eh_frame[ehidx].cie); ++ eh_frame[ehidx].fde.begin ++ = (__INTPTR_TYPE__) ((__UINTPTR_TYPE__) addr ++ - (__UINTPTR_TYPE__) &eh_frame[ehidx].fde.begin); ++ eh_frame[ehidx].fde.size = size; ++ eh_frame[ehidx].fde.augmentation = 0; ++ eh_frame[ehidx].zero = 0; ++ __register_frame_info (&eh_frame[ehidx].cie, &eh_frame[ehidx].obj); ++ ++ehidx; ++ return &eh_frame[ehidx - 1].cie; ++} ++ ++void ++unregister (void *eh_frame) ++{ ++ __deregister_frame_info (eh_frame); ++} ++ ++int ++main () ++{ ++ for (int i = 0; i < 0x50; i += 0x10) ++ register_range (&code[i], 10); ++ void *p = register_range (&code[0x50], 10); ++ for (int i = 0x60; i < 0xb0; i += 0x10) ++ register_range (&code[i], 10); ++ unregister (p); ++ register_range (&code[0x4c], 8); ++ struct dwarf_eh_bases bases; ++ const void *q = _Unwind_Find_FDE (&code[0x4c], &bases); ++ const void *r = _Unwind_Find_FDE (&code[0x51], &bases); ++ if (!q || q != r) ++ __builtin_abort (); ++ for (int i = 0; i <= 0xa0; i += 0x10) ++ if (i != 0x50) ++ { ++ q = _Unwind_Find_FDE (&code[i], &bases); ++ r = _Unwind_Find_FDE (&code[i + 9], &bases); ++ if (!q || q != r) ++ __builtin_abort (); ++ } ++ for (int i = 0xb0; i < 0x240; i += 0x10) ++ register_range (&code[i], 10); ++ p = register_range (&code[0x240], 10); ++ for (int i = 0x250; i < 0x470; i += 0x10) ++ register_range (&code[i], 10); ++ void *s = register_range (&code[0x470], 10); ++ for (int i = 0x480; i < 0x700; i += 0x10) ++ register_range (&code[i], 10); ++ unregister (p); ++ register_range (&code[0x23c], 16); ++ q = _Unwind_Find_FDE (&code[0x23d], &bases); ++ r = _Unwind_Find_FDE (&code[0x24b], &bases); ++ if (!q || q != r) ++ __builtin_abort (); ++ unregister (s); ++ register_range (&code[0x46c], 16); ++ q = _Unwind_Find_FDE (&code[0x46d], &bases); ++ r = _Unwind_Find_FDE (&code[0x47b], &bases); ++ if (!q || q != r) ++ __builtin_abort (); ++ for (int i = 0; i < 0x700; i += 0x10) ++ if (i != 0x50 && i != 0x240 && i != 0x470) ++ { ++ q = _Unwind_Find_FDE (&code[i], &bases); ++ r = _Unwind_Find_FDE (&code[i + 9], &bases); ++ if (!q || q != r) ++ __builtin_abort (); ++ } ++} +diff --git a/libgcc/unwind-dw2-btree.h b/libgcc/unwind-dw2-btree.h +index ace507d9ffbdffb7..e3f3a11a7b5443d6 100644 +--- a/libgcc/unwind-dw2-btree.h ++++ b/libgcc/unwind-dw2-btree.h +@@ -474,7 +474,8 @@ btree_handle_root_split (struct btree *t, struct btree_node **node, + // Split an inner node. 
+ static void + btree_split_inner (struct btree *t, struct btree_node **inner, +- struct btree_node **parent, uintptr_type target) ++ struct btree_node **parent, uintptr_type target, ++ uintptr_type size) + { + // Check for the root. + btree_handle_root_split (t, inner, parent); +@@ -490,6 +491,9 @@ btree_split_inner (struct btree *t, struct btree_node **inner, + = left_inner->content.children[split + index]; + left_inner->entry_count = split; + uintptr_type left_fence = btree_node_get_fence_key (left_inner); ++ if (left_fence >= target && left_fence < target + size - 1) ++ // See the PR119151 comment in btree_insert. ++ left_fence = target + size - 1; + btree_node_update_separator_after_split (*parent, right_fence, left_fence, + right_inner); + if (target <= left_fence) +@@ -753,13 +757,28 @@ btree_insert (struct btree *t, uintptr_type base, uintptr_type size, + { + // Use eager splits to avoid lock coupling up. + if (iter->entry_count == max_fanout_inner) +- btree_split_inner (t, &iter, &parent, base); ++ btree_split_inner (t, &iter, &parent, base, size); + + unsigned slot = btree_node_find_inner_slot (iter, base); + if (parent) + btree_node_unlock_exclusive (parent); + parent = iter; + fence = iter->content.children[slot].separator; ++ if (fence < base + size - 1) ++ // The separator was set to the base - 1 of the leftmost leaf child ++ // at some point but such an entry could have been removed afterwards. ++ // As both insertion and removal are just walking down the tree with ++ // only a few current nodes locked at a time, updating the separator ++ // on removal is not possible, especially because btree_remove does ++ // not know the size until it reaches leaf node. We must ensure that ++ // the separator is not in a middle of some entry though, as ++ // btree_lookup can look up any address in the entry's range and if ++ // the separator is in the middle, addresses below it or equal to it ++ // would be found while addresses above it would result in failed ++ // lookup. Update the separator now. Assumption that users ++ // ensure no overlapping registered ranges, there should be no ++ // current entry for any address in the range. See PR119151. ++ fence = iter->content.children[slot].separator = base + size - 1; + iter = iter->content.children[slot].child; + btree_node_lock_exclusive (iter); + } diff --git a/SOURCES/gcc-RHEL-105072-3.patch b/SOURCES/gcc-RHEL-105072-3.patch new file mode 100644 index 0000000..c14b039 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-3.patch @@ -0,0 +1,204 @@ +commit 9488d24206687be80443dafdb2cdfc4ff3aca28c +Author: Florian Weimer +Date: Thu Nov 25 18:40:51 2021 +0100 + + libgcc: Split FDE search code from PT_GNU_EH_FRAME lookup + + This allows switching to a different implementation for + PT_GNU_EH_FRAME lookup in a subsequent commit. + + This moves some of the PT_GNU_EH_FRAME parsing out of the glibc loader + lock that is implied by dl_iterate_phdr. However, the FDE is already + parsed outside the lock before this change, so this does not introduce + additional crashes in case of a concurrent dlclose. + + libgcc/ChangeLog: + + * unwind-dw2-fde-dip.c (struct unw_eh_callback_data): Add hdr. + Remove func, ret. + (find_fde_tail): New function. Split from + _Unwind_IteratePhdrCallback. Move the result initialization + from _Unwind_Find_FDE. + (_Unwind_Find_FDE): Updated to call find_fde_tail. 
+ +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index 3f302826d2d49074..fbb0fbdebb92d484 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -113,8 +113,7 @@ struct unw_eh_callback_data + #if NEED_DBASE_MEMBER + void *dbase; + #endif +- void *func; +- const fde *ret; ++ const struct unw_eh_frame_hdr *hdr; + int check_cache; + }; + +@@ -197,10 +196,6 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + #else + _Unwind_Ptr load_base; + #endif +- const unsigned char *p; +- const struct unw_eh_frame_hdr *hdr; +- _Unwind_Ptr eh_frame; +- struct object ob; + _Unwind_Ptr pc_low = 0, pc_high = 0; + + struct ext_dl_phdr_info +@@ -348,10 +343,8 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + return 0; + + /* Read .eh_frame_hdr header. */ +- hdr = (const struct unw_eh_frame_hdr *) ++ data->hdr = (const struct unw_eh_frame_hdr *) + __RELOC_POINTER (p_eh_frame_hdr->p_vaddr, load_base); +- if (hdr->version != 1) +- return 1; + + #ifdef CRT_GET_RFIB_DATA + # if defined __i386__ || defined __nios2__ +@@ -383,12 +376,30 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + # endif + #endif + +- _Unwind_Ptr dbase = unw_eh_callback_data_dbase (data); ++ return 1; ++} ++ ++/* Find the FDE for the program counter PC, in a previously located ++ PT_GNU_EH_FRAME data region. *BASES is updated if an FDE to return is ++ found. */ ++ ++static const fde * ++find_fde_tail (_Unwind_Ptr pc, ++ const struct unw_eh_frame_hdr *hdr, ++ _Unwind_Ptr dbase, ++ struct dwarf_eh_bases *bases) ++{ ++ const unsigned char *p = (const unsigned char *) (hdr + 1); ++ _Unwind_Ptr eh_frame; ++ struct object ob; ++ ++ if (hdr->version != 1) ++ return NULL; ++ + p = read_encoded_value_with_base (hdr->eh_frame_ptr_enc, + base_from_cb_data (hdr->eh_frame_ptr_enc, + dbase), +- (const unsigned char *) (hdr + 1), +- &eh_frame); ++ p, &eh_frame); + + /* We require here specific table encoding to speed things up. + Also, DW_EH_PE_datarel here means using PT_GNU_EH_FRAME start +@@ -404,7 +415,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + p, &fde_count); + /* Shouldn't happen. 
*/ + if (fde_count == 0) +- return 1; ++ return NULL; + if ((((_Unwind_Ptr) p) & 3) == 0) + { + struct fde_table { +@@ -419,9 +430,9 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + _Unwind_Ptr range; + + mid = fde_count - 1; +- if (data->pc < table[0].initial_loc + data_base) +- return 1; +- else if (data->pc < table[mid].initial_loc + data_base) ++ if (pc < table[0].initial_loc + data_base) ++ return NULL; ++ else if (pc < table[mid].initial_loc + data_base) + { + lo = 0; + hi = mid; +@@ -429,9 +440,9 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + while (lo < hi) + { + mid = (lo + hi) / 2; +- if (data->pc < table[mid].initial_loc + data_base) ++ if (pc < table[mid].initial_loc + data_base) + hi = mid; +- else if (data->pc >= table[mid + 1].initial_loc + data_base) ++ else if (pc >= table[mid + 1].initial_loc + data_base) + lo = mid + 1; + else + break; +@@ -445,10 +456,16 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + f_enc_size = size_of_encoded_value (f_enc); + read_encoded_value_with_base (f_enc & 0x0f, 0, + &f->pc_begin[f_enc_size], &range); +- if (data->pc < table[mid].initial_loc + data_base + range) +- data->ret = f; +- data->func = (void *) (table[mid].initial_loc + data_base); +- return 1; ++ _Unwind_Ptr func = table[mid].initial_loc + data_base; ++ if (pc < table[mid].initial_loc + data_base + range) ++ { ++ bases->tbase = NULL; ++ bases->dbase = (void *) dbase; ++ bases->func = (void *) func; ++ return f; ++ } ++ else ++ return NULL; + } + } + +@@ -461,18 +478,20 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr) + ob.u.single = (fde *) eh_frame; + ob.s.i = 0; + ob.s.b.mixed_encoding = 1; /* Need to assume worst case. 
*/ +- data->ret = linear_search_fdes (&ob, (fde *) eh_frame, (void *) data->pc); +- if (data->ret != NULL) ++ const fde *entry = linear_search_fdes (&ob, (fde *) eh_frame, (void *) pc); ++ if (entry != NULL) + { + _Unwind_Ptr func; +- unsigned int encoding = get_fde_encoding (data->ret); ++ unsigned int encoding = get_fde_encoding (entry); + + read_encoded_value_with_base (encoding, + base_from_cb_data (encoding, dbase), +- data->ret->pc_begin, &func); +- data->func = (void *) func; ++ entry->pc_begin, &func); ++ bases->tbase = NULL; ++ bases->dbase = (void *) dbase; ++ bases->func = (void *) func; + } +- return 1; ++ return entry; + } + + const fde * +@@ -489,24 +508,13 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + #if NEED_DBASE_MEMBER + data.dbase = NULL; + #endif +- data.func = NULL; +- data.ret = NULL; + data.check_cache = 1; + +- if (dl_iterate_phdr (_Unwind_IteratePhdrCallback, &data) < 0) ++ if (dl_iterate_phdr (_Unwind_IteratePhdrCallback, &data) <= 0) + return NULL; + +- if (data.ret) +- { +- bases->tbase = NULL; +-#if NEED_DBASE_MEMBER +- bases->dbase = data.dbase; +-#else +- bases->dbase = NULL; +-#endif +- bases->func = data.func; +- } +- return data.ret; ++ _Unwind_Ptr dbase = unw_eh_callback_data_dbase (&data); ++ return find_fde_tail ((_Unwind_Ptr) pc, data.hdr, dbase, bases); + } + + #else diff --git a/SOURCES/gcc-RHEL-105072-4.patch b/SOURCES/gcc-RHEL-105072-4.patch new file mode 100644 index 0000000..d51be60 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-4.patch @@ -0,0 +1,40 @@ +commit 790854ea7670f11c14d431c102a49181d2915965 +Author: Florian Weimer +Date: Tue Jan 4 15:47:30 2022 +0100 + + libgcc: Use _dl_find_object in _Unwind_Find_FDE + + libgcc/ChangeLog: + + * unwind-dw2-fde-dip.c (_Unwind_Find_FDE): Call _dl_find_object + if available. + +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index fbb0fbdebb92d484..b837d8e490425652 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -504,6 +504,24 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + if (ret != NULL) + return ret; + ++ /* Use DLFO_STRUCT_HAS_EH_DBASE as a proxy for the existence of a glibc-style ++ _dl_find_object function. */ ++#ifdef DLFO_STRUCT_HAS_EH_DBASE ++ { ++ struct dl_find_object dlfo; ++ if (_dl_find_object (pc, &dlfo) == 0) ++ return find_fde_tail ((_Unwind_Ptr) pc, dlfo.dlfo_eh_frame, ++# if DLFO_STRUCT_HAS_EH_DBASE ++ (_Unwind_Ptr) dlfo.dlfo_eh_dbase, ++# else ++ NULL, ++# endif ++ bases); ++ else ++ return NULL; ++ } ++#endif /* DLFO_STRUCT_HAS_EH_DBASE */ ++ + data.pc = (_Unwind_Ptr) pc; + #if NEED_DBASE_MEMBER + data.dbase = NULL; diff --git a/SOURCES/gcc-RHEL-105072-5.patch b/SOURCES/gcc-RHEL-105072-5.patch new file mode 100644 index 0000000..90a89d2 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-5.patch @@ -0,0 +1,27 @@ +commit ab2a2457780d224343ce05e7d8e2964c6a47fd83 +Author: Florian Weimer +Date: Tue Jan 25 12:09:56 2022 +0100 + + libgcc: Fix _Unwind_Find_FDE for missing unwind data with glibc 2.35 + + _dl_find_object returns success even if no unwind information has been + found, and dlfo_eh_frame is NULL. + + libgcc/ChangeLog: + + PR libgcc/104207 + * unwind-dw2-fde-dip.c (_Unwind_Find_FDE): Add NULL check. 
+ +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index b837d8e490425652..1744c91958013ebb 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -509,7 +509,7 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + #ifdef DLFO_STRUCT_HAS_EH_DBASE + { + struct dl_find_object dlfo; +- if (_dl_find_object (pc, &dlfo) == 0) ++ if (_dl_find_object (pc, &dlfo) == 0 && dlfo.dlfo_eh_frame != NULL) + return find_fde_tail ((_Unwind_Ptr) pc, dlfo.dlfo_eh_frame, + # if DLFO_STRUCT_HAS_EH_DBASE + (_Unwind_Ptr) dlfo.dlfo_eh_dbase, diff --git a/SOURCES/gcc-RHEL-105072-6.patch b/SOURCES/gcc-RHEL-105072-6.patch new file mode 100644 index 0000000..03b408a --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-6.patch @@ -0,0 +1,28 @@ +commit 157cc4e0117756503c7c63df97cf31de7570b088 +Author: Xi Ruoyao +Date: Fri Feb 25 01:45:57 2022 +0800 + + libgcc: fix a warning calling find_fde_tail + + The third parameter of find_fde_tail is an _Unwind_Ptr (which is an + integer type instead of a pointer), but we are passing NULL to it. This + causes a -Wint-conversion warning. + + libgcc/ + + * unwind-dw2-fde-dip.c (_Unwind_Find_FDE): Call find_fde_tail + with 0 instead of NULL. + +diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c +index 1744c91958013ebb..25f2e44c5823cf64 100644 +--- a/libgcc/unwind-dw2-fde-dip.c ++++ b/libgcc/unwind-dw2-fde-dip.c +@@ -514,7 +514,7 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + # if DLFO_STRUCT_HAS_EH_DBASE + (_Unwind_Ptr) dlfo.dlfo_eh_dbase, + # else +- NULL, ++ 0, + # endif + bases); + else diff --git a/SOURCES/gcc-RHEL-105072-7.patch b/SOURCES/gcc-RHEL-105072-7.patch new file mode 100644 index 0000000..df157fc --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-7.patch @@ -0,0 +1,222 @@ +commit 0d344b557604e966dc7f91739881f03e1f221efd +Author: Szabolcs Nagy +Date: Thu Feb 10 17:42:56 2022 +0000 + + aarch64: Fix pac-ret with unusual dwarf in libgcc unwinder [PR104689] + + The RA_SIGN_STATE dwarf pseudo-register is normally only set using the + DW_CFA_AARCH64_negate_ra_state (== DW_CFA_window_save) operation which + toggles the return address signedness state (the default state is 0). + (It may be set by remember/restore_state CFI too, those save/restore + the state of all registers.) + + However RA_SIGN_STATE can be set directly via DW_CFA_val_expression too. + GCC does not generate such CFI but some other compilers reportedly do. + + Note: the toggle operation must not be mixed with other dwarf register + rule CFI within the same CIE and FDE. + + In libgcc we assume REG_UNSAVED means the RA_STATE is set using toggle + operations, otherwise we assume its value is set by other CFI. + + libgcc/ChangeLog: + + PR target/104689 + * config/aarch64/aarch64-unwind.h (aarch64_frob_update_context): + Handle the !REG_UNSAVED case. + * unwind-dw2.c (execute_cfa_program): Fail toggle if !REG_UNSAVED. + + gcc/testsuite/ChangeLog: + + PR target/104689 + * gcc.target/aarch64/pr104689.c: New test. + +diff --git a/gcc/testsuite/gcc.target/aarch64/pr104689.c b/gcc/testsuite/gcc.target/aarch64/pr104689.c +new file mode 100644 +index 0000000000000000..3b7adbdfe7d6f969 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/pr104689.c +@@ -0,0 +1,149 @@ ++/* PR target/104689. Unwind across pac-ret frames with unusual dwarf. 
*/ ++/* { dg-do run } */ ++/* { dg-require-effective-target lp64 } */ ++/* { dg-options "-fexceptions -O2" } */ ++ ++#include ++#include ++#include ++ ++#define die() \ ++ do { \ ++ printf ("%s:%d: reached unexpectedly.\n", __FILE__, __LINE__); \ ++ fflush (stdout); \ ++ abort (); \ ++ } while (0) ++ ++ ++/* Code to invoke unwinding with a logging callback. */ ++ ++static struct _Unwind_Exception exc; ++ ++static _Unwind_Reason_Code ++force_unwind_stop (int version, _Unwind_Action actions, ++ _Unwind_Exception_Class exc_class, ++ struct _Unwind_Exception *exc_obj, ++ struct _Unwind_Context *context, ++ void *stop_parameter) ++{ ++ printf ("%s: CFA: %p PC: %p actions: %d\n", ++ __func__, ++ (void *)_Unwind_GetCFA (context), ++ (void *)_Unwind_GetIP (context), ++ (int)actions); ++ if (actions & _UA_END_OF_STACK) ++ die (); ++ return _URC_NO_REASON; ++} ++ ++static void force_unwind (void) ++{ ++#ifndef __USING_SJLJ_EXCEPTIONS__ ++ _Unwind_ForcedUnwind (&exc, force_unwind_stop, 0); ++#else ++ _Unwind_SjLj_ForcedUnwind (&exc, force_unwind_stop, 0); ++#endif ++} ++ ++ ++/* Define functions with unusual pac-ret dwarf via top level asm. */ ++ ++#define STR(x) #x ++#define DW_CFA_val_expression 0x16 ++#define RA_SIGN_STATE 34 ++#define DW_OP_lit0 0x30 ++#define DW_OP_lit1 0x31 ++ ++#define cfi_escape(a1, a2, a3, a4) \ ++ ".cfi_escape " STR(a1) ", " STR(a2) ", " STR(a3) ", " STR(a4) ++ ++/* Bytes: 0x16 0x22 0x01 0x30 */ ++#define SET_RA_STATE_0 \ ++ cfi_escape (DW_CFA_val_expression, RA_SIGN_STATE, 1, DW_OP_lit0) ++ ++/* Bytes: 0x16 0x22 0x01 0x31 */ ++#define SET_RA_STATE_1 \ ++ cfi_escape (DW_CFA_val_expression, RA_SIGN_STATE, 1, DW_OP_lit1) ++ ++/* These function call their argument. */ ++void unusual_pac_ret (void *); ++void unusual_no_pac_ret (void *); ++ ++asm("" ++".global unusual_pac_ret\n" ++".type unusual_pac_ret, %function\n" ++"unusual_pac_ret:\n" ++" .cfi_startproc\n" ++" " SET_RA_STATE_0 "\n" ++" hint 25 // paciasp\n" ++" " SET_RA_STATE_1 "\n" ++" stp x29, x30, [sp, -16]!\n" ++" .cfi_def_cfa_offset 16\n" ++" .cfi_offset 29, -16\n" ++" .cfi_offset 30, -8\n" ++" mov x29, sp\n" ++" blr x0\n" ++" ldp x29, x30, [sp], 16\n" ++" .cfi_restore 30\n" ++" .cfi_restore 29\n" ++" .cfi_def_cfa_offset 0\n" ++" hint 29 // autiasp\n" ++" " SET_RA_STATE_0 "\n" ++" ret\n" ++" .cfi_endproc\n"); ++ ++asm("" ++".global unusual_no_pac_ret\n" ++".type unusual_no_pac_ret, %function\n" ++"unusual_no_pac_ret:\n" ++" .cfi_startproc\n" ++" " SET_RA_STATE_0 "\n" ++" stp x29, x30, [sp, -16]!\n" ++" .cfi_def_cfa_offset 16\n" ++" .cfi_offset 29, -16\n" ++" .cfi_offset 30, -8\n" ++" mov x29, sp\n" ++" blr x0\n" ++" ldp x29, x30, [sp], 16\n" ++" .cfi_restore 30\n" ++" .cfi_restore 29\n" ++" .cfi_def_cfa_offset 0\n" ++" ret\n" ++" .cfi_endproc\n"); ++ ++ ++/* Functions to create a call chain with mixed pac-ret dwarf. 
*/ ++ ++__attribute__((target("branch-protection=pac-ret"))) ++static void f2_pac_ret (void) ++{ ++ force_unwind (); ++ die (); ++} ++ ++__attribute__((target("branch-protection=none"))) ++static void f1_no_pac_ret (void) ++{ ++ unusual_pac_ret (f2_pac_ret); ++ die (); ++} ++ ++__attribute__((noinline, target("branch-protection=pac-ret"))) ++static void f0_pac_ret (void) ++{ ++ unusual_no_pac_ret (f1_no_pac_ret); ++ die (); ++} ++ ++static void cleanup_handler (void *p) ++{ ++ printf ("%s: Success.\n", __func__); ++ exit (0); ++} ++ ++int main () ++{ ++ char dummy __attribute__((cleanup (cleanup_handler))); ++ f0_pac_ret (); ++ die (); ++} +diff --git a/libgcc/config/aarch64/aarch64-unwind.h b/libgcc/config/aarch64/aarch64-unwind.h +index b6faa9b495094d8e..3158af4c8c371fac 100644 +--- a/libgcc/config/aarch64/aarch64-unwind.h ++++ b/libgcc/config/aarch64/aarch64-unwind.h +@@ -78,7 +78,13 @@ static inline void + aarch64_frob_update_context (struct _Unwind_Context *context, + _Unwind_FrameState *fs) + { +- if (fs->regs.reg[DWARF_REGNUM_AARCH64_RA_STATE].loc.offset & 0x1) ++ const int reg = DWARF_REGNUM_AARCH64_RA_STATE; ++ int ra_signed; ++ if (fs->regs.reg[reg].how == REG_UNSAVED) ++ ra_signed = fs->regs.reg[reg].loc.offset & 0x1; ++ else ++ ra_signed = _Unwind_GetGR (context, reg) & 0x1; ++ if (ra_signed) + /* The flag is used for re-authenticating EH handler's address. */ + context->flags |= RA_SIGNED_BIT; + else +diff --git a/libgcc/unwind-dw2.c b/libgcc/unwind-dw2.c +index 41af7e23f47602ec..43d06531fce9bed1 100644 +--- a/libgcc/unwind-dw2.c ++++ b/libgcc/unwind-dw2.c +@@ -1204,7 +1204,9 @@ execute_cfa_program (const unsigned char *insn_ptr, + #if defined (__aarch64__) && !defined (__ILP32__) + /* This CFA is multiplexed with Sparc. On AArch64 it's used to toggle + return address signing status. */ +- fs->regs.reg[DWARF_REGNUM_AARCH64_RA_STATE].loc.offset ^= 1; ++ reg = DWARF_REGNUM_AARCH64_RA_STATE; ++ gcc_assert (fs->regs.reg[reg].how == REG_UNSAVED); ++ fs->regs.reg[reg].loc.offset ^= 1; + #else + /* ??? Hardcoded for SPARC register window configuration. */ + if (__LIBGCC_DWARF_FRAME_REGISTERS__ >= 32) diff --git a/SOURCES/gcc-RHEL-105072-8.patch b/SOURCES/gcc-RHEL-105072-8.patch new file mode 100644 index 0000000..567738d --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-8.patch @@ -0,0 +1,1354 @@ +commit 6e80a1d164d1f996ad08a512c000025a7c2ca893 +Author: Thomas Neumann +Date: Tue Mar 1 21:57:35 2022 +0100 + + eliminate mutex in fast path of __register_frame + + The __register_frame/__deregister_frame functions are used to register + unwinding frames from JITed code in a sorted list. That list itself + is protected by object_mutex, which leads to terrible performance + in multi-threaded code and is somewhat expensive even if single-threaded. + There was already a fast-path that avoided taking the mutex if no + frame was registered at all. + + This commit eliminates both the mutex and the sorted list from + the atomic fast path, and replaces it with a btree that uses + optimistic lock coupling during lookup. This allows for fully parallel + unwinding and is essential to scale exception handling to large + core counts. + + libgcc/ChangeLog: + + * unwind-dw2-fde.c (release_registered_frames): Cleanup at shutdown. + (__register_frame_info_table_bases): Use btree in atomic fast path. + (__deregister_frame_info_bases): Likewise. + (_Unwind_Find_FDE): Likewise. + (base_from_object): Make parameter const. + (classify_object_over_fdes): Add query-only mode. 
+ (get_pc_range): Compute PC range for lookup. + * unwind-dw2-fde.h (last_fde): Make parameter const. + * unwind-dw2-btree.h: New file. + +diff --git a/libgcc/unwind-dw2-btree.h b/libgcc/unwind-dw2-btree.h +new file mode 100644 +index 0000000000000000..8853f0eab486b847 +--- /dev/null ++++ b/libgcc/unwind-dw2-btree.h +@@ -0,0 +1,953 @@ ++/* Lock-free btree for manually registered unwind frames. */ ++/* Copyright (C) 2022 Free Software Foundation, Inc. ++ Contributed by Thomas Neumann ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++Under Section 7 of GPL version 3, you are granted additional ++permissions described in the GCC Runtime Library Exception, version ++3.1, as published by the Free Software Foundation. ++ ++You should have received a copy of the GNU General Public License and ++a copy of the GCC Runtime Library Exception along with this program; ++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++. */ ++ ++#ifndef GCC_UNWIND_DW2_BTREE_H ++#define GCC_UNWIND_DW2_BTREE_H ++ ++#include ++ ++// Common logic for version locks. ++struct version_lock ++{ ++ // The lock itself. The lowest bit indicates an exclusive lock, ++ // the second bit indicates waiting threads. All other bits are ++ // used as counter to recognize changes. ++ // Overflows are okay here, we must only prevent overflow to the ++ // same value within one lock_optimistic/validate ++ // range. Even on 32 bit platforms that would require 1 billion ++ // frame registrations within the time span of a few assembler ++ // instructions. ++ uintptr_t version_lock; ++}; ++ ++#ifdef __GTHREAD_HAS_COND ++// We should never get contention within the tree as it rarely changes. ++// But if we ever do get contention we use these for waiting. ++static __gthread_mutex_t version_lock_mutex = __GTHREAD_MUTEX_INIT; ++static __gthread_cond_t version_lock_cond = __GTHREAD_COND_INIT; ++#endif ++ ++// Initialize in locked state. ++static inline void ++version_lock_initialize_locked_exclusive (struct version_lock *vl) ++{ ++ vl->version_lock = 1; ++} ++ ++// Try to lock the node exclusive. ++static inline bool ++version_lock_try_lock_exclusive (struct version_lock *vl) ++{ ++ uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ if (state & 1) ++ return false; ++ return __atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1, ++ false, __ATOMIC_SEQ_CST, ++ __ATOMIC_SEQ_CST); ++} ++ ++// Lock the node exclusive, blocking as needed. ++static void ++version_lock_lock_exclusive (struct version_lock *vl) ++{ ++#ifndef __GTHREAD_HAS_COND ++restart: ++#endif ++ ++ // We should virtually never get contention here, as frame ++ // changes are rare. ++ uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ if (!(state & 1)) ++ { ++ if (__atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1, ++ false, __ATOMIC_SEQ_CST, ++ __ATOMIC_SEQ_CST)) ++ return; ++ } ++ ++ // We did get contention, wait properly. 
++#ifdef __GTHREAD_HAS_COND ++ __gthread_mutex_lock (&version_lock_mutex); ++ state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ while (true) ++ { ++ // Check if the lock is still held. ++ if (!(state & 1)) ++ { ++ if (__atomic_compare_exchange_n (&(vl->version_lock), &state, ++ state | 1, false, __ATOMIC_SEQ_CST, ++ __ATOMIC_SEQ_CST)) ++ { ++ __gthread_mutex_unlock (&version_lock_mutex); ++ return; ++ } ++ else ++ { ++ continue; ++ } ++ } ++ ++ // Register waiting thread. ++ if (!(state & 2)) ++ { ++ if (!__atomic_compare_exchange_n (&(vl->version_lock), &state, ++ state | 2, false, __ATOMIC_SEQ_CST, ++ __ATOMIC_SEQ_CST)) ++ continue; ++ } ++ ++ // And sleep. ++ __gthread_cond_wait (&version_lock_cond, &version_lock_mutex); ++ state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ } ++#else ++ // Spin if we do not have condition variables available. ++ // We expect no contention here, spinning should be okay. ++ goto restart; ++#endif ++} ++ ++// Release a locked node and increase the version lock. ++static void ++version_lock_unlock_exclusive (struct version_lock *vl) ++{ ++ // increase version, reset exclusive lock bits ++ uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ uintptr_t ns = (state + 4) & (~((uintptr_t) 3)); ++ state = __atomic_exchange_n (&(vl->version_lock), ns, __ATOMIC_SEQ_CST); ++ ++#ifdef __GTHREAD_HAS_COND ++ if (state & 2) ++ { ++ // Wake up waiting threads. This should be extremely rare. ++ __gthread_mutex_lock (&version_lock_mutex); ++ __gthread_cond_broadcast (&version_lock_cond); ++ __gthread_mutex_unlock (&version_lock_mutex); ++ } ++#endif ++} ++ ++// Acquire an optimistic "lock". Note that this does not lock at all, it ++// only allows for validation later. ++static inline bool ++version_lock_lock_optimistic (const struct version_lock *vl, uintptr_t *lock) ++{ ++ uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ *lock = state; ++ ++ // Acquiring the lock fails when there is currently an exclusive lock. ++ return !(state & 1); ++} ++ ++// Validate a previously acquired "lock". ++static inline bool ++version_lock_validate (const struct version_lock *vl, uintptr_t lock) ++{ ++ // Prevent the reordering of non-atomic loads behind the atomic load. ++ // Hans Boehm, Can Seqlocks Get Along with Programming Language Memory ++ // Models?, Section 4. ++ __atomic_thread_fence (__ATOMIC_ACQUIRE); ++ ++ // Check that the node is still in the same state. ++ uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ return (state == lock); ++} ++ ++// The largest possible separator value. ++static const uintptr_t max_separator = ~((uintptr_t) (0)); ++ ++struct btree_node; ++ ++// Inner entry. The child tree contains all entries <= separator. ++struct inner_entry ++{ ++ uintptr_t separator; ++ struct btree_node *child; ++}; ++ ++// Leaf entry. Stores an object entry. ++struct leaf_entry ++{ ++ uintptr_t base, size; ++ struct object *ob; ++}; ++ ++// Node types. ++enum node_type ++{ ++ btree_node_inner, ++ btree_node_leaf, ++ btree_node_free ++}; ++ ++// Node sizes. Chosen such that the result size is roughly 256 bytes. ++#define max_fanout_inner 15 ++#define max_fanout_leaf 10 ++ ++// A btree node. ++struct btree_node ++{ ++ // The version lock used for optimistic lock coupling. ++ struct version_lock version_lock; ++ // The number of entries. ++ unsigned entry_count; ++ // The type. ++ enum node_type type; ++ // The payload. 
++ union ++ { ++ // The inner nodes have fence keys, i.e., the right-most entry includes a ++ // separator. ++ struct inner_entry children[max_fanout_inner]; ++ struct leaf_entry entries[max_fanout_leaf]; ++ } content; ++}; ++ ++// Is an inner node? ++static inline bool ++btree_node_is_inner (const struct btree_node *n) ++{ ++ return n->type == btree_node_inner; ++} ++ ++// Is a leaf node? ++static inline bool ++btree_node_is_leaf (const struct btree_node *n) ++{ ++ return n->type == btree_node_leaf; ++} ++ ++// Should the node be merged? ++static inline bool ++btree_node_needs_merge (const struct btree_node *n) ++{ ++ return n->entry_count < (btree_node_is_inner (n) ? (max_fanout_inner / 2) ++ : (max_fanout_leaf / 2)); ++} ++ ++// Get the fence key for inner nodes. ++static inline uintptr_t ++btree_node_get_fence_key (const struct btree_node *n) ++{ ++ // For inner nodes we just return our right-most entry. ++ return n->content.children[n->entry_count - 1].separator; ++} ++ ++// Find the position for a slot in an inner node. ++static unsigned ++btree_node_find_inner_slot (const struct btree_node *n, uintptr_t value) ++{ ++ for (unsigned index = 0, ec = n->entry_count; index != ec; ++index) ++ if (n->content.children[index].separator >= value) ++ return index; ++ return n->entry_count; ++} ++ ++// Find the position for a slot in a leaf node. ++static unsigned ++btree_node_find_leaf_slot (const struct btree_node *n, uintptr_t value) ++{ ++ for (unsigned index = 0, ec = n->entry_count; index != ec; ++index) ++ if (n->content.entries[index].base + n->content.entries[index].size > value) ++ return index; ++ return n->entry_count; ++} ++ ++// Try to lock the node exclusive. ++static inline bool ++btree_node_try_lock_exclusive (struct btree_node *n) ++{ ++ return version_lock_try_lock_exclusive (&(n->version_lock)); ++} ++ ++// Lock the node exclusive, blocking as needed. ++static inline void ++btree_node_lock_exclusive (struct btree_node *n) ++{ ++ version_lock_lock_exclusive (&(n->version_lock)); ++} ++ ++// Release a locked node and increase the version lock. ++static inline void ++btree_node_unlock_exclusive (struct btree_node *n) ++{ ++ version_lock_unlock_exclusive (&(n->version_lock)); ++} ++ ++// Acquire an optimistic "lock". Note that this does not lock at all, it ++// only allows for validation later. ++static inline bool ++btree_node_lock_optimistic (const struct btree_node *n, uintptr_t *lock) ++{ ++ return version_lock_lock_optimistic (&(n->version_lock), lock); ++} ++ ++// Validate a previously acquire lock. ++static inline bool ++btree_node_validate (const struct btree_node *n, uintptr_t lock) ++{ ++ return version_lock_validate (&(n->version_lock), lock); ++} ++ ++// Insert a new separator after splitting. ++static void ++btree_node_update_separator_after_split (struct btree_node *n, ++ uintptr_t old_separator, ++ uintptr_t new_separator, ++ struct btree_node *new_right) ++{ ++ unsigned slot = btree_node_find_inner_slot (n, old_separator); ++ for (unsigned index = n->entry_count; index > slot; --index) ++ n->content.children[index] = n->content.children[index - 1]; ++ n->content.children[slot].separator = new_separator; ++ n->content.children[slot + 1].child = new_right; ++ n->entry_count++; ++} ++ ++// A btree. Suitable for static initialization, all members are zero at the ++// beginning. ++struct btree ++{ ++ // The root of the btree. ++ struct btree_node *root; ++ // The free list of released node. 
++ struct btree_node *free_list; ++ // The version lock used to protect the root. ++ struct version_lock root_lock; ++}; ++ ++// Initialize a btree. Not actually used, just for exposition. ++static inline void ++btree_init (struct btree *t) ++{ ++ t->root = NULL; ++ t->free_list = NULL; ++ t->root_lock.version_lock = 0; ++}; ++ ++static void ++btree_release_tree_recursively (struct btree *t, struct btree_node *n); ++ ++// Destroy a tree and release all nodes. ++static void ++btree_destroy (struct btree *t) ++{ ++ // Disable the mechanism before cleaning up. ++ struct btree_node *old_root ++ = __atomic_exchange_n (&(t->root), NULL, __ATOMIC_SEQ_CST); ++ if (old_root) ++ btree_release_tree_recursively (t, old_root); ++ ++ // Release all free nodes. ++ while (t->free_list) ++ { ++ struct btree_node *next = t->free_list->content.children[0].child; ++ free (t->free_list); ++ t->free_list = next; ++ } ++} ++ ++// Allocate a node. This node will be returned in locked exclusive state. ++static struct btree_node * ++btree_allocate_node (struct btree *t, bool inner) ++{ ++ while (true) ++ { ++ // Try the free list first. ++ struct btree_node *next_free ++ = __atomic_load_n (&(t->free_list), __ATOMIC_SEQ_CST); ++ if (next_free) ++ { ++ if (!btree_node_try_lock_exclusive (next_free)) ++ continue; ++ // The node might no longer be free, check that again after acquiring ++ // the exclusive lock. ++ if (next_free->type == btree_node_free) ++ { ++ struct btree_node *ex = next_free; ++ if (__atomic_compare_exchange_n ( ++ &(t->free_list), &ex, next_free->content.children[0].child, ++ false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) ++ { ++ next_free->entry_count = 0; ++ next_free->type = inner ? btree_node_inner : btree_node_leaf; ++ return next_free; ++ } ++ } ++ btree_node_unlock_exclusive (next_free); ++ continue; ++ } ++ ++ // No free node available, allocate a new one. ++ struct btree_node *new_node ++ = (struct btree_node *) (malloc (sizeof (struct btree_node))); ++ version_lock_initialize_locked_exclusive ( ++ &(new_node->version_lock)); // initialize the node in locked state. ++ new_node->entry_count = 0; ++ new_node->type = inner ? btree_node_inner : btree_node_leaf; ++ return new_node; ++ } ++} ++ ++// Release a node. This node must be currently locked exclusively and will ++// be placed in the free list. ++static void ++btree_release_node (struct btree *t, struct btree_node *node) ++{ ++ // We cannot release the memory immediately because there might still be ++ // concurrent readers on that node. Put it in the free list instead. ++ node->type = btree_node_free; ++ struct btree_node *next_free ++ = __atomic_load_n (&(t->free_list), __ATOMIC_SEQ_CST); ++ do ++ { ++ node->content.children[0].child = next_free; ++ } while (!__atomic_compare_exchange_n (&(t->free_list), &next_free, node, ++ false, __ATOMIC_SEQ_CST, ++ __ATOMIC_SEQ_CST)); ++ btree_node_unlock_exclusive (node); ++} ++ ++// Recursively release a tree. The btree is by design very shallow, thus ++// we can risk recursion here. ++static void ++btree_release_tree_recursively (struct btree *t, struct btree_node *node) ++{ ++ btree_node_lock_exclusive (node); ++ if (btree_node_is_inner (node)) ++ { ++ for (unsigned index = 0; index < node->entry_count; ++index) ++ btree_release_tree_recursively (t, node->content.children[index].child); ++ } ++ btree_release_node (t, node); ++} ++ ++// Check if we are splitting the root. 
++static void ++btree_handle_root_split (struct btree *t, struct btree_node **node, ++ struct btree_node **parent) ++{ ++ // We want to keep the root pointer stable to allow for contention ++ // free reads. Thus, we split the root by first moving the content ++ // of the root node to a new node, and then split that new node. ++ if (!*parent) ++ { ++ // Allocate a new node, this guarantees us that we will have a parent ++ // afterwards. ++ struct btree_node *new_node ++ = btree_allocate_node (t, btree_node_is_inner (*node)); ++ struct btree_node *old_node = *node; ++ new_node->entry_count = old_node->entry_count; ++ new_node->content = old_node->content; ++ old_node->content.children[0].separator = max_separator; ++ old_node->content.children[0].child = new_node; ++ old_node->entry_count = 1; ++ old_node->type = btree_node_inner; ++ ++ *parent = old_node; ++ *node = new_node; ++ } ++} ++ ++// Split an inner node. ++static void ++btree_split_inner (struct btree *t, struct btree_node **inner, ++ struct btree_node **parent, uintptr_t target) ++{ ++ // Check for the root. ++ btree_handle_root_split (t, inner, parent); ++ ++ // Create two inner node. ++ uintptr_t right_fence = btree_node_get_fence_key (*inner); ++ struct btree_node *left_inner = *inner; ++ struct btree_node *right_inner = btree_allocate_node (t, true); ++ unsigned split = left_inner->entry_count / 2; ++ right_inner->entry_count = left_inner->entry_count - split; ++ for (unsigned index = 0; index < right_inner->entry_count; ++index) ++ right_inner->content.children[index] ++ = left_inner->content.children[split + index]; ++ left_inner->entry_count = split; ++ uintptr_t left_fence = btree_node_get_fence_key (left_inner); ++ btree_node_update_separator_after_split (*parent, right_fence, left_fence, ++ right_inner); ++ if (target <= left_fence) ++ { ++ *inner = left_inner; ++ btree_node_unlock_exclusive (right_inner); ++ } ++ else ++ { ++ *inner = right_inner; ++ btree_node_unlock_exclusive (left_inner); ++ } ++} ++ ++// Split a leaf node. ++static void ++btree_split_leaf (struct btree *t, struct btree_node **leaf, ++ struct btree_node **parent, uintptr_t fence, uintptr_t target) ++{ ++ // Check for the root. ++ btree_handle_root_split (t, leaf, parent); ++ ++ // Create two leaf nodes. ++ uintptr_t right_fence = fence; ++ struct btree_node *left_leaf = *leaf; ++ struct btree_node *right_leaf = btree_allocate_node (t, false); ++ unsigned split = left_leaf->entry_count / 2; ++ right_leaf->entry_count = left_leaf->entry_count - split; ++ for (unsigned index = 0; index != right_leaf->entry_count; ++index) ++ right_leaf->content.entries[index] ++ = left_leaf->content.entries[split + index]; ++ left_leaf->entry_count = split; ++ uintptr_t left_fence = right_leaf->content.entries[0].base - 1; ++ btree_node_update_separator_after_split (*parent, right_fence, left_fence, ++ right_leaf); ++ if (target <= left_fence) ++ { ++ *leaf = left_leaf; ++ btree_node_unlock_exclusive (right_leaf); ++ } ++ else ++ { ++ *leaf = right_leaf; ++ btree_node_unlock_exclusive (left_leaf); ++ } ++} ++ ++// Merge (or balance) child nodes. ++static struct btree_node * ++btree_merge_node (struct btree *t, unsigned child_slot, ++ struct btree_node *parent, uintptr_t target) ++{ ++ // Choose the emptiest neighbor and lock both. The target child is already ++ // locked. 
++ unsigned left_slot; ++ struct btree_node *left_node, *right_node; ++ if ((child_slot == 0) ++ || (((child_slot + 1) < parent->entry_count) ++ && (parent->content.children[child_slot + 1].child->entry_count ++ < parent->content.children[child_slot - 1].child->entry_count))) ++ { ++ left_slot = child_slot; ++ left_node = parent->content.children[left_slot].child; ++ right_node = parent->content.children[left_slot + 1].child; ++ btree_node_lock_exclusive (right_node); ++ } ++ else ++ { ++ left_slot = child_slot - 1; ++ left_node = parent->content.children[left_slot].child; ++ right_node = parent->content.children[left_slot + 1].child; ++ btree_node_lock_exclusive (left_node); ++ } ++ ++ // Can we merge both nodes into one node? ++ unsigned total_count = left_node->entry_count + right_node->entry_count; ++ unsigned max_count ++ = btree_node_is_inner (left_node) ? max_fanout_inner : max_fanout_leaf; ++ if (total_count <= max_count) ++ { ++ // Merge into the parent? ++ if (parent->entry_count == 2) ++ { ++ // Merge children into parent. This can only happen at the root. ++ if (btree_node_is_inner (left_node)) ++ { ++ for (unsigned index = 0; index != left_node->entry_count; ++index) ++ parent->content.children[index] ++ = left_node->content.children[index]; ++ for (unsigned index = 0; index != right_node->entry_count; ++ ++index) ++ parent->content.children[index + left_node->entry_count] ++ = right_node->content.children[index]; ++ } ++ else ++ { ++ parent->type = btree_node_leaf; ++ for (unsigned index = 0; index != left_node->entry_count; ++index) ++ parent->content.entries[index] ++ = left_node->content.entries[index]; ++ for (unsigned index = 0; index != right_node->entry_count; ++ ++index) ++ parent->content.entries[index + left_node->entry_count] ++ = right_node->content.entries[index]; ++ } ++ parent->entry_count = total_count; ++ btree_release_node (t, left_node); ++ btree_release_node (t, right_node); ++ return parent; ++ } ++ else ++ { ++ // Regular merge. ++ if (btree_node_is_inner (left_node)) ++ { ++ for (unsigned index = 0; index != right_node->entry_count; ++ ++index) ++ left_node->content.children[left_node->entry_count++] ++ = right_node->content.children[index]; ++ } ++ else ++ { ++ for (unsigned index = 0; index != right_node->entry_count; ++ ++index) ++ left_node->content.entries[left_node->entry_count++] ++ = right_node->content.entries[index]; ++ } ++ parent->content.children[left_slot].separator ++ = parent->content.children[left_slot + 1].separator; ++ for (unsigned index = left_slot + 1; index + 1 < parent->entry_count; ++ ++index) ++ parent->content.children[index] ++ = parent->content.children[index + 1]; ++ parent->entry_count--; ++ btree_release_node (t, right_node); ++ btree_node_unlock_exclusive (parent); ++ return left_node; ++ } ++ } ++ ++ // No merge possible, rebalance instead. ++ if (left_node->entry_count > right_node->entry_count) ++ { ++ // Shift from left to right. 
++ unsigned to_shift ++ = (left_node->entry_count - right_node->entry_count) / 2; ++ if (btree_node_is_inner (left_node)) ++ { ++ for (unsigned index = 0; index != right_node->entry_count; ++index) ++ { ++ unsigned pos = right_node->entry_count - 1 - index; ++ right_node->content.children[pos + to_shift] ++ = right_node->content.children[pos]; ++ } ++ for (unsigned index = 0; index != to_shift; ++index) ++ right_node->content.children[index] ++ = left_node->content ++ .children[left_node->entry_count - to_shift + index]; ++ } ++ else ++ { ++ for (unsigned index = 0; index != right_node->entry_count; ++index) ++ { ++ unsigned pos = right_node->entry_count - 1 - index; ++ right_node->content.entries[pos + to_shift] ++ = right_node->content.entries[pos]; ++ } ++ for (unsigned index = 0; index != to_shift; ++index) ++ right_node->content.entries[index] ++ = left_node->content ++ .entries[left_node->entry_count - to_shift + index]; ++ } ++ left_node->entry_count -= to_shift; ++ right_node->entry_count += to_shift; ++ } ++ else ++ { ++ // Shift from right to left. ++ unsigned to_shift ++ = (right_node->entry_count - left_node->entry_count) / 2; ++ if (btree_node_is_inner (left_node)) ++ { ++ for (unsigned index = 0; index != to_shift; ++index) ++ left_node->content.children[left_node->entry_count + index] ++ = right_node->content.children[index]; ++ for (unsigned index = 0; index != right_node->entry_count - to_shift; ++ ++index) ++ right_node->content.children[index] ++ = right_node->content.children[index + to_shift]; ++ } ++ else ++ { ++ for (unsigned index = 0; index != to_shift; ++index) ++ left_node->content.entries[left_node->entry_count + index] ++ = right_node->content.entries[index]; ++ for (unsigned index = 0; index != right_node->entry_count - to_shift; ++ ++index) ++ right_node->content.entries[index] ++ = right_node->content.entries[index + to_shift]; ++ } ++ left_node->entry_count += to_shift; ++ right_node->entry_count -= to_shift; ++ } ++ uintptr_t left_fence; ++ if (btree_node_is_leaf (left_node)) ++ { ++ left_fence = right_node->content.entries[0].base - 1; ++ } ++ else ++ { ++ left_fence = btree_node_get_fence_key (left_node); ++ } ++ parent->content.children[left_slot].separator = left_fence; ++ btree_node_unlock_exclusive (parent); ++ if (target <= left_fence) ++ { ++ btree_node_unlock_exclusive (right_node); ++ return left_node; ++ } ++ else ++ { ++ btree_node_unlock_exclusive (left_node); ++ return right_node; ++ } ++} ++ ++// Insert an entry. ++static bool ++btree_insert (struct btree *t, uintptr_t base, uintptr_t size, ++ struct object *ob) ++{ ++ // Sanity check. ++ if (!size) ++ return false; ++ ++ // Access the root. ++ struct btree_node *iter, *parent = NULL; ++ { ++ version_lock_lock_exclusive (&(t->root_lock)); ++ iter = t->root; ++ if (iter) ++ { ++ btree_node_lock_exclusive (iter); ++ } ++ else ++ { ++ t->root = iter = btree_allocate_node (t, false); ++ } ++ version_lock_unlock_exclusive (&(t->root_lock)); ++ } ++ ++ // Walk down the btree with classic lock coupling and eager splits. ++ // Strictly speaking this is not performance optimal, we could use ++ // optimistic lock coupling until we hit a node that has to be modified. ++ // But that is more difficult to implement and frame registration is ++ // rare anyway, we use simple locking for now. ++ ++ uintptr_t fence = max_separator; ++ while (btree_node_is_inner (iter)) ++ { ++ // Use eager splits to avoid lock coupling up. 
++ if (iter->entry_count == max_fanout_inner) ++ btree_split_inner (t, &iter, &parent, base); ++ ++ unsigned slot = btree_node_find_inner_slot (iter, base); ++ if (parent) ++ btree_node_unlock_exclusive (parent); ++ parent = iter; ++ fence = iter->content.children[slot].separator; ++ iter = iter->content.children[slot].child; ++ btree_node_lock_exclusive (iter); ++ } ++ ++ // Make sure we have space. ++ if (iter->entry_count == max_fanout_leaf) ++ btree_split_leaf (t, &iter, &parent, fence, base); ++ if (parent) ++ btree_node_unlock_exclusive (parent); ++ ++ // Insert in node. ++ unsigned slot = btree_node_find_leaf_slot (iter, base); ++ if ((slot < iter->entry_count) && (iter->content.entries[slot].base == base)) ++ { ++ // Duplicate entry, this should never happen. ++ btree_node_unlock_exclusive (iter); ++ return false; ++ } ++ for (unsigned index = iter->entry_count; index > slot; --index) ++ iter->content.entries[index] = iter->content.entries[index - 1]; ++ struct leaf_entry *e = &(iter->content.entries[slot]); ++ e->base = base; ++ e->size = size; ++ e->ob = ob; ++ iter->entry_count++; ++ btree_node_unlock_exclusive (iter); ++ return true; ++} ++ ++// Remove an entry. ++static struct object * ++btree_remove (struct btree *t, uintptr_t base) ++{ ++ // Access the root. ++ version_lock_lock_exclusive (&(t->root_lock)); ++ struct btree_node *iter = t->root; ++ if (iter) ++ btree_node_lock_exclusive (iter); ++ version_lock_unlock_exclusive (&(t->root_lock)); ++ if (!iter) ++ return NULL; ++ ++ // Same strategy as with insert, walk down with lock coupling and ++ // merge eagerly. ++ while (btree_node_is_inner (iter)) ++ { ++ unsigned slot = btree_node_find_inner_slot (iter, base); ++ struct btree_node *next = iter->content.children[slot].child; ++ btree_node_lock_exclusive (next); ++ if (btree_node_needs_merge (next)) ++ { ++ // Use eager merges to avoid lock coupling up. ++ iter = btree_merge_node (t, slot, iter, base); ++ } ++ else ++ { ++ btree_node_unlock_exclusive (iter); ++ iter = next; ++ } ++ } ++ ++ // Remove existing entry. ++ unsigned slot = btree_node_find_leaf_slot (iter, base); ++ if ((slot >= iter->entry_count) || (iter->content.entries[slot].base != base)) ++ { ++ // Not found, this should never happen. ++ btree_node_unlock_exclusive (iter); ++ return NULL; ++ } ++ struct object *ob = iter->content.entries[slot].ob; ++ for (unsigned index = slot; index + 1 < iter->entry_count; ++index) ++ iter->content.entries[index] = iter->content.entries[index + 1]; ++ iter->entry_count--; ++ btree_node_unlock_exclusive (iter); ++ return ob; ++} ++ ++// Find the corresponding entry for the given address. ++static struct object * ++btree_lookup (const struct btree *t, uintptr_t target_addr) ++{ ++ // Within this function many loads are relaxed atomic loads. ++ // Use a macro to keep the code reasonable. ++#define RLOAD(x) __atomic_load_n (&(x), __ATOMIC_RELAXED) ++ ++ // For targets where unwind info is usually not registered through these ++ // APIs anymore, avoid any sequential consistent atomics. ++ // Use relaxed MO here, it is up to the app to ensure that the library ++ // loading/initialization happens-before using that library in other ++ // threads (in particular unwinding with that library's functions ++ // appearing in the backtraces). Calling that library's functions ++ // without waiting for the library to initialize would be racy. 
++ if (__builtin_expect (!RLOAD (t->root), 1)) ++ return NULL; ++ ++ // The unwinding tables are mostly static, they only change when ++ // frames are added or removed. This makes it extremely unlikely that they ++ // change during a given unwinding sequence. Thus, we optimize for the ++ // contention free case and use optimistic lock coupling. This does not ++ // require any writes to shared state, instead we validate every read. It is ++ // important that we do not trust any value that we have read until we call ++ // validate again. Data can change at arbitrary points in time, thus we always ++ // copy something into a local variable and validate again before acting on ++ // the read. In the unlikely event that we encounter a concurrent change we ++ // simply restart and try again. ++ ++restart: ++ struct btree_node *iter; ++ uintptr_t lock; ++ { ++ // Accessing the root node requires defending against concurrent pointer ++ // changes Thus we couple rootLock -> lock on root node -> validate rootLock ++ if (!version_lock_lock_optimistic (&(t->root_lock), &lock)) ++ goto restart; ++ iter = RLOAD (t->root); ++ if (!version_lock_validate (&(t->root_lock), lock)) ++ goto restart; ++ if (!iter) ++ return NULL; ++ uintptr_t child_lock; ++ if ((!btree_node_lock_optimistic (iter, &child_lock)) ++ || (!version_lock_validate (&(t->root_lock), lock))) ++ goto restart; ++ lock = child_lock; ++ } ++ ++ // Now we can walk down towards the right leaf node. ++ while (true) ++ { ++ enum node_type type = RLOAD (iter->type); ++ unsigned entry_count = RLOAD (iter->entry_count); ++ if (!btree_node_validate (iter, lock)) ++ goto restart; ++ if (!entry_count) ++ return NULL; ++ ++ if (type == btree_node_inner) ++ { ++ // We cannot call find_inner_slot here because we need (relaxed) ++ // atomic reads here. ++ unsigned slot = 0; ++ while ( ++ ((slot + 1) < entry_count) ++ && (RLOAD (iter->content.children[slot].separator) < target_addr)) ++ ++slot; ++ struct btree_node *child = RLOAD (iter->content.children[slot].child); ++ if (!btree_node_validate (iter, lock)) ++ goto restart; ++ ++ // The node content can change at any point in time, thus we must ++ // interleave parent and child checks. ++ uintptr_t child_lock; ++ if (!btree_node_lock_optimistic (child, &child_lock)) ++ goto restart; ++ if (!btree_node_validate (iter, lock)) ++ goto restart; // make sure we still point to the correct node after ++ // acquiring the optimistic lock. ++ ++ // Go down ++ iter = child; ++ lock = child_lock; ++ } ++ else ++ { ++ // We cannot call find_leaf_slot here because we need (relaxed) ++ // atomic reads here. ++ unsigned slot = 0; ++ while (((slot + 1) < entry_count) ++ && (RLOAD (iter->content.entries[slot].base) ++ + RLOAD (iter->content.entries[slot].size) ++ <= target_addr)) ++ ++slot; ++ struct leaf_entry entry; ++ entry.base = RLOAD (iter->content.entries[slot].base); ++ entry.size = RLOAD (iter->content.entries[slot].size); ++ entry.ob = RLOAD (iter->content.entries[slot].ob); ++ if (!btree_node_validate (iter, lock)) ++ goto restart; ++ ++ // Check if we have a hit. ++ if ((entry.base <= target_addr) ++ && (target_addr < entry.base + entry.size)) ++ { ++ return entry.ob; ++ } ++ return NULL; ++ } ++ } ++#undef RLOAD ++} ++ ++#endif /* unwind-dw2-btree.h */ +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index 5d2213c3f88f3a49..a591faaa579b5883 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -42,15 +42,34 @@ see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see + #endif + #endif + ++#ifdef ATOMIC_FDE_FAST_PATH ++#include "unwind-dw2-btree.h" ++ ++static struct btree registered_frames; ++ ++static void ++release_registered_frames (void) __attribute__ ((destructor (110))); ++static void ++release_registered_frames (void) ++{ ++ /* Release the b-tree and all frames. Frame releases that happen later are ++ * silently ignored */ ++ btree_destroy (®istered_frames); ++} ++ ++static void ++get_pc_range (const struct object *ob, uintptr_t *range); ++static void ++init_object (struct object *ob); ++ ++#else ++ + /* The unseen_objects list contains objects that have been registered + but not yet categorized in any way. The seen_objects list has had + its pc_begin and count fields initialized at minimum, and is sorted + by decreasing value of pc_begin. */ + static struct object *unseen_objects; + static struct object *seen_objects; +-#ifdef ATOMIC_FDE_FAST_PATH +-static int any_objects_registered; +-#endif + + #ifdef __GTHREAD_MUTEX_INIT + static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT; +@@ -78,6 +97,7 @@ init_object_mutex_once (void) + static __gthread_mutex_t object_mutex; + #endif + #endif ++#endif + + /* Called from crtbegin.o to register the unwind info for an object. */ + +@@ -99,23 +119,23 @@ __register_frame_info_bases (const void *begin, struct object *ob, + ob->fde_end = NULL; + #endif + ++#ifdef ATOMIC_FDE_FAST_PATH ++ // Initialize eagerly to avoid locking later ++ init_object (ob); ++ ++ // And register the frame ++ uintptr_t range[2]; ++ get_pc_range (ob, range); ++ btree_insert (®istered_frames, range[0], range[1] - range[0], ob); ++#else + init_object_mutex_once (); + __gthread_mutex_lock (&object_mutex); + + ob->next = unseen_objects; + unseen_objects = ob; +-#ifdef ATOMIC_FDE_FAST_PATH +- /* Set flag that at least one library has registered FDEs. +- Use relaxed MO here, it is up to the app to ensure that the library +- loading/initialization happens-before using that library in other +- threads (in particular unwinding with that library's functions +- appearing in the backtraces). Calling that library's functions +- without waiting for the library to initialize would be racy. */ +- if (!any_objects_registered) +- __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED); +-#endif + + __gthread_mutex_unlock (&object_mutex); ++#endif + } + + void +@@ -153,23 +173,23 @@ __register_frame_info_table_bases (void *begin, struct object *ob, + ob->s.b.from_array = 1; + ob->s.b.encoding = DW_EH_PE_omit; + ++#ifdef ATOMIC_FDE_FAST_PATH ++ // Initialize eagerly to avoid locking later ++ init_object (ob); ++ ++ // And register the frame ++ uintptr_t range[2]; ++ get_pc_range (ob, range); ++ btree_insert (®istered_frames, range[0], range[1] - range[0], ob); ++#else + init_object_mutex_once (); + __gthread_mutex_lock (&object_mutex); + + ob->next = unseen_objects; + unseen_objects = ob; +-#ifdef ATOMIC_FDE_FAST_PATH +- /* Set flag that at least one library has registered FDEs. +- Use relaxed MO here, it is up to the app to ensure that the library +- loading/initialization happens-before using that library in other +- threads (in particular unwinding with that library's functions +- appearing in the backtraces). Calling that library's functions +- without waiting for the library to initialize would be racy. 
*/ +- if (!any_objects_registered) +- __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED); +-#endif + + __gthread_mutex_unlock (&object_mutex); ++#endif + } + + void +@@ -200,16 +220,33 @@ __register_frame_table (void *begin) + void * + __deregister_frame_info_bases (const void *begin) + { +- struct object **p; + struct object *ob = 0; + + /* If .eh_frame is empty, we haven't registered. */ + if ((const uword *) begin == 0 || *(const uword *) begin == 0) + return ob; + ++#ifdef ATOMIC_FDE_FAST_PATH ++ // Find the corresponding PC range ++ struct object lookupob; ++ lookupob.tbase = 0; ++ lookupob.dbase = 0; ++ lookupob.u.single = begin; ++ lookupob.s.i = 0; ++ lookupob.s.b.encoding = DW_EH_PE_omit; ++#ifdef DWARF2_OBJECT_END_PTR_EXTENSION ++ lookupob.fde_end = NULL; ++#endif ++ uintptr_t range[2]; ++ get_pc_range (&lookupob, range); ++ ++ // And remove ++ ob = btree_remove (®istered_frames, range[0]); ++#else + init_object_mutex_once (); + __gthread_mutex_lock (&object_mutex); + ++ struct object **p; + for (p = &unseen_objects; *p ; p = &(*p)->next) + if ((*p)->u.single == begin) + { +@@ -241,6 +278,8 @@ __deregister_frame_info_bases (const void *begin) + + out: + __gthread_mutex_unlock (&object_mutex); ++#endif ++ + gcc_assert (ob); + return (void *) ob; + } +@@ -264,7 +303,7 @@ __deregister_frame (void *begin) + instead of an _Unwind_Context. */ + + static _Unwind_Ptr +-base_from_object (unsigned char encoding, struct object *ob) ++base_from_object (unsigned char encoding, const struct object *ob) + { + if (encoding == DW_EH_PE_omit) + return 0; +@@ -628,13 +667,17 @@ end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count) + } + } + +- +-/* Update encoding, mixed_encoding, and pc_begin for OB for the +- fde array beginning at THIS_FDE. Return the number of fdes +- encountered along the way. */ ++/* Inspect the fde array beginning at this_fde. This ++ function can be used either in query mode (RANGE is ++ not null, OB is const), or in update mode (RANGE is ++ null, OB is modified). In query mode the function computes ++ the range of PC values and stores it in RANGE. In ++ update mode it updates encoding, mixed_encoding, and pc_begin ++ for OB. Return the number of fdes encountered along the way. */ + + static size_t +-classify_object_over_fdes (struct object *ob, const fde *this_fde) ++classify_object_over_fdes (struct object *ob, const fde *this_fde, ++ uintptr_t *range) + { + const struct dwarf_cie *last_cie = 0; + size_t count = 0; +@@ -660,14 +703,18 @@ classify_object_over_fdes (struct object *ob, const fde *this_fde) + if (encoding == DW_EH_PE_omit) + return -1; + base = base_from_object (encoding, ob); +- if (ob->s.b.encoding == DW_EH_PE_omit) +- ob->s.b.encoding = encoding; +- else if (ob->s.b.encoding != encoding) +- ob->s.b.mixed_encoding = 1; ++ if (!range) ++ { ++ if (ob->s.b.encoding == DW_EH_PE_omit) ++ ob->s.b.encoding = encoding; ++ else if (ob->s.b.encoding != encoding) ++ ob->s.b.mixed_encoding = 1; ++ } + } + +- read_encoded_value_with_base (encoding, base, this_fde->pc_begin, +- &pc_begin); ++ const unsigned char *p; ++ p = read_encoded_value_with_base (encoding, base, this_fde->pc_begin, ++ &pc_begin); + + /* Take care to ignore link-once functions that were removed. 
+ In these cases, the function address will be NULL, but if +@@ -683,8 +730,29 @@ classify_object_over_fdes (struct object *ob, const fde *this_fde) + continue; + + count += 1; +- if ((void *) pc_begin < ob->pc_begin) +- ob->pc_begin = (void *) pc_begin; ++ if (range) ++ { ++ _Unwind_Ptr pc_range, pc_end; ++ read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range); ++ pc_end = pc_begin + pc_range; ++ if ((!range[0]) && (!range[1])) ++ { ++ range[0] = pc_begin; ++ range[1] = pc_end; ++ } ++ else ++ { ++ if (pc_begin < range[0]) ++ range[0] = pc_begin; ++ if (pc_end > range[1]) ++ range[1] = pc_end; ++ } ++ } ++ else ++ { ++ if ((void *) pc_begin < ob->pc_begin) ++ ob->pc_begin = (void *) pc_begin; ++ } + } + + return count; +@@ -769,7 +837,7 @@ init_object (struct object* ob) + fde **p = ob->u.array; + for (count = 0; *p; ++p) + { +- size_t cur_count = classify_object_over_fdes (ob, *p); ++ size_t cur_count = classify_object_over_fdes (ob, *p, NULL); + if (cur_count == (size_t) -1) + goto unhandled_fdes; + count += cur_count; +@@ -777,7 +845,7 @@ init_object (struct object* ob) + } + else + { +- count = classify_object_over_fdes (ob, ob->u.single); ++ count = classify_object_over_fdes (ob, ob->u.single, NULL); + if (count == (size_t) -1) + { + static const fde terminator; +@@ -821,6 +889,32 @@ init_object (struct object* ob) + ob->s.b.sorted = 1; + } + ++#ifdef ATOMIC_FDE_FAST_PATH ++/* Get the PC range for lookup */ ++static void ++get_pc_range (const struct object *ob, uintptr_t *range) ++{ ++ // It is safe to cast to non-const object* here as ++ // classify_object_over_fdes does not modify ob in query mode. ++ struct object *ncob = (struct object *) (uintptr_t) ob; ++ range[0] = range[1] = 0; ++ if (ob->s.b.sorted) ++ { ++ classify_object_over_fdes (ncob, ob->u.sort->orig_data, range); ++ } ++ else if (ob->s.b.from_array) ++ { ++ fde **p = ob->u.array; ++ for (; *p; ++p) ++ classify_object_over_fdes (ncob, *p, range); ++ } ++ else ++ { ++ classify_object_over_fdes (ncob, ob->u.single, range); ++ } ++} ++#endif ++ + /* A linear search through a set of FDEs for the given PC. This is + used when there was insufficient memory to allocate and sort an + array. */ +@@ -985,6 +1079,9 @@ binary_search_mixed_encoding_fdes (struct object *ob, void *pc) + static const fde * + search_object (struct object* ob, void *pc) + { ++ /* The fast path initializes objects eagerly to avoid locking. ++ * On the slow path we initialize them now */ ++#ifndef ATOMIC_FDE_FAST_PATH + /* If the data hasn't been sorted, try to do this now. We may have + more memory available than last time we tried. */ + if (! ob->s.b.sorted) +@@ -997,6 +1094,7 @@ search_object (struct object* ob, void *pc) + if (pc < ob->pc_begin) + return NULL; + } ++#endif + + if (ob->s.b.sorted) + { +@@ -1033,17 +1131,12 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + const fde *f = NULL; + + #ifdef ATOMIC_FDE_FAST_PATH +- /* For targets where unwind info is usually not registered through these +- APIs anymore, avoid taking a global lock. +- Use relaxed MO here, it is up to the app to ensure that the library +- loading/initialization happens-before using that library in other +- threads (in particular unwinding with that library's functions +- appearing in the backtraces). Calling that library's functions +- without waiting for the library to initialize would be racy. 
*/ +- if (__builtin_expect (!__atomic_load_n (&any_objects_registered, +- __ATOMIC_RELAXED), 1)) ++ ob = btree_lookup (®istered_frames, (uintptr_t) pc); ++ if (!ob) + return NULL; +-#endif ++ ++ f = search_object (ob, pc); ++#else + + init_object_mutex_once (); + __gthread_mutex_lock (&object_mutex); +@@ -1081,6 +1174,7 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + + fini: + __gthread_mutex_unlock (&object_mutex); ++#endif + + if (f) + { +diff --git a/libgcc/unwind-dw2-fde.h b/libgcc/unwind-dw2-fde.h +index bc105e1b749ea8e4..acd5549af20b6c65 100644 +--- a/libgcc/unwind-dw2-fde.h ++++ b/libgcc/unwind-dw2-fde.h +@@ -166,7 +166,7 @@ next_fde (const fde *f) + extern const fde * _Unwind_Find_FDE (void *, struct dwarf_eh_bases *); + + static inline int +-last_fde (struct object *obj __attribute__ ((__unused__)), const fde *f) ++last_fde (const struct object *obj __attribute__ ((__unused__)), const fde *f) + { + #ifdef DWARF2_OBJECT_END_PTR_EXTENSION + return f == (const fde *) obj->fde_end || f->length == 0; diff --git a/SOURCES/gcc-RHEL-105072-9.patch b/SOURCES/gcc-RHEL-105072-9.patch new file mode 100644 index 0000000..365d1b8 --- /dev/null +++ b/SOURCES/gcc-RHEL-105072-9.patch @@ -0,0 +1,377 @@ +commit d458f806afe07d1e06bdf275e94d05a716f41bf6 +Author: Thomas Neumann +Date: Sun Sep 18 11:31:01 2022 +0200 + + Remove dependency on uintptr_t in libgcc + + uintptr_t is no available for all targets, use __UINTPTR_TYPE__ + instead. + + libgcc/ChangeLog: + + * unwind-dw2-fde.c: Replace uintptr_t with typedef + for __UINTPTR_TYPE__. + * unwind-dw2-btree.h: Likewise. + +diff --git a/libgcc/unwind-dw2-btree.h b/libgcc/unwind-dw2-btree.h +index 8853f0eab486b847..ace507d9ffbdffb7 100644 +--- a/libgcc/unwind-dw2-btree.h ++++ b/libgcc/unwind-dw2-btree.h +@@ -39,7 +39,7 @@ struct version_lock + // range. Even on 32 bit platforms that would require 1 billion + // frame registrations within the time span of a few assembler + // instructions. +- uintptr_t version_lock; ++ uintptr_type version_lock; + }; + + #ifdef __GTHREAD_HAS_COND +@@ -60,7 +60,7 @@ version_lock_initialize_locked_exclusive (struct version_lock *vl) + static inline bool + version_lock_try_lock_exclusive (struct version_lock *vl) + { +- uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); + if (state & 1) + return false; + return __atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1, +@@ -78,7 +78,7 @@ restart: + + // We should virtually never get contention here, as frame + // changes are rare. +- uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); + if (!(state & 1)) + { + if (__atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1, +@@ -134,8 +134,8 @@ static void + version_lock_unlock_exclusive (struct version_lock *vl) + { + // increase version, reset exclusive lock bits +- uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); +- uintptr_t ns = (state + 4) & (~((uintptr_t) 3)); ++ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ uintptr_type ns = (state + 4) & (~((uintptr_type) 3)); + state = __atomic_exchange_n (&(vl->version_lock), ns, __ATOMIC_SEQ_CST); + + #ifdef __GTHREAD_HAS_COND +@@ -152,9 +152,9 @@ version_lock_unlock_exclusive (struct version_lock *vl) + // Acquire an optimistic "lock". 
Note that this does not lock at all, it + // only allows for validation later. + static inline bool +-version_lock_lock_optimistic (const struct version_lock *vl, uintptr_t *lock) ++version_lock_lock_optimistic (const struct version_lock *vl, uintptr_type *lock) + { +- uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); + *lock = state; + + // Acquiring the lock fails when there is currently an exclusive lock. +@@ -163,7 +163,7 @@ version_lock_lock_optimistic (const struct version_lock *vl, uintptr_t *lock) + + // Validate a previously acquired "lock". + static inline bool +-version_lock_validate (const struct version_lock *vl, uintptr_t lock) ++version_lock_validate (const struct version_lock *vl, uintptr_type lock) + { + // Prevent the reordering of non-atomic loads behind the atomic load. + // Hans Boehm, Can Seqlocks Get Along with Programming Language Memory +@@ -171,26 +171,26 @@ version_lock_validate (const struct version_lock *vl, uintptr_t lock) + __atomic_thread_fence (__ATOMIC_ACQUIRE); + + // Check that the node is still in the same state. +- uintptr_t state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); ++ uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST); + return (state == lock); + } + + // The largest possible separator value. +-static const uintptr_t max_separator = ~((uintptr_t) (0)); ++static const uintptr_type max_separator = ~((uintptr_type) (0)); + + struct btree_node; + + // Inner entry. The child tree contains all entries <= separator. + struct inner_entry + { +- uintptr_t separator; ++ uintptr_type separator; + struct btree_node *child; + }; + + // Leaf entry. Stores an object entry. + struct leaf_entry + { +- uintptr_t base, size; ++ uintptr_type base, size; + struct object *ob; + }; + +@@ -248,7 +248,7 @@ btree_node_needs_merge (const struct btree_node *n) + } + + // Get the fence key for inner nodes. +-static inline uintptr_t ++static inline uintptr_type + btree_node_get_fence_key (const struct btree_node *n) + { + // For inner nodes we just return our right-most entry. +@@ -257,7 +257,7 @@ btree_node_get_fence_key (const struct btree_node *n) + + // Find the position for a slot in an inner node. + static unsigned +-btree_node_find_inner_slot (const struct btree_node *n, uintptr_t value) ++btree_node_find_inner_slot (const struct btree_node *n, uintptr_type value) + { + for (unsigned index = 0, ec = n->entry_count; index != ec; ++index) + if (n->content.children[index].separator >= value) +@@ -267,7 +267,7 @@ btree_node_find_inner_slot (const struct btree_node *n, uintptr_t value) + + // Find the position for a slot in a leaf node. + static unsigned +-btree_node_find_leaf_slot (const struct btree_node *n, uintptr_t value) ++btree_node_find_leaf_slot (const struct btree_node *n, uintptr_type value) + { + for (unsigned index = 0, ec = n->entry_count; index != ec; ++index) + if (n->content.entries[index].base + n->content.entries[index].size > value) +@@ -299,14 +299,14 @@ btree_node_unlock_exclusive (struct btree_node *n) + // Acquire an optimistic "lock". Note that this does not lock at all, it + // only allows for validation later. + static inline bool +-btree_node_lock_optimistic (const struct btree_node *n, uintptr_t *lock) ++btree_node_lock_optimistic (const struct btree_node *n, uintptr_type *lock) + { + return version_lock_lock_optimistic (&(n->version_lock), lock); + } + + // Validate a previously acquire lock. 
+ static inline bool +-btree_node_validate (const struct btree_node *n, uintptr_t lock) ++btree_node_validate (const struct btree_node *n, uintptr_type lock) + { + return version_lock_validate (&(n->version_lock), lock); + } +@@ -314,8 +314,8 @@ btree_node_validate (const struct btree_node *n, uintptr_t lock) + // Insert a new separator after splitting. + static void + btree_node_update_separator_after_split (struct btree_node *n, +- uintptr_t old_separator, +- uintptr_t new_separator, ++ uintptr_type old_separator, ++ uintptr_type new_separator, + struct btree_node *new_right) + { + unsigned slot = btree_node_find_inner_slot (n, old_separator); +@@ -474,13 +474,13 @@ btree_handle_root_split (struct btree *t, struct btree_node **node, + // Split an inner node. + static void + btree_split_inner (struct btree *t, struct btree_node **inner, +- struct btree_node **parent, uintptr_t target) ++ struct btree_node **parent, uintptr_type target) + { + // Check for the root. + btree_handle_root_split (t, inner, parent); + + // Create two inner node. +- uintptr_t right_fence = btree_node_get_fence_key (*inner); ++ uintptr_type right_fence = btree_node_get_fence_key (*inner); + struct btree_node *left_inner = *inner; + struct btree_node *right_inner = btree_allocate_node (t, true); + unsigned split = left_inner->entry_count / 2; +@@ -489,7 +489,7 @@ btree_split_inner (struct btree *t, struct btree_node **inner, + right_inner->content.children[index] + = left_inner->content.children[split + index]; + left_inner->entry_count = split; +- uintptr_t left_fence = btree_node_get_fence_key (left_inner); ++ uintptr_type left_fence = btree_node_get_fence_key (left_inner); + btree_node_update_separator_after_split (*parent, right_fence, left_fence, + right_inner); + if (target <= left_fence) +@@ -507,13 +507,14 @@ btree_split_inner (struct btree *t, struct btree_node **inner, + // Split a leaf node. + static void + btree_split_leaf (struct btree *t, struct btree_node **leaf, +- struct btree_node **parent, uintptr_t fence, uintptr_t target) ++ struct btree_node **parent, uintptr_type fence, ++ uintptr_type target) + { + // Check for the root. + btree_handle_root_split (t, leaf, parent); + + // Create two leaf nodes. +- uintptr_t right_fence = fence; ++ uintptr_type right_fence = fence; + struct btree_node *left_leaf = *leaf; + struct btree_node *right_leaf = btree_allocate_node (t, false); + unsigned split = left_leaf->entry_count / 2; +@@ -522,7 +523,7 @@ btree_split_leaf (struct btree *t, struct btree_node **leaf, + right_leaf->content.entries[index] + = left_leaf->content.entries[split + index]; + left_leaf->entry_count = split; +- uintptr_t left_fence = right_leaf->content.entries[0].base - 1; ++ uintptr_type left_fence = right_leaf->content.entries[0].base - 1; + btree_node_update_separator_after_split (*parent, right_fence, left_fence, + right_leaf); + if (target <= left_fence) +@@ -540,7 +541,7 @@ btree_split_leaf (struct btree *t, struct btree_node **leaf, + // Merge (or balance) child nodes. + static struct btree_node * + btree_merge_node (struct btree *t, unsigned child_slot, +- struct btree_node *parent, uintptr_t target) ++ struct btree_node *parent, uintptr_type target) + { + // Choose the emptiest neighbor and lock both. The target child is already + // locked. 
+@@ -693,7 +694,7 @@ btree_merge_node (struct btree *t, unsigned child_slot, + left_node->entry_count += to_shift; + right_node->entry_count -= to_shift; + } +- uintptr_t left_fence; ++ uintptr_type left_fence; + if (btree_node_is_leaf (left_node)) + { + left_fence = right_node->content.entries[0].base - 1; +@@ -718,7 +719,7 @@ btree_merge_node (struct btree *t, unsigned child_slot, + + // Insert an entry. + static bool +-btree_insert (struct btree *t, uintptr_t base, uintptr_t size, ++btree_insert (struct btree *t, uintptr_type base, uintptr_type size, + struct object *ob) + { + // Sanity check. +@@ -747,7 +748,7 @@ btree_insert (struct btree *t, uintptr_t base, uintptr_t size, + // But that is more difficult to implement and frame registration is + // rare anyway, we use simple locking for now. + +- uintptr_t fence = max_separator; ++ uintptr_type fence = max_separator; + while (btree_node_is_inner (iter)) + { + // Use eager splits to avoid lock coupling up. +@@ -790,7 +791,7 @@ btree_insert (struct btree *t, uintptr_t base, uintptr_t size, + + // Remove an entry. + static struct object * +-btree_remove (struct btree *t, uintptr_t base) ++btree_remove (struct btree *t, uintptr_type base) + { + // Access the root. + version_lock_lock_exclusive (&(t->root_lock)); +@@ -838,7 +839,7 @@ btree_remove (struct btree *t, uintptr_t base) + + // Find the corresponding entry for the given address. + static struct object * +-btree_lookup (const struct btree *t, uintptr_t target_addr) ++btree_lookup (const struct btree *t, uintptr_type target_addr) + { + // Within this function many loads are relaxed atomic loads. + // Use a macro to keep the code reasonable. +@@ -867,7 +868,7 @@ btree_lookup (const struct btree *t, uintptr_t target_addr) + + restart: + struct btree_node *iter; +- uintptr_t lock; ++ uintptr_type lock; + { + // Accessing the root node requires defending against concurrent pointer + // changes Thus we couple rootLock -> lock on root node -> validate rootLock +@@ -878,7 +879,7 @@ restart: + goto restart; + if (!iter) + return NULL; +- uintptr_t child_lock; ++ uintptr_type child_lock; + if ((!btree_node_lock_optimistic (iter, &child_lock)) + || (!version_lock_validate (&(t->root_lock), lock))) + goto restart; +@@ -910,7 +911,7 @@ restart: + + // The node content can change at any point in time, thus we must + // interleave parent and child checks. +- uintptr_t child_lock; ++ uintptr_type child_lock; + if (!btree_node_lock_optimistic (child, &child_lock)) + goto restart; + if (!btree_node_validate (iter, lock)) +diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c +index a591faaa579b5883..f38efd3c09efc3e9 100644 +--- a/libgcc/unwind-dw2-fde.c ++++ b/libgcc/unwind-dw2-fde.c +@@ -42,6 +42,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see + #endif + #endif + ++typedef __UINTPTR_TYPE__ uintptr_type; ++ + #ifdef ATOMIC_FDE_FAST_PATH + #include "unwind-dw2-btree.h" + +@@ -58,7 +60,7 @@ release_registered_frames (void) + } + + static void +-get_pc_range (const struct object *ob, uintptr_t *range); ++get_pc_range (const struct object *ob, uintptr_type *range); + static void + init_object (struct object *ob); + +@@ -124,7 +126,7 @@ __register_frame_info_bases (const void *begin, struct object *ob, + init_object (ob); + + // And register the frame +- uintptr_t range[2]; ++ uintptr_type range[2]; + get_pc_range (ob, range); + btree_insert (®istered_frames, range[0], range[1] - range[0], ob); + #else +@@ -178,7 +180,7 @@ __register_frame_info_table_bases (void *begin, struct object *ob, + init_object (ob); + + // And register the frame +- uintptr_t range[2]; ++ uintptr_type range[2]; + get_pc_range (ob, range); + btree_insert (®istered_frames, range[0], range[1] - range[0], ob); + #else +@@ -237,7 +239,7 @@ __deregister_frame_info_bases (const void *begin) + #ifdef DWARF2_OBJECT_END_PTR_EXTENSION + lookupob.fde_end = NULL; + #endif +- uintptr_t range[2]; ++ uintptr_type range[2]; + get_pc_range (&lookupob, range); + + // And remove +@@ -677,7 +679,7 @@ end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count) + + static size_t + classify_object_over_fdes (struct object *ob, const fde *this_fde, +- uintptr_t *range) ++ uintptr_type *range) + { + const struct dwarf_cie *last_cie = 0; + size_t count = 0; +@@ -892,11 +894,11 @@ init_object (struct object* ob) + #ifdef ATOMIC_FDE_FAST_PATH + /* Get the PC range for lookup */ + static void +-get_pc_range (const struct object *ob, uintptr_t *range) ++get_pc_range (const struct object *ob, uintptr_type *range) + { + // It is safe to cast to non-const object* here as + // classify_object_over_fdes does not modify ob in query mode. +- struct object *ncob = (struct object *) (uintptr_t) ob; ++ struct object *ncob = (struct object *) (uintptr_type) ob; + range[0] = range[1] = 0; + if (ob->s.b.sorted) + { +@@ -1131,7 +1133,7 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases) + const fde *f = NULL; + + #ifdef ATOMIC_FDE_FAST_PATH +- ob = btree_lookup (®istered_frames, (uintptr_t) pc); ++ ob = btree_lookup (®istered_frames, (uintptr_type) pc); + if (!ob) + return NULL; + diff --git a/SOURCES/gcc11-libstdc++-prettyprinter-update-15-tests-48362.patch b/SOURCES/gcc11-libstdc++-prettyprinter-update-15-tests-48362.patch new file mode 100644 index 0000000..deee29c --- /dev/null +++ b/SOURCES/gcc11-libstdc++-prettyprinter-update-15-tests-48362.patch @@ -0,0 +1,27 @@ +commit 190c644c06369766aa2537851ddbf83b1231b65b +Author: Philipp Fent +Date: Sun Sep 4 20:47:34 2022 +0200 + + libstdc++: Fix pretty printer tests of tuple indexes + + Signed-off-by: Philipp Fent + + libstdc++-v3/ChangeLog: + + * testsuite/libstdc++-prettyprinters/48362.cc: Fix expected + tuple indices. + * testsuite/libstdc++-prettyprinters/cxx11.cc: Likewise. 
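Back to the unwinder hunks above: a frame object is registered under the PC range that get_pc_range computes, looked up later by a single PC, and deregistered again by its base address. A toy, array-backed registry sketching only that call shape (demo_reg, register_range, lookup_pc and remove_base are invented stand-ins, not the lock-free b-tree from the patch):

#include <stddef.h>
#include <stdint.h>

struct demo_reg { uintptr_t base, size; void *ob; };

static struct demo_reg demo_table[64];   /* stand-in for the b-tree */
static unsigned demo_used;

static void
register_range (uintptr_t base, uintptr_t size, void *ob)
{
  if (demo_used < 64)
    demo_table[demo_used++] = (struct demo_reg) { base, size, ob };
}

static void *
lookup_pc (uintptr_t pc)
{
  for (unsigned i = 0; i < demo_used; ++i)
    if (pc - demo_table[i].base < demo_table[i].size)  /* base <= pc < base+size */
      return demo_table[i].ob;
  return NULL;
}

static void *
remove_base (uintptr_t base)
{
  for (unsigned i = 0; i < demo_used; ++i)
    if (demo_table[i].base == base)
      {
        void *ob = demo_table[i].ob;
        demo_table[i] = demo_table[--demo_used];       /* order not preserved */
        return ob;
      }
  return NULL;
}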
+ +diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/48362.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/48362.cc +index cc91803e247..af335d0d3c7 100644 +--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/48362.cc ++++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/48362.cc +@@ -29,7 +29,7 @@ main() + // { dg-final { note-test t1 {empty std::tuple} } } + + std::tuple> t2{ "Johnny", 5, {} }; +-// { dg-final { regexp-test t2 {std::tuple containing = {\[1\] = "Johnny", \[2\] = 5, \[3\] = empty std::tuple}} } } ++// { dg-final { regexp-test t2 {std::tuple containing = {\[0\] = "Johnny", \[1\] = 5, \[2\] = empty std::tuple}} } } + + std::cout << "\n"; + return 0; // Mark SPOT diff --git a/SOURCES/gcc11-libstdc++-prettyprinter-update-15-tests.patch b/SOURCES/gcc11-libstdc++-prettyprinter-update-15-tests.patch new file mode 100644 index 0000000..160b154 --- /dev/null +++ b/SOURCES/gcc11-libstdc++-prettyprinter-update-15-tests.patch @@ -0,0 +1,270 @@ + .../testsuite/libstdc++-prettyprinters/compat.cc | 10 +++---- + .../testsuite/libstdc++-prettyprinters/cxx11.cc | 33 +++++++++++++++++----- + .../testsuite/libstdc++-prettyprinters/cxx17.cc | 27 ++++++++---------- + .../libstdc++-prettyprinters/filesystem-ts.cc | 2 +- + .../libstdc++-prettyprinters/libfundts.cc | 25 ++++++++-------- + 5 files changed, 58 insertions(+), 39 deletions(-) + +diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/compat.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/compat.cc +index 35243e5f892..2ef5979834f 100644 +--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/compat.cc ++++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/compat.cc +@@ -1,7 +1,7 @@ + // { dg-options "-g -O0" } + // { dg-do run { target c++11 } } + +-// Copyright (C) 2014-2021 Free Software Foundation, Inc. ++// Copyright (C) 2014-2025 Free Software Foundation, Inc. + // + // This file is part of the GNU ISO C++ Library. This library is free + // software; you can redistribute it and/or modify it under the +@@ -102,13 +102,13 @@ main() + using std::optional; + + optional o; +-// { dg-final { note-test o {std::optional [no contained value]} } } ++// { dg-final { note-test o {std::optional [no contained value]} } } + optional ob{false}; +-// { dg-final { note-test ob {std::optional = {[contained value] = false}} } } ++// { dg-final { note-test ob {std::optional = {[contained value] = false}} } } + optional oi{5}; +-// { dg-final { note-test oi {std::optional = {[contained value] = 5}} } } ++// { dg-final { note-test oi {std::optional = {[contained value] = 5}} } } + optional op{nullptr}; +-// { dg-final { note-test op {std::optional = {[contained value] = 0x0}} } } ++// { dg-final { note-test op {std::optional = {[contained value] = 0x0}} } } + + __builtin_puts(""); + return 0; // Mark SPOT +diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx11.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx11.cc +index 0545076fb6f..23f6d97ddd4 100644 +--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx11.cc ++++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx11.cc +@@ -1,7 +1,7 @@ + // { dg-do run { target c++11 } } + // { dg-options "-g -O0" } + +-// Copyright (C) 2011-2021 Free Software Foundation, Inc. ++// Copyright (C) 2011-2025 Free Software Foundation, Inc. + // + // This file is part of the GNU ISO C++ Library. 
This library is free + // software; you can redistribute it and/or modify it under the +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include "../util/testsuite_allocator.h" // NullablePointer + + typedef std::tuple ExTuple; +@@ -62,6 +63,11 @@ struct datum + + std::unique_ptr global; + ++struct custom_cat : std::error_category { ++ const char* name() const noexcept { return "miaow"; } ++ std::string message(int) const { return ""; } ++}; ++ + int + main() + { +@@ -165,9 +171,9 @@ main() + // { dg-final { note-test runiq_ptr {std::unique_ptr = {get() = 0x0}} } } + + ExTuple tpl(6,7); +-// { dg-final { note-test tpl {std::tuple containing = {[1] = 6, [2] = 7}} } } ++// { dg-final { note-test tpl {std::tuple containing = {[0] = 6, [1] = 7}} } } + ExTuple &rtpl = tpl; +-// { dg-final { note-test rtpl {std::tuple containing = {[1] = 6, [2] = 7}} } } ++// { dg-final { note-test rtpl {std::tuple containing = {[0] = 6, [1] = 7}} } } + + std::error_code e0; + // { dg-final { note-test e0 {std::error_code = { }} } } +@@ -178,10 +184,7 @@ main() + std::error_condition ecinval = std::make_error_condition(std::errc::invalid_argument); + // { dg-final { note-test ecinval {std::error_condition = {"generic": EINVAL}} } } + +- struct custom_cat : std::error_category { +- const char* name() const noexcept { return "miaow"; } +- std::string message(int) const { return ""; } +- } cat; ++ custom_cat cat; + std::error_code emiaow(42, cat); + // { dg-final { note-test emiaow {std::error_code = {custom_cat: 42}} } } + std::error_condition ecmiaow(42, cat); +@@ -197,6 +200,22 @@ main() + std::initializer_list il = {3, 4}; + // { dg-final { note-test il {std::initializer_list of length 2 = {3, 4}} } } + ++ std::atomic ai{100}; ++ // { dg-final { note-test ai {std::atomic = { 100 }} } } ++ long l{}; ++ std::atomic ap{&l}; ++ // { dg-final { regexp-test ap {std::atomic.long \*. = { 0x.* }} } } ++ struct Value { int i, j; }; ++ std::atomic av{{8, 9}}; ++ // { dg-final { note-test av {std::atomic = { {i = 8, j = 9} }} } } ++ ++ std::integral_constant one; ++ // { dg-final { note-test one {std::integral_constant} } } ++ std::integral_constant truth; ++ // { dg-final { note-test truth {std::true_type} } } ++ std::integral_constant lies; ++ // { dg-final { note-test lies {std::false_type} } } ++ + placeholder(""); // Mark SPOT + use(efl); + use(fl); +diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx17.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx17.cc +index 72c66d3b785..6dd2b60c0a5 100644 +--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx17.cc ++++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx17.cc +@@ -1,7 +1,7 @@ + // { dg-options "-g -O0" } + // { dg-do run { target c++17 } } + +-// Copyright (C) 2014-2021 Free Software Foundation, Inc. ++// Copyright (C) 2014-2025 Free Software Foundation, Inc. + // + // This file is part of the GNU ISO C++ Library. This library is free + // software; you can redistribute it and/or modify it under the +@@ -18,9 +18,6 @@ + // with this library; see the file COPYING3. If not see + // . + +-// Type printers only recognize the old std::string for now. 
+-#define _GLIBCXX_USE_CXX11_ABI 0 +- + #include + #include + #include +@@ -53,18 +50,18 @@ main() + // { dg-final { note-test str "\"string\"" } } + + optional o; +-// { dg-final { note-test o {std::optional [no contained value]} } } ++// { dg-final { note-test o {std::optional [no contained value]} } } + optional ob{false}; +-// { dg-final { note-test ob {std::optional = {[contained value] = false}} } } ++// { dg-final { note-test ob {std::optional = {[contained value] = false}} } } + optional oi{5}; +-// { dg-final { note-test oi {std::optional = {[contained value] = 5}} } } ++// { dg-final { note-test oi {std::optional = {[contained value] = 5}} } } + optional op{nullptr}; +-// { dg-final { note-test op {std::optional = {[contained value] = 0x0}} } } ++// { dg-final { note-test op {std::optional = {[contained value] = 0x0}} } } + optional> om; + om = std::map{ {1, 2.}, {3, 4.}, {5, 6.} }; +-// { dg-final { regexp-test om {std::optional> containing std::(__debug::)?map with 3 elements = {\[1\] = 2, \[3\] = 4, \[5\] = 6}} } } ++// { dg-final { regexp-test om {std::optional containing std::(__debug::)?map with 3 elements = {\[1\] = 2, \[3\] = 4, \[5\] = 6}} } } + optional os{ "stringy" }; +-// { dg-final { note-test os {std::optional = {[contained value] = "stringy"}} } } ++// { dg-final { note-test os {std::optional = {[contained value] = "stringy"}} } } + + any a; + // { dg-final { note-test a {std::any [no contained value]} } } +@@ -86,18 +83,18 @@ main() + + struct S { operator int() { throw 42; }}; + variant v0; +-// { dg-final { note-test v0 {std::variant [index 0] = {0}} } } ++// { dg-final { note-test v0 {std::variant [index 0] = {0}} } } + variant v1{ 0.5f }; +-// { dg-final { note-test v1 {std::variant [index 0] = {0.5}} } } ++// { dg-final { note-test v1 {std::variant [index 0] = {0.5}} } } + variant v2; + try { + v2.emplace<1>(S()); + } catch (int) { } +-// { dg-final { note-test v2 {std::variant [no contained value]} } } ++// { dg-final { note-test v2 {std::variant [no contained value]} } } + variant v3{ 3 }; +-// { dg-final { note-test v3 {std::variant [index 1] = {3}} } } ++// { dg-final { note-test v3 {std::variant [index 1] = {3}} } } + variant v4{ str }; +-// { dg-final { note-test v4 {std::variant [index 2] = {"string"}} } } ++// { dg-final { note-test v4 {std::variant [index 2] = {"string"}} } } + + map m{ {1, "one"} }; + map::node_type n0; +diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/filesystem-ts.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/filesystem-ts.cc +index 00d100bd066..3221f2df90d 100644 +--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/filesystem-ts.cc ++++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/filesystem-ts.cc +@@ -2,7 +2,7 @@ + // { dg-do run { target c++11 } } + // { dg-require-filesystem-ts "" } + +-// Copyright (C) 2020-2021 Free Software Foundation, Inc. ++// Copyright (C) 2020-2025 Free Software Foundation, Inc. + // + // This file is part of the GNU ISO C++ Library. This library is free + // software; you can redistribute it and/or modify it under the +diff --git a/libstdc++-v3/testsuite/libstdc++-prettyprinters/libfundts.cc b/libstdc++-v3/testsuite/libstdc++-prettyprinters/libfundts.cc +index 85005c0617f..bfb86885457 100644 +--- a/libstdc++-v3/testsuite/libstdc++-prettyprinters/libfundts.cc ++++ b/libstdc++-v3/testsuite/libstdc++-prettyprinters/libfundts.cc +@@ -1,7 +1,7 @@ + // { dg-do run { target c++14 } } + // { dg-options "-g -O0" } + +-// Copyright (C) 2014-2021 Free Software Foundation, Inc. 
++// Copyright (C) 2014-2025 Free Software Foundation, Inc. + // + // This file is part of the GNU ISO C++ Library. This library is free + // software; you can redistribute it and/or modify it under the +@@ -18,9 +18,6 @@ + // with this library; see the file COPYING3. If not see + // . + +-// Type printers only recognize the old std::string for now. +-#define _GLIBCXX_USE_CXX11_ABI 0 +- + #include + #include + #include +@@ -35,22 +32,28 @@ using std::experimental::string_view; + int + main() + { ++ // Ensure debug info for std::string is issued in the local ++ // translation unit, so that GDB won't pick up any alternate ++ // std::string notion that might be present in libstdc++.so. ++ std::string bah = "hi"; ++ (void)bah; ++ + string_view str = "string"; + // { dg-final { note-test str "\"string\"" } } + + optional o; +-// { dg-final { note-test o {std::experimental::optional [no contained value]} } } ++// { dg-final { note-test o {std::experimental::optional [no contained value]} } } + optional ob{false}; +-// { dg-final { note-test ob {std::experimental::optional = {[contained value] = false}} } } ++// { dg-final { note-test ob {std::experimental::optional = {[contained value] = false}} } } + optional oi{5}; +-// { dg-final { note-test oi {std::experimental::optional = {[contained value] = 5}} } } ++// { dg-final { note-test oi {std::experimental::optional = {[contained value] = 5}} } } + optional op{nullptr}; +-// { dg-final { note-test op {std::experimental::optional = {[contained value] = 0x0}} } } ++// { dg-final { note-test op {std::experimental::optional = {[contained value] = 0x0}} } } + optional> om; + om = std::map{ {1, 2.}, {3, 4.}, {5, 6.} }; +-// { dg-final { regexp-test om {std::experimental::optional> containing std::(__debug::)?map with 3 elements = {\[1\] = 2, \[3\] = 4, \[5\] = 6}} } } ++// { dg-final { regexp-test om {std::experimental::optional containing std::(__debug::)?map with 3 elements = {\[1\] = 2, \[3\] = 4, \[5\] = 6}} } } + optional os{ "stringy" }; +-// { dg-final { note-test os {std::experimental::optional = {[contained value] = "stringy"}} { xfail { c++20 || debug_mode } } } } ++// { dg-final { note-test os {std::experimental::optional = {[contained value] = "stringy"}} } } + + any a; + // { dg-final { note-test a {std::experimental::any [no contained value]} } } +@@ -61,7 +64,7 @@ main() + any ap = (void*)nullptr; + // { dg-final { note-test ap {std::experimental::any containing void * = {[contained value] = 0x0}} } } + any as = *os; +-// { dg-final { note-test as {std::experimental::any containing std::string = {[contained value] = "stringy"}} { xfail { c++20 || debug_mode } } } } ++// { dg-final { note-test as {std::experimental::any containing std::string = {[contained value] = "stringy"}} } } + any as2("stringiest"); + // { dg-final { regexp-test as2 {std::experimental::any containing const char \* = {\[contained value\] = 0x[[:xdigit:]]+ "stringiest"}} } } + any am = *om; diff --git a/SOURCES/gcc11-libstdc++-prettyprinter-update-15.patch b/SOURCES/gcc11-libstdc++-prettyprinter-update-15.patch new file mode 100644 index 0000000..c3f4fc1 --- /dev/null +++ b/SOURCES/gcc11-libstdc++-prettyprinter-update-15.patch @@ -0,0 +1,4066 @@ + libstdc++-v3/python/libstdcxx/v6/__init__.py | 4 +- + libstdc++-v3/python/libstdcxx/v6/printers.py | 2282 +++++++++++++++++--------- + libstdc++-v3/python/libstdcxx/v6/xmethods.py | 127 +- + 3 files changed, 1618 insertions(+), 795 deletions(-) + +diff --git a/libstdc++-v3/python/libstdcxx/v6/__init__.py 
b/libstdc++-v3/python/libstdcxx/v6/__init__.py +index c35e7d36709..5a8f2de195d 100644 +--- a/libstdc++-v3/python/libstdcxx/v6/__init__.py ++++ b/libstdc++-v3/python/libstdcxx/v6/__init__.py +@@ -1,4 +1,4 @@ +-# Copyright (C) 2014-2021 Free Software Foundation, Inc. ++# Copyright (C) 2014-2025 Free Software Foundation, Inc. + + # This program is free software; you can redistribute it and/or modify + # it under the terms of the GNU General Public License as published by +@@ -13,8 +13,6 @@ + # You should have received a copy of the GNU General Public License + # along with this program. If not, see . + +-import gdb +- + # Load the xmethods if GDB supports them. + def gdb_has_xmethods(): + try: +diff --git a/libstdc++-v3/python/libstdcxx/v6/printers.py b/libstdc++-v3/python/libstdcxx/v6/printers.py +index 6d19c4f1be3..5f5963cb595 100644 +--- a/libstdc++-v3/python/libstdcxx/v6/printers.py ++++ b/libstdc++-v3/python/libstdcxx/v6/printers.py +@@ -1,6 +1,6 @@ + # Pretty-printers for libstdc++. + +-# Copyright (C) 2008-2021 Free Software Foundation, Inc. ++# Copyright (C) 2008-2025 Free Software Foundation, Inc. + + # This program is free software; you can redistribute it and/or modify + # it under the terms of the GNU General Public License as published by +@@ -18,9 +18,11 @@ + import gdb + import itertools + import re +-import sys, os, errno ++import sys ++import errno ++import datetime + +-### Python 2 + Python 3 compatibility code ++# Python 2 + Python 3 compatibility code + + # Resources about compatibility: + # +@@ -37,15 +39,16 @@ import sys, os, errno + # + + if sys.version_info[0] > 2: +- ### Python 3 stuff ++ # Python 3 stuff + Iterator = object + # Python 3 folds these into the normal functions. + imap = map + izip = zip + # Also, int subsumes long + long = int ++ _utc_timezone = datetime.timezone.utc + else: +- ### Python 2 stuff ++ # Python 2 stuff + class Iterator: + """Compatibility mixin for iterators + +@@ -63,6 +66,20 @@ else: + # In Python 2, we still need these from itertools + from itertools import imap, izip + ++ # Python 2 does not provide the datetime.UTC singleton. ++ class UTC(datetime.tzinfo): ++ """Concrete tzinfo class representing the UTC time zone.""" ++ ++ def utcoffset(self, dt): ++ return datetime.timedelta(0) ++ ++ def tzname(self, dt): ++ return "UTC" ++ ++ def dst(self, dt): ++ return datetime.timedelta(0) ++ _utc_timezone = UTC() ++ + # Try to use the new-style pretty-printing if available. + _use_gdb_pp = True + try: +@@ -79,9 +96,17 @@ try: + except ImportError: + pass + ++# Use the base class if available. ++if hasattr(gdb, 'ValuePrinter'): ++ printer_base = gdb.ValuePrinter ++else: ++ printer_base = object ++ + # Starting with the type ORIG, search for the member type NAME. This + # handles searching upward through superclasses. This is needed to + # work around http://sourceware.org/bugzilla/show_bug.cgi?id=13615. ++ ++ + def find_type(orig, name): + typ = orig.strip_typedefs() + while True: +@@ -100,11 +125,13 @@ def find_type(orig, name): + else: + raise ValueError("Cannot find type %s::%s" % (str(orig), name)) + ++ + _versioned_namespace = '__8::' + ++ + def lookup_templ_spec(templ, *args): + """ +- Lookup template specialization templ ++ Lookup template specialization templ. + """ + t = '{}<{}>'.format(templ, ', '.join([str(a) for a in args])) + try: +@@ -112,7 +139,7 @@ def lookup_templ_spec(templ, *args): + except gdb.error as e: + # Type not found, try again in versioned namespace. 
+ global _versioned_namespace +- if _versioned_namespace and _versioned_namespace not in templ: ++ if _versioned_namespace not in templ: + t = t.replace('::', '::' + _versioned_namespace, 1) + try: + return gdb.lookup_type(t) +@@ -125,14 +152,21 @@ def lookup_templ_spec(templ, *args): + # see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91997 for details. + def lookup_node_type(nodename, containertype): + """ +- Lookup specialization of template NODENAME corresponding to CONTAINERTYPE. +- e.g. if NODENAME is '_List_node' and CONTAINERTYPE is std::list +- then return the type std::_List_node. +- Returns None if not found. ++ Lookup specialization of template nodename corresponding to containertype. ++ ++ nodename - The name of a class template, as a String ++ containertype - The container, as a gdb.Type ++ ++ Return a gdb.Type for the corresponding specialization of nodename, ++ or None if the type cannot be found. ++ ++ e.g. lookup_node_type('_List_node', gdb.lookup_type('std::list')) ++ will return a gdb.Type for the type std::_List_node. + """ + # If nodename is unqualified, assume it's in namespace std. + if '::' not in nodename: + nodename = 'std::' + nodename ++ # Use either containertype's value_type or its first template argument. + try: + valtype = find_type(containertype, 'value_type') + except: +@@ -140,7 +174,7 @@ def lookup_node_type(nodename, containertype): + valtype = valtype.strip_typedefs() + try: + return lookup_templ_spec(nodename, valtype) +- except gdb.error as e: ++ except gdb.error: + # For debug mode containers the node is in std::__cxx1998. + if is_member_of_namespace(nodename, 'std'): + if is_member_of_namespace(containertype, 'std::__cxx1998', +@@ -152,6 +186,7 @@ def lookup_node_type(nodename, containertype): + pass + return None + ++ + def is_member_of_namespace(typ, *namespaces): + """ + Test whether a type is a member of one of the specified namespaces. +@@ -165,23 +200,38 @@ def is_member_of_namespace(typ, *namespaces): + return True + return False + ++ + def is_specialization_of(x, template_name): +- "Test if a type is a given template instantiation." ++ """ ++ Test whether a type is a specialization of the named class template. ++ The type can be specified as a string or a gdb.Type object. ++ The template should be the name of a class template as a string, ++ without any 'std' qualification. ++ """ + global _versioned_namespace + if isinstance(x, gdb.Type): + x = x.tag +- if _versioned_namespace: +- template_name = '(%s)?%s' % (_versioned_namespace, template_name) ++ template_name = '(%s)?%s' % (_versioned_namespace, template_name) + return re.match('^std::%s<.*>$' % template_name, x) is not None + ++ + def strip_versioned_namespace(typename): + global _versioned_namespace +- if _versioned_namespace: +- return typename.replace(_versioned_namespace, '') +- return typename ++ return typename.replace(_versioned_namespace, '') ++ ++ ++def strip_fundts_namespace(typ): ++ """Remove "fundamentals_vN" inline namespace from qualified type name.""" ++ pattern = r'^std::experimental::fundamentals_v\d::' ++ repl = 'std::experimental::' ++ if sys.version_info[0] == 2: ++ return re.sub(pattern, repl, typ, 1) ++ else: # Technically this needs Python 3.1 but nobody should be using 3.0 ++ return re.sub(pattern, repl, typ, count=1) ++ + + def strip_inline_namespaces(type_str): +- "Remove known inline namespaces from the canonical name of a type." 
++ """Remove known inline namespaces from the canonical name of a type.""" + type_str = strip_versioned_namespace(type_str) + type_str = type_str.replace('std::__cxx11::', 'std::') + expt_ns = 'std::experimental::' +@@ -191,8 +241,9 @@ def strip_inline_namespaces(type_str): + type_str = type_str.replace(fs_ns + 'v1::', fs_ns) + return type_str + ++ + def get_template_arg_list(type_obj): +- "Return a type's template arguments as a list" ++ """Return a type's template arguments as a list.""" + n = 0 + template_args = [] + while True: +@@ -202,54 +253,75 @@ def get_template_arg_list(type_obj): + return template_args + n += 1 + ++ + class SmartPtrIterator(Iterator): +- "An iterator for smart pointer types with a single 'child' value" ++ """An iterator for smart pointer types with a single 'child' value.""" + + def __init__(self, val): +- self.val = val ++ self._val = val + + def __iter__(self): + return self + + def __next__(self): +- if self.val is None: ++ if self._val is None: + raise StopIteration +- self.val, val = None, self.val ++ self._val, val = None, self._val + return ('get()', val) + +-class SharedPointerPrinter: +- "Print a shared_ptr or weak_ptr" + +- def __init__ (self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val +- self.pointer = val['_M_ptr'] ++class SharedPointerPrinter(printer_base): ++ """ ++ Print a shared_ptr, weak_ptr, atomic, or atomic. ++ """ + +- def children (self): +- return SmartPtrIterator(self.pointer) ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ self._pointer = val['_M_ptr'] + +- def to_string (self): ++ def children(self): ++ return SmartPtrIterator(self._pointer) ++ ++ # Return the _Sp_counted_base<>* that holds the refcounts. ++ def _get_refcounts(self): ++ if self._typename == 'std::atomic': ++ # A tagged pointer is stored as uintptr_t. ++ ptr_val = self._val['_M_refcount']['_M_val']['_M_i'] ++ ptr_val = ptr_val - (ptr_val % 2) # clear lock bit ++ ptr_type = find_type(self._val['_M_refcount'].type, 'pointer') ++ return ptr_val.cast(ptr_type) ++ return self._val['_M_refcount']['_M_pi'] ++ ++ def to_string(self): + state = 'empty' +- refcounts = self.val['_M_refcount']['_M_pi'] ++ refcounts = self._get_refcounts() ++ targ = self._val.type.template_argument(0) ++ targ = strip_versioned_namespace(str(targ)) ++ + if refcounts != 0: + usecount = refcounts['_M_use_count'] + weakcount = refcounts['_M_weak_count'] + if usecount == 0: + state = 'expired, weak count %d' % weakcount + else: +- state = 'use count %d, weak count %d' % (usecount, weakcount - 1) +- return '%s<%s> (%s)' % (self.typename, str(self.val.type.template_argument(0)), state) ++ state = 'use count %d, weak count %d' % ( ++ usecount, weakcount - 1) ++ return '%s<%s> (%s)' % (self._typename, targ, state) ++ + + def _tuple_impl_get(val): +- "Return the tuple element stored in a _Tuple_impl base class." 
++ """Return the tuple element stored in a _Tuple_impl base class.""" + bases = val.type.fields() + if not bases[-1].is_base_class: +- raise ValueError("Unsupported implementation for std::tuple: %s" % str(val.type)) ++ raise ValueError( ++ "Unsupported implementation for std::tuple: %s" % str(val.type)) + # Get the _Head_base base class: + head_base = val.cast(bases[-1].type) + fields = head_base.type.fields() + if len(fields) == 0: +- raise ValueError("Unsupported implementation for std::tuple: %s" % str(val.type)) ++ raise ValueError( ++ "Unsupported implementation for std::tuple: %s" % str(val.type)) + if fields[0].name == '_M_head_impl': + # The tuple element is the _Head_base::_M_head_impl data member. + return head_base['_M_head_impl'] +@@ -258,10 +330,12 @@ def _tuple_impl_get(val): + # Cast to that empty base class. + return head_base.cast(fields[0].type) + else: +- raise ValueError("Unsupported implementation for std::tuple: %s" % str(val.type)) ++ raise ValueError( ++ "Unsupported implementation for std::tuple: %s" % str(val.type)) ++ + + def tuple_get(n, val): +- "Return the result of std::get(val) on a std::tuple" ++ """Return the result of std::get(val) on a std::tuple.""" + tuple_size = len(get_template_arg_list(val.type)) + if n > tuple_size: + raise ValueError("Out of range index for std::get on std::tuple") +@@ -273,41 +347,46 @@ def tuple_get(n, val): + n -= 1 + return _tuple_impl_get(node) + ++ + def unique_ptr_get(val): +- "Return the result of val.get() on a std::unique_ptr" ++ """Return the result of val.get() on a std::unique_ptr.""" + # std::unique_ptr contains a std::tuple, + # either as a direct data member _M_t (the old implementation) + # or within a data member of type __uniq_ptr_data. + impl_type = val.type.fields()[0].type.strip_typedefs() + # Check for new implementations first: + if is_specialization_of(impl_type, '__uniq_ptr_data') \ +- or is_specialization_of(impl_type, '__uniq_ptr_impl'): ++ or is_specialization_of(impl_type, '__uniq_ptr_impl'): + tuple_member = val['_M_t']['_M_t'] + elif is_specialization_of(impl_type, 'tuple'): + tuple_member = val['_M_t'] + else: +- raise ValueError("Unsupported implementation for unique_ptr: %s" % str(impl_type)) ++ raise ValueError( ++ "Unsupported implementation for unique_ptr: %s" % str(impl_type)) + return tuple_get(0, tuple_member) + +-class UniquePointerPrinter: +- "Print a unique_ptr" + +- def __init__ (self, typename, val): +- self.val = val ++class UniquePointerPrinter(printer_base): ++ """Print a unique_ptr.""" + +- def children (self): +- return SmartPtrIterator(unique_ptr_get(self.val)) ++ def __init__(self, typename, val): ++ self._val = val ++ ++ def children(self): ++ return SmartPtrIterator(unique_ptr_get(self._val)) + + def to_string(self): +- t = self.val.type.template_argument(0) ++ t = self._val.type.template_argument(0) + return 'std::unique_ptr<{}>'.format(str(t)) + ++ + def get_value_from_aligned_membuf(buf, valtype): +- """Returns the value held in a __gnu_cxx::__aligned_membuf.""" ++ """Return the value held in a __gnu_cxx::__aligned_membuf.""" + return buf['_M_storage'].address.cast(valtype.pointer()).dereference() + ++ + def get_value_from_list_node(node): +- """Returns the value held in an _List_node<_Val>""" ++ """Return the value held in an _List_node<_Val>.""" + try: + member = node.type.fields()[1].name + if member == '_M_data': +@@ -321,265 +400,284 @@ def get_value_from_list_node(node): + pass + raise ValueError("Unsupported implementation for %s" % str(node.type)) + +-class 
StdListPrinter: +- "Print a std::list" ++ ++class StdListPrinter(printer_base): ++ """Print a std::list.""" + + class _iterator(Iterator): + def __init__(self, nodetype, head): +- self.nodetype = nodetype +- self.base = head['_M_next'] +- self.head = head.address +- self.count = 0 ++ self._nodetype = nodetype ++ self._base = head['_M_next'] ++ self._head = head.address ++ self._count = 0 + + def __iter__(self): + return self + + def __next__(self): +- if self.base == self.head: ++ if self._base == self._head: + raise StopIteration +- elt = self.base.cast(self.nodetype).dereference() +- self.base = elt['_M_next'] +- count = self.count +- self.count = self.count + 1 ++ elt = self._base.cast(self._nodetype).dereference() ++ self._base = elt['_M_next'] ++ count = self._count ++ self._count = self._count + 1 + val = get_value_from_list_node(elt) + return ('[%d]' % count, val) + + def __init__(self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val + + def children(self): +- nodetype = lookup_node_type('_List_node', self.val.type).pointer() +- return self._iterator(nodetype, self.val['_M_impl']['_M_node']) ++ nodetype = lookup_node_type('_List_node', self._val.type).pointer() ++ return self._iterator(nodetype, self._val['_M_impl']['_M_node']) + + def to_string(self): +- headnode = self.val['_M_impl']['_M_node'] ++ headnode = self._val['_M_impl']['_M_node'] + if headnode['_M_next'] == headnode.address: +- return 'empty %s' % (self.typename) +- return '%s' % (self.typename) ++ return 'empty %s' % (self._typename) ++ return '%s' % (self._typename) ++ + +-class NodeIteratorPrinter: ++class NodeIteratorPrinter(printer_base): + def __init__(self, typename, val, contname, nodename): +- self.val = val +- self.typename = typename +- self.contname = contname +- self.nodetype = lookup_node_type(nodename, val.type) ++ self._val = val ++ self._typename = typename ++ self._contname = contname ++ self._nodetype = lookup_node_type(nodename, val.type) + + def to_string(self): +- if not self.val['_M_node']: +- return 'non-dereferenceable iterator for std::%s' % (self.contname) +- node = self.val['_M_node'].cast(self.nodetype.pointer()).dereference() ++ if not self._val['_M_node']: ++ return 'non-dereferenceable iterator for std::%s' % (self._contname) ++ node = self._val['_M_node'].cast( ++ self._nodetype.pointer()).dereference() + return str(get_value_from_list_node(node)) + ++ + class StdListIteratorPrinter(NodeIteratorPrinter): +- "Print std::list::iterator" ++ """Print std::list::iterator.""" + + def __init__(self, typename, val): + NodeIteratorPrinter.__init__(self, typename, val, 'list', '_List_node') + ++ + class StdFwdListIteratorPrinter(NodeIteratorPrinter): +- "Print std::forward_list::iterator" ++ """Print std::forward_list::iterator.""" + + def __init__(self, typename, val): + NodeIteratorPrinter.__init__(self, typename, val, 'forward_list', + '_Fwd_list_node') + +-class StdSlistPrinter: +- "Print a __gnu_cxx::slist" ++ ++class StdSlistPrinter(printer_base): ++ """Print a __gnu_cxx::slist.""" + + class _iterator(Iterator): + def __init__(self, nodetype, head): +- self.nodetype = nodetype +- self.base = head['_M_head']['_M_next'] +- self.count = 0 ++ self._nodetype = nodetype ++ self._base = head['_M_head']['_M_next'] ++ self._count = 0 + + def __iter__(self): + return self + + def __next__(self): +- if self.base == 0: ++ if self._base == 0: + raise StopIteration +- elt = 
self.base.cast(self.nodetype).dereference() +- self.base = elt['_M_next'] +- count = self.count +- self.count = self.count + 1 ++ elt = self._base.cast(self._nodetype).dereference() ++ self._base = elt['_M_next'] ++ count = self._count ++ self._count = self._count + 1 + return ('[%d]' % count, elt['_M_data']) + + def __init__(self, typename, val): +- self.val = val ++ self._val = val + + def children(self): +- nodetype = lookup_node_type('__gnu_cxx::_Slist_node', self.val.type) +- return self._iterator(nodetype.pointer(), self.val) ++ nodetype = lookup_node_type('__gnu_cxx::_Slist_node', self._val.type) ++ return self._iterator(nodetype.pointer(), self._val) + + def to_string(self): +- if self.val['_M_head']['_M_next'] == 0: ++ if self._val['_M_head']['_M_next'] == 0: + return 'empty __gnu_cxx::slist' + return '__gnu_cxx::slist' + +-class StdSlistIteratorPrinter: +- "Print __gnu_cxx::slist::iterator" ++ ++class StdSlistIteratorPrinter(printer_base): ++ """Print __gnu_cxx::slist::iterator.""" + + def __init__(self, typename, val): +- self.val = val ++ self._val = val + + def to_string(self): +- if not self.val['_M_node']: ++ if not self._val['_M_node']: + return 'non-dereferenceable iterator for __gnu_cxx::slist' +- nodetype = lookup_node_type('__gnu_cxx::_Slist_node', self.val.type).pointer() +- return str(self.val['_M_node'].cast(nodetype).dereference()['_M_data']) ++ nodetype = lookup_node_type( ++ '__gnu_cxx::_Slist_node', self._val.type).pointer() ++ return str(self._val['_M_node'].cast(nodetype).dereference()['_M_data']) ++ + +-class StdVectorPrinter: +- "Print a std::vector" ++class StdVectorPrinter(printer_base): ++ """Print a std::vector.""" + + class _iterator(Iterator): +- def __init__ (self, start, finish, bitvec): +- self.bitvec = bitvec ++ def __init__(self, start, finish, bitvec): ++ self._bitvec = bitvec + if bitvec: +- self.item = start['_M_p'] +- self.so = 0 +- self.finish = finish['_M_p'] +- self.fo = finish['_M_offset'] +- itype = self.item.dereference().type +- self.isize = 8 * itype.sizeof ++ self._item = start['_M_p'] ++ self._so = 0 ++ self._finish = finish['_M_p'] ++ self._fo = finish['_M_offset'] ++ itype = self._item.dereference().type ++ self._isize = 8 * itype.sizeof + else: +- self.item = start +- self.finish = finish +- self.count = 0 ++ self._item = start ++ self._finish = finish ++ self._count = 0 + + def __iter__(self): + return self + + def __next__(self): +- count = self.count +- self.count = self.count + 1 +- if self.bitvec: +- if self.item == self.finish and self.so >= self.fo: ++ count = self._count ++ self._count = self._count + 1 ++ if self._bitvec: ++ if self._item == self._finish and self._so >= self._fo: + raise StopIteration +- elt = bool(self.item.dereference() & (1 << self.so)) +- self.so = self.so + 1 +- if self.so >= self.isize: +- self.item = self.item + 1 +- self.so = 0 ++ elt = bool(self._item.dereference() & (1 << self._so)) ++ self._so = self._so + 1 ++ if self._so >= self._isize: ++ self._item = self._item + 1 ++ self._so = 0 + return ('[%d]' % count, elt) + else: +- if self.item == self.finish: ++ if self._item == self._finish: + raise StopIteration +- elt = self.item.dereference() +- self.item = self.item + 1 ++ elt = self._item.dereference() ++ self._item = self._item + 1 + return ('[%d]' % count, elt) + + def __init__(self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val +- self.is_bool = val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL ++ self._typename = 
strip_versioned_namespace(typename) ++ self._val = val ++ self._is_bool = val.type.template_argument( ++ 0).code == gdb.TYPE_CODE_BOOL + + def children(self): +- return self._iterator(self.val['_M_impl']['_M_start'], +- self.val['_M_impl']['_M_finish'], +- self.is_bool) ++ return self._iterator(self._val['_M_impl']['_M_start'], ++ self._val['_M_impl']['_M_finish'], ++ self._is_bool) + + def to_string(self): +- start = self.val['_M_impl']['_M_start'] +- finish = self.val['_M_impl']['_M_finish'] +- end = self.val['_M_impl']['_M_end_of_storage'] +- if self.is_bool: +- start = self.val['_M_impl']['_M_start']['_M_p'] +- finish = self.val['_M_impl']['_M_finish']['_M_p'] +- fo = self.val['_M_impl']['_M_finish']['_M_offset'] ++ start = self._val['_M_impl']['_M_start'] ++ finish = self._val['_M_impl']['_M_finish'] ++ end = self._val['_M_impl']['_M_end_of_storage'] ++ if self._is_bool: ++ start = self._val['_M_impl']['_M_start']['_M_p'] ++ finish = self._val['_M_impl']['_M_finish']['_M_p'] ++ fo = self._val['_M_impl']['_M_finish']['_M_offset'] + itype = start.dereference().type + bl = 8 * itype.sizeof +- length = bl * (finish - start) + fo ++ length = bl * (finish - start) + fo + capacity = bl * (end - start) + return ('%s of length %d, capacity %d' +- % (self.typename, int (length), int (capacity))) ++ % (self._typename, int(length), int(capacity))) + else: + return ('%s of length %d, capacity %d' +- % (self.typename, int (finish - start), int (end - start))) ++ % (self._typename, int(finish - start), int(end - start))) + + def display_hint(self): + return 'array' + +-class StdVectorIteratorPrinter: +- "Print std::vector::iterator" ++ ++class StdVectorIteratorPrinter(printer_base): ++ """Print std::vector::iterator.""" + + def __init__(self, typename, val): +- self.val = val ++ self._val = val + + def to_string(self): +- if not self.val['_M_current']: ++ if not self._val['_M_current']: + return 'non-dereferenceable iterator for std::vector' +- return str(self.val['_M_current'].dereference()) ++ return str(self._val['_M_current'].dereference()) ++ + +-class StdBitIteratorPrinter: +- "Print std::vector's _Bit_iterator and _Bit_const_iterator" ++class StdBitIteratorPrinter(printer_base): ++ """Print std::vector's _Bit_iterator and _Bit_const_iterator.""" + + def __init__(self, typename, val): +- self.val = val ++ self._val = val + + def to_string(self): +- if not self.val['_M_p']: ++ if not self._val['_M_p']: + return 'non-dereferenceable iterator for std::vector' +- return bool(self.val['_M_p'].dereference() +- & (1 << self.val['_M_offset'])) ++ return bool(self._val['_M_p'].dereference() ++ & (1 << self._val['_M_offset'])) ++ + +-class StdBitReferencePrinter: +- "Print std::vector::reference" ++class StdBitReferencePrinter(printer_base): ++ """Print std::vector::reference.""" + + def __init__(self, typename, val): +- self.val = val ++ self._val = val + + def to_string(self): +- if not self.val['_M_p']: ++ if not self._val['_M_p']: ++ # PR libstdc++/115098 removed the reference default constructor ++ # that this case relates to. New code should never need this, ++ # but we still handle it for compatibility with old binaries. 
+ return 'invalid std::vector::reference' +- return bool(self.val['_M_p'].dereference() & (self.val['_M_mask'])) ++ return bool(self._val['_M_p'].dereference() & (self._val['_M_mask'])) + +-class StdTuplePrinter: +- "Print a std::tuple" ++ ++class StdTuplePrinter(printer_base): ++ """Print a std::tuple.""" + + class _iterator(Iterator): + @staticmethod +- def _is_nonempty_tuple (nodes): +- if len (nodes) == 2: +- if is_specialization_of (nodes[1].type, '__tuple_base'): ++ def _is_nonempty_tuple(nodes): ++ if len(nodes) == 2: ++ if is_specialization_of(nodes[1].type, '__tuple_base'): + return True +- elif len (nodes) == 1: ++ elif len(nodes) == 1: + return True +- elif len (nodes) == 0: ++ elif len(nodes) == 0: + return False +- raise ValueError("Top of tuple tree does not consist of a single node.") ++ raise ValueError( ++ "Top of tuple tree does not consist of a single node.") + +- def __init__ (self, head): +- self.head = head ++ def __init__(self, head): ++ self._head = head + + # Set the base class as the initial head of the + # tuple. +- nodes = self.head.type.fields () +- if self._is_nonempty_tuple (nodes): ++ nodes = self._head.type.fields() ++ if self._is_nonempty_tuple(nodes): + # Set the actual head to the first pair. +- self.head = self.head.cast (nodes[0].type) +- self.count = 0 ++ self._head = self._head.cast(nodes[0].type) ++ self._count = 0 + +- def __iter__ (self): ++ def __iter__(self): + return self + +- def __next__ (self): ++ def __next__(self): + # Check for further recursions in the inheritance tree. +- # For a GCC 5+ tuple self.head is None after visiting all nodes: +- if not self.head: ++ # For a GCC 5+ tuple self._head is None after visiting all nodes: ++ if not self._head: + raise StopIteration +- nodes = self.head.type.fields () ++ nodes = self._head.type.fields() + # For a GCC 4.x tuple there is a final node with no fields: +- if len (nodes) == 0: ++ if len(nodes) == 0: + raise StopIteration + # Check that this iteration has an expected structure. +- if len (nodes) > 2: +- raise ValueError("Cannot parse more than 2 nodes in a tuple tree.") ++ if len(nodes) > 2: ++ raise ValueError( ++ "Cannot parse more than 2 nodes in a tuple tree.") + +- if len (nodes) == 1: ++ if len(nodes) == 1: + # This is the last node of a GCC 5+ std::tuple. +- impl = self.head.cast (nodes[0].type) +- self.head = None ++ impl = self._head.cast(nodes[0].type) ++ self._head = None + else: + # Either a node before the last node, or the last node of + # a GCC 4.x tuple (which has an empty parent). +@@ -588,53 +686,55 @@ class StdTuplePrinter: + # - Right node is the actual class contained in the tuple. + + # Process right node. +- impl = self.head.cast (nodes[1].type) ++ impl = self._head.cast(nodes[1].type) + + # Process left node and set it as head. +- self.head = self.head.cast (nodes[0].type) ++ self._head = self._head.cast(nodes[0].type) + +- self.count = self.count + 1 ++ self._count = self._count + 1 + + # Finally, check the implementation. If it is + # wrapped in _M_head_impl return that, otherwise return + # the value "as is". 
+- fields = impl.type.fields () +- if len (fields) < 1 or fields[0].name != "_M_head_impl": +- return ('[%d]' % self.count, impl) ++ fields = impl.type.fields() ++ if len(fields) < 1 or fields[0].name != "_M_head_impl": ++ return ('[%d]' % (self._count - 1), impl) + else: +- return ('[%d]' % self.count, impl['_M_head_impl']) ++ return ('[%d]' % (self._count - 1), impl['_M_head_impl']) + +- def __init__ (self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val; ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val + +- def children (self): +- return self._iterator (self.val) ++ def children(self): ++ return self._iterator(self._val) + +- def to_string (self): +- if len (self.val.type.fields ()) == 0: +- return 'empty %s' % (self.typename) +- return '%s containing' % (self.typename) ++ def to_string(self): ++ if len(self._val.type.fields()) == 0: ++ return 'empty %s' % (self._typename) ++ return '%s containing' % (self._typename) + +-class StdStackOrQueuePrinter: +- "Print a std::stack or std::queue" + +- def __init__ (self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.visualizer = gdb.default_visualizer(val['c']) ++class StdStackOrQueuePrinter(printer_base): ++ """Print a std::stack or std::queue.""" + +- def children (self): +- return self.visualizer.children() ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._visualizer = gdb.default_visualizer(val['c']) + +- def to_string (self): +- return '%s wrapping: %s' % (self.typename, +- self.visualizer.to_string()) ++ def children(self): ++ return self._visualizer.children() ++ ++ def to_string(self): ++ return '%s wrapping: %s' % (self._typename, ++ self._visualizer.to_string()) + +- def display_hint (self): +- if hasattr (self.visualizer, 'display_hint'): +- return self.visualizer.display_hint () ++ def display_hint(self): ++ if hasattr(self._visualizer, 'display_hint'): ++ return self._visualizer.display_hint() + return None + ++ + class RbtreeIterator(Iterator): + """ + Turn an RB-tree-based container (std::map, std::set etc.) into +@@ -642,24 +742,24 @@ class RbtreeIterator(Iterator): + """ + + def __init__(self, rbtree): +- self.size = rbtree['_M_t']['_M_impl']['_M_node_count'] +- self.node = rbtree['_M_t']['_M_impl']['_M_header']['_M_left'] +- self.count = 0 ++ self._size = rbtree['_M_t']['_M_impl']['_M_node_count'] ++ self._node = rbtree['_M_t']['_M_impl']['_M_header']['_M_left'] ++ self._count = 0 + + def __iter__(self): + return self + + def __len__(self): +- return int (self.size) ++ return int(self._size) + + def __next__(self): +- if self.count == self.size: ++ if self._count == self._size: + raise StopIteration +- result = self.node +- self.count = self.count + 1 +- if self.count < self.size: ++ result = self._node ++ self._count = self._count + 1 ++ if self._count < self._size: + # Compute the next node. 
+- node = self.node ++ node = self._node + if node.dereference()['_M_right']: + node = node.dereference()['_M_right'] + while node.dereference()['_M_left']: +@@ -671,11 +771,12 @@ class RbtreeIterator(Iterator): + parent = parent.dereference()['_M_parent'] + if node.dereference()['_M_right'] != parent: + node = parent +- self.node = node ++ self._node = node + return result + ++ + def get_value_from_Rb_tree_node(node): +- """Returns the value held in an _Rb_tree_node<_Val>""" ++ """Return the value held in an _Rb_tree_node<_Val>.""" + try: + member = node.type.fields()[1].name + if member == '_M_value_field': +@@ -692,135 +793,142 @@ def get_value_from_Rb_tree_node(node): + # This is a pretty printer for std::_Rb_tree_iterator (which is + # std::map::iterator), and has nothing to do with the RbtreeIterator + # class above. +-class StdRbtreeIteratorPrinter: +- "Print std::map::iterator, std::set::iterator, etc." + +- def __init__ (self, typename, val): +- self.val = val +- nodetype = lookup_node_type('_Rb_tree_node', self.val.type) +- self.link_type = nodetype.pointer() + +- def to_string (self): +- if not self.val['_M_node']: ++class StdRbtreeIteratorPrinter(printer_base): ++ """Print std::map::iterator, std::set::iterator, etc.""" ++ ++ def __init__(self, typename, val): ++ self._val = val ++ nodetype = lookup_node_type('_Rb_tree_node', self._val.type) ++ self._link_type = nodetype.pointer() ++ ++ def to_string(self): ++ if not self._val['_M_node']: + return 'non-dereferenceable iterator for associative container' +- node = self.val['_M_node'].cast(self.link_type).dereference() ++ node = self._val['_M_node'].cast(self._link_type).dereference() + return str(get_value_from_Rb_tree_node(node)) + +-class StdDebugIteratorPrinter: +- "Print a debug enabled version of an iterator" + +- def __init__ (self, typename, val): +- self.val = val ++class StdDebugIteratorPrinter(printer_base): ++ """Print a debug enabled version of an iterator.""" ++ ++ def __init__(self, typename, val): ++ self._val = val + + # Just strip away the encapsulating __gnu_debug::_Safe_iterator + # and return the wrapped iterator value. +- def to_string (self): ++ def to_string(self): + base_type = gdb.lookup_type('__gnu_debug::_Safe_iterator_base') +- itype = self.val.type.template_argument(0) +- safe_seq = self.val.cast(base_type)['_M_sequence'] ++ itype = self._val.type.template_argument(0) ++ safe_seq = self._val.cast(base_type)['_M_sequence'] + if not safe_seq: +- return str(self.val.cast(itype)) +- if self.val['_M_version'] != safe_seq['_M_version']: ++ return str(self._val.cast(itype)) ++ if self._val['_M_version'] != safe_seq['_M_version']: + return "invalid iterator" +- return str(self.val.cast(itype)) ++ return str(self._val.cast(itype)) ++ + + def num_elements(num): + """Return either "1 element" or "N elements" depending on the argument.""" + return '1 element' if num == 1 else '%d elements' % num + +-class StdMapPrinter: +- "Print a std::map or std::multimap" ++ ++class StdMapPrinter(printer_base): ++ """Print a std::map or std::multimap.""" + + # Turn an RbtreeIterator into a pretty-print iterator. 
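The iterator advance coded just above is the textbook in-order successor walk over a tree whose nodes carry parent pointers; the libstdc++ variant additionally has to cope with the header sentinel, which is what the extra right-child comparison handles. A stand-alone C version of the plain walk (struct demo_node is an invented layout, not _Rb_tree_node):

#include <stddef.h>

struct demo_node { struct demo_node *left, *right, *parent; };

static struct demo_node *
demo_successor (struct demo_node *n)
{
  if (n->right)                    /* leftmost node of the right subtree */
    {
      n = n->right;
      while (n->left)
        n = n->left;
      return n;
    }
  struct demo_node *p = n->parent; /* otherwise climb while n is a right child */
  while (p && n == p->right)
    {
      n = p;
      p = p->parent;
    }
  return p;                        /* NULL after the maximum element */
}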
+ class _iter(Iterator): + def __init__(self, rbiter, type): +- self.rbiter = rbiter +- self.count = 0 +- self.type = type ++ self._rbiter = rbiter ++ self._count = 0 ++ self._type = type + + def __iter__(self): + return self + + def __next__(self): +- if self.count % 2 == 0: +- n = next(self.rbiter) +- n = n.cast(self.type).dereference() ++ if self._count % 2 == 0: ++ n = next(self._rbiter) ++ n = n.cast(self._type).dereference() + n = get_value_from_Rb_tree_node(n) +- self.pair = n ++ self._pair = n + item = n['first'] + else: +- item = self.pair['second'] +- result = ('[%d]' % self.count, item) +- self.count = self.count + 1 ++ item = self._pair['second'] ++ result = ('[%d]' % self._count, item) ++ self._count = self._count + 1 + return result + +- def __init__ (self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val + +- def to_string (self): +- return '%s with %s' % (self.typename, +- num_elements(len(RbtreeIterator (self.val)))) ++ def to_string(self): ++ return '%s with %s' % (self._typename, ++ num_elements(len(RbtreeIterator(self._val)))) + +- def children (self): +- node = lookup_node_type('_Rb_tree_node', self.val.type).pointer() +- return self._iter (RbtreeIterator (self.val), node) ++ def children(self): ++ node = lookup_node_type('_Rb_tree_node', self._val.type).pointer() ++ return self._iter(RbtreeIterator(self._val), node) + +- def display_hint (self): ++ def display_hint(self): + return 'map' + +-class StdSetPrinter: +- "Print a std::set or std::multiset" ++ ++class StdSetPrinter(printer_base): ++ """Print a std::set or std::multiset.""" + + # Turn an RbtreeIterator into a pretty-print iterator. + class _iter(Iterator): + def __init__(self, rbiter, type): +- self.rbiter = rbiter +- self.count = 0 +- self.type = type ++ self._rbiter = rbiter ++ self._count = 0 ++ self._type = type + + def __iter__(self): + return self + + def __next__(self): +- item = next(self.rbiter) +- item = item.cast(self.type).dereference() ++ item = next(self._rbiter) ++ item = item.cast(self._type).dereference() + item = get_value_from_Rb_tree_node(item) + # FIXME: this is weird ... what to do? + # Maybe a 'set' display hint? 
+- result = ('[%d]' % self.count, item) +- self.count = self.count + 1 ++ result = ('[%d]' % self._count, item) ++ self._count = self._count + 1 + return result + +- def __init__ (self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def to_string(self): ++ return '%s with %s' % (self._typename, ++ num_elements(len(RbtreeIterator(self._val)))) + +- def to_string (self): +- return '%s with %s' % (self.typename, +- num_elements(len(RbtreeIterator (self.val)))) ++ def children(self): ++ node = lookup_node_type('_Rb_tree_node', self._val.type).pointer() ++ return self._iter(RbtreeIterator(self._val), node) + +- def children (self): +- node = lookup_node_type('_Rb_tree_node', self.val.type).pointer() +- return self._iter (RbtreeIterator (self.val), node) + +-class StdBitsetPrinter: +- "Print a std::bitset" ++class StdBitsetPrinter(printer_base): ++ """Print a std::bitset.""" + + def __init__(self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val + +- def to_string (self): ++ def to_string(self): + # If template_argument handled values, we could print the + # size. Or we could use a regexp on the type. +- return '%s' % (self.typename) ++ return '%s' % (self._typename) + +- def children (self): ++ def children(self): + try: + # An empty bitset may not have any members which will + # result in an exception being thrown. +- words = self.val['_M_w'] ++ words = self._val['_M_w'] + except: + return [] + +@@ -830,7 +938,7 @@ class StdBitsetPrinter: + # array. This depends on the template specialization used. + # If it is a single long, convert to a single element list. + if wtype.code == gdb.TYPE_CODE_ARRAY: +- tsize = wtype.target ().sizeof ++ tsize = wtype.target().sizeof + else: + words = [words] + tsize = wtype.sizeof +@@ -850,282 +958,349 @@ class StdBitsetPrinter: + byte = byte + 1 + return result + +-class StdDequePrinter: +- "Print a std::deque" ++ ++class StdDequePrinter(printer_base): ++ """Print a std::deque.""" + + class _iter(Iterator): + def __init__(self, node, start, end, last, buffer_size): +- self.node = node +- self.p = start +- self.end = end +- self.last = last +- self.buffer_size = buffer_size +- self.count = 0 ++ self._node = node ++ self._p = start ++ self._end = end ++ self._last = last ++ self._buffer_size = buffer_size ++ self._count = 0 + + def __iter__(self): + return self + + def __next__(self): +- if self.p == self.last: ++ if self._p == self._last: + raise StopIteration + +- result = ('[%d]' % self.count, self.p.dereference()) +- self.count = self.count + 1 ++ result = ('[%d]' % self._count, self._p.dereference()) ++ self._count = self._count + 1 + + # Advance the 'cur' pointer. +- self.p = self.p + 1 +- if self.p == self.end: ++ self._p = self._p + 1 ++ if self._p == self._end: + # If we got to the end of this bucket, move to the + # next bucket. 
+- self.node = self.node + 1 +- self.p = self.node[0] +- self.end = self.p + self.buffer_size ++ self._node = self._node + 1 ++ self._p = self._node[0] ++ self._end = self._p + self._buffer_size + + return result + + def __init__(self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val +- self.elttype = val.type.template_argument(0) +- size = self.elttype.sizeof ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ self._elttype = val.type.template_argument(0) ++ size = self._elttype.sizeof + if size < 512: +- self.buffer_size = int (512 / size) ++ self._buffer_size = int(512 / size) + else: +- self.buffer_size = 1 ++ self._buffer_size = 1 + + def to_string(self): +- start = self.val['_M_impl']['_M_start'] +- end = self.val['_M_impl']['_M_finish'] ++ start = self._val['_M_impl']['_M_start'] ++ end = self._val['_M_impl']['_M_finish'] + + delta_n = end['_M_node'] - start['_M_node'] - 1 + delta_s = start['_M_last'] - start['_M_cur'] + delta_e = end['_M_cur'] - end['_M_first'] + +- size = self.buffer_size * delta_n + delta_s + delta_e ++ size = self._buffer_size * delta_n + delta_s + delta_e + +- return '%s with %s' % (self.typename, num_elements(long(size))) ++ return '%s with %s' % (self._typename, num_elements(long(size))) + + def children(self): +- start = self.val['_M_impl']['_M_start'] +- end = self.val['_M_impl']['_M_finish'] ++ start = self._val['_M_impl']['_M_start'] ++ end = self._val['_M_impl']['_M_finish'] + return self._iter(start['_M_node'], start['_M_cur'], start['_M_last'], +- end['_M_cur'], self.buffer_size) ++ end['_M_cur'], self._buffer_size) + +- def display_hint (self): ++ def display_hint(self): + return 'array' + +-class StdDequeIteratorPrinter: +- "Print std::deque::iterator" ++ ++class StdDequeIteratorPrinter(printer_base): ++ """Print std::deque::iterator.""" + + def __init__(self, typename, val): +- self.val = val ++ self._val = val + + def to_string(self): +- if not self.val['_M_cur']: ++ if not self._val['_M_cur']: + return 'non-dereferenceable iterator for std::deque' +- return str(self.val['_M_cur'].dereference()) ++ return str(self._val['_M_cur'].dereference()) + +-class StdStringPrinter: +- "Print a std::basic_string of some kind" ++ ++class StdStringPrinter(printer_base): ++ """Print a std::basic_string of some kind.""" + + def __init__(self, typename, val): +- self.val = val +- self.new_string = typename.find("::__cxx11::basic_string") != -1 ++ self._val = val ++ self._new_string = typename.find("::__cxx11::basic_string") != -1 + + def to_string(self): + # Make sure &string works, too. +- type = self.val.type ++ type = self._val.type + if type.code == gdb.TYPE_CODE_REF: +- type = type.target () ++ type = type.target() + + # Calculate the length of the string so that to_string returns + # the string according to length, not according to first null + # encountered. 
+- ptr = self.val ['_M_dataplus']['_M_p'] +- if self.new_string: +- length = self.val['_M_string_length'] ++ ptr = self._val['_M_dataplus']['_M_p'] ++ if self._new_string: ++ length = self._val['_M_string_length'] + # https://sourceware.org/bugzilla/show_bug.cgi?id=17728 + ptr = ptr.cast(ptr.type.strip_typedefs()) + else: +- realtype = type.unqualified ().strip_typedefs () +- reptype = gdb.lookup_type (str (realtype) + '::_Rep').pointer () ++ realtype = type.unqualified().strip_typedefs() ++ reptype = gdb.lookup_type(str(realtype) + '::_Rep').pointer() + header = ptr.cast(reptype) - 1 +- length = header.dereference ()['_M_length'] ++ length = header.dereference()['_M_length'] + if hasattr(ptr, "lazy_string"): +- return ptr.lazy_string (length = length) +- return ptr.string (length = length) ++ return ptr.lazy_string(length=length) ++ return ptr.string(length=length) + +- def display_hint (self): ++ def display_hint(self): + return 'string' + ++ ++def access_streambuf_ptrs(streambuf): ++ """Access the streambuf put area pointers.""" ++ pbase = streambuf['_M_out_beg'] ++ pptr = streambuf['_M_out_cur'] ++ egptr = streambuf['_M_in_end'] ++ return pbase, pptr, egptr ++ ++ ++class StdStringBufPrinter(printer_base): ++ """Print a std::basic_stringbuf.""" ++ ++ def __init__(self, _, val): ++ self._val = val ++ ++ def to_string(self): ++ (pbase, pptr, egptr) = access_streambuf_ptrs(self._val) ++ # Logic from basic_stringbuf::_M_high_mark() ++ if pptr: ++ if not egptr or pptr > egptr: ++ return pbase.string(length=pptr - pbase) ++ else: ++ return pbase.string(length=egptr - pbase) ++ return self._val['_M_string'] ++ ++ def display_hint(self): ++ return 'string' ++ ++ ++class StdStringStreamPrinter(printer_base): ++ """Print a std::basic_stringstream.""" ++ ++ def __init__(self, typename, val): ++ self._val = val ++ self._typename = typename ++ ++ # Check if the stream was redirected. This is essentially: ++ # val['_M_streambuf'] != val['_M_stringbuf'].address ++ # However, GDB can't resolve the virtual inheritance, so we do that ++ # manually. 
++ basetype = [f.type for f in val.type.fields() if f.is_base_class][0] ++ gdb.set_convenience_variable('__stream', val.cast(basetype).address) ++ self._streambuf = gdb.parse_and_eval('$__stream->rdbuf()') ++ self._was_redirected = self._streambuf != val['_M_stringbuf'].address ++ ++ def to_string(self): ++ if self._was_redirected: ++ return "%s redirected to %s" % ( ++ self._typename, self._streambuf.dereference()) ++ return self._val['_M_stringbuf'] ++ ++ def display_hint(self): ++ if self._was_redirected: ++ return None ++ return 'string' ++ ++ + class Tr1HashtableIterator(Iterator): +- def __init__ (self, hashtable): +- self.buckets = hashtable['_M_buckets'] +- self.bucket = 0 +- self.bucket_count = hashtable['_M_bucket_count'] +- self.node_type = find_type(hashtable.type, '_Node').pointer() +- self.node = 0 +- while self.bucket != self.bucket_count: +- self.node = self.buckets[self.bucket] +- if self.node: ++ def __init__(self, hashtable): ++ self._buckets = hashtable['_M_buckets'] ++ self._bucket = 0 ++ self._bucket_count = hashtable['_M_bucket_count'] ++ self._node_type = find_type(hashtable.type, '_Node').pointer() ++ self._node = 0 ++ while self._bucket != self._bucket_count: ++ self._node = self._buckets[self._bucket] ++ if self._node: + break +- self.bucket = self.bucket + 1 ++ self._bucket = self._bucket + 1 + +- def __iter__ (self): ++ def __iter__(self): + return self + +- def __next__ (self): +- if self.node == 0: ++ def __next__(self): ++ if self._node == 0: + raise StopIteration +- node = self.node.cast(self.node_type) ++ node = self._node.cast(self._node_type) + result = node.dereference()['_M_v'] +- self.node = node.dereference()['_M_next']; +- if self.node == 0: +- self.bucket = self.bucket + 1 +- while self.bucket != self.bucket_count: +- self.node = self.buckets[self.bucket] +- if self.node: ++ self._node = node.dereference()['_M_next'] ++ if self._node == 0: ++ self._bucket = self._bucket + 1 ++ while self._bucket != self._bucket_count: ++ self._node = self._buckets[self._bucket] ++ if self._node: + break +- self.bucket = self.bucket + 1 ++ self._bucket = self._bucket + 1 + return result + ++ + class StdHashtableIterator(Iterator): + def __init__(self, hashtable): +- self.node = hashtable['_M_before_begin']['_M_nxt'] ++ self._node = hashtable['_M_before_begin']['_M_nxt'] + valtype = hashtable.type.template_argument(1) + cached = hashtable.type.template_argument(9).template_argument(0) + node_type = lookup_templ_spec('std::__detail::_Hash_node', str(valtype), + 'true' if cached else 'false') +- self.node_type = node_type.pointer() ++ self._node_type = node_type.pointer() + + def __iter__(self): + return self + + def __next__(self): +- if self.node == 0: ++ if self._node == 0: + raise StopIteration +- elt = self.node.cast(self.node_type).dereference() +- self.node = elt['_M_nxt'] ++ elt = self._node.cast(self._node_type).dereference() ++ self._node = elt['_M_nxt'] + valptr = elt['_M_storage'].address + valptr = valptr.cast(elt.type.template_argument(0).pointer()) + return valptr.dereference() + +-class Tr1UnorderedSetPrinter: +- "Print a std::unordered_set or tr1::unordered_set" + +- def __init__ (self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val ++class Tr1UnorderedSetPrinter(printer_base): ++ """Print a std::unordered_set or tr1::unordered_set.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val + +- def hashtable (self): +- if 
self.typename.startswith('std::tr1'): +- return self.val +- return self.val['_M_h'] ++ def _hashtable(self): ++ if self._typename.startswith('std::tr1'): ++ return self._val ++ return self._val['_M_h'] + +- def to_string (self): +- count = self.hashtable()['_M_element_count'] +- return '%s with %s' % (self.typename, num_elements(count)) ++ def to_string(self): ++ count = self._hashtable()['_M_element_count'] ++ return '%s with %s' % (self._typename, num_elements(count)) + + @staticmethod +- def format_count (i): ++ def _format_count(i): + return '[%d]' % i + +- def children (self): +- counter = imap (self.format_count, itertools.count()) +- if self.typename.startswith('std::tr1'): +- return izip (counter, Tr1HashtableIterator (self.hashtable())) +- return izip (counter, StdHashtableIterator (self.hashtable())) ++ def children(self): ++ counter = imap(self._format_count, itertools.count()) ++ if self._typename.startswith('std::tr1'): ++ return izip(counter, Tr1HashtableIterator(self._hashtable())) ++ return izip(counter, StdHashtableIterator(self._hashtable())) + +-class Tr1UnorderedMapPrinter: +- "Print a std::unordered_map or tr1::unordered_map" + +- def __init__ (self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val ++class Tr1UnorderedMapPrinter(printer_base): ++ """Print a std::unordered_map or tr1::unordered_map.""" + +- def hashtable (self): +- if self.typename.startswith('std::tr1'): +- return self.val +- return self.val['_M_h'] ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val + +- def to_string (self): +- count = self.hashtable()['_M_element_count'] +- return '%s with %s' % (self.typename, num_elements(count)) ++ def _hashtable(self): ++ if self._typename.startswith('std::tr1'): ++ return self._val ++ return self._val['_M_h'] ++ ++ def to_string(self): ++ count = self._hashtable()['_M_element_count'] ++ return '%s with %s' % (self._typename, num_elements(count)) + + @staticmethod +- def flatten (list): ++ def _flatten(list): + for elt in list: + for i in elt: + yield i + + @staticmethod +- def format_one (elt): ++ def _format_one(elt): + return (elt['first'], elt['second']) + + @staticmethod +- def format_count (i): ++ def _format_count(i): + return '[%d]' % i + +- def children (self): +- counter = imap (self.format_count, itertools.count()) ++ def children(self): ++ counter = imap(self._format_count, itertools.count()) + # Map over the hash table and flatten the result. +- if self.typename.startswith('std::tr1'): +- data = self.flatten (imap (self.format_one, Tr1HashtableIterator (self.hashtable()))) ++ if self._typename.startswith('std::tr1'): ++ data = self._flatten( ++ imap(self._format_one, Tr1HashtableIterator(self._hashtable()))) + # Zip the two iterators together. +- return izip (counter, data) +- data = self.flatten (imap (self.format_one, StdHashtableIterator (self.hashtable()))) ++ return izip(counter, data) ++ data = self._flatten( ++ imap(self._format_one, StdHashtableIterator(self._hashtable()))) + # Zip the two iterators together. 
+- return izip (counter, data) ++ return izip(counter, data) + +- def display_hint (self): ++ def display_hint(self): + return 'map' + +-class StdForwardListPrinter: +- "Print a std::forward_list" ++ ++class StdForwardListPrinter(printer_base): ++ """Print a std::forward_list.""" + + class _iterator(Iterator): + def __init__(self, nodetype, head): +- self.nodetype = nodetype +- self.base = head['_M_next'] +- self.count = 0 ++ self._nodetype = nodetype ++ self._base = head['_M_next'] ++ self._count = 0 + + def __iter__(self): + return self + + def __next__(self): +- if self.base == 0: ++ if self._base == 0: + raise StopIteration +- elt = self.base.cast(self.nodetype).dereference() +- self.base = elt['_M_next'] +- count = self.count +- self.count = self.count + 1 ++ elt = self._base.cast(self._nodetype).dereference() ++ self._base = elt['_M_next'] ++ count = self._count ++ self._count = self._count + 1 + valptr = elt['_M_storage'].address + valptr = valptr.cast(elt.type.template_argument(0).pointer()) + return ('[%d]' % count, valptr.dereference()) + + def __init__(self, typename, val): +- self.val = val +- self.typename = strip_versioned_namespace(typename) ++ self._val = val ++ self._typename = strip_versioned_namespace(typename) + + def children(self): +- nodetype = lookup_node_type('_Fwd_list_node', self.val.type).pointer() +- return self._iterator(nodetype, self.val['_M_impl']['_M_head']) ++ nodetype = lookup_node_type('_Fwd_list_node', self._val.type).pointer() ++ return self._iterator(nodetype, self._val['_M_impl']['_M_head']) + + def to_string(self): +- if self.val['_M_impl']['_M_head']['_M_next'] == 0: +- return 'empty %s' % self.typename +- return '%s' % self.typename ++ if self._val['_M_impl']['_M_head']['_M_next'] == 0: ++ return 'empty %s' % self._typename ++ return '%s' % self._typename ++ + +-class SingleObjContainerPrinter(object): +- "Base class for printers of containers of single objects" ++class SingleObjContainerPrinter(printer_base): ++ """Base class for printers of containers of single objects.""" + +- def __init__ (self, val, viz, hint = None): +- self.contained_value = val +- self.visualizer = viz +- self.hint = hint ++ def __init__(self, val, viz, hint=None): ++ self._contained_value = val ++ self._visualizer = viz ++ self._hint = hint + + def _recognize(self, type): +- """Return TYPE as a string after applying type printers""" ++ """Return type as a string after applying type printers.""" + global _use_type_printing + if not _use_type_printing: + return str(type) +@@ -1133,36 +1308,37 @@ class SingleObjContainerPrinter(object): + type) or str(type) + + class _contained(Iterator): +- def __init__ (self, val): +- self.val = val ++ def __init__(self, val): ++ self._val = val + +- def __iter__ (self): ++ def __iter__(self): + return self + + def __next__(self): +- if self.val is None: ++ if self._val is None: + raise StopIteration +- retval = self.val +- self.val = None ++ retval = self._val ++ self._val = None + return ('[contained value]', retval) + +- def children (self): +- if self.contained_value is None: +- return self._contained (None) +- if hasattr (self.visualizer, 'children'): +- return self.visualizer.children () +- return self._contained (self.contained_value) ++ def children(self): ++ if self._contained_value is None: ++ return self._contained(None) ++ if hasattr(self._visualizer, 'children'): ++ return self._visualizer.children() ++ return self._contained(self._contained_value) + + def display_hint(self): +- if (hasattr(self.visualizer, 'children') +- and 
hasattr(self.visualizer, 'display_hint')): ++ if (hasattr(self._visualizer, 'children') ++ and hasattr(self._visualizer, 'display_hint')): + # If contained value is a map we want to display in the same way. +- return self.visualizer.display_hint() +- return self.hint ++ return self._visualizer.display_hint() ++ return self._hint ++ + + def function_pointer_to_name(f): +- "Find the name of the function referred to by the gdb.Value f, " +- " which should contain a function pointer from the program." ++ """Find the name of the function referred to by the gdb.Value f, ++ which should contain a function pointer from the program.""" + + # Turn the function pointer into an actual address. + # This is needed to unpack ppc64 function descriptors. +@@ -1183,23 +1359,23 @@ def function_pointer_to_name(f): + except: + return None + ++ + class StdExpAnyPrinter(SingleObjContainerPrinter): +- "Print a std::any or std::experimental::any" +- +- def __init__ (self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.typename = re.sub(r'^std::experimental::fundamentals_v\d::', +- 'std::experimental::', self.typename, 1) +- self.val = val +- self.contained_type = None ++ """Print a std::any or std::experimental::any.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._typename = strip_fundts_namespace(self._typename) ++ self._val = val ++ self._contained_type = None + contained_value = None + visualizer = None +- mgr = self.val['_M_manager'] ++ mgr = self._val['_M_manager'] + if mgr != 0: + func = function_pointer_to_name(mgr) + if not func: + raise ValueError( +- "Invalid function pointer in %s" % (self.typename)) ++ "Invalid function pointer in %s" % (self._typename)) + # We want to use this regular expression: + # T::_Manager_xxx<.*>::_S_manage\(T::_Op, const T\*, T::_Arg\*\) + # where T is std::any or std::experimental::any. +@@ -1212,46 +1388,80 @@ class StdExpAnyPrinter(SingleObjContainerPrinter): + ).format(typename) + m = re.match(rx, func) + if not m: +- raise ValueError("Unknown manager function in %s" % self.typename) ++ raise ValueError( ++ "Unknown manager function in %s" % self._typename) + + mgrname = m.group(1) + # FIXME need to expand 'std::string' so that gdb.lookup_type works + if 'std::string' in mgrname: +- mgrname = re.sub(r"std::string(?!\w)", str(gdb.lookup_type('std::string').strip_typedefs()), m.group(1)) +- +- mgrtype = gdb.lookup_type(mgrname) +- self.contained_type = mgrtype.template_argument(0) ++ mgrtypes = [] ++ for s in StdExpAnyPrinter._string_types(): ++ try: ++ x = re.sub(r"std::string(?!\w)", s, m.group(1)) ++ # The following lookup might raise gdb.error if the ++ # manager function was never instantiated for 's' in ++ # the program, because there will be no such type. ++ mgrtypes.append(gdb.lookup_type(x)) ++ except gdb.error: ++ pass ++ if len(mgrtypes) != 1: ++ # FIXME: this is unlikely in practice, but possible for ++ # programs that use both old and new string types with ++ # std::any in a single program. Can we do better? ++ # Maybe find the address of each type's _S_manage and ++ # compare to the address stored in _M_manager? 
++ raise ValueError( ++ 'Cannot uniquely determine std::string type ' ++ 'used in std::any' ++ ) ++ mgrtype = mgrtypes[0] ++ else: ++ mgrtype = gdb.lookup_type(mgrname) ++ self._contained_type = mgrtype.template_argument(0) + valptr = None + if '::_Manager_internal' in mgrname: +- valptr = self.val['_M_storage']['_M_buffer'].address ++ valptr = self._val['_M_storage']['_M_buffer'].address + elif '::_Manager_external' in mgrname: +- valptr = self.val['_M_storage']['_M_ptr'] ++ valptr = self._val['_M_storage']['_M_ptr'] + else: +- raise ValueError("Unknown manager function in %s" % self.typename) +- contained_value = valptr.cast(self.contained_type.pointer()).dereference() ++ raise ValueError( ++ "Unknown manager function in %s" % self._typename) ++ contained_value = valptr.cast( ++ self._contained_type.pointer()).dereference() + visualizer = gdb.default_visualizer(contained_value) +- super(StdExpAnyPrinter, self).__init__ (contained_value, visualizer) +- +- def to_string (self): +- if self.contained_type is None: +- return '%s [no contained value]' % self.typename +- desc = "%s containing " % self.typename +- if hasattr (self.visualizer, 'children'): +- return desc + self.visualizer.to_string () +- valtype = self._recognize (self.contained_type) ++ super(StdExpAnyPrinter, self).__init__(contained_value, visualizer) ++ ++ def to_string(self): ++ if self._contained_type is None: ++ return '%s [no contained value]' % self._typename ++ desc = "%s containing " % self._typename ++ if hasattr(self._visualizer, 'children'): ++ return desc + self._visualizer.to_string() ++ valtype = self._recognize(self._contained_type) + return desc + strip_versioned_namespace(str(valtype)) + ++ @staticmethod ++ def _string_types(): ++ # This lookup for std::string might return the __cxx11 version, ++ # but that's not necessarily the one used by the std::any ++ # manager function we're trying to find. ++ strings = {str(gdb.lookup_type('std::string').strip_typedefs())} ++ # So also consider all the other possible std::string types! 
++ s = 'basic_string, std::allocator >' ++ quals = ['std::', 'std::__cxx11::', ++ 'std::' + _versioned_namespace] ++ strings |= {q + s for q in quals} # set of unique strings ++ return strings ++ ++ + class StdExpOptionalPrinter(SingleObjContainerPrinter): +- "Print a std::optional or std::experimental::optional" +- +- def __init__ (self, typename, val): +- valtype = self._recognize (val.type.template_argument(0)) +- typename = strip_versioned_namespace(typename) +- self.typename = re.sub( +- r'^std::(experimental::|)(fundamentals_v\d::|)(.*)', +- r'std::\1\3<%s>' % valtype, typename, 1) ++ """Print a std::optional or std::experimental::optional.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._typename = strip_fundts_namespace(self._typename) + payload = val['_M_payload'] +- if self.typename.startswith('std::experimental'): ++ if self._typename.startswith('std::experimental'): + engaged = val['_M_engaged'] + contained_value = payload + else: +@@ -1262,124 +1472,134 @@ class StdExpOptionalPrinter(SingleObjContainerPrinter): + contained_value = contained_value['_M_value'] + except: + pass +- visualizer = gdb.default_visualizer (contained_value) ++ visualizer = gdb.default_visualizer(contained_value) + if not engaged: + contained_value = None +- super (StdExpOptionalPrinter, self).__init__ (contained_value, visualizer) ++ super(StdExpOptionalPrinter, self).__init__( ++ contained_value, visualizer) ++ ++ def to_string(self): ++ if self._contained_value is None: ++ return "%s [no contained value]" % self._typename ++ if hasattr(self._visualizer, 'children'): ++ return "%s containing %s" % (self._typename, ++ self._visualizer.to_string()) ++ return self._typename + +- def to_string (self): +- if self.contained_value is None: +- return "%s [no contained value]" % self.typename +- if hasattr (self.visualizer, 'children'): +- return "%s containing %s" % (self.typename, +- self.visualizer.to_string()) +- return self.typename + + class StdVariantPrinter(SingleObjContainerPrinter): +- "Print a std::variant" ++ """Print a std::variant.""" + + def __init__(self, typename, val): + alternatives = get_template_arg_list(val.type) +- self.typename = strip_versioned_namespace(typename) +- self.typename = "%s<%s>" % (self.typename, ', '.join([self._recognize(alt) for alt in alternatives])) +- self.index = val['_M_index'] +- if self.index >= len(alternatives): +- self.contained_type = None ++ self._typename = strip_versioned_namespace(typename) ++ self._index = val['_M_index'] ++ if self._index >= len(alternatives): ++ self._contained_type = None + contained_value = None + visualizer = None + else: +- self.contained_type = alternatives[int(self.index)] ++ self._contained_type = alternatives[int(self._index)] + addr = val['_M_u']['_M_first']['_M_storage'].address +- contained_value = addr.cast(self.contained_type.pointer()).dereference() ++ contained_value = addr.cast( ++ self._contained_type.pointer()).dereference() + visualizer = gdb.default_visualizer(contained_value) +- super (StdVariantPrinter, self).__init__(contained_value, visualizer, 'array') ++ super(StdVariantPrinter, self).__init__( ++ contained_value, visualizer, 'array') + + def to_string(self): +- if self.contained_value is None: +- return "%s [no contained value]" % self.typename +- if hasattr(self.visualizer, 'children'): +- return "%s [index %d] containing %s" % (self.typename, self.index, +- self.visualizer.to_string()) +- return "%s [index %d]" % (self.typename, self.index) ++ if 
self._contained_value is None: ++ return "%s [no contained value]" % self._typename ++ if hasattr(self._visualizer, 'children'): ++ return "%s [index %d] containing %s" % (self._typename, self._index, ++ self._visualizer.to_string()) ++ return "%s [index %d]" % (self._typename, self._index) ++ + + class StdNodeHandlePrinter(SingleObjContainerPrinter): +- "Print a container node handle" ++ """Print a container node handle.""" + + def __init__(self, typename, val): +- self.value_type = val.type.template_argument(1) ++ self._value_type = val.type.template_argument(1) + nodetype = val.type.template_argument(2).template_argument(0) +- self.is_rb_tree_node = is_specialization_of(nodetype.name, '_Rb_tree_node') +- self.is_map_node = val.type.template_argument(0) != self.value_type ++ self._is_rb_tree_node = is_specialization_of( ++ nodetype.name, '_Rb_tree_node') ++ self._is_map_node = val.type.template_argument(0) != self._value_type + nodeptr = val['_M_ptr'] + if nodeptr: +- if self.is_rb_tree_node: +- contained_value = get_value_from_Rb_tree_node(nodeptr.dereference()) ++ if self._is_rb_tree_node: ++ contained_value = get_value_from_Rb_tree_node( ++ nodeptr.dereference()) + else: + contained_value = get_value_from_aligned_membuf(nodeptr['_M_storage'], +- self.value_type) ++ self._value_type) + visualizer = gdb.default_visualizer(contained_value) + else: + contained_value = None + visualizer = None + optalloc = val['_M_alloc'] +- self.alloc = optalloc['_M_payload'] if optalloc['_M_engaged'] else None ++ self._alloc = optalloc['_M_payload'] if optalloc['_M_engaged'] else None + super(StdNodeHandlePrinter, self).__init__(contained_value, visualizer, + 'array') + + def to_string(self): + desc = 'node handle for ' +- if not self.is_rb_tree_node: ++ if not self._is_rb_tree_node: + desc += 'unordered ' +- if self.is_map_node: +- desc += 'map'; ++ if self._is_map_node: ++ desc += 'map' + else: +- desc += 'set'; ++ desc += 'set' + +- if self.contained_value: ++ if self._contained_value: + desc += ' with element' +- if hasattr(self.visualizer, 'children'): +- return "%s = %s" % (desc, self.visualizer.to_string()) ++ if hasattr(self._visualizer, 'children'): ++ return "%s = %s" % (desc, self._visualizer.to_string()) + return desc + else: + return 'empty %s' % desc + +-class StdExpStringViewPrinter: +- "Print a std::basic_string_view or std::experimental::basic_string_view" + +- def __init__ (self, typename, val): +- self.val = val ++class StdExpStringViewPrinter(printer_base): ++ """ ++ Print a std::basic_string_view or std::experimental::basic_string_view ++ """ ++ ++ def __init__(self, typename, val): ++ self._val = val + +- def to_string (self): +- ptr = self.val['_M_str'] +- len = self.val['_M_len'] +- if hasattr (ptr, "lazy_string"): +- return ptr.lazy_string (length = len) +- return ptr.string (length = len) ++ def to_string(self): ++ ptr = self._val['_M_str'] ++ len = self._val['_M_len'] ++ if hasattr(ptr, "lazy_string"): ++ return ptr.lazy_string(length=len) ++ return ptr.string(length=len) + +- def display_hint (self): ++ def display_hint(self): + return 'string' + +-class StdExpPathPrinter: +- "Print a std::experimental::filesystem::path" + +- def __init__ (self, typename, val): +- self.val = val +- self.typename = typename +- start = self.val['_M_cmpts']['_M_impl']['_M_start'] +- finish = self.val['_M_cmpts']['_M_impl']['_M_finish'] +- self.num_cmpts = int (finish - start) ++class StdExpPathPrinter(printer_base): ++ """Print a std::experimental::filesystem::path.""" ++ ++ def __init__(self, 
typename, val): ++ self._val = val ++ self._typename = typename ++ start = self._val['_M_cmpts']['_M_impl']['_M_start'] ++ finish = self._val['_M_cmpts']['_M_impl']['_M_finish'] ++ self._num_cmpts = int(finish - start) + + def _path_type(self): +- t = str(self.val['_M_type']) ++ t = str(self._val['_M_type']) + if t[-9:] == '_Root_dir': + return "root-directory" + if t[-10:] == '_Root_name': + return "root-name" + return None + +- def to_string (self): +- path = "%s" % self.val ['_M_pathname'] +- if self.num_cmpts == 0: ++ def to_string(self): ++ path = "%s" % self._val['_M_pathname'] ++ if self._num_cmpts == 0: + t = self._path_type() + if t: + path = '%s [%s]' % (path, t) +@@ -1387,54 +1607,55 @@ class StdExpPathPrinter: + + class _iterator(Iterator): + def __init__(self, cmpts, pathtype): +- self.pathtype = pathtype +- self.item = cmpts['_M_impl']['_M_start'] +- self.finish = cmpts['_M_impl']['_M_finish'] +- self.count = 0 ++ self._pathtype = pathtype ++ self._item = cmpts['_M_impl']['_M_start'] ++ self._finish = cmpts['_M_impl']['_M_finish'] ++ self._count = 0 + + def __iter__(self): + return self + + def __next__(self): +- if self.item == self.finish: ++ if self._item == self._finish: + raise StopIteration +- item = self.item.dereference() +- count = self.count +- self.count = self.count + 1 +- self.item = self.item + 1 ++ item = self._item.dereference() ++ count = self._count ++ self._count = self._count + 1 ++ self._item = self._item + 1 + path = item['_M_pathname'] +- t = StdExpPathPrinter(self.pathtype, item)._path_type() ++ t = StdExpPathPrinter(self._pathtype, item)._path_type() + if not t: + t = count + return ('[%s]' % t, path) + + def children(self): +- return self._iterator(self.val['_M_cmpts'], self.typename) +- +-class StdPathPrinter: +- "Print a std::filesystem::path" +- +- def __init__ (self, typename, val): +- self.val = val +- self.typename = typename +- impl = unique_ptr_get(self.val['_M_cmpts']['_M_impl']) +- self.type = impl.cast(gdb.lookup_type('uintptr_t')) & 3 +- if self.type == 0: +- self.impl = impl ++ return self._iterator(self._val['_M_cmpts'], self._typename) ++ ++ ++class StdPathPrinter(printer_base): ++ """Print a std::filesystem::path.""" ++ ++ def __init__(self, typename, val): ++ self._val = val ++ self._typename = typename ++ impl = unique_ptr_get(self._val['_M_cmpts']['_M_impl']) ++ self._type = impl.cast(gdb.lookup_type('uintptr_t')) & 3 ++ if self._type == 0: ++ self._impl = impl + else: +- self.impl = None ++ self._impl = None + + def _path_type(self): +- t = str(self.type.cast(gdb.lookup_type(self.typename + '::_Type'))) ++ t = str(self._type.cast(gdb.lookup_type(self._typename + '::_Type'))) + if t[-9:] == '_Root_dir': + return "root-directory" + if t[-10:] == '_Root_name': + return "root-name" + return None + +- def to_string (self): +- path = "%s" % self.val ['_M_pathname'] +- if self.type != 0: ++ def to_string(self): ++ path = "%s" % self._val['_M_pathname'] ++ if self._type != 0: + t = self._path_type() + if t: + path = '%s [%s]' % (path, t) +@@ -1442,7 +1663,7 @@ class StdPathPrinter: + + class _iterator(Iterator): + def __init__(self, impl, pathtype): +- self.pathtype = pathtype ++ self._pathtype = pathtype + if impl: + # We can't access _Impl::_M_size because _Impl is incomplete + # so cast to int* to access the _M_size member at offset zero, +@@ -1451,94 +1672,96 @@ class StdPathPrinter: + char_type = gdb.lookup_type('char') + impl = impl.cast(int_type.pointer()) + size = impl.dereference() +- #self.capacity = (impl + 1).dereference() ++ 
#self._capacity = (impl + 1).dereference() + if hasattr(gdb.Type, 'alignof'): + sizeof_Impl = max(2 * int_type.sizeof, cmpt_type.alignof) + else: + sizeof_Impl = 2 * int_type.sizeof + begin = impl.cast(char_type.pointer()) + sizeof_Impl +- self.item = begin.cast(cmpt_type.pointer()) +- self.finish = self.item + size +- self.count = 0 ++ self._item = begin.cast(cmpt_type.pointer()) ++ self._finish = self._item + size ++ self._count = 0 + else: +- self.item = None +- self.finish = None ++ self._item = None ++ self._finish = None + + def __iter__(self): + return self + + def __next__(self): +- if self.item == self.finish: ++ if self._item == self._finish: + raise StopIteration +- item = self.item.dereference() +- count = self.count +- self.count = self.count + 1 +- self.item = self.item + 1 ++ item = self._item.dereference() ++ count = self._count ++ self._count = self._count + 1 ++ self._item = self._item + 1 + path = item['_M_pathname'] +- t = StdPathPrinter(self.pathtype, item)._path_type() ++ t = StdPathPrinter(self._pathtype, item)._path_type() + if not t: + t = count + return ('[%s]' % t, path) + + def children(self): +- return self._iterator(self.impl, self.typename) ++ return self._iterator(self._impl, self._typename) + + +-class StdPairPrinter: +- "Print a std::pair object, with 'first' and 'second' as children" ++class StdPairPrinter(printer_base): ++ """Print a std::pair object, with 'first' and 'second' as children.""" + + def __init__(self, typename, val): +- self.val = val ++ self._val = val + + class _iter(Iterator): +- "An iterator for std::pair types. Returns 'first' then 'second'." ++ """An iterator for std::pair types. Returns 'first' then 'second'.""" + + def __init__(self, val): +- self.val = val +- self.which = 'first' ++ self._val = val ++ self._which = 'first' + + def __iter__(self): + return self + + def __next__(self): +- if self.which is None: ++ if self._which is None: + raise StopIteration +- which = self.which ++ which = self._which + if which == 'first': +- self.which = 'second' ++ self._which = 'second' + else: +- self.which = None +- return (which, self.val[which]) ++ self._which = None ++ return (which, self._val[which]) + + def children(self): +- return self._iter(self.val) ++ return self._iter(self._val) + + def to_string(self): + return None + +-class StdCmpCatPrinter: +- "Print a comparison category object" ++ ++class StdCmpCatPrinter(printer_base): ++ """Print a comparison category object.""" + + def __init__(self, typename, val): +- self.typename = typename[typename.rfind(':') + 1:] +- self.val = val['_M_value'] ++ self._typename = typename[typename.rfind(':') + 1:] ++ self._val = val['_M_value'] + +- def to_string (self): +- if self.typename == 'strong_ordering' and self.val == 0: ++ def to_string(self): ++ if self._typename == 'strong_ordering' and self._val == 0: + name = 'equal' + else: +- names = {2:'unordered', -1:'less', 0:'equivalent', 1:'greater'} +- name = names[int(self.val)] +- return 'std::{}::{}'.format(self.typename, name) ++ names = {2: 'unordered', -1: 'less', 0: 'equivalent', 1: 'greater'} ++ name = names[int(self._val)] ++ return 'std::{}::{}'.format(self._typename, name) ++ + +-class StdErrorCodePrinter: +- "Print a std::error_code or std::error_condition" ++class StdErrorCodePrinter(printer_base): ++ """Print a std::error_code or std::error_condition.""" + + _system_is_posix = None # Whether std::system_category() use errno values. 
+ +- def __init__ (self, typename, val): +- self.val = val +- self.typename = strip_versioned_namespace(typename) ++ def __init__(self, typename, val): ++ self._val = val ++ self._typename = strip_versioned_namespace(typename) + # Do this only once ... + if StdErrorCodePrinter._system_is_posix is None: + try: +@@ -1554,6 +1777,15 @@ class StdErrorCodePrinter: + return typ + return None + ++ @classmethod ++ def _find_standard_errc_enum(cls, name): ++ for ns in ['', _versioned_namespace]: ++ try: ++ qname = 'std::{}{}'.format(ns, name) ++ return cls._find_errc_enum(qname) ++ except RuntimeError: ++ pass ++ + @classmethod + def _match_net_ts_category(cls, cat): + net_cats = ['stream', 'socket', 'ip::resolver'] +@@ -1596,10 +1828,10 @@ class StdErrorCodePrinter: + is_errno = cls._system_is_posix + if typ.tag.endswith('::future_error_category'): + name = 'future' +- enum = cls._find_errc_enum('std::future_errc') ++ enum = cls._find_standard_errc_enum('future_errc') + if typ.tag.endswith('::io_error_category'): + name = 'io' +- enum = cls._find_errc_enum('std::io_errc') ++ enum = cls._find_standard_errc_enum('io_errc') + + if name is None: + try: +@@ -1614,18 +1846,20 @@ class StdErrorCodePrinter: + + @staticmethod + def _unqualified_name(name): +- "Strip any nested-name-specifier from NAME to give an unqualified name" ++ """ ++ Strip any nested-name-specifier from name to give an unqualified name. ++ """ + return name.split('::')[-1] + +- def to_string (self): +- value = self.val['_M_value'] +- cat = self.val['_M_cat'] ++ def to_string(self): ++ value = self._val['_M_value'] ++ cat = self._val['_M_cat'] + name, alt_name, enum, is_errno = self._category_info(cat) + if value == 0: +- default_cats = { 'error_code' : 'system', +- 'error_condition' : 'generic' } +- if name == default_cats[self._unqualified_name(self.typename)]: +- return self.typename + ' = { }' # default-constructed value ++ default_cats = {'error_code': 'system', ++ 'error_condition': 'generic'} ++ if name == default_cats[self._unqualified_name(self._typename)]: ++ return self._typename + ' = { }' # default-constructed value + + strval = str(value) + if is_errno and value != 0: +@@ -1640,71 +1874,488 @@ class StdErrorCodePrinter: + name = '"%s"' % name + else: + name = alt_name +- return '%s = {%s: %s}' % (self.typename, name, strval) ++ return '%s = {%s: %s}' % (self._typename, name, strval) ++ + ++class StdRegexStatePrinter(printer_base): ++ """Print a state node in the NFA for a std::regex.""" + +-class StdSpanPrinter: +- "Print a std::span" ++ def __init__(self, typename, val): ++ self._val = val ++ self._typename = typename ++ ++ def to_string(self): ++ opcode = str(self._val['_M_opcode']) ++ if opcode: ++ opcode = opcode[25:] ++ next_id = self._val['_M_next'] ++ ++ variants = {'repeat': 'alt', 'alternative': 'alt', ++ 'subexpr_begin': 'subexpr', 'subexpr_end': 'subexpr', ++ 'line_begin_assertion': None, 'line_end_assertion': None, ++ 'word_boundary': 'neg', 'subexpr_lookahead': 'neg', ++ 'backref': 'backref_index', ++ 'match': None, 'accept': None, ++ 'dummy': None, 'unknown': None ++ } ++ v = variants[opcode] ++ ++ s = "opcode={}, next={}".format(opcode, next_id) ++ if v is not None and self._val['_M_' + v] is not None: ++ s = "{}, {}={}".format(s, v, self._val['_M_' + v]) ++ return "{%s}" % (s) ++ ++ ++class StdSpanPrinter(printer_base): ++ """Print a std::span.""" + +- class iterator(Iterator): ++ class _iterator(Iterator): + def __init__(self, begin, size): +- self.count = 0 +- self.begin = begin +- self.size = size ++ 
self._count = 0 ++ self._begin = begin ++ self._size = size + +- def __iter__ (self): ++ def __iter__(self): + return self + +- def __next__ (self): +- if self.count == self.size: ++ def __next__(self): ++ if self._count == self._size: + raise StopIteration + +- count = self.count +- self.count = self.count + 1 +- return '[%d]' % count, (self.begin + count).dereference() ++ count = self._count ++ self._count = self._count + 1 ++ return '[%d]' % count, (self._begin + count).dereference() + + def __init__(self, typename, val): +- self.typename = strip_versioned_namespace(typename) +- self.val = val ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val + size_max = gdb.parse_and_eval('static_cast(-1)') + if val.type.template_argument(1) == size_max: +- self.size = val['_M_extent']['_M_extent_value'] ++ self._size = val['_M_extent']['_M_extent_value'] + else: +- self.size = val.type.template_argument(1) ++ self._size = val.type.template_argument(1) + + def to_string(self): +- return '%s of length %d' % (self.typename, self.size) ++ return '%s of length %d' % (self._typename, self._size) + + def children(self): +- return self.iterator(self.val['_M_ptr'], self.size) ++ return self._iterator(self._val['_M_ptr'], self._size) + + def display_hint(self): + return 'array' + +-class StdInitializerListPrinter: +- "Print a std::initializer_list" ++ ++class StdInitializerListPrinter(printer_base): ++ """Print a std::initializer_list.""" + + def __init__(self, typename, val): +- self.typename = typename +- self.val = val +- self.size = val['_M_len'] ++ self._typename = typename ++ self._val = val ++ self._size = val['_M_len'] + + def to_string(self): +- return '%s of length %d' % (self.typename, self.size) ++ return '%s of length %d' % (self._typename, self._size) + + def children(self): +- return StdSpanPrinter.iterator(self.val['_M_array'], self.size) ++ return StdSpanPrinter._iterator(self._val['_M_array'], self._size) + + def display_hint(self): + return 'array' + ++ ++class StdAtomicPrinter(printer_base): ++ """Print a std:atomic.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ self._shptr_printer = None ++ self._value_type = self._val.type.template_argument(0) ++ if self._value_type.tag is not None: ++ typ = strip_versioned_namespace(self._value_type.tag) ++ if (typ.startswith('std::shared_ptr<') ++ or typ.startswith('std::weak_ptr<')): ++ impl = val['_M_impl'] ++ self._shptr_printer = SharedPointerPrinter(typename, impl) ++ self.children = self._shptr_children ++ ++ def _shptr_children(self): ++ return SmartPtrIterator(self._shptr_printer._pointer) ++ ++ def to_string(self): ++ if self._shptr_printer is not None: ++ return self._shptr_printer.to_string() ++ ++ if self._value_type.code == gdb.TYPE_CODE_INT: ++ val = self._val['_M_i'] ++ elif self._value_type.code == gdb.TYPE_CODE_FLT: ++ val = self._val['_M_fp'] ++ elif self._value_type.code == gdb.TYPE_CODE_PTR: ++ val = self._val['_M_b']['_M_p'] ++ elif self._value_type.code == gdb.TYPE_CODE_BOOL: ++ val = self._val['_M_base']['_M_i'] ++ else: ++ val = self._val['_M_i'] ++ return '%s<%s> = { %s }' % (self._typename, str(self._value_type), val) ++ ++ ++class StdFormatArgsPrinter(printer_base): ++ """Print a std::basic_format_args.""" ++ # TODO: add printer for basic_format_arg and print out children. ++ # TODO: add printer for __format::_ArgStore. 
++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def to_string(self): ++ targs = get_template_arg_list(self._val.type) ++ char_type = get_template_arg_list(targs[0])[1] ++ if char_type == gdb.lookup_type('char'): ++ typ = 'std::format_args' ++ elif char_type == gdb.lookup_type('wchar_t'): ++ typ = 'std::wformat_args' ++ else: ++ typ = 'std::basic_format_args' ++ ++ size = self._val['_M_packed_size'] ++ if size == 1: ++ return "%s with 1 argument" % (typ) ++ if size == 0: ++ size = self._val['_M_unpacked_size'] ++ return "%s with %d arguments" % (typ, size) ++ ++ ++class StdChronoDurationPrinter(printer_base): ++ """Print a std::chrono::duration.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def _ratio(self): ++ # TODO use reduced period i.e. duration::period ++ period = self._val.type.template_argument(1) ++ num = period.template_argument(0) ++ den = period.template_argument(1) ++ return (num, den) ++ ++ def _suffix(self): ++ num, den = self._ratio() ++ if num == 1: ++ if den == 1: ++ return 's' ++ if den == 1000: ++ return 'ms' ++ if den == 1000000: ++ return 'us' ++ if den == 1000000000: ++ return 'ns' ++ elif den == 1: ++ if num == 60: ++ return 'min' ++ if num == 3600: ++ return 'h' ++ if num == 86400: ++ return 'd' ++ return '[{}]s'.format(num) ++ return "[{}/{}]s".format(num, den) ++ ++ def to_string(self): ++ r = self._val['__r'] ++ if r.type.strip_typedefs().code == gdb.TYPE_CODE_FLT: ++ r = "%g" % r ++ return "std::chrono::duration = {{ {}{} }}".format(r, self._suffix()) ++ ++ ++class StdChronoTimePointPrinter(printer_base): ++ """Print a std::chrono::time_point.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def _clock(self): ++ clock = self._val.type.template_argument(0) ++ name = strip_versioned_namespace(clock.name) ++ if name == 'std::chrono::_V2::system_clock' \ ++ or name == 'std::chrono::system_clock': ++ return ('std::chrono::sys_time', 0) ++ # XXX need to remove leap seconds from utc, gps, and tai ++ if name == 'std::chrono::utc_clock': ++ return ('std::chrono::utc_time', None) # XXX ++ if name == 'std::chrono::gps_clock': ++ return ('std::chrono::gps_time', None) # XXX 315964809 ++ if name == 'std::chrono::tai_clock': ++ return ('std::chrono::tai_time', None) # XXX -378691210 ++ if name == 'std::filesystem::__file_clock': ++ return ('std::chrono::file_time', 6437664000) ++ if name == 'std::chrono::local_t': ++ return ('std::chrono::local_time', 0) ++ return ('{} time_point'.format(name), None) ++ ++ def to_string(self, abbrev=False): ++ clock, offset = self._clock() ++ d = self._val['__d'] ++ r = d['__r'] ++ printer = StdChronoDurationPrinter(d.type.name, d) ++ suffix = printer._suffix() ++ time = '' ++ if offset is not None: ++ num, den = printer._ratio() ++ secs = (r * num / den) + offset ++ try: ++ dt = datetime.datetime.fromtimestamp(secs, _utc_timezone) ++ time = ' [{:%Y-%m-%d %H:%M:%S}]'.format(dt) ++ except: ++ pass ++ s = '%d%s%s' % (r, suffix, time) ++ if abbrev: ++ return s ++ return '%s = { %s }' % (clock, s) ++ ++ ++class StdChronoZonedTimePrinter(printer_base): ++ """Print a std::chrono::zoned_time.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def to_string(self): ++ zone = self._val['_M_zone'].dereference()['_M_name'] ++ time = self._val['_M_tp'] ++ 
printer = StdChronoTimePointPrinter(time.type.name, time) ++ time = printer.to_string(True) ++ return 'std::chrono::zoned_time = {{ {} {} }}'.format(zone, time) ++ ++ ++months = [None, 'January', 'February', 'March', 'April', 'May', 'June', ++ 'July', 'August', 'September', 'October', 'November', 'December'] ++ ++weekdays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', ++ 'Saturday', 'Sunday'] ++ ++ ++class StdChronoCalendarPrinter(printer_base): ++ """Print a std::chrono::day, std::chrono::month, std::chrono::year etc.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def to_string(self): ++ val = self._val ++ typ = self._typename ++ if 'month' in typ and typ != 'std::chrono::year_month_day_last': ++ m = val['_M_m'] ++ if typ.startswith('std::chrono::year'): ++ y = val['_M_y'] ++ ++ if typ == 'std::chrono::day': ++ return '{}'.format(int(val['_M_d'])) ++ if typ == 'std::chrono::month': ++ if m < 1 or m >= len(months): ++ return "%d is not a valid month" % m ++ return months[m] ++ if typ == 'std::chrono::year': ++ return '{}y'.format(y) ++ if typ == 'std::chrono::weekday': ++ wd = val['_M_wd'] ++ if wd < 0 or wd >= len(weekdays): ++ return "%d is not a valid weekday" % wd ++ return '{}'.format(weekdays[wd]) ++ if typ == 'std::chrono::weekday_indexed': ++ return '{}[{}]'.format(val['_M_wd'], int(val['_M_index'])) ++ if typ == 'std::chrono::weekday_last': ++ return '{}[last]'.format(val['_M_wd']) ++ if typ == 'std::chrono::month_day': ++ return '{}/{}'.format(m, val['_M_d']) ++ if typ == 'std::chrono::month_day_last': ++ return '{}/last'.format(m) ++ if typ == 'std::chrono::month_weekday': ++ return '{}/{}'.format(m, val['_M_wdi']) ++ if typ == 'std::chrono::month_weekday_last': ++ return '{}/{}'.format(m, val['_M_wdl']) ++ if typ == 'std::chrono::year_month': ++ return '{}/{}'.format(y, m) ++ if typ == 'std::chrono::year_month_day': ++ return '{}/{}/{}'.format(y, m, val['_M_d']) ++ if typ == 'std::chrono::year_month_day_last': ++ return '{}/{}'.format(y, val['_M_mdl']) ++ if typ == 'std::chrono::year_month_weekday': ++ return '{}/{}/{}'.format(y, m, val['_M_wdi']) ++ if typ == 'std::chrono::year_month_weekday_last': ++ return '{}/{}/{}'.format(y, m, val['_M_wdl']) ++ if typ.startswith('std::chrono::hh_mm_ss'): ++ fract = '' ++ if val['fractional_width'] != 0: ++ fract = '.{:0{}d}'.format(int(val['_M_ss']['_M_r']), ++ int(val['fractional_width'])) ++ h = int(val['_M_h']['__r']) ++ m = int(val['_M_m']['__r']) ++ s = int(val['_M_s']['__r']) ++ if val['_M_is_neg']: ++ h = -h ++ return '{:02}:{:02}:{:02}{}'.format(h, m, s, fract) ++ ++ ++class StdChronoTimeZonePrinter(printer_base): ++ """Print a chrono::time_zone or chrono::time_zone_link.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def to_string(self): ++ str = '%s = %s' % (self._typename, self._val['_M_name']) ++ if self._typename.endswith("_link"): ++ str += ' -> %s' % (self._val['_M_target']) ++ return str ++ ++ ++class StdChronoLeapSecondPrinter(printer_base): ++ """Print a chrono::leap_second.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def to_string(self): ++ date = self._val['_M_s']['__r'] ++ neg = '+-'[date < 0] ++ return '%s %d (%c)' % (self._typename, abs(date), neg) ++ ++ ++class StdChronoTzdbPrinter(printer_base): ++ """Print a chrono::tzdb.""" ++ ++ def __init__(self, typename, 
val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def to_string(self): ++ return '%s %s' % (self._typename, self._val['version']) ++ ++ ++class StdChronoTimeZoneRulePrinter(printer_base): ++ """Print a chrono::time_zone rule.""" ++ ++ def __init__(self, typename, val): ++ self._typename = strip_versioned_namespace(typename) ++ self._val = val ++ ++ def to_string(self): ++ on = self._val['on'] ++ kind = on['kind'] ++ month = months[on['month']] ++ suffixes = {1: 'st', 2: 'nd', 3: 'rd', ++ 21: 'st', 22: 'nd', 23: 'rd', 31: 'st'} ++ day = on['day_of_month'] ++ ordinal_day = '{}{}'.format(day, suffixes.get(day, 'th')) ++ if kind == 0: # DayOfMonth ++ start = '{} {}'.format(month, ordinal_day) ++ else: ++ weekday = weekdays[on['day_of_week']] ++ if kind == 1: # LastWeekDay ++ start = 'last {} in {}'.format(weekday, month) ++ else: ++ if kind == 2: # LessEq ++ direction = ('last', '<=') ++ else: ++ direction = ('first', '>=') ++ day = on['day_of_month'] ++ start = '{} {} {} {} {}'.format(direction[0], weekday, ++ direction[1], month, ++ ordinal_day) ++ return 'time_zone rule {} from {} to {} starting on {}'.format( ++ self._val['name'], self._val['from'], self._val['to'], start) ++ ++ ++class StdLocalePrinter(printer_base): ++ """Print a std::locale.""" ++ ++ def __init__(self, typename, val): ++ self._val = val ++ self._typename = typename ++ ++ def to_string(self): ++ names = self._val['_M_impl']['_M_names'] ++ mod = '' ++ if names[0] == 0: ++ name = '*' ++ else: ++ cats = gdb.parse_and_eval(self._typename + '::_S_categories') ++ ncat = gdb.parse_and_eval(self._typename + '::_S_categories_size') ++ n = names[0].string() ++ cat = cats[0].string() ++ name = '{}={}'.format(cat, n) ++ cat_names = {cat: n} ++ i = 1 ++ while i < ncat and names[i] != 0: ++ n = names[i].string() ++ cat = cats[i].string() ++ name = '{};{}={}'.format(name, cat, n) ++ cat_names[cat] = n ++ i = i + 1 ++ uniq_names = set(cat_names.values()) ++ if len(uniq_names) == 1: ++ name = n ++ elif len(uniq_names) == 2: ++ n1, n2 = (uniq_names) ++ name_list = list(cat_names.values()) ++ other = None ++ if name_list.count(n1) == 1: ++ name = n2 ++ other = n1 ++ elif name_list.count(n2) == 1: ++ name = n1 ++ other = n2 ++ if other is not None: ++ cat = next(c for c, n in cat_names.items() if n == other) ++ mod = ' with "{}={}"'.format(cat, other) ++ return 'std::locale = "{}"{}'.format(name, mod) ++ ++class StdIntegralConstantPrinter(printer_base): ++ """Print a std::true_type or std::false_type.""" ++ ++ def __init__(self, typename, val): ++ self._val = val ++ self._typename = typename ++ ++ def to_string(self): ++ value_type = self._val.type.template_argument(0) ++ value = self._val.type.template_argument(1) ++ if value_type.code == gdb.TYPE_CODE_BOOL: ++ if value: ++ return "std::true_type" ++ else: ++ return "std::false_type" ++ typename = strip_versioned_namespace(self._typename) ++ return "{}<{}, {}>".format(typename, value_type, value) ++ ++class StdTextEncodingPrinter(printer_base): ++ """Print a std::text_encoding.""" ++ ++ def __init__(self, typename, val): ++ self._val = val ++ self._typename = typename ++ ++ def to_string(self): ++ rep = self._val['_M_rep'].dereference() ++ if rep['_M_id'] == 1: ++ return self._val['_M_name'] ++ if rep['_M_id'] == 2: ++ return 'unknown' ++ return rep['_M_name'] ++ + # A "regular expression" printer which conforms to the + # "SubPrettyPrinter" protocol from gdb.printing. 
+ class RxPrinter(object): + def __init__(self, name, function): + super(RxPrinter, self).__init__() + self.name = name +- self.function = function ++ self._function = function + self.enabled = True + + def invoke(self, value): +@@ -1712,36 +2363,40 @@ class RxPrinter(object): + return None + + if value.type.code == gdb.TYPE_CODE_REF: +- if hasattr(gdb.Value,"referenced_value"): ++ if hasattr(gdb.Value, "referenced_value"): + value = value.referenced_value() + +- return self.function(self.name, value) ++ return self._function(self.name, value) + + # A pretty-printer that conforms to the "PrettyPrinter" protocol from + # gdb.printing. It can also be used directly as an old-style printer. ++ ++ + class Printer(object): + def __init__(self, name): + super(Printer, self).__init__() + self.name = name +- self.subprinters = [] +- self.lookup = {} ++ self._subprinters = [] ++ self._lookup = {} + self.enabled = True +- self.compiled_rx = re.compile('^([a-zA-Z0-9_:]+)(<.*>)?$') ++ self._compiled_rx = re.compile('^([a-zA-Z0-9_:]+)(<.*>)?$') + + def add(self, name, function): + # A small sanity check. + # FIXME +- if not self.compiled_rx.match(name): +- raise ValueError('libstdc++ programming error: "%s" does not match' % name) ++ if not self._compiled_rx.match(name): ++ raise ValueError( ++ 'libstdc++ programming error: "%s" does not match' % name) + printer = RxPrinter(name, function) +- self.subprinters.append(printer) +- self.lookup[name] = printer ++ self._subprinters.append(printer) ++ self._lookup[name] = printer + + # Add a name using _GLIBCXX_BEGIN_NAMESPACE_VERSION. + def add_version(self, base, name, function): + self.add(base + name, function) +- if _versioned_namespace: +- vbase = re.sub('^(std|__gnu_cxx)::', r'\g<0>%s' % _versioned_namespace, base) ++ if '__cxx11' not in base: ++ vbase = re.sub('^(std|__gnu_cxx)::', r'\g<0>%s' % ++ _versioned_namespace, base) + self.add(vbase + name, function) + + # Add a name using _GLIBCXX_BEGIN_NAMESPACE_CONTAINER. +@@ -1753,10 +2408,10 @@ class Printer(object): + def get_basic_type(type): + # If it points to a reference, get the reference. + if type.code == gdb.TYPE_CODE_REF: +- type = type.target () ++ type = type.target() + + # Get the unqualified type, stripped of typedefs. +- type = type.unqualified ().strip_typedefs () ++ type = type.unqualified().strip_typedefs() + + return type.tag + +@@ -1767,47 +2422,49 @@ class Printer(object): + + # All the types we match are template types, so we can use a + # dictionary. +- match = self.compiled_rx.match(typename) ++ match = self._compiled_rx.match(typename) + if not match: + return None + + basename = match.group(1) + + if val.type.code == gdb.TYPE_CODE_REF: +- if hasattr(gdb.Value,"referenced_value"): ++ if hasattr(gdb.Value, "referenced_value"): + val = val.referenced_value() + +- if basename in self.lookup: +- return self.lookup[basename].invoke(val) ++ if basename in self._lookup: ++ return self._lookup[basename].invoke(val) + + # Cannot find a pretty printer. Return None. + return None + ++ + libstdcxx_printer = None + ++ + class TemplateTypePrinter(object): +- r""" ++ """ + A type printer for class templates with default template arguments. + + Recognizes specializations of class templates and prints them without + any template arguments that use a default template argument. + Type printers are recursively applied to the template arguments. + +- e.g. replace "std::vector >" with "std::vector". ++ e.g. replace 'std::vector >' with 'std::vector'. 
+ """ + + def __init__(self, name, defargs): + self.name = name +- self.defargs = defargs ++ self._defargs = defargs + self.enabled = True + + class _recognizer(object): +- "The recognizer class for TemplateTypePrinter." ++ """The recognizer class for TemplateTypePrinter.""" + + def __init__(self, name, defargs): + self.name = name +- self.defargs = defargs +- # self.type_obj = None ++ self._defargs = defargs ++ # self._type_obj = None + + def recognize(self, type_obj): + """ +@@ -1830,7 +2487,7 @@ class TemplateTypePrinter(object): + # The actual template argument in the type: + targ = template_args[n] + # The default template argument for the class template: +- defarg = self.defargs.get(n) ++ defarg = self._defargs.get(n) + if defarg is not None: + # Substitute other template arguments into the default: + defarg = defarg.format(*template_args) +@@ -1867,7 +2524,7 @@ class TemplateTypePrinter(object): + if type_obj.code == gdb.TYPE_CODE_ARRAY: + type_str = self._recognize_subtype(type_obj.target()) + if str(type_obj.strip_typedefs()).endswith('[]'): +- return type_str + '[]' # array of unknown bound ++ return type_str + '[]' # array of unknown bound + return "%s[%d]" % (type_str, type_obj.range()[1] + 1) + if type_obj.code == gdb.TYPE_CODE_REF: + return self._recognize_subtype(type_obj.target()) + '&' +@@ -1876,17 +2533,18 @@ class TemplateTypePrinter(object): + return self._recognize_subtype(type_obj.target()) + '&&' + + type_str = gdb.types.apply_type_recognizers( +- gdb.types.get_type_recognizers(), type_obj) ++ gdb.types.get_type_recognizers(), type_obj) + if type_str: + return type_str + return str(type_obj) + + def instantiate(self): +- "Return a recognizer object for this type printer." +- return self._recognizer(self.name, self.defargs) ++ """Return a recognizer object for this type printer.""" ++ return self._recognizer(self.name, self._defargs) ++ + + def add_one_template_type_printer(obj, name, defargs): +- r""" ++ """ + Add a type printer for a class template with default template arguments. + + Args: +@@ -1900,7 +2558,6 @@ def add_one_template_type_printer(obj, name, defargs): + { 2: 'std::hash<{0}>', + 3: 'std::equal_to<{0}>', + 4: 'std::allocator >' } +- + """ + printer = TemplateTypePrinter('std::' + name, defargs) + gdb.types.register_type_printer(obj, printer) +@@ -1909,7 +2566,7 @@ def add_one_template_type_printer(obj, name, defargs): + printer = TemplateTypePrinter('std::__debug::' + name, defargs) + gdb.types.register_type_printer(obj, printer) + +- if _versioned_namespace: ++ if '__cxx11' not in name: + # Add second type printer for same type in versioned namespace: + ns = 'std::' + _versioned_namespace + # PR 86112 Cannot use dict comprehension here: +@@ -1918,66 +2575,104 @@ def add_one_template_type_printer(obj, name, defargs): + printer = TemplateTypePrinter(ns + name, defargs) + gdb.types.register_type_printer(obj, printer) + ++ # Add type printer for same type in debug namespace: ++ printer = TemplateTypePrinter('std::__debug::' + name, defargs) ++ gdb.types.register_type_printer(obj, printer) ++ ++ + class FilteringTypePrinter(object): +- r""" ++ """ + A type printer that uses typedef names for common template specializations. + + Args: +- match (str): The class template to recognize. ++ template (str): The class template to recognize. + name (str): The typedef-name that will be used instead. ++ targ1 (str, optional): The first template argument. Defaults to None. 
+ +- Checks if a specialization of the class template 'match' is the same type ++ Checks if a specialization of the class template 'template' is the same type + as the typedef 'name', and prints it as 'name' instead. + + e.g. if an instantiation of std::basic_istream is the same type as + std::istream then print it as std::istream. ++ ++ If targ1 is provided (not None), match only template specializations with ++ this type as the first template argument, e.g. if template='basic_string' ++ and targ1='char' then only match 'basic_string' and not ++ 'basic_string'. This rejects non-matching specializations ++ more quickly, without needing to do GDB type lookups. + """ + +- def __init__(self, match, name): +- self.match = match ++ def __init__(self, template, name, targ1=None): ++ self._template = template + self.name = name ++ self._targ1 = targ1 + self.enabled = True + + class _recognizer(object): +- "The recognizer class for FilteringTypePrinter." ++ """The recognizer class for FilteringTypePrinter.""" + +- def __init__(self, match, name): +- self.match = match ++ def __init__(self, template, name, targ1): ++ self._template = template + self.name = name +- self.type_obj = None ++ self._targ1 = targ1 ++ self._type_obj = None + + def recognize(self, type_obj): + """ +- If type_obj starts with self.match and is the same type as ++ If type_obj starts with self._template and is the same type as + self.name then return self.name, otherwise None. + """ + if type_obj.tag is None: + return None + +- if self.type_obj is None: +- if not type_obj.tag.startswith(self.match): ++ if self._type_obj is None: ++ if self._targ1 is not None: ++ s = '{}<{}'.format(self._template, self._targ1) ++ if not type_obj.tag.startswith(s): ++ # Filter didn't match. ++ return None ++ elif not type_obj.tag.startswith(self._template): + # Filter didn't match. + return None ++ + try: +- self.type_obj = gdb.lookup_type(self.name).strip_typedefs() ++ self._type_obj = gdb.lookup_type( ++ self.name).strip_typedefs() + except: + pass +- if self.type_obj == type_obj: ++ ++ if self._type_obj is None: ++ return None ++ ++ t1 = gdb.types.get_basic_type(self._type_obj) ++ t2 = gdb.types.get_basic_type(type_obj) ++ if t1 == t2: + return strip_inline_namespaces(self.name) ++ ++ # Workaround ambiguous typedefs matching both std:: and ++ # std::__cxx11:: symbols. ++ if self._template.split('::')[-1] == 'basic_string': ++ s1 = self._type_obj.tag.replace('__cxx11::', '') ++ s2 = type_obj.tag.replace('__cxx11::', '') ++ if s1 == s2: ++ return strip_inline_namespaces(self.name) ++ + return None + + def instantiate(self): +- "Return a recognizer object for this type printer." 
+- return self._recognizer(self.match, self.name) ++ """Return a recognizer object for this type printer.""" ++ return self._recognizer(self._template, self.name, self._targ1) + +-def add_one_type_printer(obj, match, name): +- printer = FilteringTypePrinter('std::' + match, 'std::' + name) ++ ++def add_one_type_printer(obj, template, name, targ1=None): ++ printer = FilteringTypePrinter('std::' + template, 'std::' + name, targ1) + gdb.types.register_type_printer(obj, printer) +- if _versioned_namespace: ++ if '__cxx11' not in template: + ns = 'std::' + _versioned_namespace +- printer = FilteringTypePrinter(ns + match, ns + name) ++ printer = FilteringTypePrinter(ns + template, ns + name, targ1) + gdb.types.register_type_printer(obj, printer) + ++ + def register_type_printers(obj): + global _use_type_printing + +@@ -1985,31 +2680,39 @@ def register_type_printers(obj): + return + + # Add type printers for typedefs std::string, std::wstring etc. +- for ch in ('', 'w', 'u8', 'u16', 'u32'): +- add_one_type_printer(obj, 'basic_string', ch + 'string') +- add_one_type_printer(obj, '__cxx11::basic_string', ch + 'string') ++ for ch in (('', 'char'), ++ ('w', 'wchar_t'), ++ ('u8', 'char8_t'), ++ ('u16', 'char16_t'), ++ ('u32', 'char32_t')): ++ add_one_type_printer(obj, 'basic_string', ch[0] + 'string', ch[1]) ++ add_one_type_printer(obj, '__cxx11::basic_string', ++ ch[0] + 'string', ch[1]) + # Typedefs for __cxx11::basic_string used to be in namespace __cxx11: + add_one_type_printer(obj, '__cxx11::basic_string', +- '__cxx11::' + ch + 'string') +- add_one_type_printer(obj, 'basic_string_view', ch + 'string_view') ++ '__cxx11::' + ch[0] + 'string', ch[1]) ++ add_one_type_printer(obj, 'basic_string_view', ++ ch[0] + 'string_view', ch[1]) + + # Add type printers for typedefs std::istream, std::wistream etc. +- for ch in ('', 'w'): ++ for ch in (('', 'char'), ('w', 'wchar_t')): + for x in ('ios', 'streambuf', 'istream', 'ostream', 'iostream', + 'filebuf', 'ifstream', 'ofstream', 'fstream'): +- add_one_type_printer(obj, 'basic_' + x, ch + x) ++ add_one_type_printer(obj, 'basic_' + x, ch[0] + x, ch[1]) + for x in ('stringbuf', 'istringstream', 'ostringstream', + 'stringstream'): +- add_one_type_printer(obj, 'basic_' + x, ch + x) ++ add_one_type_printer(obj, 'basic_' + x, ch[0] + x, ch[1]) + # types are in __cxx11 namespace, but typedefs aren't: +- add_one_type_printer(obj, '__cxx11::basic_' + x, ch + x) ++ add_one_type_printer(obj, '__cxx11::basic_' + x, ch[0] + x, ch[1]) + + # Add type printers for typedefs regex, wregex, cmatch, wcmatch etc. + for abi in ('', '__cxx11::'): +- for ch in ('', 'w'): +- add_one_type_printer(obj, abi + 'basic_regex', abi + ch + 'regex') ++ for ch in (('', 'char'), ('w', 'wchar_t')): ++ add_one_type_printer(obj, abi + 'basic_regex', ++ abi + ch[0] + 'regex', ch[1]) + for ch in ('c', 's', 'wc', 'ws'): +- add_one_type_printer(obj, abi + 'match_results', abi + ch + 'match') ++ add_one_type_printer( ++ obj, abi + 'match_results', abi + ch + 'match') + for x in ('sub_match', 'regex_iterator', 'regex_token_iterator'): + add_one_type_printer(obj, abi + x, abi + ch + x) + +@@ -2018,9 +2721,9 @@ def register_type_printers(obj): + add_one_type_printer(obj, 'fpos', 'streampos') + + # Add type printers for typedefs. 
+- for dur in ('nanoseconds', 'microseconds', 'milliseconds', +- 'seconds', 'minutes', 'hours'): +- add_one_type_printer(obj, 'duration', dur) ++ for dur in ('nanoseconds', 'microseconds', 'milliseconds', 'seconds', ++ 'minutes', 'hours', 'days', 'weeks', 'years', 'months'): ++ add_one_type_printer(obj, 'chrono::duration', 'chrono::' + dur) + + # Add type printers for typedefs. + add_one_type_printer(obj, 'linear_congruential_engine', 'minstd_rand0') +@@ -2035,47 +2738,54 @@ def register_type_printers(obj): + + # Add type printers for experimental::basic_string_view typedefs. + ns = 'experimental::fundamentals_v1::' +- for ch in ('', 'w', 'u8', 'u16', 'u32'): ++ for ch in (('', 'char'), ++ ('w', 'wchar_t'), ++ ('u8', 'char8_t'), ++ ('u16', 'char16_t'), ++ ('u32', 'char32_t')): + add_one_type_printer(obj, ns + 'basic_string_view', +- ns + ch + 'string_view') ++ ns + ch[0] + 'string_view', ch[1]) + + # Do not show defaulted template arguments in class templates. + add_one_template_type_printer(obj, 'unique_ptr', +- { 1: 'std::default_delete<{0}>' }) +- add_one_template_type_printer(obj, 'deque', { 1: 'std::allocator<{0}>'}) +- add_one_template_type_printer(obj, 'forward_list', { 1: 'std::allocator<{0}>'}) +- add_one_template_type_printer(obj, 'list', { 1: 'std::allocator<{0}>'}) +- add_one_template_type_printer(obj, '__cxx11::list', { 1: 'std::allocator<{0}>'}) +- add_one_template_type_printer(obj, 'vector', { 1: 'std::allocator<{0}>'}) ++ {1: 'std::default_delete<{0}>'}) ++ add_one_template_type_printer(obj, 'deque', {1: 'std::allocator<{0}>'}) ++ add_one_template_type_printer( ++ obj, 'forward_list', {1: 'std::allocator<{0}>'}) ++ add_one_template_type_printer(obj, 'list', {1: 'std::allocator<{0}>'}) ++ add_one_template_type_printer( ++ obj, '__cxx11::list', {1: 'std::allocator<{0}>'}) ++ add_one_template_type_printer(obj, 'vector', {1: 'std::allocator<{0}>'}) + add_one_template_type_printer(obj, 'map', +- { 2: 'std::less<{0}>', +- 3: 'std::allocator>' }) ++ {2: 'std::less<{0}>', ++ 3: 'std::allocator>'}) + add_one_template_type_printer(obj, 'multimap', +- { 2: 'std::less<{0}>', +- 3: 'std::allocator>' }) ++ {2: 'std::less<{0}>', ++ 3: 'std::allocator>'}) + add_one_template_type_printer(obj, 'set', +- { 1: 'std::less<{0}>', 2: 'std::allocator<{0}>' }) ++ {1: 'std::less<{0}>', 2: 'std::allocator<{0}>'}) + add_one_template_type_printer(obj, 'multiset', +- { 1: 'std::less<{0}>', 2: 'std::allocator<{0}>' }) ++ {1: 'std::less<{0}>', 2: 'std::allocator<{0}>'}) + add_one_template_type_printer(obj, 'unordered_map', +- { 2: 'std::hash<{0}>', +- 3: 'std::equal_to<{0}>', +- 4: 'std::allocator>'}) ++ {2: 'std::hash<{0}>', ++ 3: 'std::equal_to<{0}>', ++ 4: 'std::allocator>'}) + add_one_template_type_printer(obj, 'unordered_multimap', +- { 2: 'std::hash<{0}>', +- 3: 'std::equal_to<{0}>', +- 4: 'std::allocator>'}) ++ {2: 'std::hash<{0}>', ++ 3: 'std::equal_to<{0}>', ++ 4: 'std::allocator>'}) + add_one_template_type_printer(obj, 'unordered_set', +- { 1: 'std::hash<{0}>', +- 2: 'std::equal_to<{0}>', +- 3: 'std::allocator<{0}>'}) ++ {1: 'std::hash<{0}>', ++ 2: 'std::equal_to<{0}>', ++ 3: 'std::allocator<{0}>'}) + add_one_template_type_printer(obj, 'unordered_multiset', +- { 1: 'std::hash<{0}>', +- 2: 'std::equal_to<{0}>', +- 3: 'std::allocator<{0}>'}) ++ {1: 'std::hash<{0}>', ++ 2: 'std::equal_to<{0}>', ++ 3: 'std::allocator<{0}>'}) + +-def register_libstdcxx_printers (obj): +- "Register libstdc++ pretty-printers with objfile Obj." 
++ ++def register_libstdcxx_printers(obj): ++ """Register libstdc++ pretty-printers with objfile Obj.""" + + global _use_gdb_pp + global libstdcxx_printer +@@ -2089,7 +2799,8 @@ def register_libstdcxx_printers (obj): + + register_type_printers(obj) + +-def build_libstdcxx_dictionary (): ++ ++def build_libstdcxx_dictionary(): + global libstdcxx_printer + + libstdcxx_printer = Printer("libstdc++-v6") +@@ -2098,7 +2809,8 @@ def build_libstdcxx_dictionary (): + # In order from: + # http://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen/a01847.html + libstdcxx_printer.add_version('std::', 'basic_string', StdStringPrinter) +- libstdcxx_printer.add_version('std::__cxx11::', 'basic_string', StdStringPrinter) ++ libstdcxx_printer.add_version( ++ 'std::__cxx11::', 'basic_string', StdStringPrinter) + libstdcxx_printer.add_container('std::', 'bitset', StdBitsetPrinter) + libstdcxx_printer.add_container('std::', 'deque', StdDequePrinter) + libstdcxx_printer.add_container('std::', 'list', StdListPrinter) +@@ -2116,6 +2828,8 @@ def build_libstdcxx_dictionary (): + libstdcxx_printer.add_version('std::', 'unique_ptr', UniquePointerPrinter) + libstdcxx_printer.add_container('std::', 'vector', StdVectorPrinter) + # vector ++ libstdcxx_printer.add_version('std::', 'locale', StdLocalePrinter) ++ + + if hasattr(gdb.Value, 'dynamic_type'): + libstdcxx_printer.add_version('std::', 'error_code', +@@ -2130,12 +2844,7 @@ def build_libstdcxx_dictionary (): + libstdcxx_printer.add('std::__debug::map', StdMapPrinter) + libstdcxx_printer.add('std::__debug::multimap', StdMapPrinter) + libstdcxx_printer.add('std::__debug::multiset', StdSetPrinter) +- libstdcxx_printer.add('std::__debug::priority_queue', +- StdStackOrQueuePrinter) +- libstdcxx_printer.add('std::__debug::queue', StdStackOrQueuePrinter) + libstdcxx_printer.add('std::__debug::set', StdSetPrinter) +- libstdcxx_printer.add('std::__debug::stack', StdStackOrQueuePrinter) +- libstdcxx_printer.add('std::__debug::unique_ptr', UniquePointerPrinter) + libstdcxx_printer.add('std::__debug::vector', StdVectorPrinter) + + # These are the TR1 and C++11 printers. 
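# (Illustrative sketch, not part of the patch above: the targ1 argument that
# the patched FilteringTypePrinter gains amounts to a cheap string-prefix
# test on the type tag, done before any gdb.lookup_type() call is attempted.
# The helper name matches_template and the sample tags below are invented
# for this example and do not appear in printers.py.)

def matches_template(tag, template, targ1=None):
    """Return True if tag could be a specialization of template,
    optionally requiring targ1 as the first template argument."""
    if targ1 is not None:
        # e.g. template='std::basic_string', targ1='char' only accepts
        # tags that start with 'std::basic_string<char'.
        return tag.startswith('{}<{}'.format(template, targ1))
    return tag.startswith(template)

print(matches_template(
    'std::basic_string<char, std::char_traits<char>, std::allocator<char> >',
    'std::basic_string', 'char'))   # True: prefix matches, lookup would proceed
print(matches_template(
    'std::basic_string<wchar_t, std::char_traits<wchar_t>, std::allocator<wchar_t> >',
    'std::basic_string', 'char'))   # False: rejected without a GDB type lookup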
+@@ -2153,8 +2862,10 @@ def build_libstdcxx_dictionary (): + libstdcxx_printer.add_container('std::', 'forward_list', + StdForwardListPrinter) + +- libstdcxx_printer.add_version('std::tr1::', 'shared_ptr', SharedPointerPrinter) +- libstdcxx_printer.add_version('std::tr1::', 'weak_ptr', SharedPointerPrinter) ++ libstdcxx_printer.add_version( ++ 'std::tr1::', 'shared_ptr', SharedPointerPrinter) ++ libstdcxx_printer.add_version( ++ 'std::tr1::', 'weak_ptr', SharedPointerPrinter) + libstdcxx_printer.add_version('std::tr1::', 'unordered_map', + Tr1UnorderedMapPrinter) + libstdcxx_printer.add_version('std::tr1::', 'unordered_set', +@@ -2166,7 +2877,27 @@ def build_libstdcxx_dictionary (): + + libstdcxx_printer.add_version('std::', 'initializer_list', + StdInitializerListPrinter) +- ++ libstdcxx_printer.add_version('std::', 'atomic', StdAtomicPrinter) ++ libstdcxx_printer.add_version( ++ 'std::', 'basic_stringbuf', StdStringBufPrinter) ++ libstdcxx_printer.add_version( ++ 'std::__cxx11::', 'basic_stringbuf', StdStringBufPrinter) ++ for sstream in ('istringstream', 'ostringstream', 'stringstream'): ++ libstdcxx_printer.add_version( ++ 'std::', 'basic_' + sstream, StdStringStreamPrinter) ++ libstdcxx_printer.add_version( ++ 'std::__cxx11::', 'basic_' + sstream, StdStringStreamPrinter) ++ ++ libstdcxx_printer.add_version('std::chrono::', 'duration', ++ StdChronoDurationPrinter) ++ libstdcxx_printer.add_version('std::chrono::', 'time_point', ++ StdChronoTimePointPrinter) ++ libstdcxx_printer.add_version('std::', 'integral_constant', ++ StdIntegralConstantPrinter) ++ ++ # std::regex components ++ libstdcxx_printer.add_version('std::__detail::', '_State', ++ StdRegexStatePrinter) + + # These are the C++11 printer registrations for -D_GLIBCXX_DEBUG cases. + # The tr1 namespace containers do not have any debug equivalents, +@@ -2212,11 +2943,35 @@ def build_libstdcxx_dictionary (): + '_Node_handle', StdNodeHandlePrinter) + + # C++20 components +- libstdcxx_printer.add_version('std::', 'partial_ordering', StdCmpCatPrinter) ++ libstdcxx_printer.add_version( ++ 'std::', 'partial_ordering', StdCmpCatPrinter) + libstdcxx_printer.add_version('std::', 'weak_ordering', StdCmpCatPrinter) + libstdcxx_printer.add_version('std::', 'strong_ordering', StdCmpCatPrinter) + libstdcxx_printer.add_version('std::', 'span', StdSpanPrinter) +- ++ libstdcxx_printer.add_version('std::', 'basic_format_args', ++ StdFormatArgsPrinter) ++ for c in ['day', 'month', 'year', 'weekday', 'weekday_indexed', 'weekday_last', ++ 'month_day', 'month_day_last', 'month_weekday', 'month_weekday_last', ++ 'year_month', 'year_month_day', 'year_month_day_last', ++ 'year_month_weekday', 'year_month_weekday_last', 'hh_mm_ss']: ++ libstdcxx_printer.add_version('std::chrono::', c, ++ StdChronoCalendarPrinter) ++ libstdcxx_printer.add_version('std::chrono::', 'time_zone', ++ StdChronoTimeZonePrinter) ++ libstdcxx_printer.add_version('std::chrono::', 'time_zone_link', ++ StdChronoTimeZonePrinter) ++ libstdcxx_printer.add_version('std::chrono::', 'zoned_time', ++ StdChronoZonedTimePrinter) ++ libstdcxx_printer.add_version('std::chrono::', 'leap_second', ++ StdChronoLeapSecondPrinter) ++ libstdcxx_printer.add_version( ++ 'std::chrono::', 'tzdb', StdChronoTzdbPrinter) ++ # libstdcxx_printer.add_version('std::chrono::(anonymous namespace)', 'Rule', ++ # StdChronoTimeZoneRulePrinter) ++ ++ # C++26 components ++ libstdcxx_printer.add_version('std::', 'text_encoding', ++ StdTextEncodingPrinter) + # Extensions. 
+ libstdcxx_printer.add_version('__gnu_cxx::', 'slist', StdSlistPrinter) + +@@ -2238,11 +2993,11 @@ def build_libstdcxx_dictionary (): + libstdcxx_printer.add_version('__gnu_cxx::', '__normal_iterator', + StdVectorIteratorPrinter) + libstdcxx_printer.add_container('std::', '_Bit_iterator', +- StdBitIteratorPrinter) ++ StdBitIteratorPrinter) + libstdcxx_printer.add_container('std::', '_Bit_const_iterator', +- StdBitIteratorPrinter) ++ StdBitIteratorPrinter) + libstdcxx_printer.add_container('std::', '_Bit_reference', +- StdBitReferencePrinter) ++ StdBitReferencePrinter) + libstdcxx_printer.add_version('__gnu_cxx::', '_Slist_iterator', + StdSlistIteratorPrinter) + libstdcxx_printer.add_container('std::', '_Fwd_list_iterator', +@@ -2255,4 +3010,5 @@ def build_libstdcxx_dictionary (): + libstdcxx_printer.add('__gnu_debug::_Safe_iterator', + StdDebugIteratorPrinter) + +-build_libstdcxx_dictionary () ++ ++build_libstdcxx_dictionary() +diff --git a/libstdc++-v3/python/libstdcxx/v6/xmethods.py b/libstdc++-v3/python/libstdcxx/v6/xmethods.py +index a50ebca2287..109ca10956a 100644 +--- a/libstdc++-v3/python/libstdcxx/v6/xmethods.py ++++ b/libstdc++-v3/python/libstdcxx/v6/xmethods.py +@@ -1,6 +1,6 @@ + # Xmethods for libstdc++. + +-# Copyright (C) 2014-2021 Free Software Foundation, Inc. ++# Copyright (C) 2014-2025 Free Software Foundation, Inc. + + # This program is free software; you can redistribute it and/or modify + # it under the terms of the GNU General Public License as published by +@@ -21,12 +21,27 @@ import re + + matcher_name_prefix = 'libstdc++::' + ++ + def get_bool_type(): + return gdb.lookup_type('bool') + + def get_std_size_type(): + return gdb.lookup_type('std::size_t') + ++_versioned_namespace = '__8::' ++ ++def is_specialization_of(x, template_name): ++ """ ++ Test whether a type is a specialization of the named class template. ++ The type can be specified as a string or a gdb.Type object. ++ The template should be the name of a class template as a string, ++ without any 'std' qualification. 
++ """ ++ if isinstance(x, gdb.Type): ++ x = x.tag ++ template_name = '(%s)?%s' % (_versioned_namespace, template_name) ++ return re.match(r'^std::(__\d::)?%s<.*>$' % template_name, x) is not None ++ + class LibStdCxxXMethod(gdb.xmethod.XMethod): + def __init__(self, name, worker_class): + gdb.xmethod.XMethod.__init__(self, name) +@@ -34,6 +49,7 @@ class LibStdCxxXMethod(gdb.xmethod.XMethod): + + # Xmethods for std::array + ++ + class ArrayWorkerBase(gdb.xmethod.XMethodWorker): + def __init__(self, val_type, size): + self._val_type = val_type +@@ -43,6 +59,7 @@ class ArrayWorkerBase(gdb.xmethod.XMethodWorker): + nullptr = gdb.parse_and_eval('(void *) 0') + return nullptr.cast(self._val_type.pointer()).dereference() + ++ + class ArraySizeWorker(ArrayWorkerBase): + def __init__(self, val_type, size): + ArrayWorkerBase.__init__(self, val_type, size) +@@ -56,6 +73,7 @@ class ArraySizeWorker(ArrayWorkerBase): + def __call__(self, obj): + return self._size + ++ + class ArrayEmptyWorker(ArrayWorkerBase): + def __init__(self, val_type, size): + ArrayWorkerBase.__init__(self, val_type, size) +@@ -69,6 +87,7 @@ class ArrayEmptyWorker(ArrayWorkerBase): + def __call__(self, obj): + return (int(self._size) == 0) + ++ + class ArrayFrontWorker(ArrayWorkerBase): + def __init__(self, val_type, size): + ArrayWorkerBase.__init__(self, val_type, size) +@@ -85,6 +104,7 @@ class ArrayFrontWorker(ArrayWorkerBase): + else: + return self.null_value() + ++ + class ArrayBackWorker(ArrayWorkerBase): + def __init__(self, val_type, size): + ArrayWorkerBase.__init__(self, val_type, size) +@@ -101,6 +121,7 @@ class ArrayBackWorker(ArrayWorkerBase): + else: + return self.null_value() + ++ + class ArrayAtWorker(ArrayWorkerBase): + def __init__(self, val_type, size): + ArrayWorkerBase.__init__(self, val_type, size) +@@ -117,6 +138,7 @@ class ArrayAtWorker(ArrayWorkerBase): + ((int(index), self._size))) + return obj['_M_elems'][index] + ++ + class ArraySubscriptWorker(ArrayWorkerBase): + def __init__(self, val_type, size): + ArrayWorkerBase.__init__(self, val_type, size) +@@ -133,6 +155,7 @@ class ArraySubscriptWorker(ArrayWorkerBase): + else: + return self.null_value() + ++ + class ArrayMethodsMatcher(gdb.xmethod.XMethodMatcher): + def __init__(self): + gdb.xmethod.XMethodMatcher.__init__(self, +@@ -148,7 +171,7 @@ class ArrayMethodsMatcher(gdb.xmethod.XMethodMatcher): + self.methods = [self._method_dict[m] for m in self._method_dict] + + def match(self, class_type, method_name): +- if not re.match('^std::(__\d+::)?array<.*>$', class_type.tag): ++ if not is_specialization_of(class_type, 'array'): + return None + method = self._method_dict.get(method_name) + if method is None or not method.enabled: +@@ -160,8 +183,10 @@ class ArrayMethodsMatcher(gdb.xmethod.XMethodMatcher): + return None + return method.worker_class(value_type, size) + ++ + # Xmethods for std::deque + ++ + class DequeWorkerBase(gdb.xmethod.XMethodWorker): + def __init__(self, val_type): + self._val_type = val_type +@@ -186,6 +211,7 @@ class DequeWorkerBase(gdb.xmethod.XMethodWorker): + index_node = start['_M_node'][1 + int(idx) // self._bufsize] + return index_node[idx % self._bufsize] + ++ + class DequeEmptyWorker(DequeWorkerBase): + def get_arg_types(self): + return None +@@ -197,6 +223,7 @@ class DequeEmptyWorker(DequeWorkerBase): + return (obj['_M_impl']['_M_start']['_M_cur'] == + obj['_M_impl']['_M_finish']['_M_cur']) + ++ + class DequeSizeWorker(DequeWorkerBase): + def get_arg_types(self): + return None +@@ -207,6 +234,7 @@ class 
DequeSizeWorker(DequeWorkerBase): + def __call__(self, obj): + return self.size(obj) + ++ + class DequeFrontWorker(DequeWorkerBase): + def get_arg_types(self): + return None +@@ -217,6 +245,7 @@ class DequeFrontWorker(DequeWorkerBase): + def __call__(self, obj): + return obj['_M_impl']['_M_start']['_M_cur'][0] + ++ + class DequeBackWorker(DequeWorkerBase): + def get_arg_types(self): + return None +@@ -226,12 +255,13 @@ class DequeBackWorker(DequeWorkerBase): + + def __call__(self, obj): + if (obj['_M_impl']['_M_finish']['_M_cur'] == +- obj['_M_impl']['_M_finish']['_M_first']): ++ obj['_M_impl']['_M_finish']['_M_first']): + prev_node = obj['_M_impl']['_M_finish']['_M_node'] - 1 + return prev_node[0][self._bufsize - 1] + else: + return obj['_M_impl']['_M_finish']['_M_cur'][-1] + ++ + class DequeSubscriptWorker(DequeWorkerBase): + def get_arg_types(self): + return get_std_size_type() +@@ -242,6 +272,7 @@ class DequeSubscriptWorker(DequeWorkerBase): + def __call__(self, obj, subscript): + return self.index(obj, subscript) + ++ + class DequeAtWorker(DequeWorkerBase): + def get_arg_types(self): + return get_std_size_type() +@@ -255,7 +286,8 @@ class DequeAtWorker(DequeWorkerBase): + raise IndexError('Deque index "%d" should not be >= %d.' % + (int(index), deque_size)) + else: +- return self.index(obj, index) ++ return self.index(obj, index) ++ + + class DequeMethodsMatcher(gdb.xmethod.XMethodMatcher): + def __init__(self): +@@ -272,7 +304,7 @@ class DequeMethodsMatcher(gdb.xmethod.XMethodMatcher): + self.methods = [self._method_dict[m] for m in self._method_dict] + + def match(self, class_type, method_name): +- if not re.match('^std::(__\d+::)?deque<.*>$', class_type.tag): ++ if not is_specialization_of(class_type, 'deque'): + return None + method = self._method_dict.get(method_name) + if method is None or not method.enabled: +@@ -281,6 +313,7 @@ class DequeMethodsMatcher(gdb.xmethod.XMethodMatcher): + + # Xmethods for std::forward_list + ++ + class ForwardListWorkerBase(gdb.xmethod.XMethodMatcher): + def __init__(self, val_type, node_type): + self._val_type = val_type +@@ -289,6 +322,7 @@ class ForwardListWorkerBase(gdb.xmethod.XMethodMatcher): + def get_arg_types(self): + return None + ++ + class ForwardListEmptyWorker(ForwardListWorkerBase): + def get_result_type(self, obj): + return get_bool_type() +@@ -296,6 +330,7 @@ class ForwardListEmptyWorker(ForwardListWorkerBase): + def __call__(self, obj): + return obj['_M_impl']['_M_head']['_M_next'] == 0 + ++ + class ForwardListFrontWorker(ForwardListWorkerBase): + def get_result_type(self, obj): + return self._val_type +@@ -305,6 +340,7 @@ class ForwardListFrontWorker(ForwardListWorkerBase): + val_address = node['_M_storage']['_M_storage'].address + return val_address.cast(self._val_type.pointer()).dereference() + ++ + class ForwardListMethodsMatcher(gdb.xmethod.XMethodMatcher): + def __init__(self): + matcher_name = matcher_name_prefix + 'forward_list' +@@ -316,7 +352,7 @@ class ForwardListMethodsMatcher(gdb.xmethod.XMethodMatcher): + self.methods = [self._method_dict[m] for m in self._method_dict] + + def match(self, class_type, method_name): +- if not re.match('^std::(__\d+::)?forward_list<.*>$', class_type.tag): ++ if not is_specialization_of(class_type, 'forward_list'): + return None + method = self._method_dict.get(method_name) + if method is None or not method.enabled: +@@ -327,6 +363,7 @@ class ForwardListMethodsMatcher(gdb.xmethod.XMethodMatcher): + + # Xmethods for std::list + ++ + class ListWorkerBase(gdb.xmethod.XMethodWorker): + def 
__init__(self, val_type, node_type): + self._val_type = val_type +@@ -344,6 +381,7 @@ class ListWorkerBase(gdb.xmethod.XMethodWorker): + addr = node['_M_storage'].address + return addr.cast(self._val_type.pointer()).dereference() + ++ + class ListEmptyWorker(ListWorkerBase): + def get_result_type(self, obj): + return get_bool_type() +@@ -355,6 +393,7 @@ class ListEmptyWorker(ListWorkerBase): + else: + return False + ++ + class ListSizeWorker(ListWorkerBase): + def get_result_type(self, obj): + return get_std_size_type() +@@ -368,6 +407,7 @@ class ListSizeWorker(ListWorkerBase): + size += 1 + return size + ++ + class ListFrontWorker(ListWorkerBase): + def get_result_type(self, obj): + return self._val_type +@@ -376,6 +416,7 @@ class ListFrontWorker(ListWorkerBase): + node = obj['_M_impl']['_M_node']['_M_next'].cast(self._node_type) + return self.get_value_from_node(node) + ++ + class ListBackWorker(ListWorkerBase): + def get_result_type(self, obj): + return self._val_type +@@ -384,6 +425,7 @@ class ListBackWorker(ListWorkerBase): + prev_node = obj['_M_impl']['_M_node']['_M_prev'].cast(self._node_type) + return self.get_value_from_node(prev_node) + ++ + class ListMethodsMatcher(gdb.xmethod.XMethodMatcher): + def __init__(self): + gdb.xmethod.XMethodMatcher.__init__(self, +@@ -397,7 +439,7 @@ class ListMethodsMatcher(gdb.xmethod.XMethodMatcher): + self.methods = [self._method_dict[m] for m in self._method_dict] + + def match(self, class_type, method_name): +- if not re.match('^std::(__\d+::)?(__cxx11::)?list<.*>$', class_type.tag): ++ if not is_specialization_of(class_type, '(__cxx11::)?list'): + return None + method = self._method_dict.get(method_name) + if method is None or not method.enabled: +@@ -408,6 +450,7 @@ class ListMethodsMatcher(gdb.xmethod.XMethodMatcher): + + # Xmethods for std::vector + ++ + class VectorWorkerBase(gdb.xmethod.XMethodWorker): + def __init__(self, val_type): + self._val_type = val_type +@@ -432,6 +475,7 @@ class VectorWorkerBase(gdb.xmethod.XMethodWorker): + else: + return obj['_M_impl']['_M_start'][index] + ++ + class VectorEmptyWorker(VectorWorkerBase): + def get_arg_types(self): + return None +@@ -442,6 +486,7 @@ class VectorEmptyWorker(VectorWorkerBase): + def __call__(self, obj): + return int(self.size(obj)) == 0 + ++ + class VectorSizeWorker(VectorWorkerBase): + def get_arg_types(self): + return None +@@ -452,6 +497,7 @@ class VectorSizeWorker(VectorWorkerBase): + def __call__(self, obj): + return self.size(obj) + ++ + class VectorFrontWorker(VectorWorkerBase): + def get_arg_types(self): + return None +@@ -462,6 +508,7 @@ class VectorFrontWorker(VectorWorkerBase): + def __call__(self, obj): + return self.get(obj, 0) + ++ + class VectorBackWorker(VectorWorkerBase): + def get_arg_types(self): + return None +@@ -472,6 +519,7 @@ class VectorBackWorker(VectorWorkerBase): + def __call__(self, obj): + return self.get(obj, int(self.size(obj)) - 1) + ++ + class VectorAtWorker(VectorWorkerBase): + def get_arg_types(self): + return get_std_size_type() +@@ -486,6 +534,7 @@ class VectorAtWorker(VectorWorkerBase): + ((int(index), size))) + return self.get(obj, int(index)) + ++ + class VectorSubscriptWorker(VectorWorkerBase): + def get_arg_types(self): + return get_std_size_type() +@@ -496,6 +545,7 @@ class VectorSubscriptWorker(VectorWorkerBase): + def __call__(self, obj, subscript): + return self.get(obj, int(subscript)) + ++ + class VectorMethodsMatcher(gdb.xmethod.XMethodMatcher): + def __init__(self): + gdb.xmethod.XMethodMatcher.__init__(self, +@@ -512,7 +562,7 @@ 
class VectorMethodsMatcher(gdb.xmethod.XMethodMatcher): + self.methods = [self._method_dict[m] for m in self._method_dict] + + def match(self, class_type, method_name): +- if not re.match('^std::(__\d+::)?vector<.*>$', class_type.tag): ++ if not is_specialization_of(class_type, 'vector'): + return None + method = self._method_dict.get(method_name) + if method is None or not method.enabled: +@@ -521,6 +571,7 @@ class VectorMethodsMatcher(gdb.xmethod.XMethodMatcher): + + # Xmethods for associative containers + ++ + class AssociativeContainerWorkerBase(gdb.xmethod.XMethodWorker): + def __init__(self, unordered): + self._unordered = unordered +@@ -534,6 +585,7 @@ class AssociativeContainerWorkerBase(gdb.xmethod.XMethodWorker): + def get_arg_types(self): + return None + ++ + class AssociativeContainerEmptyWorker(AssociativeContainerWorkerBase): + def get_result_type(self, obj): + return get_bool_type() +@@ -541,6 +593,7 @@ class AssociativeContainerEmptyWorker(AssociativeContainerWorkerBase): + def __call__(self, obj): + return int(self.node_count(obj)) == 0 + ++ + class AssociativeContainerSizeWorker(AssociativeContainerWorkerBase): + def get_result_type(self, obj): + return get_std_size_type() +@@ -548,6 +601,7 @@ class AssociativeContainerSizeWorker(AssociativeContainerWorkerBase): + def __call__(self, obj): + return self.node_count(obj) + ++ + class AssociativeContainerMethodsMatcher(gdb.xmethod.XMethodMatcher): + def __init__(self, name): + gdb.xmethod.XMethodMatcher.__init__(self, +@@ -561,7 +615,7 @@ class AssociativeContainerMethodsMatcher(gdb.xmethod.XMethodMatcher): + self.methods = [self._method_dict[m] for m in self._method_dict] + + def match(self, class_type, method_name): +- if not re.match('^std::(__\d+::)?%s<.*>$' % self._name, class_type.tag): ++ if not is_specialization_of(class_type, self._name): + return None + method = self._method_dict.get(method_name) + if method is None or not method.enabled: +@@ -571,8 +625,11 @@ class AssociativeContainerMethodsMatcher(gdb.xmethod.XMethodMatcher): + + # Xmethods for std::unique_ptr + ++ + class UniquePtrGetWorker(gdb.xmethod.XMethodWorker): +- "Implements std::unique_ptr::get() and std::unique_ptr::operator->()" ++ """ ++ Implement std::unique_ptr::get() and std::unique_ptr::operator->(). 
++ """ + + def __init__(self, elem_type): + self._is_array = elem_type.code == gdb.TYPE_CODE_ARRAY +@@ -588,19 +645,19 @@ class UniquePtrGetWorker(gdb.xmethod.XMethodWorker): + return self._elem_type.pointer() + + def _supports(self, method_name): +- "operator-> is not supported for unique_ptr" ++ # operator-> is not supported for unique_ptr + return method_name == 'get' or not self._is_array + + def __call__(self, obj): + impl_type = obj.dereference().type.fields()[0].type.tag + # Check for new implementations first: +- if re.match('^std::(__\d+::)?__uniq_ptr_(data|impl)<.*>$', impl_type): ++ if is_specialization_of(impl_type, '__uniq_ptr_(data|impl)'): + tuple_member = obj['_M_t']['_M_t'] +- elif re.match('^std::(__\d+::)?tuple<.*>$', impl_type): ++ elif is_specialization_of(impl_type, 'tuple'): + tuple_member = obj['_M_t'] + else: + return None +- tuple_impl_type = tuple_member.type.fields()[0].type # _Tuple_impl ++ tuple_impl_type = tuple_member.type.fields()[0].type # _Tuple_impl + tuple_head_type = tuple_impl_type.fields()[1].type # _Head_base + head_field = tuple_head_type.fields()[0] + if head_field.name == '_M_head_impl': +@@ -610,8 +667,9 @@ class UniquePtrGetWorker(gdb.xmethod.XMethodWorker): + else: + return None + ++ + class UniquePtrDerefWorker(UniquePtrGetWorker): +- "Implements std::unique_ptr::operator*()" ++ """Implement std::unique_ptr::operator*().""" + + def __init__(self, elem_type): + UniquePtrGetWorker.__init__(self, elem_type) +@@ -620,14 +678,15 @@ class UniquePtrDerefWorker(UniquePtrGetWorker): + return self._elem_type + + def _supports(self, method_name): +- "operator* is not supported for unique_ptr" ++ # operator* is not supported for unique_ptr + return not self._is_array + + def __call__(self, obj): + return UniquePtrGetWorker.__call__(self, obj).dereference() + ++ + class UniquePtrSubscriptWorker(UniquePtrGetWorker): +- "Implements std::unique_ptr::operator[](size_t)" ++ """Implement std::unique_ptr::operator[](size_t).""" + + def __init__(self, elem_type): + UniquePtrGetWorker.__init__(self, elem_type) +@@ -639,12 +698,13 @@ class UniquePtrSubscriptWorker(UniquePtrGetWorker): + return self._elem_type + + def _supports(self, method_name): +- "operator[] is only supported for unique_ptr" ++ # operator[] is only supported for unique_ptr + return self._is_array + + def __call__(self, obj, index): + return UniquePtrGetWorker.__call__(self, obj)[index] + ++ + class UniquePtrMethodsMatcher(gdb.xmethod.XMethodMatcher): + def __init__(self): + gdb.xmethod.XMethodMatcher.__init__(self, +@@ -658,7 +718,7 @@ class UniquePtrMethodsMatcher(gdb.xmethod.XMethodMatcher): + self.methods = [self._method_dict[m] for m in self._method_dict] + + def match(self, class_type, method_name): +- if not re.match('^std::(__\d+::)?unique_ptr<.*>$', class_type.tag): ++ if not is_specialization_of(class_type, 'unique_ptr'): + return None + method = self._method_dict.get(method_name) + if method is None or not method.enabled: +@@ -670,8 +730,11 @@ class UniquePtrMethodsMatcher(gdb.xmethod.XMethodMatcher): + + # Xmethods for std::shared_ptr + ++ + class SharedPtrGetWorker(gdb.xmethod.XMethodWorker): +- "Implements std::shared_ptr::get() and std::shared_ptr::operator->()" ++ """ ++ Implements std::shared_ptr::get() and std::shared_ptr::operator->(). 
++ """ + + def __init__(self, elem_type): + self._is_array = elem_type.code == gdb.TYPE_CODE_ARRAY +@@ -687,14 +750,15 @@ class SharedPtrGetWorker(gdb.xmethod.XMethodWorker): + return self._elem_type.pointer() + + def _supports(self, method_name): +- "operator-> is not supported for shared_ptr" ++ # operator-> is not supported for shared_ptr + return method_name == 'get' or not self._is_array + + def __call__(self, obj): + return obj['_M_ptr'] + ++ + class SharedPtrDerefWorker(SharedPtrGetWorker): +- "Implements std::shared_ptr::operator*()" ++ """Implement std::shared_ptr::operator*().""" + + def __init__(self, elem_type): + SharedPtrGetWorker.__init__(self, elem_type) +@@ -703,14 +767,15 @@ class SharedPtrDerefWorker(SharedPtrGetWorker): + return self._elem_type + + def _supports(self, method_name): +- "operator* is not supported for shared_ptr" ++ # operator* is not supported for shared_ptr + return not self._is_array + + def __call__(self, obj): + return SharedPtrGetWorker.__call__(self, obj).dereference() + ++ + class SharedPtrSubscriptWorker(SharedPtrGetWorker): +- "Implements std::shared_ptr::operator[](size_t)" ++ """Implement std::shared_ptr::operator[](size_t).""" + + def __init__(self, elem_type): + SharedPtrGetWorker.__init__(self, elem_type) +@@ -722,19 +787,20 @@ class SharedPtrSubscriptWorker(SharedPtrGetWorker): + return self._elem_type + + def _supports(self, method_name): +- "operator[] is only supported for shared_ptr" ++ # operator[] is only supported for shared_ptr + return self._is_array + + def __call__(self, obj, index): + # Check bounds if _elem_type is an array of known bound +- m = re.match('.*\[(\d+)]$', str(self._elem_type)) ++ m = re.match(r'.*\[(\d+)]$', str(self._elem_type)) + if m and index >= int(m.group(1)): + raise IndexError('shared_ptr<%s> index "%d" should not be >= %d.' 
% + (self._elem_type, int(index), int(m.group(1)))) + return SharedPtrGetWorker.__call__(self, obj)[index] + ++ + class SharedPtrUseCountWorker(gdb.xmethod.XMethodWorker): +- "Implements std::shared_ptr::use_count()" ++ """Implement std::shared_ptr::use_count().""" + + def __init__(self, elem_type): + pass +@@ -752,8 +818,9 @@ class SharedPtrUseCountWorker(gdb.xmethod.XMethodWorker): + refcounts = obj['_M_refcount']['_M_pi'] + return refcounts['_M_use_count'] if refcounts else 0 + ++ + class SharedPtrUniqueWorker(SharedPtrUseCountWorker): +- "Implements std::shared_ptr::unique()" ++ """Implement std::shared_ptr::unique().""" + + def __init__(self, elem_type): + SharedPtrUseCountWorker.__init__(self, elem_type) +@@ -764,6 +831,7 @@ class SharedPtrUniqueWorker(SharedPtrUseCountWorker): + def __call__(self, obj): + return SharedPtrUseCountWorker.__call__(self, obj) == 1 + ++ + class SharedPtrMethodsMatcher(gdb.xmethod.XMethodMatcher): + def __init__(self): + gdb.xmethod.XMethodMatcher.__init__(self, +@@ -779,7 +847,7 @@ class SharedPtrMethodsMatcher(gdb.xmethod.XMethodMatcher): + self.methods = [self._method_dict[m] for m in self._method_dict] + + def match(self, class_type, method_name): +- if not re.match('^std::(__\d+::)?shared_ptr<.*>$', class_type.tag): ++ if not is_specialization_of(class_type, 'shared_ptr'): + return None + method = self._method_dict.get(method_name) + if method is None or not method.enabled: +@@ -789,6 +857,7 @@ class SharedPtrMethodsMatcher(gdb.xmethod.XMethodMatcher): + return worker + return None + ++ + def register_libstdcxx_xmethods(locus): + gdb.xmethod.register_xmethod_matcher(locus, ArrayMethodsMatcher()) + gdb.xmethod.register_xmethod_matcher(locus, ForwardListMethodsMatcher()) diff --git a/SOURCES/gcc11-pr118976.patch b/SOURCES/gcc11-pr118976.patch new file mode 100644 index 0000000..f3485ff --- /dev/null +++ b/SOURCES/gcc11-pr118976.patch @@ -0,0 +1,64 @@ +Original patch (taken from GCC 12 branch version) edited for GCC 11 to +use .c rather than .cc filenames. + +commit 587b370c8492aadaab14c57e242c66778cc78891 +Author: Richard Sandiford +Date: Tue Mar 11 15:51:55 2025 +0000 + + Fix folding of BIT_NOT_EXPR for POLY_INT_CST [PR118976] + + There was an embarrassing typo in the folding of BIT_NOT_EXPR for + POLY_INT_CSTs: it used - rather than ~ on the poly_int. Not sure + how that happened, but it might have been due to the way that + ~x is implemented as -1 - x internally. + + gcc/ + PR tree-optimization/118976 + * fold-const.cc (const_unop): Use ~ rather than - for BIT_NOT_EXPR. + * config/aarch64/aarch64.cc (aarch64_test_sve_folding): New function. + (aarch64_run_selftests): Run it. + + (cherry picked from commit 78380fd7f743e23dfdf013d68a2f0347e1511550) + +diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c +index be0d958dcf6b..72d737d62228 100644 +--- a/gcc/config/aarch64/aarch64.c ++++ b/gcc/config/aarch64/aarch64.c +@@ -27541,6 +27541,16 @@ aarch64_test_fractional_cost () + ASSERT_EQ (cf (1, 2).as_double (), 0.5); + } + ++/* Test SVE arithmetic folding. */ ++ ++static void ++aarch64_test_sve_folding () ++{ ++ tree res = fold_unary (BIT_NOT_EXPR, ssizetype, ++ ssize_int (poly_int64 (1, 1))); ++ ASSERT_TRUE (operand_equal_p (res, ssize_int (poly_int64 (-2, -1)))); ++} ++ + /* Run all target-specific selftests. 
*/ + + static void +@@ -27548,6 +27558,7 @@ aarch64_run_selftests (void) + { + aarch64_test_loading_full_dump (); + aarch64_test_fractional_cost (); ++ aarch64_test_sve_folding (); + } + + } // namespace selftest +diff --git a/gcc/fold-const.c b/gcc/fold-const.c +index d81a71c41a17..391f11095408 100644 +--- a/gcc/fold-const.c ++++ b/gcc/fold-const.c +@@ -1802,7 +1802,7 @@ const_unop (enum tree_code code, tree type, tree arg0) + if (TREE_CODE (arg0) == INTEGER_CST) + return fold_not_const (arg0, type); + else if (POLY_INT_CST_P (arg0)) +- return wide_int_to_tree (type, -poly_int_cst_value (arg0)); ++ return wide_int_to_tree (type, ~poly_int_cst_value (arg0)); + /* Perform BIT_NOT_EXPR on each element individually. */ + else if (TREE_CODE (arg0) == VECTOR_CST) + { diff --git a/SOURCES/gcc11-pr99888.patch b/SOURCES/gcc11-pr99888.patch new file mode 100644 index 0000000..3306e96 --- /dev/null +++ b/SOURCES/gcc11-pr99888.patch @@ -0,0 +1,403 @@ +From c23b5006d3ffeda1a9edf5fd817765a6da3696ca Mon Sep 17 00:00:00 2001 +From: Kewen Lin +Date: Fri, 30 Sep 2022 07:16:49 -0500 +Subject: [PATCH] rs6000: Rework ELFv2 support for -fpatchable-function-entry* + [PR99888] + +As PR99888 and its related show, the current support for +-fpatchable-function-entry on powerpc ELFv2 doesn't work +well with global entry existence. For example, with one +command line option -fpatchable-function-entry=3,2, it got +below w/o this patch: + + .LPFE1: + nop + nop + .type foo, @function + foo: + nop + .LFB0: + .cfi_startproc + .LCF0: + 0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l + .localentry foo,.-foo + +, the assembly is unexpected since the patched nops have +no effects when being entered from local entry. + +This patch is to update the nops patched before and after +local entry, it looks like: + + .type foo, @function + foo: + .LFB0: + .cfi_startproc + .LCF0: + 0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l + nop + nop + .localentry foo,.-foo + nop + + PR target/99888 + PR target/105649 + +Backported to GCC 11: renamed source files from .cc back to .c + +gcc/ChangeLog: + + * doc/invoke.texi (option -fpatchable-function-entry): Adjust the + documentation for PowerPC ELFv2 ABI dual entry points. + * config/rs6000/rs6000-internal.h + (rs6000_print_patchable_function_entry): New function declaration. + * config/rs6000/rs6000-logue.c (rs6000_output_function_prologue): + Support patchable-function-entry by emitting nops before and after + local entry for the function that needs global entry. + * config/rs6000/rs6000.c (rs6000_print_patchable_function_entry): Skip + the function that needs global entry till global entry has been + emitted. + * config/rs6000/rs6000.h (struct machine_function): New bool member + global_entry_emitted. + +gcc/testsuite/ChangeLog: + + * gcc.target/powerpc/pr99888-1.c: New test. + * gcc.target/powerpc/pr99888-2.c: New test. + * gcc.target/powerpc/pr99888-3.c: New test. + * gcc.target/powerpc/pr99888-4.c: New test. + * gcc.target/powerpc/pr99888-5.c: New test. + * gcc.target/powerpc/pr99888-6.c: New test. + * c-c++-common/patchable_function_entry-default.c: Adjust for + powerpc_elfv2 to avoid compilation error. 
+--- + gcc/config/rs6000/rs6000-internal.h | 4 ++ + gcc/config/rs6000/rs6000-logue.c | 32 ++++++++++++++ + gcc/config/rs6000/rs6000.c | 10 ++++- + gcc/config/rs6000/rs6000.h | 4 ++ + gcc/doc/invoke.texi | 8 +++- + .../patchable_function_entry-default.c | 3 ++ + gcc/testsuite/gcc.target/powerpc/pr99888-1.c | 43 +++++++++++++++++++ + gcc/testsuite/gcc.target/powerpc/pr99888-2.c | 43 +++++++++++++++++++ + gcc/testsuite/gcc.target/powerpc/pr99888-3.c | 11 +++++ + gcc/testsuite/gcc.target/powerpc/pr99888-4.c | 13 ++++++ + gcc/testsuite/gcc.target/powerpc/pr99888-5.c | 13 ++++++ + gcc/testsuite/gcc.target/powerpc/pr99888-6.c | 14 ++++++ + 12 files changed, 194 insertions(+), 4 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/powerpc/pr99888-1.c + create mode 100644 gcc/testsuite/gcc.target/powerpc/pr99888-2.c + create mode 100644 gcc/testsuite/gcc.target/powerpc/pr99888-3.c + create mode 100644 gcc/testsuite/gcc.target/powerpc/pr99888-4.c + create mode 100644 gcc/testsuite/gcc.target/powerpc/pr99888-5.c + create mode 100644 gcc/testsuite/gcc.target/powerpc/pr99888-6.c + +diff --git a/gcc/config/rs6000/rs6000-internal.h b/gcc/config/rs6000/rs6000-internal.h +index b9e82c0468d0..e75b8d5c7e88 100644 +--- a/gcc/config/rs6000/rs6000-internal.h ++++ b/gcc/config/rs6000/rs6000-internal.h +@@ -182,6 +182,10 @@ extern tree rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED, + tree *args ATTRIBUTE_UNUSED, + bool ignore ATTRIBUTE_UNUSED); + ++extern void rs6000_print_patchable_function_entry (FILE *, ++ unsigned HOST_WIDE_INT, ++ bool); ++ + extern bool rs6000_passes_float; + extern bool rs6000_passes_long_double; + extern bool rs6000_passes_vector; +diff --git a/gcc/config/rs6000/rs6000-logue.c b/gcc/config/rs6000/rs6000-logue.c +index a11d020ccd0c..3621cb501c70 100644 +--- a/gcc/config/rs6000/rs6000-logue.c ++++ b/gcc/config/rs6000/rs6000-logue.c +@@ -4009,11 +4009,43 @@ rs6000_output_function_prologue (FILE *file) + fprintf (file, "\tadd 2,2,12\n"); + } + ++ unsigned short patch_area_size = crtl->patch_area_size; ++ unsigned short patch_area_entry = crtl->patch_area_entry; ++ /* Need to emit the patching area. */ ++ if (patch_area_size > 0) ++ { ++ cfun->machine->global_entry_emitted = true; ++ /* As ELFv2 ABI shows, the allowable bytes between the global ++ and local entry points are 0, 4, 8, 16, 32 and 64 when ++ there is a local entry point. Considering there are two ++ non-prefixed instructions for global entry point prologue ++ (8 bytes), the count for patchable nops before local entry ++ point would be 2, 6 and 14. It's possible to support those ++ other counts of nops by not making a local entry point, but ++ we don't have clear use cases for them, so leave them ++ unsupported for now. */ ++ if (patch_area_entry > 0) ++ { ++ if (patch_area_entry != 2 ++ && patch_area_entry != 6 ++ && patch_area_entry != 14) ++ error ("unsupported number of nops before function entry (%u)", ++ patch_area_entry); ++ rs6000_print_patchable_function_entry (file, patch_area_entry, ++ true); ++ patch_area_size -= patch_area_entry; ++ } ++ } ++ + fputs ("\t.localentry\t", file); + assemble_name (file, name); + fputs (",.-", file); + assemble_name (file, name); + fputs ("\n", file); ++ /* Emit the nops after local entry. 
*/ ++ if (patch_area_size > 0) ++ rs6000_print_patchable_function_entry (file, patch_area_size, ++ patch_area_entry == 0); + } + + else if (rs6000_pcrel_p ()) +diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c +index bbe21eacc6b9..b9496d7f2680 100644 +--- a/gcc/config/rs6000/rs6000.c ++++ b/gcc/config/rs6000/rs6000.c +@@ -14930,8 +14930,14 @@ rs6000_print_patchable_function_entry (FILE *file, + if (!(TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2) + && HAVE_GAS_SECTION_LINK_ORDER) + flags |= SECTION_LINK_ORDER; +- default_print_patchable_function_entry_1 (file, patch_area_size, record_p, +- flags); ++ bool global_entry_needed_p = rs6000_global_entry_point_prologue_needed_p (); ++ /* For a function which needs global entry point, we will emit the ++ patchable area before and after local entry point under the control of ++ cfun->machine->global_entry_emitted, see the handling in function ++ rs6000_output_function_prologue. */ ++ if (!global_entry_needed_p || cfun->machine->global_entry_emitted) ++ default_print_patchable_function_entry_1 (file, patch_area_size, record_p, ++ flags); + } + + enum rtx_code +diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h +index eb7b21584970..b4df22b60303 100644 +--- a/gcc/config/rs6000/rs6000.h ++++ b/gcc/config/rs6000/rs6000.h +@@ -2435,6 +2435,10 @@ typedef struct GTY(()) machine_function + bool lr_is_wrapped_separately; + bool toc_is_wrapped_separately; + bool mma_return_type_error; ++ /* Indicate global entry is emitted, only useful when the function requires ++ global entry. It helps to control the patchable area before and after ++ local entry. */ ++ bool global_entry_emitted; + } machine_function; + #endif + +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index 2ac9cfc35f92..518bfdf0867d 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -16884,9 +16884,13 @@ the area size or to remove it completely on a single function. + If @code{N=0}, no pad location is recorded. + + The NOP instructions are inserted at---and maybe before, depending on +-@var{M}---the function entry address, even before the prologue. ++@var{M}---the function entry address, even before the prologue. On ++PowerPC with the ELFv2 ABI, for a function with dual entry points, ++the local entry point is this function entry address. + +-The maximum value of @var{N} and @var{M} is 65535. ++The maximum value of @var{N} and @var{M} is 65535. On PowerPC with the ++ELFv2 ABI, for a function with dual entry points, the supported values ++for @var{M} are 0, 2, 6 and 14. + @end table + + +diff --git a/gcc/testsuite/c-c++-common/patchable_function_entry-default.c b/gcc/testsuite/c-c++-common/patchable_function_entry-default.c +index 7036f7bfbea4..a501efccb194 100644 +--- a/gcc/testsuite/c-c++-common/patchable_function_entry-default.c ++++ b/gcc/testsuite/c-c++-common/patchable_function_entry-default.c +@@ -1,6 +1,9 @@ + /* { dg-do compile { target { ! { nvptx*-*-* visium-*-* } } } } */ + /* { dg-options "-O2 -fpatchable-function-entry=3,1" } */ + /* { dg-additional-options "-fno-pie" { target sparc*-*-* } } */ ++/* See PR99888, one single preceding nop isn't allowed on powerpc_elfv2, ++ so overriding with two preceding nops to make it pass there. */ ++/* { dg-additional-options "-fpatchable-function-entry=3,2" { target powerpc_elfv2 } } */ + /* { dg-final { scan-assembler-times "nop|NOP|SWYM" 3 { target { ! 
{ alpha*-*-* } } } } } */ + /* { dg-final { scan-assembler-times "bis" 3 { target alpha*-*-* } } } */ + +diff --git a/gcc/testsuite/gcc.target/powerpc/pr99888-1.c b/gcc/testsuite/gcc.target/powerpc/pr99888-1.c +new file mode 100644 +index 000000000000..9370b4e74388 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/powerpc/pr99888-1.c +@@ -0,0 +1,43 @@ ++/* Verify no errors for different nops after local entry on ELFv2. */ ++ ++extern int a; ++ ++__attribute__ ((noipa, patchable_function_entry (1, 0))) ++int test1 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (2, 0))) ++int test2 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (3, 0))) ++int test3 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (4, 0))) ++int test4 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (5, 0))) ++int test5 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (6, 0))) ++int test6 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (7, 0))) ++int test7 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (8, 0))) ++int test8 (int b) { ++ return a + b; ++} +diff --git a/gcc/testsuite/gcc.target/powerpc/pr99888-2.c b/gcc/testsuite/gcc.target/powerpc/pr99888-2.c +new file mode 100644 +index 000000000000..450617126023 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/powerpc/pr99888-2.c +@@ -0,0 +1,43 @@ ++/* Verify no errors for 2, 6 and 14 nops before local entry on ELFv2. */ ++ ++extern int a; ++ ++__attribute__ ((noipa, patchable_function_entry (2, 2))) ++int test1 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (4, 2))) ++int test2 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (6, 6))) ++int test3 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (8, 6))) ++int test4 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (16, 6))) ++int test5 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (14, 14))) ++int test6 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (28, 14))) ++int test7 (int b) { ++ return a + b; ++} ++ ++__attribute__ ((noipa, patchable_function_entry (64, 14))) ++int test8 (int b) { ++ return a + b; ++} +diff --git a/gcc/testsuite/gcc.target/powerpc/pr99888-3.c b/gcc/testsuite/gcc.target/powerpc/pr99888-3.c +new file mode 100644 +index 000000000000..4531ae32036d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/powerpc/pr99888-3.c +@@ -0,0 +1,11 @@ ++/* { dg-options "-fpatchable-function-entry=1" } */ ++ ++/* Verify no errors on ELFv2, using command line option instead of ++ function attribute. */ ++ ++extern int a; ++ ++int test (int b) { ++ return a + b; ++} ++ +diff --git a/gcc/testsuite/gcc.target/powerpc/pr99888-4.c b/gcc/testsuite/gcc.target/powerpc/pr99888-4.c +new file mode 100644 +index 000000000000..00a8d4d316e0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/powerpc/pr99888-4.c +@@ -0,0 +1,13 @@ ++/* { dg-require-effective-target powerpc_elfv2 } */ ++/* There is no global entry point prologue with pcrel. */ ++/* { dg-options "-mno-pcrel -fpatchable-function-entry=1,1" } */ ++ ++/* Verify one error emitted for unexpected 1 nop before local ++ entry. 
*/
++
++extern int a;
++
++int test (int b) {
++ return a + b;
++}
+/* { dg-error "unsupported number of nops before function entry \\(1\\)" "" { target *-*-* } .-1 } */
+diff --git a/gcc/testsuite/gcc.target/powerpc/pr99888-5.c b/gcc/testsuite/gcc.target/powerpc/pr99888-5.c
+new file mode 100644
+index 000000000000..39d3b4465f11
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/powerpc/pr99888-5.c
+@@ -0,0 +1,13 @@
++/* { dg-require-effective-target powerpc_elfv2 } */
++/* There is no global entry point prologue with pcrel. */
++/* { dg-options "-mno-pcrel -fpatchable-function-entry=7,3" } */
++
++/* Verify one error emitted for unexpected 3 nops before local
++ entry. */
++
++extern int a;
++
++int test (int b) {
++ return a + b;
++}
++/* { dg-error "unsupported number of nops before function entry \\(3\\)" "" { target *-*-* } .-1 } */
+diff --git a/gcc/testsuite/gcc.target/powerpc/pr99888-6.c b/gcc/testsuite/gcc.target/powerpc/pr99888-6.c
+new file mode 100644
+index 000000000000..c6c18dcc7ac0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/powerpc/pr99888-6.c
+@@ -0,0 +1,14 @@
++/* { dg-require-effective-target powerpc_elfv2 } */
++/* There is no global entry point prologue with pcrel. */
++/* { dg-options "-mno-pcrel" } */
++
++/* Verify one error emitted for unexpected 4 nops before local
++ entry. */
++
++extern int a;
++
++__attribute__ ((patchable_function_entry (20, 4)))
++int test (int b) {
++ return a + b;
++}
++/* { dg-error "unsupported number of nops before function entry \\(4\\)" "" { target *-*-* } .-1 } */
+-- 
+2.43.5
+
diff --git a/SOURCES/gcc11-testsuite-fixes-4.patch b/SOURCES/gcc11-testsuite-fixes-4.patch
new file mode 100644
index 0000000..cab2e7e
--- /dev/null
+++ b/SOURCES/gcc11-testsuite-fixes-4.patch
@@ -0,0 +1,38 @@
+commit 181f40f5cf8510a16191e4768dadbe2cb7a5c095
+Author: Jakub Jelinek
+Date: Wed Jul 24 18:00:05 2024 +0200
+
+ testsuite: Fix up pr116034.c test for big/pdp endian [PR116061]
+
+ Didn't notice the memmove is into an int variable, so the test
+ was still failing on big endian.
+
+ 2024-07-24 Jakub Jelinek
+
+ PR tree-optimization/116034
+ PR testsuite/116061
+ * gcc.dg/pr116034.c (g): Change type from int to unsigned short.
+ (foo): Guard memmove call on __SIZEOF_SHORT__ == 2.
+
+ (cherry picked from commit 69e69847e21a8d951ab5f09fd3421449564dba31)
+
+diff --git a/gcc/testsuite/gcc.dg/pr116034.c b/gcc/testsuite/gcc.dg/pr116034.c
+index 9a31de03424..955b4c9e86b 100644
+--- a/gcc/testsuite/gcc.dg/pr116034.c
++++ b/gcc/testsuite/gcc.dg/pr116034.c
+@@ -2,12 +2,13 @@
+ /* { dg-do run } */
+ /* { dg-options "-O1 -fno-strict-aliasing" } */
+
+-int g;
++unsigned short int g;
+
+ static inline int
+ foo (_Complex unsigned short c)
+ {
+- __builtin_memmove (&g, 1 + (char *) &c, 2);
++ if (__SIZEOF_SHORT__ == 2)
++ __builtin_memmove (&g, 1 + (char *) &c, 2);
+ return g;
+ }
+
diff --git a/SPECS/gcc.spec b/SPECS/gcc.spec
index cad9a66..dd92049 100644
--- a/SPECS/gcc.spec
+++ b/SPECS/gcc.spec
@@ -1,10 +1,11 @@
+%global source_date_epoch_from_changelog 1
 %global DATE 20240719
 %global gitrev a985e3068a6f8045f8a6f2d2d5ae75f5eb0a8767
 %global gcc_version 11.5.0
 %global gcc_major 11
 # Note, gcc_release must be integer, if you want to add suffixes to
 # %%{release}, append them after %%{gcc_release} on Release: line.
-%global gcc_release 2
+%global gcc_release 11
 %global nvptx_tools_gitrev 5f6f343a302d620b0868edab376c00b15741e39e
 %global newlib_cygwin_gitrev 50e2a63b04bdd018484605fbb954fd1bd5147fa0
 %global _unpackaged_files_terminate_build 0
@@ -150,7 +151,7 @@ Source0: gcc-%{version}-%{DATE}.tar.xz
 Source1: nvptx-tools-%{nvptx_tools_gitrev}.tar.xz
 # The source for nvptx-newlib package was pulled from upstream's vcs. Use the
 # following commands to generate the tarball:
-# git clone git://sourceware.org/git/newlib-cygwin.git newlib-cygwin-dir.tmp
+# git clone https://sourceware.org/git/newlib-cygwin.git newlib-cygwin-dir.tmp
 # git --git-dir=newlib-cygwin-dir.tmp/.git archive --prefix=newlib-cygwin-%%{newlib_cygwin_gitrev}/ %%{newlib_cygwin_gitrev} ":(exclude)newlib/libc/sys/linux/include/rpc/*.[hx]" | xz -9e > newlib-cygwin-%%{newlib_cygwin_gitrev}.tar.xz
 # rm -rf newlib-cygwin-dir.tmp
 Source2: newlib-cygwin-%{newlib_cygwin_gitrev}.tar.xz
@@ -202,7 +203,10 @@ BuildRequires: glibc >= 2.3.90-35
 %endif
 %ifarch %{multilib_64_archs} sparcv9 ppc
 # Ensure glibc{,-devel} is installed for both multilib arches
-BuildRequires: /lib/libc.so.6 /usr/lib/libc.so /lib64/libc.so.6 /usr/lib64/libc.so
+BuildRequires: (glibc32 or glibc-devel(%{__isa_name}-32))
+%endif
+%ifarch sparcv9 ppc
+BuildRequires: (glibc64 or glibc-devel(%{__isa_name}-64))
 %endif
 %if %{build_ada}
 # Ada requires Ada to build
@@ -298,6 +302,36 @@ Patch35: gcc11-testsuite-aarch64-add-fno-stack-protector.patch
 Patch36: gcc11-libgfortran-flush.patch
 Patch37: gcc11-pr113960.patch
 Patch38: gcc11-pr105157.patch
+Patch39: gcc11-testsuite-fixes-4.patch
+Patch40: gcc11-pr99888.patch
+Patch41: gcc11-pr118976.patch
+Patch42: gcc-RHEL-105072-1.patch
+Patch43: gcc-RHEL-105072-2.patch
+Patch44: gcc-RHEL-105072-3.patch
+Patch45: gcc-RHEL-105072-4.patch
+Patch46: gcc-RHEL-105072-5.patch
+Patch47: gcc-RHEL-105072-6.patch
+Patch48: gcc-RHEL-105072-7.patch
+Patch49: gcc-RHEL-105072-8.patch
+Patch50: gcc-RHEL-105072-9.patch
+Patch51: gcc-RHEL-105072-10.patch
+Patch52: gcc-RHEL-105072-11.patch
+Patch53: gcc-RHEL-105072-12.patch
+Patch54: gcc-RHEL-105072-13.patch
+Patch55: gcc-RHEL-105072-14.patch
+Patch56: gcc-RHEL-105072-15.patch
+Patch57: gcc-RHEL-105072-16.patch
+Patch58: gcc-RHEL-105072-17.patch
+Patch59: gcc-RHEL-105072-18.patch
+Patch60: gcc-RHEL-105072-19.patch
+Patch61: gcc-RHEL-105072-20.patch
+Patch62: gcc-RHEL-105072-21.patch
+Patch63: gcc-RHEL-105072-22.patch
+Patch64: gcc-RHEL-105072-23.patch
+Patch65: gcc-RHEL-105072-24.patch
+Patch66: gcc-RHEL-105072-25.patch
+Patch67: gcc-RHEL-105072-26.patch
+Patch68: gcc-RHEL-105072-27.patch
 
 Patch100: gcc11-fortran-fdec-duplicates.patch
 Patch101: gcc11-fortran-flogical-as-integer.patch
@@ -310,6 +344,11 @@ Patch107: gcc11-fortran-fdec-promotion.patch
 Patch108: gcc11-fortran-fdec-sequence.patch
 Patch109: gcc11-fortran-fdec-add-missing-indexes.patch
 
+# Pretty printer update.
+Patch1000: gcc11-libstdc++-prettyprinter-update-15.patch
+Patch1001: gcc11-libstdc++-prettyprinter-update-15-tests.patch
+Patch1002: gcc11-libstdc++-prettyprinter-update-15-tests-48362.patch
+
 # On ARM EABI systems, we do want -gnueabi to be part of the
 # target triple.
 %ifnarch %{arm}
@@ -355,6 +394,8 @@ You'll need this package in order to compile C code.
 %package -n libgcc
 Summary: GCC version 11 shared support library
 Autoreq: false
+# This expresses the dependency on _dl_find_object.
+Requires: libc.so.6(GLIBC_2.35)%[0%{?__isa_bits} == 64 ? "(64bit)" : ""]
 %if !%{build_ada}
 Obsoletes: libgnat < %{version}-%{release}
 %endif
@@ -901,6 +942,36 @@ mark them as cross compiled.
 %patch36 -p1 -b .libgfortran-flush~
 %patch37 -p1 -b .pr113960~
 %patch38 -p1 -b .pr105157~
+%patch39 -p1 -b .testsuite4~
+%patch40 -p1 -b .pr99888~
+%patch41 -p1 -b .pr118976~
+%patch42 -p1 -b .rhel-105072-1~
+%patch43 -p1 -b .rhel-105072-2~
+%patch44 -p1 -b .rhel-105072-3~
+%patch45 -p1 -b .rhel-105072-4~
+%patch46 -p1 -b .rhel-105072-5~
+%patch47 -p1 -b .rhel-105072-6~
+%patch48 -p1 -b .rhel-105072-7~
+%patch49 -p1 -b .rhel-105072-8~
+%patch50 -p1 -b .rhel-105072-9~
+%patch51 -p1 -b .rhel-105072-10~
+%patch52 -p1 -b .rhel-105072-11~
+%patch53 -p1 -b .rhel-105072-12~
+%patch54 -p1 -b .rhel-105072-13~
+%patch55 -p1 -b .rhel-105072-14~
+%patch56 -p1 -b .rhel-105072-15~
+%patch57 -p1 -b .rhel-105072-16~
+%patch58 -p1 -b .rhel-105072-17~
+%patch59 -p1 -b .rhel-105072-18~
+%patch60 -p1 -b .rhel-105072-19~
+%patch61 -p1 -b .rhel-105072-20~
+%patch62 -p1 -b .rhel-105072-21~
+%patch63 -p1 -b .rhel-105072-22~
+%patch64 -p1 -b .rhel-105072-23~
+%patch65 -p1 -b .rhel-105072-24~
+%patch66 -p1 -b .rhel-105072-25~
+%patch67 -p1 -b .rhel-105072-26~
+%patch68 -p1 -b .rhel-105072-27~
 
 %if 0%{?rhel} >= 9
 %patch100 -p1 -b .fortran-fdec-duplicates~
@@ -915,6 +986,10 @@ mark them as cross compiled.
 %patch109 -p1 -b .fortran-fdec-add-missing-indexes~
 %endif
 
+%patch1000 -p1 -b .libstdc++-prettyprinter-update-15
+%patch1001 -p1 -b .libstdc++-prettyprinter-update-15-tests
+%patch1002 -p1 -b .libstdc++-prettyprinter-update-15-tests-48362
+
 %ifarch %{arm}
 rm -f gcc/testsuite/go.test/test/fixedbugs/issue19182.go
 %endif
@@ -1422,6 +1497,9 @@ then
 CONFIG_ARGS="$CONFIG_ARGS --without-annocheck"
 CONFIG_ARGS="$CONFIG_ARGS --without-tests"
 CONFIG_ARGS="$CONFIG_ARGS --disable-rpath"
+ CONFIG_ARGS="$CONFIG_ARGS --without-debuginfod"
+ CONFIG_ARGS="$CONFIG_ARGS --without-clang-plugin"
+ CONFIG_ARGS="$CONFIG_ARGS --without-llvm-plugin"
 
 comp_dir="%{_builddir}/gcc-%{version}-%{DATE}/obj-%{gcc_target_platform}/gcc/"
 ccompiler="%{_builddir}/gcc-%{version}-%{DATE}/obj-%{gcc_target_platform}/gcc/xgcc -B $comp_dir"
@@ -1813,9 +1891,9 @@ mv -f %{buildroot}%{_prefix}/%{_lib}/libstdc++*gdb.py* \
 %{buildroot}%{_datadir}/gdb/auto-load/%{_prefix}/%{_lib}/
 pushd ../libstdc++-v3/python
 for i in `find . -name \*.py`; do
- touch -r $i %{buildroot}%{_prefix}/share/gcc-%{gcc_major}/python/$i
+ touch -d @$SOURCE_DATE_EPOCH %{buildroot}%{_prefix}/share/gcc-%{gcc_major}/python/$i
 done
-touch -r hook.in %{buildroot}%{_datadir}/gdb/auto-load/%{_prefix}/%{_lib}/libstdc++*gdb.py
+touch -d @$SOURCE_DATE_EPOCH %{buildroot}%{_datadir}/gdb/auto-load/%{_prefix}/%{_lib}/libstdc++*gdb.py
 popd
 for f in `find %{buildroot}%{_prefix}/share/gcc-%{gcc_major}/python/ \
 %{buildroot}%{_datadir}/gdb/auto-load/%{_prefix}/%{_lib}/ -name \*.py`; do
@@ -3594,9 +3672,39 @@ end
 %endif
 
 %changelog
-* Mon Sep 30 2024 Eduard Abdullin - 11.5.0-2.alma.1
+* Mon Sep 15 2025 Eduard Abdullin - 11.5.0-11.alma.1
 - Debrand for AlmaLinux
 
+* Thu Jul 31 2025 Florian Weimer - 11.5.0-11
+- Adjust glibc32 build dependency (RHEL-105072)
+
+* Tue Jul 29 2025 Florian Weimer - 11.5.0-10
+- Exception handling performance improvements (RHEL-105072)
+- libgcc: Use _dl_find_object to find DWARF data in unwinder
+- libgcc: Use lock-free data structures for run-time unwinder registration
+
+* Fri Jun 27 2025 Siddhesh Poyarekar 11.5.0-9
+- Pin modification time for python files to SOURCE_DATE_EPOCH (RHEL-100148).
+
+* Mon Jun 23 2025 Siddhesh Poyarekar - 11.5.0-8
+- Sync libstdc++ pretty printers to latest GTS (RHEL-81975)
+
+* Thu May 29 2025 Joseph Myers - 11.5.0-7
+- Fix folding of BIT_NOT_EXPR for POLY_INT_CST (PR 118976, RHEL-90239)
+
+* Wed May 21 2025 David Malcolm - 11.5.0-6
+- rs6000: Rework ELFv2 support for -fpatchable-function-entry (PR target/99888,
+  RHEL-75806)
+
+* Fri Feb 7 2025 Marek Polacek 11.5.0-5
+- rebuild for CVE-2020-11023 (RHEL-78377)
+
+* Mon Jan 27 2025 Marek Polacek 11.5.0-4
+- revert the PR middle-end/57245 patch (RHEL-76359)
+
+* Tue Jan 21 2025 Marek Polacek 11.5.0-3
+- honor -frounding-math in real truncation (PR middle-end/57245, RHEL-73749)
+
 * Mon Jul 22 2024 Marek Polacek 11.5.0-2
 - fix TARGET_CPU_DEFAULT (PR target/105157, RHEL-50037)
 - libstdc++: Workaround kernel-headers on s390x-linux (RHEL-50054)