import compat-libgfortran-48-4.8.5-36.1.el8

Commit 0f87a6871f, authored by CentOS Sources on 2019-08-01 13:49:43 -04:00,
committed by Stepan Oksanichenko.
114 changed files with 50339 additions and 0 deletions.

@@ -0,0 +1 @@
500237a6ba14b8a56751f57e5957b40cefa9cb01 SOURCES/gcc-4.8.5-20150702.tar.bz2

.gitignore
@@ -0,0 +1 @@
SOURCES/gcc-4.8.5-20150702.tar.bz2

@@ -0,0 +1,96 @@
2014-04-09 Richard Henderson <rth@redhat.com>
* gcc-interface/Makefile.in: Support aarch64-linux.
* init.c: Enable alternate stack support also on aarch64.
* types.h (Fat_Pointer): Remove aligned attribute.
--- gcc/ada/gcc-interface/Makefile.in
+++ gcc/ada/gcc-interface/Makefile.in
@@ -2123,6 +2123,44 @@ ifeq ($(strip $(filter-out alpha% linux%,$(arch) $(osys))),)
LIBRARY_VERSION := $(LIB_VERSION)
endif
+# AArch64 Linux
+ifeq ($(strip $(filter-out aarch64% linux%,$(arch) $(osys))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-exetim.adb<a-exetim-posix.adb \
+ a-exetim.ads<a-exetim-default.ads \
+ a-intnam.ads<a-intnam-linux.ads \
+ a-synbar.adb<a-synbar-posix.adb \
+ a-synbar.ads<a-synbar-posix.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-tpopsp.adb<s-tpopsp-tls.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ g-sercom.adb<g-sercom-linux.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS) \
+ system.ads<system-linux-x86_64.ads
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o a-exetim.o
+ EH_MECHANISM=-gcc
+ THREADSLIB=-lpthread -lrt
+ GNATLIB_SHARED=gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
# x86-64 Linux
ifeq ($(strip $(filter-out %x86_64 linux%,$(arch) $(osys))),)
LIBGNAT_TARGET_PAIRS = \
--- gcc/ada/init.c
+++ gcc/ada/init.c
@@ -562,7 +562,9 @@ __gnat_error_handler (int sig, siginfo_t *si ATTRIBUTE_UNUSED, void *ucontext)
Raise_From_Signal_Handler (exception, msg);
}
-#if defined (i386) || defined (__x86_64__) || defined (__powerpc__)
+#if defined (i386) || defined (__x86_64__) || defined (__powerpc__) \
+ || defined (__aarch64__)
+#define HAVE_GNAT_ALTERNATE_STACK 1
/* This must be in keeping with System.OS_Interface.Alternate_Stack_Size. */
char __gnat_alternate_stack[16 * 1024]; /* 2 * SIGSTKSZ */
#endif
@@ -603,7 +605,7 @@ __gnat_install_handler (void)
handled properly, avoiding a SEGV generation from stack usage by the
handler itself. */
-#if defined (i386) || defined (__x86_64__) || defined (__powerpc__)
+#ifdef HAVE_GNAT_ALTERNATE_STACK
stack_t stack;
stack.ss_sp = __gnat_alternate_stack;
stack.ss_size = sizeof (__gnat_alternate_stack);
@@ -624,7 +626,7 @@ __gnat_install_handler (void)
sigaction (SIGILL, &act, NULL);
if (__gnat_get_interrupt_state (SIGBUS) != 's')
sigaction (SIGBUS, &act, NULL);
-#if defined (i386) || defined (__x86_64__) || defined (__powerpc__)
+#ifdef HAVE_GNAT_ALTERNATE_STACK
act.sa_flags |= SA_ONSTACK;
#endif
if (__gnat_get_interrupt_state (SIGSEGV) != 's')
--- gcc/ada/types.h
+++ gcc/ada/types.h
@@ -79,8 +79,7 @@ typedef Char *Str_Ptr;
/* Types for the fat pointer used for strings and the template it
points to. */
typedef struct {int Low_Bound, High_Bound; } String_Template;
-typedef struct {const char *Array; String_Template *Bounds; }
- __attribute ((aligned (sizeof (char *) * 2))) Fat_Pointer;
+typedef struct {const char *Array; String_Template *Bounds; } Fat_Pointer;
/* Types for Node/Entity Kinds: */
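
The init.c hunk above folds the per-architecture #if list into a single
HAVE_GNAT_ALTERNATE_STACK macro guarding the alternate-signal-stack setup.
A minimal standalone sketch of what that setup does at runtime, assuming a
POSIX target (the handler is illustrative; the 16 KiB size mirrors
__gnat_alternate_stack in init.c):

#include <signal.h>
#include <string.h>

static char alternate_stack[16 * 1024];   /* 2 * SIGSTKSZ, as in init.c */

static void handler (int sig) { (void) sig; }

int
main (void)
{
  stack_t stack;
  struct sigaction act;

  /* Register the dedicated stack with the kernel.  */
  stack.ss_sp = alternate_stack;
  stack.ss_size = sizeof (alternate_stack);
  stack.ss_flags = 0;
  sigaltstack (&stack, NULL);

  /* Deliver SIGSEGV on that stack, so a stack-overflow fault can be
     handled without the handler itself faulting on the exhausted
     stack.  */
  memset (&act, 0, sizeof (act));
  act.sa_handler = handler;
  act.sa_flags = SA_ONSTACK;
  sigaction (SIGSEGV, &act, NULL);
  return 0;
}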

@@ -0,0 +1,35 @@
2014-04-07 Richard Henderson <rth@redhat.com>
* common/config/aarch64/aarch64-common.c (TARGET_OPTION_INIT_STRUCT):
Define.
(aarch64_option_init_struct): New function.
--- gcc/common/config/aarch64/aarch64-common.c
+++ gcc/common/config/aarch64/aarch64-common.c
@@ -39,6 +39,9 @@
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE aarch_option_optimization_table
+#undef TARGET_OPTION_INIT_STRUCT
+#define TARGET_OPTION_INIT_STRUCT aarch64_option_init_struct
+
/* Set default optimization options. */
static const struct default_options aarch_option_optimization_table[] =
{
@@ -47,6 +50,16 @@ static const struct default_options aarch_option_optimization_table[] =
{ OPT_LEVELS_NONE, 0, NULL, 0 }
};
+/* Implement TARGET_OPTION_INIT_STRUCT. */
+
+static void
+aarch64_option_init_struct (struct gcc_options *opts)
+{
+ /* By default, always emit DWARF-2 unwind info. This allows debugging
+ without maintaining a stack frame back-chain. */
+ opts->x_flag_asynchronous_unwind_tables = 1;
+}
+
/* Implement TARGET_HANDLE_OPTION.
This function handles the target specific options for CPU/target selection.
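
In practice the new hook means the aarch64 port behaves as if
-fasynchronous-unwind-tables were always enabled. A quick way to observe
the effect (the file name and commands are illustrative; readelf is the
standard binutils tool):

/* t.c -- even a plain "gcc -O2 -c t.c" now emits full .eh_frame data
   on aarch64 (check with "readelf -S t.o | grep eh_frame"), so
   unwinders and debuggers can walk frames without a frame-pointer
   back-chain.  */
int
answer (void)
{
  return 42;
}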

@@ -0,0 +1,342 @@
2014-08-08 Richard Henderson <rth@redhat.com>
* config/aarch64/aarch64.c (aarch64_save_or_restore_fprs): Add
cfi_ops argument, for restore put REG_CFA_RESTORE notes into
*cfi_ops rather than on individual insns. Cleanup.
(aarch64_save_or_restore_callee_save_registers): Likewise.
(aarch64_expand_prologue): Adjust caller.
(aarch64_expand_epilogue): Likewise. Cleanup. Emit queued cfi_ops
on the stack restore insn.
--- gcc/config/aarch64/aarch64.c 2014-07-15 02:27:16.000000000 -0700
+++ gcc/config/aarch64/aarch64.c 2014-08-21 12:52:44.190455860 -0700
@@ -1603,24 +1603,23 @@ aarch64_register_saved_on_entry (int reg
static void
aarch64_save_or_restore_fprs (int start_offset, int increment,
- bool restore, rtx base_rtx)
-
+ bool restore, rtx base_rtx, rtx *cfi_ops)
{
unsigned regno;
unsigned regno2;
rtx insn;
rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
-
for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
{
if (aarch64_register_saved_on_entry (regno))
{
- rtx mem;
+ rtx mem, reg1;
mem = gen_mem_ref (DFmode,
plus_constant (Pmode,
base_rtx,
start_offset));
+ reg1 = gen_rtx_REG (DFmode, regno);
for (regno2 = regno + 1;
regno2 <= V31_REGNUM
@@ -1632,54 +1631,51 @@ aarch64_save_or_restore_fprs (int start_
if (regno2 <= V31_REGNUM &&
aarch64_register_saved_on_entry (regno2))
{
- rtx mem2;
+ rtx mem2, reg2;
/* Next highest register to be saved. */
mem2 = gen_mem_ref (DFmode,
plus_constant
(Pmode,
base_rtx,
start_offset + increment));
+ reg2 = gen_rtx_REG (DFmode, regno2);
+
if (restore == false)
{
- insn = emit_insn
- ( gen_store_pairdf (mem, gen_rtx_REG (DFmode, regno),
- mem2, gen_rtx_REG (DFmode, regno2)));
-
+ insn = emit_insn (gen_store_pairdf (mem, reg1, mem2, reg2));
+ /* The first part of a frame-related parallel insn
+ is always assumed to be relevant to the frame
+ calculations; subsequent parts, are only
+ frame-related if explicitly marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+ RTX_FRAME_RELATED_P (insn) = 1;
}
else
{
- insn = emit_insn
- ( gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem,
- gen_rtx_REG (DFmode, regno2), mem2));
-
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno));
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno2));
+ emit_insn (gen_load_pairdf (reg1, mem, reg2, mem2));
+ *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg1, *cfi_ops);
+ *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg2, *cfi_ops);
}
- /* The first part of a frame-related parallel insn
- is always assumed to be relevant to the frame
- calculations; subsequent parts, are only
- frame-related if explicitly marked. */
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
- 1)) = 1;
regno = regno2;
start_offset += increment * 2;
}
else
{
if (restore == false)
- insn = emit_move_insn (mem, gen_rtx_REG (DFmode, regno));
+ {
+ insn = emit_move_insn (mem, reg1);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
else
{
- insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem);
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+ emit_move_insn (reg1, mem);
+ *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg1, *cfi_ops);
}
start_offset += increment;
}
- RTX_FRAME_RELATED_P (insn) = 1;
}
}
-
}
@@ -1687,13 +1683,14 @@ aarch64_save_or_restore_fprs (int start_
restore's have to happen. */
static void
aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset,
- bool restore)
+ bool restore, rtx *cfi_ops)
{
rtx insn;
rtx base_rtx = stack_pointer_rtx;
HOST_WIDE_INT start_offset = offset;
HOST_WIDE_INT increment = UNITS_PER_WORD;
- rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
+ rtx (*gen_mem_ref)(enum machine_mode, rtx)
+ = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
unsigned limit = (frame_pointer_needed)? R28_REGNUM: R30_REGNUM;
unsigned regno;
unsigned regno2;
@@ -1702,11 +1699,13 @@ aarch64_save_or_restore_callee_save_regi
{
if (aarch64_register_saved_on_entry (regno))
{
- rtx mem;
+ rtx mem, reg1;
+
mem = gen_mem_ref (Pmode,
plus_constant (Pmode,
base_rtx,
start_offset));
+ reg1 = gen_rtx_REG (DImode, regno);
for (regno2 = regno + 1;
regno2 <= limit
@@ -1718,56 +1717,54 @@ aarch64_save_or_restore_callee_save_regi
if (regno2 <= limit &&
aarch64_register_saved_on_entry (regno2))
{
- rtx mem2;
+ rtx mem2, reg2;
/* Next highest register to be saved. */
mem2 = gen_mem_ref (Pmode,
plus_constant
(Pmode,
base_rtx,
start_offset + increment));
+ reg2 = gen_rtx_REG (DImode, regno2);
+
if (restore == false)
{
- insn = emit_insn
- ( gen_store_pairdi (mem, gen_rtx_REG (DImode, regno),
- mem2, gen_rtx_REG (DImode, regno2)));
-
+ insn = emit_insn (gen_store_pairdi (mem, reg1, mem2, reg2));
+ /* The first part of a frame-related parallel insn
+ is always assumed to be relevant to the frame
+ calculations; subsequent parts, are only
+ frame-related if explicitly marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+ RTX_FRAME_RELATED_P (insn) = 1;
}
else
{
- insn = emit_insn
- ( gen_load_pairdi (gen_rtx_REG (DImode, regno), mem,
- gen_rtx_REG (DImode, regno2), mem2));
-
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno2));
+ emit_insn (gen_load_pairdi (reg1, mem, reg2, mem2));
+ *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg1, *cfi_ops);
+ *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg2, *cfi_ops);
}
- /* The first part of a frame-related parallel insn
- is always assumed to be relevant to the frame
- calculations; subsequent parts, are only
- frame-related if explicitly marked. */
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
- 1)) = 1;
regno = regno2;
start_offset += increment * 2;
}
else
{
if (restore == false)
- insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
+ {
+ insn = emit_move_insn (mem, reg1);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
else
{
- insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+ emit_move_insn (reg1, mem);
+ *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg1, *cfi_ops);
}
start_offset += increment;
}
- RTX_FRAME_RELATED_P (insn) = 1;
}
}
- aarch64_save_or_restore_fprs (start_offset, increment, restore, base_rtx);
-
+ aarch64_save_or_restore_fprs (start_offset, increment, restore,
+ base_rtx, cfi_ops);
}
/* AArch64 stack frames generated by this compiler look like:
@@ -1966,7 +1963,7 @@ aarch64_expand_prologue (void)
}
aarch64_save_or_restore_callee_save_registers
- (fp_offset + cfun->machine->frame.hardfp_offset, 0);
+ (fp_offset + cfun->machine->frame.hardfp_offset, 0, NULL);
}
/* when offset >= 512,
@@ -1991,6 +1988,7 @@ aarch64_expand_epilogue (bool for_sibcal
HOST_WIDE_INT fp_offset;
rtx insn;
rtx cfa_reg;
+ rtx cfi_ops = NULL;
aarch64_layout_frame ();
original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
@@ -2035,15 +2033,17 @@ aarch64_expand_epilogue (bool for_sibcal
insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
hard_frame_pointer_rtx,
GEN_INT (- fp_offset)));
+ /* CFA should be calculated from the value of SP from now on. */
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode, hard_frame_pointer_rtx,
+ -fp_offset)));
RTX_FRAME_RELATED_P (insn) = 1;
- /* As SP is set to (FP - fp_offset), according to the rules in
- dwarf2cfi.c:dwarf2out_frame_debug_expr, CFA should be calculated
- from the value of SP from now on. */
cfa_reg = stack_pointer_rtx;
}
aarch64_save_or_restore_callee_save_registers
- (fp_offset + cfun->machine->frame.hardfp_offset, 1);
+ (fp_offset + cfun->machine->frame.hardfp_offset, 1, &cfi_ops);
/* Restore the frame pointer and lr if the frame pointer is needed. */
if (offset > 0)
@@ -2051,6 +2051,8 @@ aarch64_expand_epilogue (bool for_sibcal
if (frame_pointer_needed)
{
rtx mem_fp, mem_lr;
+ rtx reg_fp = hard_frame_pointer_rtx;
+ rtx reg_lr = gen_rtx_REG (DImode, LR_REGNUM);
if (fp_offset)
{
@@ -2063,52 +2065,36 @@ aarch64_expand_epilogue (bool for_sibcal
stack_pointer_rtx,
fp_offset
+ UNITS_PER_WORD));
- insn = emit_insn (gen_load_pairdi (hard_frame_pointer_rtx,
- mem_fp,
- gen_rtx_REG (DImode,
- LR_REGNUM),
- mem_lr));
+ emit_insn (gen_load_pairdi (reg_fp, mem_fp, reg_lr, mem_lr));
+
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (offset)));
}
else
{
insn = emit_insn (gen_loadwb_pairdi_di
- (stack_pointer_rtx,
- stack_pointer_rtx,
- hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, LR_REGNUM),
- GEN_INT (offset),
+ (stack_pointer_rtx, stack_pointer_rtx,
+ reg_fp, reg_lr, GEN_INT (offset),
GEN_INT (GET_MODE_SIZE (DImode) + offset)));
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
- add_reg_note (insn, REG_CFA_ADJUST_CFA,
- (gen_rtx_SET (Pmode, stack_pointer_rtx,
- plus_constant (Pmode, cfa_reg,
- offset))));
- }
-
- /* The first part of a frame-related parallel insn
- is always assumed to be relevant to the frame
- calculations; subsequent parts, are only
- frame-related if explicitly marked. */
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
- RTX_FRAME_RELATED_P (insn) = 1;
- add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
- add_reg_note (insn, REG_CFA_RESTORE,
- gen_rtx_REG (DImode, LR_REGNUM));
-
- if (fp_offset)
- {
- insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
- GEN_INT (offset)));
- RTX_FRAME_RELATED_P (insn) = 1;
}
+ cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg_fp, cfi_ops);
+ cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg_lr, cfi_ops);
}
else
{
insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
GEN_INT (offset)));
- RTX_FRAME_RELATED_P (insn) = 1;
}
+ cfi_ops = alloc_reg_note (REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode, cfa_reg,
+ offset)),
+ cfi_ops);
+ REG_NOTES (insn) = cfi_ops;
+ RTX_FRAME_RELATED_P (insn) = 1;
}
+ else
+ gcc_assert (cfi_ops == NULL);
/* Stack adjustment for exception handler. */
if (crtl->calls_eh_return)

@@ -0,0 +1,46 @@
2013-09-20 Jakub Jelinek <jakub@redhat.com>
* common.opt (-fdiagnostics-color=): Default to auto.
* toplev.c (process_options): Always default to
-fdiagnostics-color=auto if no -f{,no-}diagnostics-color*.
* doc/invoke.texi (-fdiagnostics-color*): Adjust documentation
of the default.
--- gcc/common.opt.jj 2013-09-18 12:06:53.000000000 +0200
+++ gcc/common.opt 2013-09-20 10:00:00.935823900 +0200
@@ -1037,7 +1037,7 @@ Common Alias(fdiagnostics-color=,always,
;
fdiagnostics-color=
-Common Joined RejectNegative Var(flag_diagnostics_show_color) Enum(diagnostic_color_rule) Init(DIAGNOSTICS_COLOR_NO)
+Common Joined RejectNegative Var(flag_diagnostics_show_color) Enum(diagnostic_color_rule) Init(DIAGNOSTICS_COLOR_AUTO)
-fdiagnostics-color=[never|always|auto] Colorize diagnostics
; Required for these enum values.
--- gcc/toplev.c.jj 2013-09-09 11:32:39.000000000 +0200
+++ gcc/toplev.c 2013-09-20 10:10:08.198721005 +0200
@@ -1229,10 +1229,8 @@ process_options (void)
maximum_field_alignment = initial_max_fld_align * BITS_PER_UNIT;
- /* Default to -fdiagnostics-color=auto if GCC_COLORS is in the environment,
- otherwise default to -fdiagnostics-color=never. */
- if (!global_options_set.x_flag_diagnostics_show_color
- && getenv ("GCC_COLORS"))
+ /* Default to -fdiagnostics-color=auto. */
+ if (!global_options_set.x_flag_diagnostics_show_color)
pp_show_color (global_dc->printer)
= colorize_init (DIAGNOSTICS_COLOR_AUTO);
--- gcc/doc/invoke.texi.jj 2013-09-18 12:06:50.000000000 +0200
+++ gcc/doc/invoke.texi 2013-09-20 10:09:29.079904455 +0200
@@ -3046,8 +3046,7 @@ a message which is too long to fit on a
@cindex highlight, color, colour
@vindex GCC_COLORS @r{environment variable}
Use color in diagnostics. @var{WHEN} is @samp{never}, @samp{always},
-or @samp{auto}. The default is @samp{never} if @env{GCC_COLORS} environment
-variable isn't present in the environment, and @samp{auto} otherwise.
+or @samp{auto}. The default is @samp{auto}.
@samp{auto} means to use color only when the standard error is a terminal.
The forms @option{-fdiagnostics-color} and @option{-fno-diagnostics-color} are
aliases for @option{-fdiagnostics-color=always} and
@option{-fdiagnostics-color=never}, respectively.
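
Since @samp{auto} colorizes only when standard error is a terminal, the
runtime decision is essentially an isatty check. A minimal sketch of that
rule (illustrative only; GCC's actual colorize_init logic also consults
the environment):

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  /* -fdiagnostics-color=auto: use SGR escapes only for a tty.  */
  if (isatty (fileno (stderr)))
    fprintf (stderr, "\033[01;31merror:\033[0m example diagnostic\n");
  else
    fprintf (stderr, "error: example diagnostic\n");
  return 0;
}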

SOURCES/gcc48-hack.patch
@@ -0,0 +1,102 @@
--- libada/Makefile.in.jj 2009-01-14 12:07:35.000000000 +0100
+++ libada/Makefile.in 2009-01-15 14:25:33.000000000 +0100
@@ -66,18 +66,40 @@ libsubdir := $(libdir)/gcc/$(target_nonc
ADA_RTS_DIR=$(GCC_DIR)/ada/rts$(subst /,_,$(MULTISUBDIR))
ADA_RTS_SUBDIR=./rts$(subst /,_,$(MULTISUBDIR))
+DEFAULTMULTIFLAGS :=
+ifeq ($(MULTISUBDIR),)
+targ:=$(subst -, ,$(target))
+arch:=$(word 1,$(targ))
+ifeq ($(words $(targ)),2)
+osys:=$(word 2,$(targ))
+else
+osys:=$(word 3,$(targ))
+endif
+ifeq ($(strip $(filter-out i%86 x86_64 powerpc% ppc% s390% sparc% linux%, $(arch) $(osys))),)
+ifeq ($(shell $(CC) $(CFLAGS) -print-multi-os-directory),../lib64)
+DEFAULTMULTIFLAGS := -m64
+else
+ifeq ($(strip $(filter-out s390%, $(arch))),)
+DEFAULTMULTIFLAGS := -m31
+else
+DEFAULTMULTIFLAGS := -m32
+endif
+endif
+endif
+endif
+
# exeext should not be used because it's the *host* exeext. We're building
# a *target* library, aren't we?!? Likewise for CC. Still, provide bogus
# definitions just in case something slips through the safety net provided
# by recursive make invocations in gcc/ada/Makefile.in
LIBADA_FLAGS_TO_PASS = \
"MAKEOVERRIDES=" \
- "LDFLAGS=$(LDFLAGS)" \
+ "LDFLAGS=$(LDFLAGS) $(DEFAULTMULTIFLAGS)" \
"LN_S=$(LN_S)" \
"SHELL=$(SHELL)" \
- "GNATLIBFLAGS=$(GNATLIBFLAGS) $(MULTIFLAGS)" \
- "GNATLIBCFLAGS=$(GNATLIBCFLAGS) $(MULTIFLAGS)" \
- "GNATLIBCFLAGS_FOR_C=$(GNATLIBCFLAGS_FOR_C) $(MULTIFLAGS)" \
+ "GNATLIBFLAGS=$(GNATLIBFLAGS) $(MULTIFLAGS) $(DEFAULTMULTIFLAGS)" \
+ "GNATLIBCFLAGS=$(GNATLIBCFLAGS) $(MULTIFLAGS) $(DEFAULTMULTIFLAGS)" \
+ "GNATLIBCFLAGS_FOR_C=$(GNATLIBCFLAGS_FOR_C) $(MULTIFLAGS) $(DEFAULTMULTIFLAGS)" \
"PICFLAG_FOR_TARGET=$(PICFLAG)" \
"THREAD_KIND=$(THREAD_KIND)" \
"TRACE=$(TRACE)" \
@@ -88,7 +110,7 @@ LIBADA_FLAGS_TO_PASS = \
"exeext=.exeext.should.not.be.used " \
'CC=the.host.compiler.should.not.be.needed' \
"GCC_FOR_TARGET=$(CC)" \
- "CFLAGS=$(CFLAGS)"
+ "CFLAGS=$(CFLAGS) $(DEFAULTMULTIFLAGS)"
# Rules to build gnatlib.
.PHONY: gnatlib gnatlib-plain gnatlib-sjlj gnatlib-zcx gnatlib-shared osconstool
--- gcc/ada/sem_util.adb (revision 161677)
+++ gcc/ada/sem_util.adb (working copy)
@@ -5487,7 +5487,7 @@ package body Sem_Util is
Exp : Node_Id;
Assn : Node_Id;
Choice : Node_Id;
- Comp_Type : Entity_Id;
+ Comp_Type : Entity_Id := Empty;
Is_Array_Aggr : Boolean;
begin
--- config-ml.in.jj 2010-06-30 09:50:44.000000000 +0200
+++ config-ml.in 2010-07-02 21:24:17.994211151 +0200
@@ -516,6 +516,8 @@ multi-do:
ADAFLAGS="$(ADAFLAGS) $${flags}" \
prefix="$(prefix)" \
exec_prefix="$(exec_prefix)" \
+ mandir="$(mandir)" \
+ infodir="$(infodir)" \
GCJFLAGS="$(GCJFLAGS) $${flags}" \
GOCFLAGS="$(GOCFLAGS) $${flags}" \
CXXFLAGS="$(CXXFLAGS) $${flags}" \
--- libjava/Makefile.am.jj 2010-07-09 11:17:33.729604090 +0200
+++ libjava/Makefile.am 2010-07-09 13:16:41.894375641 +0200
@@ -710,7 +710,8 @@ if USE_LIBGCJ_BC
## later.
@echo Installing dummy lib libgcj_bc.so.1.0.0; \
rm $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so; \
- mv $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so.1.0.0 $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so; \
+ $(INSTALL) $(INSTALL_STRIP_FLAG) $(here)/.libs/libgcj_bc.so $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so; \
+ rm $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so.1.0.0; \
$(libgcj_bc_dummy_LINK) -xc /dev/null -Wl,-soname,libgcj_bc.so.1 \
-o $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so.1.0.0 -lgcj || exit; \
rm $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so.1; \
--- libjava/Makefile.in.jj 2010-07-09 11:17:34.000000000 +0200
+++ libjava/Makefile.in 2010-07-09 13:18:07.542572270 +0200
@@ -12665,7 +12665,8 @@ install-exec-hook: install-binPROGRAMS i
install-libexecsubPROGRAMS
@USE_LIBGCJ_BC_TRUE@ @echo Installing dummy lib libgcj_bc.so.1.0.0; \
@USE_LIBGCJ_BC_TRUE@ rm $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so; \
-@USE_LIBGCJ_BC_TRUE@ mv $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so.1.0.0 $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so; \
+@USE_LIBGCJ_BC_TRUE@ $(INSTALL) $(INSTALL_STRIP_FLAG) $(here)/.libs/libgcj_bc.so $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so; \
+@USE_LIBGCJ_BC_TRUE@ rm $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so.1.0.0; \
@USE_LIBGCJ_BC_TRUE@ $(libgcj_bc_dummy_LINK) -xc /dev/null -Wl,-soname,libgcj_bc.so.1 \
@USE_LIBGCJ_BC_TRUE@ -o $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so.1.0.0 -lgcj || exit; \
@USE_LIBGCJ_BC_TRUE@ rm $(DESTDIR)$(toolexeclibdir)/libgcj_bc.so.1; \

@@ -0,0 +1,11 @@
--- libgomp/configure.tgt.jj 2008-01-10 20:53:48.000000000 +0100
+++ libgomp/configure.tgt 2008-03-27 12:44:51.000000000 +0100
@@ -67,7 +67,7 @@ if test $enable_linux_futex = yes; then
;;
*)
if test -z "$with_arch"; then
- XCFLAGS="${XCFLAGS} -march=i486 -mtune=${target_cpu}"
+ XCFLAGS="${XCFLAGS} -march=i486 -mtune=generic"
fi
esac
;;

@@ -0,0 +1,44 @@
--- libjava/configure.ac.jj 2007-12-07 17:55:50.000000000 +0100
+++ libjava/configure.ac 2007-12-07 18:36:56.000000000 +0100
@@ -82,6 +82,13 @@ AC_ARG_ENABLE(java-maintainer-mode,
[allow rebuilding of .class and .h files]))
AM_CONDITIONAL(JAVA_MAINTAINER_MODE, test "$enable_java_maintainer_mode" = yes)
+AC_ARG_ENABLE(libjava-multilib,
+ AS_HELP_STRING([--enable-libjava-multilib], [build libjava as multilib]))
+if test "$enable_libjava_multilib" = no; then
+ multilib=no
+ ac_configure_args="$ac_configure_args --disable-multilib"
+fi
+
# It may not be safe to run linking tests in AC_PROG_CC/AC_PROG_CXX.
GCC_NO_EXECUTABLES
--- libjava/configure.jj 2007-12-07 17:55:50.000000000 +0100
+++ libjava/configure 2007-12-07 18:39:58.000000000 +0100
@@ -1021,6 +1021,8 @@ Optional Features:
default=yes
--enable-java-maintainer-mode
allow rebuilding of .class and .h files
+ --enable-libjava-multilib
+ build libjava as multilib
--disable-dependency-tracking speeds up one-time build
--enable-dependency-tracking do not reject slow dependency extractors
--enable-maintainer-mode enable make rules and dependencies not useful
@@ -1973,6 +1975,16 @@ else
fi
+# Check whether --enable-libjava-multilib was given.
+if test "${enable_libjava_multilib+set}" = set; then
+ enableval=$enable_libjava_multilib;
+fi
+
+if test "$enable_libjava_multilib" = no; then
+ multilib=no
+ ac_configure_args="$ac_configure_args --disable-multilib"
+fi
+
# It may not be safe to run linking tests in AC_PROG_CC/AC_PROG_CXX.

@@ -0,0 +1,126 @@
2016-02-19 Jakub Jelinek <jakub@redhat.com>
Bernd Edlinger <bernd.edlinger@hotmail.de>
* Make-lang.in: Invoke gperf with -L C++.
* cfns.gperf: Remove prototypes for hash and libc_name_p
inlines.
* cfns.h: Regenerated.
* except.c (nothrow_libfn_p): Adjust.
--- gcc/cp/Make-lang.in
+++ gcc/cp/Make-lang.in
@@ -112,7 +112,7 @@ else
# deleting the $(srcdir)/cp/cfns.h file.
$(srcdir)/cp/cfns.h:
endif
- gperf -o -C -E -k '1-6,$$' -j1 -D -N 'libc_name_p' -L ANSI-C \
+ gperf -o -C -E -k '1-6,$$' -j1 -D -N 'libc_name_p' -L C++ \
$(srcdir)/cp/cfns.gperf --output-file $(srcdir)/cp/cfns.h
#
--- gcc/cp/cfns.gperf
+++ gcc/cp/cfns.gperf
@@ -1,3 +1,5 @@
+%language=C++
+%define class-name libc_name
%{
/* Copyright (C) 2000-2013 Free Software Foundation, Inc.
@@ -16,14 +18,6 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-#ifdef __GNUC__
-__inline
-#endif
-static unsigned int hash (const char *, unsigned int);
-#ifdef __GNUC__
-__inline
-#endif
-const char * libc_name_p (const char *, unsigned int);
%}
%%
# The standard C library functions, for feeding to gperf; the result is used
--- gcc/cp/cfns.h
+++ gcc/cp/cfns.h
@@ -1,5 +1,5 @@
-/* ANSI-C code produced by gperf version 3.0.3 */
-/* Command-line: gperf -o -C -E -k '1-6,$' -j1 -D -N libc_name_p -L ANSI-C cfns.gperf */
+/* C++ code produced by gperf version 3.0.4 */
+/* Command-line: gperf -o -C -E -k '1-6,$' -j1 -D -N libc_name_p -L C++ --output-file cfns.h cfns.gperf */
#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
&& ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \
@@ -28,7 +28,7 @@
#error "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gnu-gperf@gnu.org>."
#endif
-#line 1 "cfns.gperf"
+#line 3 "cfns.gperf"
/* Copyright (C) 2000-2013 Free Software Foundation, Inc.
@@ -47,25 +47,18 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-#ifdef __GNUC__
-__inline
-#endif
-static unsigned int hash (const char *, unsigned int);
-#ifdef __GNUC__
-__inline
-#endif
-const char * libc_name_p (const char *, unsigned int);
/* maximum key range = 391, duplicates = 0 */
-#ifdef __GNUC__
-__inline
-#else
-#ifdef __cplusplus
-inline
-#endif
-#endif
-static unsigned int
-hash (register const char *str, register unsigned int len)
+class libc_name
+{
+private:
+ static inline unsigned int hash (const char *str, unsigned int len);
+public:
+ static const char *libc_name_p (const char *str, unsigned int len);
+};
+
+inline unsigned int
+libc_name::hash (register const char *str, register unsigned int len)
{
static const unsigned short asso_values[] =
{
@@ -122,14 +115,8 @@ hash (register const char *str, register
return hval + asso_values[(unsigned char)str[len - 1]];
}
-#ifdef __GNUC__
-__inline
-#ifdef __GNUC_STDC_INLINE__
-__attribute__ ((__gnu_inline__))
-#endif
-#endif
const char *
-libc_name_p (register const char *str, register unsigned int len)
+libc_name::libc_name_p (register const char *str, register unsigned int len)
{
enum
{
--- gcc/cp/except.c
+++ gcc/cp/except.c
@@ -1040,7 +1040,8 @@ nothrow_libfn_p (const_tree fn)
unless the system headers are playing rename tricks, and if
they are, we don't want to be confused by them. */
id = DECL_NAME (fn);
- return !!libc_name_p (IDENTIFIER_POINTER (id), IDENTIFIER_LENGTH (id));
+ return !!libc_name::libc_name_p (IDENTIFIER_POINTER (id),
+ IDENTIFIER_LENGTH (id));
}
/* Returns nonzero if an exception of type FROM will be caught by a

(Two file diffs suppressed because they are too large.)

@@ -0,0 +1,17 @@
2008-06-09 Jakub Jelinek <jakub@redhat.com>
* omp.h.in (omp_nest_lock_t): Fix up for Linux multilibs.
--- libgomp/omp.h.in.jj 2008-06-09 13:34:05.000000000 +0200
+++ libgomp/omp.h.in 2008-06-09 13:34:48.000000000 +0200
@@ -42,8 +42,8 @@ typedef struct
typedef struct
{
- unsigned char _x[@OMP_NEST_LOCK_SIZE@]
- __attribute__((__aligned__(@OMP_NEST_LOCK_ALIGN@)));
+ unsigned char _x[8 + sizeof (void *)]
+ __attribute__((__aligned__(sizeof (void *))));
} omp_nest_lock_t;
#endif
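
The point of the change: the configure-substituted @OMP_NEST_LOCK_SIZE@ and
@OMP_NEST_LOCK_ALIGN@ values differ between multilibs, whereas
8 + sizeof (void *), aligned to sizeof (void *), is correct for both the
32-bit and 64-bit compilation from one installed omp.h. A self-contained
check of that layout (the concrete 12/16-byte figures are assumptions for
ILP32/LP64 Linux targets):

#include <assert.h>

typedef struct                  /* same layout the patched omp.h.in emits */
{
  unsigned char _x[8 + sizeof (void *)]
    __attribute__((__aligned__(sizeof (void *))));
} omp_nest_lock_t;

int
main (void)
{
  /* 16 bytes, 8-byte aligned under -m64; 12 bytes, 4-byte aligned
     under -m32.  */
  assert (sizeof (omp_nest_lock_t) == 8 + sizeof (void *));
  assert (__alignof__ (omp_nest_lock_t) == sizeof (void *));
  return 0;
}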

@@ -0,0 +1,27 @@
libtool sucks.
--- ltmain.sh.jj 2007-12-07 14:53:21.000000000 +0100
+++ ltmain.sh 2008-09-05 21:51:48.000000000 +0200
@@ -5394,6 +5394,7 @@ EOF
rpath="$finalize_rpath"
test "$mode" != relink && rpath="$compile_rpath$rpath"
for libdir in $rpath; do
+ case "$libdir" in /usr/lib|/usr/lib64|/usr/lib/../lib|/usr/lib/../lib64) continue;; esac
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
@@ -6071,6 +6072,7 @@ EOF
rpath=
hardcode_libdirs=
for libdir in $compile_rpath $finalize_rpath; do
+ case "$libdir" in /usr/lib|/usr/lib64|/usr/lib/../lib|/usr/lib/../lib64) continue;; esac
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
@@ -6120,6 +6122,7 @@ EOF
rpath=
hardcode_libdirs=
for libdir in $finalize_rpath; do
+ case "$libdir" in /usr/lib|/usr/lib64|/usr/lib/../lib|/usr/lib/../lib64) continue;; esac
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then

@@ -0,0 +1,50 @@
2010-02-08 Roland McGrath <roland@redhat.com>
* config/rs6000/sysv4.h (LINK_EH_SPEC): Pass --no-add-needed to the
linker.
* config/gnu-user.h (LINK_EH_SPEC): Likewise.
* config/alpha/elf.h (LINK_EH_SPEC): Likewise.
* config/ia64/linux.h (LINK_EH_SPEC): Likewise.
--- gcc/config/alpha/elf.h.jj 2011-01-03 12:52:31.118056764 +0100
+++ gcc/config/alpha/elf.h 2011-01-04 18:14:10.931874160 +0100
@@ -165,5 +165,5 @@ extern int alpha_this_gpdisp_sequence_nu
I imagine that other systems will catch up. In the meantime, it
doesn't harm to make sure that the data exists to be used later. */
#if defined(HAVE_LD_EH_FRAME_HDR)
-#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+#define LINK_EH_SPEC "--no-add-needed %{!static:--eh-frame-hdr} "
#endif
--- gcc/config/ia64/linux.h.jj 2011-01-03 13:02:11.462994522 +0100
+++ gcc/config/ia64/linux.h 2011-01-04 18:14:10.931874160 +0100
@@ -77,7 +77,7 @@ do { \
Signalize that because we have fde-glibc, we don't need all C shared libs
linked against -lgcc_s. */
#undef LINK_EH_SPEC
-#define LINK_EH_SPEC ""
+#define LINK_EH_SPEC "--no-add-needed "
/* Put all *tf routines in libgcc. */
#undef LIBGCC2_HAS_TF_MODE
--- gcc/config/gnu-user.h.jj 2011-01-03 12:53:03.739057299 +0100
+++ gcc/config/gnu-user.h 2011-01-04 18:14:10.932814884 +0100
@@ -82,7 +82,7 @@ see the files COPYING3 and COPYING.RUNTI
#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC
#if defined(HAVE_LD_EH_FRAME_HDR)
-#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+#define LINK_EH_SPEC "--no-add-needed %{!static:--eh-frame-hdr} "
#endif
#undef LINK_GCC_C_SEQUENCE_SPEC
--- gcc/config/rs6000/sysv4.h.jj 2011-01-03 13:02:18.255994215 +0100
+++ gcc/config/rs6000/sysv4.h 2011-01-04 18:14:10.933888871 +0100
@@ -820,7 +820,7 @@ extern int fixuplabelno;
-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}"
#if defined(HAVE_LD_EH_FRAME_HDR)
-# define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+# define LINK_EH_SPEC "--no-add-needed %{!static:--eh-frame-hdr} "
#endif
#define CPP_OS_LINUX_SPEC "-D__unix__ -D__gnu_linux__ -D__linux__ \

@@ -0,0 +1,87 @@
2005-11-28 Jakub Jelinek <jakub@redhat.com>
* config/rs6000/rs6000.c (rs6000_return_addr): If COUNT == 0,
read word RETURN_ADDRESS_OFFSET bytes above arg_pointer_rtx
instead of doing an extra indirection from frame_pointer_rtx.
* gcc.dg/20051128-1.c: New test.
--- gcc/config/rs6000/rs6000.c.jj 2005-11-26 14:38:01.000000000 +0100
+++ gcc/config/rs6000/rs6000.c 2005-11-28 20:32:18.000000000 +0100
@@ -21423,18 +21423,22 @@ rs6000_return_addr (int count, rtx frame
if (count != 0
|| ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
{
+ rtx x;
cfun->machine->ra_needs_full_frame = 1;
- return
- gen_rtx_MEM
- (Pmode,
- memory_address
- (Pmode,
- plus_constant (Pmode,
- copy_to_reg
- (gen_rtx_MEM (Pmode,
- memory_address (Pmode, frame))),
- RETURN_ADDRESS_OFFSET)));
+ if (count == 0)
+ {
+ gcc_assert (frame == frame_pointer_rtx);
+ x = arg_pointer_rtx;
+ }
+ else
+ {
+ x = memory_address (Pmode, frame);
+ x = copy_to_reg (gen_rtx_MEM (Pmode, x));
+ }
+
+ x = plus_constant (Pmode, x, RETURN_ADDRESS_OFFSET);
+ return gen_rtx_MEM (Pmode, memory_address (Pmode, x));
}
cfun->machine->ra_need_lr = 1;
--- gcc/testsuite/gcc.dg/20051128-1.c.jj 2005-10-10 11:21:41.096999000 +0200
+++ gcc/testsuite/gcc.dg/20051128-1.c 2005-11-28 12:30:57.000000000 +0100
@@ -0,0 +1,41 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -fpic" } */
+
+extern void exit (int);
+extern void abort (void);
+
+int b;
+
+struct A
+{
+ void *pad[147];
+ void *ra, *h;
+ long o;
+};
+
+void
+__attribute__((noinline))
+foo (struct A *a, void *x)
+{
+ __builtin_memset (a, 0, sizeof (a));
+ if (!b)
+ exit (0);
+}
+
+void
+__attribute__((noinline))
+bar (void)
+{
+ struct A a;
+
+ __builtin_unwind_init ();
+ foo (&a, __builtin_return_address (0));
+}
+
+int
+main (void)
+{
+ bar ();
+ abort ();
+ return 0;
+}

SOURCES/gcc48-pr28865.patch
@@ -0,0 +1,190 @@
2014-01-16 Nick Clifton <nickc@redhat.com>
PR middle-end/28865
* varasm.c (output_constant): Return the number of bytes actually
emitted.
(output_constructor_array_range): Update the field size with the
number of bytes emitted by output_constant.
(output_constructor_regular_field): Likewise. Also do not
complain if the total number of bytes emitted is now greater
than the expected fieldpos.
* output.h (output_constant): Update prototype and descriptive
comment.
* gcc.c-torture/compile/pr28865.c: New.
* gcc.c-torture/execute/pr28865.c: New.
--- gcc/varasm.c (revision 206660)
+++ gcc/varasm.c (revision 206661)
@@ -4474,8 +4474,10 @@ static unsigned HOST_WIDE_INT
This includes the pseudo-op such as ".int" or ".byte", and a newline.
Assumes output_addressed_constants has been done on EXP already.
- Generate exactly SIZE bytes of assembler data, padding at the end
- with zeros if necessary. SIZE must always be specified.
+ Generate at least SIZE bytes of assembler data, padding at the end
+ with zeros if necessary. SIZE must always be specified. The returned
+ value is the actual number of bytes of assembler data generated, which
+ may be bigger than SIZE if the object contains a variable length field.
SIZE is important for structure constructors,
since trailing members may have been omitted from the constructor.
@@ -4490,14 +4492,14 @@ static unsigned HOST_WIDE_INT
ALIGN is the alignment of the data in bits. */
-void
+unsigned HOST_WIDE_INT
output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align)
{
enum tree_code code;
unsigned HOST_WIDE_INT thissize;
if (size == 0 || flag_syntax_only)
- return;
+ return size;
/* See if we're trying to initialize a pointer in a non-default mode
to the address of some declaration somewhere. If the target says
@@ -4562,7 +4564,7 @@ output_constant (tree exp, unsigned HOST
&& vec_safe_is_empty (CONSTRUCTOR_ELTS (exp)))
{
assemble_zeros (size);
- return;
+ return size;
}
if (TREE_CODE (exp) == FDESC_EXPR)
@@ -4574,7 +4576,7 @@ output_constant (tree exp, unsigned HOST
#else
gcc_unreachable ();
#endif
- return;
+ return size;
}
/* Now output the underlying data. If we've handling the padding, return.
@@ -4612,8 +4614,7 @@ output_constant (tree exp, unsigned HOST
switch (TREE_CODE (exp))
{
case CONSTRUCTOR:
- output_constructor (exp, size, align, NULL);
- return;
+ return output_constructor (exp, size, align, NULL);
case STRING_CST:
thissize = MIN ((unsigned HOST_WIDE_INT)TREE_STRING_LENGTH (exp),
size);
@@ -4648,11 +4649,10 @@ output_constant (tree exp, unsigned HOST
case RECORD_TYPE:
case UNION_TYPE:
gcc_assert (TREE_CODE (exp) == CONSTRUCTOR);
- output_constructor (exp, size, align, NULL);
- return;
+ return output_constructor (exp, size, align, NULL);
case ERROR_MARK:
- return;
+ return 0;
default:
gcc_unreachable ();
@@ -4660,6 +4660,8 @@ output_constant (tree exp, unsigned HOST
if (size > thissize)
assemble_zeros (size - thissize);
+
+ return size;
}
@@ -4759,7 +4761,7 @@ output_constructor_array_range (oc_local
if (local->val == NULL_TREE)
assemble_zeros (fieldsize);
else
- output_constant (local->val, fieldsize, align2);
+ fieldsize = output_constant (local->val, fieldsize, align2);
/* Count its size. */
local->total_bytes += fieldsize;
@@ -4808,9 +4810,8 @@ output_constructor_regular_field (oc_loc
Note no alignment needed in an array, since that is guaranteed
if each element has the proper size. */
if ((local->field != NULL_TREE || local->index != NULL_TREE)
- && fieldpos != local->total_bytes)
+ && fieldpos > local->total_bytes)
{
- gcc_assert (fieldpos >= local->total_bytes);
assemble_zeros (fieldpos - local->total_bytes);
local->total_bytes = fieldpos;
}
@@ -4847,7 +4848,7 @@ output_constructor_regular_field (oc_loc
if (local->val == NULL_TREE)
assemble_zeros (fieldsize);
else
- output_constant (local->val, fieldsize, align2);
+ fieldsize = output_constant (local->val, fieldsize, align2);
/* Count its size. */
local->total_bytes += fieldsize;
--- gcc/output.h (revision 206660)
+++ gcc/output.h (revision 206661)
@@ -294,11 +294,13 @@ extern void output_quoted_string (FILE *
This includes the pseudo-op such as ".int" or ".byte", and a newline.
Assumes output_addressed_constants has been done on EXP already.
- Generate exactly SIZE bytes of assembler data, padding at the end
- with zeros if necessary. SIZE must always be specified.
+ Generate at least SIZE bytes of assembler data, padding at the end
+ with zeros if necessary. SIZE must always be specified. The returned
+ value is the actual number of bytes of assembler data generated, which
+ may be bigger than SIZE if the object contains a variable length field.
ALIGN is the alignment in bits that may be assumed for the data. */
-extern void output_constant (tree, unsigned HOST_WIDE_INT, unsigned int);
+extern unsigned HOST_WIDE_INT output_constant (tree, unsigned HOST_WIDE_INT, unsigned int);
/* When outputting delayed branch sequences, this rtx holds the
sequence being output. It is null when no delayed branch
--- gcc/testsuite/gcc.c-torture/execute/pr28865.c (revision 0)
+++ gcc/testsuite/gcc.c-torture/execute/pr28865.c (revision 206661)
@@ -0,0 +1,21 @@
+struct A { int a; char b[]; };
+union B { struct A a; char b[sizeof (struct A) + 31]; };
+union B b = { { 1, "123456789012345678901234567890" } };
+union B c = { { 2, "123456789012345678901234567890" } };
+
+__attribute__((noinline, noclone)) void
+foo (int *x[2])
+{
+ x[0] = &b.a.a;
+ x[1] = &c.a.a;
+}
+
+int
+main ()
+{
+ int *x[2];
+ foo (x);
+ if (*x[0] != 1 || *x[1] != 2)
+ __builtin_abort ();
+ return 0;
+}
--- gcc/testsuite/gcc.c-torture/compile/pr28865.c (revision 0)
+++ gcc/testsuite/gcc.c-torture/compile/pr28865.c (revision 206661)
@@ -0,0 +1,16 @@
+struct var_len
+{
+ int field1;
+ const char field2[];
+};
+
+/* Note - strictly speaking this array declaration is illegal
+ since each element has a variable length. GCC allows it
+ (for the moment) because it is used in existing code, such
+ as glibc. */
+static const struct var_len var_array[] =
+{
+ { 1, "Long exposure noise reduction" },
+ { 2, "Shutter/AE lock buttons" },
+ { 3, "Mirror lockup" }
+};

SOURCES/gcc48-pr38757.patch
@@ -0,0 +1,106 @@
2009-03-18 Jakub Jelinek <jakub@redhat.com>
PR debug/38757
* langhooks.h (struct lang_hooks): Add source_language langhook.
* langhooks-def.h (LANG_HOOKS_SOURCE_LANGUAGE): Define to NULL.
(LANG_HOOKS_INITIALIZER): Add LANG_HOOKS_SOURCE_LANGUAGE.
* dwarf2out.c (add_prototyped_attribute): Add DW_AT_prototype
also for DW_LANG_{C,C99,ObjC}.
(gen_compile_unit_die): Use lang_hooks.source_language () to
determine if DW_LANG_C99 or DW_LANG_C89 should be returned.
c/
* c-lang.c (c_source_language): New function.
(LANG_HOOKS_SOURCE_LANGUAGE): Define.
--- gcc/langhooks.h.jj 2011-01-03 12:53:05.125745450 +0100
+++ gcc/langhooks.h 2011-01-04 17:59:43.166744926 +0100
@@ -467,6 +467,10 @@ struct lang_hooks
gimplification. */
bool deep_unsharing;
+ /* Return year of the source language standard version if the FE supports
+ multiple versions of the standard. */
+ int (*source_language) (void);
+
/* Whenever you add entries here, make sure you adjust langhooks-def.h
and langhooks.c accordingly. */
};
--- gcc/langhooks-def.h.jj 2011-01-03 12:53:05.000000000 +0100
+++ gcc/langhooks-def.h 2011-01-04 18:00:44.858851030 +0100
@@ -118,6 +118,7 @@ extern void lhd_omp_firstprivatize_type_
#define LANG_HOOKS_BLOCK_MAY_FALLTHRU hook_bool_const_tree_true
#define LANG_HOOKS_EH_USE_CXA_END_CLEANUP false
#define LANG_HOOKS_DEEP_UNSHARING false
+#define LANG_HOOKS_SOURCE_LANGUAGE NULL
/* Attribute hooks. */
#define LANG_HOOKS_ATTRIBUTE_TABLE NULL
@@ -303,7 +304,8 @@ extern void lhd_end_section (void);
LANG_HOOKS_EH_PROTECT_CLEANUP_ACTIONS, \
LANG_HOOKS_BLOCK_MAY_FALLTHRU, \
LANG_HOOKS_EH_USE_CXA_END_CLEANUP, \
- LANG_HOOKS_DEEP_UNSHARING \
+ LANG_HOOKS_DEEP_UNSHARING, \
+ LANG_HOOKS_SOURCE_LANGUAGE \
}
#endif /* GCC_LANG_HOOKS_DEF_H */
--- gcc/c/c-lang.c.jj 2011-01-03 12:53:05.376056936 +0100
+++ gcc/c/c-lang.c 2011-01-04 17:59:43.167743798 +0100
@@ -36,6 +36,12 @@ along with GCC; see the file COPYING3.
enum c_language_kind c_language = clk_c;
+static int
+c_source_language (void)
+{
+ return flag_isoc99 ? 1999 : 1989;
+}
+
/* Lang hooks common to C and ObjC are declared in c-objc-common.h;
consequently, there should be very few hooks below. */
@@ -45,6 +51,8 @@ enum c_language_kind c_language = clk_c;
#define LANG_HOOKS_INIT c_objc_common_init
#undef LANG_HOOKS_INIT_TS
#define LANG_HOOKS_INIT_TS c_common_init_ts
+#undef LANG_HOOKS_SOURCE_LANGUAGE
+#define LANG_HOOKS_SOURCE_LANGUAGE c_source_language
/* Each front end provides its own lang hook initializer. */
struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER;
--- gcc/dwarf2out.c.jj 2011-01-03 12:53:05.102056475 +0100
+++ gcc/dwarf2out.c 2011-01-04 18:03:14.534151763 +0100
@@ -16109,9 +16109,18 @@ add_bit_size_attribute (dw_die_ref die,
static inline void
add_prototyped_attribute (dw_die_ref die, tree func_type)
{
- if (get_AT_unsigned (comp_unit_die (), DW_AT_language) == DW_LANG_C89
- && prototype_p (func_type))
- add_AT_flag (die, DW_AT_prototyped, 1);
+ switch (get_AT_unsigned (comp_unit_die (), DW_AT_language))
+ {
+ case DW_LANG_C:
+ case DW_LANG_C89:
+ case DW_LANG_C99:
+ case DW_LANG_ObjC:
+ if (prototype_p (func_type))
+ add_AT_flag (die, DW_AT_prototyped, 1);
+ break;
+ default:
+ break;
+ }
}
/* Add an 'abstract_origin' attribute below a given DIE. The DIE is found
@@ -18915,6 +18924,10 @@ gen_compile_unit_die (const char *filena
if (strcmp (language_string, "GNU Go") == 0)
language = DW_LANG_Go;
}
+ else if (strcmp (language_string, "GNU C") == 0
+ && lang_hooks.source_language
+ && lang_hooks.source_language () >= 1999)
+ language = DW_LANG_C99;
}
/* Use a degraded Fortran setting in strict DWARF2 so is_fortran works. */
else if (strcmp (language_string, "GNU Fortran") == 0)
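
With the hook wired up, a -std=c99 translation unit is tagged DW_LANG_C99
(0x000c) rather than DW_LANG_C89 (0x0001). One way to observe the attribute
(the file name and commands are illustrative; readelf is the standard
binutils tool):

/* t.c -- build with "gcc -std=c99 -g -c t.c", then run
   "readelf --debug-dump=info t.o | grep -m1 DW_AT_language":
   a patched compiler reports DW_LANG_C99, an unpatched one
   DW_LANG_C89.  */
int
main (void)
{
  return 0;
}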

@@ -0,0 +1,76 @@
2014-02-27 Jeff Law <law@redhat.com>
PR rtl-optimization/52714
* combine.c (try_combine): When splitting an unrecognized PARALLEL
into two independent simple sets, if I3 is a jump, ensure the
pattern we place into I3 is a (set (pc) ...)
* gcc.c-torture/compile/pr52714.c: New test.
2016-06-15 Jakub Jelinek <jakub@redhat.com>
* gcc.c-torture/compile/20160615-1.c: New test.
--- gcc/combine.c (revision 208203)
+++ gcc/combine.c (revision 208204)
@@ -3706,6 +3706,9 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx
#ifdef HAVE_cc0
&& !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
#endif
+ /* If I3 is a jump, ensure that set0 is a jump so that
+ we do not create invalid RTL. */
+ && (!JUMP_P (i3) || SET_DEST (XVECEXP (newpat, 0, 0)) == pc_rtx)
)
{
newi2pat = XVECEXP (newpat, 0, 1);
@@ -3716,6 +3719,9 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx
#ifdef HAVE_cc0
&& !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1))
#endif
+ /* If I3 is a jump, ensure that set1 is a jump so that
+ we do not create invalid RTL. */
+ && (!JUMP_P (i3) || SET_DEST (XVECEXP (newpat, 0, 1)) == pc_rtx)
)
{
newi2pat = XVECEXP (newpat, 0, 0);
--- gcc/testsuite/gcc.c-torture/compile/pr52714.c (revision 0)
+++ gcc/testsuite/gcc.c-torture/compile/pr52714.c (revision 208204)
@@ -0,0 +1,25 @@
+
+int __re_compile_fastmap(unsigned char *p)
+{
+ unsigned char **stack;
+ unsigned size;
+ unsigned avail;
+
+ stack = __builtin_alloca(5 * sizeof(unsigned char*));
+ if (stack == 0)
+ return -2;
+ size = 5;
+ avail = 0;
+
+ for (;;) {
+ switch (*p++) {
+ case 0:
+ if (avail == size)
+ return -2;
+ stack[avail++] = p;
+ }
+ }
+
+ return 0;
+}
+
--- gcc/testsuite/gcc.c-torture/compile/20160615-1.c.jj 2016-06-15 11:17:54.690689056 +0200
+++ gcc/testsuite/gcc.c-torture/compile/20160615-1.c 2016-06-15 11:17:48.811765657 +0200
@@ -0,0 +1,10 @@
+int a;
+void bar (int, unsigned, unsigned);
+
+void
+foo (unsigned x)
+{
+ unsigned b = a ? x : 0;
+ if (x || b)
+ bar (0, x, b);
+}

SOURCES/gcc48-pr53477.patch
@@ -0,0 +1,131 @@
2013-08-20 Phil Muldoon <pmuldoon@redhat.com>
PR libstdc++/53477
http://sourceware.org/bugzilla/show_bug.cgi?id=15195
* python/libstdcxx/v6/printers.py (Printer.__call__): If a value
is a reference, fetch referenced value.
(RxPrinter.invoke): Ditto.
* testsuite/libstdc++-prettyprinters/cxx11.cc (main): Add -O0
flag. Add referenced value tests.
--- libstdc++-v3/python/libstdcxx/v6/printers.py (revision 201887)
+++ libstdc++-v3/python/libstdcxx/v6/printers.py (revision 201888)
@@ -786,6 +786,11 @@ class RxPrinter(object):
def invoke(self, value):
if not self.enabled:
return None
+
+ if value.type.code == gdb.TYPE_CODE_REF:
+ if hasattr(gdb.Value,"referenced_value"):
+ value = value.referenced_value()
+
return self.function(self.name, value)
# A pretty-printer that conforms to the "PrettyPrinter" protocol from
@@ -841,6 +846,11 @@ class Printer(object):
return None
basename = match.group(1)
+
+ if val.type.code == gdb.TYPE_CODE_REF:
+ if hasattr(gdb.Value,"referenced_value"):
+ val = val.referenced_value()
+
if basename in self.lookup:
return self.lookup[basename].invoke(val)
--- libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx11.cc (revision 201887)
+++ libstdc++-v3/testsuite/libstdc++-prettyprinters/cxx11.cc (revision 201888)
@@ -1,5 +1,5 @@
// { dg-do run }
-// { dg-options "-std=gnu++11 -g" }
+// { dg-options "-std=gnu++11 -g -O0" }
// Copyright (C) 2011-2013 Free Software Foundation, Inc.
//
@@ -24,6 +24,8 @@
#include <string>
#include <iostream>
+typedef std::tuple<int, int> ExTuple;
+
template<class T>
void
placeholder(const T &s)
@@ -62,43 +64,75 @@ main()
std::forward_list<int> efl;
// { dg-final { note-test efl "empty std::forward_list" } }
+ std::forward_list<int> &refl = efl;
+// { dg-final { note-test refl "empty std::forward_list" } }
+
std::forward_list<int> fl;
fl.push_front(2);
fl.push_front(1);
// { dg-final { note-test fl {std::forward_list = {[0] = 1, [1] = 2}} } }
+ std::forward_list<int> &rfl = fl;
+// { dg-final { note-test rfl {std::forward_list = {[0] = 1, [1] = 2}} } }
+
std::unordered_map<int, std::string> eum;
// { dg-final { note-test eum "std::unordered_map with 0 elements" } }
+ std::unordered_map<int, std::string> &reum = eum;
+// { dg-final { note-test reum "std::unordered_map with 0 elements" } }
+
std::unordered_multimap<int, std::string> eumm;
// { dg-final { note-test eumm "std::unordered_multimap with 0 elements" } }
+ std::unordered_multimap<int, std::string> &reumm = eumm;
+// { dg-final { note-test reumm "std::unordered_multimap with 0 elements" } }
+
std::unordered_set<int> eus;
// { dg-final { note-test eus "std::unordered_set with 0 elements" } }
+ std::unordered_set<int> &reus = eus;
+// { dg-final { note-test reus "std::unordered_set with 0 elements" } }
+
std::unordered_multiset<int> eums;
// { dg-final { note-test eums "std::unordered_multiset with 0 elements" } }
+ std::unordered_multiset<int> &reums = eums;
+// { dg-final { note-test reums "std::unordered_multiset with 0 elements" } }
std::unordered_map<int, std::string> uom;
uom[5] = "three";
uom[3] = "seven";
// { dg-final { note-test uom {std::unordered_map with 2 elements = {[3] = "seven", [5] = "three"}} } }
+ std::unordered_map<int, std::string> &ruom = uom;
+// { dg-final { note-test ruom {std::unordered_map with 2 elements = {[3] = "seven", [5] = "three"}} } }
+
std::unordered_multimap<int, std::string> uomm;
uomm.insert(std::pair<int, std::string> (5, "three"));
uomm.insert(std::pair<int, std::string> (5, "seven"));
// { dg-final { note-test uomm {std::unordered_multimap with 2 elements = {[5] = "seven", [5] = "three"}} } }
+ std::unordered_multimap<int, std::string> &ruomm = uomm;
+// { dg-final { note-test ruomm {std::unordered_multimap with 2 elements = {[5] = "seven", [5] = "three"}} } }
std::unordered_set<int> uos;
uos.insert(5);
// { dg-final { note-test uos {std::unordered_set with 1 elements = {[0] = 5}} } }
+ std::unordered_set<int> &ruos = uos;
+// { dg-final { note-test ruos {std::unordered_set with 1 elements = {[0] = 5}} } }
std::unordered_multiset<int> uoms;
uoms.insert(5);
// { dg-final { note-test uoms {std::unordered_multiset with 1 elements = {[0] = 5}} } }
+ std::unordered_multiset<int> &ruoms = uoms;
+// { dg-final { note-test ruoms {std::unordered_multiset with 1 elements = {[0] = 5}} } }
std::unique_ptr<datum> uptr (new datum);
uptr->s = "hi bob";
uptr->i = 23;
// { dg-final { regexp-test uptr {std::unique_ptr.datum. containing 0x.*} } }
+ std::unique_ptr<datum> &ruptr = uptr;
+// { dg-final { regexp-test ruptr {std::unique_ptr.datum. containing 0x.*} } }
+ ExTuple tpl(6,7);
+// { dg-final { note-test tpl {std::tuple containing = {[1] = 6, [2] = 7}} } }
+ ExTuple &rtpl = tpl;
+// { dg-final { note-test rtpl {std::tuple containing = {[1] = 6, [2] = 7}} } }
placeholder(""); // Mark SPOT
use(efl);
use(fl);

SOURCES/gcc48-pr56564.patch
@@ -0,0 +1,654 @@
2013-06-19 Igor Zamyatin <igor.zamyatin@intel.com>
* gcc.dg/tree-ssa/loop-19.c: Add -fno-common.
2013-06-12 Jakub Jelinek <jakub@redhat.com>
PR target/56564
* varasm.c (decl_binds_to_current_def_p): Call binds_local_p
target hook even for !TREE_PUBLIC decls. If no resolution info
is available, return false for common and external decls.
* gcc.target/i386/pr56564-1.c: Skip on darwin, mingw and cygwin.
* gcc.target/i386/pr56564-3.c: Likewise.
2013-06-11 Jakub Jelinek <jakub@redhat.com>
PR target/56564
* varasm.c (get_variable_align): Move #endif to the right place.
2013-06-10 Jakub Jelinek <jakub@redhat.com>
PR target/56564
* varasm.c (align_variable): Don't use DATA_ALIGNMENT or
CONSTANT_ALIGNMENT if !decl_binds_to_current_def_p (decl).
Use DATA_ABI_ALIGNMENT for that case instead if defined.
(get_variable_align): New function.
(get_variable_section, emit_bss, emit_common,
assemble_variable_contents, place_block_symbol): Use
get_variable_align instead of DECL_ALIGN.
(assemble_noswitch_variable): Add align argument, use it
instead of DECL_ALIGN.
(assemble_variable): Adjust caller. Use get_variable_align
instead of DECL_ALIGN.
* config/i386/i386.h (DATA_ALIGNMENT): Adjust x86_data_alignment
caller.
(DATA_ABI_ALIGNMENT): Define.
* config/i386/i386-protos.h (x86_data_alignment): Adjust prototype.
* config/i386/i386.c (x86_data_alignment): Add opt argument. If
opt is false, only return the psABI mandated alignment increase.
* config/c6x/c6x.h (DATA_ALIGNMENT): Renamed to...
(DATA_ABI_ALIGNMENT): ... this.
* config/mmix/mmix.h (DATA_ALIGNMENT): Renamed to...
(DATA_ABI_ALIGNMENT): ... this.
* config/mmix/mmix.c (mmix_data_alignment): Adjust function comment.
* config/s390/s390.h (DATA_ALIGNMENT): Renamed to...
(DATA_ABI_ALIGNMENT): ... this.
* doc/tm.texi.in (DATA_ABI_ALIGNMENT): Document.
* doc/tm.texi: Regenerated.
* gcc.target/i386/pr56564-1.c: New test.
* gcc.target/i386/pr56564-2.c: New test.
* gcc.target/i386/pr56564-3.c: New test.
* gcc.target/i386/pr56564-4.c: New test.
* gcc.target/i386/avx256-unaligned-load-4.c: Add -fno-common.
* gcc.target/i386/avx256-unaligned-store-1.c: Likewise.
* gcc.target/i386/avx256-unaligned-store-3.c: Likewise.
* gcc.target/i386/avx256-unaligned-store-4.c: Likewise.
* gcc.target/i386/vect-sizes-1.c: Likewise.
* gcc.target/i386/memcpy-1.c: Likewise.
* gcc.dg/vect/costmodel/i386/costmodel-vect-31.c (tmp): Initialize.
* gcc.dg/vect/costmodel/x86_64/costmodel-vect-31.c (tmp): Likewise.
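
The ABI/optimization split the patch draws can be seen on a small example
(a sketch; per the x86-64 psABI wording quoted below, arrays larger than
15 bytes get a 16-byte minimum, while anything beyond that is an
optimization valid only for definitions that bind locally):

/* buf is TREE_PUBLIC and may be interposed at link time, so code
   referencing it may assume the psABI's 16-byte minimum
   (DATA_ABI_ALIGNMENT) but not the larger optimization-only bump
   that DATA_ALIGNMENT would give a locally bound definition.  */
char buf[32] = { 1 };

int
main (void)
{
  return ((__UINTPTR_TYPE__) &buf % 16) == 0 ? 0 : 1;
}
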
--- gcc/doc/tm.texi.in (revision 199897)
+++ gcc/doc/tm.texi.in (revision 199898)
@@ -1062,6 +1062,15 @@ arrays to be word-aligned so that @code{
constants to character arrays can be done inline.
@end defmac
+@defmac DATA_ABI_ALIGNMENT (@var{type}, @var{basic-align})
+Similar to @code{DATA_ALIGNMENT}, but for the cases where the ABI mandates
+some alignment increase, instead of optimization only purposes. E.g.@
+AMD x86-64 psABI says that variables with array type larger than 15 bytes
+must be aligned to 16 byte boundaries.
+
+If this macro is not defined, then @var{basic-align} is used.
+@end defmac
+
@defmac CONSTANT_ALIGNMENT (@var{constant}, @var{basic-align})
If defined, a C expression to compute the alignment given to a constant
that is being placed in memory. @var{constant} is the constant and
--- gcc/doc/tm.texi (revision 199897)
+++ gcc/doc/tm.texi (revision 199898)
@@ -1078,6 +1078,15 @@ arrays to be word-aligned so that @code{
constants to character arrays can be done inline.
@end defmac
+@defmac DATA_ABI_ALIGNMENT (@var{type}, @var{basic-align})
+Similar to @code{DATA_ALIGNMENT}, but for the cases where the ABI mandates
+some alignment increase, instead of optimization only purposes. E.g.@
+AMD x86-64 psABI says that variables with array type larger than 15 bytes
+must be aligned to 16 byte boundaries.
+
+If this macro is not defined, then @var{basic-align} is used.
+@end defmac
+
@defmac CONSTANT_ALIGNMENT (@var{constant}, @var{basic-align})
If defined, a C expression to compute the alignment given to a constant
that is being placed in memory. @var{constant} is the constant and
--- gcc/varasm.c (revision 199897)
+++ gcc/varasm.c (revision 199984)
@@ -966,13 +966,80 @@ align_variable (tree decl, bool dont_out
align = MAX_OFILE_ALIGNMENT;
}
- /* On some machines, it is good to increase alignment sometimes. */
if (! DECL_USER_ALIGN (decl))
{
+#ifdef DATA_ABI_ALIGNMENT
+ unsigned int data_abi_align
+ = DATA_ABI_ALIGNMENT (TREE_TYPE (decl), align);
+ /* For backwards compatibility, don't assume the ABI alignment for
+ TLS variables. */
+ if (! DECL_THREAD_LOCAL_P (decl) || data_abi_align <= BITS_PER_WORD)
+ align = data_abi_align;
+#endif
+
+ /* On some machines, it is good to increase alignment sometimes.
+ But as DECL_ALIGN is used both for actually emitting the variable
+ and for code accessing the variable as guaranteed alignment, we
+ can only increase the alignment if it is a performance optimization
+ if the references to it must bind to the current definition. */
+ if (decl_binds_to_current_def_p (decl))
+ {
+#ifdef DATA_ALIGNMENT
+ unsigned int data_align = DATA_ALIGNMENT (TREE_TYPE (decl), align);
+ /* Don't increase alignment too much for TLS variables - TLS space
+ is too precious. */
+ if (! DECL_THREAD_LOCAL_P (decl) || data_align <= BITS_PER_WORD)
+ align = data_align;
+#endif
+#ifdef CONSTANT_ALIGNMENT
+ if (DECL_INITIAL (decl) != 0
+ && DECL_INITIAL (decl) != error_mark_node)
+ {
+ unsigned int const_align
+ = CONSTANT_ALIGNMENT (DECL_INITIAL (decl), align);
+ /* Don't increase alignment too much for TLS variables - TLS
+ space is too precious. */
+ if (! DECL_THREAD_LOCAL_P (decl) || const_align <= BITS_PER_WORD)
+ align = const_align;
+ }
+#endif
+ }
+ }
+
+ /* Reset the alignment in case we have made it tighter, so we can benefit
+ from it in get_pointer_alignment. */
+ DECL_ALIGN (decl) = align;
+}
+
+/* Return DECL_ALIGN (decl), possibly increased for optimization purposes
+ beyond what align_variable returned. */
+
+static unsigned int
+get_variable_align (tree decl)
+{
+ unsigned int align = DECL_ALIGN (decl);
+
+ /* For user aligned vars or static vars align_variable already did
+ everything. */
+ if (DECL_USER_ALIGN (decl) || !TREE_PUBLIC (decl))
+ return align;
+
+#ifdef DATA_ABI_ALIGNMENT
+ if (DECL_THREAD_LOCAL_P (decl))
+ align = DATA_ABI_ALIGNMENT (TREE_TYPE (decl), align);
+#endif
+
+ /* For decls that bind to the current definition, align_variable
+ did also everything, except for not assuming ABI required alignment
+ of TLS variables. For other vars, increase the alignment here
+ as an optimization. */
+ if (!decl_binds_to_current_def_p (decl))
+ {
+ /* On some machines, it is good to increase alignment sometimes. */
#ifdef DATA_ALIGNMENT
unsigned int data_align = DATA_ALIGNMENT (TREE_TYPE (decl), align);
/* Don't increase alignment too much for TLS variables - TLS space
- is too precious. */
+ is too precious. */
if (! DECL_THREAD_LOCAL_P (decl) || data_align <= BITS_PER_WORD)
align = data_align;
#endif
@@ -989,9 +1056,7 @@ align_variable (tree decl, bool dont_out
#endif
}
- /* Reset the alignment in case we have made it tighter, so we can benefit
- from it in get_pointer_alignment. */
- DECL_ALIGN (decl) = align;
+ return align;
}
/* Return the section into which the given VAR_DECL or CONST_DECL
@@ -1043,7 +1108,8 @@ get_variable_section (tree decl, bool pr
return bss_noswitch_section;
}
- return targetm.asm_out.select_section (decl, reloc, DECL_ALIGN (decl));
+ return targetm.asm_out.select_section (decl, reloc,
+ get_variable_align (decl));
}
/* Return the block into which object_block DECL should be placed. */
@@ -1780,7 +1846,8 @@ emit_bss (tree decl ATTRIBUTE_UNUSED,
unsigned HOST_WIDE_INT rounded ATTRIBUTE_UNUSED)
{
#if defined ASM_OUTPUT_ALIGNED_BSS
- ASM_OUTPUT_ALIGNED_BSS (asm_out_file, decl, name, size, DECL_ALIGN (decl));
+ ASM_OUTPUT_ALIGNED_BSS (asm_out_file, decl, name, size,
+ get_variable_align (decl));
return true;
#endif
}
@@ -1796,10 +1863,11 @@ emit_common (tree decl ATTRIBUTE_UNUSED,
{
#if defined ASM_OUTPUT_ALIGNED_DECL_COMMON
ASM_OUTPUT_ALIGNED_DECL_COMMON (asm_out_file, decl, name,
- size, DECL_ALIGN (decl));
+ size, get_variable_align (decl));
return true;
#elif defined ASM_OUTPUT_ALIGNED_COMMON
- ASM_OUTPUT_ALIGNED_COMMON (asm_out_file, name, size, DECL_ALIGN (decl));
+ ASM_OUTPUT_ALIGNED_COMMON (asm_out_file, name, size,
+ get_variable_align (decl));
return true;
#else
ASM_OUTPUT_COMMON (asm_out_file, name, size, rounded);
@@ -1828,7 +1896,8 @@ emit_tls_common (tree decl ATTRIBUTE_UNU
NAME is the name of DECL's SYMBOL_REF. */
static void
-assemble_noswitch_variable (tree decl, const char *name, section *sect)
+assemble_noswitch_variable (tree decl, const char *name, section *sect,
+ unsigned int align)
{
unsigned HOST_WIDE_INT size, rounded;
@@ -1850,7 +1919,7 @@ assemble_noswitch_variable (tree decl, c
* (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
if (!sect->noswitch.callback (decl, name, size, rounded)
- && (unsigned HOST_WIDE_INT) DECL_ALIGN_UNIT (decl) > rounded)
+ && (unsigned HOST_WIDE_INT) (align / BITS_PER_UNIT) > rounded)
warning (0, "requested alignment for %q+D is greater than "
"implemented alignment of %wu", decl, rounded);
}
@@ -1880,7 +1949,7 @@ assemble_variable_contents (tree decl, c
/* Output the actual data. */
output_constant (DECL_INITIAL (decl),
tree_low_cst (DECL_SIZE_UNIT (decl), 1),
- DECL_ALIGN (decl));
+ get_variable_align (decl));
else
/* Leave space for it. */
assemble_zeros (tree_low_cst (DECL_SIZE_UNIT (decl), 1));
@@ -1904,6 +1973,7 @@ assemble_variable (tree decl, int top_le
const char *name;
rtx decl_rtl, symbol;
section *sect;
+ unsigned int align;
bool asan_protected = false;
/* This function is supposed to handle VARIABLES. Ensure we have one. */
@@ -2003,6 +2073,8 @@ assemble_variable (tree decl, int top_le
set_mem_align (decl_rtl, DECL_ALIGN (decl));
+ align = get_variable_align (decl);
+
if (TREE_PUBLIC (decl))
maybe_assemble_visibility (decl);
@@ -2032,12 +2104,12 @@ assemble_variable (tree decl, int top_le
place_block_symbol (symbol);
}
else if (SECTION_STYLE (sect) == SECTION_NOSWITCH)
- assemble_noswitch_variable (decl, name, sect);
+ assemble_noswitch_variable (decl, name, sect, align);
else
{
switch_to_section (sect);
- if (DECL_ALIGN (decl) > BITS_PER_UNIT)
- ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (DECL_ALIGN_UNIT (decl)));
+ if (align > BITS_PER_UNIT)
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
assemble_variable_contents (decl, name, dont_output_data);
if (asan_protected)
{
@@ -6709,10 +6781,10 @@ bool
decl_binds_to_current_def_p (tree decl)
{
gcc_assert (DECL_P (decl));
- if (!TREE_PUBLIC (decl))
- return true;
if (!targetm.binds_local_p (decl))
return false;
+ if (!TREE_PUBLIC (decl))
+ return true;
/* When resolution is available, just use it. */
if (TREE_CODE (decl) == VAR_DECL
&& (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
@@ -6730,10 +6802,20 @@ decl_binds_to_current_def_p (tree decl)
return resolution_to_local_definition_p (node->symbol.resolution);
}
/* Otherwise we have to assume the worst for DECL_WEAK (hidden weaks
- binds locally but still can be overwritten).
+ binds locally but still can be overwritten), DECL_COMMON (can be merged
+ with a non-common definition somewhere in the same module) or
+ DECL_EXTERNAL.
This relies on the fact that binds_local_p behaves as decl_replaceable_p
for all other declaration types. */
- return !DECL_WEAK (decl);
+ if (DECL_WEAK (decl))
+ return false;
+ if (DECL_COMMON (decl)
+ && (DECL_INITIAL (decl) == NULL
+ || DECL_INITIAL (decl) == error_mark_node))
+ return false;
+ if (DECL_EXTERNAL (decl))
+ return false;
+ return true;
}
/* A replaceable function or variable is one which may be replaced
@@ -6959,7 +7041,7 @@ place_block_symbol (rtx symbol)
else
{
decl = SYMBOL_REF_DECL (symbol);
- alignment = DECL_ALIGN (decl);
+ alignment = get_variable_align (decl);
size = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
if (flag_asan && asan_protect_global (decl))
{
--- gcc/config/s390/s390.h (revision 199897)
+++ gcc/config/s390/s390.h (revision 199898)
@@ -221,7 +221,7 @@ enum processor_flags
/* Alignment on even addresses for LARL instruction. */
#define CONSTANT_ALIGNMENT(EXP, ALIGN) (ALIGN) < 16 ? 16 : (ALIGN)
-#define DATA_ALIGNMENT(TYPE, ALIGN) (ALIGN) < 16 ? 16 : (ALIGN)
+#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) (ALIGN) < 16 ? 16 : (ALIGN)
/* Alignment is not required by the hardware. */
#define STRICT_ALIGNMENT 0
--- gcc/config/i386/i386.h (revision 199897)
+++ gcc/config/i386/i386.h (revision 199898)
@@ -859,7 +859,18 @@ enum target_cpu_default
cause character arrays to be word-aligned so that `strcpy' calls
that copy constants to character arrays can be done inline. */
-#define DATA_ALIGNMENT(TYPE, ALIGN) ix86_data_alignment ((TYPE), (ALIGN))
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ ix86_data_alignment ((TYPE), (ALIGN), true)
+
+/* Similar to DATA_ALIGNMENT, but for the cases where the ABI mandates
+ some alignment increase, instead of optimization only purposes. E.g.
+ AMD x86-64 psABI says that variables with array type larger than 15 bytes
+ must be aligned to 16 byte boundaries.
+
+ If this macro is not defined, then ALIGN is used. */
+
+#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) \
+ ix86_data_alignment ((TYPE), (ALIGN), false)
/* If defined, a C expression to compute the alignment for a local
variable. TYPE is the data type, and ALIGN is the alignment that
--- gcc/config/i386/i386-protos.h (revision 199897)
+++ gcc/config/i386/i386-protos.h (revision 199898)
@@ -207,7 +207,7 @@ extern void init_cumulative_args (CUMULA
#endif /* RTX_CODE */
#ifdef TREE_CODE
-extern int ix86_data_alignment (tree, int);
+extern int ix86_data_alignment (tree, int, bool);
extern unsigned int ix86_local_alignment (tree, enum machine_mode,
unsigned int);
extern unsigned int ix86_minimum_alignment (tree, enum machine_mode,
--- gcc/config/i386/i386.c (revision 199897)
+++ gcc/config/i386/i386.c (revision 199898)
@@ -25292,12 +25292,13 @@ ix86_constant_alignment (tree exp, int a
instead of that alignment to align the object. */
int
-ix86_data_alignment (tree type, int align)
+ix86_data_alignment (tree type, int align, bool opt)
{
int max_align
= optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
- if (AGGREGATE_TYPE_P (type)
+ if (opt
+ && AGGREGATE_TYPE_P (type)
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
&& (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
@@ -25309,14 +25310,17 @@ ix86_data_alignment (tree type, int alig
to 16byte boundary. */
if (TARGET_64BIT)
{
- if (AGGREGATE_TYPE_P (type)
- && TYPE_SIZE (type)
- && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
- || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
+ if ((opt ? AGGREGATE_TYPE_P (type) : TREE_CODE (type) == ARRAY_TYPE)
+ && TYPE_SIZE (type)
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
+ || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
return 128;
}
+ if (!opt)
+ return align;
+
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
--- gcc/config/c6x/c6x.h (revision 199897)
+++ gcc/config/c6x/c6x.h (revision 199898)
@@ -134,7 +134,7 @@ extern c6x_cpu_t c6x_arch;
Really only externally visible arrays must be aligned this way, as
only those are directly visible from another compilation unit. But
we don't have that information available here. */
-#define DATA_ALIGNMENT(TYPE, ALIGN) \
+#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) \
(((ALIGN) < BITS_PER_UNIT * 8 && TREE_CODE (TYPE) == ARRAY_TYPE) \
? BITS_PER_UNIT * 8 : (ALIGN))
--- gcc/config/mmix/mmix.h (revision 199897)
+++ gcc/config/mmix/mmix.h (revision 199898)
@@ -164,7 +164,7 @@ struct GTY(()) machine_function
/* Copied from elfos.h. */
#define MAX_OFILE_ALIGNMENT (32768 * 8)
-#define DATA_ALIGNMENT(TYPE, BASIC_ALIGN) \
+#define DATA_ABI_ALIGNMENT(TYPE, BASIC_ALIGN) \
mmix_data_alignment (TYPE, BASIC_ALIGN)
#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
--- gcc/config/mmix/mmix.c (revision 199897)
+++ gcc/config/mmix/mmix.c (revision 199898)
@@ -313,7 +313,7 @@ mmix_init_machine_status (void)
return ggc_alloc_cleared_machine_function ();
}
-/* DATA_ALIGNMENT.
+/* DATA_ABI_ALIGNMENT.
We have trouble getting the address of stuff that is located at other
than 32-bit alignments (GETA requirements), so try to give everything
at least 32-bit alignment. */
--- gcc/testsuite/gcc.target/i386/memcpy-1.c (revision 199897)
+++ gcc/testsuite/gcc.target/i386/memcpy-1.c (revision 199898)
@@ -1,6 +1,6 @@
/* { dg-do compile } */
/* { dg-require-effective-target ia32 } */
-/* { dg-options "-O2 -march=pentiumpro -minline-all-stringops" } */
+/* { dg-options "-O2 -march=pentiumpro -minline-all-stringops -fno-common" } */
/* { dg-final { scan-assembler "rep" } } */
/* { dg-final { scan-assembler "movs" } } */
/* { dg-final { scan-assembler-not "test" } } */
--- gcc/testsuite/gcc.target/i386/vect-sizes-1.c (revision 199897)
+++ gcc/testsuite/gcc.target/i386/vect-sizes-1.c (revision 199898)
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O3 -ffast-math -mavx -mtune=generic" } */
+/* { dg-options "-O3 -ffast-math -mavx -mtune=generic -fno-common" } */
double a[1024];
--- gcc/testsuite/gcc.target/i386/avx256-unaligned-load-4.c (revision 199897)
+++ gcc/testsuite/gcc.target/i386/avx256-unaligned-load-4.c (revision 199898)
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O3 -dp -mavx -mno-avx256-split-unaligned-load -mno-avx256-split-unaligned-store" } */
+/* { dg-options "-O3 -dp -mavx -mno-avx256-split-unaligned-load -mno-avx256-split-unaligned-store -fno-common" } */
#define N 1024
--- gcc/testsuite/gcc.target/i386/avx256-unaligned-store-1.c (revision 199897)
+++ gcc/testsuite/gcc.target/i386/avx256-unaligned-store-1.c (revision 199898)
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O3 -dp -mavx -mavx256-split-unaligned-store" } */
+/* { dg-options "-O3 -dp -mavx -mavx256-split-unaligned-store -fno-common" } */
#define N 1024
--- gcc/testsuite/gcc.target/i386/avx256-unaligned-store-3.c (revision 199897)
+++ gcc/testsuite/gcc.target/i386/avx256-unaligned-store-3.c (revision 199898)
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O3 -dp -mavx -mavx256-split-unaligned-store -mtune=generic" } */
+/* { dg-options "-O3 -dp -mavx -mavx256-split-unaligned-store -mtune=generic -fno-common" } */
#define N 1024
--- gcc/testsuite/gcc.target/i386/avx256-unaligned-store-4.c (revision 199897)
+++ gcc/testsuite/gcc.target/i386/avx256-unaligned-store-4.c (revision 199898)
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O3 -dp -mavx -mno-avx256-split-unaligned-load -mno-avx256-split-unaligned-store" } */
+/* { dg-options "-O3 -dp -mavx -mno-avx256-split-unaligned-load -mno-avx256-split-unaligned-store -fno-common" } */
#define N 1024
--- gcc/testsuite/gcc.target/i386/pr56564-1.c (revision 0)
+++ gcc/testsuite/gcc.target/i386/pr56564-1.c (revision 199985)
@@ -0,0 +1,26 @@
+/* PR target/56564 */
+/* { dg-do compile { target { fpic && lp64 } } } */
+/* { dg-skip-if "No symbol interposition for PIC" { *-*-mingw* *-*-cygwin* *-*-darwin* } } */
+/* { dg-options "-O3 -fpic -fdump-tree-optimized" } */
+
+struct S { long a, b; } s = { 5, 6 };
+char t[16] = { 7 };
+
+int
+foo (void)
+{
+ return ((__UINTPTR_TYPE__) &s) & 15;
+}
+
+int
+bar (void)
+{
+ return ((__UINTPTR_TYPE__) &t[0]) & 15;
+}
+
+/* { dg-final { scan-tree-dump-times "&s" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "&t" 0 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "return 0" 1 "optimized" } } */
+/* { dg-final { scan-assembler ".align\[ \t]*16\[^:]*\[\n\r]s:" { target { *-*-linux* } } } } */
+/* { dg-final { scan-assembler ".align\[ \t]*16\[^:]*\[\n\r]t:" { target { *-*-linux* } } } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
--- gcc/testsuite/gcc.target/i386/pr56564-2.c (revision 0)
+++ gcc/testsuite/gcc.target/i386/pr56564-2.c (revision 199898)
@@ -0,0 +1,25 @@
+/* PR target/56564 */
+/* { dg-do compile { target { *-*-linux* && lp64 } } } */
+/* { dg-options "-O3 -fno-pic -fdump-tree-optimized" } */
+
+struct S { long a, b; } s = { 5, 6 };
+char t[16] = { 7 };
+
+int
+foo (void)
+{
+ return ((__UINTPTR_TYPE__) &s) & 15;
+}
+
+int
+bar (void)
+{
+ return ((__UINTPTR_TYPE__) &t[0]) & 15;
+}
+
+/* { dg-final { scan-tree-dump-times "&s" 0 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "&t" 0 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "return 0" 2 "optimized" } } */
+/* { dg-final { scan-assembler ".align\[ \t]*16\[^:]*\[\n\r]s:" { target { *-*-linux* } } } } */
+/* { dg-final { scan-assembler ".align\[ \t]*16\[^:]*\[\n\r]t:" { target { *-*-linux* } } } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
--- gcc/testsuite/gcc.target/i386/pr56564-3.c (revision 0)
+++ gcc/testsuite/gcc.target/i386/pr56564-3.c (revision 199985)
@@ -0,0 +1,29 @@
+/* PR target/56564 */
+/* { dg-do compile { target { fpic && lp64 } } } */
+/* { dg-skip-if "No symbol interposition for PIC" { *-*-mingw* *-*-cygwin* *-*-darwin* } } */
+/* { dg-options "-O3 -fpic -fdump-tree-optimized" } */
+
+__thread struct S { long a, b; } s = { 5, 6 };
+__thread char t[16] = { 7 };
+
+int
+foo (void)
+{
+ return ((__UINTPTR_TYPE__) &s) & 15;
+}
+
+/* For backwards compatibility we don't assume that t must
+ be aligned to 16 bytes, but align it anyway. */
+
+int
+bar (void)
+{
+ return ((__UINTPTR_TYPE__) &t[0]) & 15;
+}
+
+/* { dg-final { scan-tree-dump-times "&s" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "&t" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "return 0" 0 "optimized" } } */
+/* { dg-final { scan-assembler-not ".align\[ \t]*16\[^:]*\[\n\r]s:" { target { *-*-linux* } } } } */
+/* { dg-final { scan-assembler ".align\[ \t]*16\[^:]*\[\n\r]t:" { target { *-*-linux* } } } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
--- gcc/testsuite/gcc.target/i386/pr56564-4.c (revision 0)
+++ gcc/testsuite/gcc.target/i386/pr56564-4.c (revision 199898)
@@ -0,0 +1,22 @@
+/* PR target/56564 */
+/* { dg-do compile { target { *-*-linux* && lp64 } } } */
+/* { dg-options "-O3 -fno-pic -fdump-tree-optimized" } */
+
+__thread struct S { long a, b; } s = { 5, 6 };
+__thread char t[16] = { 7 };
+
+int
+foo (void)
+{
+ return ((__UINTPTR_TYPE__) &s) & 15;
+}
+
+int
+bar (void)
+{
+ return ((__UINTPTR_TYPE__) &t[0]) & 15;
+}
+
+/* { dg-final { scan-assembler-not ".align\[ \t]*16\[^:]*\[\n\r]s:" { target { *-*-linux* } } } } */
+/* { dg-final { scan-assembler ".align\[ \t]*16\[^:]*\[\n\r]t:" { target { *-*-linux* } } } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
--- gcc/testsuite/gcc.dg/vect/costmodel/i386/costmodel-vect-31.c (revision 199897)
+++ gcc/testsuite/gcc.dg/vect/costmodel/i386/costmodel-vect-31.c (revision 199898)
@@ -18,7 +18,7 @@ struct s{
struct t e; /* unaligned (offset 2N+4N+4 B) */
};
-struct s tmp;
+struct s tmp = { 1 };
int main1 ()
{
--- gcc/testsuite/gcc.dg/vect/costmodel/x86_64/costmodel-vect-31.c (revision 199897)
+++ gcc/testsuite/gcc.dg/vect/costmodel/x86_64/costmodel-vect-31.c (revision 199898)
@@ -18,7 +18,7 @@ struct s{
struct t e; /* unaligned (offset 2N+4N+4 B) */
};
-struct s tmp;
+struct s tmp = { 1 };
int main1 ()
{
--- gcc/testsuite/gcc.dg/tree-ssa/loop-19.c (revision 200212)
+++ gcc/testsuite/gcc.dg/tree-ssa/loop-19.c (revision 200213)
@@ -6,7 +6,7 @@
/* { dg-do compile { target { i?86-*-* || { x86_64-*-* || powerpc_hard_double } } } } */
/* { dg-require-effective-target nonpic } */
-/* { dg-options "-O3 -fno-tree-loop-distribute-patterns -fno-prefetch-loop-arrays -fdump-tree-optimized" } */
+/* { dg-options "-O3 -fno-tree-loop-distribute-patterns -fno-prefetch-loop-arrays -fdump-tree-optimized -fno-common" } */
# define N 2000000
double a[N],c[N];
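
The distinction the new DATA_ABI_ALIGNMENT macro draws can be made concrete with a short sketch. The fragment below shows how a hypothetical target header might define the two macros side by side, modeled loosely on the i386 and c6x changes in this patch; the type tests and thresholds are illustrative only, not taken from any real port:

/* ABI-mandated increase: callers may assume this alignment at use
   sites even for definitions that can be interposed at link time, so
   align_variable applies it unconditionally.  */
#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) \
  (TREE_CODE (TYPE) == ARRAY_TYPE && (ALIGN) < 128 ? 128 : (ALIGN))

/* Optimization-only increase: only safe when every reference binds to
   the definition being emitted (decl_binds_to_current_def_p), which is
   exactly the condition the varasm.c changes above enforce.  */
#define DATA_ALIGNMENT(TYPE, ALIGN) \
  (AGGREGATE_TYPE_P (TYPE) && (ALIGN) < 256 ? 256 : (ALIGN))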
2014-02-14 Kyle McMartin <kyle@redhat.com>
PR pch/60010
* config/host-linux.c (TRY_EMPTY_VM_SPACE): Define for AArch64.
--- gcc/config/host-linux.c (revision 207784)
+++ gcc/config/host-linux.c (revision 207785)
@@ -86,6 +86,8 @@
# define TRY_EMPTY_VM_SPACE 0x60000000
#elif defined(__mc68000__)
# define TRY_EMPTY_VM_SPACE 0x40000000
+#elif defined(__aarch64__)
+# define TRY_EMPTY_VM_SPACE 0x1000000000
#elif defined(__ARM_EABI__)
# define TRY_EMPTY_VM_SPACE 0x60000000
#elif defined(__mips__) && defined(__LP64__)
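
TRY_EMPTY_VM_SPACE is the address at which the PCH machinery first tries to map precompiled-header data back into a fresh compiler process, so the value must name a region normally unused on the target. The following stand-alone C sketch shows the underlying probe; it is a simplification of what gcc/config/host-linux.c actually does (the real code sits behind host hooks), with the AArch64 constant from this patch used as the example address:

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

/* Request an anonymous mapping at BASE.  Without MAP_FIXED the kernel
   treats BASE as a hint, so we must check it was honored; a good
   TRY_EMPTY_VM_SPACE value is one for which this check rarely fails.  */
static void *
try_fixed_mapping (void *base, size_t size)
{
  void *p = mmap (base, size, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == base)
    return p;
  if (p != MAP_FAILED)
    munmap (p, size);           /* kernel picked another spot */
  return MAP_FAILED;
}

int
main (void)
{
  void *p = try_fixed_mapping ((void *) 0x1000000000, 1 << 20);
  if (p == MAP_FAILED)
    puts ("address range busy");
  else
    printf ("mapped at %p\n", p);
  return 0;
}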
2015-09-03 Jonathan Wakely <jwakely@redhat.com>
Backport from mainline
2015-04-27 Dmitry Prokoptsev <dprokoptsev@gmail.com>
Michael Hanselmann <public@hansmi.ch>
PR libstdc++/62258
* libsupc++/eh_ptr.cc (rethrow_exception): Increment count of
uncaught exceptions.
* testsuite/18_support/exception_ptr/62258.cc: New.
--- libstdc++-v3/libsupc++/eh_ptr.cc (revision 227455)
+++ libstdc++-v3/libsupc++/eh_ptr.cc (revision 227456)
@@ -245,6 +245,9 @@ std::rethrow_exception(std::exception_pt
__GXX_INIT_DEPENDENT_EXCEPTION_CLASS(dep->unwindHeader.exception_class);
dep->unwindHeader.exception_cleanup = __gxx_dependent_exception_cleanup;
+ __cxa_eh_globals *globals = __cxa_get_globals ();
+ globals->uncaughtExceptions += 1;
+
#ifdef _GLIBCXX_SJLJ_EXCEPTIONS
_Unwind_SjLj_RaiseException (&dep->unwindHeader);
#else
--- libstdc++-v3/testsuite/18_support/exception_ptr/62258.cc (revision 0)
+++ libstdc++-v3/testsuite/18_support/exception_ptr/62258.cc (revision 227456)
@@ -0,0 +1,61 @@
+// { dg-options "-std=gnu++11" }
+// { dg-require-atomic-builtins "" }
+
+// Copyright (C) 2015 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// PR libstdc++/62258
+
+#include <exception>
+#include <testsuite_hooks.h>
+
+struct check_on_destruct
+{
+ ~check_on_destruct();
+};
+
+check_on_destruct::~check_on_destruct()
+{
+ VERIFY(std::uncaught_exception());
+}
+
+int main ()
+{
+ VERIFY(!std::uncaught_exception());
+
+ try
+ {
+ check_on_destruct check;
+
+ try
+ {
+ throw 1;
+ }
+ catch (...)
+ {
+ VERIFY(!std::uncaught_exception());
+
+ std::rethrow_exception(std::current_exception());
+ }
+ }
+ catch (...)
+ {
+ VERIFY(!std::uncaught_exception());
+ }
+
+ VERIFY(!std::uncaught_exception());
+}
2014-11-04 Jiong Wang <jiong.wang@arm.com>
Wilco Dijkstra <wilco.dijkstra@arm.com>
PR target/63293
* config/aarch64/aarch64.c (aarch64_expand_epilogue): Add barriers before
stack adjustment.
--- gcc/config/aarch64/aarch64.c (revision 217090)
+++ gcc/config/aarch64/aarch64.c (revision 217091)
@@ -1989,6 +1989,9 @@ aarch64_expand_epilogue (bool for_sibcal
rtx insn;
rtx cfa_reg;
rtx cfi_ops = NULL;
+ /* We need to add a memory barrier to prevent reads from the deallocated stack. */
+ bool need_barrier_p = (get_frame_size () != 0
+ || cfun->machine->saved_varargs_size);
aarch64_layout_frame ();
original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
@@ -2030,6 +2033,9 @@ aarch64_expand_epilogue (bool for_sibcal
if (frame_pointer_needed
&& (crtl->outgoing_args_size || cfun->calls_alloca))
{
+ if (cfun->calls_alloca)
+ emit_insn (gen_stack_tie (stack_pointer_rtx, stack_pointer_rtx));
+
insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
hard_frame_pointer_rtx,
GEN_INT (- fp_offset)));
@@ -2048,6 +2054,9 @@ aarch64_expand_epilogue (bool for_sibcal
/* Restore the frame pointer and lr if the frame pointer is needed. */
if (offset > 0)
{
+ if (need_barrier_p && (!frame_pointer_needed || !fp_offset))
+ emit_insn (gen_stack_tie (stack_pointer_rtx, stack_pointer_rtx));
+
if (frame_pointer_needed)
{
rtx mem_fp, mem_lr;
@@ -2067,6 +2076,10 @@ aarch64_expand_epilogue (bool for_sibcal
+ UNITS_PER_WORD));
emit_insn (gen_load_pairdi (reg_fp, mem_fp, reg_lr, mem_lr));
+ if (need_barrier_p)
+ emit_insn (gen_stack_tie (stack_pointer_rtx,
+ stack_pointer_rtx));
+
insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
GEN_INT (offset)));
}
@@ -2128,6 +2141,9 @@ aarch64_expand_epilogue (bool for_sibcal
if (frame_size > -1)
{
+ if (need_barrier_p)
+ emit_insn (gen_stack_tie (stack_pointer_rtx, stack_pointer_rtx));
+
if (frame_size >= 0x1000000)
{
rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
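
The hazard being fenced off is easiest to see from the shape of affected source. In a function like the sketch below the frame is non-empty, so need_barrier_p is true; without the stack_tie barriers nothing in the RTL stream forbids the scheduler from sinking the final load from buf past the epilogue's stack-pointer adjustment, at which point a signal handler arriving in the gap may overwrite the just-deallocated bytes. The example is purely illustrative and is not taken from the PR:

#include <string.h>

int
last_use_of_frame (const char *s)
{
  char buf[64];                 /* non-empty frame: need_barrier_p */

  strncpy (buf, s, sizeof buf - 1);
  buf[sizeof buf - 1] = '\0';
  /* This load must complete before the frame is released; the new
     stack_tie insns keep it ahead of the SP adjustment.  */
  return buf[0];
}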
2016-06-01 Jakub Jelinek <jakub@redhat.com>
Backported from mainline
2015-10-02 Jonathan Wakely <jwakely@redhat.com>
PR libstdc++/65142
* src/c++11/random.cc (random_device::_M_getval()): Check read result.
--- libstdc++-v3/src/c++11/random.cc (revision 228423)
+++ libstdc++-v3/src/c++11/random.cc (revision 228424)
@@ -126,8 +126,10 @@ namespace std _GLIBCXX_VISIBILITY(defaul
#endif
result_type __ret;
- std::fread(reinterpret_cast<void*>(&__ret), sizeof(result_type),
- 1, _M_file);
+ const size_t e = std::fread(reinterpret_cast<void*>(&__ret),
+ sizeof(result_type), 1, _M_file);
+ if (e != 1)
+ std::__throw_runtime_error(__N("random_device could not be read"));
return __ret;
}
SOURCES/gcc48-pr66731.patch
2015-08-04 Szabolcs Nagy <szabolcs.nagy@arm.com>
Backport from mainline:
2015-07-06 Szabolcs Nagy <szabolcs.nagy@arm.com>
PR target/66731
* config/aarch64/aarch64.md (fnmul<mode>3): Handle -frounding-math.
* gcc.target/aarch64/fnmul-1.c: New.
* gcc.target/aarch64/fnmul-2.c: New.
* gcc.target/aarch64/fnmul-3.c: New.
* gcc.target/aarch64/fnmul-4.c: New.
--- gcc/config/aarch64/aarch64.md (revision 226591)
+++ gcc/config/aarch64/aarch64.md (revision 226592)
@@ -3101,6 +3101,17 @@
(mult:GPF
(neg:GPF (match_operand:GPF 1 "register_operand" "w"))
(match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT && !flag_rounding_math"
+ "fnmul\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "v8type" "fmul")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*fnmul<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (neg:GPF (mult:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w"))))]
"TARGET_FLOAT"
"fnmul\\t%<s>0, %<s>1, %<s>2"
[(set_attr "v8type" "fmul")
--- gcc/testsuite/gcc.target/aarch64/fnmul-1.c (nonexistent)
+++ gcc/testsuite/gcc.target/aarch64/fnmul-1.c (revision 226592)
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+double
+foo_d (double a, double b)
+{
+ /* { dg-final { scan-assembler "fnmul\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" } } */
+ return -a * b;
+}
+
+float
+foo_s (float a, float b)
+{
+ /* { dg-final { scan-assembler "fnmul\\ts\[0-9\]+, s\[0-9\]+, s\[0-9\]+" } } */
+ return -a * b;
+}
--- gcc/testsuite/gcc.target/aarch64/fnmul-2.c (nonexistent)
+++ gcc/testsuite/gcc.target/aarch64/fnmul-2.c (revision 226592)
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -frounding-math" } */
+
+double
+foo_d (double a, double b)
+{
+ /* { dg-final { scan-assembler "fneg\\td\[0-9\]+, d\[0-9\]+" } } */
+ /* { dg-final { scan-assembler "fmul\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" } } */
+ return -a * b;
+}
+
+float
+foo_s (float a, float b)
+{
+ /* { dg-final { scan-assembler "fneg\\ts\[0-9\]+, s\[0-9\]+" } } */
+ /* { dg-final { scan-assembler "fmul\\ts\[0-9\]+, s\[0-9\]+, s\[0-9\]+" } } */
+ return -a * b;
+}
--- gcc/testsuite/gcc.target/aarch64/fnmul-3.c (nonexistent)
+++ gcc/testsuite/gcc.target/aarch64/fnmul-3.c (revision 226592)
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+double
+foo_d (double a, double b)
+{
+ /* { dg-final { scan-assembler "fnmul\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" } } */
+ return -(a * b);
+}
+
+float
+foo_s (float a, float b)
+{
+ /* { dg-final { scan-assembler "fnmul\\ts\[0-9\]+, s\[0-9\]+, s\[0-9\]+" } } */
+ return -(a * b);
+}
--- gcc/testsuite/gcc.target/aarch64/fnmul-4.c (nonexistent)
+++ gcc/testsuite/gcc.target/aarch64/fnmul-4.c (revision 226592)
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -frounding-math" } */
+
+double
+foo_d (double a, double b)
+{
+ /* { dg-final { scan-assembler "fnmul\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" } } */
+ return -(a * b);
+}
+
+float
+foo_s (float a, float b)
+{
+ /* { dg-final { scan-assembler "fnmul\\ts\[0-9\]+, s\[0-9\]+, s\[0-9\]+" } } */
+ return -(a * b);
+}
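
The reason the pattern is split on -frounding-math: fnmul computes -(a*b), rounding the product and then negating, whereas source-level (-a)*b rounds a product of the opposite sign, and under a directed rounding mode the two can differ in the last ulp. The stand-alone program below demonstrates this; it is an illustration only, not part of the patch (on glibc, build with -frounding-math and link with -lm so the dynamic rounding mode is respected):

#include <fenv.h>
#include <stdio.h>

int
main (void)
{
  fesetround (FE_UPWARD);
  volatile double a = 1.0 / 3.0, b = 3.0;
  double x = -(a * b);          /* a*b rounds up to 1.0, then negated */
  double y = (-a) * b;          /* negative product rounds up, toward 0 */
  /* With the usual to-nearest value of a, this prints -0x1p+0 and
     -0x1.fffffffffffffp-1: one ulp apart.  */
  printf ("%a\n%a\n", x, y);
  return 0;
}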
2015-07-14 Matthias Klose <doko@ubuntu.com>
PR target/66840
* config/rs6000/t-rs6000 (TM_H): Add rs6000-cpus.def.
diff -Nrup a/gcc/config/rs6000/t-rs6000 b/gcc/config/rs6000/t-rs6000
--- a/gcc/config/rs6000/t-rs6000 2013-08-14 05:55:11.000000000 -0600
+++ b/gcc/config/rs6000/t-rs6000 2018-04-18 12:09:30.614737081 -0600
@@ -19,6 +19,7 @@
# <http://www.gnu.org/licenses/>.
TM_H += $(srcdir)/config/rs6000/rs6000-builtin.def
+TM_H += $(srcdir)/config/rs6000/rs6000-cpus.def
rs6000.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(REGS_H) hard-reg-set.h \
SOURCES/gcc48-pr67281.patch
2015-10-14 Peter Bergner <bergner@vnet.ibm.com>
Torvald Riegel <triegel@redhat.com>
PR target/67281
* config/rs6000/htm.md (UNSPEC_HTM_FENCE): New.
(tabort, tabort<wd>c, tabort<wd>ci, tbegin, tcheck, tend,
trechkpt, treclaim, tsr, ttest): Rename define_insns from this...
(*tabort, *tabort<wd>c, *tabort<wd>ci, *tbegin, *tcheck, *tend,
*trechkpt, *treclaim, *tsr, *ttest): ...to this. Add memory barrier.
(tabort, tabort<wd>c, tabort<wd>ci, tbegin, tcheck, tend,
trechkpt, treclaim, tsr, ttest): New define_expands.
* config/rs6000/rs6000-c.c (rs6000_target_modify_macros): Define
__TM_FENCE__ for htm.
* doc/extend.texi: Update documentation for htm builtins.
2015-08-03 Peter Bergner <bergner@vnet.ibm.com>
* config/rs6000/htm.md (tabort.): Restrict the source operand to
using a base register.
* gcc.target/powerpc/htm-tabort-no-r0.c: New test.
--- gcc/doc/extend.texi (revision 228826)
+++ gcc/doc/extend.texi (revision 228827)
@@ -16092,6 +16092,28 @@ unsigned int __builtin_tresume (void)
unsigned int __builtin_tsuspend (void)
@end smallexample
+Note that the semantics of the above HTM builtins are required to mimic
+the locking semantics used for critical sections. Builtins that are used
+to create a new transaction or restart a suspended transaction must have
+lock acquisition like semantics while those builtins that end or suspend a
+transaction must have lock release like semantics. Specifically, this must
+mimic lock semantics as specified by C++11, for example: Lock acquisition is
+as-if an execution of __atomic_exchange_n(&globallock,1,__ATOMIC_ACQUIRE)
+that returns 0, and lock release is as-if an execution of
+__atomic_store(&globallock,0,__ATOMIC_RELEASE), with globallock being an
+implicit implementation-defined lock used for all transactions. The HTM
+instructions associated with the builtins inherently provide the
+correct acquisition and release hardware barriers required. However,
+the compiler must also be prohibited from moving loads and stores across
+the builtins in a way that would violate their semantics. This has been
+accomplished by adding memory barriers to the associated HTM instructions
+(which is a conservative approach to provide acquire and release semantics).
+Earlier versions of the compiler did not treat the HTM instructions as
+memory barriers. A @code{__TM_FENCE__} macro has been added, which can
+be used to determine whether the current compiler treats HTM instructions
+as memory barriers or not. This allows the user to explicitly add memory
+barriers to their code when using an older version of the compiler.
+
The following set of built-in functions are available to gain access
to the HTM specific special purpose registers.
--- gcc/config/rs6000/htm.md (revision 226531)
+++ gcc/config/rs6000/htm.md (revision 228827)
@@ -27,6 +27,14 @@ (define_constants
])
;;
+;; UNSPEC usage
+;;
+
+(define_c_enum "unspec"
+ [UNSPEC_HTM_FENCE
+ ])
+
+;;
;; UNSPEC_VOLATILE usage
;;
@@ -45,96 +53,223 @@ (define_c_enum "unspecv"
UNSPECV_HTM_MTSPR
])
+(define_expand "tabort"
+ [(parallel
+ [(set (match_operand:CC 1 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(match_operand:SI 0 "base_reg_operand" "b")]
+ UNSPECV_HTM_TABORT))
+ (set (match_dup 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[2] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[2]) = 1;
+})
-(define_insn "tabort"
+(define_insn "*tabort"
[(set (match_operand:CC 1 "cc_reg_operand" "=x")
- (unspec_volatile:CC [(match_operand:SI 0 "gpc_reg_operand" "r")]
- UNSPECV_HTM_TABORT))]
+ (unspec_volatile:CC [(match_operand:SI 0 "base_reg_operand" "b")]
+ UNSPECV_HTM_TABORT))
+ (set (match_operand:BLK 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"tabort. %0"
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "tabort<wd>c"
+(define_expand "tabort<wd>c"
+ [(parallel
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(match_operand 0 "u5bit_cint_operand" "n")
+ (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")]
+ UNSPECV_HTM_TABORTXC))
+ (set (match_dup 4) (unspec:BLK [(match_dup 4)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[4] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[4]) = 1;
+})
+
+(define_insn "*tabort<wd>c"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(unspec_volatile:CC [(match_operand 0 "u5bit_cint_operand" "n")
(match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r")]
- UNSPECV_HTM_TABORTXC))]
+ UNSPECV_HTM_TABORTXC))
+ (set (match_operand:BLK 4) (unspec:BLK [(match_dup 4)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"tabort<wd>c. %0,%1,%2"
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "tabort<wd>ci"
+(define_expand "tabort<wd>ci"
+ [(parallel
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(match_operand 0 "u5bit_cint_operand" "n")
+ (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand 2 "s5bit_cint_operand" "n")]
+ UNSPECV_HTM_TABORTXCI))
+ (set (match_dup 4) (unspec:BLK [(match_dup 4)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[4] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[4]) = 1;
+})
+
+(define_insn "*tabort<wd>ci"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(unspec_volatile:CC [(match_operand 0 "u5bit_cint_operand" "n")
(match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand 2 "s5bit_cint_operand" "n")]
- UNSPECV_HTM_TABORTXCI))]
+ UNSPECV_HTM_TABORTXCI))
+ (set (match_operand:BLK 4) (unspec:BLK [(match_dup 4)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"tabort<wd>ci. %0,%1,%2"
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "tbegin"
+(define_expand "tbegin"
+ [(parallel
+ [(set (match_operand:CC 1 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(match_operand 0 "const_0_to_1_operand" "n")]
+ UNSPECV_HTM_TBEGIN))
+ (set (match_dup 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[2] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[2]) = 1;
+})
+
+(define_insn "*tbegin"
[(set (match_operand:CC 1 "cc_reg_operand" "=x")
(unspec_volatile:CC [(match_operand 0 "const_0_to_1_operand" "n")]
- UNSPECV_HTM_TBEGIN))]
+ UNSPECV_HTM_TBEGIN))
+ (set (match_operand:BLK 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"tbegin. %0"
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "tcheck"
+(define_expand "tcheck"
+ [(parallel
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_HTM_TCHECK))
+ (set (match_dup 1) (unspec:BLK [(match_dup 1)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[1]) = 1;
+})
+
+(define_insn "*tcheck"
[(set (match_operand:CC 0 "cc_reg_operand" "=y")
- (unspec_volatile:CC [(const_int 0)]
- UNSPECV_HTM_TCHECK))]
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_HTM_TCHECK))
+ (set (match_operand:BLK 1) (unspec:BLK [(match_dup 1)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"tcheck %0"
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "tend"
+(define_expand "tend"
+ [(parallel
+ [(set (match_operand:CC 1 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(match_operand 0 "const_0_to_1_operand" "n")]
+ UNSPECV_HTM_TEND))
+ (set (match_dup 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[2] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[2]) = 1;
+})
+
+(define_insn "*tend"
[(set (match_operand:CC 1 "cc_reg_operand" "=x")
(unspec_volatile:CC [(match_operand 0 "const_0_to_1_operand" "n")]
- UNSPECV_HTM_TEND))]
+ UNSPECV_HTM_TEND))
+ (set (match_operand:BLK 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"tend. %0"
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "trechkpt"
+(define_expand "trechkpt"
+ [(parallel
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_HTM_TRECHKPT))
+ (set (match_dup 1) (unspec:BLK [(match_dup 1)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[1]) = 1;
+})
+
+(define_insn "*trechkpt"
[(set (match_operand:CC 0 "cc_reg_operand" "=x")
- (unspec_volatile:CC [(const_int 0)]
- UNSPECV_HTM_TRECHKPT))]
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_HTM_TRECHKPT))
+ (set (match_operand:BLK 1) (unspec:BLK [(match_dup 1)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"trechkpt."
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "treclaim"
+(define_expand "treclaim"
+ [(parallel
+ [(set (match_operand:CC 1 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(match_operand:SI 0 "gpc_reg_operand" "r")]
+ UNSPECV_HTM_TRECLAIM))
+ (set (match_dup 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[2] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[2]) = 1;
+})
+
+(define_insn "*treclaim"
[(set (match_operand:CC 1 "cc_reg_operand" "=x")
(unspec_volatile:CC [(match_operand:SI 0 "gpc_reg_operand" "r")]
- UNSPECV_HTM_TRECLAIM))]
+ UNSPECV_HTM_TRECLAIM))
+ (set (match_operand:BLK 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"treclaim. %0"
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "tsr"
+(define_expand "tsr"
+ [(parallel
+ [(set (match_operand:CC 1 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(match_operand 0 "const_0_to_1_operand" "n")]
+ UNSPECV_HTM_TSR))
+ (set (match_dup 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[2] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[2]) = 1;
+})
+
+(define_insn "*tsr"
[(set (match_operand:CC 1 "cc_reg_operand" "=x")
(unspec_volatile:CC [(match_operand 0 "const_0_to_1_operand" "n")]
- UNSPECV_HTM_TSR))]
+ UNSPECV_HTM_TSR))
+ (set (match_operand:BLK 2) (unspec:BLK [(match_dup 2)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"tsr. %0"
[(set_attr "type" "htm")
(set_attr "length" "4")])
-(define_insn "ttest"
+(define_expand "ttest"
+ [(parallel
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_HTM_TTEST))
+ (set (match_dup 1) (unspec:BLK [(match_dup 1)] UNSPEC_HTM_FENCE))])]
+ "TARGET_HTM"
+{
+ operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[1]) = 1;
+})
+
+(define_insn "*ttest"
[(set (match_operand:CC 0 "cc_reg_operand" "=x")
- (unspec_volatile:CC [(const_int 0)]
- UNSPECV_HTM_TTEST))]
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_HTM_TTEST))
+ (set (match_operand:BLK 1) (unspec:BLK [(match_dup 1)] UNSPEC_HTM_FENCE))]
"TARGET_HTM"
"tabortwci. 0,1,0"
[(set_attr "type" "htm")
--- gcc/config/rs6000/rs6000-c.c (revision 228826)
+++ gcc/config/rs6000/rs6000-c.c (revision 228827)
@@ -372,7 +372,11 @@ rs6000_target_modify_macros (bool define
if ((flags & OPTION_MASK_VSX) != 0)
rs6000_define_or_undefine_macro (define_p, "__VSX__");
if ((flags & OPTION_MASK_HTM) != 0)
- rs6000_define_or_undefine_macro (define_p, "__HTM__");
+ {
+ rs6000_define_or_undefine_macro (define_p, "__HTM__");
+ /* Tell the user that our HTM insn patterns act as memory barriers. */
+ rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
+ }
if ((flags & OPTION_MASK_P8_VECTOR) != 0)
rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
--- gcc/testsuite/gcc.target/powerpc/htm-tabort-no-r0.c (revision 0)
+++ gcc/testsuite/gcc.target/powerpc/htm-tabort-no-r0.c (revision 226532)
@@ -0,0 +1,12 @@
+/* { dg-do compile { target { powerpc*-*-* } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_htm_ok } */
+/* { dg-options "-O2 -mhtm -ffixed-r3 -ffixed-r4 -ffixed-r5 -ffixed-r6 -ffixed-r7 -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12" } */
+
+/* { dg-final { scan-assembler-not "tabort\\.\[ \t\]0" } } */
+
+int
+foo (void)
+{
+ return __builtin_tabort (10);
+}
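
The __TM_FENCE__ macro introduced above lets portable code detect whether the builtins already act as compiler memory barriers. A hedged sketch of its intended use follows (compile with -mhtm on POWER; the fallback fence and the helper names are assumptions of this sketch, not taken from the patch):

/* On compilers without this fix, __TM_FENCE__ is undefined and the HTM
   builtins do not block compiler code motion, so supply an explicit
   compiler barrier around the transaction.  */
#ifdef __TM_FENCE__
# define TM_COMPILER_FENCE() ((void) 0)
#else
# define TM_COMPILER_FENCE() __atomic_signal_fence (__ATOMIC_SEQ_CST)
#endif

static int counter;

void
increment_transactionally (void)
{
  TM_COMPILER_FENCE ();
  if (__builtin_tbegin (0))
    {
      counter++;                /* transactional region */
      __builtin_tend (0);
    }
  TM_COMPILER_FENCE ();
}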
2017-02-28 Jakub Jelinek <jakub@redhat.com>
Backport from mainline
2015-12-02 Jan Hubicka <hubicka@ucw.cz>
PR ipa/68184
* cgraphunit.c (cgraph_node::analyze): Set can_throw_external.
* g++.dg/torture/pr68184.C: New testcase.
--- gcc/cgraphunit.c.jj 2014-09-10 09:15:51.000000000 +0200
+++ gcc/cgraphunit.c 2017-02-28 08:24:44.387385510 +0100
@@ -626,8 +626,10 @@ cgraph_analyze_function (struct cgraph_n
}
else if (node->thunk.thunk_p)
{
- cgraph_create_edge (node, cgraph_get_node (node->thunk.alias),
- NULL, 0, CGRAPH_FREQ_BASE);
+ struct cgraph_node *t = cgraph_get_node (node->thunk.alias);
+ cgraph_create_edge (node, t, NULL, 0,
+ CGRAPH_FREQ_BASE)->can_throw_external
+ = !TREE_NOTHROW (t->symbol.decl);
}
else if (node->dispatcher_function)
{
--- gcc/testsuite/g++.dg/torture/pr68184.C.jj 2017-02-28 08:26:09.205246069 +0100
+++ gcc/testsuite/g++.dg/torture/pr68184.C 2015-12-03 16:39:34.589010321 +0100
@@ -0,0 +1,31 @@
+// { dg-do run }
+namespace {
+struct IFoo { virtual void foo() = 0; };
+struct IBar { virtual void bar() = 0; };
+
+struct FooBar : private IBar, private IFoo
+{
+ void call_foo()
+ {
+ try
+ {
+ static_cast<IFoo*>(this)->foo();
+ }
+ catch( ... ) {}
+ }
+ void foo() { throw 1; }
+ void bar() {}
+};
+
+void test()
+{
+ FooBar foobar;
+ foobar.call_foo();
+}
+}
+int main()
+{
+ test();
+ return 0;
+}
+
2015-12-04 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/68680
* calls.c (special_function_p): Return ECF_MAY_BE_ALLOCA for
BUILT_IN_ALLOCA{,_WITH_ALIGN}.
* gcc.target/i386/pr68680.c: New test.
--- gcc/calls.c (revision 231278)
+++ gcc/calls.c (revision 231279)
@@ -564,6 +564,17 @@ special_function_p (const_tree fndecl, i
flags |= ECF_NORETURN;
}
+ if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_ALLOCA:
+ case BUILT_IN_ALLOCA_WITH_ALIGN:
+ flags |= ECF_MAY_BE_ALLOCA;
+ break;
+ default:
+ break;
+ }
+
return flags;
}
--- gcc/testsuite/gcc.target/i386/pr68680.c (revision 0)
+++ gcc/testsuite/gcc.target/i386/pr68680.c (revision 231279)
@@ -0,0 +1,15 @@
+/* PR tree-optimization/68680 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-protector-strong" } */
+
+int foo (char *);
+
+int
+bar (unsigned long x)
+{
+ char a[x];
+ return foo (a);
+}
+
+/* Verify that this function is stack protected. */
+/* { dg-final { scan-assembler "stack_chk_fail" } } */
2016-02-10 Jonathan Wakely <jwakely@redhat.com>
PR libstdc++/69116
* include/bits/valarray_before.h (__fun, __fun_with_valarray): Only
define result_type for types which can be safely used with valarrays.
* testsuite/26_numerics/valarray/69116.cc: New.
--- libstdc++-v3/include/bits/valarray_before.h (revision 233264)
+++ libstdc++-v3/include/bits/valarray_before.h (revision 233265)
@@ -331,14 +331,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{ return pow(__x, __y); }
};
+ template<typename _Tp, bool _IsValidValarrayValue = !__is_abstract(_Tp)>
+ struct __fun_with_valarray
+ {
+ typedef _Tp result_type;
+ };
+
+ template<typename _Tp>
+ struct __fun_with_valarray<_Tp, false>
+ {
+ // No result type defined for invalid value types.
+ };
// We need these bits in order to recover the return type of
// some functions/operators now that we're no longer using
// function templates.
template<typename, typename _Tp>
- struct __fun
+ struct __fun : __fun_with_valarray<_Tp>
{
- typedef _Tp result_type;
};
// several specializations for relational operators.
--- libstdc++-v3/testsuite/26_numerics/valarray/69116.cc (nonexistent)
+++ libstdc++-v3/testsuite/26_numerics/valarray/69116.cc (revision 233265)
@@ -0,0 +1,53 @@
+// Copyright (C) 2016 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do compile }
+// { dg-options "-std=gnu++98" }
+
+// libstdc++/69116
+
+#include <exception>
+#include <valarray>
+
+template<typename T>
+ void foo(const T&) { }
+
+struct X : std::exception // makes namespace std an associated namespace
+{
+ virtual void pure() = 0;
+
+ typedef void(*func_type)(const X&);
+
+ void operator+(func_type) const;
+ void operator-(func_type) const;
+ void operator*(func_type) const;
+ void operator/(func_type) const;
+ void operator%(func_type) const;
+ void operator<<(func_type) const;
+ void operator>>(func_type) const;
+};
+
+void foo(X& x)
+{
+ x + foo;
+ x - foo;
+ x * foo;
+ x / foo;
+ x % foo;
+ x << foo;
+ x >> foo;
+}
2016-02-04 Jakub Jelinek <jakub@redhat.com>
Backported from mainline
2016-02-03 Jakub Jelinek <jakub@redhat.com>
PR target/69644
* config/rs6000/rs6000.c (rs6000_expand_atomic_compare_and_swap):
Force oldval into register if it does not satisfy reg_or_short_operand
predicate. Fix up formatting.
* gcc.dg/pr69644.c: New test.
--- gcc/config/rs6000/rs6000.c
+++ gcc/config/rs6000/rs6000.c
@@ -20263,6 +20263,9 @@ rs6000_expand_atomic_compare_and_swap (rtx operands[])
else if (reg_overlap_mentioned_p (retval, oldval))
oldval = copy_to_reg (oldval);
+ if (mode != TImode && !reg_or_short_operand (oldval, mode))
+ oldval = copy_to_mode_reg (mode, oldval);
+
mem = rs6000_pre_atomic_barrier (mem, mod_s);
label1 = NULL_RTX;
@@ -20277,10 +20280,8 @@ rs6000_expand_atomic_compare_and_swap (rtx operands[])
x = retval;
if (mask)
- {
- x = expand_simple_binop (SImode, AND, retval, mask,
- NULL_RTX, 1, OPTAB_LIB_WIDEN);
- }
+ x = expand_simple_binop (SImode, AND, retval, mask,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
cond = gen_reg_rtx (CCmode);
/* If we have TImode, synthesize a comparison. */
--- /dev/null
+++ gcc/testsuite/gcc.dg/pr69644.c
@@ -0,0 +1,11 @@
+/* PR target/69644 */
+/* { dg-do compile } */
+
+int
+main ()
+{
+ unsigned short x = 0x8000;
+ if (!__sync_bool_compare_and_swap (&x, 0x8000, 0) || x)
+ __builtin_abort ();
+ return 0;
+}
2017-03-08 Bernd Schmidt <bschmidt@redhat.com>
PR target/70549
* config/aarch64/aarch64.c (aarch64_secondary_reload): Reload
CORE_REGS rclass constants in [SD]Fmode through FP_REGS.
* g++.dg/opt/pr70549.C: New test.
--- gcc/config/aarch64/aarch64.c.jj 2017-03-08 15:50:55.000000000 +0100
+++ gcc/config/aarch64/aarch64.c 2017-03-08 16:01:15.426080172 +0100
@@ -3846,8 +3846,13 @@ aarch64_secondary_reload (bool in_p ATTR
&& GET_MODE_SIZE (mode) == 16 && MEM_P (x))
return FP_REGS;
+ if (rclass == CORE_REGS
+ && (mode == SFmode || mode == DFmode)
+ && CONSTANT_P (x))
+ return FP_REGS;
+
if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x))
- return CORE_REGS;
+ return CORE_REGS;
return NO_REGS;
}
--- gcc/testsuite/g++.dg/opt/pr70549.C.jj 2017-03-08 16:02:45.104918249 +0100
+++ gcc/testsuite/g++.dg/opt/pr70549.C 2017-03-08 16:02:14.000000000 +0100
@@ -0,0 +1,33 @@
+// PR target/70549
+// { dg-do compile }
+// { dg-options "-O2" }
+// { dg-additional-options "-fPIC" { target fpic } }
+
+struct A { float x; float y; };
+A a, b, c;
+int d, e;
+A bar ();
+void foo (A, A);
+inline A operator/ (A, A p2) { if (p2.x) return a; }
+struct B { A dval; };
+int baz (A, B, A, int);
+
+void
+test ()
+{
+ B q;
+ A f, g, h, k;
+ h.x = 1.0;
+ f = h;
+ struct A i, j = f;
+ do {
+ i = bar ();
+ g = i / j;
+ foo (g, c);
+ int l = baz (k, q, b, e);
+ if (l)
+ goto cleanup;
+ j = i;
+ } while (d);
+cleanup:;
+}
2016-12-13 Michael Meissner <meissner@linux.vnet.ibm.com>
Backport from mainline
2016-12-07 Michael Meissner <meissner@linux.vnet.ibm.com>
PR target/72717
* config/rs6000/rs6000.c (rs6000_expand_vector_init): If the
V2DImode elements are SUBREGs, convert the result into DImode
rather than failing in emit_move_insn.
--- gcc/testsuite/gcc.target/powerpc/pr72717.c (nonexistent)
+++ gcc/testsuite/gcc.target/powerpc/pr72717.c (revision 243626)
@@ -0,0 +1,18 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-mcpu=power8 -O2" } */
+
+typedef long V __attribute__((__vector_size__(32)));
+
+extern void foo (V *, V*);
+
+/* This test generated a failure in emit_move_insn. */
+
+void
+foo(V *p, V *q)
+{
+ V v = *q;
+ *p = v << v[0];
+}
--- gcc/config/rs6000/rs6000.c (revision 243625)
+++ gcc/config/rs6000/rs6000.c (revision 243626)
@@ -6667,25 +6667,43 @@
/* Double word values on VSX can use xxpermdi or lxvdsx. */
if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
{
- rtx op0 = XVECEXP (vals, 0, 0);
- rtx op1 = XVECEXP (vals, 0, 1);
+ rtx op[2];
+ size_t i;
+ size_t num_elements = (all_same) ? 1 : 2;
+ for (i = 0; i < num_elements; i++)
+ {
+ op[i] = XVECEXP (vals, 0, i);
+ /* Just in case there is a SUBREG with a smaller mode, do a
+ conversion. */
+ if (GET_MODE (op[i]) != inner_mode)
+ {
+ rtx tmp = gen_reg_rtx (inner_mode);
+ convert_move (tmp, op[i], 0);
+ op[i] = tmp;
+ }
+ /* Allow load with splat double word. */
+ else if (MEM_P (op[i]))
+ {
+ if (!all_same)
+ op[i] = force_reg (inner_mode, op[i]);
+ }
+ else if (!REG_P (op[i]))
+ op[i] = force_reg (inner_mode, op[i]);
+ }
+
if (all_same)
{
- if (!MEM_P (op0) && !REG_P (op0))
- op0 = force_reg (inner_mode, op0);
if (mode == V2DFmode)
- emit_insn (gen_vsx_splat_v2df (target, op0));
+ emit_insn (gen_vsx_splat_v2df (target, op[0]));
else
- emit_insn (gen_vsx_splat_v2di (target, op0));
+ emit_insn (gen_vsx_splat_v2di (target, op[0]));
}
else
{
- op0 = force_reg (inner_mode, op0);
- op1 = force_reg (inner_mode, op1);
if (mode == V2DFmode)
- emit_insn (gen_vsx_concat_v2df (target, op0, op1));
+ emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
else
- emit_insn (gen_vsx_concat_v2di (target, op0, op1));
+ emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
}
return;
}
2016-11-02 Will Schmidt <will_schmidt@vnet.ibm.com>
Backport from trunk
2016-10-26 Will Schmidt <will_schmidt@vnet.ibm.com>
PR middle-end/72747
* gimplify.c (gimplify_init_constructor): Move emit of constructor
assignment to earlier in the if/else logic.
* c-c++-common/pr72747-1.c: New test.
* c-c++-common/pr72747-2.c: Likewise.
--- gcc/gimplify.c (revision 241792)
+++ gcc/gimplify.c (revision 241793)
@@ -4273,24 +4273,23 @@ gimplify_init_constructor (tree *expr_p,
if (ret == GS_ERROR)
return GS_ERROR;
- else if (want_value)
+ /* If we have gimplified both sides of the initializer but have
+ not emitted an assignment, do so now. */
+ if (*expr_p)
+ {
+ tree lhs = TREE_OPERAND (*expr_p, 0);
+ tree rhs = TREE_OPERAND (*expr_p, 1);
+ gimple init = gimple_build_assign (lhs, rhs);
+ gimplify_seq_add_stmt (pre_p, init);
+ }
+ if (want_value)
{
*expr_p = object;
return GS_OK;
}
else
{
- /* If we have gimplified both sides of the initializer but have
- not emitted an assignment, do so now. */
- if (*expr_p)
- {
- tree lhs = TREE_OPERAND (*expr_p, 0);
- tree rhs = TREE_OPERAND (*expr_p, 1);
- gimple init = gimple_build_assign (lhs, rhs);
- gimplify_seq_add_stmt (pre_p, init);
- *expr_p = NULL;
- }
-
+ *expr_p = NULL;
return GS_ALL_DONE;
}
}
--- gcc/testsuite/c-c++-common/pr72747-1.c (nonexistent)
+++ gcc/testsuite/c-c++-common/pr72747-1.c (revision 241793)
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -fdump-tree-gimple" } */
+
+/* PR 72747: Test that cascaded definition is happening for constant vectors. */
+
+#include <altivec.h>
+
+int main (int argc, char *argv[])
+{
+ __vector int v1,v2;
+ v1 = v2 = vec_splats ((int) 42);
+ return 0;
+}
+/* { dg-final { scan-tree-dump-times " v2 = { 42, 42, 42, 42 }" 1 "gimple" } } */
+
--- gcc/testsuite/c-c++-common/pr72747-2.c (nonexistent)
+++ gcc/testsuite/c-c++-common/pr72747-2.c (revision 241793)
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-c -maltivec -fdump-tree-gimple" } */
+
+/* PR 72747: test that cascaded definition is happening for non constants. */
+
+void foo ()
+{
+ extern int i;
+ __vector int v,w;
+ v = w = (vector int) { i };
+}
+
+int main (int argc, char *argv[])
+{
+ return 0;
+}
+/* { dg-final { scan-tree-dump-times " w = " 1 "gimple" } } */
2016-08-25 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
Backport from mainline (minus test for POWER9 support)
2016-08-11 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
PR target/72863
* vsx.md (vsx_load_<mode>): For P8LE, emit swaps at expand time.
(vsx_store_<mode>): Likewise.
* gcc.target/powerpc/pr72863.c: New test.
--- gcc/config/rs6000/vsx.md (revision 239761)
+++ gcc/config/rs6000/vsx.md (revision 239762)
@@ -716,13 +716,27 @@ (define_expand "vsx_load_<mode>"
[(set (match_operand:VSX_M 0 "vsx_register_operand" "")
(match_operand:VSX_M 1 "memory_operand" ""))]
"VECTOR_MEM_VSX_P (<MODE>mode)"
- "")
+{
+ /* Expand to swaps if needed, prior to swap optimization. */
+ if (!BYTES_BIG_ENDIAN)
+ {
+ rs6000_emit_le_vsx_move (operands[0], operands[1], <MODE>mode);
+ DONE;
+ }
+})
(define_expand "vsx_store_<mode>"
[(set (match_operand:VSX_M 0 "memory_operand" "")
(match_operand:VSX_M 1 "vsx_register_operand" ""))]
"VECTOR_MEM_VSX_P (<MODE>mode)"
- "")
+{
+ /* Expand to swaps if needed, prior to swap optimization. */
+ if (!BYTES_BIG_ENDIAN)
+ {
+ rs6000_emit_le_vsx_move (operands[0], operands[1], <MODE>mode);
+ DONE;
+ }
+})
;; VSX vector floating point arithmetic instructions. The VSX scalar
--- gcc/testsuite/gcc.target/powerpc/pr72863.c (nonexistent)
+++ gcc/testsuite/gcc.target/powerpc/pr72863.c (revision 239762)
@@ -0,0 +1,27 @@
+/* { dg-do compile { target { powerpc64le-*-* } } } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-mcpu=power8 -O3" } */
+/* { dg-final { scan-assembler "lxvd2x" } } */
+/* { dg-final { scan-assembler "stxvd2x" } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
+
+#include <altivec.h>
+
+extern unsigned char *src, *dst;
+
+void b(void)
+{
+ int i;
+
+ unsigned char *s8 = src;
+ unsigned char *d8 = dst;
+
+ for (i = 0; i < 100; i++) {
+ vector unsigned char vs = vec_vsx_ld(0, s8);
+ vector unsigned char vd = vec_vsx_ld(0, d8);
+ vector unsigned char vr = vec_xor(vs, vd);
+ vec_vsx_st(vr, 0, d8);
+ s8 += 16;
+ d8 += 16;
+ }
+}
2017-05-30 Jakub Jelinek <jakub@redhat.com>
Backported from mainline
2016-09-16 Jakub Jelinek <jakub@redhat.com>
PR c++/77375
* class.c (check_bases): Set CLASSTYPE_HAS_MUTABLE if
TYPE_HAS_MUTABLE_P is set for any of the bases.
* g++.dg/cpp0x/mutable1.C: New test.
--- gcc/cp/class.c 2017-10-17 17:27:32.287980595 +0200
+++ gcc/cp/class.c 2017-10-17 17:29:11.104213281 +0200
@@ -1479,6 +1479,8 @@ check_bases (tree t,
|= CLASSTYPE_CONTAINS_EMPTY_CLASS_P (basetype);
TYPE_HAS_COMPLEX_DFLT (t) |= (!TYPE_HAS_DEFAULT_CONSTRUCTOR (basetype)
|| TYPE_HAS_COMPLEX_DFLT (basetype));
+ if (TYPE_HAS_MUTABLE_P (basetype))
+ CLASSTYPE_HAS_MUTABLE (t) = 1;
/* A standard-layout class is a class that:
...
--- /dev/null
+++ gcc/testsuite/g++.dg/cpp0x/mutable1.C
@@ -0,0 +1,12 @@
+// PR c++/77375
+// { dg-do run { target c++11 } }
+
+struct Base { mutable int i; };
+struct Derived : Base {};
+const Derived foo{};
+
+int
+main ()
+{
+ foo.i = 42;
+}
2017-05-30 Jakub Jelinek <jakub@redhat.com>
Backported from mainline
2016-12-21 Jakub Jelinek <jakub@redhat.com>
PR c/77767
* c-decl.c (grokdeclarator): If *expr is non-NULL, append expression
to *expr instead of overwriting it.
* gcc.c-torture/execute/pr77767.c: New test.
--- gcc/c/c-decl.c
+++ gcc/c/c-decl.c
@@ -5409,11 +5409,21 @@ grokdeclarator (const struct c_declarator *declarator,
if (TREE_CODE (type) == ERROR_MARK)
return error_mark_node;
if (expr == NULL)
- expr = &expr_dummy;
+ {
+ expr = &expr_dummy;
+ expr_dummy = NULL_TREE;
+ }
if (expr_const_operands == NULL)
expr_const_operands = &expr_const_operands_dummy;
- *expr = declspecs->expr;
+ if (declspecs->expr)
+ {
+ if (*expr)
+ *expr = build2 (COMPOUND_EXPR, TREE_TYPE (declspecs->expr), *expr,
+ declspecs->expr);
+ else
+ *expr = declspecs->expr;
+ }
*expr_const_operands = declspecs->expr_const_operands;
if (decl_context == FUNCDEF)
--- /dev/null
+++ gcc/testsuite/gcc.c-torture/execute/pr77767.c
@@ -0,0 +1,16 @@
+/* PR c/77767 */
+
+void
+foo (int a, int b[a++], int c, int d[c++])
+{
+ if (a != 2 || c != 2)
+ __builtin_abort ();
+}
+
+int
+main ()
+{
+ int e[10];
+ foo (1, e, 1, e);
+ return 0;
+}
2016-10-24 Florian Weimer <fweimer@redhat.com>
PR libgcc/78064
* unwind-c.c: Include auto-target.h.
--- libgcc/unwind-c.c (revision 241490)
+++ libgcc/unwind-c.c (revision 241491)
@@ -26,6 +26,7 @@ see the files COPYING3 and COPYING.RUNTI
#include "tconfig.h"
#include "tsystem.h"
+#include "auto-target.h"
#include "unwind.h"
#define NO_SIZE_OF_ENCODED_VALUE
#include "unwind-pe.h"
2017-05-30 Jakub Jelinek <jakub@redhat.com>
Backported from mainline
2016-11-16 Jakub Jelinek <jakub@redhat.com>
PR rtl-optimization/78378
* combine.c (make_extraction): Use force_to_mode for non-{REG,MEM}
inner only if pos is 0.
* gcc.c-torture/execute/pr78378.c: New test.
--- gcc/combine.c
+++ gcc/combine.c
@@ -7342,6 +7342,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
if (tmode != BLKmode
&& ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
&& !MEM_P (inner)
+ && (pos == 0 || REG_P (inner))
&& (inner_mode == tmode
|| !REG_P (inner)
|| TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
--- /dev/null
+++ gcc/testsuite/gcc.c-torture/execute/pr78378.c
@@ -0,0 +1,18 @@
+/* PR rtl-optimization/78378 */
+
+unsigned long long __attribute__ ((noinline, noclone))
+foo (unsigned long long x)
+{
+ x <<= 41;
+ x /= 232;
+ return 1 + (unsigned short) x;
+}
+
+int
+main ()
+{
+ unsigned long long x = foo (1);
+ if (x != 0x2c24)
+ __builtin_abort();
+ return 0;
+}
SOURCES/gcc48-pr78416.patch
2016-11-18 Jakub Jelinek <jakub@redhat.com>
PR middle-end/78416
* expmed.c (expand_divmod): For modes wider than HWI, take into
account implicit 1 bits above msb for EXACT_POWER_OF_2_OR_ZERO_P.
* gcc.dg/torture/pr78416.c: New test.
--- gcc/expmed.c
+++ gcc/expmed.c
@@ -3844,7 +3844,15 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
if (unsignedp)
ext_op1 &= GET_MODE_MASK (mode);
op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
- || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
+ /* If mode is wider than HWI and op1 has msb set,
+ then there are extra implicit 1 bits above it. */
+ && (GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT
+ || INTVAL (op1) >= 0))
+ || (! unsignedp
+ && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1)
+ && (GET_MODE_PRECISION (mode)
+ <= HOST_BITS_PER_WIDE_INT
+ || INTVAL (op1) < 0)));
}
/*
@@ -3987,8 +3995,17 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
op1_is_constant = CONST_INT_P (op1);
op1_is_pow2 = (op1_is_constant
&& ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
- || (! unsignedp
- && EXACT_POWER_OF_2_OR_ZERO_P (-UINTVAL (op1))))));
+ /* If mode is wider than HWI and op1 has msb set,
+ then there are extra implicit 1 bits above
+ it. */
+ && (GET_MODE_PRECISION (compute_mode)
+ <= HOST_BITS_PER_WIDE_INT
+ || INTVAL (op1) >= 0))
+ || (! unsignedp
+ && EXACT_POWER_OF_2_OR_ZERO_P (-UINTVAL (op1))
+ && (GET_MODE_PRECISION (compute_mode)
+ <= HOST_BITS_PER_WIDE_INT
+ || INTVAL (op1) < 0))));
}
/* If one of the operands is a volatile MEM, copy it into a register. */
@@ -4031,7 +4048,8 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
unsigned HOST_WIDE_INT d = (INTVAL (op1)
& GET_MODE_MASK (compute_mode));
- if (EXACT_POWER_OF_2_OR_ZERO_P (d))
+ if (EXACT_POWER_OF_2_OR_ZERO_P (d)
+ && (INTVAL (op1) >= 0 || size <= HOST_BITS_PER_WIDE_INT))
{
pre_shift = floor_log2 (d);
if (rem_flag)
@@ -4179,6 +4197,7 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
goto fail1;
}
else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
+ && (size <= HOST_BITS_PER_WIDE_INT || d >= 0)
&& (rem_flag
? smod_pow2_cheap (speed, compute_mode)
: sdiv_pow2_cheap (speed, compute_mode))
@@ -4192,7 +4211,9 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
compute_mode)
!= CODE_FOR_nothing)))
;
- else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
+ else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d)
+ && (size <= HOST_BITS_PER_WIDE_INT
+ || abs_d != (unsigned HOST_WIDE_INT) d))
{
if (rem_flag)
{
@@ -4504,7 +4525,10 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
case CEIL_MOD_EXPR:
if (unsignedp)
{
- if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
+ if (op1_is_constant
+ && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
+ && (size <= HOST_BITS_PER_WIDE_INT
+ || INTVAL (op1) >= 0))
{
rtx t1, t2, t3;
unsigned HOST_WIDE_INT d = INTVAL (op1);
--- gcc/testsuite/gcc.dg/torture/pr78416.c
+++ gcc/testsuite/gcc.dg/torture/pr78416.c
@@ -0,0 +1,17 @@
+/* PR middle-end/78416 */
+/* { dg-do run { target int128 } } */
+
+int
+main ()
+{
+ unsigned __int128 x;
+ x = 0xFFFFFFFFFFFFFFFFULL;
+ x /= ~0x7FFFFFFFFFFFFFFFLL;
+ if (x != 0)
+ __builtin_abort ();
+ x = ~0x7FFFFFFFFFFFFFFELL;
+ x /= ~0x7FFFFFFFFFFFFFFFLL;
+ if (x != 1)
+ __builtin_abort ();
+ return 0;
+}

@@ -0,0 +1,56 @@
2016-12-14 Wilco Dijkstra <wdijkstr@arm.com>
Jakub Jelinek <jakub@redhat.com>
PR target/78796
* config/aarch64/aarch64.c (aarch64_classify_symbol): Merge large
model checks into switch.
* gcc.dg/tls/pr78796.c: New test.
--- gcc/config/aarch64/aarch64.c (revision 243645)
+++ gcc/config/aarch64/aarch64.c (revision 243646)
@@ -4986,6 +4986,9 @@ aarch64_classify_symbol (rtx x,
switch (aarch64_cmodel)
{
case AARCH64_CMODEL_LARGE:
+ if (aarch64_tls_symbol_p (x))
+ return aarch64_classify_tls_symbol (x);
+
return SYMBOL_FORCE_TO_MEM;
case AARCH64_CMODEL_TINY:
--- gcc/testsuite/gcc.dg/tls/pr78796.c (nonexistent)
+++ gcc/testsuite/gcc.dg/tls/pr78796.c (revision 243646)
@@ -0,0 +1,32 @@
+/* PR target/78796 */
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+/* { dg-additional-options "-mcmodel=large" { target aarch64-*-* } } */
+/* { dg-require-effective-target tls } */
+
+struct S { int a, b, c, d, e; };
+struct S t;
+__thread struct S s;
+
+__attribute__((used, noinline, noclone)) void
+foo (int *x, int *y)
+{
+ asm volatile ("" : : "g" (x), "g" (y) : "memory");
+ if (*x != 1 || *y != 2)
+ __builtin_abort ();
+}
+
+__attribute__((used, noinline, noclone)) void
+bar (void)
+{
+ foo (&t.c, &s.c);
+}
+
+int
+main ()
+{
+ t.c = 1;
+ s.c = 2;
+ bar ();
+ return 0;
+}
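The control-flow change above is the whole fix: in the large code model, aarch64_classify_symbol returned SYMBOL_FORCE_TO_MEM before ever checking for TLS, so a thread-local symbol like s was materialized through the literal pool instead of a proper TLS access sequence; the test exercises that path with -mcmodel=large.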

SOURCES/gcc48-pr78875.patch
@@ -0,0 +1,254 @@
2017-01-17 Segher Boessenkool <segher@kernel.crashing.org>
PR target/78875
* config/rs6000/rs6000-opts.h (stack_protector_guard): New enum.
* config/rs6000/rs6000.c (rs6000_option_override_internal): Handle
the new options.
* config/rs6000/rs6000.md (stack_protect_set): Handle the new more
flexible settings.
(stack_protect_test): Ditto.
* config/rs6000/rs6000.opt (mstack-protector-guard=,
mstack-protector-guard-reg=, mstack-protector-guard-offset=): New
options.
* doc/invoke.texi (Option Summary) [RS/6000 and PowerPC Options]:
Add -mstack-protector-guard=, -mstack-protector-guard-reg=, and
-mstack-protector-guard-offset=.
(RS/6000 and PowerPC Options): Ditto.
* gcc.target/powerpc/ssp-1.c: New testcase.
* gcc.target/powerpc/ssp-2.c: New testcase.
--- gcc/config/rs6000/rs6000.opt (revision 244555)
+++ gcc/config/rs6000/rs6000.opt (revision 244556)
@@ -593,3 +593,31 @@ Allow float variables in upper registers
moptimize-swaps
Target Undocumented Var(rs6000_optimize_swaps) Init(1) Save
Analyze and remove doubleword swaps from VSX computations.
+
+mstack-protector-guard=
+Target RejectNegative Joined Enum(stack_protector_guard) Var(rs6000_stack_protector_guard) Init(SSP_TLS)
+Use given stack-protector guard.
+
+Enum
+Name(stack_protector_guard) Type(enum stack_protector_guard)
+Valid arguments to -mstack-protector-guard=:
+
+EnumValue
+Enum(stack_protector_guard) String(tls) Value(SSP_TLS)
+
+EnumValue
+Enum(stack_protector_guard) String(global) Value(SSP_GLOBAL)
+
+mstack-protector-guard-reg=
+Target RejectNegative Joined Var(rs6000_stack_protector_guard_reg_str)
+Use the given base register for addressing the stack-protector guard.
+
+TargetVariable
+int rs6000_stack_protector_guard_reg = 0
+
+mstack-protector-guard-offset=
+Target RejectNegative Joined Integer Var(rs6000_stack_protector_guard_offset_str)
+Use the given offset for addressing the stack-protector guard.
+
+TargetVariable
+long rs6000_stack_protector_guard_offset = 0
--- gcc/config/rs6000/rs6000.c (revision 244555)
+++ gcc/config/rs6000/rs6000.c (revision 244556)
@@ -3727,6 +3727,54 @@ rs6000_option_override_internal (bool gl
atoi (rs6000_sched_insert_nops_str));
}
+ /* Handle stack protector */
+ if (!global_options_set.x_rs6000_stack_protector_guard)
+#ifdef TARGET_THREAD_SSP_OFFSET
+ rs6000_stack_protector_guard = SSP_TLS;
+#else
+ rs6000_stack_protector_guard = SSP_GLOBAL;
+#endif
+
+#ifdef TARGET_THREAD_SSP_OFFSET
+ rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
+ rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
+#endif
+
+ if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
+ {
+ char *endp;
+ const char *str = rs6000_stack_protector_guard_offset_str;
+
+ errno = 0;
+ long offset = strtol (str, &endp, 0);
+ if (!*str || *endp || errno)
+ error ("%qs is not a valid number "
+ "in -mstack-protector-guard-offset=", str);
+
+ if (!IN_RANGE (offset, -0x8000, 0x7fff)
+ || (TARGET_64BIT && (offset & 3)))
+ error ("%qs is not a valid offset "
+ "in -mstack-protector-guard-offset=", str);
+
+ rs6000_stack_protector_guard_offset = offset;
+ }
+
+ if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
+ {
+ const char *str = rs6000_stack_protector_guard_reg_str;
+ int reg = decode_reg_name (str);
+
+ if (!IN_RANGE (reg, 1, 31))
+ error ("%qs is not a valid base register "
+ "in -mstack-protector-guard-reg=", str);
+
+ rs6000_stack_protector_guard_reg = reg;
+ }
+
+ if (rs6000_stack_protector_guard == SSP_TLS
+ && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
+ error ("-mstack-protector-guard=tls needs a valid base register");
+
if (global_init_p)
{
#ifdef TARGET_REGNAMES
--- gcc/config/rs6000/rs6000.md (revision 244555)
+++ gcc/config/rs6000/rs6000.md (revision 244556)
@@ -13092,19 +13092,23 @@
(define_expand "stack_protect_set"
- [(match_operand 0 "memory_operand" "")
- (match_operand 1 "memory_operand" "")]
+ [(match_operand 0 "memory_operand")
+ (match_operand 1 "memory_operand")]
""
{
-#ifdef TARGET_THREAD_SSP_OFFSET
- rtx tlsreg = gen_rtx_REG (Pmode, TARGET_64BIT ? 13 : 2);
- rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
- operands[1] = gen_rtx_MEM (Pmode, addr);
-#endif
+ if (rs6000_stack_protector_guard == SSP_TLS)
+ {
+ rtx reg = gen_rtx_REG (Pmode, rs6000_stack_protector_guard_reg);
+ rtx offset = GEN_INT (rs6000_stack_protector_guard_offset);
+ rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+ }
+
if (TARGET_64BIT)
emit_insn (gen_stack_protect_setdi (operands[0], operands[1]));
else
emit_insn (gen_stack_protect_setsi (operands[0], operands[1]));
+
DONE;
})
@@ -13127,21 +13131,26 @@
(set_attr "length" "12")])
(define_expand "stack_protect_test"
- [(match_operand 0 "memory_operand" "")
- (match_operand 1 "memory_operand" "")
- (match_operand 2 "" "")]
+ [(match_operand 0 "memory_operand")
+ (match_operand 1 "memory_operand")
+ (match_operand 2 "")]
""
{
- rtx test, op0, op1;
-#ifdef TARGET_THREAD_SSP_OFFSET
- rtx tlsreg = gen_rtx_REG (Pmode, TARGET_64BIT ? 13 : 2);
- rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
- operands[1] = gen_rtx_MEM (Pmode, addr);
-#endif
- op0 = operands[0];
- op1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, operands[1]), UNSPEC_SP_TEST);
- test = gen_rtx_EQ (VOIDmode, op0, op1);
- emit_jump_insn (gen_cbranchsi4 (test, op0, op1, operands[2]));
+ rtx guard = operands[1];
+
+ if (rs6000_stack_protector_guard == SSP_TLS)
+ {
+ rtx reg = gen_rtx_REG (Pmode, rs6000_stack_protector_guard_reg);
+ rtx offset = GEN_INT (rs6000_stack_protector_guard_offset);
+ rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
+ guard = gen_rtx_MEM (Pmode, addr);
+ }
+
+ operands[1] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, guard), UNSPEC_SP_TEST);
+ rtx test = gen_rtx_EQ (VOIDmode, operands[0], operands[1]);
+ rtx jump = gen_cbranchsi4 (test, operands[0], operands[1], operands[2]);
+ emit_jump_insn (jump);
+
DONE;
})
--- gcc/config/rs6000/rs6000-opts.h (revision 244555)
+++ gcc/config/rs6000/rs6000-opts.h (revision 244556)
@@ -154,6 +154,12 @@ enum rs6000_vector {
VECTOR_OTHER /* Some other vector unit */
};
+/* Where to get the canary for the stack protector. */
+enum stack_protector_guard {
+ SSP_TLS, /* per-thread canary in TLS block */
+ SSP_GLOBAL /* global canary */
+};
+
/* No enumeration is defined to index the -mcpu= values (entries in
processor_target_table), with the type int being used instead, but
we need to distinguish the special "native" value. */
--- gcc/doc/invoke.texi (revision 244555)
+++ gcc/doc/invoke.texi (revision 244556)
@@ -862,7 +862,9 @@ See RS/6000 and PowerPC Options.
-mcrypto -mno-crypto -mdirect-move -mno-direct-move @gol
-mquad-memory -mno-quad-memory @gol
-mquad-memory-atomic -mno-quad-memory-atomic @gol
--mcompat-align-parm -mno-compat-align-parm}
+-mcompat-align-parm -mno-compat-align-parm @gol
+-mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{reg} @gol
+-mstack-protector-guard-offset=@var{offset}}
@emph{RX Options}
@gccoptlist{-m64bit-doubles -m32bit-doubles -fpu -nofpu@gol
@@ -18295,6 +18297,23 @@ GCC.
In this version of the compiler, the @option{-mcompat-align-parm}
is the default, except when using the Linux ELFv2 ABI.
+
+@item -mstack-protector-guard=@var{guard}
+@itemx -mstack-protector-guard-reg=@var{reg}
+@itemx -mstack-protector-guard-offset=@var{offset}
+@opindex mstack-protector-guard
+@opindex mstack-protector-guard-reg
+@opindex mstack-protector-guard-offset
+Generate stack protection code using a canary at @var{guard}. Supported
+locations are @samp{global} for a global canary or @samp{tls} for a per-thread
+canary in the TLS block (the default with GNU libc version 2.4 or later).
+
+With the latter choice the options
+@option{-mstack-protector-guard-reg=@var{reg}} and
+@option{-mstack-protector-guard-offset=@var{offset}} furthermore specify
+which register to use as the base register for reading the canary, and the
+offset from that base register. The default for those is as specified in the
+relevant ABI.
@end table
@node RX Options
--- gcc/testsuite/gcc.target/powerpc/ssp-1.c (nonexistent)
+++ gcc/testsuite/gcc.target/powerpc/ssp-1.c (revision 244562)
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-protector-all -mstack-protector-guard=global" } */
+
+/* { dg-final { scan-assembler "__stack_chk_guard" } } */
+
+void f(void) { }
--- gcc/testsuite/gcc.target/powerpc/ssp-2.c (nonexistent)
+++ gcc/testsuite/gcc.target/powerpc/ssp-2.c (revision 244562)
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-protector-all -mstack-protector-guard=tls -mstack-protector-guard-reg=r18 -mstack-protector-guard-offset=0x3038" } */
+
+/* { dg-final { scan-assembler {\m12344\(r?18\)} } } */
+
+void f(void) { }
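(The scan pattern expects the guard load at 12344(r18): 0x3038 from -mstack-protector-guard-offset is 12344 in decimal, and the optional r covers assemblers printing the bare register number.)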

@@ -0,0 +1,53 @@
2017-03-01 Michael Meissner <meissner@linux.vnet.ibm.com>
PR target/79439
* config/rs6000/predicates.md (current_file_function_operand): Do
not allow self calls to be local if the function is replaceable.
* gcc.target/powerpc/pr79439.c: New test.
--- gcc/config/rs6000/predicates.md (revision 245812)
+++ gcc/config/rs6000/predicates.md (revision 245813)
@@ -1086,8 +1086,8 @@
&& ((DEFAULT_ABI != ABI_AIX
&& DEFAULT_ABI != ABI_ELFv2)
|| !SYMBOL_REF_EXTERNAL_P (op)))
- || (op == XEXP (DECL_RTL (current_function_decl),
- 0)))")))
+ || (op == XEXP (DECL_RTL (current_function_decl), 0)
+ && !decl_replaceable_p (current_function_decl)))")))
;; Return 1 if this operand is a valid input for a move insn.
(define_predicate "input_operand"
--- gcc/testsuite/gcc.target/powerpc/pr79439.c (nonexistent)
+++ gcc/testsuite/gcc.target/powerpc/pr79439.c (revision 245813)
@@ -0,0 +1,29 @@
+/* { dg-do compile { target { powerpc64*-*-linux* && lp64 } } } */
+/* { dg-options "-O2 -fpic" } */
+
+/* On the Linux 64-bit ABIs, we should not eliminate the NOP after the 'rec' call if
+ -fpic is used because rec can be interposed at link time (since it is
+ external), and the recursive call should call the interposed function. The
+ Linux 32-bit ABIs do not require NOPs after the BL instruction. */
+
+int f (void);
+
+void
+g (void)
+{
+}
+
+int
+rec (int a)
+{
+ int ret = 0;
+ if (a > 10 && f ())
+ ret += rec (a - 1);
+ g ();
+ return a + ret;
+}
+
+/* { dg-final { scan-assembler-times {\mbl f\M} 1 } } */
+/* { dg-final { scan-assembler-times {\mbl g\M} 2 } } */
+/* { dg-final { scan-assembler-times {\mbl rec\M} 1 } } */
+/* { dg-final { scan-assembler-times {\mnop\M} 4 } } */

@@ -0,0 +1,43 @@
2017-03-09 Jakub Jelinek <jakub@redhat.com>
PR c/79969
* c-decl.c (start_enum): Adjust DECL_SOURCE_LOCATION of
TYPE_STUB_DECL.
* gcc.dg/debug/dwarf2/enum-loc1.c: New test.
--- gcc/c/c-decl.c.jj 2017-03-05 22:39:45.000000000 +0100
+++ gcc/c/c-decl.c 2017-03-09 08:19:33.100042166 +0100
@@ -8201,6 +8201,10 @@ start_enum (location_t loc, struct c_enu
enumtype = make_node (ENUMERAL_TYPE);
pushtag (loc, name, enumtype);
}
+ /* Update type location to the one of the definition, instead of e.g.
+ a forward declaration. */
+ else if (TYPE_STUB_DECL (enumtype))
+ DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)) = loc;
if (C_TYPE_BEING_DEFINED (enumtype))
error_at (loc, "nested redefinition of %<enum %E%>", name);
--- gcc/testsuite/gcc.dg/debug/dwarf2/enum-loc1.c.jj 2017-03-09 08:09:30.742037844 +0100
+++ gcc/testsuite/gcc.dg/debug/dwarf2/enum-loc1.c 2017-03-09 08:16:45.202268438 +0100
@@ -0,0 +1,19 @@
+/* PR c/79969 */
+/* { dg-do compile } */
+/* { dg-options "-gdwarf-2 -dA -fno-merge-debug-strings" } */
+
+enum ENUMTAG;
+
+enum ENUMTAG
+{
+ B = 1,
+ C = 2
+};
+
+void
+bar (void)
+{
+ enum ENUMTAG a = C;
+}
+
+/* { dg-final { scan-assembler "DW_TAG_enumeration_type\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*\"ENUMTAG\[^\\r\\n\]*DW_AT_name(\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*DW_AT_)*\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*\[^0-9a-fA-FxX](0x)?7\[^0-9a-fA-FxX]\[^\\r\\n\]*DW_AT_decl_line" } } */

@@ -0,0 +1,45 @@
2017-05-30 Jakub Jelinek <jakub@redhat.com>
Backported from mainline
2017-03-22 Jakub Jelinek <jakub@redhat.com>
PR c++/80129
* gimplify.c (gimplify_modify_expr_rhs) <case COND_EXPR>: Clear
TREE_READONLY on result if writing it more than once.
* g++.dg/torture/pr80129.C: New test.
--- gcc/gimplify.c
+++ gcc/gimplify.c
@@ -4293,6 +4293,14 @@ gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
if (ret != GS_ERROR)
ret = GS_OK;
+ /* If we are going to write RESULT more than once, clear
+ TREE_READONLY flag, otherwise we might incorrectly promote
+ the variable to static const and initialize it at compile
+ time in one of the branches. */
+ if (TREE_CODE (result) == VAR_DECL
+ && TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node
+ && TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
+ TREE_READONLY (result) = 0;
if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
TREE_OPERAND (cond, 1)
= build2 (code, void_type_node, result,
--- /dev/null
+++ gcc/testsuite/g++.dg/torture/pr80129.C
@@ -0,0 +1,14 @@
+// PR c++/80129
+// { dg-do run }
+// { dg-options "-std=c++11" }
+
+struct A { bool a; int b; };
+
+int
+main ()
+{
+ bool c = false;
+ const A x = c ? A {true, 1} : A {false, 0};
+ if (x.a)
+ __builtin_abort ();
+}
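Here x is written by both arms of the gimplified conditional; with TREE_READONLY left set, x could be promoted to a static constant initialized at compile time from one arm, so x.a could read as true even though c is false and the {false, 0} arm was selected.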

@@ -0,0 +1,38 @@
2017-09-18 Richard Biener <rguenther@suse.de>
Backport from mainline
2017-04-10 Richard Biener <rguenther@suse.de>
PR middle-end/80362
* fold-const.c (fold_binary_loc): Look at unstripped ops when
looking for NEGATE_EXPR in -A / -B to A / B folding.
* gcc.dg/torture/pr80362.c: New testcase.
--- gcc/fold-const.c
+++ gcc/fold-const.c
@@ -11940,8 +11942,8 @@ fold_binary_loc (location_t loc,
/* Convert -A / -B to A / B when the type is signed and overflow is
undefined. */
if ((!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
- && TREE_CODE (arg0) == NEGATE_EXPR
- && negate_expr_p (arg1))
+ && TREE_CODE (op0) == NEGATE_EXPR
+ && negate_expr_p (op1))
{
if (INTEGRAL_TYPE_P (type))
fold_overflow_warning (("assuming signed overflow does not occur "
--- /dev/null
+++ gcc/testsuite/gcc.dg/torture/pr80362.c
@@ -0,0 +1,11 @@
+/* { dg-do run } */
+/* { dg-additional-options "-fstrict-overflow" } */
+
+int main()
+{
+ signed char var_0, var_1 = -128;
+ var_0 = (signed char)(-var_1) / 3;
+ if (var_0 > 0)
+ __builtin_abort();
+ return 0;
+}
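In the testcase, arg0 with its conversions stripped is the NEGATE_EXPR -var_1, so the old code folded (signed char)(-var_1) / 3 into var_1 / -3 = -128 / -3 = 42, and var_0 > 0 aborted; the correct result on typical targets is (signed char)128 / 3 = -42. Checking the unstripped op0/op1 keeps the conversion in view and blocks the bogus transformation.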

@@ -0,0 +1,53 @@
2017-06-27 Segher Boessenkool <segher@kernel.crashing.org>
Backport from trunk
2017-05-17 Segher Boessenkool <segher@kernel.crashing.org>
PR middle-end/80692
* real.c (do_compare): Give decimal_do_compare preference over
comparing just the signs.
* gcc.c-torture/execute/pr80692.c: New testcase.
--- gcc/real.c
+++ gcc/real.c
@@ -950,12 +950,12 @@ do_compare (const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b,
gcc_unreachable ();
}
- if (a->sign != b->sign)
- return -a->sign - -b->sign;
-
if (a->decimal || b->decimal)
return decimal_do_compare (a, b, nan_result);
+ if (a->sign != b->sign)
+ return -a->sign - -b->sign;
+
if (REAL_EXP (a) > REAL_EXP (b))
ret = 1;
else if (REAL_EXP (a) < REAL_EXP (b))
--- /dev/null
+++ gcc/testsuite/gcc.c-torture/execute/pr80692.c
@@ -0,0 +1,11 @@
+int main () {
+ _Decimal64 d64 = -0.DD;
+
+ if (d64 != 0.DD)
+ __builtin_abort ();
+
+ if (d64 != -0.DD)
+ __builtin_abort ();
+
+ return 0;
+}
--- /dev/null
+++ gcc/testsuite/gcc.c-torture/execute/pr80692.x
@@ -0,0 +1,7 @@
+load_lib target-supports.exp
+
+if { ! [check_effective_target_dfp] } {
+ return 1
+}
+
+return 0
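The bug: do_compare decided on the sign bits before ever dispatching to decimal_do_compare, so -0.DD compared less than 0.DD (signs 1 versus 0). Decimal operands are now handed to the decimal comparator first, which treats zeroes of either sign as equal; the sign shortcut remains valid for binary formats. The accompanying .x file skips the test on targets without decimal-float support.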

@@ -0,0 +1,67 @@
2017-07-18 Jonathan Wakely <jwakely@redhat.com>
PR libstdc++/81395
* include/bits/fstream.tcc (basic_filebuf::xsgetn): Don't set buffer
pointers for write mode after reading.
* testsuite/27_io/basic_filebuf/sgetn/char/81395.cc: New.
--- libstdc++-v3/include/bits/fstream.tcc (revision 254017)
+++ libstdc++-v3/include/bits/fstream.tcc (revision 254018)
@@ -699,7 +699,7 @@
if (__n == 0)
{
- _M_set_buffer(0);
+ // Set _M_reading. Buffer is already in initial 'read' mode.
_M_reading = true;
}
else if (__len == 0)
--- libstdc++-v3/testsuite/27_io/basic_filebuf/sgetn/char/81395.cc (nonexistent)
+++ libstdc++-v3/testsuite/27_io/basic_filebuf/sgetn/char/81395.cc (revision 254018)
@@ -0,0 +1,46 @@
+// Copyright (C) 2017 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-require-fileio "" }
+
+// PR libstdc++/81395
+
+#include <fstream>
+#include <cstring> // for std::memset
+#include <cstdio> // For BUFSIZ
+
+using std::memset;
+
+int main()
+{
+ {
+ std::filebuf fb;
+ fb.open("test.txt", std::ios::out);
+ char data[BUFSIZ];
+ memset(data, 'A', sizeof(data));
+ fb.sputn(data, sizeof(data));
+ }
+
+ std::filebuf fb;
+ fb.open("test.txt", std::ios::in|std::ios::out);
+ char buf[BUFSIZ];
+ memset(buf, 0, sizeof(buf));
+ fb.sgetn(buf, sizeof(buf));
+ // Switch from reading to writing without seeking first:
+ fb.sputn("B", 1);
+ fb.pubsync();
+}
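Per the PR, the __n == 0 path called _M_set_buffer(0), which also primes the put-area pointers; a later write without an intervening seek, as in the testcase above, then went through a stale put area and corrupted the file. Setting only _M_reading leaves the buffer in its initial read-mode state.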

@@ -0,0 +1,40 @@
2017-10-13 Jakub Jelinek <jakub@redhat.com>
PR target/82274
* libgcc2.c (__mulvDI3): If both operands have
the same highpart of -1 and the topmost bit of lowpart is 0,
multiplication overflows even if both lowparts are 0.
* gcc.dg/pr82274-1.c: New test.
--- libgcc/libgcc2.c 2017/10/13 16:50:13 253733
+++ libgcc/libgcc2.c 2017/10/13 17:19:12 253734
@@ -375,7 +375,8 @@
}
else
{
- if (uu.s.high == (Wtype) -1 && vv.s.high == (Wtype) - 1)
+ if ((uu.s.high & vv.s.high) == (Wtype) -1
+ && (uu.s.low | vv.s.low) != 0)
{
DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
* (UDWtype) (UWtype) vv.s.low};
--- /dev/null
+++ gcc/testsuite/gcc.dg/pr82274-1.c
@@ -0,0 +1,16 @@
+/* PR target/82274 */
+/* { dg-do run } */
+/* { dg-shouldfail "trapv" } */
+/* { dg-options "-ftrapv" } */
+
+int
+main ()
+{
+#ifdef __SIZEOF_INT128__
+ volatile __int128 m = -(((__int128) 1) << (__CHAR_BIT__ * __SIZEOF_INT128__ / 2));
+#else
+ volatile long long m = -(1LL << (__CHAR_BIT__ * __SIZEOF_LONG_LONG__ / 2));
+#endif
+ m = m * m;
+ return 0;
+}
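Worked example for the new condition: with 64-bit halves, m = -2^64 has highpart -1 and lowpart 0. The old test (both highparts equal to -1) sent it down the fast path, which computed lowpart * lowpart = 0 and reported no overflow, yet m * m = 2^128 exceeds the signed __int128 maximum of 2^127 - 1. Requiring a nonzero lowpart keeps such operands on the checking path, so -ftrapv traps as dg-shouldfail expects.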

@@ -0,0 +1,338 @@
2016-01-22 Torvald Riegel <triegel@redhat.com>
* beginend.cc (GTM::gtm_thread::serial_lock): Put on cacheline
boundary.
(htm_fastpath): Remove.
(gtm_thread::begin_transaction): Fix HTM fastpath.
(_ITM_commitTransaction): Adapt.
(_ITM_commitTransactionEH): Adapt.
* config/linux/rwlock.h (gtm_rwlock): Add htm_fastpath member
and accessors.
* config/posix/rwlock.h (gtm_rwlock): Likewise.
* config/posix/rwlock.cc (gtm_rwlock::gtm_rwlock): Adapt.
* libitm_i.h (htm_fastpath): Remove declaration.
* method-serial.cc (htm_mg): Adapt.
* query.cc (_ITM_inTransaction, _ITM_getTransactionId): Adapt.
* libitm/query.cc (_ITM_inTransaction, _ITM_getTransactionId): Adapt.
--- libitm/beginend.cc
+++ libitm/beginend.cc
@@ -32,7 +32,11 @@ using namespace GTM;
extern __thread gtm_thread_tls _gtm_thr_tls;
#endif
-gtm_rwlock GTM::gtm_thread::serial_lock;
+// Put this at the start of a cacheline so that serial_lock's writers and
+// htm_fastpath fields are on the same cacheline, so that HW transactions
+// only have to pay one cacheline capacity to monitor both.
+gtm_rwlock GTM::gtm_thread::serial_lock
+ __attribute__((aligned(HW_CACHELINE_SIZE)));
gtm_thread *GTM::gtm_thread::list_of_threads = 0;
unsigned GTM::gtm_thread::number_of_threads = 0;
@@ -54,9 +58,6 @@ static pthread_mutex_t global_tid_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t thr_release_key;
static pthread_once_t thr_release_once = PTHREAD_ONCE_INIT;
-// See gtm_thread::begin_transaction.
-uint32_t GTM::htm_fastpath = 0;
-
/* Allocate a transaction structure. */
void *
GTM::gtm_thread::operator new (size_t s)
@@ -174,9 +175,11 @@ GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
// lock's writer flag and thus abort if another thread is or becomes a
// serial transaction. Therefore, if the fastpath is enabled, then a
// transaction is not executing as a HW transaction iff the serial lock is
- // write-locked. This allows us to use htm_fastpath and the serial lock's
- // writer flag to reliable determine whether the current thread runs a HW
- // transaction, and thus we do not need to maintain this information in
+ // write-locked. Also, HW transactions monitor the fastpath control
+ // variable, so that they will only execute if dispatch_htm is still the
+ // current method group. This allows us to use htm_fastpath and the serial
+ // lock's writers flag to reliably determine whether the current thread runs
+ // a HW transaction, and thus we do not need to maintain this information in
// per-thread state.
// If an uninstrumented code path is not available, we can still run
// instrumented code from a HW transaction because the HTM fastpath kicks
@@ -187,9 +190,14 @@ GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
// indeed in serial mode, and HW transactions should never need serial mode
// for any internal changes (e.g., they never abort visibly to the STM code
// and thus do not trigger the standard retry handling).
- if (likely(htm_fastpath && (prop & pr_hasNoAbort)))
+ if (likely(serial_lock.get_htm_fastpath() && (prop & pr_hasNoAbort)))
{
- for (uint32_t t = htm_fastpath; t; t--)
+ // Note that the snapshot of htm_fastpath that we take here could be
+ // outdated, and a different method group than dispatch_htm may have
+ // been chosen in the meantime. Therefore, take care not to touch
+ // anything besides the serial lock, which is independent of method
+ // groups.
+ for (uint32_t t = serial_lock.get_htm_fastpath(); t; t--)
{
uint32_t ret = htm_begin();
if (htm_begin_success(ret))
@@ -197,9 +205,11 @@ GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
// We are executing a transaction now.
// Monitor the writer flag in the serial-mode lock, and abort
// if there is an active or waiting serial-mode transaction.
+ // Also checks that htm_fastpath is still nonzero and thus
+ // HW transactions are allowed to run.
// Note that this can also happen due to an enclosing
// serial-mode transaction; we handle this case below.
- if (unlikely(serial_lock.is_write_locked()))
+ if (unlikely(serial_lock.htm_fastpath_disabled()))
htm_abort();
else
// We do not need to set a_saveLiveVariables because of HTM.
@@ -210,9 +220,12 @@ GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
// retrying the transaction will be successful.
if (!htm_abort_should_retry(ret))
break;
+ // Check whether the HTM fastpath has been disabled.
+ if (!serial_lock.get_htm_fastpath())
+ break;
// Wait until any concurrent serial-mode transactions have finished.
// This is an empty critical section, but won't be elided.
- if (serial_lock.is_write_locked())
+ if (serial_lock.htm_fastpath_disabled())
{
tx = gtm_thr();
if (unlikely(tx == NULL))
@@ -618,7 +631,7 @@ _ITM_commitTransaction(void)
// a serial-mode transaction. If we are, then there will be no other
// concurrent serial-mode transaction.
// See gtm_thread::begin_transaction.
- if (likely(htm_fastpath && !gtm_thread::serial_lock.is_write_locked()))
+ if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled()))
{
htm_commit();
return;
@@ -634,7 +647,7 @@ _ITM_commitTransactionEH(void *exc_ptr)
{
#if defined(USE_HTM_FASTPATH)
// See _ITM_commitTransaction.
- if (likely(htm_fastpath && !gtm_thread::serial_lock.is_write_locked()))
+ if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled()))
{
htm_commit();
return;
--- libitm/config/linux/rwlock.h
+++ libitm/config/linux/rwlock.h
@@ -39,16 +39,29 @@ struct gtm_thread;
//
// In this implementation, writers are given highest priority access but
// read-to-write upgrades do not have a higher priority than writers.
+//
+// Do not change the layout of this class; it must remain a POD type with
+// standard layout, and the writers field must be first (i.e., so the
+// assembler code can assume that its address is equal to the address of the
+// respective instance of the class), and htm_fastpath must be second.
class gtm_rwlock
{
- // TODO Put futexes on different cachelines?
std::atomic<int> writers; // Writers' futex.
+ // We put the HTM fastpath control variable here so that HTM fastpath
+ // transactions can check efficiently whether they are allowed to run.
+ // This must be accessed atomically because threads can load this value
+ // when they are neither a registered reader nor writer (i.e., when they
+ // attempt to execute the HTM fastpath).
+ std::atomic<uint32_t> htm_fastpath;
+ // TODO Put these futexes on different cachelines? (writers and htm_fastpath
+ // should remain on the same cacheline.)
std::atomic<int> writer_readers;// A confirmed writer waits here for readers.
std::atomic<int> readers; // Readers wait here for writers (iff true).
public:
- gtm_rwlock() : writers(0), writer_readers(0), readers(0) {};
+ gtm_rwlock() : writers(0), htm_fastpath(0), writer_readers(0), readers(0)
+ { }
void read_lock (gtm_thread *tx);
void read_unlock (gtm_thread *tx);
@@ -59,12 +72,28 @@ class gtm_rwlock
bool write_upgrade (gtm_thread *tx);
void write_upgrade_finish (gtm_thread *tx);
- // Returns true iff there is a concurrent active or waiting writer.
- // This is primarily useful for simple HyTM approaches, and the value being
- // checked is loaded with memory_order_relaxed.
- bool is_write_locked()
+ // Returns true iff there is a concurrent active or waiting writer, or
+ // htm_fastpath is zero. This is primarily useful for simple HyTM
+ // approaches, and the values being checked are loaded with
+ // memory_order_relaxed.
+ bool htm_fastpath_disabled ()
+ {
+ return writers.load (memory_order_relaxed) != 0
+ || htm_fastpath.load (memory_order_relaxed) == 0;
+ }
+
+ // This does not need to return an exact value, hence relaxed MO is
+ // sufficient.
+ uint32_t get_htm_fastpath ()
+ {
+ return htm_fastpath.load (memory_order_relaxed);
+ }
+ // This must only be called while having acquired the write lock, and other
+ // threads do not need to load an exact value; hence relaxed MO is
+ // sufficient.
+ void set_htm_fastpath (uint32_t val)
{
- return writers.load (memory_order_relaxed) != 0;
+ htm_fastpath.store (val, memory_order_relaxed);
}
protected:
--- libitm/config/posix/rwlock.h
+++ libitm/config/posix/rwlock.h
@@ -44,19 +44,32 @@ struct gtm_thread;
//
// In this implementation, writers are given highest priority access but
// read-to-write upgrades do not have a higher priority than writers.
+//
+// Do not change the layout of this class; it must remain a POD type with
+// standard layout, and the summary field must be first (i.e., so the
+// assembler code can assume that its address is equal to the address of the
+// respective instance of the class), and htm_fastpath must be second.
class gtm_rwlock
{
- pthread_mutex_t mutex; // Held if manipulating any field.
- pthread_cond_t c_readers; // Readers wait here
- pthread_cond_t c_writers; // Writers wait here for writers
- pthread_cond_t c_confirmed_writers; // Writers wait here for readers
-
static const unsigned a_writer = 1; // An active writer.
static const unsigned w_writer = 2; // The w_writers field != 0
static const unsigned w_reader = 4; // The w_readers field != 0
std::atomic<unsigned int> summary; // Bitmask of the above.
+
+ // We put the HTM fastpath control variable here so that HTM fastpath
+ // transactions can check efficiently whether they are allowed to run.
+ // This must be accessed atomically because threads can load this value
+ // when they are neither a registered reader nor writer (i.e., when they
+ // attempt to execute the HTM fastpath).
+ std::atomic<uint32_t> htm_fastpath;
+
+ pthread_mutex_t mutex; // Held if manipulating any field.
+ pthread_cond_t c_readers; // Readers wait here
+ pthread_cond_t c_writers; // Writers wait here for writers
+ pthread_cond_t c_confirmed_writers; // Writers wait here for readers
+
unsigned int a_readers; // Nr active readers as observed by a writer
unsigned int w_readers; // Nr waiting readers
unsigned int w_writers; // Nr waiting writers
@@ -74,12 +87,28 @@ class gtm_rwlock
bool write_upgrade (gtm_thread *tx);
void write_upgrade_finish (gtm_thread *tx);
- // Returns true iff there is a concurrent active or waiting writer.
- // This is primarily useful for simple HyTM approaches, and the value being
- // checked is loaded with memory_order_relaxed.
- bool is_write_locked()
+ // Returns true iff there is a concurrent active or waiting writer, or
+ // htm_fastpath is zero. This is primarily useful for simple HyTM
+ // approaches, and the values being checked are loaded with
+ // memory_order_relaxed.
+ bool htm_fastpath_disabled ()
+ {
+ return (summary.load (memory_order_relaxed) & (a_writer | w_writer))
+ || htm_fastpath.load (memory_order_relaxed) == 0;
+ }
+
+ // This does not need to return an exact value, hence relaxed MO is
+ // sufficient.
+ uint32_t get_htm_fastpath ()
+ {
+ return htm_fastpath.load (memory_order_relaxed);
+ }
+ // This must only be called while having acquired the write lock, and other
+ // threads do not need to load an exact value; hence relaxed MO is
+ // sufficient.
+ void set_htm_fastpath (uint32_t val)
{
- return summary.load (memory_order_relaxed) & (a_writer | w_writer);
+ htm_fastpath.store (val, memory_order_relaxed);
}
protected:
--- libitm/config/posix/rwlock.cc
+++ libitm/config/posix/rwlock.cc
@@ -30,11 +30,12 @@ namespace GTM HIDDEN {
// ??? Move this back to the header file when constexpr is implemented.
gtm_rwlock::gtm_rwlock()
- : mutex (PTHREAD_MUTEX_INITIALIZER),
+ : summary (0),
+ htm_fastpath (0),
+ mutex (PTHREAD_MUTEX_INITIALIZER),
c_readers (PTHREAD_COND_INITIALIZER),
c_writers (PTHREAD_COND_INITIALIZER),
c_confirmed_writers (PTHREAD_COND_INITIALIZER),
- summary (0),
a_readers (0),
w_readers (0),
w_writers (0)
--- libitm/libitm_i.h
+++ libitm/libitm_i.h
@@ -336,10 +336,6 @@ extern abi_dispatch *dispatch_htm();
extern gtm_cacheline_mask gtm_mask_stack(gtm_cacheline *, gtm_cacheline_mask);
-// Control variable for the HTM fastpath that uses serial mode as fallback.
-// Non-zero if the HTM fastpath is enabled. See gtm_thread::begin_transaction.
-extern uint32_t htm_fastpath;
-
} // namespace GTM
#endif // LIBITM_I_H
--- libitm/method-serial.cc
+++ libitm/method-serial.cc
@@ -222,13 +222,13 @@ struct htm_mg : public method_group
// Enable the HTM fastpath if the HW is available. The fastpath is
// initially disabled.
#ifdef USE_HTM_FASTPATH
- htm_fastpath = htm_init();
+ gtm_thread::serial_lock.set_htm_fastpath(htm_init());
#endif
}
virtual void fini()
{
// Disable the HTM fastpath.
- htm_fastpath = 0;
+ gtm_thread::serial_lock.set_htm_fastpath(0);
}
};
@@ -288,7 +288,7 @@ GTM::gtm_thread::serialirr_mode ()
#if defined(USE_HTM_FASTPATH)
// HTM fastpath. If we are executing a HW transaction, don't go serial but
// continue. See gtm_thread::begin_transaction.
- if (likely(htm_fastpath && !gtm_thread::serial_lock.is_write_locked()))
+ if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled()))
return;
#endif
--- libitm/query.cc
+++ libitm/query.cc
@@ -49,7 +49,7 @@ _ITM_inTransaction (void)
// a transaction and thus we can't deduce this by looking at just the serial
// lock. This function isn't used in practice currently, so the easiest
// way to handle it is to just abort.
- if (htm_fastpath && htm_transaction_active())
+ if (gtm_thread::serial_lock.get_htm_fastpath() && htm_transaction_active())
htm_abort();
#endif
struct gtm_thread *tx = gtm_thr();
@@ -69,7 +69,7 @@ _ITM_getTransactionId (void)
{
#if defined(USE_HTM_FASTPATH)
// See ITM_inTransaction.
- if (htm_fastpath && htm_transaction_active())
+ if (gtm_thread::serial_lock.get_htm_fastpath() && htm_transaction_active())
htm_abort();
#endif
struct gtm_thread *tx = gtm_thr();
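Net effect of the patch: a hardware transaction now monitors a single cacheline that carries both the serial lock's writers flag and the htm_fastpath counter, so either an arriving serial-mode writer or a method-group switch (htm_fastpath reset to zero) aborts it, which is what makes the relaxed snapshot taken in begin_transaction safe.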

@@ -0,0 +1,35 @@
2015-07-15 Jonathan Wakely <jwakely@redhat.com>
PR libstdc++/57394
* include/std/streambuf (basic_streambuf(const basic_streambuf&)):
Fix initializer for _M_out_end.
(operator=(const basic_streambuf&)): Replace stub with actual
implementation.
--- libstdc++-v3/include/std/streambuf
+++ libstdc++-v3/include/std/streambuf
@@ -802,12 +802,22 @@
basic_streambuf(const basic_streambuf& __sb)
: _M_in_beg(__sb._M_in_beg), _M_in_cur(__sb._M_in_cur),
_M_in_end(__sb._M_in_end), _M_out_beg(__sb._M_out_beg),
- _M_out_cur(__sb._M_out_cur), _M_out_end(__sb._M_out_cur),
+ _M_out_cur(__sb._M_out_cur), _M_out_end(__sb._M_out_end),
_M_buf_locale(__sb._M_buf_locale)
{ }
basic_streambuf&
- operator=(const basic_streambuf&) { return *this; };
+ operator=(const basic_streambuf& __sb)
+ {
+ _M_in_beg = __sb._M_in_beg;
+ _M_in_cur = __sb._M_in_cur;
+ _M_in_end = __sb._M_in_end;
+ _M_out_beg = __sb._M_out_beg;
+ _M_out_cur = __sb._M_out_cur;
+ _M_out_end = __sb._M_out_end;
+ _M_buf_locale = __sb._M_buf_locale;
+ return *this;
+ };
};
// Explicit specialization declarations, defined in src/streambuf.cc.
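A minimal sketch of what the two fixes guarantee, using a hypothetical derived buffer (buf and area are illustrative names, not from the patch): copying must carry over the real end of the put area, and assignment must actually copy. Built against a fixed libstdc++, both asserts hold; before the fix the first saw _M_out_cur instead of _M_out_end and the second was a no-op.

#include <streambuf>
#include <cassert>

struct buf : std::streambuf
{
  buf () { }
  buf (const buf& b) : std::streambuf (b) { }   // exercises the copy ctor fix
  buf& operator= (const buf& b)                 // exercises the operator= fix
  { std::streambuf::operator= (b); return *this; }
  using std::streambuf::setp;    // expose the protected members we test
  using std::streambuf::epptr;
};

int main ()
{
  char area[16];
  buf a;
  a.setp (area, area + sizeof area);
  buf b (a);
  assert (b.epptr () == a.epptr ());  // held _M_out_cur (too small) before
  buf c;
  c = a;
  assert (c.epptr () == a.epptr ());  // operator= was a stub before the fix
  return 0;
}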

@@ -0,0 +1,78 @@
2013-07-25 Sterling Augustine <saugustine@google.com>
* dwarf2out.c (size_of_pubnames): Move code to...
(include_pubname_in_output): ...here. New.
(output_pubnames): Call include_pubname_in_output. Move assertion.
--- gcc/dwarf2out.c (revision 201254)
+++ gcc/dwarf2out.c (revision 201255)
@@ -7806,6 +7806,30 @@ unmark_all_dies (dw_die_ref die)
unmark_all_dies (AT_ref (a));
}
+/* Calculate if the entry should appear in the final output file. It may be
+ from a pruned type. */
+
+static bool
+include_pubname_in_output (vec<pubname_entry, va_gc> *table, pubname_entry *p)
+{
+ if (table == pubname_table)
+ {
+ /* Enumerator names are part of the pubname table, but the parent
+ DW_TAG_enumeration_type die may have been pruned. Don't output
+ them if that is the case. */
+ if (p->die->die_tag == DW_TAG_enumerator && !p->die->die_mark)
+ return false;
+
+ /* Everything else in the pubname table is included. */
+ return true;
+ }
+
+ /* The pubtypes table shouldn't include types that have been
+ pruned. */
+ return (p->die->die_offset != 0
+ || !flag_eliminate_unused_debug_types);
+}
+
/* Return the size of the .debug_pubnames or .debug_pubtypes table
generated for the compilation unit. */
@@ -7818,9 +7842,7 @@ size_of_pubnames (vec<pubname_entry, va_
size = DWARF_PUBNAMES_HEADER_SIZE;
FOR_EACH_VEC_ELT (*names, i, p)
- if (names != pubtype_table
- || p->die->die_offset != 0
- || !flag_eliminate_unused_debug_types)
+ if (include_pubname_in_output (names, p))
size += strlen (p->name) + DWARF_OFFSET_SIZE + 1;
size += DWARF_OFFSET_SIZE;
@@ -8999,22 +9021,14 @@ output_pubnames (vec<pubname_entry, va_g
FOR_EACH_VEC_ELT (*names, i, pub)
{
- /* Enumerator names are part of the pubname table, but the parent
- DW_TAG_enumeration_type die may have been pruned. Don't output
- them if that is the case. */
- if (pub->die->die_tag == DW_TAG_enumerator && !pub->die->die_mark)
- continue;
-
- /* We shouldn't see pubnames for DIEs outside of the main CU. */
- if (names == pubname_table)
- gcc_assert (pub->die->die_mark);
-
- if (names != pubtype_table
- || pub->die->die_offset != 0
- || !flag_eliminate_unused_debug_types)
+ if (include_pubname_in_output (names, pub))
{
dw_offset die_offset = pub->die->die_offset;
+ /* We shouldn't see pubnames for DIEs outside of the main CU. */
+ if (names == pubname_table)
+ gcc_assert (pub->die->die_mark);
+
/* If we're putting types in their own .debug_types sections,
the .debug_pubtypes table will still point to the compile
unit (not the type unit), so we want to use the offset of

@@ -0,0 +1,14 @@
2015-09-02 Alan Modra <amodra@gmail.com>
* config/rs6000/sysv4.h (LINK_SPEC): Delete link_target.
--- gcc/config/rs6000/sysv4.h (revision 227396)
+++ gcc/config/rs6000/sysv4.h (revision 227397)
@@ -574,7 +574,6 @@ ENDIAN_SELECT(" -mbig", " -mlittle", DEF
%{R*} \
%(link_shlib) \
%{!T*: %(link_start) } \
-%(link_target) \
%(link_os)"
/* Shared libraries are not default. */

@@ -0,0 +1,496 @@
2015-12-24 Kirill Yukhin <kirill.yukhin@intel.com>
* common/config/i386/i386-common.c (OPTION_MASK_ISA_PKU_SET): New.
(OPTION_MASK_ISA_PKU_UNSET): Ditto.
(ix86_handle_option): Handle OPT_mpku.
* config.gcc: Add pkuintrin.h to i[34567]86-*-* and x86_64-*-*
targets.
* config/i386/cpuid.h (bit_PKU, bit_OSPKE): New.
* config/i386/driver-i386.c (host_detect_local_cpu): Detect PKU feature.
* config/i386/i386-c.c (ix86_target_macros_internal): Handle PKU ISA
flag.
* config/i386/i386.c (ix86_target_string): Add "-mpku" to
ix86_target_opts.
(ix86_option_override_internal): Define PTA_PKU, mention new key
in skylake-avx512. Handle new ISA bits.
(ix86_valid_target_attribute_inner_p): Add "pku".
(enum ix86_builtins): Add IX86_BUILTIN_RDPKRU and IX86_BUILTIN_WRPKRU.
(builtin_description bdesc_special_args[]): Add new built-ins.
* config/i386/i386.h (define TARGET_PKU): New.
(define TARGET_PKU_P): Ditto.
* config/i386/i386.md (define_c_enum "unspecv"): Add UNSPEC_PKU.
(define_expand "rdpkru"): New.
(define_insn "*rdpkru"): Ditto.
(define_expand "wrpkru"): Ditto.
(define_insn "*wrpkru"): Ditto.
* config/i386/i386.opt (mpku): Ditto.
* config/i386/pkuintrin.h: New file.
* config/i386/x86intrin.h: Include pkuintrin.h
* doc/extend.texi: Describe new built-ins.
* doc/invoke.texi: Describe new switches.
* g++.dg/other/i386-2.C: Add -mpku.
* g++.dg/other/i386-3.C: Ditto.
* gcc.target/i386/rdpku-1.c: New test.
* gcc.target/i386/sse-12.c: Add -mpku.
* gcc.target/i386/sse-13.c: Ditto.
* gcc.target/i386/sse-22.c: Ditto.
* gcc.target/i386/sse-23.c: Ditto.
* gcc.target/i386/wrpku-1.c: New test.
--- gcc/config.gcc (revision 231943)
+++ gcc/config.gcc (revision 231945)
@@ -368,7 +368,7 @@ i[34567]86-*-*)
lzcntintrin.h bmiintrin.h bmi2intrin.h tbmintrin.h
avx2intrin.h fmaintrin.h f16cintrin.h rtmintrin.h
xtestintrin.h rdseedintrin.h prfchwintrin.h adxintrin.h
- fxsrintrin.h xsaveintrin.h xsaveoptintrin.h"
+ fxsrintrin.h xsaveintrin.h xsaveoptintrin.h pkuintrin.h"
;;
x86_64-*-*)
cpu_type=i386
@@ -383,7 +383,7 @@ x86_64-*-*)
lzcntintrin.h bmiintrin.h tbmintrin.h bmi2intrin.h
avx2intrin.h fmaintrin.h f16cintrin.h rtmintrin.h
xtestintrin.h rdseedintrin.h prfchwintrin.h adxintrin.h
- fxsrintrin.h xsaveintrin.h xsaveoptintrin.h"
+ fxsrintrin.h xsaveintrin.h xsaveoptintrin.h pkuintrin.h"
need_64bit_hwint=yes
;;
ia64-*-*)
--- gcc/common/config/i386/i386-common.c (revision 231943)
+++ gcc/common/config/i386/i386-common.c (revision 231945)
@@ -98,6 +98,7 @@ along with GCC; see the file COPYING3.
#define OPTION_MASK_ISA_RDRND_SET OPTION_MASK_ISA_RDRND
#define OPTION_MASK_ISA_F16C_SET \
(OPTION_MASK_ISA_F16C | OPTION_MASK_ISA_AVX_SET)
+#define OPTION_MASK_ISA_PKU_SET OPTION_MASK_ISA_PKU
/* Define a set of ISAs which aren't available when a given ISA is
disabled. MMX and SSE ISAs are handled separately. */
@@ -164,6 +165,7 @@ along with GCC; see the file COPYING3.
#define OPTION_MASK_ISA_FSGSBASE_UNSET OPTION_MASK_ISA_FSGSBASE
#define OPTION_MASK_ISA_RDRND_UNSET OPTION_MASK_ISA_RDRND
#define OPTION_MASK_ISA_F16C_UNSET OPTION_MASK_ISA_F16C
+#define OPTION_MASK_ISA_PKU_UNSET OPTION_MASK_ISA_PKU
/* Implement TARGET_HANDLE_OPTION. */
@@ -659,6 +661,19 @@ ix86_handle_option (struct gcc_options *
}
return true;
+ case OPT_mpku:
+ if (value)
+ {
+ opts->x_ix86_isa_flags |= OPTION_MASK_ISA_PKU_SET;
+ opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_PKU_SET;
+ }
+ else
+ {
+ opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_PKU_UNSET;
+ opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_PKU_UNSET;
+ }
+ return true;
+
/* Comes from final.c -- no real reason to change it. */
#define MAX_CODE_ALIGN 16
--- gcc/config/i386/i386.h (revision 231943)
+++ gcc/config/i386/i386.h (revision 231945)
@@ -80,6 +80,7 @@ see the files COPYING3 and COPYING.RUNTI
#define TARGET_FXSR TARGET_ISA_FXSR
#define TARGET_XSAVE TARGET_ISA_XSAVE
#define TARGET_XSAVEOPT TARGET_ISA_XSAVEOPT
+#define TARGET_PKU TARGET_ISA_PKU
#define TARGET_LP64 TARGET_ABI_64
#define TARGET_X32 TARGET_ABI_X32
--- gcc/config/i386/i386.md (revision 231943)
+++ gcc/config/i386/i386.md (revision 231945)
@@ -224,6 +224,9 @@ (define_c_enum "unspecv" [
UNSPECV_XTEST
UNSPECV_NLGR
+
+ ;; For RDPKRU and WRPKRU support
+ UNSPECV_PKU
])
;; Constants to represent rounding modes in the ROUND instruction
@@ -18289,6 +18292,48 @@ (define_insn "xtest_1"
[(set_attr "type" "other")
(set_attr "length" "3")])
+;; RDPKRU and WRPKRU
+
+(define_expand "rdpkru"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand")
+ (unspec_volatile:SI [(match_dup 1)] UNSPECV_PKU))
+ (set (match_dup 2) (const_int 0))])]
+ "TARGET_PKU"
+{
+ operands[1] = force_reg (SImode, const0_rtx);
+ operands[2] = gen_reg_rtx (SImode);
+})
+
+(define_insn "*rdpkru"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec_volatile:SI [(match_operand:SI 2 "register_operand" "c")]
+ UNSPECV_PKU))
+ (set (match_operand:SI 1 "register_operand" "=d")
+ (const_int 0))]
+ "TARGET_PKU"
+ "rdpkru"
+ [(set_attr "type" "other")])
+
+(define_expand "wrpkru"
+ [(unspec_volatile:SI
+ [(match_operand:SI 0 "register_operand")
+ (match_dup 1) (match_dup 2)] UNSPECV_PKU)]
+ "TARGET_PKU"
+{
+ operands[1] = force_reg (SImode, const0_rtx);
+ operands[2] = force_reg (SImode, const0_rtx);
+})
+
+(define_insn "*wrpkru"
+ [(unspec_volatile:SI
+ [(match_operand:SI 0 "register_operand" "a")
+ (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "c")] UNSPECV_PKU)]
+ "TARGET_PKU"
+ "wrpkru"
+ [(set_attr "type" "other")])
+
(include "mmx.md")
(include "sse.md")
(include "sync.md")
--- gcc/config/i386/pkuintrin.h (revision 0)
+++ gcc/config/i386/pkuintrin.h (revision 231945)
@@ -0,0 +1,45 @@
+/* Copyright (C) 2015 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if !defined _X86INTRIN_H_INCLUDED
+# error "Never use <pkuintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef _PKUINTRIN_H_INCLUDED
+#define _PKUINTRIN_H_INCLUDED
+
+extern __inline unsigned int
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_rdpkru_u32(void)
+{
+ return __builtin_ia32_rdpkru ();
+}
+
+extern __inline void
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_wrpkru(unsigned int key)
+{
+ return __builtin_ia32_wrpkru (key);
+}
+
+#endif /* _PKUINTRIN_H_INCLUDED */
--- gcc/config/i386/cpuid.h (revision 231943)
+++ gcc/config/i386/cpuid.h (revision 231945)
@@ -74,6 +74,10 @@
#define bit_RDSEED (1 << 18)
#define bit_ADX (1 << 19)
+/* %ecx */
+#define bit_PKU (1 << 3)
+#define bit_OSPKE (1 << 4)
+
/* Extended State Enumeration Sub-leaf (%eax == 13, %ecx == 1) */
#define bit_XSAVEOPT (1 << 0)
--- gcc/config/i386/x86intrin.h (revision 231943)
+++ gcc/config/i386/x86intrin.h (revision 231945)
@@ -119,4 +119,8 @@
#include <adxintrin.h>
+#ifdef __PKU__
+#include <pkuintrin.h>
+#endif
+
#endif /* _X86INTRIN_H_INCLUDED */
--- gcc/config/i386/i386-c.c (revision 231943)
+++ gcc/config/i386/i386-c.c (revision 231945)
@@ -348,6 +348,8 @@ ix86_target_macros_internal (HOST_WIDE_I
def_or_undef (parse_in, "__XSAVE__");
if (isa_flag & OPTION_MASK_ISA_XSAVEOPT)
def_or_undef (parse_in, "__XSAVEOPT__");
+ if (isa_flag & OPTION_MASK_ISA_PKU)
+ def_or_undef (parse_in, "__PKU__");
if ((fpmath & FPMATH_SSE) && (isa_flag & OPTION_MASK_ISA_SSE))
def_or_undef (parse_in, "__SSE_MATH__");
if ((fpmath & FPMATH_SSE) && (isa_flag & OPTION_MASK_ISA_SSE2))
--- gcc/config/i386/i386.opt (revision 231943)
+++ gcc/config/i386/i386.opt (revision 231945)
@@ -626,3 +626,7 @@ Split 32-byte AVX unaligned store
mrtm
Target Report Mask(ISA_RTM) Var(ix86_isa_flags) Save
Support RTM built-in functions and code generation
+
+mpku
+Target Report Mask(ISA_PKU) Var(ix86_isa_flags) Save
+Support PKU built-in functions and code generation
--- gcc/config/i386/driver-i386.c (revision 231943)
+++ gcc/config/i386/driver-i386.c (revision 231945)
@@ -408,6 +408,7 @@ const char *host_detect_local_cpu (int a
unsigned int has_rdrnd = 0, has_f16c = 0, has_fsgsbase = 0;
unsigned int has_rdseed = 0, has_prfchw = 0, has_adx = 0;
unsigned int has_osxsave = 0, has_fxsr = 0, has_xsave = 0, has_xsaveopt = 0;
+ unsigned int has_pku = 0;
bool arch;
@@ -479,6 +480,8 @@ const char *host_detect_local_cpu (int a
has_fsgsbase = ebx & bit_FSGSBASE;
has_rdseed = ebx & bit_RDSEED;
has_adx = ebx & bit_ADX;
+
+ has_pku = ecx & bit_OSPKE;
}
if (max_level >= 13)
@@ -855,12 +858,13 @@ const char *host_detect_local_cpu (int a
const char *fxsr = has_fxsr ? " -mfxsr" : " -mno-fxsr";
const char *xsave = has_xsave ? " -mxsave" : " -mno-xsave";
const char *xsaveopt = has_xsaveopt ? " -mxsaveopt" : " -mno-xsaveopt";
+ const char *pku = has_pku ? " -mpku" : " -mno-pku";
options = concat (options, cx16, sahf, movbe, ase, pclmul,
popcnt, abm, lwp, fma, fma4, xop, bmi, bmi2,
tbm, avx, avx2, sse4_2, sse4_1, lzcnt, rtm,
hle, rdrnd, f16c, fsgsbase, rdseed, prfchw, adx,
- fxsr, xsave, xsaveopt, NULL);
+ fxsr, xsave, xsaveopt, pku, NULL);
}
done:
--- gcc/config/i386/i386.c (revision 231943)
+++ gcc/config/i386/i386.c (revision 231945)
@@ -2632,6 +2632,7 @@ ix86_target_string (HOST_WIDE_INT isa, i
{ "-mrtm", OPTION_MASK_ISA_RTM },
{ "-mxsave", OPTION_MASK_ISA_XSAVE },
{ "-mxsaveopt", OPTION_MASK_ISA_XSAVEOPT },
+ { "-mpku", OPTION_MASK_ISA_PKU },
};
/* Flag options. */
@@ -2905,6 +2906,7 @@ ix86_option_override_internal (bool main
#define PTA_FXSR (HOST_WIDE_INT_1 << 37)
#define PTA_XSAVE (HOST_WIDE_INT_1 << 38)
#define PTA_XSAVEOPT (HOST_WIDE_INT_1 << 39)
+#define PTA_PKU (HOST_WIDE_INT_1 << 60)
/* if this reaches 64, need to widen struct pta flags below */
@@ -3429,6 +3431,9 @@ ix86_option_override_internal (bool main
if (processor_alias_table[i].flags & PTA_XSAVEOPT
&& !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVEOPT))
ix86_isa_flags |= OPTION_MASK_ISA_XSAVEOPT;
+ if (processor_alias_table[i].flags & PTA_PKU
+ && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PKU))
+ ix86_isa_flags |= OPTION_MASK_ISA_PKU;
if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
x86_prefetch_sse = true;
@@ -4220,6 +4225,7 @@ ix86_valid_target_attribute_inner_p (tre
IX86_ATTR_ISA ("fxsr", OPT_mfxsr),
IX86_ATTR_ISA ("xsave", OPT_mxsave),
IX86_ATTR_ISA ("xsaveopt", OPT_mxsaveopt),
+ IX86_ATTR_ISA ("pku", OPT_mpku),
/* enum options */
IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
@@ -27042,6 +27048,10 @@ enum ix86_builtins
IX86_BUILTIN_CPU_IS,
IX86_BUILTIN_CPU_SUPPORTS,
+ /* PKU instructions. */
+ IX86_BUILTIN_RDPKRU,
+ IX86_BUILTIN_WRPKRU,
+
IX86_BUILTIN_MAX
};
@@ -27357,6 +27367,10 @@ static const struct builtin_description
{ OPTION_MASK_ISA_RTM, CODE_FOR_xbegin, "__builtin_ia32_xbegin", IX86_BUILTIN_XBEGIN, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
{ OPTION_MASK_ISA_RTM, CODE_FOR_xend, "__builtin_ia32_xend", IX86_BUILTIN_XEND, UNKNOWN, (int) VOID_FTYPE_VOID },
{ OPTION_MASK_ISA_RTM, CODE_FOR_xtest, "__builtin_ia32_xtest", IX86_BUILTIN_XTEST, UNKNOWN, (int) INT_FTYPE_VOID },
+
+ /* RDPKRU and WRPKRU. */
+ { OPTION_MASK_ISA_PKU, CODE_FOR_rdpkru, "__builtin_ia32_rdpkru", IX86_BUILTIN_RDPKRU, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
+ { OPTION_MASK_ISA_PKU, CODE_FOR_wrpkru, "__builtin_ia32_wrpkru", IX86_BUILTIN_WRPKRU, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
};
/* Builtins with variable number of arguments. */
--- gcc/doc/extend.texi (revision 231943)
+++ gcc/doc/extend.texi (revision 231945)
@@ -10996,6 +10996,13 @@ void __builtin_ia32_xabort (status)
int __builtin_ia32_xtest ()
@end smallexample
+The following built-in functions are available when @option{-mpku} is used.
+They generate reads and writes to PKRU.
+@smallexample
+void __builtin_ia32_wrpkru (unsigned int)
+unsigned int __builtin_ia32_rdpkru ()
+@end smallexample
+
@node X86 transactional memory intrinsics
@subsection X86 transaction memory intrinsics
--- gcc/doc/invoke.texi (revision 231943)
+++ gcc/doc/invoke.texi (revision 231945)
@@ -645,7 +645,7 @@ Objective-C and Objective-C++ Dialects}.
-mmmx -msse -msse2 -msse3 -mssse3 -msse4.1 -msse4.2 -msse4 -mavx @gol
-mavx2 -maes -mpclmul -mfsgsbase -mrdrnd -mf16c -mfma @gol
-msse4a -m3dnow -mpopcnt -mabm -mbmi -mtbm -mfma4 -mxop -mlzcnt @gol
--mbmi2 -mrtm -mlwp -mthreads @gol
+-mbmi2 -mrtm -mlwp -mpku -mthreads @gol
-mno-align-stringops -minline-all-stringops @gol
-minline-stringops-dynamically -mstringop-strategy=@var{alg} @gol
-mpush-args -maccumulate-outgoing-args -m128bit-long-double @gol
@@ -14326,6 +14326,8 @@ preferred alignment to @option{-mpreferr
@itemx -mlzcnt
@itemx -mno-lzcnt
@itemx -mrtm
+@itemx -mpku
+@itemx -mno-pku
@itemx -mtbm
@itemx -mno-tbm
@opindex mmmx
@@ -14336,7 +14338,7 @@ preferred alignment to @option{-mpreferr
@opindex mno-3dnow
These switches enable or disable the use of instructions in the MMX, SSE,
SSE2, SSE3, SSSE3, SSE4.1, AVX, AVX2, AES, PCLMUL, FSGSBASE, RDRND, F16C,
-FMA, SSE4A, FMA4, XOP, LWP, ABM, BMI, BMI2, LZCNT, RTM or 3DNow!@:
+FMA, SSE4A, FMA4, XOP, LWP, ABM, BMI, BMI2, LZCNT, RTM, PKU or 3DNow!@:
extended instruction sets.
These extensions are also available as built-in functions: see
@ref{X86 Built-in Functions}, for details of the functions enabled and
--- gcc/testsuite/gcc.target/i386/sse-12.c (revision 231943)
+++ gcc/testsuite/gcc.target/i386/sse-12.c (revision 231945)
@@ -3,7 +3,7 @@
popcntintrin.h and mm_malloc.h are usable
with -O -std=c89 -pedantic-errors. */
/* { dg-do compile } */
-/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt" } */
+/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mpku" } */
#include <x86intrin.h>
--- gcc/testsuite/gcc.target/i386/sse-13.c (revision 231943)
+++ gcc/testsuite/gcc.target/i386/sse-13.c (revision 231945)
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt" } */
+/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mpku" } */
#include <mm_malloc.h>
--- gcc/testsuite/gcc.target/i386/sse-22.c (revision 231943)
+++ gcc/testsuite/gcc.target/i386/sse-22.c (revision 231945)
@@ -268,7 +268,7 @@ test_2 (_mm_clmulepi64_si128, __m128i, _
/* x86intrin.h (FMA4/XOP/LWP/BMI/BMI2/TBM/LZCNT/FMA). */
#ifdef DIFFERENT_PRAGMAS
-#pragma GCC target ("fma4,xop,lwp,bmi,bmi2,tbm,lzcnt,fma,rdseed,prfchw,adx,fxsr,xsaveopt")
+#pragma GCC target ("fma4,xop,lwp,bmi,bmi2,tbm,lzcnt,fma,rdseed,prfchw,adx,fxsr,xsaveopt,pku")
#endif
#include <x86intrin.h>
/* xopintrin.h */
--- gcc/testsuite/gcc.target/i386/sse-23.c (revision 231943)
+++ gcc/testsuite/gcc.target/i386/sse-23.c (revision 231945)
@@ -183,7 +183,7 @@
/* rtmintrin.h */
#define __builtin_ia32_xabort(M) __builtin_ia32_xabort(1)
-#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt")
+#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,pku")
#include <wmmintrin.h>
#include <smmintrin.h>
#include <mm3dnow.h>
--- gcc/testsuite/gcc.target/i386/rdpku-1.c (revision 0)
+++ gcc/testsuite/gcc.target/i386/rdpku-1.c (revision 231945)
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-mpku -O2" } */
+/* { dg-final { scan-assembler "rdpkru\n" } } */
+
+#include <x86intrin.h>
+
+unsigned extern
+rdpku_test (void)
+{
+ return _rdpkru_u32 ();
+}
--- gcc/testsuite/gcc.target/i386/wrpku-1.c (revision 0)
+++ gcc/testsuite/gcc.target/i386/wrpku-1.c (revision 231945)
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-mpku -O2" } */
+/* { dg-final { scan-assembler "wrpkru\n" } } */
+
+#include <x86intrin.h>
+
+void extern
+wrpku_test (unsigned int key)
+{
+ _wrpkru (key);
+}
--- gcc/testsuite/g++.dg/other/i386-2.C (revision 231943)
+++ gcc/testsuite/g++.dg/other/i386-2.C (revision 231945)
@@ -1,9 +1,9 @@
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */
-/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt" } */
+/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mpku" } */
/* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h,
xopintrin.h, abmintrin.h, bmiintrin.h, tbmintrin.h, lwpintrin.h,
- popcntintrin.h, fmaintrin.h and mm_malloc.h.h are usable with
+ popcntintrin.h, fmaintrin.h, pkuintrin.h and mm_malloc.h.h are usable with
-O -pedantic-errors. */
#include <x86intrin.h>
--- gcc/testsuite/g++.dg/other/i386-3.C (revision 231943)
+++ gcc/testsuite/g++.dg/other/i386-3.C (revision 231945)
@@ -1,9 +1,9 @@
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */
-/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt" } */
+/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mpku" } */
/* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h,
xopintrin.h, abmintrin.h, bmiintrin.h, tbmintrin.h, lwpintrin.h,
- popcntintrin.h, fmaintrin.h and mm_malloc.h are usable with
+ popcntintrin.h, fmaintrin.h, pkuintrin.h and mm_malloc.h are usable with
-O -fkeep-inline-functions. */
#include <x86intrin.h>

View File

@ -0,0 +1,76 @@
2015-12-02 Pierre-Marie de Rodat <derodat@adacore.com>
* dwarf2out.c (dwarf2out_var_location): Enhance pattern matching to get
the SYMBOL_REF they embed.
(gen_subprogram_die): Handle such calls.
--- gcc/dwarf2out.c (revision 231184)
+++ gcc/dwarf2out.c (revision 231185)
@@ -18051,18 +18051,23 @@ gen_subprogram_die (tree decl, dw_die_re
}
if (mode == VOIDmode || mode == BLKmode)
continue;
- if (XEXP (XEXP (arg, 0), 0) == pc_rtx)
+ /* Get dynamic information about call target only if we
+ have no static information: we cannot generate both
+ DW_AT_abstract_origin and DW_AT_GNU_call_site_target
+ attributes. */
+ if (ca_loc->symbol_ref == NULL_RTX)
{
- gcc_assert (ca_loc->symbol_ref == NULL_RTX);
- tloc = XEXP (XEXP (arg, 0), 1);
- continue;
- }
- else if (GET_CODE (XEXP (XEXP (arg, 0), 0)) == CLOBBER
- && XEXP (XEXP (XEXP (arg, 0), 0), 0) == pc_rtx)
- {
- gcc_assert (ca_loc->symbol_ref == NULL_RTX);
- tlocc = XEXP (XEXP (arg, 0), 1);
- continue;
+ if (XEXP (XEXP (arg, 0), 0) == pc_rtx)
+ {
+ tloc = XEXP (XEXP (arg, 0), 1);
+ continue;
+ }
+ else if (GET_CODE (XEXP (XEXP (arg, 0), 0)) == CLOBBER
+ && XEXP (XEXP (XEXP (arg, 0), 0), 0) == pc_rtx)
+ {
+ tlocc = XEXP (XEXP (arg, 0), 1);
+ continue;
+ }
}
reg = NULL;
if (REG_P (XEXP (XEXP (arg, 0), 0)))
@@ -20842,15 +20847,27 @@ dwarf2out_var_location (rtx loc_note)
if (!CALL_P (prev))
prev = XVECEXP (PATTERN (prev), 0, 0);
ca_loc->tail_call_p = SIBLING_CALL_P (prev);
+
+ /* Look for a SYMBOL_REF in the "prev" instruction. */
x = get_call_rtx_from (PATTERN (prev));
if (x)
{
- x = XEXP (XEXP (x, 0), 0);
- if (GET_CODE (x) == SYMBOL_REF
- && SYMBOL_REF_DECL (x)
- && TREE_CODE (SYMBOL_REF_DECL (x)) == FUNCTION_DECL)
- ca_loc->symbol_ref = x;
+ /* Try to get the call symbol, if any. */
+ if (MEM_P (XEXP (x, 0)))
+ x = XEXP (x, 0);
+ /* First, look for a memory access to a symbol_ref. */
+ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && SYMBOL_REF_DECL (XEXP (x, 0))
+ && TREE_CODE (SYMBOL_REF_DECL (XEXP (x, 0))) == FUNCTION_DECL)
+ ca_loc->symbol_ref = XEXP (x, 0);
+ /* Otherwise, look at a compile-time known user-level function
+ declaration. */
+ else if (MEM_P (x)
+ && MEM_EXPR (x)
+ && TREE_CODE (MEM_EXPR (x)) == FUNCTION_DECL)
+ ca_loc->symbol_ref = XEXP (DECL_RTL (MEM_EXPR (x)), 0);
}
+
ca_loc->block = insn_scope (prev);
if (call_arg_locations)
call_arg_loc_last->next = ca_loc;

View File

@ -0,0 +1,24 @@
2016-05-26 Martin Sebor <msebor@redhat.com>
Jakub Jelinek <jakub@redhat.com>
* asan/asan_rtl.cc (InitializeHighMemEnd): Backport part of upstream
r221457 fix and typo fix from r206158.
--- libsanitizer/asan/asan_rtl.cc.jj 2013-03-04 12:44:18.000000000 +0100
+++ libsanitizer/asan/asan_rtl.cc 2016-05-26 09:57:10.761973999 +0200
@@ -308,11 +308,13 @@ static void InitializeHighMemEnd() {
# if defined(__powerpc64__)
// FIXME:
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
- // We somehow need to figure our which one we are using now and choose
+ // We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
// Note that with 'ulimit -s unlimited' the stack is moved away from the top
// of the address space, so simply checking the stack address is not enough.
- kHighMemEnd = (1ULL << 44) - 1; // 0x00000fffffffffffUL
+ // This should (does) work for both PowerPC64 Endian modes.
+ kHighMemEnd =
+ (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
# else
kHighMemEnd = (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# endif
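
The backported computation sizes kHighMemEnd from the address of the current stack frame instead of hard-coding 44 bits. A rough standalone model of the same arithmetic (editorial sketch only; it assumes a 64-bit target and substitutes __builtin_clzll and a local address for the sanitizer-internal MostSignificantSetBitIndex and GET_CURRENT_FRAME):

#include <stdint.h>
#include <stdio.h>

/* Index of the most significant set bit; x must be non-zero.  */
static unsigned
msb_index (uint64_t x)
{
  return 63u - (unsigned) __builtin_clzll (x);
}

int
main (void)
{
  int marker;
  uint64_t frame = (uint64_t) (uintptr_t) &marker;  /* ~GET_CURRENT_FRAME */
  /* Round the frame address up to a power of two, minus one: under a
     44-bit layout this yields 0x00000fffffffffff, under 46 bits
     0x00003fffffffffff -- exactly the two candidate values.  */
  uint64_t high_mem_end = (1ULL << (msb_index (frame) + 1)) - 1;
  printf ("kHighMemEnd = 0x%016llx\n", (unsigned long long) high_mem_end);
  return 0;
}
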

View File

@ -0,0 +1,37 @@
2016-06-14 Jason Merrill <jason@redhat.com>
* call.c (add_function_candidate): Be more careful about
ref-to-ptr conversion.
2016-06-15 Jakub Jelinek <jakub@redhat.com>
* g++.dg/cpp0x/ref-qual17.C: New test.
--- gcc/cp/call.c.jj 2014-08-06 10:45:03.260163142 +0200
+++ gcc/cp/call.c 2016-06-15 11:15:06.663878423 +0200
@@ -1975,7 +1975,9 @@ add_function_candidate (struct z_candida
bool rv = FUNCTION_RVALUE_QUALIFIED (TREE_TYPE (fn));
parmtype = cp_build_reference_type (parmtype, rv);
if (TREE_CODE (arg) == CONVERT_EXPR
- && TYPE_PTR_P (TREE_TYPE (arg)))
+ && TYPE_PTR_P (TREE_TYPE (arg))
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND (arg, 0)))
+ == REFERENCE_TYPE))
/* Strip conversion from reference to pointer. */
arg = TREE_OPERAND (arg, 0);
arg = build_fold_indirect_ref (arg);
--- gcc/testsuite/g++.dg/cpp0x/ref-qual17.C.jj 2016-06-15 11:12:57.692558903 +0200
+++ gcc/testsuite/g++.dg/cpp0x/ref-qual17.C 2016-06-15 11:07:02.000000000 +0200
@@ -0,0 +1,12 @@
+// { dg-do compile { target c++11 } }
+
+struct A
+{
+ void foo () &;
+};
+
+void
+bar (__UINTPTR_TYPE__ a)
+{
+ reinterpret_cast<A *>(a)->foo ();
+}

View File

@ -0,0 +1,52 @@
2017-03-07 Jakub Jelinek <jakub@redhat.com>
Partial backport
2016-05-07 Fritz Reese <fritzoreese@gmail.com>
PR fortran/56226
* interface.c (gfc_compare_derived_types): Don't ICE if the
derived type or both types have no components.
* gfortran.dg/rh1369183.f90: New test.
--- gcc/fortran/interface.c.jj 2015-06-18 16:32:45.000000000 +0200
+++ gcc/fortran/interface.c 2017-03-07 18:35:38.982302826 +0100
@@ -418,6 +418,13 @@ gfc_compare_derived_types (gfc_symbol *d
&& !(derived1->attr.is_bind_c && derived2->attr.is_bind_c))
return 0;
+ /* Protect against null components. */
+ if (derived1->attr.zero_comp != derived2->attr.zero_comp)
+ return 0;
+
+ if (derived1->attr.zero_comp)
+ return 1;
+
dt1 = derived1->components;
dt2 = derived2->components;
--- gcc/testsuite/gfortran.dg/rh1369183.f90.jj 2017-03-07 18:37:39.574775432 +0100
+++ gcc/testsuite/gfortran.dg/rh1369183.f90 2017-03-07 18:38:38.423993194 +0100
@@ -0,0 +1,22 @@
+! { dg-do compile }
+
+module mod1369183
+ implicit none
+ contains
+ subroutine sub(test)
+ type test_t
+ sequence
+ integer(4) type
+ end type test_t
+ type(test_t),intent(inout) :: test
+ end subroutine sub
+end module mod1369183
+subroutine rh1369183
+ use mod1369183
+ implicit none
+ type test_t
+ sequence
+ end type test_t
+ type(test_t) :: tst
+ call sub(tst) ! { dg-error "Type mismatch in argument" }
+end subroutine rh1369183

View File

@ -0,0 +1,32 @@
--- gcc/reload.c (revision 235552)
+++ gcc/reload.c (working copy)
@@ -4054,14 +4054,14 @@ find_reloads (rtx insn, int replace, int
&XEXP (recog_data.operand[i], 0), (rtx*) 0,
base_reg_class (VOIDmode, as, MEM, SCRATCH),
address_mode,
- VOIDmode, 0, 0, i, RELOAD_FOR_INPUT);
+ VOIDmode, 0, 0, i, RELOAD_OTHER);
rld[operand_reloadnum[i]].inc
= GET_MODE_SIZE (GET_MODE (recog_data.operand[i]));
/* If this operand is an output, we will have made any
reloads for its address as RELOAD_FOR_OUTPUT_ADDRESS, but
now we are treating part of the operand as an input, so
- we must change these to RELOAD_FOR_INPUT_ADDRESS. */
+ we must change these to RELOAD_FOR_OTHER_ADDRESS. */
if (modified[i] == RELOAD_WRITE)
{
@@ -4070,10 +4070,10 @@ find_reloads (rtx insn, int replace, int
if (rld[j].opnum == i)
{
if (rld[j].when_needed == RELOAD_FOR_OUTPUT_ADDRESS)
- rld[j].when_needed = RELOAD_FOR_INPUT_ADDRESS;
+ rld[j].when_needed = RELOAD_FOR_OTHER_ADDRESS;
else if (rld[j].when_needed
== RELOAD_FOR_OUTADDR_ADDRESS)
- rld[j].when_needed = RELOAD_FOR_INPADDR_ADDRESS;
+ rld[j].when_needed = RELOAD_FOR_OTHER_ADDRESS;
}
}
}

View File

@ -0,0 +1,16 @@
2014-01-15 Pat Haugen <pthaugen@us.ibm.com>
* config/rs6000/rs6000.c (rs6000_output_function_prologue): Check if
current procedure should be profiled.
--- gcc/config/rs6000/rs6000.c
+++ gcc/config/rs6000/rs6000.c
@@ -23198,7 +23198,7 @@ rs6000_output_function_prologue (FILE *file,
/* Output -mprofile-kernel code. This needs to be done here instead of
in output_function_profile since it must go after the ELFv2 ABI
local entry point. */
- if (TARGET_PROFILE_KERNEL)
+ if (TARGET_PROFILE_KERNEL && crtl->profile)
{
gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
gcc_assert (!TARGET_32BIT);

View File

@ -0,0 +1,163 @@
2014-05-05 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
* target.def: Add new target hook.
* doc/tm.texi: Regenerate.
* targhooks.h (default_keep_leaf_when_profiled): Add prototype.
* targhooks.c (default_keep_leaf_when_profiled): New function.
2015-04-23 Anton Blanchard <anton@samba.org>
* config/rs6000/rs6000.c (rs6000_output_function_prologue): No
need for -mprofile-kernel to save LR to stack.
2016-01-21 Anton Blanchard <anton@samba.org>
Bill Schmidt <wschmidt@linux.vnet.ibm.com>
PR target/63354
* config/rs6000/linux64.h (TARGET_KEEP_LEAF_WHEN_PROFILED): New
#define.
* config/rs6000/rs6000.c (rs6000_keep_leaf_when_profiled): New
function.
* gcc.target/powerpc/pr63354.c: New test.
--- gcc/doc/tm.texi
+++ gcc/doc/tm.texi
@@ -4953,6 +4953,10 @@ Define this macro if the code for function profiling should come before
the function prologue. Normally, the profiling code comes after.
@end defmac
+@deftypefn {Target Hook} bool TARGET_KEEP_LEAF_WHEN_PROFILED (void)
+This target hook returns true if the target wants the leaf flag for the current function to stay true even if it calls mcount. This might make sense for targets using the leaf flag only to determine whether a stack frame needs to be generated or not and for which the call to mcount is generated before the function prologue.
+@end deftypefn
+
@node Tail Calls
@subsection Permitting tail calls
@cindex tail calls
--- gcc/doc/tm.texi.in
+++ gcc/doc/tm.texi.in
@@ -3963,6 +3963,8 @@ Define this macro if the code for function profiling should come before
the function prologue. Normally, the profiling code comes after.
@end defmac
+@hook TARGET_KEEP_LEAF_WHEN_PROFILED
+
@node Tail Calls
@subsection Permitting tail calls
@cindex tail calls
--- gcc/final.c
+++ gcc/final.c
@@ -4241,7 +4241,9 @@ leaf_function_p (void)
{
rtx insn;
- if (crtl->profile || profile_arc_flag)
+ /* Some back-ends (e.g. s390) want leaf functions to stay leaf
+ functions even if they call mcount. */
+ if (crtl->profile && !targetm.keep_leaf_when_profiled ())
return 0;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
--- gcc/target.def
+++ gcc/target.def
@@ -2658,6 +2658,18 @@ The default version of this hook use the target macro\n\
bool, (void),
default_profile_before_prologue)
+/* Return true if a leaf function should stay leaf even with profiling
+ enabled. */
+DEFHOOK
+(keep_leaf_when_profiled,
+ "This target hook returns true if the target wants the leaf flag for\
+ the current function to stay true even if it calls mcount. This might\
+ make sense for targets using the leaf flag only to determine whether a\
+ stack frame needs to be generated or not and for which the call to\
+ mcount is generated before the function prologue.",
+ bool, (void),
+ default_keep_leaf_when_profiled)
+
/* Modify and return the identifier of a DECL's external name,
originally identified by ID, as required by the target,
(eg, append @nn to windows32 stdcall function names).
--- gcc/targhooks.c
+++ gcc/targhooks.c
@@ -1447,6 +1447,15 @@ default_get_reg_raw_mode (int regno)
return reg_raw_mode[regno];
}
+/* Return true if a leaf function should stay leaf even with profiling
+ enabled. */
+
+bool
+default_keep_leaf_when_profiled ()
+{
+ return false;
+}
+
/* Return true if the state of option OPTION should be stored in PCH files
and checked by default_pch_valid_p. Store the option's current state
in STATE if so. */
--- gcc/targhooks.h
+++ gcc/targhooks.h
@@ -188,6 +188,7 @@ extern section * default_function_sectio
bool startup, bool exit);
extern enum machine_mode default_dwarf_frame_reg_mode (int);
extern enum machine_mode default_get_reg_raw_mode(int);
+extern bool default_keep_leaf_when_profiled ();
extern void *default_get_pch_validity (size_t *);
extern const char *default_pch_valid_p (const void *, size_t);
--- gcc/config/rs6000/rs6000.c
+++ gcc/config/rs6000/rs6000.c
@@ -24433,7 +24433,6 @@ rs6000_output_function_prologue (FILE *file,
gcc_assert (!TARGET_32BIT);
asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
- asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
/* In the ELFv2 ABI we have no compiler stack word. It must be
the responsibility of _mcount to preserve the static chain
--- gcc/config/rs6000/linux64.h
+++ gcc/config/rs6000/linux64.h
@@ -59,6 +59,9 @@ extern int dot_symbols;
#define TARGET_PROFILE_KERNEL profile_kernel
+#undef TARGET_KEEP_LEAF_WHEN_PROFILED
+#define TARGET_KEEP_LEAF_WHEN_PROFILED rs6000_keep_leaf_when_profiled
+
#define TARGET_USES_LINUX64_OPT 1
#ifdef HAVE_LD_LARGE_TOC
#undef TARGET_CMODEL
--- gcc/config/rs6000/rs6000.c
+++ gcc/config/rs6000/rs6000.c
@@ -26237,6 +26237,14 @@ rs6000_output_function_prologue (FILE *file,
rs6000_pic_labelno++;
}
+/* -mprofile-kernel code calls mcount before the function prolog,
+ so a profiled leaf function should stay a leaf function. */
+static bool
+rs6000_keep_leaf_when_profiled ()
+{
+ return TARGET_PROFILE_KERNEL;
+}
+
/* Non-zero if vmx regs are restored before the frame pop, zero if
we restore after the pop when possible. */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
--- /dev/null
+++ gcc/testsuite/gcc.target/powerpc/pr63354.c
@@ -0,0 +1,12 @@
+/* Verify that we don't stack a frame for leaf functions when using
+ -pg -mprofile-kernel. */
+
+/* { dg-do compile { target { powerpc64*-*-* } } } */
+/* { dg-options "-O2 -pg -mprofile-kernel" } */
+/* { dg-require-effective-target lp64 } */
+/* { dg-final { scan-assembler-not "mtlr" } } */
+
+int foo(void)
+{
+ return 1;
+}

View File

@ -0,0 +1,223 @@
2017-07-25 Jonathan Wakely <jwakely@redhat.com>
PR libstdc++/53984
* include/bits/basic_ios.h (basic_ios::_M_setstate): Adjust comment.
* include/bits/istream.tcc (basic_istream::sentry): Handle exceptions
during construction.
* include/std/istream: Adjust comments for formatted input functions
and unformatted input functions.
* testsuite/27_io/basic_fstream/53984.cc: New.
* testsuite/27_io/basic_istream/sentry/char/53984.cc: New.
--- libstdc++-v3/include/bits/basic_ios.h
+++ libstdc++-v3/include/bits/basic_ios.h
@@ -157,8 +157,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
setstate(iostate __state)
{ this->clear(this->rdstate() | __state); }
- // Flip the internal state on for the proper state bits, then re
- // throws the propagated exception if bit also set in
+ // Flip the internal state on for the proper state bits, then
+ // rethrows the propagated exception if bit also set in
// exceptions().
void
_M_setstate(iostate __state)
--- libstdc++-v3/include/bits/istream.tcc
+++ libstdc++-v3/include/bits/istream.tcc
@@ -48,28 +48,36 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
ios_base::iostate __err = ios_base::goodbit;
if (__in.good())
- {
- if (__in.tie())
- __in.tie()->flush();
- if (!__noskip && bool(__in.flags() & ios_base::skipws))
- {
- const __int_type __eof = traits_type::eof();
- __streambuf_type* __sb = __in.rdbuf();
- __int_type __c = __sb->sgetc();
-
- const __ctype_type& __ct = __check_facet(__in._M_ctype);
- while (!traits_type::eq_int_type(__c, __eof)
- && __ct.is(ctype_base::space,
- traits_type::to_char_type(__c)))
- __c = __sb->snextc();
+ __try
+ {
+ if (__in.tie())
+ __in.tie()->flush();
+ if (!__noskip && bool(__in.flags() & ios_base::skipws))
+ {
+ const __int_type __eof = traits_type::eof();
+ __streambuf_type* __sb = __in.rdbuf();
+ __int_type __c = __sb->sgetc();
+
+ const __ctype_type& __ct = __check_facet(__in._M_ctype);
+ while (!traits_type::eq_int_type(__c, __eof)
+ && __ct.is(ctype_base::space,
+ traits_type::to_char_type(__c)))
+ __c = __sb->snextc();
- // _GLIBCXX_RESOLVE_LIB_DEFECTS
- // 195. Should basic_istream::sentry's constructor ever
- // set eofbit?
- if (traits_type::eq_int_type(__c, __eof))
- __err |= ios_base::eofbit;
- }
- }
+ // _GLIBCXX_RESOLVE_LIB_DEFECTS
+ // 195. Should basic_istream::sentry's constructor ever
+ // set eofbit?
+ if (traits_type::eq_int_type(__c, __eof))
+ __err |= ios_base::eofbit;
+ }
+ }
+ __catch(__cxxabiv1::__forced_unwind&)
+ {
+ __in._M_setstate(ios_base::badbit);
+ __throw_exception_again;
+ }
+ __catch(...)
+ { __in._M_setstate(ios_base::badbit); }
if (__in.good() && __err == ios_base::goodbit)
_M_ok = true;
--- libstdc++-v3/include/std/istream
+++ libstdc++-v3/include/std/istream
@@ -150,9 +150,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
* whatever data is appropriate for the type of the argument.
*
* If an exception is thrown during extraction, ios_base::badbit
- * will be turned on in the stream's error state without causing an
- * ios_base::failure to be thrown. The original exception will then
- * be rethrown.
+ * will be turned on in the stream's error state (without causing an
+ * ios_base::failure to be thrown) and the original exception will
+ * be rethrown if badbit is set in the exceptions mask.
*/
//@{
@@ -286,9 +286,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
* by gcount().
*
* If an exception is thrown during extraction, ios_base::badbit
- * will be turned on in the stream's error state without causing an
- * ios_base::failure to be thrown. The original exception will then
- * be rethrown.
+ * will be turned on in the stream's error state (without causing an
+ * ios_base::failure to be thrown) and the original exception will
+ * be rethrown if badbit is set in the exceptions mask.
*/
/**
--- /dev/null
+++ libstdc++-v3/testsuite/27_io/basic_fstream/53984.cc
@@ -0,0 +1,64 @@
+// Copyright (C) 2017 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-require-fileio "" }
+
+// PR libstdc++/53984
+
+#include <fstream>
+#include <testsuite_hooks.h>
+
+void
+test01()
+{
+ std::ifstream in(".");
+ if (in)
+ {
+ char c;
+ if (in.get(c))
+ {
+ // Reading a directory doesn't produce an error on this target
+ // so the formatted input functions below wouldn't fail anyway
+ // (see PR libstdc++/81808).
+ return;
+ }
+ int x;
+ in.clear();
+ // Formatted input function should set badbit, but not throw:
+ in >> x;
+ VERIFY( in.bad() );
+
+ in.clear();
+ in.exceptions(std::ios::badbit);
+ try
+ {
+ // Formatted input function should set badbit, and throw:
+ in >> x;
+ VERIFY( false );
+ }
+ catch (const std::exception&)
+ {
+ VERIFY( in.bad() );
+ }
+ }
+}
+
+int
+main()
+{
+ test01();
+}
--- /dev/null
+++ libstdc++-v3/testsuite/27_io/basic_istream/sentry/char/53984.cc
@@ -0,0 +1,41 @@
+// Copyright (C) 2017 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <streambuf>
+#include <istream>
+#include <testsuite_hooks.h>
+
+struct SB : std::streambuf
+{
+ virtual int_type underflow() { throw 1; }
+};
+
+void
+test01()
+{
+ SB sb;
+ std::istream is(&sb);
+ int i;
+ is >> i;
+ VERIFY( is.bad() );
+}
+
+int
+main()
+{
+ test01();
+}

View File

@ -0,0 +1,60 @@
2015-10-02 Uros Bizjak <ubizjak@gmail.com>
* system.h (ROUND_UP): New macro definition.
(ROUND_DOWN): Ditto.
* ggc-page.c (ROUND_UP): Remove local macro definition.
(PAGE_ALIGN): Implement using ROUND_UP macro.
2013-08-24 Marc Glisse <marc.glisse@inria.fr>
PR other/57324
* hwint.h (HOST_WIDE_INT_UC, HOST_WIDE_INT_1U, HOST_WIDE_INT_M1,
HOST_WIDE_INT_M1U): New macros.
diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index 5b18468439d..4fb41b1112b 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -216,10 +216,6 @@ static const size_t extra_order_size_table[] = {
#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
-/* Compute the smallest multiple of F that is >= X. */
-
-#define ROUND_UP(x, f) (CEIL (x, f) * (f))
-
/* Round X to next multiple of the page size */
#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
diff --git a/gcc/hwint.h b/gcc/hwint.h
index da62fadcc9e..64b1805345d 100644
--- a/gcc/hwint.h
+++ b/gcc/hwint.h
@@ -76,7 +76,9 @@ extern char sizeof_long_long_must_be_8[sizeof(long long) == 8 ? 1 : -1];
# endif
#endif
+#define HOST_WIDE_INT_UC(X) HOST_WIDE_INT_C (X ## U)
#define HOST_WIDE_INT_1 HOST_WIDE_INT_C(1)
+#define HOST_WIDE_INT_1U HOST_WIDE_INT_UC(1)
/* This is a magic identifier which allows GCC to figure out the type
of HOST_WIDE_INT for %wd specifier checks. You must issue this
diff --git a/gcc/system.h b/gcc/system.h
index 41cd565538a..8230d506fc3 100644
--- a/gcc/system.h
+++ b/gcc/system.h
@@ -348,6 +348,12 @@ extern int errno;
/* Returns the least number N such that N * Y >= X. */
#define CEIL(x,y) (((x) + (y) - 1) / (y))
+/* This macro rounds x up to the y boundary. */
+#define ROUND_UP(x,y) (((x) + (y) - 1) & ~((y) - 1))
+
+/* This macro rounds x down to the y boundary. */
+#define ROUND_DOWN(x,y) ((x) & ~((y) - 1))
+
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
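
One subtlety worth noting: the new mask-based ROUND_UP only matches the retired CEIL-based definition when y is a power of two, which holds for its page- and interval-sized uses here. A quick editorial check, assuming just the macros above:

#include <stdio.h>

#define CEIL(x,y) (((x) + (y) - 1) / (y))
#define ROUND_UP(x,y) (((x) + (y) - 1) & ~((y) - 1))
#define ROUND_DOWN(x,y) ((x) & ~((y) - 1))

int
main (void)
{
  /* Power-of-two boundary: mask form and CEIL form agree.  */
  printf ("%d %d\n", ROUND_UP (4097, 4096), CEIL (4097, 4096) * 4096);
  /* prints: 8192 8192 */

  /* Non-power-of-two boundary: only the CEIL form rounds correctly.  */
  printf ("%d %d\n", ROUND_UP (10, 6), CEIL (10, 6) * 6);
  /* prints: 10 12 -- the mask form silently misrounds */

  printf ("%d\n", ROUND_DOWN (4097, 4096));  /* prints: 4096 */
  return 0;
}
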

View File

@ -0,0 +1,52 @@
commit c22c3dee4bbf4a99b234307c63e4845052a15890
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Thu Sep 21 22:03:59 2017 +0000
* config/i386/i386.c (ix86_adjust_stack_and_probe_stack_clash):
Fix dump output if the only stack space is for pushed registers.
* lib/target-supports.exp
(check_effective_target_frame_pointer_for_non_leaf): Add
case for x86 Solaris.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@253082 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index a9072f58f50..d8a225195ae 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -9856,7 +9856,16 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
no probes are needed. */
if (!size)
{
- dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
+ struct ix86_frame frame;
+ ix86_compute_frame_layout (&frame);
+
+ /* However, the allocation of space via pushes for register
+ saves could be viewed as allocating space, but without the
+ need to probe. */
+ if (frame.nregs || frame.nsseregs || frame_pointer_needed)
+ dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
+ else
+ dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
return;
}
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index f24c5c6e0ac..7c126e4122b 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -5439,6 +5439,12 @@ proc check_effective_target_frame_pointer_for_non_leaf { } {
if { [istarget aarch*-*-*] } {
return 1
}
+
+ # Solaris/x86 defaults to -fno-omit-frame-pointer.
+ if { [istarget i?86-*-solaris*] || [istarget x86_64-*-solaris*] } {
+ return 1
+ }
+
return 0
}

View File

@ -0,0 +1,573 @@
commit 27d2a2d27f3e0060ade9a1a82ce2292aad6c6931
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Mon Sep 25 23:13:55 2017 +0000
* config/rs6000/rs6000-protos.h (output_probe_stack_range): Update
prototype for new argument.
* config/rs6000/rs6000.c (rs6000_emit_allocate_stack_1): New function,
mostly extracted from rs6000_emit_allocate_stack.
(rs6000_emit_probe_stack_range_stack_clash): New function.
(rs6000_emit_allocate_stack): Call
rs6000_emit_probe_stack_range_stack_clash as needed.
(rs6000_emit_probe_stack_range): Add additional argument
to call to gen_probe_stack_range{si,di}.
(output_probe_stack_range): New.
(output_probe_stack_range_1): Renamed from output_probe_stack_range.
(output_probe_stack_range_stack_clash): New.
(rs6000_emit_prologue): Emit notes into dump file as requested.
* rs6000.md (allocate_stack): Handle -fstack-clash-protection.
(probe_stack_range<P:mode>): Operand 0 is now early-clobbered.
Add additional operand and pass it to output_probe_stack_range.
* lib/target-supports.exp
(check_effective_target_supports_stack_clash_protection): Enable for
rs6000 and powerpc targets.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@253179 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/config/rs6000/rs6000-protos.h b/gcc/config/rs6000/rs6000-protos.h
index d4b93d9970d..cfb23ab80cc 100644
--- a/gcc/config/rs6000/rs6000-protos.h
+++ b/gcc/config/rs6000/rs6000-protos.h
@@ -114,7 +114,7 @@ extern void rs6000_emit_sCOND (enum machine_mode, rtx[]);
extern void rs6000_emit_cbranch (enum machine_mode, rtx[]);
extern char * output_cbranch (rtx, const char *, int, rtx);
extern char * output_e500_flip_gt_bit (rtx, rtx);
-extern const char * output_probe_stack_range (rtx, rtx);
+extern const char * output_probe_stack_range (rtx, rtx, rtx);
extern rtx rs6000_emit_set_const (rtx, enum machine_mode, rtx, int);
extern int rs6000_emit_cmove (rtx, rtx, rtx, rtx);
extern int rs6000_emit_vector_cond_expr (rtx, rtx, rtx, rtx, rtx, rtx);
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index a9052c6becf..c5d9988c1d9 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -22320,6 +22320,220 @@ rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
+/* Allocate SIZE_INT bytes on the stack using a store with update style insn
+ and set the appropriate attributes for the generated insn. Return the
+ first insn which adjusts the stack pointer or the last insn before
+ the stack adjustment loop.
+
+ SIZE_INT is used to create the CFI note for the allocation.
+
+ SIZE_RTX is an rtx containing the size of the adjustment. Note that
+ since stacks grow to lower addresses its runtime value is -SIZE_INT.
+
+ ORIG_SP contains the backchain value that must be stored at *sp. */
+
+static rtx
+rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
+{
+ rtx insn;
+
+ rtx size_rtx = GEN_INT (-size_int);
+ if (size_int > 32767)
+ {
+ rtx tmp_reg = gen_rtx_REG (Pmode, 0);
+ /* Need a note here so that try_split doesn't get confused. */
+ if (get_last_insn () == NULL_RTX)
+ emit_note (NOTE_INSN_DELETED);
+ insn = emit_move_insn (tmp_reg, size_rtx);
+ try_split (PATTERN (insn), insn, 0);
+ size_rtx = tmp_reg;
+ }
+
+ if (Pmode == SImode)
+ insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
+ stack_pointer_rtx,
+ size_rtx,
+ orig_sp));
+ else
+ insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
+ stack_pointer_rtx,
+ size_rtx,
+ orig_sp));
+ rtx par = PATTERN (insn);
+ gcc_assert (GET_CODE (par) == PARALLEL);
+ rtx set = XVECEXP (par, 0, 0);
+ gcc_assert (GET_CODE (set) == SET);
+ rtx mem = SET_DEST (set);
+ gcc_assert (MEM_P (mem));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, get_frame_alias_set ());
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (-size_int))));
+
+ /* Emit a blockage to ensure the allocation/probing insns are
+ not optimized, combined, removed, etc. Add REG_STACK_CHECK
+ note for similar reasons. */
+ if (flag_stack_clash_protection)
+ {
+ add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
+ emit_insn (gen_blockage ());
+ }
+
+ return insn;
+}
+
+static HOST_WIDE_INT
+get_stack_clash_protection_probe_interval (void)
+{
+ return (HOST_WIDE_INT_1U
+ << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+}
+
+static HOST_WIDE_INT
+get_stack_clash_protection_guard_size (void)
+{
+ return (HOST_WIDE_INT_1U
+ << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
+}
+
+/* Allocate ORIG_SIZE bytes on the stack and probe the newly
+ allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
+
+ COPY_REG, if non-null, should contain a copy of the original
+ stack pointer at exit from this function.
+
+ This is subtly different than the Ada probing in that it tries hard to
+ prevent attacks that jump the stack guard. Thus it is never allowed to
+ allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
+ space without a suitable probe. */
+static rtx
+rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
+ rtx copy_reg)
+{
+ rtx orig_sp = copy_reg;
+
+ HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
+
+ /* Round the size down to a multiple of PROBE_INTERVAL. */
+ HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
+
+ /* If explicitly requested,
+ or the rounded size is not the same as the original size
+ or the rounded size is greater than a page,
+ then we will need a copy of the original stack pointer. */
+ if (rounded_size != orig_size
+ || rounded_size > probe_interval
+ || copy_reg)
+ {
+ /* If the caller did not request a copy of the incoming stack
+ pointer, then we use r0 to hold the copy. */
+ if (!copy_reg)
+ orig_sp = gen_rtx_REG (Pmode, 0);
+ emit_move_insn (orig_sp, stack_pointer_rtx);
+ }
+
+ /* There are three cases here.
+
+ One is a single probe which is the most common and most efficiently
+ implemented as it does not have to have a copy of the original
+ stack pointer if there are no residuals.
+
+ Second is unrolled allocation/probes which we use if there are just
+ a few of them. It needs to save the original stack pointer into a
+ temporary for use as a source register in the allocation/probe.
+
+ Last is a loop. This is the most uncommon case and least efficient. */
+ rtx retval = NULL;
+ if (rounded_size == probe_interval)
+ {
+ retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
+
+ dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
+ }
+ else if (rounded_size <= 8 * probe_interval)
+ {
+ /* The ABI requires using the store with update insns to allocate
+ space and store the backchain into the stack
+
+ So we save the current stack pointer into a temporary, then
+ emit the store-with-update insns to store the saved stack pointer
+ into the right location in each new page. */
+ for (int i = 0; i < rounded_size; i += probe_interval)
+ {
+ rtx insn = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
+
+ /* Save the first stack adjustment in RETVAL. */
+ if (i == 0)
+ retval = insn;
+ }
+
+ dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
+ }
+ else
+ {
+ /* Compute the ending address. */
+ rtx end_addr
+ = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
+ rtx rs = GEN_INT (-rounded_size);
+ rtx insn;
+ if (add_operand (rs, Pmode))
+ insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
+ else
+ {
+ emit_move_insn (end_addr, GEN_INT (-rounded_size));
+ insn = emit_insn (gen_add3_insn (end_addr, end_addr,
+ stack_pointer_rtx));
+ /* Describe the effect of INSN to the CFI engine. */
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, end_addr,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ rs)));
+ }
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Emit the loop. */
+ if (TARGET_64BIT)
+ retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
+ stack_pointer_rtx, orig_sp,
+ end_addr));
+ else
+ retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
+ stack_pointer_rtx, orig_sp,
+ end_addr));
+ RTX_FRAME_RELATED_P (retval) = 1;
+ /* Describe the effect of INSN to the CFI engine. */
+ add_reg_note (retval, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx, end_addr));
+
+ /* Emit a blockage to ensure the allocation/probing insns are
+ not optimized, combined, removed, etc. Other cases handle this
+ within their call to rs6000_emit_allocate_stack_1. */
+ emit_insn (gen_blockage ());
+
+ dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
+ }
+
+ if (orig_size != rounded_size)
+ {
+ /* Allocate (and implicitly probe) any residual space. */
+ HOST_WIDE_INT residual = orig_size - rounded_size;
+
+ rtx insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
+
+ /* If the residual was the only allocation, then we can return the
+ allocating insn. */
+ if (!retval)
+ retval = insn;
+ }
+
+ return retval;
+}
+
/* Emit the correct code for allocating stack space, as insns.
If COPY_REG, make sure a copy of the old frame is left there.
The generated code may use hard register 0 as a temporary. */
@@ -22331,7 +22545,6 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
rtx tmp_reg = gen_rtx_REG (Pmode, 0);
rtx todec = gen_int_mode (-size, Pmode);
- rtx par, set, mem;
if (INTVAL (todec) != -size)
{
@@ -22368,6 +22581,22 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
warning (0, "stack limit expression is not supported");
}
+ if (flag_stack_clash_protection)
+ {
+ if (size < get_stack_clash_protection_guard_size ())
+ dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
+ else
+ {
+ rtx insn = rs6000_emit_probe_stack_range_stack_clash (size, copy_reg);
+
+ /* If we asked for a copy with an offset, then we still need to add in
+ the offset. */
+ if (copy_reg && copy_off)
+ emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
+ return;
+ }
+ }
+
if (copy_reg)
{
if (copy_off != 0)
@@ -22376,39 +22605,12 @@ rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
emit_move_insn (copy_reg, stack_reg);
}
- if (size > 32767)
- {
- /* Need a note here so that try_split doesn't get confused. */
- if (get_last_insn () == NULL_RTX)
- emit_note (NOTE_INSN_DELETED);
- insn = emit_move_insn (tmp_reg, todec);
- try_split (PATTERN (insn), insn, 0);
- todec = tmp_reg;
- }
-
- insn = emit_insn (TARGET_32BIT
- ? gen_movsi_update_stack (stack_reg, stack_reg,
- todec, stack_reg)
- : gen_movdi_di_update_stack (stack_reg, stack_reg,
- todec, stack_reg));
/* Since we didn't use gen_frame_mem to generate the MEM, grab
it now and set the alias set/attributes. The above gen_*_update
calls will generate a PARALLEL with the MEM set being the first
operation. */
- par = PATTERN (insn);
- gcc_assert (GET_CODE (par) == PARALLEL);
- set = XVECEXP (par, 0, 0);
- gcc_assert (GET_CODE (set) == SET);
- mem = SET_DEST (set);
- gcc_assert (MEM_P (mem));
- MEM_NOTRAP_P (mem) = 1;
- set_mem_alias_set (mem, get_frame_alias_set ());
-
- RTX_FRAME_RELATED_P (insn) = 1;
- add_reg_note (insn, REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, stack_reg,
- gen_rtx_PLUS (Pmode, stack_reg,
- GEN_INT (-size))));
+ insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
+ return;
}
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
@@ -22490,9 +22692,9 @@ rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
until it is equal to ROUNDED_SIZE. */
if (TARGET_64BIT)
- emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
+ emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
else
- emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
+ emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
/* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
@@ -22504,10 +22706,10 @@ rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
}
/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
- absolute addresses. */
+ addresses, not offsets. */
-const char *
-output_probe_stack_range (rtx reg1, rtx reg2)
+static const char *
+output_probe_stack_range_1 (rtx reg1, rtx reg2)
{
static int labelno = 0;
char loop_lab[32], end_lab[32];
@@ -22546,6 +22748,63 @@ output_probe_stack_range (rtx reg1, rtx reg2)
return "";
}
+/* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
+ addresses, not offsets.
+
+ REG2 contains the backchain that must be stored into *sp at each allocation.
+
+ This is subtly different than the Ada probing above in that it tries hard
+ to prevent attacks that jump the stack guard. Thus, it is never allowed
+ to allocate more than PROBE_INTERVAL bytes of stack space without a
+ suitable probe. */
+
+static const char *
+output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
+{
+ static int labelno = 0;
+ char loop_lab[32];
+ rtx xops[3];
+
+ HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
+
+ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
+
+ /* This allocates and probes. */
+ xops[0] = reg1;
+ xops[1] = reg2;
+ xops[2] = GEN_INT (-probe_interval);
+ if (TARGET_64BIT)
+ output_asm_insn ("stdu %1,%2(%0)", xops);
+ else
+ output_asm_insn ("stwu %1,%2(%0)", xops);
+
+ /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
+ xops[0] = reg1;
+ xops[1] = reg3;
+ if (TARGET_64BIT)
+ output_asm_insn ("cmpd 0,%0,%1", xops);
+ else
+ output_asm_insn ("cmpw 0,%0,%1", xops);
+
+ fputs ("\tbne 0,", asm_out_file);
+ assemble_name_raw (asm_out_file, loop_lab);
+ fputc ('\n', asm_out_file);
+
+ return "";
+}
+
+/* Wrapper around the output_probe_stack_range routines. */
+const char *
+output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
+{
+ if (flag_stack_clash_protection)
+ return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
+ else
+ return output_probe_stack_range_1 (reg1, reg3);
+}
+
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
is not NULL. It would be nice if dwarf2out_frame_debug_expr could
@@ -23857,6 +24116,13 @@ rs6000_emit_prologue (void)
}
}
+ /* If we are emitting stack probes, but allocate no stack, then
+ just note that in the dump file. */
+ if (flag_stack_clash_protection
+ && dump_file
+ && !info->push_p)
+ dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
+
/* Update stack and set back pointer unless this is V.4,
for which it was done previously. */
if (!WORLD_SAVE_P (info) && info->push_p
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index cd197213480..3cd70e592c1 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -11822,10 +11822,20 @@
;;
;; First, an insn to allocate new stack space for dynamic use (e.g., alloca).
;; We move the back-chain and decrement the stack pointer.
-
+;;
+;; Operand1 is more naturally reg_or_short_operand. However, for a large
+;; constant alloca, using that predicate will force the generic code to put
+;; the constant size into a register before calling the expander.
+;;
+;; As a result the expander would not have the constant size information
+;; in those cases and would have to generate less efficient code.
+;;
+;; Thus we allow reg_or_cint_operand instead so that the expander can see
+;; the constant size. The value is forced into a register if necessary.
+;;
(define_expand "allocate_stack"
[(set (match_operand 0 "gpc_reg_operand" "")
- (minus (reg 1) (match_operand 1 "reg_or_short_operand" "")))
+ (minus (reg 1) (match_operand 1 "reg_or_cint_operand" "")))
(set (reg 1)
(minus (reg 1) (match_dup 1)))]
""
@@ -11835,6 +11845,15 @@
rtx neg_op0;
rtx insn, par, set, mem;
+ /* By allowing reg_or_cint_operand as the predicate we can get
+ better code for stack-clash-protection because we do not lose
+ size information. But the rest of the code expects the operand
+ to be reg_or_short_operand. If it isn't, then force it into
+ a register. */
+ rtx orig_op1 = operands[1];
+ if (!reg_or_short_operand (operands[1], Pmode))
+ operands[1] = force_reg (Pmode, operands[1]);
+
emit_move_insn (chain, stack_bot);
/* Check stack bounds if necessary. */
@@ -11847,6 +11866,51 @@
emit_insn (gen_cond_trap (LTU, available, operands[1], const0_rtx));
}
+ /* Allocate and probe if requested.
+ This may look similar to the loop we use for prologue allocations,
+ but it is critically different. For the former we know the loop
+ will iterate, but do not know that generally here. The former
+ uses that knowledge to rotate the loop. Combining them would be
+ possible with some performance cost. */
+ if (flag_stack_clash_protection)
+ {
+ rtx rounded_size, last_addr, residual;
+ HOST_WIDE_INT probe_interval;
+ compute_stack_clash_protection_loop_data (&rounded_size, &last_addr,
+ &residual, &probe_interval,
+ orig_op1);
+
+ /* We do occasionally get in here with constant sizes, we might
+ as well do a reasonable job when we obviously can. */
+ if (rounded_size != const0_rtx)
+ {
+ rtx loop_lab, end_loop;
+ bool rotated = CONST_INT_P (rounded_size);
+
+ emit_stack_clash_protection_probe_loop_start (&loop_lab, &end_loop,
+ last_addr, rotated);
+
+ if (Pmode == SImode)
+ emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-probe_interval),
+ chain));
+ else
+ emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-probe_interval),
+ chain));
+ emit_stack_clash_protection_probe_loop_end (loop_lab, end_loop,
+ last_addr, rotated);
+ }
+
+ /* Now handle residuals. We just have to set operands[1] correctly
+ and let the rest of the expander run. */
+ operands[1] = residual;
+ if (!CONST_INT_P (residual))
+ operands[1] = force_reg (Pmode, operands[1]);
+ }
+
if (GET_CODE (operands[1]) != CONST_INT
|| INTVAL (operands[1]) < -32767
|| INTVAL (operands[1]) > 32768)
@@ -12994,12 +13058,13 @@
(set_attr "length" "4")])
(define_insn "probe_stack_range<P:mode>"
- [(set (match_operand:P 0 "register_operand" "=r")
+ [(set (match_operand:P 0 "register_operand" "=&r")
(unspec_volatile:P [(match_operand:P 1 "register_operand" "0")
- (match_operand:P 2 "register_operand" "r")]
+ (match_operand:P 2 "register_operand" "r")
+ (match_operand:P 3 "register_operand" "r")]
UNSPECV_PROBE_STACK_RANGE))]
""
- "* return output_probe_stack_range (operands[0], operands[2]);"
+ "* return output_probe_stack_range (operands[0], operands[2], operands[3]);"
[(set_attr "type" "three")])
;; Compare insns are next. Note that the RS/6000 has two types of compares,
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 7c126e4122b..aba99513ed0 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -5421,12 +5421,12 @@ proc check_effective_target_autoincdec { } {
proc check_effective_target_supports_stack_clash_protection { } {
# Temporary until the target bits are fully ACK'd.
-# if { [istarget aarch*-*-*]
-# || [istarget powerpc*-*-*] || [istarget rs6000*-*-*] } {
+# if { [istarget aarch*-*-*] } {
# return 1
# }
if { [istarget x86_64-*-*] || [istarget i?86-*-*]
+ || [istarget powerpc*-*-*] || [istarget rs6000*-*-*]
|| [istarget s390*-*-*] } {
return 1
}
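
The "three cases" comment in rs6000_emit_probe_stack_range_stack_clash boils down to a size-based strategy choice; a compact editorial model of that selection (sketch only, assuming the default 4 KiB probe interval -- the real code reads it from --param stack-clash-protection-probe-interval):

#include <stdio.h>

#define PROBE_INTERVAL 4096

/* Mirror the strategy selection: the size is rounded down to a
   multiple of the probe interval, the rounded part decides the shape
   of the allocation/probe sequence, and any residual is allocated
   (and thus implicitly probed) by one final store-with-update.  */
static const char *
probe_strategy (long size)
{
  long rounded = size & ~(long) (PROBE_INTERVAL - 1);  /* ROUND_DOWN */
  if (rounded == 0)
    return "residual-only allocation";
  if (rounded == PROBE_INTERVAL)
    return "single store-with-update probe";
  if (rounded <= 8 * PROBE_INTERVAL)
    return "unrolled store-with-update probes";
  return "probe loop";
}

int
main (void)
{
  static const long sizes[] = { 2048, 4096, 20000, 200000 };
  unsigned i;
  for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    printf ("%6ld bytes -> %s\n", sizes[i], probe_strategy (sizes[i]));
  return 0;
}
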

View File

@ -0,0 +1,47 @@
commit 15d5202e75021f2c41b8a1cb344c04b8915e9d4e
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Sun Oct 8 15:44:39 2017 +0000
* gcc.dg/stack-check-5.c: Skip with -fstack-protector.
* gcc.dg/stack-check-6.c: Likewise.
* gcc.dg/stack-check-6a.c: Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@253527 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/testsuite/gcc.dg/stack-check-5.c b/gcc/testsuite/gcc.dg/stack-check-5.c
index 3178f5d8ce5..850e023ea4e 100644
--- a/gcc/testsuite/gcc.dg/stack-check-5.c
+++ b/gcc/testsuite/gcc.dg/stack-check-5.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -fdump-rtl-pro_and_epilogue -fno-optimize-sibling-calls --param stack-clash-protection-probe-interval=12 --param stack-clash-protection-guard-size=12" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
/* Otherwise the S/390 back-end might save the stack pointer in f2 ()
diff --git a/gcc/testsuite/gcc.dg/stack-check-6.c b/gcc/testsuite/gcc.dg/stack-check-6.c
index ad2021c9037..ab4b0e8894c 100644
--- a/gcc/testsuite/gcc.dg/stack-check-6.c
+++ b/gcc/testsuite/gcc.dg/stack-check-6.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -fdump-rtl-pro_and_epilogue -fno-optimize-sibling-calls --param stack-clash-protection-probe-interval=12 --param stack-clash-protection-guard-size=12" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
extern void foo (char *);
diff --git a/gcc/testsuite/gcc.dg/stack-check-6a.c b/gcc/testsuite/gcc.dg/stack-check-6a.c
index 6f8e7128921..468d649a4fa 100644
--- a/gcc/testsuite/gcc.dg/stack-check-6a.c
+++ b/gcc/testsuite/gcc.dg/stack-check-6a.c
@@ -4,6 +4,8 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -fdump-rtl-pro_and_epilogue -fno-optimize-sibling-calls --param stack-clash-protection-probe-interval=12 --param stack-clash-protection-guard-size=16" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
+
#include "stack-check-6.c"

View File

@ -0,0 +1,21 @@
* config/i386/i386.c (ix86_expand_prologue): Tighten assert
for int_registers_saved.
diff -rup a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
--- a/gcc/config/i386/i386.c 2017-11-03 10:39:24.585633524 -0600
+++ b/gcc/config/i386/i386.c 2017-11-03 10:41:10.654774032 -0600
@@ -10686,8 +10686,12 @@ ix86_expand_prologue (void)
&& (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
|| flag_stack_clash_protection))
{
- /* We expect the registers to be saved when probes are used. */
- gcc_assert (int_registers_saved);
+ /* This assert wants to verify that integer registers were saved
+ prior to probing. This is necessary when probing may be implemented
+ as a function call (Windows). It is not necessary for stack clash
+ protection probing. */
+ if (!flag_stack_clash_protection)
+ gcc_assert (int_registers_saved);
if (flag_stack_clash_protection)
{

View File

@ -0,0 +1,301 @@
commit 21397732bbcef3347c0d5ff8a0ee5163e803e2fb
Author: Jeff Law <law@redhat.com>
Date: Mon Oct 2 12:30:26 2017 -0600
Dependencies for aarch64 work
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 07ff7031b35..91dd5b7fc02 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -181,6 +181,7 @@ unsigned aarch64_dbx_register_number (unsigned);
unsigned aarch64_trampoline_size (void);
void aarch64_asm_output_labelref (FILE *, const char *);
void aarch64_elf_asm_named_section (const char *, unsigned, tree);
+const char * aarch64_output_probe_stack_range (rtx, rtx);
void aarch64_expand_epilogue (bool);
void aarch64_expand_mov_immediate (rtx, rtx);
void aarch64_expand_prologue (void);
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 5afc167d569..cadf193cfcf 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -969,6 +969,199 @@ aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
return true;
}
+static int
+aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+ enum machine_mode mode)
+{
+ int i;
+ unsigned HOST_WIDE_INT val, val2, mask;
+ int one_match, zero_match;
+ int num_insns;
+
+ val = INTVAL (imm);
+
+ if (aarch64_move_imm (val, mode))
+ {
+ if (generate)
+ emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
+ return 1;
+ }
+
+ /* Check to see if the low 32 bits are either 0xffffXXXX or 0xXXXXffff
+ (with XXXX non-zero). In that case check to see if the move can be done in
+ a smaller mode. */
+ val2 = val & 0xffffffff;
+ if (mode == DImode
+ && aarch64_move_imm (val2, SImode)
+ && (((val >> 32) & 0xffff) == 0 || (val >> 48) == 0))
+ {
+ if (generate)
+ emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val2)));
+
+ /* Check if we have to emit a second instruction by checking to see
+ if any of the upper 32 bits of the original DI mode value is set. */
+ if (val == val2)
+ return 1;
+
+ i = (val >> 48) ? 48 : 32;
+
+ if (generate)
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+
+ return 2;
+ }
+
+ if ((val >> 32) == 0 || mode == SImode)
+ {
+ if (generate)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val & 0xffff)));
+ if (mode == SImode)
+ emit_insn (gen_insv_immsi (dest, GEN_INT (16),
+ GEN_INT ((val >> 16) & 0xffff)));
+ else
+ emit_insn (gen_insv_immdi (dest, GEN_INT (16),
+ GEN_INT ((val >> 16) & 0xffff)));
+ }
+ return 2;
+ }
+
+ /* Remaining cases are all for DImode. */
+
+ mask = 0xffff;
+ zero_match = ((val & mask) == 0) + ((val & (mask << 16)) == 0) +
+ ((val & (mask << 32)) == 0) + ((val & (mask << 48)) == 0);
+ one_match = ((~val & mask) == 0) + ((~val & (mask << 16)) == 0) +
+ ((~val & (mask << 32)) == 0) + ((~val & (mask << 48)) == 0);
+
+ if (zero_match != 2 && one_match != 2)
+ {
+ /* Try emitting a bitmask immediate with a movk replacing 16 bits.
+ For a 64-bit bitmask try whether changing 16 bits to all ones or
+ zeroes creates a valid bitmask. To check any repeated bitmask,
+ try using 16 bits from the other 32-bit half of val. */
+
+ for (i = 0; i < 64; i += 16, mask <<= 16)
+ {
+ val2 = val & ~mask;
+ if (val2 != val && aarch64_bitmask_imm (val2, mode))
+ break;
+ val2 = val | mask;
+ if (val2 != val && aarch64_bitmask_imm (val2, mode))
+ break;
+ val2 = val2 & ~mask;
+ val2 = val2 | (((val2 >> 32) | (val2 << 32)) & mask);
+ if (val2 != val && aarch64_bitmask_imm (val2, mode))
+ break;
+ }
+ if (i != 64)
+ {
+ if (generate)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val2)));
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+ }
+ return 2;
+ }
+ }
+
+ /* Generate 2-4 instructions, skipping 16 bits of all zeroes or ones which
+ are emitted by the initial mov. If one_match > zero_match, skip set bits,
+ otherwise skip zero bits. */
+
+ num_insns = 1;
+ mask = 0xffff;
+ val2 = one_match > zero_match ? ~val : val;
+ i = (val2 & mask) != 0 ? 0 : (val2 & (mask << 16)) != 0 ? 16 : 32;
+
+ if (generate)
+ emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (one_match > zero_match
+ ? (val | ~(mask << i))
+ : (val & (mask << i)))));
+ for (i += 16; i < 64; i += 16)
+ {
+ if ((val2 & (mask << i)) == 0)
+ continue;
+ if (generate)
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+ num_insns ++;
+ }
+
+ return num_insns;
+}
+
+/* Add DELTA to REGNUM in mode MODE. SCRATCHREG can be used to hold a
+ temporary value if necessary. FRAME_RELATED_P should be true if
+ the RTX_FRAME_RELATED flag should be set and CFA adjustments added
+ to the generated instructions. If SCRATCHREG is known to hold
+ abs (delta), EMIT_MOVE_IMM can be set to false to avoid emitting the
+ immediate again.
+
+ Since this function may be used to adjust the stack pointer, we must
+ ensure that it cannot cause transient stack deallocation (for example
+ by first incrementing SP and then decrementing when adjusting by a
+ large immediate). */
+
+static void
+aarch64_add_constant_internal (enum machine_mode mode, int regnum,
+ int scratchreg, HOST_WIDE_INT delta,
+ bool frame_related_p, bool emit_move_imm)
+{
+ HOST_WIDE_INT mdelta = abs_hwi (delta);
+ rtx this_rtx = gen_rtx_REG (mode, regnum);
+ rtx insn;
+
+ if (!mdelta)
+ return;
+
+ /* Single instruction adjustment. */
+ if (aarch64_uimm12_shift (mdelta))
+ {
+ insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta)));
+ RTX_FRAME_RELATED_P (insn) = frame_related_p;
+ return;
+ }
+
+ /* Emit 2 additions/subtractions if the adjustment is less than 24 bits.
+ Only do this if mdelta is not a 16-bit move as adjusting using a move
+ is better. */
+ if (mdelta < 0x1000000 && !aarch64_move_imm (mdelta, mode))
+ {
+ HOST_WIDE_INT low_off = mdelta & 0xfff;
+
+ low_off = delta < 0 ? -low_off : low_off;
+ insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (low_off)));
+ RTX_FRAME_RELATED_P (insn) = frame_related_p;
+ insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta - low_off)));
+ RTX_FRAME_RELATED_P (insn) = frame_related_p;
+ return;
+ }
+
+ /* Emit a move immediate if required and an addition/subtraction. */
+ rtx scratch_rtx = gen_rtx_REG (mode, scratchreg);
+ if (emit_move_imm)
+ aarch64_internal_mov_immediate (scratch_rtx, GEN_INT (mdelta), true, mode);
+ insn = emit_insn (delta < 0 ? gen_sub2_insn (this_rtx, scratch_rtx)
+ : gen_add2_insn (this_rtx, scratch_rtx));
+ if (frame_related_p)
+ {
+ RTX_FRAME_RELATED_P (insn) = frame_related_p;
+ rtx adj = plus_constant (mode, this_rtx, delta);
+ add_reg_note (insn , REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, this_rtx, adj));
+ }
+}
+
+static inline void
+aarch64_sub_sp (int scratchreg, HOST_WIDE_INT delta, bool frame_related_p)
+{
+ aarch64_add_constant_internal (Pmode, SP_REGNUM, scratchreg, -delta,
+ frame_related_p, true);
+}
+
/* Implement TARGET_PASS_BY_REFERENCE. */
static bool
@@ -1476,6 +1669,47 @@ aarch64_libgcc_cmp_return_mode (void)
return SImode;
}
+#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+
+/* We use the 12-bit shifted immediate arithmetic instructions so values
+ must be multiple of (1 << 12), i.e. 4096. */
+#define ARITH_FACTOR 4096
+
+/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
+ absolute addresses. */
+
+const char *
+aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
+{
+ static int labelno = 0;
+ char loop_lab[32];
+ rtx xops[2];
+
+ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
+
+ /* Loop. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
+
+ /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
+ xops[0] = reg1;
+ xops[1] = GEN_INT (PROBE_INTERVAL);
+ output_asm_insn ("sub\t%0, %0, %1", xops);
+
+ /* Probe at TEST_ADDR. */
+ output_asm_insn ("str\txzr, [%0]", xops);
+
+ /* Test if TEST_ADDR == LAST_ADDR. */
+ xops[1] = reg2;
+ output_asm_insn ("cmp\t%0, %1", xops);
+
+ /* Branch. */
+ fputs ("\tb.ne\t", asm_out_file);
+ assemble_name_raw (asm_out_file, loop_lab);
+ fputc ('\n', asm_out_file);
+
+ return "";
+}
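Concretely, for the default PROBE_INTERVAL of 4096 the routine prints a loop of the following shape (a sketch following the output_asm_insn templates above; register numbers and the label are illustrative):

/* Approximate output of aarch64_output_probe_stack_range with
   reg1 == x9 and reg2 == x10 (illustration only):

	.LPSRL0:
	sub	x9, x9, #4096	// TEST_ADDR -= PROBE_INTERVAL
	str	xzr, [x9]	// probe at TEST_ADDR
	cmp	x9, x10		// TEST_ADDR == LAST_ADDR?
	b.ne	.LPSRL0
*/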
+
static bool
aarch64_frame_pointer_required (void)
{
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 91299901bbf..17082486ac8 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -88,6 +88,7 @@
UNSPEC_ST4
UNSPEC_TLS
UNSPEC_TLSDESC
+ UNSPECV_PROBE_STACK_RANGE ; Represent stack range probing.
UNSPEC_VSTRUCTDUMMY
])
@@ -3399,6 +3400,18 @@
[(set_attr "length" "0")]
)
+(define_insn "probe_stack_range"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "r")]
+ UNSPECV_PROBE_STACK_RANGE))]
+ ""
+{
+ return aarch64_output_probe_stack_range (operands[0], operands[2]);
+}
+ [(set_attr "length" "32")]
+)
+
;; Named pattern for expanding thread pointer reference.
(define_expand "get_thread_pointerdi"
[(match_operand:DI 0 "register_operand" "=r")]

View File

@@ -0,0 +1,147 @@
commit 54a2f1efc188660df9da78523b6925aab4c3a668
Author: rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Tue Jan 13 14:11:15 2015 +0000
gcc/
* config/aarch64/aarch64.md (subsi3, *subsi3_uxtw, subdi3)
(*sub_<optab><ALLX:mode>_<GPI:mode>, *sub_<optab><SHORT:mode>_si_uxtw)
(*sub_<optab><ALLX:mode>_shft_<GPI:mode>)
(*sub_<optab><SHORT:mode>_shft_si_uxtw, *sub_<optab><mode>_multp2)
(*sub_<optab>si_multp2_uxtw, *sub_uxt<mode>_multp2)
(*sub_uxtsi_multp2_uxtw): Add stack pointer sources.
gcc/testsuite/
* gcc.target/aarch64/subsp.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@219533 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 17082486ac8..a085c6acaf5 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1610,8 +1610,8 @@
(define_insn "subsi3"
[(set (match_operand:SI 0 "register_operand" "=rk")
- (minus:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r")))]
+ (minus:SI (match_operand:SI 1 "register_operand" "rk")
+ (match_operand:SI 2 "register_operand" "r")))]
""
"sub\\t%w0, %w1, %w2"
[(set_attr "v8type" "alu")
@@ -1622,7 +1622,7 @@
(define_insn "*subsi3_uxtw"
[(set (match_operand:DI 0 "register_operand" "=rk")
(zero_extend:DI
- (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (minus:SI (match_operand:SI 1 "register_operand" "rk")
(match_operand:SI 2 "register_operand" "r"))))]
""
"sub\\t%w0, %w1, %w2"
@@ -1632,7 +1632,7 @@
(define_insn "subdi3"
[(set (match_operand:DI 0 "register_operand" "=rk,!w")
- (minus:DI (match_operand:DI 1 "register_operand" "r,!w")
+ (minus:DI (match_operand:DI 1 "register_operand" "rk,!w")
(match_operand:DI 2 "register_operand" "r,!w")))]
""
"@
@@ -1725,7 +1725,7 @@
(define_insn "*sub_<optab><ALLX:mode>_<GPI:mode>"
[(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (minus:GPI (match_operand:GPI 1 "register_operand" "rk")
(ANY_EXTEND:GPI
(match_operand:ALLX 2 "register_operand" "r"))))]
""
@@ -1738,7 +1738,7 @@
(define_insn "*sub_<optab><SHORT:mode>_si_uxtw"
[(set (match_operand:DI 0 "register_operand" "=rk")
(zero_extend:DI
- (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (minus:SI (match_operand:SI 1 "register_operand" "rk")
(ANY_EXTEND:SI
(match_operand:SHORT 2 "register_operand" "r")))))]
""
@@ -1749,7 +1749,7 @@
(define_insn "*sub_<optab><ALLX:mode>_shft_<GPI:mode>"
[(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (minus:GPI (match_operand:GPI 1 "register_operand" "rk")
(ashift:GPI (ANY_EXTEND:GPI
(match_operand:ALLX 2 "register_operand" "r"))
(match_operand 3 "aarch64_imm3" "Ui3"))))]
@@ -1763,7 +1763,7 @@
(define_insn "*sub_<optab><SHORT:mode>_shft_si_uxtw"
[(set (match_operand:DI 0 "register_operand" "=rk")
(zero_extend:DI
- (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (minus:SI (match_operand:SI 1 "register_operand" "rk")
(ashift:SI (ANY_EXTEND:SI
(match_operand:SHORT 2 "register_operand" "r"))
(match_operand 3 "aarch64_imm3" "Ui3")))))]
@@ -1775,7 +1775,7 @@
(define_insn "*sub_<optab><mode>_multp2"
[(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+ (minus:GPI (match_operand:GPI 4 "register_operand" "rk")
(ANY_EXTRACT:GPI
(mult:GPI (match_operand:GPI 1 "register_operand" "r")
(match_operand 2 "aarch64_pwr_imm3" "Up3"))
@@ -1791,7 +1791,7 @@
(define_insn "*sub_<optab>si_multp2_uxtw"
[(set (match_operand:DI 0 "register_operand" "=rk")
(zero_extend:DI
- (minus:SI (match_operand:SI 4 "register_operand" "r")
+ (minus:SI (match_operand:SI 4 "register_operand" "rk")
(ANY_EXTRACT:SI
(mult:SI (match_operand:SI 1 "register_operand" "r")
(match_operand 2 "aarch64_pwr_imm3" "Up3"))
@@ -1805,7 +1805,7 @@
(define_insn "*sub_uxt<mode>_multp2"
[(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+ (minus:GPI (match_operand:GPI 4 "register_operand" "rk")
(and:GPI
(mult:GPI (match_operand:GPI 1 "register_operand" "r")
(match_operand 2 "aarch64_pwr_imm3" "Up3"))
@@ -1823,7 +1823,7 @@
(define_insn "*sub_uxtsi_multp2_uxtw"
[(set (match_operand:DI 0 "register_operand" "=rk")
(zero_extend:DI
- (minus:SI (match_operand:SI 4 "register_operand" "r")
+ (minus:SI (match_operand:SI 4 "register_operand" "rk")
(and:SI
(mult:SI (match_operand:SI 1 "register_operand" "r")
(match_operand 2 "aarch64_pwr_imm3" "Up3"))
diff --git a/gcc/testsuite/gcc.target/aarch64/subsp.c b/gcc/testsuite/gcc.target/aarch64/subsp.c
new file mode 100644
index 00000000000..70d848c59d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/subsp.c
@@ -0,0 +1,19 @@
+/* { dg-options "-O" } */
+
+int foo (void *);
+
+int
+f1 (int *x, long y)
+{
+ return foo (__builtin_alloca (y));
+}
+
+int
+f2 (int *x, int y)
+{
+ char a[y + 1][16];
+ return foo (a);
+}
+
+/* { dg-final { scan-assembler "sub\tsp, sp, x\[0-9\]*\n" } } */
+/* { dg-final { scan-assembler "sub\tsp, sp, x\[0-9\]*, sxtw 4\n" } } */

View File

@@ -0,0 +1,462 @@
commit 5d7a77ede3e91948ee125bd82533d7e692543fff
Author: Jeff Law <law@redhat.com>
Date: Mon Oct 2 13:43:01 2017 -0600
aarch64 support
diff --git a/gcc/config/aarch64/.aarch64.c.rej.swp b/gcc/config/aarch64/.aarch64.c.rej.swp
new file mode 100644
index 00000000000..b899e21b855
Binary files /dev/null and b/gcc/config/aarch64/.aarch64.c.rej.swp differ
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index cadf193cfcf..e08632ffa88 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -45,6 +45,8 @@
#include "gimple.h"
#include "optabs.h"
#include "dwarf2.h"
+#include "params.h"
+#include "dumpfile.h"
/* Classifies an address.
@@ -1696,7 +1698,14 @@ aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
output_asm_insn ("sub\t%0, %0, %1", xops);
/* Probe at TEST_ADDR. */
- output_asm_insn ("str\txzr, [%0]", xops);
+ if (flag_stack_clash_protection)
+ {
+ gcc_assert (xops[0] == stack_pointer_rtx);
+ xops[1] = GEN_INT (PROBE_INTERVAL - 8);
+ output_asm_insn ("str\txzr, [%0, %1]", xops);
+ }
+ else
+ output_asm_insn ("str\txzr, [%0]", xops);
/* Test if TEST_ADDR == LAST_ADDR. */
xops[1] = reg2;
@@ -2001,6 +2010,123 @@ aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset,
base_rtx, cfi_ops);
}
+/* Allocate SIZE bytes of stack space using SCRATCHREG as a scratch
+ register. */
+
+static void
+aarch64_allocate_and_probe_stack_space (int scratchreg, HOST_WIDE_INT size)
+{
+ HOST_WIDE_INT probe_interval
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ HOST_WIDE_INT guard_size
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ HOST_WIDE_INT guard_used_by_caller = 1024;
+
+ /* SIZE should be large enough to require probing here, i.e. it
+ must be larger than GUARD_SIZE - GUARD_USED_BY_CALLER.
+
+ We can allocate GUARD_SIZE - GUARD_USED_BY_CALLER as a single chunk
+ without any probing. */
+ gcc_assert (size >= guard_size - guard_used_by_caller);
+ aarch64_sub_sp (scratchreg, guard_size - guard_used_by_caller, true);
+ HOST_WIDE_INT orig_size = size;
+ size -= (guard_size - guard_used_by_caller);
+
+ HOST_WIDE_INT rounded_size = size & -probe_interval;
+ HOST_WIDE_INT residual = size - rounded_size;
+
+ /* We can handle a small number of allocations/probes inline. Otherwise
+ punt to a loop. */
+ if (rounded_size && rounded_size <= 4 * probe_interval)
+ {
+ /* We don't use aarch64_sub_sp here because we don't want to
+ repeatedly load SCRATCHREG. */
+ rtx scratch_rtx = gen_rtx_REG (Pmode, scratchreg);
+ if (probe_interval > ARITH_FACTOR)
+ emit_move_insn (scratch_rtx, GEN_INT (-probe_interval));
+ else
+ scratch_rtx = GEN_INT (-probe_interval);
+
+ for (HOST_WIDE_INT i = 0; i < rounded_size; i += probe_interval)
+ {
+ rtx insn = emit_insn (gen_add2_insn (stack_pointer_rtx, scratch_rtx));
+ add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
+
+ if (probe_interval > ARITH_FACTOR)
+ {
+ RTX_FRAME_RELATED_P (insn) = 1;
+ rtx adj = plus_constant (Pmode, stack_pointer_rtx, -probe_interval);
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx, adj));
+ }
+
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ (probe_interval
+ - GET_MODE_SIZE (word_mode))));
+ emit_insn (gen_blockage ());
+ }
+ dump_stack_clash_frame_info (PROBE_INLINE, size != rounded_size);
+ }
+ else if (rounded_size)
+ {
+ /* Compute the ending address. */
+ rtx temp = gen_rtx_REG (word_mode, scratchreg);
+ emit_move_insn (temp, GEN_INT (-rounded_size));
+ rtx insn = emit_insn (gen_add3_insn (temp, stack_pointer_rtx, temp));
+
+ /* For the initial allocation, we don't have a frame pointer
+ set up, so we always need CFI notes. If we're doing the
+ final allocation, then we may have a frame pointer, in which
+ case it is the CFA, otherwise we need CFI notes.
+
+ We can determine which allocation we are doing by looking at
+ the temporary register. IP0 is the initial allocation, IP1
+ is the final allocation. */
+ if (scratchreg == IP0_REGNUM || !frame_pointer_needed)
+ {
+ /* We want the CFA independent of the stack pointer for the
+ duration of the loop. */
+ add_reg_note (insn, REG_CFA_DEF_CFA,
+ plus_constant (Pmode, temp,
+ (rounded_size + (orig_size - size))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* This allocates and probes the stack.
+
+ It also probes at a 4k interval regardless of the value of
+ PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL. */
+ insn = emit_insn (gen_probe_stack_range (stack_pointer_rtx,
+ stack_pointer_rtx, temp));
+
+ /* Now reset the CFA register if needed. */
+ if (scratchreg == IP0_REGNUM || !frame_pointer_needed)
+ {
+ add_reg_note (insn, REG_CFA_DEF_CFA,
+ plus_constant (Pmode, stack_pointer_rtx,
+ (rounded_size + (orig_size - size))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ emit_insn (gen_blockage ());
+ dump_stack_clash_frame_info (PROBE_LOOP, size != rounded_size);
+ }
+ else
+ dump_stack_clash_frame_info (PROBE_INLINE, size != rounded_size);
+
+ /* Handle any residuals.
+ Note that any residual must be probed. */
+ if (residual)
+ {
+ aarch64_sub_sp (scratchreg, residual, true);
+ add_reg_note (get_last_insn (), REG_STACK_CHECK, const0_rtx);
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ (residual - GET_MODE_SIZE (word_mode))));
+ emit_insn (gen_blockage ());
+ }
+ return;
+}
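A worked example of the bookkeeping above, using the 64k guard and 1k caller allowance that this backport hard-codes (stand-alone arithmetic, not GCC code):

#include <stdio.h>

int
main (void)
{
  long long guard_size = 64 * 1024;           /* 1 << 16, assumed above */
  long long guard_used_by_caller = 1024;
  long long probe_interval = 4096;            /* 1 << 12, the default */
  long long size = 128 * 1024;                /* frame to allocate */

  long long unprobed = guard_size - guard_used_by_caller;  /* 64512 */
  size -= unprobed;                                        /* 66560 */

  long long rounded = size & -probe_interval;              /* 65536 */
  long long residual = size - rounded;                     /* 1024 */

  /* 65536 > 4 * 4096, so the loop form is chosen, followed by one
     residual allocation with a trailing probe.  */
  printf ("unprobed=%lld rounded=%lld residual=%lld\n",
          unprobed, rounded, residual);
  return 0;
}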
+
/* AArch64 stack frames generated by this compiler look like:
+-------------------------------+
@@ -2073,6 +2199,44 @@ aarch64_expand_prologue (void)
- original_frame_size
- cfun->machine->frame.saved_regs_size);
+ /* We do not fully protect aarch64 against stack clash style attacks
+ as doing so would be prohibitively expensive, with diminishing utility
+ over time as newer compilers are deployed.
+
+ We assume the guard is at least 64k. Furthermore, we assume that
+ the caller has not pushed the stack pointer more than 1k into
+ the guard. A caller that pushes the stack pointer more than 1k into
+ the guard is considered invalid.
+
+ Note that the caller's ability to push the stack pointer into the
+ guard is a function of the number and size of outgoing arguments and/or
+ dynamic stack allocations due to the mandatory save of the link register
+ in the caller's frame.
+
+ With those assumptions the callee can allocate up to 63k of stack
+ space without probing.
+
+ When probing is needed, we emit a probe at the start of the prologue
+ and every PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes thereafter.
+
+ We have to track how much space has been allocated, but we do not
+ track stores into the stack as implicit probes except for the
+ fp/lr store. */
+ HOST_WIDE_INT guard_size
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ HOST_WIDE_INT guard_used_by_caller = 1024;
+ HOST_WIDE_INT final_adjust = crtl->outgoing_args_size;
+ HOST_WIDE_INT initial_adjust = frame_size;
+
+ if (flag_stack_clash_protection)
+ {
+ if (initial_adjust == 0)
+ dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
+ else if (offset < guard_size - guard_used_by_caller
+ && final_adjust < guard_size - guard_used_by_caller)
+ dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
+ }
+
/* Store pairs and load pairs have a range only -512 to 504. */
if (offset >= 512)
{
@@ -2089,7 +2253,10 @@ aarch64_expand_prologue (void)
frame_size -= (offset + crtl->outgoing_args_size);
fp_offset = 0;
- if (frame_size >= 0x1000000)
+ if (flag_stack_clash_protection
+ && frame_size >= guard_size - guard_used_by_caller)
+ aarch64_allocate_and_probe_stack_space (IP0_REGNUM, frame_size);
+ else if (frame_size >= 0x1000000)
{
rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
emit_move_insn (op0, GEN_INT (-frame_size));
@@ -2206,10 +2373,30 @@ aarch64_expand_prologue (void)
{
if (crtl->outgoing_args_size > 0)
{
- insn = emit_insn (gen_add2_insn
- (stack_pointer_rtx,
- GEN_INT (- crtl->outgoing_args_size)));
- RTX_FRAME_RELATED_P (insn) = 1;
+ if (flag_stack_clash_protection)
+ {
+ /* First probe if the final adjustment is larger than the
+ guard size less the amount of guard reserved for use by
+ the caller's outgoing args. */
+ if (final_adjust >= guard_size - guard_used_by_caller)
+ aarch64_allocate_and_probe_stack_space (IP1_REGNUM,
+ final_adjust);
+ else
+ aarch64_sub_sp (IP1_REGNUM, final_adjust, !frame_pointer_needed);
+
+ /* We must also probe if the final adjustment is larger than the
+ guard that is assumed used by the caller. This may be
+ sub-optimal. */
+ if (final_adjust >= guard_used_by_caller)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ "Stack clash aarch64 large outgoing arg, probing\n");
+ emit_stack_probe (stack_pointer_rtx);
+ }
+ }
+ else
+ aarch64_sub_sp (IP1_REGNUM, final_adjust, !frame_pointer_needed);
}
}
}
@@ -5088,6 +5275,12 @@ aarch64_override_options (void)
#endif
}
+ /* We assume the guard page is 64k. */
+ maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
+ 16,
+ global_options.x_param_values,
+ global_options_set.x_param_values);
+
aarch64_override_options_after_change ();
}
@@ -8161,6 +8354,28 @@ aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
return ret;
}
+/* It has been decided to allow up to 1kb of outgoing argument
+ space to be allocated without probing. If more than 1kb of outgoing
+ argument space is allocated, then it must be probed and the last
+ probe must occur no more than 1kbyte away from the end of the
+ allocated space.
+
+ This implies that the residual part of an alloca allocation may
+ need probing in cases where the generic code might not otherwise
+ think a probe is needed.
+
+ This target hook returns TRUE when allocating RESIDUAL bytes of
+ alloca space requires an additional probe, otherwise FALSE is
+ returned. */
+
+static bool
+aarch64_stack_clash_protection_final_dynamic_probe (rtx residual)
+{
+ return (residual == CONST0_RTX (Pmode)
+ || GET_CODE (residual) != CONST_INT
+ || INTVAL (residual) >= 1024);
+}
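The hook boils down to a small predicate; here is a stand-alone rendering (needs_final_probe is a made-up name, and KNOWN stands for GET_CODE (residual) == CONST_INT):

#include <stdbool.h>

/* Probe after the dynamic allocation unless the residual is a known,
   non-zero constant smaller than the 1kb of guard assumed used by
   the caller (illustration of the hook above).  */
static bool
needs_final_probe (bool known, long val)
{
  return !known || val == 0 || val >= 1024;
}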
+
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST aarch64_address_cost
@@ -8378,6 +8593,10 @@ aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs
+#undef TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE
+#define TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE \
+ aarch64_stack_clash_protection_final_dynamic_probe
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-aarch64.h"
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index a085c6acaf5..5485a5f70b1 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -3401,7 +3401,7 @@
)
(define_insn "probe_stack_range"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=rk")
(unspec_volatile:DI [(match_operand:DI 1 "register_operand" "0")
(match_operand:DI 2 "register_operand" "r")]
UNSPECV_PROBE_STACK_RANGE))]
diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-12.c b/gcc/testsuite/gcc.target/aarch64/stack-check-12.c
new file mode 100644
index 00000000000..2ce38483b6b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/stack-check-12.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=12" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+extern void arf (unsigned long int *, unsigned long int *);
+void
+frob ()
+{
+ unsigned long int num[1000];
+ unsigned long int den[1000];
+ arf (den, num);
+}
+
+/* This verifies that the scheduler did not break the dependencies
+ by adjusting the offsets within the probe and that the scheduler
+ did not reorder around the stack probes. */
+/* { dg-final { scan-assembler-times "sub\\tsp, sp, #4096\\n\\tstr\\txzr, .sp, 4088." 3 } } */
+
+
+
diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-13.c b/gcc/testsuite/gcc.target/aarch64/stack-check-13.c
new file mode 100644
index 00000000000..d8886835989
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/stack-check-13.c
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=12" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+#define ARG32(X) X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+#define ARG192(X) ARG32(X),ARG32(X),ARG32(X),ARG32(X),ARG32(X),ARG32(X)
+void out1(ARG192(__int128));
+int t1(int);
+
+int t3(int x)
+{
+ if (x < 1000)
+ return t1 (x) + 1;
+
+ out1 (ARG192(1));
+ return 0;
+}
+
+
+
+/* This test creates a large (> 1k) outgoing argument area that needs
+ to be probed. We don't test the exact size of the space or the
+ exact offset to make the test a little less sensitive to trivial
+ output changes. */
+/* { dg-final { scan-assembler-times "sub\\tsp, sp, #....\\n\\tstr\\txzr, \\\[sp" 1 } } */
+
+
+
diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-14.c b/gcc/testsuite/gcc.target/aarch64/stack-check-14.c
new file mode 100644
index 00000000000..59ffe01376d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/stack-check-14.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=12" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+int t1(int);
+
+int t2(int x)
+{
+ char *p = __builtin_alloca (4050);
+ x = t1 (x);
+ return p[x];
+}
+
+
+/* This test has a constant sized alloca that is smaller than the
+ probe interval. But it actually requires two probes instead
+ of one because of the optimistic assumptions we made in the
+ aarch64 prologue code WRT probing state.
+
+ The form can change quite a bit so we just check for two
+ probes without looking at the actual address. */
+/* { dg-final { scan-assembler-times "str\\txzr," 2 } } */
+
+
+
diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-15.c b/gcc/testsuite/gcc.target/aarch64/stack-check-15.c
new file mode 100644
index 00000000000..e06db6dc2f0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/stack-check-15.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=12" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+int t1(int);
+
+int t2(int x)
+{
+ char *p = __builtin_alloca (x);
+ x = t1 (x);
+ return p[x];
+}
+
+
+/* This test has a variable sized alloca. It requires 3 probes.
+ One in the loop, one for the residual, and one at the end of the
+ alloca area.
+
+ The form can change quite a bit so we just check for three
+ probes without looking at the actual address. */
+/* { dg-final { scan-assembler-times "str\\txzr," 3 } } */
+
+
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index aba99513ed0..a8451c98b08 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -5420,14 +5420,9 @@ proc check_effective_target_autoincdec { } {
#
proc check_effective_target_supports_stack_clash_protection { } {
- # Temporary until the target bits are fully ACK'd.
-# if { [istarget aarch*-*-*] } {
-# return 1
-# }
-
if { [istarget x86_64-*-*] || [istarget i?86-*-*]
|| [istarget powerpc*-*-*] || [istarget rs6000*-*-*]
- || [istarget s390*-*-*] } {
+ || [istarget aarch64*-*-*] || [istarget s390*-*-*] } {
return 1
}
return 0

View File

@@ -0,0 +1,58 @@
* config/i386/i386.c (ix86_emit_restore_reg_using_pop): Prototype.
(ix86_adjust_stack_and_probe_stack_clash): Use a push/pop sequence
to probe at the start of a noreturn function.
* gcc.target/i386/stack-check-12.c: New test
diff -Nrup a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
--- a/gcc/config/i386/i386.c 2017-11-03 13:35:17.641528205 -0600
+++ b/gcc/config/i386/i386.c 2017-11-03 13:37:39.489361692 -0600
@@ -64,6 +64,7 @@ along with GCC; see the file COPYING3.
#include "tree-flow.h"
static rtx legitimize_dllimport_symbol (rtx, bool);
+static void ix86_emit_restore_reg_using_pop (rtx);
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
@@ -9884,8 +9885,14 @@ ix86_adjust_stack_and_probe_stack_clash
we just probe when we cross PROBE_INTERVAL. */
if (TREE_THIS_VOLATILE (cfun->decl))
{
- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
- -GET_MODE_SIZE (word_mode)));
+ /* We can safely use any register here since we're just going to push
+ its value and immediately pop it back. But we do try and avoid
+ argument passing registers so as not to introduce dependencies in
+ the pipeline. For 32 bit we use %esi and for 64 bit we use %rax. */
+ rtx dummy_reg = gen_rtx_REG (word_mode, TARGET_64BIT ? AX_REG : SI_REG);
+ rtx insn = emit_insn (gen_push (dummy_reg));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ ix86_emit_restore_reg_using_pop (dummy_reg);
emit_insn (gen_blockage ());
}
diff -Nrup a/gcc/testsuite/gcc.target/i386/stack-check-12.c b/gcc/testsuite/gcc.target/i386/stack-check-12.c
--- a/gcc/testsuite/gcc.target/i386/stack-check-12.c 1969-12-31 17:00:00.000000000 -0700
+++ b/gcc/testsuite/gcc.target/i386/stack-check-12.c 2017-11-03 13:36:15.104055651 -0600
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fomit-frame-pointer" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+__attribute__ ((noreturn)) void exit (int);
+
+__attribute__ ((noreturn)) void
+f (void)
+{
+ asm volatile ("nop" ::: "edi");
+ exit (1);
+}
+
+/* { dg-final { scan-assembler-not "or\[ql\]" } } */
+/* { dg-final { scan-assembler "pushl %esi" { target ia32 } } } */
+/* { dg-final { scan-assembler "popl %esi" { target ia32 } } }*/
+/* { dg-final { scan-assembler "pushq %rax" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "popq %rax" { target { ! ia32 } } } }*/
+

View File

@@ -0,0 +1,191 @@
* config/i386/i386.c (PROBE_INTERVAL): Remove.
(get_probe_interval): New function.
(ix86_adjust_stack_and_probe_stack_clash): Use get_probe_interval.
(ix86_adjust_stack_and_probe): Likewise.
(output_adjust_stack_and_probe): Likewise.
(ix86_emit_probe_stack_range): Likewise.
(ix86_expand_prologue): Likewise.
* gcc.dg/pr82788.c: New test.
diff -Nrup a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
--- a/gcc/config/i386/i386.c 2017-11-06 09:54:43.814921056 -0700
+++ b/gcc/config/i386/i386.c 2017-11-06 09:55:25.327589661 -0700
@@ -9839,7 +9839,17 @@ release_scratch_register_on_entry (struc
}
}
-#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+/* Return the probing interval for -fstack-clash-protection and -fstack-check. */
+
+static HOST_WIDE_INT
+get_probe_interval (void)
+{
+ if (flag_stack_clash_protection)
+ return (HOST_WIDE_INT_1U
+ << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+ else
+ return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
+}
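Both parameters are exponents, so the interval scales in powers of two within the 10..16 range that params.def allows (illustrative values):

/* get_probe_interval () with -fstack-clash-protection enabled:

     --param stack-clash-protection-probe-interval=10  ->  1024 bytes
     --param stack-clash-protection-probe-interval=12  ->  4096 bytes (default)
     --param stack-clash-protection-probe-interval=16  -> 65536 bytes
*/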
/* Emit code to adjust the stack pointer by SIZE bytes while probing it.
@@ -9911,8 +9921,7 @@ ix86_adjust_stack_and_probe_stack_clash
/* We're allocating a large enough stack frame that we need to
emit probes. Either emit them inline or in a loop depending
on the size. */
- HOST_WIDE_INT probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ HOST_WIDE_INT probe_interval = get_probe_interval ();
if (size <= 4 * probe_interval)
{
HOST_WIDE_INT i;
@@ -9921,7 +9930,7 @@ ix86_adjust_stack_and_probe_stack_clash
/* Allocate PROBE_INTERVAL bytes. */
rtx insn
= pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
- GEN_INT (-PROBE_INTERVAL), -1,
+ GEN_INT (-probe_interval), -1,
m->fs.cfa_reg == stack_pointer_rtx);
add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
@@ -10014,7 +10023,7 @@ ix86_adjust_stack_and_probe (const HOST_
that's the easy case. The run-time loop is made up of 11 insns in the
generic case while the compile-time loop is made up of 3+2*(n-1) insns
for n # of intervals. */
- if (size <= 5 * PROBE_INTERVAL)
+ if (size <= 5 * get_probe_interval ())
{
HOST_WIDE_INT i, adjust;
bool first_probe = true;
@@ -10023,15 +10032,15 @@ ix86_adjust_stack_and_probe (const HOST_
values of N from 1 until it exceeds SIZE. If only one probe is
needed, this will not generate any code. Then adjust and probe
to PROBE_INTERVAL + SIZE. */
- for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
+ for (i = get_probe_interval (); i < size; i += get_probe_interval ())
{
if (first_probe)
{
- adjust = 2 * PROBE_INTERVAL + dope;
+ adjust = 2 * get_probe_interval () + dope;
first_probe = false;
}
else
- adjust = PROBE_INTERVAL;
+ adjust = get_probe_interval ();
emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
@@ -10040,9 +10049,9 @@ ix86_adjust_stack_and_probe (const HOST_
}
if (first_probe)
- adjust = size + PROBE_INTERVAL + dope;
+ adjust = size + get_probe_interval () + dope;
else
- adjust = size + PROBE_INTERVAL - i;
+ adjust = size + get_probe_interval () - i;
emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
@@ -10052,7 +10061,8 @@ ix86_adjust_stack_and_probe (const HOST_
/* Adjust back to account for the additional first interval. */
last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
- PROBE_INTERVAL + dope)));
+ (get_probe_interval ()
+ + dope))));
}
/* Otherwise, do the same as above, but in a loop. Note that we must be
@@ -10070,7 +10080,7 @@ ix86_adjust_stack_and_probe (const HOST_
/* Step 1: round SIZE to the previous multiple of the interval. */
- rounded_size = size & -PROBE_INTERVAL;
+ rounded_size = size & -get_probe_interval ();
/* Step 2: compute initial and final value of the loop counter. */
@@ -10078,7 +10088,7 @@ ix86_adjust_stack_and_probe (const HOST_
/* SP = SP_0 + PROBE_INTERVAL. */
emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
- - (PROBE_INTERVAL + dope))));
+ - (get_probe_interval () + dope))));
/* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
emit_move_insn (sr.reg, GEN_INT (-rounded_size));
@@ -10115,7 +10125,8 @@ ix86_adjust_stack_and_probe (const HOST_
/* Adjust back to account for the additional first interval. */
last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
- PROBE_INTERVAL + dope)));
+ (get_probe_interval ()
+ + dope))));
release_scratch_register_on_entry (&sr);
}
@@ -10134,7 +10145,7 @@ ix86_adjust_stack_and_probe (const HOST_
XVECEXP (expr, 0, 1)
= gen_rtx_SET (VOIDmode, stack_pointer_rtx,
plus_constant (Pmode, stack_pointer_rtx,
- PROBE_INTERVAL + dope + size));
+ get_probe_interval () + dope + size));
add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
RTX_FRAME_RELATED_P (last) = 1;
@@ -10168,7 +10179,7 @@ output_adjust_stack_and_probe (rtx reg)
fputc ('\n', asm_out_file);
/* SP = SP + PROBE_INTERVAL. */
- xops[1] = GEN_INT (PROBE_INTERVAL);
+ xops[1] = GEN_INT (get_probe_interval ());
output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
/* Probe at SP. */
@@ -10194,14 +10205,14 @@ ix86_emit_probe_stack_range (HOST_WIDE_I
that's the easy case. The run-time loop is made up of 7 insns in the
generic case while the compile-time loop is made up of n insns for n #
of intervals. */
- if (size <= 7 * PROBE_INTERVAL)
+ if (size <= 7 * get_probe_interval ())
{
HOST_WIDE_INT i;
/* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
it exceeds SIZE. If only one probe is needed, this will not
generate any code. Then probe at FIRST + SIZE. */
- for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
+ for (i = get_probe_interval (); i < size; i += get_probe_interval ())
emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
-(first + i)));
@@ -10224,7 +10235,7 @@ ix86_emit_probe_stack_range (HOST_WIDE_I
/* Step 1: round SIZE to the previous multiple of the interval. */
- rounded_size = size & -PROBE_INTERVAL;
+ rounded_size = size & -get_probe_interval ();
/* Step 2: compute initial and final value of the loop counter. */
@@ -10291,7 +10302,7 @@ output_probe_stack_range (rtx reg, rtx e
fputc ('\n', asm_out_file);
/* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
- xops[1] = GEN_INT (PROBE_INTERVAL);
+ xops[1] = GEN_INT (get_probe_interval ());
output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
/* Probe at TEST_ADDR. */
diff -Nrup a/gcc/testsuite/gcc.dg/pr82788.c b/gcc/testsuite/gcc.dg/pr82788.c
--- a/gcc/testsuite/gcc.dg/pr82788.c 1969-12-31 17:00:00.000000000 -0700
+++ b/gcc/testsuite/gcc.dg/pr82788.c 2017-11-06 09:55:10.680706587 -0700
@@ -0,0 +1,4 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-probe-interval=10 --param stack-clash-protection-guard-size=12" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+int main() { int a[1442]; return 0;}
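The constants in the test are chosen so the frame clearly exceeds the guard while the probe interval is smaller than a page (arithmetic only):

/* a[1442] ints -> 1442 * 4 = 5768 bytes of locals;
   guard size     = 1 << 12 = 4096 bytes  (frame exceeds it, probing needed);
   probe interval = 1 << 10 = 1024 bytes  (several probes at runtime).  */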

View File

@@ -0,0 +1,36 @@
2017-10-31 Segher Boessenkool <segher@kernel.crashing.org>
PR target/82674
* config/rs6000/rs6000.md (allocate_stack): Force update interval
into a register if it does not fit into an immediate offset field.
diff -Nrup a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
--- a/gcc/config/rs6000/rs6000.md 2017-11-06 09:12:21.128237467 -0700
+++ b/gcc/config/rs6000/rs6000.md 2017-11-06 09:13:40.521601373 -0700
@@ -11886,6 +11886,9 @@
{
rtx loop_lab, end_loop;
bool rotated = CONST_INT_P (rounded_size);
+ rtx update = GEN_INT (-probe_interval);
+ if (probe_interval > 32768)
+ update = force_reg (Pmode, update);
emit_stack_clash_protection_probe_loop_start (&loop_lab, &end_loop,
last_addr, rotated);
@@ -11893,13 +11896,11 @@
if (Pmode == SImode)
emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
stack_pointer_rtx,
- GEN_INT (-probe_interval),
- chain));
+ update, chain));
else
emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
stack_pointer_rtx,
- GEN_INT (-probe_interval),
- chain));
+ update, chain));
emit_stack_clash_protection_probe_loop_end (loop_lab, end_loop,
last_addr, rotated);
}
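The 32768 threshold reflects the update forms used by the loop: the displacement of stwu/stdu is a signed 16-bit field (DS-form for stdu, so also 4-byte aligned), and the loop subtracts the interval, so -probe_interval fits directly only up to 32768 (a stand-alone check, illustration only):

/* True when DISP fits the signed 16-bit displacement of stwu/stdu.
   -32768 fits, which is why the patch tests "> 32768" rather than
   ">= 32768".  */
static int
fits_d_field (long long disp)
{
  return disp >= -32768 && disp <= 32767;
}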

View File

@@ -0,0 +1,360 @@
commit 6427208ee82548346a2f42a8ac83fdd2f823fde2
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed Sep 20 04:56:54 2017 +0000
* common.opt (-fstack-clash-protection): New option.
* flag-types.h (enum stack_check_type): Note difference between
-fstack-check= and -fstack-clash-protection.
* params.def (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE): New PARAM.
(PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL): Likewise.
* toplev.c (process_options): Issue warnings/errors for cases
not handled with -fstack-clash-protection.
* doc/invoke.texi (-fstack-clash-protection): Document new option.
(-fstack-check): Note additional problem with -fstack-check=generic.
Note that -fstack-check is primarily for Ada and refer users
to -fstack-clash-protection for stack-clash-protection.
Document new params for stack clash protection.
* gcc.dg/stack-check-2.c: New test.
* lib/target-supports.exp
(check_effective_target_supports_stack_clash_protection): New function.
(check_effective_target_frame_pointer_for_non_leaf): Likewise.
(check_effective_target_caller_implicit_probes): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@252994 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/common.opt b/gcc/common.opt
index 16846c13b62..0c335cb12cd 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1911,13 +1911,18 @@ Common Report Var(flag_variable_expansion_in_unroller) Optimization
Apply variable expansion when loops are unrolled
fstack-check=
-Common Report RejectNegative Joined
--fstack-check=[no|generic|specific] Insert stack checking code into the program
+Common Report RejectNegative Joined Optimization
+-fstack-check=[no|generic|specific] Insert stack checking code into the program.
fstack-check
Common Alias(fstack-check=, specific, no)
Insert stack checking code into the program. Same as -fstack-check=specific
+fstack-clash-protection
+Common Report Var(flag_stack_clash_protection) Optimization
+Insert code to probe each page of stack space as it is allocated to protect
+from stack-clash style attacks.
+
fstack-limit
Common Var(common_deferred_options) Defer
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index f7a15ca190e..313a6c5ff76 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -9406,6 +9406,21 @@ compilation for profile feedback and one for compilation without. The value
for compilation with profile feedback needs to be more conservative (higher) in
order to make tracer effective.
+@item stack-clash-protection-guard-size
+Specify the size of the operating system provided stack guard as
+2 raised to @var{num} bytes. The default value is 12 (4096 bytes).
+Acceptable values are between 12 and 30. Higher values may reduce the
+number of explicit probes, but a value larger than the operating system
+provided guard will leave code vulnerable to stack clash style attacks.
+
+@item stack-clash-protection-probe-interval
+Stack clash protection involves probing stack space as it is allocated. This
+param controls the maximum distance between probes into the stack as 2 raised
+to @var{num} bytes. Acceptable values are between 10 and 16; the default is
+12. Higher values may reduce the number of explicit probes, but a value
+larger than the operating system provided guard will leave code vulnerable to
+stack clash style attacks.
+
@item max-cse-path-length
The maximum number of basic blocks on path that CSE considers.
@@ -20949,7 +20964,8 @@ target support in the compiler but comes with the following drawbacks:
@enumerate
@item
Modified allocation strategy for large objects: they are always
-allocated dynamically if their size exceeds a fixed threshold.
+allocated dynamically if their size exceeds a fixed threshold. Note this
+may change the semantics of some code.
@item
Fixed limit on the size of the static frame of functions: when it is
@@ -20964,6 +20980,27 @@ generic implementation, code performance is hampered.
Note that old-style stack checking is also the fallback method for
@code{specific} if no target support has been added in the compiler.
+@samp{-fstack-check=} is designed for Ada's needs to detect infinite recursion
+and stack overflows. @samp{specific} is an excellent choice when compiling
+Ada code. It is not generally sufficient to protect against stack-clash
+attacks. To protect against those you want @samp{-fstack-clash-protection}.
+
+@item -fstack-clash-protection
+@opindex fstack-clash-protection
+Generate code to prevent stack clash style attacks. When this option is
+enabled, the compiler will only allocate one page of stack space at a time
+and each page is accessed immediately after allocation. Thus, it prevents
+allocations from jumping over any stack guard page provided by the
+operating system.
+
+Most targets do not fully support stack clash protection. However, on
+those targets @option{-fstack-clash-protection} will protect dynamic stack
+allocations. @option{-fstack-clash-protection} may also provide limited
+protection for static stack allocations if the target supports
+@option{-fstack-check=specific}.
+
+
+
@item -fstack-limit-register=@var{reg}
@itemx -fstack-limit-symbol=@var{sym}
@itemx -fno-stack-limit
diff --git a/gcc/flag-types.h b/gcc/flag-types.h
index 4fc5d33348e..21e943d38fa 100644
--- a/gcc/flag-types.h
+++ b/gcc/flag-types.h
@@ -139,7 +139,14 @@ enum excess_precision
EXCESS_PRECISION_STANDARD
};
-/* Type of stack check. */
+/* Type of stack check.
+
+ Stack checking is designed to detect infinite recursion and stack
+ overflows for Ada programs. Furthermore, in that scenario, stack checking
+ tries to ensure that enough stack space is left to run a signal handler.
+
+ -fstack-check= does not prevent stack-clash style attacks. For that
+ you want -fstack-clash-protection. */
enum stack_check_type
{
/* Do not check the stack. */
diff --git a/gcc/params.def b/gcc/params.def
index e51b847a7c4..e668624b0cb 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -208,6 +208,16 @@ DEFPARAM(PARAM_STACK_FRAME_GROWTH,
"Maximal stack frame growth due to inlining (in percent)",
1000, 0, 0)
+DEFPARAM(PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
+ "stack-clash-protection-guard-size",
+ "Size of the stack guard expressed as a power of two.",
+ 12, 12, 30)
+
+DEFPARAM(PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
+ "stack-clash-protection-probe-interval",
+ "Interval in which to probe the stack expressed as a power of two.",
+ 12, 10, 16)
+
/* The GCSE optimization will be disabled if it would require
significantly more memory than this value. */
DEFPARAM(PARAM_MAX_GCSE_MEMORY,
diff --git a/gcc/testsuite/gcc.dg/stack-check-2.c b/gcc/testsuite/gcc.dg/stack-check-2.c
new file mode 100644
index 00000000000..196c4bbfbdd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/stack-check-2.c
@@ -0,0 +1,66 @@
+/* The goal here is to ensure that we never consider a call to a noreturn
+ function as a potential tail call.
+
+ Right now GCC discovers potential tail calls by looking at the
+ predecessors of the exit block. A call to a non-return function
+ has no successors and thus can never match that first filter.
+
+ But that could change one day and we want to catch it. The problem
+ is the compiler could potentially optimize a tail call to a nonreturn
+ function, even if the caller has a frame. That breaks the assumption
+ that calls probe *sp when saving the return address that some targets
+ depend on to elide stack probes. */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -fdump-tree-tailc -fdump-tree-optimized" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+extern void foo (void) __attribute__ ((__noreturn__));
+
+
+void
+test_direct_1 (void)
+{
+ foo ();
+}
+
+void
+test_direct_2 (void)
+{
+ return foo ();
+}
+
+void (*indirect)(void) __attribute__ ((noreturn));
+
+
+void
+test_indirect_1 ()
+{
+ (*indirect)();
+}
+
+void
+test_indirect_2 (void)
+{
+ return (*indirect)();
+}
+
+
+typedef void (*pvfn)() __attribute__ ((noreturn));
+
+void (*indirect_casted)(void);
+
+void
+test_indirect_casted_1 ()
+{
+ (*(pvfn)indirect_casted)();
+}
+
+void
+test_indirect_casted_2 (void)
+{
+ return (*(pvfn)indirect_casted)();
+}
+/* { dg-final { scan-tree-dump-not "tail call" "tailc" } } */
+/* { dg-final { scan-tree-dump-not "tail call" "optimized" } } */
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index ef371ad7efd..821cea9cb33 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -5392,3 +5392,95 @@ proc check_effective_target_fenv_exceptions {} {
}
} "-std=gnu99"]
}
+
+# Return 1 if the target supports the auto_inc_dec optimization pass.
+proc check_effective_target_autoincdec { } {
+ if { ![check_no_compiler_messages auto_incdec assembly { void f () { }
+ } "-O2 -fdump-rtl-auto_inc_dec" ] } {
+ return 0
+ }
+
+ set dumpfile [glob -nocomplain "auto_incdec[pid].c.\[0-9\]\[0-9\]\[0-9\]r.auto_inc_dec"]
+ if { [file exists $dumpfile ] } {
+ file delete $dumpfile
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the target has support for stack probing designed
+# to avoid stack-clash style attacks.
+#
+# This is used to restrict the stack-clash mitigation tests to
+# just those targets that have been explicitly supported.
+#
+# In addition to the prologue work on those targets, each target's
+# properties should be described in the functions below so that
+# tests do not become a mess of unreadable target conditions.
+#
+proc check_effective_target_supports_stack_clash_protection { } {
+
+ # Temporary until the target bits are fully ACK'd.
+# if { [istarget aarch*-*-*] || [istarget x86_64-*-*]
+# || [istarget i?86-*-*] || [istarget s390*-*-*]
+# || [istarget powerpc*-*-*] || [istarget rs6000*-*-*] } {
+# return 1
+# }
+ return 0
+}
+
+# Return 1 if the target creates a frame pointer for non-leaf functions
+# Note we ignore cases where we apply tail call optimization here.
+proc check_effective_target_frame_pointer_for_non_leaf { } {
+ if { [istarget aarch*-*-*] } {
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the target's calling sequence or its ABI
+# create implicit stack probes at or prior to function entry.
+proc check_effective_target_caller_implicit_probes { } {
+
+ # On x86/x86_64 the call instruction itself pushes the return
+ # address onto the stack. That is an implicit probe of *sp.
+ if { [istarget x86_64-*-*] || [istarget i?86-*-*] } {
+ return 1
+ }
+
+ # On PPC, the ABI mandates that the address of the outer
+ # frame be stored at *sp. Thus each allocation of stack
+ # space is itself an implicit probe of *sp.
+ if { [istarget powerpc*-*-*] || [istarget rs6000*-*-*] } {
+ return 1
+ }
+
+ # s390's ABI has a register save area allocated by the
+ # caller for use by the callee. The mere existence does
+ # not constitute a probe by the caller, but when the slots are
+ # used by the callee those stores are implicit probes.
+ if { [istarget s390*-*-*] } {
+ return 1
+ }
+
+ # Not strictly true on aarch64, but we have agreed that we will
+ # consider any function that pushes SP more than 3kbytes into
+ # the guard page as broken. This essentially means that we can
+ # consider the aarch64 as having a caller implicit probe at
+ # *(sp + 1k).
+ if { [istarget aarch64*-*-*] } {
+ return 1;
+ }
+
+ return 0
+}
+
+# Targets that potentially realign the stack pointer often cause residual
+# stack allocations and make it difficult to eliminate loops or residual
+# allocations for dynamic stack allocations.
+proc check_effective_target_callee_realigns_stack { } {
+ if { [istarget x86_64-*-*] || [istarget i?86-*-*] } {
+ return 1
+ }
+ return 0
+}
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 26f2ffb362c..1def163f8b9 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -1520,6 +1520,28 @@ process_options (void)
flag_associative_math = 0;
}
+#ifndef STACK_GROWS_DOWNWARD
+ /* -fstack-clash-protection is not currently supported on targets
+ where the stack grows up. */
+ if (flag_stack_clash_protection)
+ {
+ warning_at (UNKNOWN_LOCATION, 0,
+ "%<-fstack-clash-protection%> is not supported on targets "
+ "where the stack grows from lower to higher addresses");
+ flag_stack_clash_protection = 0;
+ }
+#endif
+
+ /* We can not support -fstack-check= and -fstack-clash-protection at
+ the same time. */
+ if (flag_stack_check != NO_STACK_CHECK && flag_stack_clash_protection)
+ {
+ warning_at (UNKNOWN_LOCATION, 0,
+ "%<-fstack-check=%> and %<-fstack-clash_protection%> are "
+ "mutually exclusive. Disabling %<-fstack-check=%>");
+ flag_stack_check = NO_STACK_CHECK;
+ }
+
/* With -fcx-limited-range, we do cheap and quick complex arithmetic. */
if (flag_cx_limited_range)
flag_complex_method = 0;

View File

@@ -0,0 +1,37 @@
* explow.c (anti_adjust_stack_and_probe_stack_clash): Avoid probing
the red zone for stack_clash_protection_final_dynamic_probe targets
when the total dynamic stack size is zero bytes.
diff -Nrup a/gcc/explow.c b/gcc/explow.c
--- a/gcc/explow.c 2017-11-14 23:33:15.403557607 -0700
+++ b/gcc/explow.c 2017-11-14 23:33:56.243239120 -0700
@@ -1934,6 +1934,13 @@ anti_adjust_stack_and_probe_stack_clash
if (size != CONST0_RTX (Pmode)
&& targetm.stack_clash_protection_final_dynamic_probe (residual))
{
+ /* SIZE could be zero at runtime and in that case *sp could hold
+ live data. Furthermore, we don't want to probe into the red
+ zone.
+
+ Go ahead and just guard a probe at *sp on SIZE != 0 at runtime
+ if SIZE is not a compile time constant. */
+
/* Ideally we would just probe at *sp. However, if SIZE is not
a compile-time constant, but is zero at runtime, then *sp
might hold live data. So probe at *sp if we know that
@@ -1946,9 +1953,12 @@ anti_adjust_stack_and_probe_stack_clash
}
else
{
- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
- -GET_MODE_SIZE (word_mode)));
+ rtx label = gen_label_rtx ();
+ emit_cmp_and_jump_insns (size, CONST0_RTX (GET_MODE (size)),
+ EQ, NULL_RTX, Pmode, 1, label);
+ emit_stack_probe (stack_pointer_rtx);
emit_insn (gen_blockage ());
+ emit_label (label);
}
}
}
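In C terms, the guarded probe emitted by the new else-arm has this shape (a rendering of the RTL, not literal GCC code; the probe is modeled as a store through SP):

#include <stddef.h>

/* C rendering of the emitted sequence for a runtime-variable SIZE
   (illustration).  */
static void
guarded_final_probe (size_t size, volatile char *sp)
{
  if (size != 0)   /* emit_cmp_and_jump_insns: branch around when zero */
    *sp = 0;       /* emit_stack_probe (stack_pointer_rtx); the
                      gen_blockage () call keeps it from being moved  */
}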

View File

@@ -0,0 +1,144 @@
PR middle-end/83654
* explow.c (anti_adjust_stack_and_probe_stack_clash): Test a
non-constant residual for zero at runtime and avoid probing in
that case. Reorganize code for the trailing probe to mirror handling
of the residual.
PR middle-end/83654
* gcc.target/i386/stack-check-18.c: New test.
* gcc.target/i386/stack-check-19.c: New test.
diff --git a/gcc/explow.c b/gcc/explow.c
index b6c56602152..042e71904ec 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -1997,11 +1997,27 @@ anti_adjust_stack_and_probe_stack_clash (rtx size)
if (residual != CONST0_RTX (Pmode))
{
+ rtx label = NULL_RTX;
+ /* RESIDUAL could be zero at runtime and in that case *sp could
+ hold live data. Furthermore, we do not want to probe into the
+ red zone.
+
+ Go ahead and just guard the probe at *sp on RESIDUAL != 0 at
+ runtime if RESIDUAL is not a compile time constant. */
+ if (!CONST_INT_P (residual))
+ {
+ label = gen_label_rtx ();
+ emit_cmp_and_jump_insns (residual, CONST0_RTX (GET_MODE (residual)),
+ EQ, NULL_RTX, Pmode, 1, label);
+ }
+
rtx x = force_reg (Pmode, plus_constant (Pmode, residual,
-GET_MODE_SIZE (word_mode)));
anti_adjust_stack (residual);
emit_stack_probe (gen_rtx_PLUS (Pmode, stack_pointer_rtx, x));
emit_insn (gen_blockage ());
+ if (!CONST_INT_P (residual))
+ emit_label (label);
}
/* Some targets make optimistic assumptions in their prologues about
@@ -2014,28 +2030,20 @@ anti_adjust_stack_and_probe_stack_clash (rtx size)
live data. Furthermore, we don't want to probe into the red
zone.
- Go ahead and just guard a probe at *sp on SIZE != 0 at runtime
+ Go ahead and just guard the probe at *sp on SIZE != 0 at runtime
if SIZE is not a compile time constant. */
-
- /* Ideally we would just probe at *sp. However, if SIZE is not
- a compile-time constant, but is zero at runtime, then *sp
- might hold live data. So probe at *sp if we know that
- an allocation was made, otherwise probe into the red zone
- which is obviously undesirable. */
- if (CONST_INT_P (size))
- {
- emit_stack_probe (stack_pointer_rtx);
- emit_insn (gen_blockage ());
- }
- else
+ rtx label = NULL_RTX;
+ if (!CONST_INT_P (size))
{
- rtx label = gen_label_rtx ();
+ label = gen_label_rtx ();
emit_cmp_and_jump_insns (size, CONST0_RTX (GET_MODE (size)),
EQ, NULL_RTX, Pmode, 1, label);
- emit_stack_probe (stack_pointer_rtx);
- emit_insn (gen_blockage ());
- emit_label (label);
}
+
+ emit_stack_probe (stack_pointer_rtx);
+ emit_insn (gen_blockage ());
+ if (!CONST_INT_P (size))
+ emit_label (label);
}
}
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-18.c b/gcc/testsuite/gcc.target/i386/stack-check-18.c
new file mode 100644
index 00000000000..6dbff4402da
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/stack-check-18.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fdump-rtl-expand" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+int f1 (char *);
+
+int
+f2 (void)
+{
+ const int size = 4096;
+ char buffer[size];
+ return f1 (buffer);
+}
+
+/* So we want to verify that at expand time we probed the main
+ VLA allocation as well as the residuals. Then we want to verify
+ there was only one probe in the final assembly (implying the
+ residual probe was optimized away). */
+/* { dg-final { scan-rtl-dump-times "allocation and probing in loop" 1 "expand" } } */
+/* { dg-final { scan-rtl-dump-times "allocation and probing residuals" 1 "expand" } } */
+
+/* { dg-final { scan-assembler-times "or\[ql\]" 1 } } */
+
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-19.c b/gcc/testsuite/gcc.target/i386/stack-check-19.c
new file mode 100644
index 00000000000..b92c126d57f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/stack-check-19.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fdump-rtl-expand" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+int f1 (char *);
+
+int
+f2 (const int size)
+{
+ char buffer[size];
+ return f1 (buffer);
+}
+
+/* So we want to verify that at expand time we probed the main
+ VLA allocation as well as the residuals. Then we want to verify
+ there are two probes in the final assembly code. */
+/* { dg-final { scan-rtl-dump-times "allocation and probing in loop" 1 "expand" } } */
+/* { dg-final { scan-rtl-dump-times "allocation and probing residuals" 1 "expand" } } */
+/* { dg-final { scan-assembler-times "or\[ql\]" 2 } } */
+
+/* We also want to verify (indirectly) that the residual probe is
+ guarded. We do that by checking the number of conditional
+ branches. There should be 3. One that bypasses the probe loop, one
+ in the probe loop and one that bypasses the residual probe.
+
+ These will all be equality tests. */
+/* { dg-final { scan-assembler-times "(\?:je|jne)" 3 } } */
+
+

View File

@@ -0,0 +1,118 @@
diff -Nrup a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
--- a/gcc/config/i386/i386.c 2018-01-03 16:10:46.278171086 -0700
+++ b/gcc/config/i386/i386.c 2018-01-03 16:12:32.022220166 -0700
@@ -9862,14 +9862,13 @@ static void
ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
{
struct machine_function *m = cfun->machine;
+ struct ix86_frame frame;
+ ix86_compute_frame_layout (&frame);
/* If this function does not statically allocate stack space, then
no probes are needed. */
if (!size)
{
- struct ix86_frame frame;
- ix86_compute_frame_layout (&frame);
-
/* However, the allocation of space via pushes for register
saves could be viewed as allocating space, but without the
need to probe. */
@@ -9888,21 +9887,40 @@ ix86_adjust_stack_and_probe_stack_clash
pointer could be anywhere in the guard page. The safe thing
to do is emit a probe now.
+ The probe can be avoided if we have already emitted any callee
+ register saves into the stack or have a frame pointer (which will
+ have been saved as well). Those saves will function as implicit
+ probes.
+
?!? This should be revamped to work like aarch64 and s390 where
we track the offset from the most recent probe. Normally that
offset would be zero. For a non-return function we would reset
it to PROBE_INTERVAL - (STACK_BOUNDARY / BITS_PER_UNIT). Then
we just probe when we cross PROBE_INTERVAL. */
- if (TREE_THIS_VOLATILE (cfun->decl))
+ if (TREE_THIS_VOLATILE (cfun->decl)
+ && !(frame.nregs || frame.nsseregs || frame_pointer_needed))
+
{
/* We can safely use any register here since we're just going to push
its value and immediately pop it back. But we do try and avoid
argument passing registers so as not to introduce dependencies in
the pipeline. For 32 bit we use %esi and for 64 bit we use %rax. */
rtx dummy_reg = gen_rtx_REG (word_mode, TARGET_64BIT ? AX_REG : SI_REG);
- rtx insn = emit_insn (gen_push (dummy_reg));
- RTX_FRAME_RELATED_P (insn) = 1;
- ix86_emit_restore_reg_using_pop (dummy_reg);
+ rtx insn_push = emit_insn (gen_push (dummy_reg));
+ rtx insn_pop = emit_insn (gen_pop (dummy_reg));
+ m->fs.sp_offset -= UNITS_PER_WORD;
+ if (m->fs.cfa_reg == stack_pointer_rtx)
+ {
+ m->fs.cfa_offset -= UNITS_PER_WORD;
+ rtx x = plus_constant (Pmode, stack_pointer_rtx, -UNITS_PER_WORD);
+ x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
+ add_reg_note (insn_push, REG_CFA_ADJUST_CFA, x);
+ RTX_FRAME_RELATED_P (insn_push) = 1;
+ x = plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD);
+ x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
+ add_reg_note (insn_pop, REG_CFA_ADJUST_CFA, x);
+ RTX_FRAME_RELATED_P (insn_pop) = 1;
+ }
emit_insn (gen_blockage ());
}
diff -Nrup a/gcc-4.8.5-20150702/gcc/testsuite/gcc.target/i386/stack-check-12.c b/gcc-4.8.5-20150702/gcc/testsuite/gcc.target/i386/stack-check-12.c
--- gcc-4.8.5-20150702/gcc/testsuite/gcc.target/i386/stack-check-12.c 2018-01-03 15:42:40.849530670 -0700
+++ gcc-4.8.5-20150702/gcc/testsuite/gcc.target/i386/stack-check-12.c 2018-01-03 15:36:12.528488596 -0700
@@ -7,7 +7,6 @@ __attribute__ ((noreturn)) void exit (in
__attribute__ ((noreturn)) void
f (void)
{
- asm volatile ("nop" ::: "edi");
exit (1);
}
diff -Nrup a/gcc-4.8.5-20150702/gcc/testsuite/gcc.target/i386/stack-check-17.c b/gcc-4.8.5-20150702/gcc/testsuite/gcc.target/i386/stack-check-17.c
--- gcc-4.8.5-20150702/gcc/testsuite/gcc.target/i386/stack-check-17.c 1969-12-31 17:00:00.000000000 -0700
+++ gcc-4.8.5-20150702/gcc/testsuite/gcc.target/i386/stack-check-17.c 2018-01-03 15:36:12.528488596 -0700
@@ -0,0 +1,37 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fomit-frame-pointer" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+
+int x0, x1;
+void f1 (void);
+void f2 (int, int);
+
+__attribute__ ((noreturn))
+void
+f3 (void)
+{
+ int y0 = x0;
+ int y1 = x1;
+ f1 ();
+ f2 (y0, y1);
+ while (1);
+}
+
+/* Verify no explicit probes. */
+/* { dg-final { scan-assembler-not "or\[ql\]" } } */
+
+/* We also want to verify we did not use a push/pop sequence
+ to probe *sp as the callee register saves are sufficient
+ to probe *sp.
+
+ y0/y1 are live across the call and thus must be allocated
+ into either a stack slot or callee saved register. The former
+ would be rather dumb. So assume it does not happen.
+
+ So search for two/four pushes for the callee register saves/argument
+ pushes and no pops (since the function has no reachable epilogue). */
+/* { dg-final { scan-assembler-times "push\[ql\]" 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "push\[ql\]" 4 { target { ia32 } } } } */
+/* { dg-final { scan-assembler-not "pop" } } */
+

View File

@@ -0,0 +1,52 @@
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-17.c b/gcc/testsuite/gcc.target/i386/stack-check-17.c
index d2ef83b..dcd2930 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-17.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-17.c
@@ -29,9 +29,11 @@ f3 (void)
into either a stack slot or callee saved register. The former
would be rather dumb. So assume it does not happen.
- So search for two/four pushes for the callee register saves/argument
- pushes and no pops (since the function has no reachable epilogue). */
-/* { dg-final { scan-assembler-times "push\[ql\]" 2 { target { ! ia32 } } } } */
-/* { dg-final { scan-assembler-times "push\[ql\]" 4 { target { ia32 } } } } */
+ So search for two pushes for the callee register saves pushes
+ and no pops (since the function has no reachable epilogue).
+
+ This is slightly different than upstream because the code we
+ generate for argument setup is slightly different. */
+/* { dg-final { scan-assembler-times "push\[ql\]" 2 } } */
/* { dg-final { scan-assembler-not "pop" } } */
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-18.c b/gcc/testsuite/gcc.target/i386/stack-check-18.c
index 6dbff44..1638f77 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-18.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-18.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fdump-rtl-expand" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
int f1 (char *);
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-19.c b/gcc/testsuite/gcc.target/i386/stack-check-19.c
index b92c126..c341801 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-19.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-19.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fdump-rtl-expand" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
int f1 (char *);
@@ -24,6 +25,6 @@ f2 (const int size)
in the probe loop and one that bypasses the residual probe.
These will all be equality tests. */
-/* { dg-final { scan-assembler-times "(\?:je|jne)" 3 } } */
+/* { dg-final { scan-assembler-times "(\?:jmp|je|jne)" 3 } } */

@@ -0,0 +1,600 @@
commit a3e2ba88eb09c1eed2f7ed6e17660b345464bb90
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed Sep 20 05:05:12 2017 +0000
2017-09-18 Jeff Law <law@redhat.com>
* explow.c: Include "params.h" and "dumpfile.h".
(anti_adjust_stack_and_probe_stack_clash): New function.
(get_stack_check_protect): Likewise.
(compute_stack_clash_protection_loop_data): Likewise.
(emit_stack_clash_protection_loop_start): Likewise.
(emit_stack_clash_protection_loop_end): Likewise.
(allocate_dynamic_stack_space): Use get_stack_check_protect.
Use anti_adjust_stack_and_probe_stack_clash.
* explow.h (compute_stack_clash_protection_loop_data): Prototype.
(emit_stack_clash_protection_loop_start): Likewise.
(emit_stack_clash_protection_loop_end): Likewise.
* rtl.h (get_stack_check_protect): Prototype.
* target.def (stack_clash_protection_final_dynamic_probe): New hook.
* targhooks.c (default_stack_clash_protection_final_dynamic_probe): New.
* targhooks.h (default_stack_clash_protection_final_dynamic_probe):
Prototype.
* doc/tm.texi.in (TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE):
Add @hook.
* doc/tm.texi: Rebuilt.
* config/alpha/alpha.c (alpha_expand_prologue): Likewise.
* config/i386/i386.c (ix86_expand_prologue): Likewise.
* config/ia64/ia64.c (ia64_expand_prologue): Likewise.
* config/mips/mips.c (mips_expand_prologue): Likewise.
* config/rs6000/rs6000.c (rs6000_emit_prologue): Likewise.
* config/sparc/sparc.c (sparc_expand_prologue): Likewise.
(sparc_flat_expand_prologue): Likewise.
* gcc.dg/stack-check-3.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@252995 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index 2874b8454a9..5402f5213d6 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -7625,7 +7625,7 @@ alpha_expand_prologue (void)
probed_size = frame_size;
if (flag_stack_check)
- probed_size += STACK_CHECK_PROTECT;
+ probed_size += get_stack_check_protect ();
if (probed_size <= 32768)
{
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index e36726ba722..d996fd160e8 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -10544,12 +10544,12 @@ ix86_expand_prologue (void)
HOST_WIDE_INT size = allocate;
if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
- size = 0x80000000 - STACK_CHECK_PROTECT - 1;
+ size = 0x80000000 - get_stack_check_protect () - 1;
if (TARGET_STACK_PROBE)
- ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
+ ix86_emit_probe_stack_range (0, size + get_stack_check_protect ());
else
- ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ ix86_emit_probe_stack_range (get_stack_check_protect (), size);
}
}
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 50bbad6661c..390983936e8 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -3435,7 +3435,7 @@ ia64_expand_prologue (void)
current_function_static_stack_size = current_frame_info.total_size;
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
- ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
+ ia64_emit_probe_stack_range (get_stack_check_protect (),
current_frame_info.total_size,
current_frame_info.n_input_regs
+ current_frame_info.n_local_regs);
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 41c5d6b6b1f..9b7eb678f19 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -10746,7 +10746,7 @@ mips_expand_prologue (void)
current_function_static_stack_size = size;
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
- mips_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ mips_emit_probe_stack_range (get_stack_check_protect (), size);
/* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
bytes beforehand; this is enough to cover the register save area
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 15583055895..a9052c6becf 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -23214,7 +23214,8 @@ rs6000_emit_prologue (void)
current_function_static_stack_size = info->total_size;
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
- rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
+ rs6000_emit_probe_stack_range (get_stack_check_protect (),
+ info->total_size);
if (TARGET_FIX_AND_CONTINUE)
{
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index e5d326cdf23..e5e93c80261 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -5431,7 +5431,7 @@ sparc_expand_prologue (void)
current_function_static_stack_size = size;
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
- sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ sparc_emit_probe_stack_range (get_stack_check_protect (), size);
if (size == 0)
; /* do nothing. */
@@ -5533,7 +5533,7 @@ sparc_flat_expand_prologue (void)
current_function_static_stack_size = size;
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
- sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ sparc_emit_probe_stack_range (get_stack_check_protect (), size);
if (sparc_save_local_in_regs_p)
emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index 6b18a2724bc..eeef757bf5b 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -3571,6 +3571,10 @@ GCC computed the default from the values of the above macros and you will
normally not need to override that default.
@end defmac
+@deftypefn {Target Hook} bool TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE (rtx @var{residual})
+Some targets make optimistic assumptions about the state of stack probing when they emit their prologues. On such targets a probe into the end of any dynamically allocated space is likely required for safety against stack clash style attacks. Define this hook to return nonzero if such a probe is required or zero otherwise. You need not define this hook if it would always return zero.
+@end deftypefn
+
@need 2000
@node Frame Registers
@subsection Registers That Address the Stack Frame
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index 7d0b3c73b2f..6707ca87236 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -3539,6 +3539,8 @@ GCC computed the default from the values of the above macros and you will
normally not need to override that default.
@end defmac
+@hook TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE
+
@need 2000
@node Frame Registers
@subsection Registers That Address the Stack Frame
diff --git a/gcc/explow.c b/gcc/explow.c
index 7da8bc75f19..2526e8513b7 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -40,8 +40,11 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "common/common-target.h"
#include "output.h"
+#include "params.h"
+#include "dumpfile.h"
static rtx break_out_memory_refs (rtx);
+static void anti_adjust_stack_and_probe_stack_clash (rtx);
/* Truncate and perhaps sign-extend C as appropriate for MODE. */
@@ -1140,6 +1143,29 @@ update_nonlocal_goto_save_area (void)
emit_stack_save (SAVE_NONLOCAL, &r_save);
}
+/* Return the number of bytes to "protect" on the stack for -fstack-check.
+
+ "protect" in the context of -fstack-check means how many bytes we
+ should always ensure are available on the stack. More importantly
+ this is how many bytes are skipped when probing the stack.
+
+ On some targets we want to reuse the -fstack-check prologue support
+ to give a degree of protection against stack clashing style attacks.
+
+ In that scenario we do not want to skip bytes before probing as that
+ would render the stack clash protections useless.
+
+ So we never use STACK_CHECK_PROTECT directly. Instead we indirect through
+ this helper which allows us to provide different values for
+ -fstack-check and -fstack-clash-protection. */
+HOST_WIDE_INT
+get_stack_check_protect (void)
+{
+ if (flag_stack_clash_protection)
+ return 0;
+ return STACK_CHECK_PROTECT;
+}
+
/* Return an rtx representing the address of an area of memory dynamically
pushed on the stack.
@@ -1393,7 +1419,7 @@ allocate_dynamic_stack_space (rtx size, unsigned size_align,
probe_stack_range (STACK_OLD_CHECK_PROTECT + STACK_CHECK_MAX_FRAME_SIZE,
size);
else if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
- probe_stack_range (STACK_CHECK_PROTECT, size);
+ probe_stack_range (get_stack_check_protect (), size);
/* Don't let anti_adjust_stack emit notes. */
suppress_reg_args_size = true;
@@ -1451,6 +1477,8 @@ allocate_dynamic_stack_space (rtx size, unsigned size_align,
if (flag_stack_check && STACK_CHECK_MOVING_SP)
anti_adjust_stack_and_probe (size, false);
+ else if (flag_stack_clash_protection)
+ anti_adjust_stack_and_probe_stack_clash (size);
else
anti_adjust_stack (size);
@@ -1712,6 +1740,219 @@ probe_stack_range (HOST_WIDE_INT first, rtx size)
}
}
+/* Compute parameters for stack clash probing a dynamic stack
+ allocation of SIZE bytes.
+
+ We compute ROUNDED_SIZE, LAST_ADDR, RESIDUAL and PROBE_INTERVAL.
+
+ Additionally we conditionally dump the type of probing that will
+ be needed given the values computed. */
+
+void
+compute_stack_clash_protection_loop_data (rtx *rounded_size, rtx *last_addr,
+ rtx *residual,
+ HOST_WIDE_INT *probe_interval,
+ rtx size)
+{
+ /* Round SIZE down to STACK_CLASH_PROTECTION_PROBE_INTERVAL */
+ *probe_interval
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ *rounded_size = simplify_gen_binary (AND, Pmode, size,
+ GEN_INT (-*probe_interval));
+
+ /* Compute the value of the stack pointer for the last iteration.
+ It's just SP + ROUNDED_SIZE. */
+ rtx rounded_size_op = force_operand (*rounded_size, NULL_RTX);
+ *last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
+ stack_pointer_rtx,
+ rounded_size_op),
+ NULL_RTX);
+
+ /* Compute any residuals not allocated by the loop above. Residuals
+ are just SIZE - ROUNDED_SIZE. */
+ *residual = simplify_gen_binary (MINUS, Pmode, size, *rounded_size);
+
+ /* Dump key information to make writing tests easy. */
+ if (dump_file)
+ {
+ if (*rounded_size == CONST0_RTX (Pmode))
+ fprintf (dump_file,
+ "Stack clash skipped dynamic allocation and probing loop.\n");
+ else if (GET_CODE (*rounded_size) == CONST_INT
+ && INTVAL (*rounded_size) <= 4 * *probe_interval)
+ fprintf (dump_file,
+ "Stack clash dynamic allocation and probing inline.\n");
+ else if (GET_CODE (*rounded_size) == CONST_INT)
+ fprintf (dump_file,
+ "Stack clash dynamic allocation and probing in "
+ "rotated loop.\n");
+ else
+ fprintf (dump_file,
+ "Stack clash dynamic allocation and probing in loop.\n");
+
+ if (*residual != CONST0_RTX (Pmode))
+ fprintf (dump_file,
+ "Stack clash dynamic allocation and probing residuals.\n");
+ else
+ fprintf (dump_file,
+ "Stack clash skipped dynamic allocation and "
+ "probing residuals.\n");
+ }
+}
+
+/* Emit the start of an allocate/probe loop for stack
+ clash protection.
+
+ LOOP_LAB and END_LAB are returned for use when we emit the
+ end of the loop.
+
+ LAST_ADDR is the value for SP which stops the loop. */
+void
+emit_stack_clash_protection_probe_loop_start (rtx *loop_lab,
+ rtx *end_lab,
+ rtx last_addr,
+ bool rotated)
+{
+ /* Essentially we want to emit any setup code, the top of loop
+ label and the comparison at the top of the loop. */
+ *loop_lab = gen_label_rtx ();
+ *end_lab = gen_label_rtx ();
+
+ emit_label (*loop_lab);
+ if (!rotated)
+ emit_cmp_and_jump_insns (stack_pointer_rtx, last_addr, EQ, NULL_RTX,
+ Pmode, 1, *end_lab);
+}
+
+/* Emit the end of a stack clash probing loop.
+
+ This consists of just the jump back to LOOP_LAB and
+ emitting END_LOOP after the loop. */
+
+void
+emit_stack_clash_protection_probe_loop_end (rtx loop_lab, rtx end_loop,
+ rtx last_addr, bool rotated)
+{
+ if (rotated)
+ emit_cmp_and_jump_insns (stack_pointer_rtx, last_addr, NE, NULL_RTX,
+ Pmode, 1, loop_lab);
+ else
+ emit_jump (loop_lab);
+
+ emit_label (end_loop);
+
+}
+
+/* Adjust the stack pointer by minus SIZE (an rtx for a number of bytes)
+ while probing it. This pushes when SIZE is positive. SIZE need not
+ be constant.
+
+ This is subtly different from anti_adjust_stack_and_probe, in order to
+ prevent stack-clash attacks:
+
+ 1. It must assume no knowledge of the probing state, any allocation
+ must probe.
+
+ Consider the case of a 1 byte alloca in a loop. If the sum of the
+ allocations is large, then this could be used to jump the guard if
+ probes were not emitted.
+
+ 2. It never skips probes, whereas anti_adjust_stack_and_probe will
+ skip probes on the first couple PROBE_INTERVALs on the assumption
+ they're done elsewhere.
+
+ 3. It only allocates and probes SIZE bytes, it does not need to
+ allocate/probe beyond that because this probing style does not
+ guarantee signal handling capability if the guard is hit. */
+
+static void
+anti_adjust_stack_and_probe_stack_clash (rtx size)
+{
+ /* First ensure SIZE is Pmode. */
+ if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode)
+ size = convert_to_mode (Pmode, size, 1);
+
+ /* We can get here with a constant size on some targets. */
+ rtx rounded_size, last_addr, residual;
+ HOST_WIDE_INT probe_interval;
+ compute_stack_clash_protection_loop_data (&rounded_size, &last_addr,
+ &residual, &probe_interval, size);
+
+ if (rounded_size != CONST0_RTX (Pmode))
+ {
+ if (INTVAL (rounded_size) <= 4 * probe_interval)
+ {
+ for (HOST_WIDE_INT i = 0;
+ i < INTVAL (rounded_size);
+ i += probe_interval)
+ {
+ anti_adjust_stack (GEN_INT (probe_interval));
+
+ /* The prologue does not probe residuals. Thus the offset
+ here is chosen to probe just beyond what the prologue had
+ already allocated. */
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ (probe_interval
+ - GET_MODE_SIZE (word_mode))));
+ emit_insn (gen_blockage ());
+ }
+ }
+ else
+ {
+ rtx loop_lab, end_loop;
+ bool rotate_loop = GET_CODE (rounded_size) == CONST_INT;
+ emit_stack_clash_protection_probe_loop_start (&loop_lab, &end_loop,
+ last_addr, rotate_loop);
+
+ anti_adjust_stack (GEN_INT (probe_interval));
+
+ /* The prologue does not probe residuals. Thus the offset here
+ is chosen to probe just beyond what the prologue had already allocated. */
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ (probe_interval
+ - GET_MODE_SIZE (word_mode))));
+
+ emit_stack_clash_protection_probe_loop_end (loop_lab, end_loop,
+ last_addr, rotate_loop);
+ emit_insn (gen_blockage ());
+ }
+ }
+
+ if (residual != CONST0_RTX (Pmode))
+ {
+ rtx x = force_reg (Pmode, plus_constant (Pmode, residual,
+ -GET_MODE_SIZE (word_mode)));
+ anti_adjust_stack (residual);
+ emit_stack_probe (gen_rtx_PLUS (Pmode, stack_pointer_rtx, x));
+ emit_insn (gen_blockage ());
+ }
+
+ /* Some targets make optimistic assumptions in their prologues about
+ how the caller may have probed the stack. Make sure we honor
+ those assumptions when needed. */
+ if (size != CONST0_RTX (Pmode)
+ && targetm.stack_clash_protection_final_dynamic_probe (residual))
+ {
+ /* Ideally we would just probe at *sp. However, if SIZE is not
+ a compile-time constant, but is zero at runtime, then *sp
+ might hold live data. So probe at *sp if we know that
+ an allocation was made, otherwise probe into the red zone
+ which is obviously undesirable. */
+ if (GET_CODE (size) == CONST_INT)
+ {
+ emit_stack_probe (stack_pointer_rtx);
+ emit_insn (gen_blockage ());
+ }
+ else
+ {
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ -GET_MODE_SIZE (word_mode)));
+ emit_insn (gen_blockage ());
+ }
+ }
+}
+
+
/* Adjust the stack pointer by minus SIZE (an rtx for a number of bytes)
while probing it. This pushes when SIZE is positive. SIZE need not
be constant. If ADJUST_BACK is true, adjust back the stack pointer
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 91f3387c701..ab8ec27418d 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -1756,6 +1756,17 @@ extern int currently_expanding_to_rtl;
/* In explow.c */
extern HOST_WIDE_INT trunc_int_for_mode (HOST_WIDE_INT, enum machine_mode);
extern rtx plus_constant (enum machine_mode, rtx, HOST_WIDE_INT);
+extern HOST_WIDE_INT get_stack_check_protect (void);
+
+/* Support for building allocation/probing loops for stack-clash
+ protection of dynamically allocated stack space. */
+extern void compute_stack_clash_protection_loop_data (rtx *, rtx *, rtx *,
+ HOST_WIDE_INT *, rtx);
+extern void emit_stack_clash_protection_probe_loop_start (rtx *, rtx *,
+ rtx, bool);
+extern void emit_stack_clash_protection_probe_loop_end (rtx, rtx,
+ rtx, bool);
+
/* In rtl.c */
extern rtx rtx_alloc_stat (RTX_CODE MEM_STAT_DECL);
diff --git a/gcc/target.def b/gcc/target.def
index 4d6081c3121..eb2bd46f7a1 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -2580,6 +2580,13 @@ DEFHOOK
void, (void),
hook_void_void)
+DEFHOOK
+(stack_clash_protection_final_dynamic_probe,
+ "Some targets make optimistic assumptions about the state of stack probing when they emit their prologues. On such targets a probe into the end of any dynamically allocated space is likely required for safety against stack clash style attacks. Define this variable to return nonzero if such a probe is required or zero otherwise. You need not define this macro if it would always have the value zero.",
+ bool, (rtx residual),
+ default_stack_clash_protection_final_dynamic_probe)
+
+
/* Functions specific to the C family of frontends. */
#undef HOOK_PREFIX
#define HOOK_PREFIX "TARGET_C_"
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index f6aa9907225..be23875538d 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -1557,4 +1557,10 @@ default_canonicalize_comparison (int *, rtx *, rtx *, bool)
{
}
+bool
+default_stack_clash_protection_final_dynamic_probe (rtx residual ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
#include "gt-targhooks.h"
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index b64274d3ff9..4acf33fae08 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -195,3 +195,4 @@ extern const char *default_pch_valid_p (const void *, size_t);
extern void default_asm_output_ident_directive (const char*);
extern bool default_member_type_forces_blk (const_tree, enum machine_mode);
+extern bool default_stack_clash_protection_final_dynamic_probe (rtx);
diff --git a/gcc/testsuite/gcc.dg/stack-check-3.c b/gcc/testsuite/gcc.dg/stack-check-3.c
new file mode 100644
index 00000000000..58fb65649ee
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/stack-check-3.c
@@ -0,0 +1,86 @@
+/* The goal here is to ensure that dynamic allocations via vlas or
+ alloca calls receive probing.
+
+ Scanning the RTL or assembly code seems like insanity here as does
+ checking for particular allocation sizes and probe offsets. For
+ now we just verify that there's an allocation + probe loop and
+ residual allocation + probe for f?. */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -fdump-rtl-expand -fno-optimize-sibling-calls --param stack-clash-protection-probe-interval=4096 --param stack-clash-protection-guard-size=4096" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+__attribute__((noinline, noclone)) void
+foo (char *p)
+{
+ asm volatile ("" : : "r" (p) : "memory");
+}
+
+/* Simple VLA, no other locals. */
+__attribute__((noinline, noclone)) void
+f0 (int x)
+{
+ char vla[x];
+ foo (vla);
+}
+
+/* Simple VLA, small local frame. */
+__attribute__((noinline, noclone)) void
+f1 (int x)
+{
+ char locals[128];
+ char vla[x];
+ foo (vla);
+}
+
+/* Small constant alloca, no other locals. */
+__attribute__((noinline, noclone)) void
+f2 (int x)
+{
+ char *vla = __builtin_alloca (128);
+ foo (vla);
+}
+
+/* Big constant alloca, small local frame. */
+__attribute__((noinline, noclone)) void
+f3 (int x)
+{
+ char locals[128];
+ char *vla = __builtin_alloca (16384);
+ foo (vla);
+}
+
+/* Big constant alloca, small local frame. */
+__attribute__((noinline, noclone)) void
+f3a (int x)
+{
+ char locals[128];
+ char *vla = __builtin_alloca (32768);
+ foo (vla);
+}
+
+/* Nonconstant alloca, no other locals. */
+__attribute__((noinline, noclone)) void
+f4 (int x)
+{
+ char *vla = __builtin_alloca (x);
+ foo (vla);
+}
+
+/* Nonconstant alloca, small local frame. */
+__attribute__((noinline, noclone)) void
+f5 (int x)
+{
+ char locals[128];
+ char *vla = __builtin_alloca (x);
+ foo (vla);
+}
+
+/* { dg-final { scan-rtl-dump-times "allocation and probing residuals" 7 "expand" } } */
+
+
+/* { dg-final { scan-rtl-dump-times "allocation and probing in loop" 7 "expand" { target callee_realigns_stack } } } */
+/* { dg-final { scan-rtl-dump-times "allocation and probing in loop" 4 "expand" { target { ! callee_realigns_stack } } } } */
+/* { dg-final { scan-rtl-dump-times "allocation and probing in rotated loop" 1 "expand" { target { ! callee_realigns_stack } } } } */
+/* { dg-final { scan-rtl-dump-times "allocation and probing inline" 1 "expand" { target { ! callee_realigns_stack } } } } */
+/* { dg-final { scan-rtl-dump-times "skipped dynamic allocation and probing loop" 1 "expand" { target { ! callee_realigns_stack } } } } */

@@ -0,0 +1,143 @@
commit 8a1b46d59d6c3e1e5eb606cd44689c8557612257
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed Sep 20 05:21:09 2017 +0000
* config/alpha/alpha.c (alpha_expand_prologue): Also check
flag_stack_clash_protection.
* config/ia64/ia64.c (ia64_compute_frame_size): Likewise.
(ia64_expand_prologue): Likewise.
* config/mips/mips.c (mips_expand_prologue): Likewise.
* config/sparc/sparc.c (sparc_expand_prologue): Likewise.
(sparc_flat_expand_prologue): Likewise.
* config/spu/spu.c (spu_expand_prologue): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@252996 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index 5402f5213d6..c46c843e462 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -7624,7 +7624,7 @@ alpha_expand_prologue (void)
Note that we are only allowed to adjust sp once in the prologue. */
probed_size = frame_size;
- if (flag_stack_check)
+ if (flag_stack_check || flag_stack_clash_protection)
probed_size += get_stack_check_protect ();
if (probed_size <= 32768)
@@ -7639,7 +7639,7 @@ alpha_expand_prologue (void)
/* We only have to do this probe if we aren't saving registers or
if we are probing beyond the frame because of -fstack-check. */
if ((sa_size == 0 && probed_size > probed - 4096)
- || flag_stack_check)
+ || flag_stack_check || flag_stack_clash_protection)
emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
}
@@ -7669,7 +7669,8 @@ alpha_expand_prologue (void)
late in the compilation, generate the loop as a single insn. */
emit_insn (gen_prologue_stack_probe_loop (count, ptr));
- if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
+ if ((leftover > 4096 && sa_size == 0)
+ || flag_stack_check || flag_stack_clash_protection)
{
rtx last = gen_rtx_MEM (DImode,
plus_constant (Pmode, ptr, -leftover));
@@ -7677,7 +7678,7 @@ alpha_expand_prologue (void)
emit_move_insn (last, const0_rtx);
}
- if (flag_stack_check)
+ if (flag_stack_check || flag_stack_clash_protection)
{
/* If -fstack-check is specified we have to load the entire
constant into a register and subtract from the sp in one go,
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 390983936e8..5bf7046cf15 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -2638,7 +2638,8 @@ ia64_compute_frame_size (HOST_WIDE_INT size)
mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
/* Static stack checking uses r2 and r3. */
- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
+ || flag_stack_clash_protection)
current_frame_info.gr_used_mask |= 0xc;
/* Find the size of the register stack frame. We have only 80 local
@@ -3434,7 +3435,8 @@ ia64_expand_prologue (void)
if (flag_stack_usage_info)
current_function_static_stack_size = current_frame_info.total_size;
- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
+ || flag_stack_clash_protection)
ia64_emit_probe_stack_range (get_stack_check_protect (),
current_frame_info.total_size,
current_frame_info.n_input_regs
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 9b7eb678f19..da17f94b4f9 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -10745,7 +10745,9 @@ mips_expand_prologue (void)
if (flag_stack_usage_info)
current_function_static_stack_size = size;
- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
+ if ((flag_stack_check == STATIC_BUILTIN_STACK_CHECK
+ || flag_stack_clash_protection)
+ && size)
mips_emit_probe_stack_range (get_stack_check_protect (), size);
/* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index e5e93c80261..617aa617208 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -5430,7 +5430,9 @@ sparc_expand_prologue (void)
if (flag_stack_usage_info)
current_function_static_stack_size = size;
- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
+ if ((flag_stack_check == STATIC_BUILTIN_STACK_CHECK
+ || flag_stack_clash_protection)
+ && size)
sparc_emit_probe_stack_range (get_stack_check_protect (), size);
if (size == 0)
@@ -5532,7 +5534,9 @@ sparc_flat_expand_prologue (void)
if (flag_stack_usage_info)
current_function_static_stack_size = size;
- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
+ if ((flag_stack_check == STATIC_BUILTIN_STACK_CHECK
+ || flag_stack_clash_protection)
+ && size)
sparc_emit_probe_stack_range (get_stack_check_protect (), size);
if (sparc_save_local_in_regs_p)
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index 328bd5bd2ae..5541a3cd243 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -1761,7 +1761,7 @@ spu_expand_prologue (void)
if (total_size > 0)
{
- if (flag_stack_check)
+ if (flag_stack_check || flag_stack_clash_protection)
{
/* We compare against total_size-1 because
($sp >= total_size) <=> ($sp > total_size-1) */
@@ -5366,7 +5366,7 @@ spu_allocate_stack (rtx op0, rtx op1)
emit_insn (gen_spu_convert (sp, stack_pointer_rtx));
emit_insn (gen_subv4si3 (sp, sp, splatted));
- if (flag_stack_check)
+ if (flag_stack_check || flag_stack_clash_protection)
{
rtx avail = gen_reg_rtx(SImode);
rtx result = gen_reg_rtx(SImode);

@@ -0,0 +1,94 @@
commit ea2b372d666ec1105abf4ef5418d92d612283e88
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed Sep 20 05:23:51 2017 +0000
* function.c (dump_stack_clash_frame_info): New function.
* function.h (dump_stack_clash_frame_info): Prototype.
(enum stack_clash_probes): New enum.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@252997 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/function.c b/gcc/function.c
index 76baf307984..9b395aebcb3 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -5263,6 +5263,58 @@ get_arg_pointer_save_area (void)
return ret;
}
+
+/* If debugging dumps are requested, dump information about how the
+ target handled -fstack-clash-protection for the prologue.
+
+ PROBES describes what, if any, probes were emitted.
+
+ RESIDUALS indicates if the prologue had any residual allocation
+ (i.e. total allocation was not a multiple of PROBE_INTERVAL). */
+
+void
+dump_stack_clash_frame_info (enum stack_clash_probes probes, bool residuals)
+{
+ if (!dump_file)
+ return;
+
+ switch (probes)
+ {
+ case NO_PROBE_NO_FRAME:
+ fprintf (dump_file,
+ "Stack clash no probe no stack adjustment in prologue.\n");
+ break;
+ case NO_PROBE_SMALL_FRAME:
+ fprintf (dump_file,
+ "Stack clash no probe small stack adjustment in prologue.\n");
+ break;
+ case PROBE_INLINE:
+ fprintf (dump_file, "Stack clash inline probes in prologue.\n");
+ break;
+ case PROBE_LOOP:
+ fprintf (dump_file, "Stack clash probe loop in prologue.\n");
+ break;
+ }
+
+ if (residuals)
+ fprintf (dump_file, "Stack clash residual allocation in prologue.\n");
+ else
+ fprintf (dump_file, "Stack clash no residual allocation in prologue.\n");
+
+ if (frame_pointer_needed)
+ fprintf (dump_file, "Stack clash frame pointer needed.\n");
+ else
+ fprintf (dump_file, "Stack clash no frame pointer needed.\n");
+
+ if (TREE_THIS_VOLATILE (cfun->decl))
+ fprintf (dump_file,
+ "Stack clash noreturn prologue, assuming no implicit"
+ " probes in caller.\n");
+ else
+ fprintf (dump_file,
+ "Stack clash not noreturn prologue.\n");
+}
+
/* Add a list of INSNS to the hash HASHP, possibly allocating HASHP
for the first time. */
diff --git a/gcc/function.h b/gcc/function.h
index 89d71e592dd..ffea1e564ba 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -712,6 +712,16 @@ extern void instantiate_decl_rtl (rtx x);
and create duplicate blocks. */
extern void reorder_blocks (void);
+enum stack_clash_probes {
+ NO_PROBE_NO_FRAME,
+ NO_PROBE_SMALL_FRAME,
+ PROBE_INLINE,
+ PROBE_LOOP
+};
+
+extern void dump_stack_clash_frame_info (enum stack_clash_probes, bool);
+
+
/* Set BLOCK_NUMBER for all the blocks in FN. */
extern void number_blocks (tree);
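A hedged fragment showing the intended use of this interface from a backend prologue (the conditions are illustrative only, assuming function.h is in scope; the s390 changes later in this series make calls of exactly this shape):

/* Sketch, not GCC source: report what the prologue decided to emit.  */
static void
report_prologue_probes (HOST_WIDE_INT frame_size, HOST_WIDE_INT guard_size,
                        bool residuals)
{
  if (frame_size == 0)
    dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
  else if (frame_size < guard_size)
    dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
  else
    dump_stack_clash_frame_info (PROBE_INLINE, residuals);
}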

File diff suppressed because it is too large.

@@ -0,0 +1,115 @@
commit 2bb044f9734259945e2b5048d92bc8d0af707d27
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed Sep 20 05:43:28 2017 +0000
* combine-stack-adj.c (combine_stack_adjustments_for_block): Do
nothing for stack adjustments with REG_STACK_CHECK.
* sched-deps.c (parse_add_or_inc): Reject insns with
REG_STACK_CHECK from dependency breaking.
* config/i386/i386.c (pro_epilogue_adjust_stack): Return insn.
(ix86_adjust_stack_and_probe_stack_clash): Add REG_STACK_CHECK notes.
* reg-notes.def (STACK_CHECK): New note.
* gcc.target/i386/stack-check-11.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@252999 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/combine-stack-adj.c b/gcc/combine-stack-adj.c
index 0a4d8a51d1d..ee66c28ca35 100644
--- a/gcc/combine-stack-adj.c
+++ b/gcc/combine-stack-adj.c
@@ -441,6 +441,8 @@ combine_stack_adjustments_for_block (basic_block bb)
continue;
set = single_set_for_csa (insn);
+ if (set && find_reg_note (insn, REG_STACK_CHECK, NULL_RTX))
+ set = NULL_RTX;
if (set)
{
rtx dest = SET_DEST (set);
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index a07104d304d..a9072f58f50 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -9502,7 +9502,7 @@ ix86_add_queued_cfa_restore_notes (rtx insn)
zero if %r11 register is live and cannot be freely used and positive
otherwise. */
-static void
+static rtx
pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
int style, bool set_cfa)
{
@@ -9589,6 +9589,7 @@ pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
m->fs.sp_offset = ooffset - INTVAL (offset);
m->fs.sp_valid = valid;
}
+ return insn;
}
/* Find an available register to be used as dynamic realign argument
@@ -9902,9 +9903,11 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
for (i = probe_interval; i <= size; i += probe_interval)
{
/* Allocate PROBE_INTERVAL bytes. */
- pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
- GEN_INT (-probe_interval), -1,
- m->fs.cfa_reg == stack_pointer_rtx);
+ rtx insn
+ = pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-PROBE_INTERVAL), -1,
+ m->fs.cfa_reg == stack_pointer_rtx);
+ add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
/* And probe at *sp. */
emit_stack_probe (stack_pointer_rtx);
diff --git a/gcc/reg-notes.def b/gcc/reg-notes.def
index db61c092aab..1d7a4356a85 100644
--- a/gcc/reg-notes.def
+++ b/gcc/reg-notes.def
@@ -216,3 +216,7 @@ REG_NOTE (ARGS_SIZE)
that the return value of a call can be used to reinitialize a
pseudo reg. */
REG_NOTE (RETURNED)
+
+/* Indicates the instruction is a stack check probe that should not
+ be combined with other stack adjustments. */
+REG_NOTE (STACK_CHECK)
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 4ac2542a3af..75780150e34 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -4607,6 +4607,11 @@ parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
if (RTX_FRAME_RELATED_P (insn) || !pat)
return false;
+ /* Do not allow breaking data dependencies for insns that are marked
+ with REG_STACK_CHECK. */
+ if (find_reg_note (insn, REG_STACK_CHECK, NULL))
+ return false;
+
/* Result must be single reg. */
if (!REG_P (SET_DEST (pat)))
return false;
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-11.c b/gcc/testsuite/gcc.target/i386/stack-check-11.c
new file mode 100644
index 00000000000..183103f01e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/stack-check-11.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+extern void arf (unsigned long int *, unsigned long int *);
+void
+frob ()
+{
+ unsigned long int num[859];
+ unsigned long int den[859];
+ arf (den, num);
+}
+
+/* { dg-final { scan-assembler-times "subq" 4 } } */
+/* { dg-final { scan-assembler-times "orq" 3 } } */
+
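The scan counts in stack-check-11.c follow from simple arithmetic; a worked check (assuming the default 4kB probe interval and ignoring the small extra frame overhead):

#include <stdio.h>

int
main (void)
{
  unsigned long frame = 2UL * 859 * 8;  /* num[] + den[]: 13744 bytes */
  unsigned long interval = 4096;
  /* Three full intervals, each a subq followed by an orq probe, plus
     one more subq for the ~1.4kB residual: 4 subq, 3 orq.  */
  printf ("intervals=%lu residual=%lu\n",
          frame / interval, frame % interval);  /* 3 and 1456 */
  return 0;
}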

@@ -0,0 +1,88 @@
commit 93ed472702aad6d9b8998592775a0ab4120b6242
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed Sep 20 21:59:50 2017 +0000
* explow.c (compute_stack_clash_protection_loop_data): Use
CONST_INT_P instead of explicit test. Verify object is a
CONST_INT_P before looking at INTVAL.
(anti_adjust_stack_and_probe_stack_clash): Use CONST_INT_P
instead of explicit test.
* gcc.target/i386/stack-check-11.c: Update test and regexp
so that it works for both i?86 and x86_64.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@253034 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/explow.c b/gcc/explow.c
index 2526e8513b7..d118e0d7782 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -1778,11 +1778,11 @@ compute_stack_clash_protection_loop_data (rtx *rounded_size, rtx *last_addr,
if (*rounded_size == CONST0_RTX (Pmode))
fprintf (dump_file,
"Stack clash skipped dynamic allocation and probing loop.\n");
- else if (GET_CODE (*rounded_size) == CONST_INT
+ else if (CONST_INT_P (*rounded_size)
&& INTVAL (*rounded_size) <= 4 * *probe_interval)
fprintf (dump_file,
"Stack clash dynamic allocation and probing inline.\n");
- else if (GET_CODE (*rounded_size) == CONST_INT)
+ else if (CONST_INT_P (*rounded_size))
fprintf (dump_file,
"Stack clash dynamic allocation and probing in "
"rotated loop.\n");
@@ -1880,7 +1880,8 @@ anti_adjust_stack_and_probe_stack_clash (rtx size)
if (rounded_size != CONST0_RTX (Pmode))
{
- if (INTVAL (rounded_size) <= 4 * probe_interval)
+ if (CONST_INT_P (rounded_size)
+ && INTVAL (rounded_size) <= 4 * probe_interval)
{
for (HOST_WIDE_INT i = 0;
i < INTVAL (rounded_size);
@@ -1900,7 +1901,7 @@ anti_adjust_stack_and_probe_stack_clash (rtx size)
else
{
rtx loop_lab, end_loop;
- bool rotate_loop = GET_CODE (rounded_size) == CONST_INT;
+ bool rotate_loop = CONST_INT_P (rounded_size);
emit_stack_clash_protection_probe_loop_start (&loop_lab, &end_loop,
last_addr, rotate_loop);
@@ -1938,7 +1939,7 @@ anti_adjust_stack_and_probe_stack_clash (rtx size)
might hold live data. So probe at *sp if we know that
an allocation was made, otherwise probe into the red zone
which is obviously undesirable. */
- if (GET_CODE (size) == CONST_INT)
+ if (CONST_INT_P (size))
{
emit_stack_probe (stack_pointer_rtx);
emit_insn (gen_blockage ());
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-11.c b/gcc/testsuite/gcc.target/i386/stack-check-11.c
index 183103f01e5..fe5b2c2b844 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-11.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-11.c
@@ -2,15 +2,17 @@
/* { dg-options "-O2 -fstack-clash-protection" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
-extern void arf (unsigned long int *, unsigned long int *);
+#include <stdint.h>
+
+extern void arf (uint64_t *, uint64_t *);
void
frob ()
{
- unsigned long int num[859];
- unsigned long int den[859];
+ uint64_t num[859];
+ uint64_t den[859];
arf (den, num);
}
-/* { dg-final { scan-assembler-times "subq" 4 } } */
-/* { dg-final { scan-assembler-times "orq" 3 } } */
+/* { dg-final { scan-assembler-times "sub\[ql\]" 4 } } */
+/* { dg-final { scan-assembler-times "or\[ql\]" 3 } } */

@@ -0,0 +1,389 @@
commit b49f8fb8a97e9af8e6ba2b65d18195099cd1bb79
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Thu Sep 21 04:30:16 2017 +0000
* config/s390/s390.c (MIN_UNROLL_PROBES): Define.
(allocate_stack_space): New function, partially extracted from
s390_emit_prologue.
(s390_emit_prologue): Track offset to most recent stack probe.
Code to allocate space moved into allocate_stack_space.
Dump actions when no stack is allocated.
(s390_prologue_plus_offset): New function.
(s390_emit_stack_probe): Likewise.
* gcc.dg/stack-check-5.c: Add argument for s390.
* lib/target-supports.exp:
(check_effective_target_supports_stack_clash_protection): Enable for
s390/s390x targets.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@253049 138bc75d-0d04-0410-961f-82ee72b054a4
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index 3c04781f947..45998bc7516 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -10350,6 +10350,184 @@ s390_emit_stack_tie (void)
emit_insn (gen_stack_tie (mem));
}
+/* Calculate TARGET = REG + OFFSET as s390_emit_prologue would do it.
+ - push too big immediates to the literal pool and annotate the refs
+ - emit frame related notes for stack pointer changes. */
+
+static rtx
+s390_prologue_plus_offset (rtx target, rtx reg, rtx offset, bool frame_related_p)
+{
+ rtx insn;
+ rtx orig_offset = offset;
+
+ gcc_assert (REG_P (target));
+ gcc_assert (REG_P (reg));
+ gcc_assert (CONST_INT_P (offset));
+
+ if (offset == const0_rtx) /* lr/lgr */
+ {
+ insn = emit_move_insn (target, reg);
+ }
+ else if (DISP_IN_RANGE (INTVAL (offset))) /* la */
+ {
+ insn = emit_move_insn (target, gen_rtx_PLUS (Pmode, reg,
+ offset));
+ }
+ else
+ {
+ if (!satisfies_constraint_K (offset) /* ahi/aghi */
+ && (!TARGET_EXTIMM
+ || (!satisfies_constraint_Op (offset) /* alfi/algfi */
+ && !satisfies_constraint_On (offset)))) /* slfi/slgfi */
+ offset = force_const_mem (Pmode, offset);
+
+ if (target != reg)
+ {
+ insn = emit_move_insn (target, reg);
+ RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
+ }
+
+ insn = emit_insn (gen_add2_insn (target, offset));
+
+ if (!CONST_INT_P (offset))
+ {
+ annotate_constant_pool_refs (&PATTERN (insn));
+
+ if (frame_related_p)
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_PLUS (Pmode, target,
+ orig_offset)));
+ }
+ }
+
+ RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
+
+ /* If this is a stack adjustment and we are generating a stack clash
+ prologue, then add a REG_STACK_CHECK note to signal that this insn
+ should be left alone. */
+ if (flag_stack_clash_protection && target == stack_pointer_rtx)
+ add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
+
+ return insn;
+}
+
+/* Emit a compare instruction with a volatile memory access as stack
+ probe. It does not waste store tags and does not clobber any
+ registers apart from the condition code. */
+static void
+s390_emit_stack_probe (rtx addr)
+{
+ rtx tmp = gen_rtx_MEM (Pmode, addr);
+ MEM_VOLATILE_P (tmp) = 1;
+ s390_emit_compare (EQ, gen_rtx_REG (Pmode, 0), tmp);
+ emit_insn (gen_blockage ());
+}
+
+/* Use a runtime loop if we have to emit more probes than this. */
+#define MIN_UNROLL_PROBES 3
+
+/* Allocate SIZE bytes of stack space, using TEMP_REG as a temporary
+ if necessary. LAST_PROBE_OFFSET contains the offset of the closest
+ probe relative to the stack pointer.
+
+ Note that SIZE is negative.
+
+ The return value is true if TEMP_REG has been clobbered. */
+static bool
+allocate_stack_space (rtx size, HOST_WIDE_INT last_probe_offset,
+ rtx temp_reg)
+{
+ bool temp_reg_clobbered_p = false;
+ HOST_WIDE_INT probe_interval
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ HOST_WIDE_INT guard_size
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+
+ if (flag_stack_clash_protection)
+ {
+ if (last_probe_offset + -INTVAL (size) < guard_size)
+ dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
+ else
+ {
+ rtx offset = GEN_INT (probe_interval - UNITS_PER_LONG);
+ HOST_WIDE_INT rounded_size = -INTVAL (size) & -probe_interval;
+ HOST_WIDE_INT num_probes = rounded_size / probe_interval;
+ HOST_WIDE_INT residual = -INTVAL (size) - rounded_size;
+
+ if (num_probes < MIN_UNROLL_PROBES)
+ {
+ /* Emit unrolled probe statements. */
+
+ for (unsigned int i = 0; i < num_probes; i++)
+ {
+ s390_prologue_plus_offset (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-probe_interval), true);
+ s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ offset));
+ }
+ dump_stack_clash_frame_info (PROBE_INLINE, residual != 0);
+ }
+ else
+ {
+ /* Emit a loop probing the pages. */
+
+ rtx loop_start_label = gen_label_rtx ();
+
+ /* From now on temp_reg will be the CFA register. */
+ s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
+ GEN_INT (-rounded_size), true);
+ emit_label (loop_start_label);
+
+ s390_prologue_plus_offset (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-probe_interval), false);
+ s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ offset));
+ emit_cmp_and_jump_insns (stack_pointer_rtx, temp_reg,
+ GT, NULL_RTX,
+ Pmode, 1, loop_start_label);
+
+ /* Without this make_edges ICEs. */
+ JUMP_LABEL (get_last_insn ()) = loop_start_label;
+ LABEL_NUSES (loop_start_label) = 1;
+
+ /* That's going to be a NOP since stack pointer and
+ temp_reg are supposed to be the same here. We just
+ emit it to set the CFA reg back to r15. */
+ s390_prologue_plus_offset (stack_pointer_rtx, temp_reg,
+ const0_rtx, true);
+ temp_reg_clobbered_p = true;
+ dump_stack_clash_frame_info (PROBE_LOOP, residual != 0);
+ }
+
+ /* Handle any residual allocation request. */
+ s390_prologue_plus_offset (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-residual), true);
+ last_probe_offset += residual;
+ if (last_probe_offset >= probe_interval)
+ s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (residual
+ - UNITS_PER_LONG)));
+
+ return temp_reg_clobbered_p;
+ }
+ }
+
+ /* Subtract frame size from stack pointer. */
+ s390_prologue_plus_offset (stack_pointer_rtx,
+ stack_pointer_rtx,
+ size, true);
+
+ return temp_reg_clobbered_p;
+}
+
+
/* Expand the prologue into a bunch of separate insns. */
void
@@ -10391,6 +10569,19 @@ s390_emit_prologue (void)
else
temp_reg = gen_rtx_REG (Pmode, 1);
+ /* When probing for stack-clash mitigation, we have to track the distance
+ between the stack pointer and closest known reference.
+
+ Most of the time we have to make a worst case assumption. The
+ only exception is when TARGET_BACKCHAIN is active, in which case
+ we know *sp (offset 0) was written. */
+ HOST_WIDE_INT probe_interval
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ HOST_WIDE_INT last_probe_offset
+ = (TARGET_BACKCHAIN
+ ? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
+ : probe_interval - (STACK_BOUNDARY / UNITS_PER_WORD));
+
/* Save call saved gprs. */
if (cfun_frame_layout.first_save_gpr != -1)
{
@@ -10400,6 +10591,14 @@ s390_emit_prologue (void)
- cfun_frame_layout.first_save_gpr_slot),
cfun_frame_layout.first_save_gpr,
cfun_frame_layout.last_save_gpr);
+
+ /* This is not 100% correct. If we have more than one register saved,
+ then LAST_PROBE_OFFSET can move even closer to sp. */
+ last_probe_offset
+ = (cfun_frame_layout.gprs_offset +
+ UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
+ - cfun_frame_layout.first_save_gpr_slot));
+
emit_insn (insn);
}
@@ -10416,6 +10615,8 @@ s390_emit_prologue (void)
if (cfun_fpr_bit_p (i))
{
save_fpr (stack_pointer_rtx, offset, i + 16);
+ if (offset < last_probe_offset)
+ last_probe_offset = offset;
offset += 8;
}
else if (!TARGET_PACKED_STACK)
@@ -10429,6 +10630,8 @@ s390_emit_prologue (void)
if (cfun_fpr_bit_p (i))
{
insn = save_fpr (stack_pointer_rtx, offset, i + 16);
+ if (offset < last_probe_offset)
+ last_probe_offset = offset;
offset += 8;
/* If f4 and f6 are call clobbered they are saved due to stdargs and
@@ -10451,6 +10654,8 @@ s390_emit_prologue (void)
if (cfun_fpr_bit_p (i))
{
insn = save_fpr (stack_pointer_rtx, offset, i + 16);
+ if (offset < last_probe_offset)
+ last_probe_offset = offset;
RTX_FRAME_RELATED_P (insn) = 1;
offset -= 8;
@@ -10470,10 +10675,11 @@ s390_emit_prologue (void)
if (cfun_frame_layout.frame_size > 0)
{
rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
- rtx real_frame_off;
+ rtx stack_pointer_backup_loc;
+ bool temp_reg_clobbered_p;
if (s390_stack_size)
- {
+ {
HOST_WIDE_INT stack_guard;
if (s390_stack_guard)
@@ -10538,35 +10744,36 @@ s390_emit_prologue (void)
if (s390_warn_dynamicstack_p && cfun->calls_alloca)
warning (0, "%qs uses dynamic stack allocation", current_function_name ());
- /* Save incoming stack pointer into temp reg. */
- if (TARGET_BACKCHAIN || next_fpr)
- insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
+ /* Save the location where we could back up the incoming stack
+ pointer. */
+ stack_pointer_backup_loc = get_last_insn ();
- /* Subtract frame size from stack pointer. */
+ temp_reg_clobbered_p = allocate_stack_space (frame_off, last_probe_offset,
+ temp_reg);
- if (DISP_IN_RANGE (INTVAL (frame_off)))
- {
- insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- frame_off));
- insn = emit_insn (insn);
- }
- else
+ if (TARGET_BACKCHAIN || next_fpr)
{
- if (!CONST_OK_FOR_K (INTVAL (frame_off)))
- frame_off = force_const_mem (Pmode, frame_off);
-
- insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
- annotate_constant_pool_refs (&PATTERN (insn));
+ if (temp_reg_clobbered_p)
+ {
+ /* allocate_stack_space had to make use of temp_reg and
+ we need it to hold a backup of the incoming stack
+ pointer. Calculate back that value from the current
+ stack pointer. */
+ s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
+ GEN_INT (cfun_frame_layout.frame_size),
+ false);
+ }
+ else
+ {
+ /* allocate_stack_space didn't actually require
+ temp_reg. Insert the stack pointer backup insn
+ before the stack pointer decrement code - knowing now
+ that the value will survive. */
+ emit_insn_after (gen_move_insn (temp_reg, stack_pointer_rtx),
+ stack_pointer_backup_loc);
+ }
}
- RTX_FRAME_RELATED_P (insn) = 1;
- real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
- add_reg_note (insn, REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- real_frame_off)));
-
/* Set backchain. */
if (TARGET_BACKCHAIN)
@@ -10590,6 +10797,8 @@ s390_emit_prologue (void)
emit_clobber (addr);
}
}
+ else if (flag_stack_clash_protection)
+ dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
/* Save fprs 8 - 15 (64 bit ABI). */
diff --git a/gcc/testsuite/gcc.dg/stack-check-5.c b/gcc/testsuite/gcc.dg/stack-check-5.c
index 2171d9b6c23..3178f5d8ce5 100644
--- a/gcc/testsuite/gcc.dg/stack-check-5.c
+++ b/gcc/testsuite/gcc.dg/stack-check-5.c
@@ -3,6 +3,10 @@
/* { dg-require-effective-target supports_stack_clash_protection } */
+/* Otherwise the S/390 back-end might save the stack pointer in f2 ()
+ into an FPR. */
+/* { dg-additional-options "-msoft-float" { target { s390x-*-* } } } */
+
extern void foo (char *);
extern void bar (void);
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 2c669a9822f..f24c5c6e0ac 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -5422,12 +5422,12 @@ proc check_effective_target_supports_stack_clash_protection { } {
# Temporary until the target bits are fully ACK'd.
# if { [istarget aarch*-*-*]
-# || [istarget s390*-*-*]
# || [istarget powerpc*-*-*] || [istarget rs6000*-*-*] } {
# return 1
# }
- if { [istarget x86_64-*-*] || [istarget i?86-*-*] } {
+ if { [istarget x86_64-*-*] || [istarget i?86-*-*]
+ || [istarget s390*-*-*] } {
return 1
}
return 0
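A worked example of the allocate_stack_space arithmetic above (values only; assumes the default 4kB probe interval and the MIN_UNROLL_PROBES of 3 defined in the patch):

#include <stdio.h>

int
main (void)
{
  long size = -16600;                    /* SIZE is negative, as documented */
  long probe_interval = 1L << 12;
  long rounded_size = -size & -probe_interval;       /* 16384 */
  long num_probes = rounded_size / probe_interval;   /* 4 */
  long residual = -size - rounded_size;              /* 216 */
  printf ("rounded=%ld probes=%ld residual=%ld\n",
          rounded_size, num_probes, residual);
  return 0;
}

With four probes needed, num_probes < MIN_UNROLL_PROBES is false, so the runtime loop form is chosen rather than the unrolled probes.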

@@ -0,0 +1,53 @@
2017-03-25 Uros Bizjak <ubizjak@gmail.com>
PR target/80180
* config/i386/i386.c (ix86_expand_builtin)
<IX86_BUILTIN_RDSEED{16,32,64}_STEP>: Do not expand arg0 between
flags reg setting and flags reg using instructions.
<IX86_BUILTIN_RDRAND{16,32,64}_STEP>: Ditto. Use non-flags reg
clobbering instructions to zero extend op2.
--- gcc/config/i386/i386.c (revision 246478)
+++ gcc/config/i386/i386.c (revision 246479)
@@ -39533,9 +39533,6 @@
mode0 = DImode;
rdrand_step:
- op0 = gen_reg_rtx (mode0);
- emit_insn (GEN_FCN (icode) (op0));
-
arg0 = CALL_EXPR_ARG (exp, 0);
op1 = expand_normal (arg0);
if (!address_operand (op1, VOIDmode))
@@ -39543,6 +39540,10 @@
op1 = convert_memory_address (Pmode, op1);
op1 = copy_addr_to_reg (op1);
}
+
+ op0 = gen_reg_rtx (mode0);
+ emit_insn (GEN_FCN (icode) (op0));
+
emit_move_insn (gen_rtx_MEM (mode0, op1), op0);
op1 = gen_reg_rtx (SImode);
@@ -39584,9 +39597,6 @@
mode0 = DImode;
rdseed_step:
- op0 = gen_reg_rtx (mode0);
- emit_insn (GEN_FCN (icode) (op0));
-
arg0 = CALL_EXPR_ARG (exp, 0);
op1 = expand_normal (arg0);
if (!address_operand (op1, VOIDmode))
@@ -39594,6 +39604,10 @@
op1 = convert_memory_address (Pmode, op1);
op1 = copy_addr_to_reg (op1);
}
+
+ op0 = gen_reg_rtx (mode0);
+ emit_insn (GEN_FCN (icode) (op0));
+
emit_move_insn (gen_rtx_MEM (mode0, op1), op0);
op2 = gen_reg_rtx (QImode);
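For context, a user-level sketch of what this expander services (hypothetical example; requires -mrdrnd): the rdrand instruction sets the carry flag, and the flags-consuming instruction that follows must see it unchanged, so after this fix the destination address is computed before the flags-setting insn rather than between it and its consumer.

#include <immintrin.h>

/* Returns nonzero on success, zero if the hardware had no entropy.  */
int
get_random_u64 (unsigned long long *out)
{
  return _rdrand64_step (out);
}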

@@ -0,0 +1,49 @@
2016-05-04 Alan Modra <amodra@gmail.com>
* config/rs6000/rs6000.c (rs6000_elf_output_toc_section_asm_op):
Align .toc.
--- gcc/config/rs6000/rs6000.c
+++ gcc/config/rs6000/rs6000.c
@@ -31339,8 +31339,8 @@ rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
if (!toc_initialized)
{
- toc_initialized = 1;
fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
(*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
fprintf (asm_out_file, "\t.tc ");
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
@@ -31348,20 +31348,30 @@ rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
fprintf (asm_out_file, "\n");
fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
fprintf (asm_out_file, " = .+32768\n");
+ toc_initialized = 1;
}
else
fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
}
else if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
&& !TARGET_RELOCATABLE)
- fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ {
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ if (!toc_initialized)
+ {
+ ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
+ toc_initialized = 1;
+ }
+ }
else
{
fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
if (!toc_initialized)
{
+ ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
fprintf (asm_out_file, " = .+32768\n");
toc_initialized = 1;
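The visible effect is an alignment directive after each switch to the TOC section. A hedged sketch of the 64-bit output (the directive spellings are assumptions from the rs6000 ELF port, not shown in this hunk):

#include <stdio.h>

int
main (void)
{
  int target_64bit = 1;
  /* What the patched hook now prints when .toc is first entered.  */
  printf ("\t.section\t\".toc\",\"aw\"\n");        /* TOC_SECTION_ASM_OP */
  printf ("\t.align %d\n", target_64bit ? 3 : 2);  /* ASM_OUTPUT_ALIGN */
  return 0;
}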

@@ -0,0 +1,177 @@
2016-01-16 Torvald Riegel <triegel@redhat.com>
* method-gl.cc (gl_wt_dispatch::trycommit): Ensure proxy privatization
safety.
* method-ml.cc (ml_wt_dispatch::trycommit): Likewise.
* libitm/testsuite/libitm.c/priv-1.c: New.
--- libitm/method-gl.cc
+++ libitm/method-gl.cc
@@ -291,12 +291,18 @@ public:
// See begin_or_restart() for why we need release memory order here.
v = gl_mg::clear_locked(v) + 1;
o_gl_mg.orec.store(v, memory_order_release);
-
- // Need to ensure privatization safety. Every other transaction must
- // have a snapshot time that is at least as high as our commit time
- // (i.e., our commit must be visible to them).
- priv_time = v;
}
+
+ // Need to ensure privatization safety. Every other transaction must have
+ // a snapshot time that is at least as high as our commit time (i.e., our
+ // commit must be visible to them). Because of proxy privatization, we
+ // must ensure that even if we are a read-only transaction. See
+ // ml_wt_dispatch::trycommit() for details: We can't get quite the same
+ // set of problems because we just use one orec and thus, for example,
+ // there cannot be concurrent writers -- but we can still get pending
+ // loads to privatized data when not ensuring privatization safety, which
+ // is problematic if the program unmaps the privatized memory.
+ priv_time = v;
return true;
}
--- libitm/method-ml.cc
+++ libitm/method-ml.cc
@@ -513,6 +513,21 @@ public:
if (!tx->writelog.size())
{
tx->readlog.clear();
+ // We still need to ensure privatization safety, unfortunately. While
+ // we cannot have privatized anything by ourselves (because we are not
+ // an update transaction), we can have observed the commits of
+ // another update transaction that privatized something. Because any
+ // commit happens before ensuring privatization, our snapshot and
+ // commit can thus have happened before ensuring privatization safety
+ // for this commit/snapshot time. Therefore, before we can return to
+ // nontransactional code that might use the privatized data, we must
+ // ensure privatization safety for our snapshot time.
+ // This still seems to be better than not allowing use of the
+ // snapshot time before privatization safety has been ensured because
+ // we at least can run transactions such as this one, and in the
+ // meantime the transaction producing this commit time might have
+ // finished ensuring privatization safety for it.
+ priv_time = tx->shared_state.load(memory_order_relaxed);
return true;
}
--- /dev/null
+++ libitm/testsuite/libitm.c/priv-1.c
@@ -0,0 +1,117 @@
+/* Quick stress test for proxy privatization. */
+
+/* We need to use a TM method that has to enforce privatization safety
+ explicitly. */
+/* { dg-set-target-env-var ITM_DEFAULT_METHOD "ml_wt" } */
+/* { dg-options "-std=gnu11" } */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+
+/* Make them likely to be mapped to different orecs. */
+#define ALIGN __attribute__((aligned (256)))
+/* Don't make these static to work around PR 68591. */
+int x ALIGN;
+int *ptr ALIGN;
+int *priv_ptr ALIGN;
+int priv_value ALIGN;
+int barrier ALIGN = 0;
+const int iters = 100;
+
+static void arrive_and_wait (int expected_value)
+{
+ int now = __atomic_add_fetch (&barrier, 1, __ATOMIC_ACQ_REL);
+ while (now < expected_value)
+ __atomic_load (&barrier, &now, __ATOMIC_ACQUIRE);
+}
+
+static void __attribute__((transaction_pure,noinline)) delay (int i)
+{
+ for (volatile int v = 0; v < i; v++);
+}
+
+/* This tries to catch a case in which proxy privatization safety is not
+ ensured by privatization_user. Specifically, it's access to the value
+ of it's transactional snapshot of ptr must read from an uncommitted write
+ by writer; thus, writer must still be active but must have read ptr before
+ proxy can privatize *ptr by assigning to ptr.
+ We try to make this interleaving more likely by delaying the commit of
+ writer and the start of proxy. */
+static void *writer (void *dummy __attribute__((unused)))
+{
+ for (int i = 0; i < iters; i++)
+ {
+ /* Initialize state in each round. */
+ x = 0;
+ ptr = &x;
+ priv_ptr = NULL;
+ int wrote = 1;
+ arrive_and_wait (i * 6 + 3);
+ /* Interference by another writer. Has a conflict with the proxy
+ privatizer. */
+ __transaction_atomic
+ {
+ if (ptr != NULL)
+ *ptr = 1;
+ else
+ wrote = 0;
+ delay (2000000);
+ }
+ arrive_and_wait (i * 6 + 6);
+ /* If the previous transaction committed first, wrote == 1 and x == 1;
+ otherwise, if the proxy came first, wrote == 0 and priv_value == 0.
+ */
+ if (wrote != priv_value)
+ abort ();
+ }
+ return NULL;
+}
+
+static void *proxy (void *dummy __attribute__((unused)))
+{
+ for (int i = 0; i < iters; i++)
+ {
+ arrive_and_wait (i * 6 + 3);
+ delay (1000000);
+ __transaction_atomic
+ {
+ /* Hand-off to privatization-user and its read-only transaction and
+ subsequent use of privatization. */
+ priv_ptr = ptr;
+ ptr = NULL;
+ }
+ arrive_and_wait (i * 6 + 6);
+ }
+ return NULL;
+}
+
+static void *privatization_user (void *dummy __attribute__((unused)))
+{
+ for (int i = 0; i < iters; i++)
+ {
+ arrive_and_wait (i * 6 + 3);
+ /* Spin until we have gotten a pointer from the proxy. Then access
+ the value pointed to nontransactionally. */
+ int *p = NULL;
+ while (p == NULL)
+ __transaction_atomic { p = priv_ptr; }
+ priv_value = *p;
+ arrive_and_wait (i * 6 + 6);
+ }
+ return NULL;
+}
+
+int main()
+{
+ pthread_t p[3];
+
+ pthread_create (p+0, NULL, writer, NULL);
+ pthread_create (p+1, NULL, proxy, NULL);
+ pthread_create (p+2, NULL, privatization_user, NULL);
+
+ for (int i = 0; i < 3; ++i)
+ pthread_join (p[i], NULL);
+
+ return 0;
+}
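For reference, the barrier arithmetic in the test above: three threads each call arrive_and_wait twice per round, so round i completes its first rendezvous when the counter reaches 6*i + 3 and its second at 6*i + 6, exactly the expected_value arguments passed. A trivial standalone check (commentary, not part of the testsuite):

#include <stdio.h>

int main (void)
{
  const int threads = 3;
  for (int i = 0; i < 3; i++)
    printf ("round %d: rendezvous at %d and %d\n", i,
            i * 2 * threads + threads, (i + 1) * 2 * threads);
  return 0;
}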


@@ -0,0 +1,30 @@
commit 30562e52396c7fbe2a404acda2b1b77f871005ea
Author: root <root@lenovo-x3950-01.khw.lab.eng.bos.redhat.com>
Date: Thu Jan 18 00:12:41 2018 -0500
Add FIRST_INT_REG, LAST_INT_REG, LEGACY_INT_REG_P, and LEGACY_INT_REGNO_P
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index e31c8d0..87fd381 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -1115,6 +1115,9 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
/* Base register for access to local variables of the function. */
#define FRAME_POINTER_REGNUM 20
+#define FIRST_INT_REG AX_REG
+#define LAST_INT_REG SP_REG
+
/* First floating point reg */
#define FIRST_FLOAT_REG 8
@@ -1317,6 +1320,9 @@ enum reg_class
#define QI_REG_P(X) (REG_P (X) && QI_REGNO_P (REGNO (X)))
#define QI_REGNO_P(N) IN_RANGE ((N), AX_REG, BX_REG)
+#define LEGACY_INT_REG_P(X) (REG_P (X) && LEGACY_INT_REGNO_P (REGNO (X)))
+#define LEGACY_INT_REGNO_P(N) (IN_RANGE ((N), FIRST_INT_REG, LAST_INT_REG))
+
#define GENERAL_REG_P(X) \
(REG_P (X) && GENERAL_REGNO_P (REGNO (X)))
#define GENERAL_REGNO_P(N) \

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,521 @@
commit 94695137d1ea3c094dd37ab5b73d66b09639f3f4
Author: hjl <hjl@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Tue Jan 16 11:17:49 2018 +0000
HJ patch #3
diff --git a/gcc/config/i386/constraints.md b/gcc/config/i386/constraints.md
index d567fd7..43faabb 100644
--- a/gcc/config/i386/constraints.md
+++ b/gcc/config/i386/constraints.md
@@ -135,7 +135,8 @@
(define_constraint "w"
"@internal Call memory operand."
- (and (not (match_test "TARGET_X32"))
+ (and (not (match_test "ix86_indirect_branch_register"))
+ (not (match_test "TARGET_X32"))
(match_operand 0 "memory_operand")))
;; Integer constant constraints.
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index ef16cf5..228f8f6 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -11274,7 +11274,7 @@
[(set (pc) (match_operand 0 "indirect_branch_operand"))]
""
{
- if (TARGET_X32)
+ if (TARGET_X32 || ix86_indirect_branch_register)
operands[0] = convert_memory_address (word_mode, operands[0]);
cfun->machine->has_local_indirect_jump = true;
})
@@ -11327,7 +11327,7 @@
OPTAB_DIRECT);
}
- if (TARGET_X32)
+ if (TARGET_X32 || ix86_indirect_branch_register)
operands[0] = convert_memory_address (word_mode, operands[0]);
cfun->machine->has_local_indirect_jump = true;
})
@@ -11514,7 +11514,7 @@
})
(define_insn "*call_pop"
- [(call (mem:QI (match_operand:SI 0 "call_insn_operand" "lzm"))
+ [(call (mem:QI (match_operand:SI 0 "call_insn_operand" "lwz"))
(match_operand 1))
(set (reg:SI SP_REG)
(plus:SI (reg:SI SP_REG)
@@ -11612,7 +11612,7 @@
(define_insn "*call_value_pop"
[(set (match_operand 0)
- (call (mem:QI (match_operand:SI 1 "call_insn_operand" "lzm"))
+ (call (mem:QI (match_operand:SI 1 "call_insn_operand" "lwz"))
(match_operand 2)))
(set (reg:SI SP_REG)
(plus:SI (reg:SI SP_REG)
diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
index 9dfa2cb..0a8ae8f 100644
--- a/gcc/config/i386/i386.opt
+++ b/gcc/config/i386/i386.opt
@@ -654,3 +654,7 @@ Enum(indirect_branch) String(thunk-inline) Value(indirect_branch_thunk_inline)
EnumValue
Enum(indirect_branch) String(thunk-extern) Value(indirect_branch_thunk_extern)
+
+mindirect-branch-register
+Target Report Var(ix86_indirect_branch_register) Init(0)
+Force indirect call and jump via register.
diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
index 61614e1..6c7a593 100644
--- a/gcc/config/i386/predicates.md
+++ b/gcc/config/i386/predicates.md
@@ -540,7 +540,8 @@
;; Test for a valid operand for indirect branch.
(define_predicate "indirect_branch_operand"
(ior (match_operand 0 "register_operand")
- (and (not (match_test "TARGET_X32"))
+ (and (not (match_test "ix86_indirect_branch_register"))
+ (not (match_test "TARGET_X32"))
(match_operand 0 "memory_operand"))))
;; Test for a valid operand for a call instruction.
@@ -549,8 +550,9 @@
(ior (match_test "constant_call_address_operand
(op, mode == VOIDmode ? mode : Pmode)")
(match_operand 0 "call_register_no_elim_operand")
- (and (not (match_test "TARGET_X32"))
- (match_operand 0 "memory_operand"))))
+ (and (not (match_test "ix86_indirect_branch_register"))
+ (and (not (match_test "TARGET_X32"))
+ (match_operand 0 "memory_operand")))))
;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_special_predicate "sibcall_insn_operand"
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 5acd23a..4a365c7 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -658,7 +658,8 @@ Objective-C and Objective-C++ Dialects}.
-m32 -m64 -mx32 -mlarge-data-threshold=@var{num} @gol
-msse2avx -mfentry -m8bit-idiv @gol
-mavx256-split-unaligned-load -mavx256-split-unaligned-store @gol
--mindirect-branch=@var{choice} -mfunction-return==@var{choice}}
+-mindirect-branch=@var{choice} -mfunction-return=@var{choice}
+-mindirect-branch-register}
@emph{i386 and x86-64 Windows Options}
@gccoptlist{-mconsole -mcygwin -mno-cygwin -mdll @gol
@@ -14669,6 +14670,10 @@ object file. You can control this behavior for a specific function by
using the function attribute @code{function_return}.
@xref{Function Attributes}.
+@item -mindirect-branch-register
+@opindex -mindirect-branch-register
+Force indirect call and jump via register.
+
@end table
These @samp{-m} switches are supported in addition to the above
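As a hedged illustration of what the new option changes (behavior inferred from the constraint and predicate edits above; the new tests below verify it), compile the following once with -O2 -mindirect-branch=thunk -fno-pic and once adding -mindirect-branch-register:

/* Illustration only, not part of the patch.  Without the flag the tail
   call may go through the shared __x86_indirect_thunk with a memory
   operand; with it, the target is first loaded into a register and the
   jump becomes __x86_indirect_thunk_rax (or _eax on ia32), as
   indirect-thunk-register-1.c below checks.  */
typedef void (*dispatch_t) (long);

dispatch_t dispatch;

void
indirect_jump (long offset)
{
  dispatch (offset);
}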
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-1.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-1.c
index 034b4cc..321db77 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-1.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-2.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-2.c
index e0c57cb..d584516 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-2.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-3.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-3.c
index 3c0d4c3..9e24a38 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-3.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-4.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-4.c
index 14d4ef6..127b5d9 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-4.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-4.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-7.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-7.c
index bc6b47a..17c2d0f 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-7.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-7.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
void func0 (void);
void func1 (void);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-1.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-1.c
index 7c45142..cd7e8d7 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-1.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-2.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-2.c
index 9eebc84..4dbd7a5 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-2.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-3.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-3.c
index f938db0..4aeec18 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-3.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-4.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-4.c
index 4e58599..ac0e599 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-4.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-4.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-5.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-5.c
index b8d5024..573cf1e 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-5.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-5.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-6.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-6.c
index 455adab..b2b37fc 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-6.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-6.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-7.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-7.c
index 4595b84..4a43e19 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-7.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-7.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -fno-pic" } */
void func0 (void);
void func1 (void);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-1.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-1.c
index f424181..72de88e 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-1.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-2.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-2.c
index ac54868..d4137b3 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-2.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-3.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-3.c
index 06ebf1c..d9964c2 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-3.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-4.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-4.c
index 1c8f944..d4dca4d 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-4.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-4.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-7.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-7.c
index 86e9fd1..aece938 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-7.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-extern-7.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
void func0 (void);
void func1 (void);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-1.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-1.c
index 4117a35..e3cea3f 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-1.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-2.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-2.c
index 650d55c..6222996 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-2.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-3.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-3.c
index 9540996..2eef6f3 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-3.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-4.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-4.c
index f3db6e2..e825a10 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-4.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-4.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
typedef void (*dispatch_t)(long offset);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-7.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-7.c
index 764a375..c67066c 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-7.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-inline-7.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
void func0 (void);
void func1 (void);
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-register-1.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-1.c
new file mode 100644
index 0000000..7d396a3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-1.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mindirect-branch=thunk -mindirect-branch-register -fno-pic" } */
+
+typedef void (*dispatch_t)(long offset);
+
+dispatch_t dispatch;
+
+void
+male_indirect_jump (long offset)
+{
+ dispatch(offset);
+}
+
+/* { dg-final { scan-assembler "jmp\[ \t\]*__x86_indirect_thunk_(r|e)ax" } } */
+/* { dg-final { scan-assembler "jmp\[ \t\]*\.LIND" } } */
+/* { dg-final { scan-assembler "call\[ \t\]*\.LIND" } } */
+/* { dg-final { scan-assembler "mov\[ \t\](%eax|%rax), \\((%esp|%rsp)\\)" } } */
+/* { dg-final { scan-assembler {\tpause} } } */
+/* { dg-final { scan-assembler-not "push(?:l|q)\[ \t\]*_?dispatch" } } */
+/* { dg-final { scan-assembler-not "pushq\[ \t\]%rax" } } */
+/* { dg-final { scan-assembler-not "__x86_indirect_thunk\n" } } */
+/* { dg-final { scan-assembler-not "__x86_indirect_thunk_bnd\n" } } */
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-register-2.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-2.c
new file mode 100644
index 0000000..e7e616b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-2.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mindirect-branch=thunk-inline -mindirect-branch-register -fno-pic" } */
+
+typedef void (*dispatch_t)(long offset);
+
+dispatch_t dispatch;
+
+void
+male_indirect_jump (long offset)
+{
+ dispatch(offset);
+}
+
+/* { dg-final { scan-assembler "jmp\[ \t\]*\.LIND" } } */
+/* { dg-final { scan-assembler "call\[ \t\]*\.LIND" } } */
+/* { dg-final { scan-assembler "mov\[ \t\](%eax|%rax), \\((%esp|%rsp)\\)" } } */
+/* { dg-final { scan-assembler {\tpause} } } */
+/* { dg-final { scan-assembler-not "push(?:l|q)\[ \t\]*_?dispatch" } } */
+/* { dg-final { scan-assembler-not "pushq\[ \t\]%rax" } } */
+/* { dg-final { scan-assembler-not "__x86_indirect_thunk" } } */
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-register-3.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-3.c
new file mode 100644
index 0000000..5320e92
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-3.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mindirect-branch=thunk-extern -mindirect-branch-register -fno-pic" } */
+
+typedef void (*dispatch_t)(long offset);
+
+dispatch_t dispatch;
+
+void
+male_indirect_jump (long offset)
+{
+ dispatch(offset);
+}
+
+/* { dg-final { scan-assembler "jmp\[ \t\]*__x86_indirect_thunk_(r|e)ax" } } */
+/* { dg-final { scan-assembler-not "push(?:l|q)\[ \t\]*_?dispatch" } } */
+/* { dg-final { scan-assembler-not "pushq\[ \t\]%rax" } } */
+/* { dg-final { scan-assembler-not {\t(pause|nop)} } } */
+/* { dg-final { scan-assembler-not "jmp\[ \t\]*\.LIND" } } */
+/* { dg-final { scan-assembler-not "call\[ \t\]*\.LIND" } } */
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-10.c b/gcc/testsuite/gcc.target/i386/ret-thunk-10.c
index 3a6727b..e6fea84 100644
--- a/gcc/testsuite/gcc.target/i386/ret-thunk-10.c
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-10.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=thunk-inline -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=thunk-inline -mindirect-branch=thunk -fno-pic" } */
extern void (*bar) (void);
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-11.c b/gcc/testsuite/gcc.target/i386/ret-thunk-11.c
index b8f6818..e239ec4 100644
--- a/gcc/testsuite/gcc.target/i386/ret-thunk-11.c
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-11.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=thunk-extern -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=thunk-extern -mindirect-branch=thunk -fno-pic" } */
extern void (*bar) (void);
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-12.c b/gcc/testsuite/gcc.target/i386/ret-thunk-12.c
index 01b0a02..fa31813 100644
--- a/gcc/testsuite/gcc.target/i386/ret-thunk-12.c
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-12.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk -fno-pic" } */
extern void (*bar) (void);
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-13.c b/gcc/testsuite/gcc.target/i386/ret-thunk-13.c
index 4b497b5..fd5b41f 100644
--- a/gcc/testsuite/gcc.target/i386/ret-thunk-13.c
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-13.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-inline -fno-pic" } */
extern void (*bar) (void);
extern int foo (void) __attribute__ ((function_return("thunk")));
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-14.c b/gcc/testsuite/gcc.target/i386/ret-thunk-14.c
index 4ae4c44..d606373 100644
--- a/gcc/testsuite/gcc.target/i386/ret-thunk-14.c
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-14.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=thunk-extern -fno-pic" } */
extern void (*bar) (void);
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-15.c b/gcc/testsuite/gcc.target/i386/ret-thunk-15.c
index 5b5bc76..75e45e2 100644
--- a/gcc/testsuite/gcc.target/i386/ret-thunk-15.c
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-15.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=keep -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=keep -mindirect-branch=keep -fno-pic" } */
extern void (*bar) (void);
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-9.c b/gcc/testsuite/gcc.target/i386/ret-thunk-9.c
index fa24a1f..d1db41c 100644
--- a/gcc/testsuite/gcc.target/i386/ret-thunk-9.c
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-9.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mfunction-return=thunk -mindirect-branch=thunk -fno-pic" } */
+/* { dg-options "-O2 -mno-indirect-branch-register -mfunction-return=thunk -mindirect-branch=thunk -fno-pic" } */
extern void (*bar) (void);


@@ -0,0 +1,77 @@
commit 6effbc703b711779a196e5dbaf6335f39fab71c2
Author: hjl <hjl@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Tue Jan 16 11:19:51 2018 +0000
HJ patch #4
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 9dffd02f..e73389b 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -14497,6 +14497,7 @@ put_condition_code (enum rtx_code code, enum machine_mode mode, bool reverse,
If CODE is 'h', pretend the reg is the 'high' byte register.
If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
If CODE is 'd', duplicate the operand for AVX instruction.
+ If CODE is 'V', print naked full integer register name without %.
*/
void
@@ -14506,7 +14507,7 @@ print_reg (rtx x, int code, FILE *file)
unsigned int regno;
bool duplicated = code == 'd' && TARGET_AVX;
- if (ASSEMBLER_DIALECT == ASM_ATT)
+ if (ASSEMBLER_DIALECT == ASM_ATT && code != 'V')
putc ('%', file);
if (x == pc_rtx)
@@ -14542,6 +14543,14 @@ print_reg (rtx x, int code, FILE *file)
else
code = GET_MODE_SIZE (GET_MODE (x));
+ if (code == 'V')
+ {
+ if (GENERAL_REGNO_P (regno))
+ code = GET_MODE_SIZE (word_mode);
+ else
+ error ("'V' modifier on non-integer register");
+ }
+
/* Irritatingly, AMD extended registers use different naming convention
from the normal registers: "r%d[bwd]" */
if (REX_INT_REGNO_P (regno))
@@ -14695,6 +14704,7 @@ get_some_local_dynamic_name (void)
& -- print some in-use local-dynamic symbol name.
H -- print a memory address offset by 8; used for sse high-parts
Y -- print condition for XOP pcom* instruction.
+ V -- print naked full integer register name without %.
+ -- print a branch hint as 'cs' or 'ds' prefix
; -- print a semicolon (after prefixes due to bug in older gas).
~ -- print "i" if TARGET_AVX2, "f" otherwise.
@@ -14919,6 +14929,7 @@ ix86_print_operand (FILE *file, rtx x, int code)
case 'X':
case 'P':
case 'p':
+ case 'V':
break;
case 's':
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-register-4.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-4.c
new file mode 100644
index 0000000..f0cd9b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-4.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mindirect-branch=keep -fno-pic" } */
+
+extern void (*func_p) (void);
+
+void
+foo (void)
+{
+ asm("call __x86_indirect_thunk_%V0" : : "a" (func_p));
+}
+
+/* { dg-final { scan-assembler "call\[ \t\]*__x86_indirect_thunk_eax" { target ia32 } } } */
+/* { dg-final { scan-assembler "call\[ \t\]*__x86_indirect_thunk_rax" { target { ! ia32 } } } } */


@@ -0,0 +1,233 @@
commit 5315d05c7295fbb9345d85d6bf7cbe7c975a19c8
Author: hjl <hjl@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Tue Jan 16 11:22:01 2018 +0000
HJ patch #5
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index e73389b..15cfe83 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -4634,6 +4634,19 @@ ix86_set_indirect_branch_type (tree fndecl)
}
else
cfun->machine->indirect_branch_type = ix86_indirect_branch;
+
+ /* -mcmodel=large is not compatible with -mindirect-branch=thunk
+ or -mindirect-branch=thunk-extern. */
+ if ((ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
+ && ((cfun->machine->indirect_branch_type
+ == indirect_branch_thunk_extern)
+ || (cfun->machine->indirect_branch_type
+ == indirect_branch_thunk)))
+ error ("%<-mindirect-branch=%s%> and %<-mcmodel=large%> are not "
+ "compatible",
+ ((cfun->machine->indirect_branch_type
+ == indirect_branch_thunk_extern)
+ ? "thunk-extern" : "thunk"));
}
if (cfun->machine->function_return_type == indirect_branch_unset)
@@ -4659,6 +4672,19 @@ ix86_set_indirect_branch_type (tree fndecl)
}
else
cfun->machine->function_return_type = ix86_function_return;
+
+ /* -mcmodel=large is not compatible with -mfunction-return=thunk
+ or -mfunction-return=thunk-extern. */
+ if ((ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
+ && ((cfun->machine->function_return_type
+ == indirect_branch_thunk_extern)
+ || (cfun->machine->function_return_type
+ == indirect_branch_thunk)))
+ error ("%<-mfunction-return=%s%> and %<-mcmodel=large%> are not "
+ "compatible",
+ ((cfun->machine->function_return_type
+ == indirect_branch_thunk_extern)
+ ? "thunk-extern" : "thunk"));
}
}
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 4a365c7..7b33803 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -14659,6 +14659,11 @@ to external call and return thunk provided in a separate object file.
You can control this behavior for a specific function by using the
function attribute @code{indirect_branch}. @xref{Function Attributes}.
+Note that @option{-mcmodel=large} is incompatible with both
+@option{-mindirect-branch=thunk} and
+@option{-mindirect-branch=thunk-extern}, since the thunk function may
+not be reachable in the large code model.
+
@item -mfunction-return=@var{choice}
@opindex -mfunction-return
Convert function return with @var{choice}. The default is @samp{keep},
@@ -14670,6 +14675,11 @@ object file. You can control this behavior for a specific function by
using the function attribute @code{function_return}.
@xref{Function Attributes}.
+Note that @option{-mcmodel=large} is incompatible with both
+@option{-mfunction-return=thunk} and
+@option{-mfunction-return=thunk-extern}, since the thunk function may
+not be reachable in the large code model.
+
@item -mindirect-branch-register
@opindex -mindirect-branch-register
Force indirect call and jump via register.
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-10.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-10.c
new file mode 100644
index 0000000..a0674bd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-10.c
@@ -0,0 +1,7 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mindirect-branch=thunk-inline -mfunction-return=keep -mcmodel=large" } */
+
+void
+bar (void)
+{
+}
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-8.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-8.c
new file mode 100644
index 0000000..7a80a89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-8.c
@@ -0,0 +1,7 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mindirect-branch=thunk -mfunction-return=keep -mcmodel=large" } */
+
+void
+bar (void)
+{ /* { dg-error "'-mindirect-branch=thunk' and '-mcmodel=large' are not compatible" } */
+}
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-9.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-9.c
new file mode 100644
index 0000000..d4d45c5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-9.c
@@ -0,0 +1,7 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mindirect-branch=thunk-extern -mfunction-return=keep -mcmodel=large" } */
+
+void
+bar (void)
+{ /* { dg-error "'-mindirect-branch=thunk-extern' and '-mcmodel=large' are not compatible" } */
+}
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-10.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-10.c
new file mode 100644
index 0000000..3a2aead
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-10.c
@@ -0,0 +1,9 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mindirect-branch=keep -mfunction-return=keep -mcmodel=large" } */
+/* { dg-additional-options "-fPIC" { target fpic } } */
+
+__attribute__ ((indirect_branch("thunk-extern")))
+void
+bar (void)
+{ /* { dg-error "'-mindirect-branch=thunk-extern' and '-mcmodel=large' are not compatible" } */
+}
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-11.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-11.c
new file mode 100644
index 0000000..8e52f03
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-11.c
@@ -0,0 +1,9 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mindirect-branch=keep -mfunction-return=keep -mcmodel=large" } */
+/* { dg-additional-options "-fPIC" { target fpic } } */
+
+__attribute__ ((indirect_branch("thunk-inline")))
+void
+bar (void)
+{
+}
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-9.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-9.c
new file mode 100644
index 0000000..bdaa4f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-attr-9.c
@@ -0,0 +1,9 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mindirect-branch=keep -mfunction-return=keep -mcmodel=large" } */
+/* { dg-additional-options "-fPIC" { target fpic } } */
+
+__attribute__ ((indirect_branch("thunk")))
+void
+bar (void)
+{ /* { dg-error "'-mindirect-branch=thunk' and '-mcmodel=large' are not compatible" } */
+}
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-17.c b/gcc/testsuite/gcc.target/i386/ret-thunk-17.c
new file mode 100644
index 0000000..0605e2c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-17.c
@@ -0,0 +1,7 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mfunction-return=thunk -mindirect-branch=keep -mcmodel=large" } */
+
+void
+bar (void)
+{ /* { dg-error "'-mfunction-return=thunk' and '-mcmodel=large' are not compatible" } */
+}
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-18.c b/gcc/testsuite/gcc.target/i386/ret-thunk-18.c
new file mode 100644
index 0000000..307019d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-18.c
@@ -0,0 +1,8 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mfunction-return=thunk-extern -mindirect-branch=keep -mcmodel=large" } */
+/* { dg-additional-options "-fPIC" { target fpic } } */
+
+void
+bar (void)
+{ /* { dg-error "'-mfunction-return=thunk-extern' and '-mcmodel=large' are not compatible" } */
+}
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-19.c b/gcc/testsuite/gcc.target/i386/ret-thunk-19.c
new file mode 100644
index 0000000..772617f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-19.c
@@ -0,0 +1,8 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=keep -mcmodel=large" } */
+
+__attribute__ ((function_return("thunk")))
+void
+bar (void)
+{ /* { dg-error "'-mfunction-return=thunk' and '-mcmodel=large' are not compatible" } */
+}
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-20.c b/gcc/testsuite/gcc.target/i386/ret-thunk-20.c
new file mode 100644
index 0000000..1e9f9bd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-20.c
@@ -0,0 +1,9 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=keep -mcmodel=large" } */
+/* { dg-additional-options "-fPIC" { target fpic } } */
+
+__attribute__ ((function_return("thunk-extern")))
+void
+bar (void)
+{ /* { dg-error "'-mfunction-return=thunk-extern' and '-mcmodel=large' are not compatible" } */
+}
diff --git a/gcc/testsuite/gcc.target/i386/ret-thunk-21.c b/gcc/testsuite/gcc.target/i386/ret-thunk-21.c
new file mode 100644
index 0000000..eea07f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/ret-thunk-21.c
@@ -0,0 +1,9 @@
+/* { dg-do compile { target { lp64 } } } */
+/* { dg-options "-O2 -mfunction-return=keep -mindirect-branch=keep -mcmodel=large" } */
+/* { dg-additional-options "-fPIC" { target fpic } } */
+
+__attribute__ ((function_return("thunk-inline")))
+void
+bar (void)
+{
+}


@@ -0,0 +1,38 @@
2018-04-10 Segher Boessenkool <segher@kernel.crashing.org>
PR target/85287
* gcc/config/rs6000/rs6000.md (allocate_stack): Put the residual size
for stack clash protection in a register whenever we need it to be in
a register.
--- a/gcc/config/rs6000/rs6000.md 2018/04/10 21:09:30 259298
+++ b/gcc/config/rs6000/rs6000.md 2018/04/10 21:37:34 259299
@@ -9783,14 +9783,12 @@
/* Now handle residuals. We just have to set operands[1] correctly
and let the rest of the expander run. */
operands[1] = residual;
- if (!CONST_INT_P (residual))
- operands[1] = force_reg (Pmode, operands[1]);
}
- if (GET_CODE (operands[1]) != CONST_INT
- || INTVAL (operands[1]) < -32767
- || INTVAL (operands[1]) > 32768)
+ if (!(CONST_INT_P (operands[1])
+ && IN_RANGE (INTVAL (operands[1]), -32767, 32768)))
{
+ operands[1] = force_reg (Pmode, operands[1]);
neg_op0 = gen_reg_rtx (Pmode);
if (TARGET_32BIT)
emit_insn (gen_negsi2 (neg_op0, operands[1]));
@@ -9798,7 +9796,7 @@
emit_insn (gen_negdi2 (neg_op0, operands[1]));
}
else
- neg_op0 = GEN_INT (- INTVAL (operands[1]));
+ neg_op0 = GEN_INT (-INTVAL (operands[1]));
insn = emit_insn ((* ((TARGET_32BIT) ? gen_movsi_update_stack
: gen_movdi_di_update_stack))


@@ -0,0 +1,91 @@
diff --git a/gcc/testsuite/gcc.dg/stack-check-5.c b/gcc/testsuite/gcc.dg/stack-check-5.c
index 850e023ea4e..604fa3cf6c5 100644
--- a/gcc/testsuite/gcc.dg/stack-check-5.c
+++ b/gcc/testsuite/gcc.dg/stack-check-5.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -fdump-rtl-pro_and_epilogue -fno-optimize-sibling-calls --param stack-clash-protection-probe-interval=12 --param stack-clash-protection-guard-size=12" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
-/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector*" } { "" } } */
/* Otherwise the S/390 back-end might save the stack pointer in f2 ()
diff --git a/gcc/testsuite/gcc.dg/stack-check-6.c b/gcc/testsuite/gcc.dg/stack-check-6.c
index ab4b0e8894c..fe75612b737 100644
--- a/gcc/testsuite/gcc.dg/stack-check-6.c
+++ b/gcc/testsuite/gcc.dg/stack-check-6.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -fdump-rtl-pro_and_epilogue -fno-optimize-sibling-calls --param stack-clash-protection-probe-interval=12 --param stack-clash-protection-guard-size=12" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
-/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector*" } { "" } } */
extern void foo (char *);
diff --git a/gcc/testsuite/gcc.dg/stack-check-6a.c b/gcc/testsuite/gcc.dg/stack-check-6a.c
index 468d649a4fa..8fb9c621585 100644
--- a/gcc/testsuite/gcc.dg/stack-check-6a.c
+++ b/gcc/testsuite/gcc.dg/stack-check-6a.c
@@ -4,7 +4,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -fdump-rtl-pro_and_epilogue -fno-optimize-sibling-calls --param stack-clash-protection-probe-interval=12 --param stack-clash-protection-guard-size=16" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
-/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector*" } { "" } } */
#include "stack-check-6.c"
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-11.c b/gcc/testsuite/gcc.target/i386/stack-check-11.c
index fe5b2c2b844..43a291857b6 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-11.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-11.c
@@ -1,6 +1,8 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector*" } { "" } } */
+
#include <stdint.h>
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-17.c b/gcc/testsuite/gcc.target/i386/stack-check-17.c
index dcd29305a2c..da6ea016815 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-17.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-17.c
@@ -1,6 +1,8 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fomit-frame-pointer" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector*" } { "" } } */
+
int x0, x1;
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-18.c b/gcc/testsuite/gcc.target/i386/stack-check-18.c
index 1638f776267..1cf4bbcfafb 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-18.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-18.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fdump-rtl-expand" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
-/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector*" } { "" } } */
int f1 (char *);
diff --git a/gcc/testsuite/gcc.target/i386/stack-check-19.c b/gcc/testsuite/gcc.target/i386/stack-check-19.c
index c341801189c..49f3a20af8b 100644
--- a/gcc/testsuite/gcc.target/i386/stack-check-19.c
+++ b/gcc/testsuite/gcc.target/i386/stack-check-19.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-options "-O2 -fstack-clash-protection -mtune=generic -fdump-rtl-expand" } */
/* { dg-require-effective-target supports_stack_clash_protection } */
-/* { dg-skip-if "" { *-*-* } { "-fstack-protector" } { "" } } */
+/* { dg-skip-if "" { *-*-* } { "-fstack-protector*" } { "" } } */
int f1 (char *);


@@ -0,0 +1,114 @@
commit f7765f70e0e254fd9ce4469c7281c69cd06c9467
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed Jan 24 21:57:16 2018 +0000
PR target/83994
* i386.c (get_probe_interval): Move to earlier point.
(ix86_compute_frame_layout): If -fstack-clash-protection and
the frame is larger than the probe interval, then use pushes
to save registers rather than reg->mem moves.
(ix86_expand_prologue): Remove conditional for int_registers_saved
assertion.
PR target/83994
* gcc.target/i386/pr83994.c: New test.
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 15cfe83..5230227 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -9371,6 +9371,18 @@ ix86_builtin_setjmp_frame_value (void)
return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
}
+/* Return the probing interval for -fstack-clash-protection. */
+
+static HOST_WIDE_INT
+get_probe_interval (void)
+{
+ if (flag_stack_clash_protection)
+ return (HOST_WIDE_INT_1U
+ << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+ else
+ return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
+}
+
/* When using -fsplit-stack, the allocation routines set a field in
the TCB to the bottom of the stack plus this much space, measured
in bytes. */
@@ -9545,7 +9557,15 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
to_allocate = offset - frame->sse_reg_save_offset;
if ((!to_allocate && frame->nregs <= 1)
- || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
+ || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000)
+ /* If stack clash probing needs a loop, then it needs a
+ scratch register. But the returned register is only guaranteed
+ to be safe to use after register saves are complete. So if
+ stack clash protections are enabled and the allocated frame is
+ larger than the probe interval, then use pushes to save
+ callee saved registers. */
+ || (flag_stack_clash_protection && to_allocate > get_probe_interval ()))
+
frame->save_regs_using_mov = false;
if (ix86_using_red_zone ()
@@ -10181,18 +10201,6 @@ release_scratch_register_on_entry (struct scratch_reg *sr)
}
}
-/* Return the probing interval for -fstack-clash-protection. */
-
-static HOST_WIDE_INT
-get_probe_interval (void)
-{
- if (flag_stack_clash_protection)
- return (HOST_WIDE_INT_1U
- << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
- else
- return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
-}
-
/* Emit code to adjust the stack pointer by SIZE bytes while probing it.
This differs from the next routine in that it tries hard to prevent
@@ -11064,12 +11072,11 @@ ix86_expand_prologue (void)
&& (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
|| flag_stack_clash_protection))
{
- /* This assert wants to verify that integer registers were saved
- prior to probing. This is necessary when probing may be implemented
- as a function call (Windows). It is not necessary for stack clash
- protection probing. */
- if (!flag_stack_clash_protection)
- gcc_assert (int_registers_saved);
+ /* We expect the GP registers to be saved when probes are used
+ as the probing sequences might need a scratch register and
+ the routine to allocate one assumes the integer registers
+ have already been saved. */
+ gcc_assert (int_registers_saved);
if (flag_stack_clash_protection)
{
diff --git a/gcc/testsuite/gcc.target/i386/pr83994.c b/gcc/testsuite/gcc.target/i386/pr83994.c
new file mode 100644
index 0000000..dc0b7cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr83994.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=i686 -fpic -fstack-clash-protection" } */
+/* { dg-require-effective-target ia32 } */
+
+void f1 (char *);
+
+__attribute__ ((regparm (3)))
+int
+f2 (int arg1, int arg2, int arg3)
+{
+ char buf[16384];
+ f1 (buf);
+ f1 (buf);
+ return 0;
+}
+
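A quick sanity check of the sizes involved (assuming the default probe interval parameter of 12, i.e. 4096 bytes, matching get_probe_interval above): the 16384-byte buffer in pr83994.c exceeds one interval, so the prologue needs the probing loop and therefore a scratch register, which is why the fix switches to push-based register saves.

#include <stdio.h>

int main (void)
{
  long probe_interval = 1L << 12;   /* default interval: 4096 bytes */
  long frame = 16384;               /* buf[16384] in the test above */
  printf ("needs probing loop: %d\n", frame > probe_interval);
  return 0;
}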


@@ -0,0 +1,163 @@
commit 33839c8f8aa7857cc5f22ddb3f0960999cb0dfc7
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed Jan 31 05:02:30 2018 +0000
PR target/84064
* i386.c (ix86_adjust_stack_and_probe_stack_clash): New argument
INT_REGISTERS_SAVED. Check it prior to calling
get_scratch_register_on_entry.
(ix86_adjust_stack_and_probe): Similarly.
(ix86_emit_probe_stack_range): Similarly.
(ix86_expand_prologue): Corresponding changes.
PR target/84064
* gcc.target/i386/pr84064: New test.
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 5230227..2fe2a0c 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -10206,10 +10206,14 @@ release_scratch_register_on_entry (struct scratch_reg *sr)
This differs from the next routine in that it tries hard to prevent
attacks that jump the stack guard. Thus it is never allowed to allocate
more than PROBE_INTERVAL bytes of stack space without a suitable
- probe. */
+ probe.
+
+ INT_REGISTERS_SAVED is true if integer registers have already been
+ pushed on the stack. */
static void
-ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
+ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size,
+ const bool int_registers_saved)
{
struct machine_function *m = cfun->machine;
struct ix86_frame frame;
@@ -10318,6 +10322,12 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
}
else
{
+ /* We expect the GP registers to be saved when probes are used
+ as the probing sequences might need a scratch register and
+ the routine to allocate one assumes the integer registers
+ have already been saved. */
+ gcc_assert (int_registers_saved);
+
struct scratch_reg sr;
get_scratch_register_on_entry (&sr);
@@ -10376,10 +10386,14 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
emit_insn (gen_blockage ());
}
-/* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
+/* Emit code to adjust the stack pointer by SIZE bytes while probing it.
+
+ INT_REGISTERS_SAVED is true if integer registers have already been
+ pushed on the stack. */
static void
-ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
+ix86_adjust_stack_and_probe (const HOST_WIDE_INT size,
+ const bool int_registers_saved)
{
/* We skip the probe for the first interval + a small dope of 4 words and
probe that many bytes past the specified size to maintain a protection
@@ -10440,6 +10454,12 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
equality test for the loop condition. */
else
{
+ /* We expect the GP registers to be saved when probes are used
+ as the probing sequences might need a scratch register and
+ the routine to allocate one assumes the integer registers
+ have already been saved. */
+ gcc_assert (int_registers_saved);
+
HOST_WIDE_INT rounded_size;
struct scratch_reg sr;
@@ -10564,10 +10584,14 @@ output_adjust_stack_and_probe (rtx reg)
}
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
- inclusive. These are offsets from the current stack pointer. */
+ inclusive. These are offsets from the current stack pointer.
+
+ INT_REGISTERS_SAVED is true if integer registers have already been
+ pushed on the stack. */
static void
-ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
+ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
+ const bool int_registers_saved)
{
/* See if we have a constant small number of probes to generate. If so,
that's the easy case. The run-time loop is made up of 7 insns in the
@@ -10595,6 +10619,12 @@ ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
equality test for the loop condition. */
else
{
+ /* We expect the GP registers to be saved when probes are used
+ as the probing sequences might need a scratch register and
+ the routine to allocate one assumes the integer registers
+ have already been saved. */
+ gcc_assert (int_registers_saved);
+
HOST_WIDE_INT rounded_size, last;
struct scratch_reg sr;
@@ -11072,20 +11102,15 @@ ix86_expand_prologue (void)
&& (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
|| flag_stack_clash_protection))
{
- /* We expect the GP registers to be saved when probes are used
- as the probing sequences might need a scratch register and
- the routine to allocate one assumes the integer registers
- have already been saved. */
- gcc_assert (int_registers_saved);
-
if (flag_stack_clash_protection)
{
- ix86_adjust_stack_and_probe_stack_clash (allocate);
+ ix86_adjust_stack_and_probe_stack_clash (allocate,
+ int_registers_saved);
allocate = 0;
}
else if (STACK_CHECK_MOVING_SP)
{
- ix86_adjust_stack_and_probe (allocate);
+ ix86_adjust_stack_and_probe (allocate, int_registers_saved);
allocate = 0;
}
else
@@ -11096,9 +11121,11 @@ ix86_expand_prologue (void)
size = 0x80000000 - get_stack_check_protect () - 1;
if (TARGET_STACK_PROBE)
- ix86_emit_probe_stack_range (0, size + get_stack_check_protect ());
+ ix86_emit_probe_stack_range (0, size + get_stack_check_protect (),
+ int_registers_saved);
else
- ix86_emit_probe_stack_range (get_stack_check_protect (), size);
+ ix86_emit_probe_stack_range (get_stack_check_protect (), size,
+ int_registers_saved);
}
}
diff --git a/gcc/testsuite/gcc.target/i386/pr84064.c b/gcc/testsuite/gcc.target/i386/pr84064.c
new file mode 100644
index 0000000..01f8d9e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr84064.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=i686 -fstack-clash-protection" } */
+/* { dg-require-effective-target ia32 } */
+
+void
+f (void *p1, void *p2)
+{
+ __builtin_memcpy (p1, p2, 1000);
+}
+


@@ -0,0 +1,182 @@
commit 14041afe24556efd5845564aa183b6451fd9d6cc
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Thu Feb 1 16:22:56 2018 +0000
PR target/84128
* config/i386/i386.c (release_scratch_register_on_entry): Add new
OFFSET and RELEASE_VIA_POP arguments. Use SP+OFFSET to restore
the scratch if RELEASE_VIA_POP is false.
(ix86_adjust_stack_and_probe_stack_clash): Un-constify SIZE.
If we have to save a temporary register, decrement SIZE appropriately.
Pass new arguments to release_scratch_register_on_entry.
(ix86_adjust_stack_and_probe): Likewise.
(ix86_emit_probe_stack_range): Pass new arguments to
release_scratch_register_on_entry.
PR target/84128
* gcc.target/i386/pr84128.c: New test.
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 2fe2a0c..c25d26c 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -10182,22 +10182,39 @@ get_scratch_register_on_entry (struct scratch_reg *sr)
}
}
-/* Release a scratch register obtained from the preceding function. */
+/* Release a scratch register obtained from the preceding function.
+
+ If RELEASE_VIA_POP is true, we just pop the register off the stack
+ to release it. This is what non-Linux systems use with -fstack-check.
+
+ Otherwise we use OFFSET to locate the saved register and the
+ allocated stack space becomes part of the local frame and is
+ deallocated by the epilogue. */
static void
-release_scratch_register_on_entry (struct scratch_reg *sr)
+release_scratch_register_on_entry (struct scratch_reg *sr, HOST_WIDE_INT offset,
+ bool release_via_pop)
{
if (sr->saved)
{
- struct machine_function *m = cfun->machine;
- rtx x, insn = emit_insn (gen_pop (sr->reg));
+ if (release_via_pop)
+ {
+ struct machine_function *m = cfun->machine;
+ rtx x, insn = emit_insn (gen_pop (sr->reg));
- /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
- RTX_FRAME_RELATED_P (insn) = 1;
- x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
- x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
- add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
- m->fs.sp_offset -= UNITS_PER_WORD;
+ /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
+ RTX_FRAME_RELATED_P (insn) = 1;
+ x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
+ x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
+ m->fs.sp_offset -= UNITS_PER_WORD;
+ }
+ else
+ {
+ rtx x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset));
+ x = gen_rtx_SET (VOIDmode, sr->reg, gen_rtx_MEM (word_mode, x));
+ emit_insn (x);
+ }
}
}
@@ -10212,7 +10229,7 @@ release_scratch_register_on_entry (struct scratch_reg *sr)
pushed on the stack. */
static void
-ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size,
+ix86_adjust_stack_and_probe_stack_clash (HOST_WIDE_INT size,
const bool int_registers_saved)
{
struct machine_function *m = cfun->machine;
@@ -10331,6 +10348,12 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size,
struct scratch_reg sr;
get_scratch_register_on_entry (&sr);
+ /* If we needed to save a register, then account for any space
+ that was pushed (we are not going to pop the register when
+ we do the restore). */
+ if (sr.saved)
+ size -= UNITS_PER_WORD;
+
/* Step 1: round SIZE down to a multiple of the interval. */
HOST_WIDE_INT rounded_size = size & -probe_interval;
@@ -10379,7 +10402,9 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size,
m->fs.cfa_reg == stack_pointer_rtx);
dump_stack_clash_frame_info (PROBE_LOOP, size != rounded_size);
- release_scratch_register_on_entry (&sr);
+ /* This does not deallocate the space reserved for the scratch
+ register. That will be deallocated in the epilogue. */
+ release_scratch_register_on_entry (&sr, size, false);
}
/* Make sure nothing is scheduled before we are done. */
@@ -10392,7 +10417,7 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size,
pushed on the stack. */
static void
-ix86_adjust_stack_and_probe (const HOST_WIDE_INT size,
+ix86_adjust_stack_and_probe (HOST_WIDE_INT size,
const bool int_registers_saved)
{
/* We skip the probe for the first interval + a small dope of 4 words and
@@ -10465,6 +10490,11 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size,
get_scratch_register_on_entry (&sr);
+ /* If we needed to save a register, then account for any space
+ that was pushed (we are not going to pop the register when
+ we do the restore). */
+ if (sr.saved)
+ size -= UNITS_PER_WORD;
/* Step 1: round SIZE to the previous multiple of the interval. */
@@ -10516,7 +10546,9 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size,
(get_probe_interval ()
+ dope))));
- release_scratch_register_on_entry (&sr);
+ /* This does not deallocate the space reserved for the scratch
+ register. That will be deallocated in the epilogue. */
+ release_scratch_register_on_entry (&sr, size, false);
}
gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
@@ -10669,7 +10701,7 @@ ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
sr.reg),
rounded_size - size));
- release_scratch_register_on_entry (&sr);
+ release_scratch_register_on_entry (&sr, size, true);
}
/* Make sure nothing is scheduled before we are done. */
diff --git a/gcc/testsuite/gcc.target/i386/pr84128.c b/gcc/testsuite/gcc.target/i386/pr84128.c
new file mode 100644
index 0000000..a8323fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr84128.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -march=i686 -mtune=generic -fstack-clash-protection" } */
+/* { dg-require-effective-target ia32 } */
+
+__attribute__ ((noinline, noclone, weak, regparm (3)))
+int
+f1 (long arg0, int (*pf) (long, void *))
+{
+ unsigned char buf[32768];
+ return pf (arg0, buf);
+}
+
+__attribute__ ((noinline, noclone, weak))
+int
+f2 (long arg0, void *ignored)
+{
+ if (arg0 != 17)
+ __builtin_abort ();
+ return 19;
+}
+
+int
+main (void)
+{
+ if (f1 (17, f2) != 19)
+ __builtin_abort ();
+ return 0;
+}
+
+
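One subtlety worth spelling out: when the scratch register had to be pushed, that push already moved the stack pointer one word, so SIZE is reduced by UNITS_PER_WORD and the saved word becomes part of the local frame, reloaded from sp+offset instead of popped. A runnable model of the accounting (illustrative 32-bit values):

#include <stdio.h>

int main (void)
{
  long request = 32768;             /* buf[32768] in pr84128.c */
  long word = 4;                    /* UNITS_PER_WORD on ia32  */
  long allocate = request - word;   /* size -= UNITS_PER_WORD  */
  printf ("probe loop allocates %ld; scratch reloaded from %ld(%%esp)\n",
          allocate, allocate);
  return 0;
}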

Some files were not shown because too many files have changed in this diff