200 lines
5.1 KiB
Diff
2017-09-15  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2017-09-14  Jakub Jelinek  <jakub@redhat.com>

	PR target/81325
	* cfgbuild.c (find_bb_boundaries): Ignore debug insns in decisions
	if and where to split a bb, except for splitting before debug insn
	sequences followed by non-label real insn.  Delete debug insns
	in between basic blocks.

	* g++.dg/cpp0x/pr81325.C: New test.
--- gcc/cfgbuild.c	(revision 252751)
+++ gcc/cfgbuild.c	(revision 252752)
@@ -442,9 +442,10 @@ find_bb_boundaries (basic_block bb)
   rtx_insn *end = BB_END (bb), *x;
   rtx_jump_table_data *table;
   rtx_insn *flow_transfer_insn = NULL;
+  rtx_insn *debug_insn = NULL;
   edge fallthru = NULL;
 
-  if (insn == BB_END (bb))
+  if (insn == end)
     return;
 
   if (LABEL_P (insn))
@@ -455,27 +456,49 @@ find_bb_boundaries (basic_block bb)
     {
       enum rtx_code code = GET_CODE (insn);
 
+      if (code == DEBUG_INSN)
+	{
+	  if (flow_transfer_insn && !debug_insn)
+	    debug_insn = insn;
+	}
       /* In case we've previously seen an insn that effects a control
	 flow transfer, split the block.  */
-      if ((flow_transfer_insn || code == CODE_LABEL)
-	  && inside_basic_block_p (insn))
+      else if ((flow_transfer_insn || code == CODE_LABEL)
+	       && inside_basic_block_p (insn))
	{
-	  fallthru = split_block (bb, PREV_INSN (insn));
+	  rtx_insn *prev = PREV_INSN (insn);
+
+	  /* If the first non-debug inside_basic_block_p insn after a control
+	     flow transfer is not a label, split the block before the debug
+	     insn instead of before the non-debug insn, so that the debug
+	     insns are not lost.  */
+	  if (debug_insn && code != CODE_LABEL && code != BARRIER)
+	    prev = PREV_INSN (debug_insn);
+	  fallthru = split_block (bb, prev);
	  if (flow_transfer_insn)
	    {
	      BB_END (bb) = flow_transfer_insn;
 
+	      rtx_insn *next;
	      /* Clean up the bb field for the insns between the blocks.  */
	      for (x = NEXT_INSN (flow_transfer_insn);
		   x != BB_HEAD (fallthru->dest);
-		   x = NEXT_INSN (x))
-		if (!BARRIER_P (x))
-		  set_block_for_insn (x, NULL);
+		   x = next)
+		{
+		  next = NEXT_INSN (x);
+		  /* Debug insns should not be in between basic blocks,
+		     drop them on the floor.  */
+		  if (DEBUG_INSN_P (x))
+		    delete_insn (x);
+		  else if (!BARRIER_P (x))
+		    set_block_for_insn (x, NULL);
+		}
	    }
 
	  bb = fallthru->dest;
	  remove_edge (fallthru);
	  flow_transfer_insn = NULL;
+	  debug_insn = NULL;
	  if (code == CODE_LABEL && LABEL_ALT_ENTRY_P (insn))
	    make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, 0);
	}
@@ -498,17 +521,23 @@ find_bb_boundaries (basic_block bb)
   /* In case expander replaced normal insn by sequence terminating by
      return and barrier, or possibly other sequence not behaving like
      ordinary jump, we need to take care and move basic block boundary.  */
-  if (flow_transfer_insn)
+  if (flow_transfer_insn && flow_transfer_insn != end)
     {
       BB_END (bb) = flow_transfer_insn;
 
       /* Clean up the bb field for the insns that do not belong to BB.  */
-      x = flow_transfer_insn;
-      while (x != end)
+      rtx_insn *next;
+      for (x = NEXT_INSN (flow_transfer_insn); ; x = next)
	{
-	  x = NEXT_INSN (x);
-	  if (!BARRIER_P (x))
+	  next = NEXT_INSN (x);
+	  /* Debug insns should not be in between basic blocks,
+	     drop them on the floor.  */
+	  if (DEBUG_INSN_P (x))
+	    delete_insn (x);
+	  else if (!BARRIER_P (x))
	    set_block_for_insn (x, NULL);
+	  if (x == end)
+	    break;
	}
    }
 
--- gcc/testsuite/g++.dg/cpp0x/pr81325.C	(nonexistent)
+++ gcc/testsuite/g++.dg/cpp0x/pr81325.C	(revision 252752)
@@ -0,0 +1,84 @@
+// PR target/81325
+// { dg-do compile { target c++11 } }
+// { dg-options "-O2 -fcompare-debug" }
+
+struct A { A(const char *, const int & = 0); };
+template <typename> struct B;
+template <typename> struct C {
+  int _M_i;
+  void m_fn1() { __atomic_fetch_add(&_M_i, 0, __ATOMIC_RELAXED); }
+};
+struct D {
+  int *Data;
+  long Length = 0;
+  D(int) : Data() {}
+};
+template <> struct B<int> : C<int> {};
+struct F {
+  B<int> RefCount;
+  void m_fn2() { RefCount.m_fn1(); }
+};
+struct G {
+  F *Obj;
+  G(const G &p1) : Obj(p1.Obj) {
+    if (Obj) {
+      F *a = 0;
+      a->m_fn2();
+    }
+  }
+};
+struct H {
+  int CPlusPlus : 1;
+};
+struct I {
+  enum {} KindId;
+};
+template <typename ResultT, typename ArgT> struct J {
+  void operator()();
+  ResultT operator()(ArgT) {}
+};
+struct K {
+  int AllowBind;
+  I SupportedKind;
+  I RestrictKind;
+  G Implementation;
+};
+struct L {
+  L(int) : Implementation(Implementation) {}
+  K Implementation;
+};
+struct M {
+  int Param1;
+};
+struct N {
+  N(int, L &p2) : Param2(p2) {}
+  L Param2;
+};
+struct O {
+  L m_fn3();
+};
+L ignoringImpCasts(L);
+J<O, L> b;
+L hasName(const A &);
+M hasOverloadedOperatorName(D);
+J<O, int> c;
+struct P {
+  void m_fn4(L, int);
+};
+struct Q {
+  void m_fn5(P *);
+};
+H d;
+void Q::m_fn5(P *p1) {
+  if (!d.CPlusPlus) {
+    c();
+    L e = 0, f = ignoringImpCasts(e);
+    b(ignoringImpCasts(f)).m_fn3();
+  }
+  hasOverloadedOperatorName(0);
+  hasName("");
+  L g = 0;
+  N(0, g);
+  L h(0);
+  p1->m_fn4(h, 0);
+}