Commit 3371a64f by Teresa Johnson (committed by Teresa Johnson)

re PR c++/53743 (ICE when compiling firefox with PGO and LTO)

2013-06-06  Teresa Johnson  <tejohnson@google.com>

	PR c++/53743
	* ifcvt.c (find_if_case_1): Replace BB_COPY_PARTITION with assert
	as this is now done by redirect_edge_and_branch_force.
	* function.c (thread_prologue_and_epilogue_insns): Insert new bb after
	barriers, and fix interaction with splitting.
	* emit-rtl.c (try_split): Copy REG_CROSSING_JUMP notes.
	* cfgcleanup.c (try_forward_edges): Fix early return value to properly
	reflect changes made in the routine.
	* bb-reorder.c (emit_barrier_after_bb): Move to cfgrtl.c.
	(fix_up_fall_thru_edges): Remove incorrect check for bb layout order
	since this is called in cfglayout mode, and replace partition fixup
	with assert as that is now done by force_nonfallthru_and_redirect.
	(add_reg_crossing_jump_notes): Handle the fact that some jumps may
	already be marked with region crossing note.
	(insert_section_boundary_note): Make non-static, gate on flag
	has_bb_partition, rewrite to also check for multiple partitions.
	(rest_of_handle_reorder_blocks): Remove call to
	insert_section_boundary_note, now done later during free_cfg.
	(duplicate_computed_gotos): Don't duplicate partition crossing edge.
	* bb-reorder.h (insert_section_boundary_note): Declare.
	* Makefile.in (cfgrtl.o): Depend on bb-reorder.h
	* cfgrtl.c (rest_of_pass_free_cfg): If partitions exist
	invoke insert_section_boundary_note.
	(try_redirect_by_replacing_jump): Remove unnecessary
	check for region crossing note.
	(fixup_partition_crossing): New function.
	(rtl_redirect_edge_and_branch): Fixup partition boundaries.
	(emit_barrier_after_bb): Move here from bb-reorder.c, handle insertion
	in non-cfglayout mode.
	(force_nonfallthru_and_redirect): Fixup partition boundaries,
	remove old code that tried to do this. Emit barrier correctly
	when we are in cfglayout mode.
	(last_bb_in_partition): New function.
	(rtl_split_edge): Correctly fixup partition boundaries.
	(commit_one_edge_insertion): Remove old code that tried to
	fixup region crossing edge since this is now handled in
	split_block, and set up insertion point correctly since
	block may now end in a jump.
	(verify_hot_cold_block_grouping): Guard against checking when not in
	linearized RTL mode.
	(rtl_verify_edges): Add checks for incorrect/missing REG_CROSSING_JUMP
	notes.
	(rtl_verify_flow_info_1): Move verify_hot_cold_block_grouping to
	rtl_verify_flow_info, so not called in cfglayout mode.
	(rtl_verify_flow_info): Move verify_hot_cold_block_grouping here.
	(fixup_reorder_chain): Remove old code that attempted to fixup region
	crossing note as this is now handled in force_nonfallthru_and_redirect.
	(duplicate_insn_chain): Don't duplicate switch section notes.
	(rtl_can_remove_branch_p): Remove unnecessary check for region crossing
	note.
	* basic-block.h (emit_barrier_after_bb): Declare.

	* testsuite/gcc.dg/tree-prof/va-arg-pack-1.c: Cloned from c-torture, made
	into -freorder-blocks-and-partition test.
	* testsuite/gcc.dg/tree-prof/comp-goto-1.c: Ditto.
	* testsuite/gcc.dg/tree-prof/20041218-1.c: Ditto.
	* testsuite/gcc.dg/tree-prof/pr52027.c: Use -O2.
	* testsuite/gcc.dg/tree-prof/pr50907.c: Ditto.
	* testsuite/gcc.dg/tree-prof/pr45354.c: Ditto.
	* testsuite/g++.dg/tree-prof/partition2.C: Ditto.
	* testsuite/g++.dg/tree-prof/partition3.C: Ditto.

From-SVN: r199744
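The cfgrtl.c hunks themselves are not reproduced in the excerpt below, so the new fixup_partition_crossing helper named in the ChangeLog is only sketched here. This is a reconstruction from the ChangeLog description, not the committed code: the helper keeps an edge's EDGE_CROSSING flag, and the REG_CROSSING_JUMP note on the source block's jump, consistent with the partitioning of the edge's endpoints.

/* Sketch only, reconstructed from the ChangeLog; the committed
   function may differ in detail.  */
static void
fixup_partition_crossing (edge e)
{
  if (e->src == ENTRY_BLOCK_PTR || e->dest == EXIT_BLOCK_PTR)
    return;

  if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
    {
      /* The edge now crosses sections: flag it and note the jump,
         unless a note is already present.  */
      e->flags |= EDGE_CROSSING;
      if (JUMP_P (BB_END (e->src))
          && !find_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX))
        add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
    }
  else
    {
      /* The edge stays within one section: clear the flag, and drop
         the note if no other successor of the source still crosses.  */
      e->flags &= ~EDGE_CROSSING;
      if (JUMP_P (BB_END (e->src)))
        {
          edge e2;
          edge_iterator ei;
          bool crossing_succ = false;

          FOR_EACH_EDGE (e2, ei, e->src->succs)
            crossing_succ |= (e2->flags & EDGE_CROSSING) != 0;
          if (!crossing_succ)
            remove_note (BB_END (e->src),
                         find_reg_note (BB_END (e->src),
                                        REG_CROSSING_JUMP, NULL_RTX));
        }
    }
}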
parent 66071e10
2013-06-06 Teresa Johnson <tejohnson@google.com>
PR c++/53743
* ifcvt.c (find_if_case_1): Replace BB_COPY_PARTITION with assert
as this is now done by redirect_edge_and_branch_force.
* function.c (thread_prologue_and_epilogue_insns): Insert new bb after
barriers, and fix interaction with splitting.
* emit-rtl.c (try_split): Copy REG_CROSSING_JUMP notes.
* cfgcleanup.c (try_forward_edges): Fix early return value to properly
reflect changes made in the routine.
* bb-reorder.c (emit_barrier_after_bb): Move to cfgrtl.c.
(fix_up_fall_thru_edges): Remove incorrect check for bb layout order
since this is called in cfglayout mode, and replace partition fixup
with assert as that is now done by force_nonfallthru_and_redirect.
(add_reg_crossing_jump_notes): Handle the fact that some jumps may
already be marked with region crossing note.
(insert_section_boundary_note): Make non-static, gate on flag
has_bb_partition, rewrite to also check for multiple partitions.
(rest_of_handle_reorder_blocks): Remove call to
insert_section_boundary_note, now done later during free_cfg.
(duplicate_computed_gotos): Don't duplicate partition crossing edge.
* bb-reorder.h (insert_section_boundary_note): Declare.
* Makefile.in (cfgrtl.o): Depend on bb-reorder.h
* cfgrtl.c (rest_of_pass_free_cfg): If partitions exist
invoke insert_section_boundary_note.
(try_redirect_by_replacing_jump): Remove unnecessary
check for region crossing note.
(fixup_partition_crossing): New function.
(rtl_redirect_edge_and_branch): Fixup partition boundaries.
(emit_barrier_after_bb): Move here from bb-reorder.c, handle insertion
in non-cfglayout mode.
(force_nonfallthru_and_redirect): Fixup partition boundaries,
remove old code that tried to do this. Emit barrier correctly
when we are in cfglayout mode.
(last_bb_in_partition): New function.
(rtl_split_edge): Correctly fixup partition boundaries.
(commit_one_edge_insertion): Remove old code that tried to
fixup region crossing edge since this is now handled in
split_block, and set up insertion point correctly since
block may now end in a jump.
(verify_hot_cold_block_grouping): Guard against checking when not in
linearized RTL mode.
(rtl_verify_edges): Add checks for incorrect/missing REG_CROSSING_JUMP
notes.
(rtl_verify_flow_info_1): Move verify_hot_cold_block_grouping to
rtl_verify_flow_info, so not called in cfglayout mode.
(rtl_verify_flow_info): Move verify_hot_cold_block_grouping here.
(fixup_reorder_chain): Remove old code that attempted to fixup region
crossing note as this is now handled in force_nonfallthru_and_redirect.
(duplicate_insn_chain): Don't duplicate switch section notes.
(rtl_can_remove_branch_p): Remove unnecessary check for region crossing
note.
* basic-block.h (emit_barrier_after_bb): Declare.
2013-06-06 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
* config/arm/arm-fixed.md (add<mode>3,usadd<mode>3,ssadd<mode>3,
......
......@@ -3155,7 +3155,7 @@ cfgrtl.o : cfgrtl.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_ERROR_H) \
$(FUNCTION_H) $(EXCEPT_H) $(TM_P_H) $(INSN_ATTR_H) \
insn-config.h $(EXPR_H) \
$(CFGLOOP_H) $(OBSTACK_H) $(TARGET_H) $(TREE_H) \
- $(TREE_PASS_H) $(DF_H) $(GGC_H) $(COMMON_TARGET_H) gt-cfgrtl.h
+ $(TREE_PASS_H) $(DF_H) $(GGC_H) $(COMMON_TARGET_H) gt-cfgrtl.h bb-reorder.h
cfganal.o : cfganal.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(BASIC_BLOCK_H) \
$(TIMEVAR_H) sbitmap.h $(BITMAP_H)
cfgbuild.o : cfgbuild.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
......
......@@ -796,6 +796,7 @@ extern basic_block force_nonfallthru_and_redirect (edge, basic_block, rtx);
extern bool contains_no_active_insn_p (const_basic_block);
extern bool forwarder_block_p (const_basic_block);
extern bool can_fallthru (basic_block, basic_block);
+ extern void emit_barrier_after_bb (basic_block bb);
/* In cfgbuild.c. */
extern void find_many_sub_basic_blocks (sbitmap);
......
......@@ -1380,15 +1380,6 @@ get_uncond_jump_length (void)
return length;
}
- /* Emit a barrier into the footer of BB. */
- static void
- emit_barrier_after_bb (basic_block bb)
- {
- rtx barrier = emit_barrier_after (BB_END (bb));
- BB_FOOTER (bb) = unlink_insn_chain (barrier, barrier);
- }
/* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
Duplicate the landing pad and split the edges so that no EH edge
crosses partitions. */
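The bb-reorder.c copy of emit_barrier_after_bb removed above moves to cfgrtl.c, where, per the ChangeLog, it also handles insertion when not in cfglayout mode. The relocated version is not part of the hunks shown in this excerpt; a rough sketch, reconstructed rather than quoted from the commit, is:

/* Sketch of the relocated helper: emit a barrier after BB, placing it
   in the block footer in cfglayout mode and leaving it in the insn
   stream otherwise.  */
void
emit_barrier_after_bb (basic_block bb)
{
  rtx barrier = emit_barrier_after (BB_END (bb));

  gcc_assert (current_ir_type () == IR_RTL_CFGRTL
              || current_ir_type () == IR_RTL_CFGLAYOUT);

  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    BB_FOOTER (bb) = unlink_insn_chain (barrier, barrier);
}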
......@@ -1720,8 +1711,7 @@ fix_up_fall_thru_edges (void)
(i.e. fix it so the fall through does not cross and
the cond jump does). */
- if (!cond_jump_crosses
-     && cur_bb->aux == cond_jump->dest)
+ if (!cond_jump_crosses)
{
/* Find label in fall_thru block. We've already added
any missing labels, so there must be one. */
......@@ -1765,10 +1755,10 @@ fix_up_fall_thru_edges (void)
new_bb->aux = cur_bb->aux;
cur_bb->aux = new_bb;
- /* Make sure new fall-through bb is in same
-    partition as bb it's falling through from. */
+ /* This is done by force_nonfallthru_and_redirect. */
+ gcc_assert (BB_PARTITION (new_bb)
+             == BB_PARTITION (cur_bb));
- BB_COPY_PARTITION (new_bb, cur_bb);
single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
}
else
......@@ -2064,7 +2054,10 @@ add_reg_crossing_jump_notes (void)
FOR_EACH_BB (bb)
FOR_EACH_EDGE (e, ei, bb->succs)
if ((e->flags & EDGE_CROSSING)
- && JUMP_P (BB_END (e->src)))
+ && JUMP_P (BB_END (e->src))
+ /* Some notes were added during fix_up_fall_thru_edges, via
+    force_nonfallthru_and_redirect. */
+ && !find_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX))
add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
}
......@@ -2133,23 +2126,26 @@ reorder_basic_blocks (void)
encountering this note will make the compiler switch between the
hot and cold text sections. */
- static void
+ void
insert_section_boundary_note (void)
{
basic_block bb;
- int first_partition = 0;
+ bool switched_sections = false;
+ int current_partition = 0;
- if (!flag_reorder_blocks_and_partition)
+ if (!crtl->has_bb_partition)
return;
FOR_EACH_BB (bb)
{
- if (!first_partition)
- first_partition = BB_PARTITION (bb);
- if (BB_PARTITION (bb) != first_partition)
+ if (!current_partition)
+ current_partition = BB_PARTITION (bb);
+ if (BB_PARTITION (bb) != current_partition)
{
- emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS, BB_HEAD (bb));
- break;
+ gcc_assert (!switched_sections);
+ switched_sections = true;
+ emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS, BB_HEAD (bb));
+ current_partition = BB_PARTITION (bb);
}
}
}
......@@ -2180,8 +2176,6 @@ rest_of_handle_reorder_blocks (void)
bb->aux = bb->next_bb;
cfg_layout_finalize ();
- /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */
- insert_section_boundary_note ();
return 0;
}
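The call removed above is not simply dropped: per the ChangeLog, the note insertion now happens from rest_of_pass_free_cfg in cfgrtl.c, whose hunk is not shown in this excerpt. A minimal sketch of that call site, reconstructed and with unrelated details of the pass omitted, would look like:

/* Sketch of the new call site in cfgrtl.c (unrelated details omitted).
   Emitting the NOTE_INSN_SWITCH_TEXT_SECTIONS notes is deferred to the
   point where the CFG is freed, while the per-block partition
   information is still available.  */
static unsigned int
rest_of_pass_free_cfg (void)
{
  if (crtl->has_bb_partition)
    insert_section_boundary_note ();

  free_bb_for_insn ();
  return 0;
}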
......@@ -2315,6 +2309,11 @@ duplicate_computed_gotos (void)
if (!bitmap_bit_p (candidates, single_succ (bb)->index))
continue;
+ /* Don't duplicate a partition crossing edge, which requires difficult
+    fixup. */
+ if (find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX))
+ continue;
new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
new_bb->aux = bb->aux;
bb->aux = new_bb;
......
......@@ -35,4 +35,6 @@ extern struct target_bb_reorder *this_target_bb_reorder;
extern int get_uncond_jump_length (void);
+ extern void insert_section_boundary_note (void);
#endif
......@@ -456,7 +456,7 @@ try_forward_edges (int mode, basic_block b)
if (first != EXIT_BLOCK_PTR
&& find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
- return false;
+ return changed;
while (counter < n_basic_blocks)
{
......
......@@ -3574,6 +3574,7 @@ try_split (rtx pat, rtx trial, int last)
break;
case REG_NON_LOCAL_GOTO:
+ case REG_CROSSING_JUMP:
for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
{
if (JUMP_P (insn))
......
......@@ -6270,8 +6270,10 @@ thread_prologue_and_epilogue_insns (void)
break;
if (e)
{
- copy_bb = create_basic_block (NEXT_INSN (BB_END (e->src)),
- NULL_RTX, e->src);
+ /* Make sure we insert after any barriers. */
+ rtx end = get_last_bb_insn (e->src);
+ copy_bb = create_basic_block (NEXT_INSN (end),
+ NULL_RTX, e->src);
BB_COPY_PARTITION (copy_bb, e->src);
}
else
......@@ -6538,7 +6540,7 @@ epilogue_done:
basic_block simple_return_block_cold = NULL;
edge pending_edge_hot = NULL;
edge pending_edge_cold = NULL;
- basic_block exit_pred = EXIT_BLOCK_PTR->prev_bb;
+ basic_block exit_pred;
int i;
gcc_assert (entry_edge != orig_entry_edge);
......@@ -6566,6 +6568,12 @@ epilogue_done:
else
pending_edge_cold = e;
}
+ /* Save a pointer to the exit's predecessor BB for use in
+    inserting new BBs at the end of the function. Do this
+    after the call to split_block above which may split
+    the original exit pred. */
+ exit_pred = EXIT_BLOCK_PTR->prev_bb;
FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
{
......
......@@ -3905,10 +3905,9 @@ find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
if (new_bb)
{
df_bb_replace (then_bb_index, new_bb);
- /* Since the fallthru edge was redirected from test_bb to new_bb,
-    we need to ensure that new_bb is in the same partition as
-    test bb (you can not fall through across section boundaries). */
- BB_COPY_PARTITION (new_bb, test_bb);
+ /* This should have been done above via force_nonfallthru_and_redirect
+    (possibly called from redirect_edge_and_branch_force). */
+ gcc_checking_assert (BB_PARTITION (new_bb) == BB_PARTITION (test_bb));
}
num_true_changes++;
......
2013-06-06 Teresa Johnson <tejohnson@google.com>
PR c++/53743
* gcc.dg/tree-prof/va-arg-pack-1.c: Cloned from c-torture, made
into -freorder-blocks-and-partition test.
* gcc.dg/tree-prof/comp-goto-1.c: Ditto.
* gcc.dg/tree-prof/20041218-1.c: Ditto.
* gcc.dg/tree-prof/pr52027.c: Use -O2.
* gcc.dg/tree-prof/pr50907.c: Ditto.
* gcc.dg/tree-prof/pr45354.c: Ditto.
* g++.dg/tree-prof/partition2.C: Ditto.
* g++.dg/tree-prof/partition3.C: Ditto.
2013-06-06 Tobias Burnus <burnus@net-b.de>
PR fortran/57542
......
// PR middle-end/45458
// { dg-require-effective-target freorder }
// { dg-options "-fnon-call-exceptions -freorder-blocks-and-partition" }
// { dg-options "-O2 -fnon-call-exceptions -freorder-blocks-and-partition" }
int
main ()
......
// PR middle-end/45566
// { dg-require-effective-target freorder }
// { dg-options "-O -fnon-call-exceptions -freorder-blocks-and-partition" }
// { dg-options "-O2 -fnon-call-exceptions -freorder-blocks-and-partition" }
int k;
......
/* PR rtl-optimization/16968 */
/* Testcase by Jakub Jelinek <jakub@redhat.com> */
/* { dg-require-effective-target freorder } */
/* { dg-options "-O2 -freorder-blocks-and-partition" } */
struct T
{
unsigned int b, c, *d;
unsigned char e;
};
struct S
{
unsigned int a;
struct T f;
};
struct U
{
struct S g, h;
};
struct V
{
unsigned int i;
struct U j;
};
extern void exit (int);
extern void abort (void);
void *
dummy1 (void *x)
{
return "";
}
void *
dummy2 (void *x, void *y)
{
exit (0);
}
struct V *
baz (unsigned int x)
{
static struct V v;
__builtin_memset (&v, 0x55, sizeof (v));
return &v;
}
int
check (void *x, struct S *y)
{
if (y->a || y->f.b || y->f.c || y->f.d || y->f.e)
abort ();
return 1;
}
static struct V *
bar (unsigned int x, void *y)
{
const struct T t = { 0, 0, (void *) 0, 0 };
struct V *u;
void *v;
v = dummy1 (y);
if (!v)
return (void *) 0;
u = baz (sizeof (struct V));
u->i = x;
u->j.g.a = 0;
u->j.g.f = t;
u->j.h.a = 0;
u->j.h.f = t;
if (!check (v, &u->j.g) || !check (v, &u->j.h))
return (void *) 0;
return u;
}
int
foo (unsigned int *x, unsigned int y, void **z)
{
void *v;
unsigned int i, j;
*z = v = (void *) 0;
for (i = 0; i < y; i++)
{
struct V *c;
j = *x;
switch (j)
{
case 1:
c = bar (j, x);
break;
default:
c = 0;
break;
}
if (c)
v = dummy2 (v, c);
else
return 1;
}
*z = v;
return 0;
}
int
main (void)
{
unsigned int one = 1;
void *p;
foo (&one, 1, &p);
abort ();
}
/* { dg-require-effective-target freorder } */
/* { dg-options "-O2 -freorder-blocks-and-partition" } */
#include <stdlib.h>
#if !defined(NO_LABEL_VALUES) && (!defined(STACK_SIZE) || STACK_SIZE >= 4000) && __INT_MAX__ >= 2147483647
typedef unsigned int uint32;
typedef signed int sint32;
typedef uint32 reg_t;
typedef unsigned long int host_addr_t;
typedef uint32 target_addr_t;
typedef sint32 target_saddr_t;
typedef union
{
struct
{
unsigned int offset:18;
unsigned int ignore:4;
unsigned int s1:8;
int :2;
signed int simm:14;
unsigned int s3:8;
unsigned int s2:8;
int pad2:2;
} f1;
long long ll;
double d;
} insn_t;
typedef struct
{
target_addr_t vaddr_tag;
unsigned long int rigged_paddr;
} tlb_entry_t;
typedef struct
{
insn_t *pc;
reg_t registers[256];
insn_t *program;
tlb_entry_t tlb_tab[0x100];
} environment_t;
enum operations
{
LOAD32_RR,
METAOP_DONE
};
host_addr_t
f ()
{
abort ();
}
reg_t
simulator_kernel (int what, environment_t *env)
{
register insn_t *pc = env->pc;
register reg_t *regs = env->registers;
register insn_t insn;
register int s1;
register reg_t r2;
register void *base_addr = &&sim_base_addr;
register tlb_entry_t *tlb = env->tlb_tab;
if (what != 0)
{
int i;
static void *op_map[] =
{
&&L_LOAD32_RR,
&&L_METAOP_DONE,
};
insn_t *program = env->program;
for (i = 0; i < what; i++)
program[i].f1.offset = op_map[program[i].f1.offset] - base_addr;
}
sim_base_addr:;
insn = *pc++;
r2 = (*(reg_t *) (((char *) regs) + (insn.f1.s2 << 2)));
s1 = (insn.f1.s1 << 2);
goto *(base_addr + insn.f1.offset);
L_LOAD32_RR:
{
target_addr_t vaddr_page = r2 / 4096;
unsigned int x = vaddr_page % 0x100;
insn = *pc++;
for (;;)
{
target_addr_t tag = tlb[x].vaddr_tag;
host_addr_t rigged_paddr = tlb[x].rigged_paddr;
if (tag == vaddr_page)
{
*(reg_t *) (((char *) regs) + s1) = *(uint32 *) (rigged_paddr + r2);
r2 = *(reg_t *) (((char *) regs) + (insn.f1.s2 << 2));
s1 = insn.f1.s1 << 2;
goto *(base_addr + insn.f1.offset);
}
if (((target_saddr_t) tag < 0))
{
*(reg_t *) (((char *) regs) + s1) = *(uint32 *) f ();
r2 = *(reg_t *) (((char *) regs) + (insn.f1.s2 << 2));
s1 = insn.f1.s1 << 2;
goto *(base_addr + insn.f1.offset);
}
x = (x - 1) % 0x100;
}
L_METAOP_DONE:
return (*(reg_t *) (((char *) regs) + s1));
}
}
insn_t program[2 + 1];
void *malloc ();
int
main ()
{
environment_t env;
insn_t insn;
int i, res;
host_addr_t a_page = (host_addr_t) malloc (2 * 4096);
target_addr_t a_vaddr = 0x123450;
target_addr_t vaddr_page = a_vaddr / 4096;
a_page = (a_page + 4096 - 1) & -4096;
env.tlb_tab[((vaddr_page) % 0x100)].vaddr_tag = vaddr_page;
env.tlb_tab[((vaddr_page) % 0x100)].rigged_paddr = a_page - vaddr_page * 4096;
insn.f1.offset = LOAD32_RR;
env.registers[0] = 0;
env.registers[2] = a_vaddr;
*(sint32 *) (a_page + a_vaddr % 4096) = 88;
insn.f1.s1 = 0;
insn.f1.s2 = 2;
for (i = 0; i < 2; i++)
program[i] = insn;
insn.f1.offset = METAOP_DONE;
insn.f1.s1 = 0;
program[2] = insn;
env.pc = program;
env.program = program;
res = simulator_kernel (2 + 1, &env);
if (res != 88)
abort ();
exit (0);
}
#else
main(){ exit (0); }
#endif
/* { dg-require-effective-target freorder } */
/* { dg-options "-O -freorder-blocks-and-partition -fschedule-insns -fselective-scheduling" { target powerpc*-*-* ia64-*-* x86_64-*-* } } */
/* { dg-options "-O2 -freorder-blocks-and-partition -fschedule-insns -fselective-scheduling" { target powerpc*-*-* ia64-*-* x86_64-*-* } } */
extern void abort (void);
......
/* PR middle-end/50907 */
/* { dg-require-effective-target freorder } */
/* { dg-options "-O -freorder-blocks-and-partition -fschedule-insns -fselective-scheduling -fpic" { target { { powerpc*-*-* ia64-*-* x86_64-*-* } && fpic } } } */
/* { dg-options "-O2 -freorder-blocks-and-partition -fschedule-insns -fselective-scheduling -fpic" { target { { powerpc*-*-* ia64-*-* x86_64-*-* } && fpic } } } */
#include "pr45354.c"
/* PR debug/52027 */
/* { dg-require-effective-target freorder } */
/* { dg-options "-O -freorder-blocks-and-partition -fno-reorder-functions" } */
/* { dg-options "-O2 -freorder-blocks-and-partition -fno-reorder-functions" } */
void
foo (int len)
......
/* __builtin_va_arg_pack () builtin tests. */
/* { dg-require-effective-target freorder } */
/* { dg-options "-O2 -freorder-blocks-and-partition" } */
#include <stdarg.h>
extern void abort (void);
int v1 = 8;
long int v2 = 3;
void *v3 = (void *) &v2;
struct A { char c[16]; } v4 = { "foo" };
long double v5 = 40;
char seen[20];
int cnt;
__attribute__ ((noinline)) int
foo1 (int x, int y, ...)
{
int i;
long int l;
void *v;
struct A a;
long double ld;
va_list ap;
va_start (ap, y);
if (x < 0 || x >= 20 || seen[x])
abort ();
seen[x] = ++cnt;
if (y != 6)
abort ();
i = va_arg (ap, int);
if (i != 5)
abort ();
switch (x)
{
case 0:
i = va_arg (ap, int);
if (i != 9 || v1 != 9)
abort ();
a = va_arg (ap, struct A);
if (__builtin_memcmp (a.c, v4.c, sizeof (a.c)) != 0)
abort ();
v = (void *) va_arg (ap, struct A *);
if (v != (void *) &v4)
abort ();
l = va_arg (ap, long int);
if (l != 3 || v2 != 4)
abort ();
break;
case 1:
ld = va_arg (ap, long double);
if (ld != 41 || v5 != ld)
abort ();
i = va_arg (ap, int);
if (i != 8)
abort ();
v = va_arg (ap, void *);
if (v != &v2)
abort ();
break;
case 2:
break;
default:
abort ();
}
va_end (ap);
return x;
}
__attribute__ ((noinline)) int
foo2 (int x, int y, ...)
{
long long int ll;
void *v;
struct A a, b;
long double ld;
va_list ap;
va_start (ap, y);
if (x < 0 || x >= 20 || seen[x])
abort ();
seen[x] = ++cnt | 64;
if (y != 10)
abort ();
switch (x)
{
case 11:
break;
case 12:
ld = va_arg (ap, long double);
if (ld != 41 || v5 != 40)
abort ();
a = va_arg (ap, struct A);
if (__builtin_memcmp (a.c, v4.c, sizeof (a.c)) != 0)
abort ();
b = va_arg (ap, struct A);
if (__builtin_memcmp (b.c, v4.c, sizeof (b.c)) != 0)
abort ();
v = va_arg (ap, void *);
if (v != &v2)
abort ();
ll = va_arg (ap, long long int);
if (ll != 16LL)
abort ();
break;
case 2:
break;
default:
abort ();
}
va_end (ap);
return x + 8;
}
__attribute__ ((noinline)) int
foo3 (void)
{
return 6;
}
extern inline __attribute__ ((always_inline, gnu_inline)) int
bar (int x, ...)
{
if (x < 10)
return foo1 (x, foo3 (), 5, __builtin_va_arg_pack ());
return foo2 (x, foo3 () + 4, __builtin_va_arg_pack ());
}
int
main (void)
{
if (bar (0, ++v1, v4, &v4, v2++) != 0)
abort ();
if (bar (1, ++v5, 8, v3) != 1)
abort ();
if (bar (2) != 2)
abort ();
if (bar (v1 + 2) != 19)
abort ();
if (bar (v1 + 3, v5--, v4, v4, v3, 16LL) != 20)
abort ();
return 0;
}