Commit 8ecba28a by Jan Hubicka

Makefile.in (cfgcleanup.o): Add cselib.h dependency.

	* Makefile.in (cfgcleanup.o): Add cselib.h dependency.
	* basic-block.h (CLEANUP_THREADING): New constant.
	* cfgcleanup.c: Include cselib.h.
	(thread_jump, mark_effect): New functions.
	(try_forward_edges): Do jump threading when asked for.
	* jump.c (mark_modified_reg, save_regs, num_same_regs, modified_regs,
	modified_mem, thread_jumps, rtx_equal_for_thread_p): Kill.
	* rtl.h (thread_jumps, rtx_equal_for_thread_p): Kill.
	* toplev.c (rest_of_compilation): Do not call thread_jumps; use
	CLEANUP_THREADING instead.

From-SVN: r48108
parent e9c46bb7
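
For orientation, here is a minimal source-level sketch (a hypothetical example,
not taken from the patch) of the redundancy the new CFG-level threading can
remove:

    /* The operands of both comparisons are identical and
       comparison_dominates_p (GT, NE) holds (a > b implies a != b), so on
       the edge where the first branch is taken the second test is already
       decided, and that edge can be redirected straight to "return 1".  */
    int
    f (int a, int b)
    {
      if (a > b)
        if (a != b)
          return 1;
      return 0;
    }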
gcc/Makefile.in:
@@ -1478,7 +1478,7 @@ cfgbuild.o : cfgbuild.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) flags.h insn-config.h \
function.h except.h $(GGC_H)
cfgcleanup.o : cfgcleanup.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) $(TIMEVAR_H)\
$(BASIC_BLOCK_H) hard-reg-set.h output.h flags.h $(RECOG_H) toplev.h \
-$(GGC_H) insn-config.h
$(GGC_H) insn-config.h cselib.h
cfgloop.o : cfgloop.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) \
$(BASIC_BLOCK_H) hard-reg-set.h
dominance.o : dominance.c $(CONFIG_H) $(SYSTEM_H) $(RTL_H) hard-reg-set.h \

gcc/basic-block.h:
@@ -577,6 +577,7 @@ enum update_life_extent
#define CLEANUP_PRE_LOOP 16 /* Take care to preserve syntactic loop
notes. */
#define CLEANUP_UPDATE_LIFE 32 /* Keep life information up to date. */
#define CLEANUP_THREADING 64 /* Do jump threading. */
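/* Illustrative usage (not part of the patch): callers request threading by
   or-ing this flag into the mode passed to cleanup_cfg, e.g.
   cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_THREADING);  */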
/* Flags for loop discovery. */
#define LOOP_TREE 1 /* Build loop hierarchy tree. */

gcc/cfgcleanup.c:
@@ -42,6 +42,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "obstack.h"

@@ -82,6 +83,8 @@ static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
static bool try_optimize_cfg PARAMS ((int));
static bool try_simplify_condjump PARAMS ((basic_block));
static bool try_forward_edges PARAMS ((int, basic_block));
static edge thread_jump PARAMS ((int, edge, basic_block));
static bool mark_effect PARAMS ((rtx, bitmap));
static void notice_new_block PARAMS ((basic_block));
static void update_forwarder_flag PARAMS ((basic_block));

@@ -178,6 +181,154 @@ try_simplify_condjump (cbranch_block)
return true;
}
/* Attempt to prove that the operation is a NOOP using CSElib, or mark its
   effect on a register.  Used by jump threading.  */
static bool
mark_effect (exp, nonequal)
rtx exp;
regset nonequal;
{
switch (GET_CODE (exp))
{
/* In case we do clobber the register, mark it as equal, as we know the
   value is dead so it doesn't have to match.  */
case CLOBBER:
if (REG_P (XEXP (exp, 0)))
CLEAR_REGNO_REG_SET (nonequal, REGNO (XEXP (exp, 0)));
return false;
case SET:
if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
return false;
if (GET_CODE (SET_DEST (exp)) != REG)
return true;
SET_REGNO_REG_SET (nonequal, REGNO (SET_DEST (exp)));
return false;
default:
return false;
}
}
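/* Illustrative note (not part of the patch): for (set (reg A) (expr)) where
   cselib has already proved (reg A) and (expr) equivalent, mark_effect
   returns false and marks nothing; otherwise the destination register is
   added to NONEQUAL.  A set whose destination is not a register, e.g.
   (set (mem X) (reg B)), returns true so the caller gives up, and
   (clobber (reg A)) removes A from NONEQUAL since its value is dead.  */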
/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that edge
   if it exists, NULL otherwise.  */
static edge
thread_jump (mode, e, b)
int mode;
edge e;
basic_block b;
{
rtx set1, set2, cond1, cond2, insn;
enum rtx_code code1, code2, reversed_code2;
bool reverse1 = false;
int i;
regset nonequal;
bool failed = false;
/* At the moment, we handle only conditional jumps, but later we may want
   to extend this code to tablejumps and others.  */
if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
return NULL;
if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
return NULL;
/* The second branch must end with an onlyjump, as we will eliminate the jump.  */
if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
|| !onlyjump_p (b->end))
return NULL;
set1 = pc_set (e->src->end);
set2 = pc_set (b->end);
if (((e->flags & EDGE_FALLTHRU) != 0)
    != (XEXP (SET_SRC (set1), 1) == pc_rtx))
reverse1 = true;
cond1 = XEXP (SET_SRC (set1), 0);
cond2 = XEXP (SET_SRC (set2), 0);
if (reverse1)
code1 = reversed_comparison_code (cond1, b->end);
else
code1 = GET_CODE (cond1);
code2 = GET_CODE (cond2);
reversed_code2 = reversed_comparison_code (cond2, b->end);
if (!comparison_dominates_p (code1, code2)
&& !comparison_dominates_p (code1, reversed_code2))
return NULL;
/* Ensure that the comparison operators are equivalent.
   ??? This is far too pessimistic.  We should allow swapped operands,
   different CCmodes, or for example interval comparisons that dominate
   even when the operands are not equivalent.  */
if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
|| !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
return NULL;
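/* Illustrative example (not part of the patch): cond1 = (gt a b) on the
   taken edge and cond2 = (ne a b) pass both tests above, since the operands
   match and comparison_dominates_p (GT, NE) holds: a > b implies a != b.  */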
/* Short circuit cases where block B contains some side effects, as we can't
safely bypass it. */
for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
insn = NEXT_INSN (insn))
if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
return NULL;
cselib_init ();
/* First process all values computed in the source basic block. */
for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
insn = NEXT_INSN (insn))
if (INSN_P (insn))
cselib_process_insn (insn);
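/* Illustrative note (not part of the patch): cselib assigns the same VALUE
   to expressions it can prove equivalent, so the equivalences recorded here
   in e->src are what later let rtx_equal_for_cselib_p recognize no-op sets
   in B when entered through E.  */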
nonequal = BITMAP_XMALLOC ();
CLEAR_REG_SET (nonequal);

/* Now assume that we've continued along the edge E to B and continue
   processing as if it were the same basic block.
   Our goal is to prove that the whole block is a NOOP.  */
for (insn = NEXT_INSN (b->head); insn != b->end && !failed;
insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
{
rtx pat = PATTERN (insn);
if (GET_CODE (pat) == PARALLEL)
{
for (i = 0; i < XVECLEN (pat, 0); i++)
failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
}
else
failed |= mark_effect (pat, nonequal);
}
cselib_process_insn (insn);
}
/* Later we should clear nonequal of dead registers. So far we don't
have life information in cfg_cleanup. */
if (failed)
goto failed_exit;
/* In case liveness information is available, we need to prove equivalence
only of the live values. */
if (mode & CLEANUP_UPDATE_LIFE)
AND_REG_SET (nonequal, b->global_live_at_end);
EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
BITMAP_XFREE (nonequal);
cselib_finish ();
if ((comparison_dominates_p (code1, code2) != 0)
    != (XEXP (SET_SRC (set2), 1) == pc_rtx))
return BRANCH_EDGE (b);
else
return FALLTHRU_EDGE (b);
failed_exit:
BITMAP_XFREE (nonequal);
cselib_finish ();
return NULL;
}
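/* Illustrative summary (not part of the patch): once code1 is known to
   dominate code2 (or its reverse), block B's branch is fully decided, and
   thread_jump returns the edge that will always be taken so that
   try_forward_edges can redirect E past B.  */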
/* Attempt to forward edges leaving basic block B.
Return true if successful. */

@@ -187,12 +338,13 @@ try_forward_edges (mode, b)
int mode;
{
bool changed = false;
-edge e, next;
edge e, next, threaded_edge;
for (e = b->succ; e ; e = next)
{
basic_block target, first;
int counter;
bool threaded = false;
next = e->succ_next;

@@ -207,16 +359,32 @@ try_forward_edges (mode, b)
target = first = e->dest;
counter = 0;
/* Look for the real destination of the jump.
   Avoid an infinite loop on infinite empty loops by counting
   up to n_basic_blocks.  */
-while (FORWARDER_BLOCK_P (target)
-       && target->succ->dest != EXIT_BLOCK_PTR
-       && counter < n_basic_blocks)
while (counter < n_basic_blocks)
{
-/* Bypass trivial infinite loops.  */
-if (target == target->succ->dest)
-counter = n_basic_blocks;
basic_block new_target = NULL;
bool new_target_threaded = false;
if (FORWARDER_BLOCK_P (target)
&& target->succ->dest != EXIT_BLOCK_PTR)
{
/* Bypass trivial infinite loops. */
if (target == target->succ->dest)
counter = n_basic_blocks;
new_target = target->succ->dest;
}
/* Thread over only one edge at a time to simplify updating
   of probabilities.  */
else if ((mode & CLEANUP_THREADING) && !threaded)
{
threaded_edge = thread_jump (mode, e, target);
if (threaded_edge)
{
new_target = threaded_edge->dest;
new_target_threaded = true;
}
}
if (!new_target)
break;
/* Avoid killing loop pre-headers, as they are the places the loop
   optimizer wants to hoist code to.

@@ -241,8 +409,10 @@ try_forward_edges (mode, b)
if (GET_CODE (insn) == NOTE)
break;
}
-target = target->succ->dest, counter++;
-}
counter++;
target = new_target;
threaded |= new_target_threaded;
}
if (counter >= n_basic_blocks)
{

@@ -257,37 +427,47 @@ try_forward_edges (mode, b)
/* Save the values now, as the edge may get removed. */
gcov_type edge_count = e->count;
int edge_probability = e->probability;
int edge_frequency;
-if (redirect_edge_and_branch (e, target))
if (threaded)
{
-/* We successfully forwarded the edge.  Now update profile
-   data: for each edge we traversed in the chain, remove
-   the original edge's execution count.  */
-int edge_frequency = ((edge_probability * b->frequency
-                       + REG_BR_PROB_BASE / 2)
-                      / REG_BR_PROB_BASE);
-if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
-  BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
-BB_SET_FLAG (b, BB_UPDATE_LIFE);
-do
-  {
-    first->count -= edge_count;
-    first->succ->count -= edge_count;
-    first->frequency -= edge_frequency;
-    first = first->succ->dest;
-  }
-while (first != target);
-changed = true;
notice_new_block (redirect_edge_and_branch_force (e, target));
if (rtl_dump_file)
fprintf (rtl_dump_file, "Conditionals threaded.\n");
}
-else
else if (!redirect_edge_and_branch (e, target))
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Forwarding edge %i->%i to %i failed.\n",
b->index, e->dest->index, target->index);
continue;
}
/* We successfully forwarded the edge. Now update profile
data: for each edge we traversed in the chain, remove
the original edge's execution count. */
edge_frequency = ((edge_probability * b->frequency
+ REG_BR_PROB_BASE / 2)
/ REG_BR_PROB_BASE);
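/* Worked example (illustrative, not part of the patch): REG_BR_PROB_BASE
   is 10000, so an edge probability of 5000 on a block with frequency 1000
   gives (5000 * 1000 + 5000) / 10000 == 500, i.e. scaling by one half with
   rounding to nearest.  */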
if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
BB_SET_FLAG (b, BB_UPDATE_LIFE);
do
{
edge t;
first->count -= edge_count;
first->succ->count -= edge_count;
first->frequency -= edge_frequency;
if (first->succ->succ_next)
t = threaded_edge;
else
t = first->succ;
first = t->dest;
}
while (first != target);
changed = true;
}
}

gcc/jump.c:
@@ -69,7 +69,6 @@ static void invert_exp_1 PARAMS ((rtx));
static int invert_exp PARAMS ((rtx));
static int returnjump_p_1 PARAMS ((rtx *, void *));
static void delete_prior_computation PARAMS ((rtx, rtx));
-static void mark_modified_reg PARAMS ((rtx, rtx, void *));
/* Alternate entry into the jump optimizer. This entry point only rebuilds
the JUMP_LABEL field in jumping insns and REG_LABEL notes in non-jumping

@@ -2427,469 +2426,3 @@ true_regnum (x)
[this entire hunk, from the "Optimize code of the form" comment through the
end of rtx_equal_for_thread_p, is removed by this commit]
}
return -1;
}
/* Optimize code of the form:
for (x = a[i]; x; ...)
...
for (x = a[i]; x; ...)
...
foo:
Loop optimization will change the above code into
if (x = a[i])
for (;;)
{ ...; if (! (x = ...)) break; }
if (x = a[i])
for (;;)
{ ...; if (! (x = ...)) break; }
foo:
In general, if the first test fails, the program can branch
directly to `foo' and skip the second try which is doomed to fail.
We run this after loop optimization and before flow analysis. */
/* When comparing the insn patterns, we track the fact that different
pseudo-register numbers may have been used in each computation.
The following array stores an equivalence -- same_regs[I] == J means
that pseudo register I was used in the first set of tests in a context
where J was used in the second set. We also count the number of such
pending equivalences. If nonzero, the expressions really aren't the
same. */
static int *same_regs;
static int num_same_regs;
/* Track any registers modified between the target of the first jump and
the second jump. They never compare equal. */
static char *modified_regs;
/* Record if memory was modified. */
static int modified_mem;
/* Called via note_stores on each insn between the target of the first
branch and the second branch. It marks any changed registers. */
static void
mark_modified_reg (dest, x, data)
rtx dest;
rtx x;
void *data ATTRIBUTE_UNUSED;
{
int regno;
unsigned int i;
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
if (GET_CODE (dest) == MEM)
modified_mem = 1;
if (GET_CODE (dest) != REG)
return;
regno = REGNO (dest);
if (regno >= FIRST_PSEUDO_REGISTER)
modified_regs[regno] = 1;
/* Don't consider a hard condition code register as modified,
if it is only being set. thread_jumps will check if it is set
to the same value. */
else if (GET_MODE_CLASS (GET_MODE (dest)) != MODE_CC
|| GET_CODE (x) != SET
|| ! rtx_equal_p (dest, SET_DEST (x))
|| HARD_REGNO_NREGS (regno, GET_MODE (dest)) != 1)
for (i = 0; i < HARD_REGNO_NREGS (regno, GET_MODE (dest)); i++)
modified_regs[regno + i] = 1;
}
/* F is the first insn in the chain of insns. */
void
thread_jumps (f, max_reg, flag_before_loop)
rtx f;
int max_reg;
int flag_before_loop;
{
/* Basic algorithm is to find a conditional branch,
the label it may branch to, and the branch after
that label. If the two branches test the same condition,
walk back from both branch paths until the insn patterns
differ, or code labels are hit. If we make it back to
the target of the first branch, then we know that the first branch
will either always succeed or always fail depending on the relative
senses of the two branches. So adjust the first branch accordingly
in this case. */
rtx label, b1, b2, t1, t2;
enum rtx_code code1, code2;
rtx b1op0, b1op1, b2op0, b2op1;
int changed = 1;
int i;
int *all_reset;
enum rtx_code reversed_code1, reversed_code2;
/* Allocate register tables and quick-reset table. */
modified_regs = (char *) xmalloc (max_reg * sizeof (char));
same_regs = (int *) xmalloc (max_reg * sizeof (int));
all_reset = (int *) xmalloc (max_reg * sizeof (int));
for (i = 0; i < max_reg; i++)
all_reset[i] = -1;
while (changed)
{
changed = 0;
for (b1 = f; b1; b1 = NEXT_INSN (b1))
{
rtx set;
rtx set2;
/* Get to a candidate branch insn. */
if (GET_CODE (b1) != JUMP_INSN
|| ! any_condjump_p (b1) || JUMP_LABEL (b1) == 0)
continue;
memset (modified_regs, 0, max_reg * sizeof (char));
modified_mem = 0;
memcpy (same_regs, all_reset, max_reg * sizeof (int));
num_same_regs = 0;
label = JUMP_LABEL (b1);
/* Look for a branch after the target. Record any registers and
memory modified between the target and the branch. Stop when we
get to a label since we can't know what was changed there. */
for (b2 = NEXT_INSN (label); b2; b2 = NEXT_INSN (b2))
{
if (GET_CODE (b2) == CODE_LABEL)
break;
else if (GET_CODE (b2) == JUMP_INSN)
{
/* If this is an unconditional jump and is the only use of
its target label, we can follow it. */
if (any_uncondjump_p (b2)
&& onlyjump_p (b2)
&& JUMP_LABEL (b2) != 0
&& LABEL_NUSES (JUMP_LABEL (b2)) == 1)
{
b2 = JUMP_LABEL (b2);
continue;
}
else
break;
}
if (GET_CODE (b2) != CALL_INSN && GET_CODE (b2) != INSN)
continue;
if (GET_CODE (b2) == CALL_INSN)
{
modified_mem = 1;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (call_used_regs[i] && ! fixed_regs[i]
&& i != STACK_POINTER_REGNUM
&& i != FRAME_POINTER_REGNUM
&& i != HARD_FRAME_POINTER_REGNUM
&& i != ARG_POINTER_REGNUM)
modified_regs[i] = 1;
}
note_stores (PATTERN (b2), mark_modified_reg, NULL);
}
/* Check the next candidate branch insn from the label
of the first. */
if (b2 == 0
|| GET_CODE (b2) != JUMP_INSN
|| b2 == b1
|| !any_condjump_p (b2)
|| !onlyjump_p (b2))
continue;
set = pc_set (b1);
set2 = pc_set (b2);
/* Get the comparison codes and operands, reversing the
codes if appropriate. If we don't have comparison codes,
we can't do anything. */
b1op0 = XEXP (XEXP (SET_SRC (set), 0), 0);
b1op1 = XEXP (XEXP (SET_SRC (set), 0), 1);
code1 = GET_CODE (XEXP (SET_SRC (set), 0));
reversed_code1 = code1;
if (XEXP (SET_SRC (set), 1) == pc_rtx)
code1 = reversed_comparison_code (XEXP (SET_SRC (set), 0), b1);
else
reversed_code1 = reversed_comparison_code (XEXP (SET_SRC (set), 0), b1);
b2op0 = XEXP (XEXP (SET_SRC (set2), 0), 0);
b2op1 = XEXP (XEXP (SET_SRC (set2), 0), 1);
code2 = GET_CODE (XEXP (SET_SRC (set2), 0));
reversed_code2 = code2;
if (XEXP (SET_SRC (set2), 1) == pc_rtx)
code2 = reversed_comparison_code (XEXP (SET_SRC (set2), 0), b2);
else
reversed_code2 = reversed_comparison_code (XEXP (SET_SRC (set2), 0), b2);
/* If they test the same things and knowing that B1 branches
tells us whether or not B2 branches, check if we
can thread the branch. */
if (rtx_equal_for_thread_p (b1op0, b2op0, b2)
&& rtx_equal_for_thread_p (b1op1, b2op1, b2)
&& (comparison_dominates_p (code1, code2)
|| comparison_dominates_p (code1, reversed_code2)))
{
t1 = prev_nonnote_insn (b1);
t2 = prev_nonnote_insn (b2);
while (t1 != 0 && t2 != 0)
{
if (t2 == label)
{
/* We have reached the target of the first branch.
If there are no pending register equivalents,
we know that this branch will either always
succeed (if the senses of the two branches are
the same) or always fail (if not). */
rtx new_label;
if (num_same_regs != 0)
break;
if (comparison_dominates_p (code1, code2))
new_label = JUMP_LABEL (b2);
else
new_label = get_label_after (b2);
if (JUMP_LABEL (b1) != new_label)
{
rtx prev = PREV_INSN (new_label);
if (flag_before_loop
&& GET_CODE (prev) == NOTE
&& NOTE_LINE_NUMBER (prev) == NOTE_INSN_LOOP_BEG)
{
/* Don't thread to the loop label. If a loop
label is reused, loop optimization will
be disabled for that loop. */
new_label = gen_label_rtx ();
emit_label_after (new_label, PREV_INSN (prev));
}
changed |= redirect_jump (b1, new_label, 1);
}
break;
}
/* If either of these is not a normal insn (it might be
a JUMP_INSN, CALL_INSN, or CODE_LABEL) we fail. (NOTEs
have already been skipped above.) Similarly, fail
if the insns are different. */
if (GET_CODE (t1) != INSN || GET_CODE (t2) != INSN
|| recog_memoized (t1) != recog_memoized (t2)
|| ! rtx_equal_for_thread_p (PATTERN (t1),
PATTERN (t2), t2))
break;
t1 = prev_nonnote_insn (t1);
t2 = prev_nonnote_insn (t2);
}
}
}
}
/* Clean up. */
free (modified_regs);
free (same_regs);
free (all_reset);
}
/* This is like RTX_EQUAL_P except that it knows about our handling of
possibly equivalent registers and knows to consider volatile and
modified objects as not equal.
YINSN is the insn containing Y. */
int
rtx_equal_for_thread_p (x, y, yinsn)
rtx x, y;
rtx yinsn;
{
int i;
int j;
enum rtx_code code;
const char *fmt;
code = GET_CODE (x);
/* Rtx's of different codes cannot be equal. */
if (code != GET_CODE (y))
return 0;
/* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
(REG:SI x) and (REG:HI x) are NOT equivalent. */
if (GET_MODE (x) != GET_MODE (y))
return 0;
/* For floating-point, consider everything unequal. This is a bit
pessimistic, but this pass would only rarely do anything for FP
anyway. */
if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
&& FLOAT_MODE_P (GET_MODE (x)) && ! flag_unsafe_math_optimizations)
return 0;
/* For commutative operations, the RTXs match if the operands match in
   either order.  Also handle the simple binary and unary cases without
   a loop.  */
if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
return ((rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn)
&& rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 1), yinsn))
|| (rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 1), yinsn)
&& rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 0), yinsn)));
else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
return (rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn)
&& rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 1), yinsn));
else if (GET_RTX_CLASS (code) == '1')
return rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn);
/* Handle special-cases first. */
switch (code)
{
case REG:
if (REGNO (x) == REGNO (y) && ! modified_regs[REGNO (x)])
return 1;
/* If neither is a user variable or a hard register, check for possible
   equivalence.  */
if (REG_USERVAR_P (x) || REG_USERVAR_P (y)
|| REGNO (x) < FIRST_PSEUDO_REGISTER
|| REGNO (y) < FIRST_PSEUDO_REGISTER)
return 0;
if (same_regs[REGNO (x)] == -1)
{
same_regs[REGNO (x)] = REGNO (y);
num_same_regs++;
/* If this is the first time we are seeing a register on the `Y'
side, see if it is the last use. If not, we can't thread the
jump, so mark it as not equivalent. */
if (REGNO_LAST_UID (REGNO (y)) != INSN_UID (yinsn))
return 0;
return 1;
}
else
return (same_regs[REGNO (x)] == (int) REGNO (y));
break;
case MEM:
/* If memory modified or either volatile, not equivalent.
Else, check address. */
if (modified_mem || MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
return 0;
return rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn);
case ASM_INPUT:
if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
return 0;
break;
case SET:
/* Cancel a pending `same_regs' if setting equivalenced registers.
Then process source. */
if (GET_CODE (SET_DEST (x)) == REG
&& GET_CODE (SET_DEST (y)) == REG)
{
if (same_regs[REGNO (SET_DEST (x))] == (int) REGNO (SET_DEST (y)))
{
same_regs[REGNO (SET_DEST (x))] = -1;
num_same_regs--;
}
else if (REGNO (SET_DEST (x)) != REGNO (SET_DEST (y)))
return 0;
}
else
{
if (rtx_equal_for_thread_p (SET_DEST (x), SET_DEST (y), yinsn) == 0)
return 0;
}
return rtx_equal_for_thread_p (SET_SRC (x), SET_SRC (y), yinsn);
case LABEL_REF:
return XEXP (x, 0) == XEXP (y, 0);
case SYMBOL_REF:
return XSTR (x, 0) == XSTR (y, 0);
default:
break;
}
if (x == y)
return 1;
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
switch (fmt[i])
{
case 'w':
if (XWINT (x, i) != XWINT (y, i))
return 0;
break;
case 'n':
case 'i':
if (XINT (x, i) != XINT (y, i))
return 0;
break;
case 'V':
case 'E':
/* Two vectors must have the same length. */
if (XVECLEN (x, i) != XVECLEN (y, i))
return 0;
/* And the corresponding elements must match. */
for (j = 0; j < XVECLEN (x, i); j++)
if (rtx_equal_for_thread_p (XVECEXP (x, i, j),
XVECEXP (y, i, j), yinsn) == 0)
return 0;
break;
case 'e':
if (rtx_equal_for_thread_p (XEXP (x, i), XEXP (y, i), yinsn) == 0)
return 0;
break;
case 'S':
case 's':
if (strcmp (XSTR (x, i), XSTR (y, i)))
return 0;
break;
case 'u':
/* These are just backpointers, so they don't matter. */
break;
case '0':
case 't':
break;
/* It is believed that rtx's at this level will never
contain anything but integers and other rtx's,
except for within LABEL_REFs and SYMBOL_REFs. */
default:
abort ();
}
}
return 1;
}

gcc/rtl.h:
@@ -1788,8 +1788,6 @@ extern int true_regnum PARAMS ((rtx));
extern int redirect_jump_1 PARAMS ((rtx, rtx));
extern int redirect_jump PARAMS ((rtx, rtx, int));
extern void rebuild_jump_labels PARAMS ((rtx));
-extern void thread_jumps PARAMS ((rtx, int, int));
-extern int rtx_equal_for_thread_p PARAMS ((rtx, rtx, rtx));
extern enum rtx_code reversed_comparison_code PARAMS ((rtx, rtx));
extern enum rtx_code reversed_comparison_code_parts PARAMS ((enum rtx_code,
rtx, rtx, rtx));

gcc/toplev.c:
@@ -2680,7 +2680,8 @@ rest_of_compilation (decl)
if (optimize > 0)
{
find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
-cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);
cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP
| (flag_thread_jumps ? CLEANUP_THREADING : 0));
/* ??? Run if-conversion before delete_null_pointer_checks,
since the later does not preserve the CFG. This should

@@ -2721,13 +2722,6 @@ rest_of_compilation (decl)
reg_scan (insns, max_reg_num (), 1);
-if (flag_thread_jumps)
-  {
-    timevar_push (TV_JUMP);
-    thread_jumps (insns, max_reg_num (), 1);
-    timevar_pop (TV_JUMP);
-  }
tem = cse_main (insns, max_reg_num (), 0, rtl_dump_file);
/* If we are not running more CSE passes, then we are no longer

@@ -2750,14 +2744,16 @@ rest_of_compilation (decl)
delete_trivially_dead_insns (insns, max_reg_num (), 0);
/* Try to identify useless null pointer tests and delete them. */
-if (flag_delete_null_pointer_checks)
if (flag_delete_null_pointer_checks || flag_thread_jumps)
{
timevar_push (TV_JUMP);
find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
-cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);
cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP
| (flag_thread_jumps ? CLEANUP_THREADING : 0));
-delete_null_pointer_checks (insns);
if (flag_delete_null_pointer_checks)
delete_null_pointer_checks (insns);
/* CFG is no longer maintained up-to-date. */
free_bb_for_insn ();
timevar_pop (TV_JUMP);

@@ -2928,16 +2924,6 @@ rest_of_compilation (decl)
}
}
-if (flag_thread_jumps)
-  {
-    /* This pass of jump threading straightens out code
-       that was kinked by loop optimization.  */
-    timevar_push (TV_JUMP);
-    reg_scan (insns, max_reg_num (), 0);
-    thread_jumps (insns, max_reg_num (), 0);
-    timevar_pop (TV_JUMP);
-  }
close_dump_file (DFI_cse2, print_rtl, insns);
timevar_pop (TV_CSE2);

@@ -2955,7 +2941,8 @@ rest_of_compilation (decl)
open_dump_file (DFI_cfg, decl);
find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
-cleanup_cfg (optimize ? CLEANUP_EXPENSIVE : 0);
cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
             | (flag_thread_jumps ? CLEANUP_THREADING : 0));
check_function_return_warnings ();
/* It may make more sense to mark constant functions after dead code is