Commit 8472b255 by Bernd Schmidt Committed by Bernd Schmidt

bfin.c (np_check_regno, [...]): New static variables.

	* config/bfin/bfin.c (np_check_regno, np_after_branch): New static
	variables.
	(note_np_check_stores): New function.
	(harmless_null_pointer_p): New function.
	(trapping_loads_p): New args NP_REG and AFTER_NP_BRANCH.  Callers
	changed.  Take into account whether we're in the shadow of a condjump
	that tested NP_REG for NULL.
	Lose all code that tested for SEQUENCEs.
	(workaround_speculation): Avoid inserting NOPs for loads that are
	either always executed or a NULL pointer.

From-SVN: r151513
parent ac875c2e
2009-09-08 Bernd Schmidt <bernd.schmidt@analog.com>
* config/bfin/bfin.c (np_check_regno, np_after_branch): New static
variables.
(note_np_check_stores): New function.
(harmless_null_pointer_p): New function.
(trapping_loads_p): New args NP_REG and AFTER_NP_BRANCH. Callers
changed. Take into account whether we're in the shadow of a condjump
that tested NP_REG for NULL.
Lose all code that tested for SEQUENCEs.
(workaround_speculation): Avoid inserting NOPs for loads that are
either always executed or a NULL pointer.
2009-09-08  Jan Hubicka  <jh@suse.cz>
	* doc/invoke.texi (early-inlining-insns): Reduce from 12 to 8.
......
...@@ -5039,28 +5039,38 @@ type_for_anomaly (rtx insn) ...@@ -5039,28 +5039,38 @@ type_for_anomaly (rtx insn)
return get_attr_type (insn); return get_attr_type (insn);
} }
/* Return nonzero if INSN contains any loads that may trap. It handles /* Return true iff the address found in MEM is based on the register
SEQUENCEs correctly. */ NP_REG and optionally has a positive offset. */
static bool static bool
trapping_loads_p (rtx insn) harmless_null_pointer_p (rtx mem, int np_reg)
{ {
rtx pat = PATTERN (insn); mem = XEXP (mem, 0);
if (GET_CODE (pat) == SEQUENCE) if (GET_CODE (mem) == POST_INC || GET_CODE (mem) == POST_DEC)
mem = XEXP (mem, 0);
if (REG_P (mem) && REGNO (mem) == np_reg)
return true;
if (GET_CODE (mem) == PLUS
&& REG_P (XEXP (mem, 0)) && REGNO (XEXP (mem, 0)) == np_reg)
{ {
enum attr_type t; mem = XEXP (mem, 1);
t = get_attr_type (XVECEXP (pat, 0, 1)); if (GET_CODE (mem) == CONST_INT && INTVAL (mem) > 0)
if (t == TYPE_MCLD
&& may_trap_p (SET_SRC (PATTERN (XVECEXP (pat, 0, 1)))))
return true; return true;
t = get_attr_type (XVECEXP (pat, 0, 2));
if (t == TYPE_MCLD
&& may_trap_p (SET_SRC (PATTERN (XVECEXP (pat, 0, 2)))))
return true;
return false;
} }
else return false;
return may_trap_p (SET_SRC (single_set (insn))); }
/* Return nonzero if INSN contains any loads that may trap.

   NP_REG, if not -1, is a P register that was recently compared against
   NULL; AFTER_NP_BRANCH is true if we are in the shadow of the condjump
   that tested it.  In that case, loads based on NP_REG are considered
   harmless (see harmless_null_pointer_p) and do not count as trapping.  */
static bool
trapping_loads_p (rtx insn, int np_reg, bool after_np_branch)
{
  rtx mem = SET_SRC (single_set (insn));

  /* Outside the shadow of the null-pointer condjump, knowing NP_REG
     tells us nothing about the safety of this load.  */
  if (!after_np_branch)
    np_reg = -1;
  return ((np_reg == -1 || !harmless_null_pointer_p (mem, np_reg))
	  && may_trap_p (mem));
}
/* Return INSN if it is of TYPE_MCLD. Alternatively, if INSN is the start of /* Return INSN if it is of TYPE_MCLD. Alternatively, if INSN is the start of
...@@ -5098,6 +5108,24 @@ indirect_call_p (rtx pat) ...@@ -5098,6 +5108,24 @@ indirect_call_p (rtx pat)
return REG_P (pat); return REG_P (pat);
} }
/* During workaround_speculation, track whether we're in the shadow of a
   conditional branch that tests a P register for NULL.  If so, we can omit
   emitting NOPs if we see a load from that P register, since a speculative
   access at address 0 isn't a problem, and the load is executed in all other
   cases anyway.
   Global for communication with note_np_check_stores through note_stores.

   np_check_regno is the register number of the P register most recently
   compared against zero, or -1 if no such comparison is live.
   np_after_branch is true once the condjump testing that register has
   been seen.  */
static int np_check_regno = -1;
static bool np_after_branch = false;

/* Subroutine of workaround_speculation, called through note_stores.
   Invalidate the tracked null-pointer check if X clobbers either the
   condition-code register or the P register being tracked.  */
static void
note_np_check_stores (rtx x, const_rtx pat, void *data ATTRIBUTE_UNUSED)
{
  if (REG_P (x) && (REGNO (x) == REG_CC || REGNO (x) == np_check_regno))
    np_check_regno = -1;
}
static void static void
workaround_speculation (void) workaround_speculation (void)
{ {
...@@ -5119,17 +5147,38 @@ workaround_speculation (void) ...@@ -5119,17 +5147,38 @@ workaround_speculation (void)
next = find_next_insn_start (insn); next = find_next_insn_start (insn);
if (NOTE_P (insn) || BARRIER_P (insn) || LABEL_P (insn)) if (NOTE_P (insn) || BARRIER_P (insn))
continue; continue;
if (LABEL_P (insn))
{
np_check_regno = -1;
continue;
}
pat = PATTERN (insn); pat = PATTERN (insn);
if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER
|| GET_CODE (pat) == ASM_INPUT || GET_CODE (pat) == ADDR_VEC || GET_CODE (pat) == ADDR_VEC || GET_CODE (pat) == ADDR_DIFF_VEC)
|| GET_CODE (pat) == ADDR_DIFF_VEC || asm_noperands (pat) >= 0)
continue; continue;
if (GET_CODE (pat) == ASM_INPUT || asm_noperands (pat) >= 0)
{
np_check_regno = -1;
continue;
}
if (JUMP_P (insn)) if (JUMP_P (insn))
{ {
/* Is this a condjump based on a null pointer comparison we saw
earlier? */
if (np_check_regno != -1
&& recog_memoized (insn) == CODE_FOR_cbranchbi4)
{
rtx op = XEXP (SET_SRC (PATTERN (insn)), 0);
gcc_assert (GET_CODE (op) == EQ || GET_CODE (op) == NE);
if (GET_CODE (op) == NE)
np_after_branch = true;
}
if (any_condjump_p (insn) if (any_condjump_p (insn)
&& ! cbranch_predicted_taken_p (insn)) && ! cbranch_predicted_taken_p (insn))
{ {
...@@ -5142,6 +5191,7 @@ workaround_speculation (void) ...@@ -5142,6 +5191,7 @@ workaround_speculation (void)
} }
else if (CALL_P (insn)) else if (CALL_P (insn))
{ {
np_check_regno = -1;
if (cycles_since_jump < INT_MAX) if (cycles_since_jump < INT_MAX)
cycles_since_jump++; cycles_since_jump++;
if (indirect_call_p (pat) && ENABLE_WA_INDIRECT_CALLS) if (indirect_call_p (pat) && ENABLE_WA_INDIRECT_CALLS)
...@@ -5157,13 +5207,44 @@ workaround_speculation (void) ...@@ -5157,13 +5207,44 @@ workaround_speculation (void)
if (cycles_since_jump < INT_MAX) if (cycles_since_jump < INT_MAX)
cycles_since_jump++; cycles_since_jump++;
/* Detect a comparison of a P register with zero. If we later
see a condjump based on it, we have found a null pointer
check. */
if (recog_memoized (insn) == CODE_FOR_compare_eq)
{
rtx src = SET_SRC (PATTERN (insn));
if (REG_P (XEXP (src, 0))
&& P_REGNO_P (REGNO (XEXP (src, 0)))
&& XEXP (src, 1) == const0_rtx)
{
np_check_regno = REGNO (XEXP (src, 0));
np_after_branch = false;
}
else
np_check_regno = -1;
}
if (load_insn && ENABLE_WA_SPECULATIVE_LOADS) if (load_insn && ENABLE_WA_SPECULATIVE_LOADS)
{ {
if (trapping_loads_p (load_insn)) if (trapping_loads_p (load_insn, np_check_regno,
np_after_branch))
delay_needed = 4; delay_needed = 4;
} }
else if (type == TYPE_SYNC && ENABLE_WA_SPECULATIVE_SYNCS) else if (type == TYPE_SYNC && ENABLE_WA_SPECULATIVE_SYNCS)
delay_needed = 3; delay_needed = 3;
/* See if we need to forget about a null pointer comparison
we found earlier. */
if (recog_memoized (insn) != CODE_FOR_compare_eq)
{
note_stores (PATTERN (insn), note_np_check_stores, NULL);
if (np_check_regno != -1)
{
if (find_regno_note (insn, REG_INC, np_check_regno))
np_check_regno = -1;
}
}
} }
if (delay_needed > cycles_since_jump if (delay_needed > cycles_since_jump
...@@ -5241,7 +5322,7 @@ workaround_speculation (void) ...@@ -5241,7 +5322,7 @@ workaround_speculation (void)
if (load_insn && ENABLE_WA_SPECULATIVE_LOADS) if (load_insn && ENABLE_WA_SPECULATIVE_LOADS)
{ {
if (trapping_loads_p (load_insn)) if (trapping_loads_p (load_insn, -1, false))
delay_needed = 2; delay_needed = 2;
} }
else if (type == TYPE_SYNC && ENABLE_WA_SPECULATIVE_SYNCS) else if (type == TYPE_SYNC && ENABLE_WA_SPECULATIVE_SYNCS)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment