Commit 4271f003, authored and committed by Michael Hayes

c4x.c: (c4x_override_options): For compatibility with old target options clear...

	* config/c4x/c4x.c (c4x_override_options): For compatibility
	with old target options, clear flag_branch_on_count_reg if
	-mno-rptb specified and set flag_argument_noalias if -mno-aliases
	specified.
	(c4x_output_cbranch): Handle a sequence of insns rather than a
 	single insn.
	(c4x_rptb_insert): Do not emit a RPTB insn if the RC register
	has not been allocated as the loop counter.
	(c4x_address_conflict): Do not allow two volatile memory references.
	(valid_parallel_operands_4, valid_parallel_operands_5,
 	valid_parallel_operands_6): Reject pattern if the register destination
	of the first set is used as part of an address in the second set.

From-SVN: r23879
parent 0fe69aba
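
A minimal standalone model of the backward-compatibility handling described in the first ChangeLog item (the flag and macro names mirror the diff below; the struct, helper, and main driver are illustrative scaffolding only, not GCC code):

#include <stdio.h>

/* Standalone model of the -mno-rptb / -mno-aliases compatibility shim
   added to c4x_override_options; the real code tests TARGET_RPTB and
   TARGET_ALIASES and writes GCC's option variables directly.  */
struct c4x_opts
{
  int target_rptb;               /* 0 when -mno-rptb is given.  */
  int target_aliases;            /* 0 when -mno-aliases is given.  */
  int flag_branch_on_count_reg;  /* enabled at -O1 and above.  */
  int flag_argument_noalias;
};

static void
apply_compat_options (struct c4x_opts *o)
{
  /* Old -mno-rptb behaviour: do not branch on the loop count register.  */
  if (!o->target_rptb && o->flag_branch_on_count_reg)
    o->flag_branch_on_count_reg = 0;

  /* Old -mno-aliases behaviour: assume arguments do not alias.  */
  if (!o->target_aliases && !o->flag_argument_noalias)
    o->flag_argument_noalias = 1;
}

int
main (void)
{
  struct c4x_opts o = { 0, 1, 1, 0 };   /* pretend -O2 -mno-rptb  */

  apply_compat_options (&o);
  printf ("branch_on_count_reg=%d argument_noalias=%d\n",
          o.flag_branch_on_count_reg, o.flag_argument_noalias);
  return 0;
}

Running it with the pretend -O2 -mno-rptb settings above prints branch_on_count_reg=0, matching the first compatibility rule.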
gcc/ChangeLog
@@ -16,9 +16,12 @@ Thu Nov 26 15:16:05 1998  Michael Hayes  <m.hayes@elec.canterbury.ac.nz>
 	specified.
 	(c4x_output_cbranch): Handle a sequence of insns rather than a
 	single insn.
-	(c4x_rptb_insert): Don not emit a RPTB insn if the RC register
+	(c4x_rptb_insert): Do not emit a RPTB insn if the RC register
 	has not been allocated as the loop counter.
-	(c4x_address_conflict):
+	(c4x_address_conflict): Do not allow two volatile memory references.
+	(valid_parallel_operands_4, valid_parallel_operands_5,
+	valid_parallel_operands_6): Reject pattern if the register destination
+	of the first set is used as part of an address in the second set.
 
 Thu Nov 26 14:56:32 1998  Michael Hayes  <m.hayes@elec.canterbury.ac.nz>
gcc/config/c4x/c4x.c
@@ -159,9 +159,6 @@ tree interrupt_tree = NULL_TREE;
 void
 c4x_override_options ()
 {
-  /* Convert foo / 8.0 into foo * 0.125, etc.  */
-  flag_fast_math = 1;
-
   if (c4x_rpts_cycles_string)
     c4x_rpts_cycles = atoi (c4x_rpts_cycles_string);
   else
@@ -204,8 +201,21 @@ c4x_override_options ()
   else
     target_flags &= ~C3X_FLAG;
 
+  /* Convert foo / 8.0 into foo * 0.125, etc.  */
+  flag_fast_math = 1;
+
+  /* We should phase out the following at some stage.
+     This provides compatibility with the old -mno-rptb option.  */
+  if (!TARGET_RPTB && flag_branch_on_count_reg)
+    flag_branch_on_count_reg = 0;
+
+  /* We should phase out the following at some stage.
+     This provides compatibility with the old -mno-aliases option.  */
+  if (!TARGET_ALIASES && !flag_argument_noalias)
+    flag_argument_noalias = 1;
 }
 
+/* This is called before c4x_override_options.  */
 void
 c4x_optimization_options (level, size)
      int level;
@@ -213,7 +223,7 @@ c4x_optimization_options (level, size)
 {
   /* When optimizing, enable use of RPTB instruction.  */
   if (level >= 1)
-	flag_branch_on_count_reg = 1;
+    flag_branch_on_count_reg = 1;
 }
 
 /* Write an ASCII string.  */
@@ -1419,30 +1429,26 @@ c4x_gen_compare_reg (code, x, y)
 }
 
 char *
-c4x_output_cbranch (reversed, insn)
-     int reversed;
-     rtx insn;
+c4x_output_cbranch (form, seq)
+     char *form;
+     rtx seq;
 {
   int delayed = 0;
   int annultrue = 0;
   int annulfalse = 0;
   rtx delay;
   char *cp;
-  static char str[20];
+  static char str[100];
 
   if (final_sequence)
     {
       delay = XVECEXP (final_sequence, 0, 1);
-      delayed = !INSN_ANNULLED_BRANCH_P (insn);
-      annultrue = INSN_ANNULLED_BRANCH_P (insn) && !INSN_FROM_TARGET_P (delay);
-      annulfalse = INSN_ANNULLED_BRANCH_P (insn) && INSN_FROM_TARGET_P (delay);
+      delayed = !INSN_ANNULLED_BRANCH_P (seq);
+      annultrue = INSN_ANNULLED_BRANCH_P (seq) && !INSN_FROM_TARGET_P (delay);
+      annulfalse = INSN_ANNULLED_BRANCH_P (seq) && INSN_FROM_TARGET_P (delay);
     }
-  cp = str;
-  *cp++ = 'b';
-  *cp++ = '%';
-  if (reversed)
-    *cp++ = 'I';
-  *cp++ = '0';
+  strcpy (str, form);
+  cp = &str [strlen (str)];
   if (delayed)
     {
       *cp++ = '%';
@@ -1466,7 +1472,6 @@ c4x_output_cbranch (reversed, insn)
   return str;
 }
 
-
 void
 c4x_print_operand (file, op, letter)
      FILE *file;		/* file to write to */
@@ -2040,10 +2045,19 @@ c4x_rptb_insert (insn)
 {
   rtx end_label;
   rtx start_label;
+  rtx count_reg;
+
+  /* If the count register has not been allocated to RC, say if
+     there is a movstr pattern in the loop, then do not insert a
+     RPTB instruction.  Instead we emit a decrement and branch
+     at the end of the loop.  */
+  count_reg = XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn), 0, 0)), 0), 0);
+  if (REGNO (count_reg) != RC_REGNO)
+    return;
 
   /* Extract the start label from the jump pattern (rptb_end).  */
   start_label = XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn), 0, 0)), 1), 0);
 
   /* We'll have to update the basic blocks.  */
   end_label = gen_label_rtx ();
   emit_label_after (end_label, insn);
@@ -3123,6 +3137,9 @@ c4x_address_conflict (op0, op1, store0, store1)
   int disp0;
   int disp1;
 
+  if (MEM_VOLATILE_P (op0) && MEM_VOLATILE_P (op1))
+    return 1;
+
   c4x_S_address_parse (op0, &base0, &incdec0, &index0, &disp0);
   c4x_S_address_parse (op1, &base1, &incdec1, &index1, &disp1);
 
@@ -3137,12 +3154,7 @@ c4x_address_conflict (op0, op1, store0, store1)
      have an aliased address if both locations are not marked
      volatile, it is probably safer to flag a potential conflict
      if either location is volatile.  */
-  if (!TARGET_ALIASES)
-    {
-      if (MEM_VOLATILE_P (op0) && MEM_VOLATILE_P (op1))
-	return 1;
-    }
-  else
+  if (!flag_argument_alias)
     {
       if (MEM_VOLATILE_P (op0) || MEM_VOLATILE_P (op1))
 	return 1;
@@ -3222,6 +3234,14 @@ valid_parallel_operands_4 (operands, mode)
      par_ind_operand() operands.  Thus of the 4 operands, only 2
      should be REGs and the other 2 should be MEMs.  */
 
+  /* This test prevents the multipack pass from using this pattern if
+     op0 is used as an index or base register in op3, since this combination
+     will require reloading.  */
+  if (GET_CODE (op0) == REG
+      && GET_CODE (op3) == MEM
+      && reg_mentioned_p (op0, XEXP (op3, 0)))
+    return 0;
+
   /* LDI||LDI */
   if (GET_CODE (op0) == REG && GET_CODE (op2) == REG)
     return (REGNO (op0) != REGNO (op2))
@@ -3246,6 +3266,7 @@ valid_parallel_operands_4 (operands, mode)
   return 0;
 }
 
+
 /* We only use this to check operands 1 and 2 since these may be
    commutative.  It will need extending for the C32 opcodes.  */
 int
@@ -3254,23 +3275,36 @@ valid_parallel_operands_5 (operands, mode)
      enum machine_mode mode ATTRIBUTE_UNUSED;
 {
   int regs = 0;
-  rtx op0 = operands[1];
-  rtx op1 = operands[2];
+  rtx op0 = operands[0];
+  rtx op1 = operands[1];
+  rtx op2 = operands[2];
+  rtx op3 = operands[3];
 
   if (GET_CODE (op0) == SUBREG)
     op0 = SUBREG_REG (op0);
-  if (GET_CODE (op1) == SUBREG)
-    op1 = SUBREG_REG (op1);
+  if (GET_CODE (op2) == SUBREG)
+    op2 = SUBREG_REG (op2);
 
   /* The patterns should only allow ext_low_reg_operand() or
      par_ind_operand() operands.  */
   if (GET_CODE (op0) == REG)
     regs++;
-  if (GET_CODE (op1) == REG)
+  if (GET_CODE (op2) == REG)
     regs++;
 
-  return regs == 1;
+  if (regs != 1)
+    return 0;
+
+  /* This test prevents the multipack pass from using this pattern if
+     op0 is used as an index or base register in op3, since this combination
+     will require reloading.  */
+  if (GET_CODE (op0) == REG
+      && GET_CODE (op3) == MEM
+      && reg_mentioned_p (op0, XEXP (op3, 0)))
+    return 0;
+
+  return 1;
 }
@@ -3280,47 +3314,48 @@ valid_parallel_operands_6 (operands, mode)
      enum machine_mode mode ATTRIBUTE_UNUSED;
 {
   int regs = 0;
-  rtx op0 = operands[1];
-  rtx op1 = operands[2];
-  rtx op2 = operands[4];
-  rtx op3 = operands[5];
+  rtx op0 = operands[0];
+  rtx op1 = operands[1];
+  rtx op2 = operands[2];
+  rtx op4 = operands[4];
+  rtx op5 = operands[5];
 
+  if (GET_CODE (op0) == SUBREG)
+    op0 = SUBREG_REG (op0);
   if (GET_CODE (op1) == SUBREG)
     op1 = SUBREG_REG (op1);
   if (GET_CODE (op2) == SUBREG)
     op2 = SUBREG_REG (op2);
-  if (GET_CODE (op3) == SUBREG)
-    op3 = SUBREG_REG (op3);
+  if (GET_CODE (op4) == SUBREG)
+    op4 = SUBREG_REG (op4);
+  if (GET_CODE (op5) == SUBREG)
+    op5 = SUBREG_REG (op5);
 
   /* The patterns should only allow ext_low_reg_operand() or
      par_ind_operand() operands.  Thus of the 4 input operands, only 2
      should be REGs and the other 2 should be MEMs.  */
-  if (GET_CODE (op0) == REG)
-    regs++;
   if (GET_CODE (op1) == REG)
     regs++;
   if (GET_CODE (op2) == REG)
     regs++;
-  if (GET_CODE (op3) == REG)
+  if (GET_CODE (op4) == REG)
+    regs++;
+  if (GET_CODE (op5) == REG)
     regs++;
 
   /* The new C30/C40 silicon dies allow 3 regs of the 4 input operands.
      Perhaps we should count the MEMs as well?  */
-  return regs == 2;
-}
-
-
-int
-legitimize_parallel_operands_6 (operands, mode)
-     rtx *operands;
-     enum machine_mode mode;
-{
-  /* It's gonna be hard to legitimize operands for a parallel
-     instruction...  TODO...  */
-  return valid_parallel_operands_6 (operands, mode);
+  if (regs != 2)
+    return 0;
+
+  /* This test prevents the multipack pass from using this pattern if
+     op0 is used as an index or base register in op4 or op5, since
+     this combination will require reloading.  */
+  if (GET_CODE (op0) == REG
+      && ((GET_CODE (op4) == MEM && reg_mentioned_p (op0, XEXP (op4, 0)))
+          || (GET_CODE (op5) == MEM && reg_mentioned_p (op0, XEXP (op5, 0)))))
+    return 0;
+
+  return 1;
 }
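
Finally, a rough standalone sketch of the test added to valid_parallel_operands_4/5/6: two sets cannot be packed into a single parallel instruction when the destination register of the first set forms part of the address in the second set. The toy_mem_address struct, the dest_feeds_address helper, and the register numbers below are invented for illustration; the real check walks rtx addresses with reg_mentioned_p.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the rejection test: the multipack pass must not pair two
   sets when the destination register of the first set is used as a base
   or index register in the memory address of the second set, since that
   combination would require a reload.  */
struct toy_mem_address
{
  int base_reg;   /* register number, or -1 if absent */
  int index_reg;
};

static bool
dest_feeds_address (int dest_reg, const struct toy_mem_address *addr)
{
  return addr->base_reg == dest_reg || addr->index_reg == dest_reg;
}

int
main (void)
{
  /* Second set loads through *(r3 + disp); first set writes r3.  */
  struct toy_mem_address addr = { 3, -1 };

  printf ("reject when dest is r3: %d\n", dest_feeds_address (3, &addr));
  printf ("reject when dest is r5: %d\n", dest_feeds_address (5, &addr));
  return 0;
}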