Commit c5563e11 by Jim Wilson

(movesi_unaligned, movsi_ulw, movsi_usw): Delete.

(extv, extzv, insv, movsi_ulw, movsi_usw): New patterns.

From-SVN: r7413
parent a40ed31b
......@@ -2545,130 +2545,143 @@ move\\t%0,%z4\\n\\
;;
;; ....................
;; unaligned word moves generated by the block moves.
;; Bit field extract patterns which use lwl/lwr.
;; I don't think these are used anymore. Ian Taylor 30 Sep 93
;; ??? There should be DImode variants for 64 bit code, but the current
;; bitfield scheme can't handle that. We would need to add new optabs
;; in order to make that work.
;;(define_expand "movsi_unaligned"
;; [(set (match_operand:SI 0 "general_operand" "")
;; (unspec [(match_operand:SI 1 "general_operand" "")] 0))]
;; ""
;; "
;;{
;; /* Handle stores. */
;; if (GET_CODE (operands[0]) == MEM)
;; {
;; rtx reg = gen_reg_rtx (SImode);
;; rtx insn = emit_insn (gen_movsi_ulw (reg, operands[1]));
;; rtx addr = XEXP (operands[0], 0);
;; if (CONSTANT_P (addr))
;; REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUIV, addr, REG_NOTES (insn));
;;
;; if (reg_or_0_operand (operands[1], SImode))
;; DONE;
;;
;; operands[1] = reg;
;; }
;;
;; /* Generate appropriate load, store. If not a load or store,
;; do a normal movsi. */
;; if (GET_CODE (operands[0]) != MEM && GET_CODE (operands[1]) != MEM)
;; {
;; emit_insn (gen_movsi (operands[0], operands[1]));
;; DONE;
;; }
;;
;; /* Fall through and generate normal code. */
;;}")
;;
;;(define_insn "movsi_ulw"
;; [(set (match_operand:SI 0 "register_operand" "=&d,&d,d,d")
;; (unspec [(match_operand:SI 1 "general_operand" "R,o,dIKL,M")] 0))]
;; ""
;; "*
;;{
;; enum rtx_code code;
;; char *ret;
;; rtx offset;
;; rtx addr;
;; rtx mem_addr;
;;
;; if (which_alternative != 0)
;; return mips_move_1word (operands, insn, FALSE);
;;
;; if (TARGET_STATS)
;; mips_count_memory_refs (operands[1], 2);
;;
;; /* The stack/frame pointers are always aligned, so we can convert
;; to the faster lw if we are referencing an aligned stack location. */
;;
;; offset = const0_rtx;
;; addr = XEXP (operands[1], 0);
;; mem_addr = eliminate_constant_term (addr, &offset);
;;
;; if ((INTVAL (offset) & (UNITS_PER_WORD-1)) == 0
;; && (mem_addr == stack_pointer_rtx || mem_addr == frame_pointer_rtx))
;; ret = \"lw\\t%0,%1\";
;;
;; else
;; {
;; ret = \"ulw\\t%0,%1\";
;; if (TARGET_GAS)
;; {
;; enum rtx_code code = GET_CODE (addr);
;;
;; if (code == CONST || code == SYMBOL_REF || code == LABEL_REF)
;; {
;; operands[2] = gen_rtx (REG, SImode, GP_REG_FIRST + 1);
;; ret = \"%[la\\t%2,%1\;ulw\\t%0,0(%2)%]\";
;; }
;; }
;; }
;;
;; return mips_fill_delay_slot (ret, DELAY_LOAD, operands, insn);
;;}"
;; [(set_attr "type" "load,load,move,arith")
;; (set_attr "mode" "SI")
;; (set_attr "length" "2,4,1,2")])
;;
;;(define_insn "movsi_usw"
;; [(set (match_operand:SI 0 "memory_operand" "=R,o")
;; (unspec [(match_operand:SI 1 "reg_or_0_operand" "dJ,dJ")] 0))]
;; ""
;; "*
;;{
;; rtx offset = const0_rtx;
;; rtx addr = XEXP (operands[0], 0);
;; rtx mem_addr = eliminate_constant_term (addr, &offset);
;;
;; if (TARGET_STATS)
;; mips_count_memory_refs (operands[0], 2);
;;
;; /* The stack/frame pointers are always aligned, so we can convert
;; to the faster sw if we are referencing an aligned stack location. */
;;
;; if ((INTVAL (offset) & (UNITS_PER_WORD-1)) == 0
;; && (mem_addr == stack_pointer_rtx || mem_addr == frame_pointer_rtx))
;; return \"sw\\t%1,%0\";
;;
;;
;; if (TARGET_GAS)
;; {
;; enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
;;
;; if (code == CONST || code == SYMBOL_REF || code == LABEL_REF)
;; {
;; operands[2] = gen_rtx (REG, SImode, GP_REG_FIRST + 1);
;; return \"%[la\\t%2,%0\;usw\\t%z1,0(%2)%]\";
;; }
;; }
;;
;; return \"usw\\t%z1,%0\";
;;}"
;; [(set_attr "type" "store")
;; (set_attr "mode" "SI")
;; (set_attr "length" "2,4")])
;; ??? There could be HImode variants for the ulh/ulhu/ush macros.
;; It isn't clear whether this will give better code.
;; Extract a signed bit field using an unaligned load (lwl/lwr, via
;; movsi_ulw).  Only a full-word (32 bit), byte-aligned field in memory
;; is handled here; any other case FAILs over to the generic
;; bit-field extraction code.
(define_expand "extv"
[(set (match_operand:SI 0 "register_operand" "")
(sign_extract:SI (match_operand:QI 1 "memory_operand" "")
(match_operand:SI 2 "immediate_operand" "")
(match_operand:SI 3 "immediate_operand" "")))]
""
"
{
/* If this isn't a 32 bit field, and it doesn't start on a byte boundary
then fail. */
if (INTVAL (operands[2]) != 32 || (INTVAL (operands[3]) % 8) != 0)
FAIL;
/* This can happen for a 64 bit target, when extracting a value from
a 64 bit union member. extract_bit_field doesn't verify that our
source matches the predicate, so we must check again here and
FAIL if the source is not actually a MEM. */
if (GET_CODE (operands[1]) != MEM)
FAIL;
/* Otherwise, emit a lwl/lwr pair to load the value. */
emit_insn (gen_movsi_ulw (operands[0], operands[1]));
DONE;
}")
;; Extract an unsigned bit field using an unaligned load (lwl/lwr, via
;; movsi_ulw).  For a full 32-bit field a zero extract is the same as a
;; sign extract, so this mirrors "extv"; anything narrower or not
;; byte-aligned FAILs over to the generic bit-field code.
(define_expand "extzv"
[(set (match_operand:SI 0 "register_operand" "")
(zero_extract:SI (match_operand:QI 1 "memory_operand" "")
(match_operand:SI 2 "immediate_operand" "")
(match_operand:SI 3 "immediate_operand" "")))]
""
"
{
/* If this isn't a 32 bit field, and it doesn't start on a byte boundary
then fail. */
if (INTVAL (operands[2]) != 32 || (INTVAL (operands[3]) % 8) != 0)
FAIL;
/* This can happen for a 64 bit target, when extracting a value from
a 64 bit union member. extract_bit_field doesn't verify that our
source matches the predicate, so we must check again here and
FAIL if the source is not actually a MEM. */
if (GET_CODE (operands[1]) != MEM)
FAIL;
/* Otherwise, emit a lwl/lwr pair to load the value. */
emit_insn (gen_movsi_ulw (operands[0], operands[1]));
DONE;
}")
;; Insert into a bit field using an unaligned store (swl/swr, via
;; movsi_usw).  Only a full-word (32 bit), byte-aligned field in memory
;; is handled here; any other case FAILs over to the generic
;; bit-field insertion code.
(define_expand "insv"
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "")
(match_operand:SI 1 "immediate_operand" "")
(match_operand:SI 2 "immediate_operand" ""))
(match_operand:SI 3 "register_operand" ""))]
""
"
{
/* If this isn't a 32 bit field, and it doesn't start on a byte boundary
then fail. */
if (INTVAL (operands[1]) != 32 || (INTVAL (operands[2]) % 8) != 0)
FAIL;
/* This can happen for a 64 bit target, when storing into a 32 bit union
member. store_bit_field doesn't verify that our target matches the
predicate, so we must check again here and FAIL if the destination
is not actually a MEM. */
if (GET_CODE (operands[0]) != MEM)
FAIL;
/* Otherwise, emit a swl/swr pair to store the value. */
emit_insn (gen_movsi_usw (operands[0], operands[3]));
DONE;
}")
;; unaligned word moves generated by the bit field patterns
;;
;; Load an SImode value from a possibly unaligned memory location.
;; The operation is wrapped in an unspec (number 0) so that nothing
;; else will match it; the source is a QImode MEM so the pattern is
;; not restricted to word-aligned addresses.  Output is normally the
;; "ulw" assembler macro (expanded by the assembler into lwl/lwr);
;; when the address is a word-aligned stack or frame slot — which is
;; always aligned — the faster plain "lw" is used instead.
(define_insn "movsi_ulw"
[(set (match_operand:SI 0 "register_operand" "=&d,&d")
(unspec [(match_operand:QI 1 "general_operand" "R,o")] 0))]
""
"*
{
/* Split the address into a base and a constant byte offset so that
the alignment of the offset can be inspected below. */
rtx offset = const0_rtx;
rtx addr = XEXP (operands[1], 0);
rtx mem_addr = eliminate_constant_term (addr, &offset);
char *ret;
if (TARGET_STATS)
mips_count_memory_refs (operands[1], 2);
/* The stack/frame pointers are always aligned, so we can convert
to the faster lw if we are referencing an aligned stack location. */
if ((INTVAL (offset) & 3) == 0
&& (mem_addr == stack_pointer_rtx || mem_addr == frame_pointer_rtx))
ret = \"lw\\t%0,%1\";
else
ret = \"ulw\\t%0,%1\";
/* Either form is a load, so let the delay-slot machinery handle it. */
return mips_fill_delay_slot (ret, DELAY_LOAD, operands, insn);
}"
[(set_attr "type" "load,load")
(set_attr "mode" "SI")
(set_attr "length" "2,4")])
;; Store an SImode value to a possibly unaligned memory location.
;; Counterpart of movsi_ulw above; uses a different unspec number (1)
;; so the load and store patterns cannot match each other.  Output is
;; normally the "usw" assembler macro (expanded into swl/swr); when
;; the address is a word-aligned stack or frame slot — always aligned
;; — the faster plain "sw" is used instead.  %z1 emits $0 when the
;; source operand is the constant zero (reg_or_0_operand).
(define_insn "movsi_usw"
[(set (match_operand:QI 0 "memory_operand" "=R,o")
(unspec [(match_operand:SI 1 "reg_or_0_operand" "dJ,dJ")] 1))]
""
"*
{
/* Split the address into a base and a constant byte offset so that
the alignment of the offset can be inspected below. */
rtx offset = const0_rtx;
rtx addr = XEXP (operands[0], 0);
rtx mem_addr = eliminate_constant_term (addr, &offset);
if (TARGET_STATS)
mips_count_memory_refs (operands[0], 2);
/* The stack/frame pointers are always aligned, so we can convert
to the faster sw if we are referencing an aligned stack location. */
if ((INTVAL (offset) & 3) == 0
&& (mem_addr == stack_pointer_rtx || mem_addr == frame_pointer_rtx))
return \"sw\\t%1,%0\";
return \"usw\\t%z1,%0\";
}"
[(set_attr "type" "store")
(set_attr "mode" "SI")
(set_attr "length" "2,4")])
;; 64-bit integer moves
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment