Commit 9283726f by Richard Henderson (committed by Richard Henderson)

spu: Rename patterns for vec_widen_<s>mult_even/odd_<mode>

        * config/spu/spu-builtins.md (spu_mpy): Move to spu.md.
        (spu_mpyu, spu_mpyhhu, spu_mpyhh): Likewise.
        * config/spu/spu.md (vec_widen_smult_odd_v8hi): Rename from spu_mpy.
        (vec_widen_umult_odd_v8hi): Rename from spu_mpyu.
        (vec_widen_smult_even_v8hi): Rename from spu_mpyhh.
        (vec_widen_umult_even_v8hi): Rename from spu_mpyhhu.
        * config/spu/spu-builtins.def: Update pattern names to match.

From-SVN: r189406
parent 2371eaec
2012-07-10  Richard Henderson  <rth@redhat.com>
* config/spu/spu-builtins.md (spu_mpy): Move to spu.md.
(spu_mpyu, spu_mpyhhu, spu_mpyhh): Likewise.
* config/spu/spu.md (vec_widen_smult_odd_v8hi): Rename from spu_mpy.
(vec_widen_umult_odd_v8hi): Rename from spu_mpyu.
(vec_widen_smult_even_v8hi): Rename from spu_mpyhh.
(vec_widen_umult_even_v8hi): Rename from spu_mpyhhu.
* config/spu/spu-builtins.def: Update pattern names to match.
* config/rs6000/altivec.md (vec_widen_umult_even_v16qi): Rename
from altivec_vmuleub.
(vec_widen_smult_even_v16qi): Rename from altivec_vmulesb.
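As an aside (not part of this commit), the renamed patterns provide the vectorizer's standard widening even/odd multiplies: each multiplies either the even-numbered or the odd-numbered halfword lanes of two V8HI operands and yields four 32-bit products. A minimal C sketch of the signed variants, with made-up function names and lane numbering taken from the vec_select parallels in the patterns below:

#include <stdint.h>

/* Illustrative reference semantics only; names are invented for this sketch.
   vec_widen_smult_even_v8hi (SPU mpyhh): multiply lanes 0, 2, 4, 6.  */
static void
widen_smult_even (int32_t out[4], const int16_t a[8], const int16_t b[8])
{
  for (int i = 0; i < 4; i++)
    out[i] = (int32_t) a[2 * i] * (int32_t) b[2 * i];
}

/* vec_widen_smult_odd_v8hi (SPU mpy/mpyi): multiply lanes 1, 3, 5, 7.
   The unsigned variants are the same with uint16_t/uint32_t.  */
static void
widen_smult_odd (int32_t out[4], const int16_t a[8], const int16_t b[8])
{
  for (int i = 0; i < 4; i++)
    out[i] = (int32_t) a[2 * i + 1] * (int32_t) b[2 * i + 1];
}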
......
@@ -62,15 +62,15 @@ DEF_BUILTIN (SI_SFI, CODE_FOR_spu_sf, "si_sfi", B_INSN,
DEF_BUILTIN (SI_SFX, CODE_FOR_spu_sfx, "si_sfx", B_INSN, _A4(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
DEF_BUILTIN (SI_BG, CODE_FOR_spu_bg, "si_bg", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
DEF_BUILTIN (SI_BGX, CODE_FOR_spu_bgx, "si_bgx", B_INSN, _A4(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
-DEF_BUILTIN (SI_MPY, CODE_FOR_spu_mpy, "si_mpy", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
+DEF_BUILTIN (SI_MPY, CODE_FOR_vec_widen_smult_odd_v8hi, "si_mpy", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
-DEF_BUILTIN (SI_MPYU, CODE_FOR_spu_mpyu, "si_mpyu", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
+DEF_BUILTIN (SI_MPYU, CODE_FOR_vec_widen_umult_odd_v8hi, "si_mpyu", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
-DEF_BUILTIN (SI_MPYI, CODE_FOR_spu_mpy, "si_mpyi", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_S10))
+DEF_BUILTIN (SI_MPYI, CODE_FOR_vec_widen_smult_odd_v8hi, "si_mpyi", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_S10))
-DEF_BUILTIN (SI_MPYUI, CODE_FOR_spu_mpyu, "si_mpyui", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_S10))
+DEF_BUILTIN (SI_MPYUI, CODE_FOR_vec_widen_umult_odd_v8hi, "si_mpyui", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_S10))
DEF_BUILTIN (SI_MPYA, CODE_FOR_spu_mpya, "si_mpya", B_INSN, _A4(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
DEF_BUILTIN (SI_MPYH, CODE_FOR_spu_mpyh, "si_mpyh", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
DEF_BUILTIN (SI_MPYS, CODE_FOR_spu_mpys, "si_mpys", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
-DEF_BUILTIN (SI_MPYHH, CODE_FOR_spu_mpyhh, "si_mpyhh", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
+DEF_BUILTIN (SI_MPYHH, CODE_FOR_vec_widen_smult_even_v8hi, "si_mpyhh", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
-DEF_BUILTIN (SI_MPYHHU, CODE_FOR_spu_mpyhhu, "si_mpyhhu", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
+DEF_BUILTIN (SI_MPYHHU, CODE_FOR_vec_widen_umult_even_v8hi, "si_mpyhhu", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
DEF_BUILTIN (SI_MPYHHA, CODE_FOR_spu_mpyhha, "si_mpyhha", B_INSN, _A4(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
DEF_BUILTIN (SI_MPYHHAU, CODE_FOR_spu_mpyhhau, "si_mpyhhau", B_INSN, _A4(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
DEF_BUILTIN (SI_CLZ, CODE_FOR_clzv4si2, "si_clz", B_INSN, _A2(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
@@ -295,16 +295,16 @@ DEF_BUILTIN (SPU_MHHADD, CODE_FOR_nothing, "spu_mhhadd",
DEF_BUILTIN (SPU_MHHADD_0, CODE_FOR_spu_mpyhhau, "spu_mhhadd_0", B_INTERNAL, _A4(SPU_BTI_UV4SI, SPU_BTI_UV8HI, SPU_BTI_UV8HI, SPU_BTI_UV4SI))
DEF_BUILTIN (SPU_MHHADD_1, CODE_FOR_spu_mpyhha, "spu_mhhadd_1", B_INTERNAL, _A4(SPU_BTI_V4SI, SPU_BTI_V8HI, SPU_BTI_V8HI, SPU_BTI_V4SI))
DEF_BUILTIN (SPU_MULE, CODE_FOR_nothing, "spu_mule", B_OVERLOAD, _A1(SPU_BTI_VOID))
-DEF_BUILTIN (SPU_MULE_0, CODE_FOR_spu_mpyhhu, "spu_mule_0", B_INTERNAL, _A3(SPU_BTI_UV4SI, SPU_BTI_UV8HI, SPU_BTI_UV8HI))
+DEF_BUILTIN (SPU_MULE_0, CODE_FOR_vec_widen_umult_even_v8hi, "spu_mule_0", B_INTERNAL, _A3(SPU_BTI_UV4SI, SPU_BTI_UV8HI, SPU_BTI_UV8HI))
-DEF_BUILTIN (SPU_MULE_1, CODE_FOR_spu_mpyhh, "spu_mule_1", B_INTERNAL, _A3(SPU_BTI_V4SI, SPU_BTI_V8HI, SPU_BTI_V8HI))
+DEF_BUILTIN (SPU_MULE_1, CODE_FOR_vec_widen_smult_even_v8hi, "spu_mule_1", B_INTERNAL, _A3(SPU_BTI_V4SI, SPU_BTI_V8HI, SPU_BTI_V8HI))
DEF_BUILTIN (SPU_MUL, CODE_FOR_nothing, "spu_mul", B_OVERLOAD, _A1(SPU_BTI_VOID))
DEF_BUILTIN (SPU_MUL_0, CODE_FOR_mulv4sf3, "spu_mul_0", B_INTERNAL, _A3(SPU_BTI_V4SF, SPU_BTI_V4SF, SPU_BTI_V4SF))
DEF_BUILTIN (SPU_MUL_1, CODE_FOR_mulv2df3, "spu_mul_1", B_INTERNAL, _A3(SPU_BTI_V2DF, SPU_BTI_V2DF, SPU_BTI_V2DF))
DEF_BUILTIN (SPU_MULO, CODE_FOR_nothing, "spu_mulo", B_OVERLOAD, _A1(SPU_BTI_VOID))
-DEF_BUILTIN (SPU_MULO_0, CODE_FOR_spu_mpy, "spu_mulo_0", B_INTERNAL, _A3(SPU_BTI_V4SI, SPU_BTI_V8HI, SPU_BTI_V8HI))
+DEF_BUILTIN (SPU_MULO_0, CODE_FOR_vec_widen_smult_odd_v8hi, "spu_mulo_0", B_INTERNAL, _A3(SPU_BTI_V4SI, SPU_BTI_V8HI, SPU_BTI_V8HI))
-DEF_BUILTIN (SPU_MULO_1, CODE_FOR_spu_mpyu, "spu_mulo_1", B_INTERNAL, _A3(SPU_BTI_UV4SI, SPU_BTI_UV8HI, SPU_BTI_UV8HI))
+DEF_BUILTIN (SPU_MULO_1, CODE_FOR_vec_widen_umult_odd_v8hi, "spu_mulo_1", B_INTERNAL, _A3(SPU_BTI_UV4SI, SPU_BTI_UV8HI, SPU_BTI_UV8HI))
-DEF_BUILTIN (SPU_MULO_2, CODE_FOR_spu_mpy, "spu_mulo_2", B_INTERNAL, _A3(SPU_BTI_V4SI, SPU_BTI_V8HI, SPU_BTI_INTHI))
+DEF_BUILTIN (SPU_MULO_2, CODE_FOR_vec_widen_smult_odd_v8hi, "spu_mulo_2", B_INTERNAL, _A3(SPU_BTI_V4SI, SPU_BTI_V8HI, SPU_BTI_INTHI))
-DEF_BUILTIN (SPU_MULO_3, CODE_FOR_spu_mpyu, "spu_mulo_3", B_INTERNAL, _A3(SPU_BTI_UV4SI, SPU_BTI_UV8HI, SPU_BTI_UINTHI))
+DEF_BUILTIN (SPU_MULO_3, CODE_FOR_vec_widen_umult_odd_v8hi, "spu_mulo_3", B_INTERNAL, _A3(SPU_BTI_UV4SI, SPU_BTI_UV8HI, SPU_BTI_UINTHI))
DEF_BUILTIN (SPU_NMSUB, CODE_FOR_nothing, "spu_nmsub", B_OVERLOAD, _A1(SPU_BTI_VOID))
DEF_BUILTIN (SPU_NMSUB_0, CODE_FOR_fnmav4sf4, "spu_nmsub_0", B_INTERNAL, _A4(SPU_BTI_V4SF, SPU_BTI_V4SF, SPU_BTI_V4SF, SPU_BTI_V4SF))
DEF_BUILTIN (SPU_NMSUB_1, CODE_FOR_nfmsv2df4, "spu_nmsub_1", B_INTERNAL, _A4(SPU_BTI_V2DF, SPU_BTI_V2DF, SPU_BTI_V2DF, SPU_BTI_V2DF))
......
@@ -197,41 +197,6 @@
""
"")
;; integer multiply
(define_insn "spu_mpy"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r,r")
(mult:V4SI
(sign_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 1 "spu_reg_operand" "r,r")
(parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)])))
(sign_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 2 "spu_arith_operand" "r,B")
(parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)])))))]
""
"@
mpy\t%0,%1,%2
mpyi\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_insn "spu_mpyu"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r,r")
(mult:V4SI
(zero_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 1 "spu_reg_operand" "r,r")
(parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)])))
(zero_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 2 "spu_arith_operand" "r,B")
(parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)])))))]
""
"@
mpyu\t%0,%1,%2
mpyui\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_insn "spu_mpya" (define_insn "spu_mpya"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r") [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
(plus:V4SI (plus:V4SI
@@ -283,36 +248,6 @@
"mpys\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_insn "spu_mpyhhu"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
(mult:V4SI
(zero_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 1 "spu_reg_operand" "r")
(parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)])))
(zero_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 2 "spu_reg_operand" "r")
(parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)])))))]
""
"mpyhhu\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_insn "spu_mpyhh"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
(mult:V4SI
(sign_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 1 "spu_reg_operand" "r")
(parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)])))
(sign_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 2 "spu_reg_operand" "r")
(parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)])))))]
""
"mpyhh\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_insn "spu_mpyhhau" (define_insn "spu_mpyhhau"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r") [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
(plus:V4SI (plus:V4SI
......
@@ -1387,8 +1387,8 @@
rtx mask = gen_reg_rtx (V4SImode);
emit_move_insn (mask, spu_const (V4SImode, 0x0000ffff));
-emit_insn (gen_spu_mpyhh (high, operands[1], operands[2]));
+emit_insn (gen_vec_widen_smult_even_v8hi (high, operands[1], operands[2]));
-emit_insn (gen_spu_mpy (low, operands[1], operands[2]));
+emit_insn (gen_vec_widen_smult_odd_v8hi (low, operands[1], operands[2]));
emit_insn (gen_vashlv4si3 (shift, high, spu_const(V4SImode, 16)));
emit_insn (gen_selb (result, shift, low, mask));
DONE;
@@ -1482,7 +1482,7 @@
rtx op2 = simplify_gen_subreg (V8HImode, operands[2], V4SImode, 0);
emit_insn (gen_spu_mpyh(a, op1, op2));
emit_insn (gen_spu_mpyh(b, op2, op1));
-emit_insn (gen_spu_mpyu(c, op1, op2));
+emit_insn (gen_vec_widen_umult_odd_v8hi (c, op1, op2));
emit_insn (gen_addv4si3(d, a, b));
emit_insn (gen_addv4si3(operands[0], d, c));
DONE;
@@ -4612,6 +4612,70 @@ selb\t%0,%4,%0,%3"
DONE;
}")
(define_insn "vec_widen_smult_odd_v8hi"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r,r")
(mult:V4SI
(sign_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 1 "spu_reg_operand" "r,r")
(parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)])))
(sign_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 2 "spu_arith_operand" "r,B")
(parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)])))))]
""
"@
mpy\t%0,%1,%2
mpyi\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_insn "vec_widen_umult_odd_v8hi"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r,r")
(mult:V4SI
(zero_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 1 "spu_reg_operand" "r,r")
(parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)])))
(zero_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 2 "spu_arith_operand" "r,B")
(parallel [(const_int 1)(const_int 3)(const_int 5)(const_int 7)])))))]
""
"@
mpyu\t%0,%1,%2
mpyui\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_insn "vec_widen_smult_even_v8hi"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
(mult:V4SI
(sign_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 1 "spu_reg_operand" "r")
(parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)])))
(sign_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 2 "spu_reg_operand" "r")
(parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)])))))]
""
"mpyhh\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_insn "vec_widen_umult_even_v8hi"
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
(mult:V4SI
(zero_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 1 "spu_reg_operand" "r")
(parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)])))
(zero_extend:V4SI
(vec_select:V4HI
(match_operand:V8HI 2 "spu_reg_operand" "r")
(parallel [(const_int 0)(const_int 2)(const_int 4)(const_int 6)])))))]
""
"mpyhhu\t%0,%1,%2"
[(set_attr "type" "fp7")])
(define_expand "vec_widen_umult_hi_v8hi" (define_expand "vec_widen_umult_hi_v8hi"
[(set (match_operand:V4SI 0 "register_operand" "=r") [(set (match_operand:V4SI 0 "register_operand" "=r")
(mult:V4SI (mult:V4SI
@@ -4634,8 +4698,8 @@ selb\t%0,%4,%0,%3"
0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17};
emit_move_insn (mask, array_to_constant (TImode, arr));
-emit_insn (gen_spu_mpyhhu (ve, operands[1], operands[2]));
+emit_insn (gen_vec_widen_umult_even_v8hi (ve, operands[1], operands[2]));
-emit_insn (gen_spu_mpyu (vo, operands[1], operands[2]));
+emit_insn (gen_vec_widen_umult_odd_v8hi (vo, operands[1], operands[2]));
emit_insn (gen_shufb (operands[0], ve, vo, mask));
DONE;
}")
@@ -4662,8 +4726,8 @@ selb\t%0,%4,%0,%3"
0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F};
emit_move_insn (mask, array_to_constant (TImode, arr));
-emit_insn (gen_spu_mpyhhu (ve, operands[1], operands[2]));
+emit_insn (gen_vec_widen_umult_even_v8hi (ve, operands[1], operands[2]));
-emit_insn (gen_spu_mpyu (vo, operands[1], operands[2]));
+emit_insn (gen_vec_widen_umult_odd_v8hi (vo, operands[1], operands[2]));
emit_insn (gen_shufb (operands[0], ve, vo, mask));
DONE;
}")
@@ -4690,8 +4754,8 @@ selb\t%0,%4,%0,%3"
0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17};
emit_move_insn (mask, array_to_constant (TImode, arr));
-emit_insn (gen_spu_mpyhh (ve, operands[1], operands[2]));
+emit_insn (gen_vec_widen_smult_even_v8hi (ve, operands[1], operands[2]));
-emit_insn (gen_spu_mpy (vo, operands[1], operands[2]));
+emit_insn (gen_vec_widen_smult_odd_v8hi (vo, operands[1], operands[2]));
emit_insn (gen_shufb (operands[0], ve, vo, mask));
DONE;
}")
@@ -4718,8 +4782,8 @@ selb\t%0,%4,%0,%3"
0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F};
emit_move_insn (mask, array_to_constant (TImode, arr));
-emit_insn (gen_spu_mpyhh (ve, operands[1], operands[2]));
+emit_insn (gen_vec_widen_smult_even_v8hi (ve, operands[1], operands[2]));
-emit_insn (gen_spu_mpy (vo, operands[1], operands[2]));
+emit_insn (gen_vec_widen_smult_odd_v8hi (vo, operands[1], operands[2]));
emit_insn (gen_shufb (operands[0], ve, vo, mask));
DONE;
}")
......