Commit 5bc7cd8e authored Jan 09, 1997 by Stan Cox
Use SImode instead of HImode if aligned.
From-SVN: r13479
parent 72acf258
Showing 3 changed files with 485 additions and 84 deletions

gcc/config/i386/i386.c    +80  -1
gcc/config/i386/i386.h    +5   -1
gcc/config/i386/i386.md   +400 -82
gcc/config/i386/i386.c
...
@@ -388,6 +388,85 @@ optimization_options (level)
 #endif
 }
+/* Sign-extend a 16-bit constant */
+
+struct rtx_def *
+i386_sext16_if_const (op)
+     struct rtx_def *op;
+{
+  if (GET_CODE (op) == CONST_INT)
+    {
+      HOST_WIDE_INT val = INTVAL (op);
+      HOST_WIDE_INT sext_val;
+
+      if (val & 0x8000)
+        sext_val = val | ~0xffff;
+      else
+        sext_val = val & 0xffff;
+
+      if (sext_val != val)
+        op = GEN_INT (sext_val);
+    }
+  return op;
+}
+/* Return nonzero if the rtx is aligned */
+
+static int
+i386_aligned_reg_p (regno)
+     int regno;
+{
+  return (regno == STACK_POINTER_REGNUM
+          || (! flag_omit_frame_pointer
+              && regno == FRAME_POINTER_REGNUM));
+}
+int
+i386_aligned_p (op)
+     rtx op;
+{
+  /* registers and immediate operands are always "aligned" */
+  if (GET_CODE (op) != MEM)
+    return 1;
+
+  /* Don't even try to do any aligned optimizations with volatiles */
+  if (MEM_VOLATILE_P (op))
+    return 0;
+
+  /* Get address of memory operand */
+  op = XEXP (op, 0);
+
+  switch (GET_CODE (op))
+    {
+    case CONST_INT:
+      if (INTVAL (op) & 3)
+        break;
+      return 1;
+
+      /* match "reg + offset" */
+    case PLUS:
+      if (GET_CODE (XEXP (op, 1)) != CONST_INT)
+        break;
+      if (INTVAL (XEXP (op, 1)) & 3)
+        break;
+      op = XEXP (op, 0);
+      if (GET_CODE (op) != REG)
+        break;
+      /* fall through */
+    case REG:
+      return i386_aligned_reg_p (REGNO (op));
+    }
+  return 0;
+}
+/* Return nonzero if INSN looks like it won't compute useful cc bits
+   as a side effect.  This information is only a hint. */
+
+int
+i386_cc_probably_useless_p (insn)
+     rtx insn;
+{
+  return ! next_cc0_user (insn);
+}
 /* Return nonzero if IDENTIFIER with arguments ARGS is a valid machine specific
    attribute for DECL.  The attributes in ATTRIBUTES have previously been
    assigned to DECL. */
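
[Editor's note: the new helpers above drive the whole commit. i386_sext16_if_const folds a 16-bit immediate into its sign-extended 32-bit form, and i386_aligned_p decides when a HImode operand can safely be widened to SImode. A minimal standalone sketch of the sign-extension arithmetic, with the masks taken from the function above; the test harness itself is illustrative and not part of the commit:

    #include <stdio.h>

    /* Mirror of the i386_sext16_if_const arithmetic on a plain long. */
    static long
    sext16 (long val)
    {
      if (val & 0x8000)
        return val | ~0xffffL;   /* high bit set: extend ones upward */
      return val & 0xffffL;      /* otherwise keep only the low 16 bits */
    }

    int
    main (void)
    {
      printf ("%ld\n", sext16 (0x8000L));  /* -32768 (0xffff8000 in 32 bits) */
      printf ("%ld\n", sext16 (0x1234L));  /* 4660, unchanged */
      return 0;
    }

With the constant in this form, a 32-bit and/or/add on the full register leaves the low 16 bits exactly as the HImode operation would.]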
...
@@ -854,7 +933,7 @@ asm_add (n, x)
     output_asm_insn (AS1 (dec%L0,%0), xops);
   else if (n == 1)
     output_asm_insn (AS1 (inc%L0,%0), xops);
-  else if (n < 0)
+  else if (n < 0 || n == 128)
     {
       xops[1] = GEN_INT (-n);
       output_asm_insn (AS2 (sub%L0,%1,%0), xops);
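
[Editor's note: the new n == 128 case exists because of x86 immediate encodings: add/sub with a sign-extended 8-bit immediate (opcode 0x83) is three bytes, and -128 fits in that imm8 range while +128 does not. A sketch of the two encodings for a register operand (AT&T syntax; %ebx chosen arbitrarily for illustration):

    addl $128,%ebx    /* 81 c3 80 00 00 00 -- needs the imm32 form, 6 bytes */
    subl $-128,%ebx   /* 83 eb 80          -- imm8 form, 3 bytes */

Both leave the register with the same value, so the shorter subtract is preferred.]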
...
gcc/config/i386/i386.h
...
@@ -157,7 +157,8 @@ extern int target_flags;
 #define TARGET_PENTIUMPRO (ix86_cpu == PROCESSOR_PENTIUMPRO)
 #define TARGET_USE_LEAVE (ix86_cpu == PROCESSOR_I386)
 #define TARGET_PUSH_MEMORY (ix86_cpu == PROCESSOR_I386)
-#define TARGET_ZERO_EXTEND_WITH_AND (ix86_cpu != PROCESSOR_I386)
+#define TARGET_ZERO_EXTEND_WITH_AND (ix86_cpu != PROCESSOR_I386 \
+                                     && ix86_cpu != PROCESSOR_PENTIUMPRO)
 #define TARGET_DOUBLE_WITH_ADD (ix86_cpu != PROCESSOR_I386)
 #define TARGET_USE_BIT_TEST (ix86_cpu == PROCESSOR_I386)
 #define TARGET_UNROLL_STRLEN (ix86_cpu != PROCESSOR_I386)
...
@@ -2604,6 +2605,9 @@ do { \
 extern void override_options ();
 extern void order_regs_for_local_alloc ();
 extern char *output_strlen_unroll ();
+extern struct rtx_def *i386_sext16_if_const ();
+extern int i386_aligned_p ();
+extern int i386_cc_probably_useless_p ();
 extern int i386_valid_decl_attribute_p ();
 extern int i386_valid_type_attribute_p ();
 extern int i386_return_pops_args ();
...
gcc/config/i386/i386.md
...
@@ -770,6 +770,12 @@
         }
     }
+
+  /* use 32-bit test instruction if there are no sign issues */
+  if (GET_CODE (operands[1]) == CONST_INT
+      && !(INTVAL (operands[1]) & ~0x7fff)
+      && i386_aligned_p (operands[0]))
+    return AS2 (test%L0,%1,%k0);
+
   if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
     return AS2 (test%W0,%1,%0);
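
[Editor's note: the guard !(INTVAL (operands[1]) & ~0x7fff) ensures every set bit of the mask lies in the low 15 bits, so the 32-bit test computes the same zero flag and its sign flag cannot be polluted by bit 15; i386_aligned_p additionally keeps the wider memory read inside an aligned word. The payoff is avoiding the 16-bit operand-size prefix, which costs an extra decode cycle on Pentium-class CPUs. Encodings for %eax, shown for illustration:

    testw $0x100,%ax    /* 66 a9 00 01    -- 0x66 prefix, 4 bytes */
    testl $0x100,%eax   /* a9 00 01 00 00 -- no prefix, 5 bytes */

The 32-bit form is a byte longer here but decodes without the prefix penalty.]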
...
@@ -931,10 +937,21 @@
   if (REG_P (operands[0]))
     {
-      if (REG_P (operands[1]))
-        return AS2 (mov%L0,%k1,%k0);
-      else if (CONSTANT_P (operands[1]))
-        return AS2 (mov%L0,%1,%k0);
+      if (i386_aligned_p (operands[1]))
+        {
+          operands[1] = i386_sext16_if_const (operands[1]);
+          return AS2 (mov%L0,%k1,%k0);
+        }
+      if (TARGET_PENTIUMPRO)
+        {
+          /* movzwl is faster than movw on the Pentium Pro,
+           * although not as fast as an aligned movl. */
+#ifdef INTEL_SYNTAX
+          return AS2 (movzx,%1,%k0);
+#else
+          return AS2 (movz%W0%L0,%1,%k0);
+#endif
+        }
     }
   return AS2 (mov%W0,%1,%0);
...
@@ -1040,7 +1057,7 @@
 {
   rtx link;
   if (operands[1] == const0_rtx && REG_P (operands[0]))
-    return AS2 (xor%B0,%0,%0);
+    return AS2 (xor%L0,%k0,%k0);
   if (operands[1] == const1_rtx
       && (link = find_reg_note (insn, REG_WAS_0, 0))
...
@@ -1747,7 +1764,10 @@
 {
   xops[0] = operands[0];
   xops[1] = gen_rtx (CONST_INT, VOIDmode, 0xffff);
-  output_asm_insn (AS2 (mov%W0,%1,%w0),operands);
+  if (i386_aligned_p (operands[1]))
+    output_asm_insn (AS2 (mov%L0,%k1,%k0),operands);
+  else
+    output_asm_insn (AS2 (mov%W0,%1,%w0),operands);
   output_asm_insn (AS2 (and%L0,%1,%k0), xops);
   RET;
 }
...
@@ -2752,7 +2772,7 @@
}")
}")
(define_insn "addsidi3_2"
(define_insn "addsidi3_2"
[
(set (match_operand:DI 0 "nonimmediate_operand" "=&r,r,o,&r,!&r,r,o,o,!o")
[
(set (match_operand:DI 0 "nonimmediate_operand" "=&r,r,o,&r,!&r,
&
r,o,o,!o")
(plus:DI (zero_extend:DI (match_operand:SI 2 "general_operand" "o,ri,ri,o,o,ri,ri,i,r"))
(plus:DI (zero_extend:DI (match_operand:SI 2 "general_operand" "o,ri,ri,o,o,ri,ri,i,r"))
(match_operand:DI 1 "general_operand" "0,0,0,iF,ro,roiF,riF,o,o")))
(match_operand:DI 1 "general_operand" "0,0,0,iF,ro,roiF,riF,o,o")))
(clobber (match_scratch:SI 3 "=X,X,X,X,X,X,X,&r,&r"))]
(clobber (match_scratch:SI 3 "=X,X,X,X,X,X,X,&r,&r"))]
...
@@ -2952,6 +2972,18 @@
   if (operands[2] == constm1_rtx)
     return AS1 (dec%L0,%0);

+  /* subl $-128,%ebx is smaller than addl $128,%ebx. */
+  if (GET_CODE (operands[2]) == CONST_INT
+      && INTVAL (operands[2]) == 128)
+    {
+      /* This doesn't compute the carry bit in the same way
+       * as add%L0, but we use inc and dec above and they
+       * don't set the carry bit at all.  If inc/dec don't need
+       * a CC_STATUS_INIT, this doesn't either... */
+      operands[2] = GEN_INT (-128);
+      return AS2 (sub%L0,%2,%0);
+    }
+
   return AS2 (add%L0,%2,%0);
 }")
...
@@ -3003,9 +3035,11 @@
"*
"*
{
{
/* ??? what about offsettable memory references? */
/* ??? what about offsettable memory references? */
if (QI_REG_P (operands[0])
if (!TARGET_PENTIUMPRO /* partial stalls are just too painful to risk. */
&& QI_REG_P (operands[0])
&& GET_CODE (operands[2]) == CONST_INT
&& GET_CODE (operands[2]) == CONST_INT
&& (INTVAL (operands[2]) & 0xff) == 0)
&& (INTVAL (operands[2]) & 0xff) == 0
&& i386_cc_probably_useless_p (insn))
{
{
int byteval = (INTVAL (operands[2]) >> 8) & 0xff;
int byteval = (INTVAL (operands[2]) >> 8) & 0xff;
CC_STATUS_INIT;
CC_STATUS_INIT;
...
@@ -3019,6 +3053,28 @@
       return AS2 (add%B0,%2,%h0);
     }

+  /* Use a 32-bit operation when possible, to avoid the prefix penalty. */
+  if (REG_P (operands[0])
+      && i386_aligned_p (operands[2])
+      && i386_cc_probably_useless_p (insn))
+    {
+      CC_STATUS_INIT;
+      if (GET_CODE (operands[2]) == CONST_INT)
+        {
+          HOST_WIDE_INT intval = 0xffff & INTVAL (operands[2]);
+
+          if (intval == 1)
+            return AS1 (inc%L0,%k0);
+          if (intval == 0xffff)
+            return AS1 (dec%L0,%k0);
+          operands[2] = i386_sext16_if_const (operands[2]);
+        }
+      return AS2 (add%L0,%k2,%k0);
+    }
+
   if (operands[2] == const1_rtx)
     return AS1 (inc%W0,%0);
...
@@ -3246,7 +3302,18 @@
         (minus:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
                   (match_operand:HI 2 "general_operand" "ri,rm")))]
   "ix86_binary_operator_ok (MINUS, HImode, operands)"
-  "* return AS2 (sub%W0,%2,%0);")
+  "*
+{
+  if (REG_P (operands[0])
+      && i386_aligned_p (operands[2])
+      && i386_cc_probably_useless_p (insn))
+    {
+      CC_STATUS_INIT;
+      operands[2] = i386_sext16_if_const (operands[2]);
+      return AS2 (sub%L0,%k2,%k0);
+    }
+  return AS2 (sub%W0,%2,%0);
+}")

 (define_expand "subqi3"
   [(set (match_operand:QI 0 "general_operand" "")
...
@@ -3518,7 +3585,6 @@
 ;; The `r' in `rm' for operand 3 looks redundant, but it causes
 ;; optional reloads to be generated if op 3 is a pseudo in a stack slot.
-;; ??? What if we only change one byte of an offsettable memory reference?
 (define_insn "andsi3"
   [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
         (and:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
...
@@ -3526,6 +3592,7 @@
""
""
"*
"*
{
{
HOST_WIDE_INT intval;
if (!rtx_equal_p (operands[0], operands[1])
if (!rtx_equal_p (operands[0], operands[1])
&& rtx_equal_p (operands[0], operands[2]))
&& rtx_equal_p (operands[0], operands[2]))
{
{
...
@@ -3534,10 +3601,14 @@
       operands[1] = operands[2];
       operands[2] = tmp;
     }

-  if (GET_CODE (operands[2]) == CONST_INT
-      && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+  switch (GET_CODE (operands[2]))
     {
-      if (INTVAL (operands[2]) == 0xffff && REG_P (operands[0])
+    case CONST_INT:
+      if (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+        break;
+      intval = INTVAL (operands[2]);
+      /* zero-extend 16->32? */
+      if (intval == 0xffff && REG_P (operands[0])
           && (! REG_P (operands[1])
               || REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0)
           && (!TARGET_ZERO_EXTEND_WITH_AND || ! rtx_equal_p (operands[0], operands[1])))
...
@@ -3552,7 +3623,8 @@
 #endif
         }

-      if (INTVAL (operands[2]) == 0xff && REG_P (operands[0])
+      /* zero extend 8->32? */
+      if (intval == 0xff && REG_P (operands[0])
           && !(REG_P (operands[1]) && NON_QI_REG_P (operands[1]))
           && (! REG_P (operands[1])
               || REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0)
...
@@ -3568,39 +3640,99 @@
 #endif
         }

-      if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff) == 0)
-        {
-          CC_STATUS_INIT;
-
-          if (INTVAL (operands[2]) == 0xffffff00)
-            {
-              operands[2] = const0_rtx;
-              return AS2 (mov%B0,%2,%b0);
-            }
-
-          operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
-          return AS2 (and%B0,%2,%b0);
-        }
+      /* Check partial bytes.. non-QI-regs are not available */
+      if (REG_P (operands[0]) && ! QI_REG_P (operands[0]))
+        break;
+
+      /* only low byte has zero bits? */
+      if (~(intval | 0xff) == 0)
+        {
+          intval &= 0xff;
+          if (REG_P (operands[0]))
+            {
+              if (intval == 0)
+                {
+                  CC_STATUS_INIT;
+                  return AS2 (xor%B0,%b0,%b0);
+                }
+              /* we're better off with the 32-bit version if reg != EAX */
+              /* the value is sign-extended in 8 bits */
+              if (REGNO (operands[0]) != 0 && (intval & 0x80))
+                break;
+            }
+          CC_STATUS_INIT;
+          operands[2] = GEN_INT (intval);
+          if (intval == 0)
+            return AS2 (mov%B0,%2,%b0);
+          return AS2 (and%B0,%2,%b0);
+        }

-      if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff00) == 0)
-        {
-          CC_STATUS_INIT;
-
-          if (INTVAL (operands[2]) == 0xffff00ff)
-            {
-              operands[2] = const0_rtx;
-              return AS2 (mov%B0,%2,%h0);
-            }
-
-          operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
-          return AS2 (and%B0,%2,%h0);
-        }
+      /* only second byte has zero? */
+      if (~(intval | 0xff00) == 0)
+        {
+          CC_STATUS_INIT;
+          intval = (intval >> 8) & 0xff;
+          operands[2] = GEN_INT (intval);
+          if (intval == 0)
+            {
+              if (REG_P (operands[0]))
+                return AS2 (xor%B0,%h0,%h0);
+              operands[0] = adj_offsettable_operand (operands[0], 1);
+              return AS2 (mov%B0,%2,%b0);
+            }
+          if (REG_P (operands[0]))
+            return AS2 (and%B0,%2,%h0);
+          operands[0] = adj_offsettable_operand (operands[0], 1);
+          return AS2 (and%B0,%2,%b0);
+        }

-      if (GET_CODE (operands[0]) == MEM && INTVAL (operands[2]) == 0xffff0000)
-        {
-          operands[2] = const0_rtx;
-          return AS2 (mov%W0,%2,%w0);
-        }
+      if (REG_P (operands[0]))
+        break;
+
+      /* third byte has zero bits? */
+      if (~(intval | 0xff0000) == 0)
+        {
+          intval = (intval >> 16) & 0xff;
+          operands[0] = adj_offsettable_operand (operands[0], 2);
+        byte_and_operation:
+          CC_STATUS_INIT;
+          operands[2] = GEN_INT (intval);
+          if (intval == 0)
+            return AS2 (mov%B0,%2,%b0);
+          return AS2 (and%B0,%2,%b0);
+        }
+
+      /* fourth byte has zero bits? */
+      if (~(intval | 0xff000000) == 0)
+        {
+          intval = (intval >> 24) & 0xff;
+          operands[0] = adj_offsettable_operand (operands[0], 3);
+          goto byte_and_operation;
+        }
+
+      /* Low word is zero? */
+      if (intval == 0xffff0000)
+        {
+        word_zero_and_operation:
+          CC_STATUS_INIT;
+          operands[2] = const0_rtx;
+          return AS2 (mov%W0,%2,%w0);
+        }
+
+      /* High word is zero? */
+      if (intval == 0x0000ffff)
+        {
+          operands[0] = adj_offsettable_operand (operands[0], 2);
+          goto word_zero_and_operation;
+        }
     }

   return AS2 (and%L0,%2,%0);
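
[Editor's note: a worked instance of the byte narrowing for a memory destination; the address 4(%esi) is hypothetical, chosen only for illustration. With mask 0xff00ffff the only zeroed bits fall in the third byte, so ~(intval | 0xff0000) == 0 holds, intval becomes (0xff00ffff >> 16) & 0xff = 0, and the 32-bit and collapses to a one-byte store at offset 2:

    andl $0xff00ffff,4(%esi)   /* before: 32-bit op with imm32 */
    movb $0,6(%esi)            /* after: clear just the third byte */

For registers other than %eax the byte path is skipped when the low byte has its high bit set, because the full 32-bit mask then fits the sign-extended imm8 form of andl and is just as short without touching a partial register.]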
...
@@ -3647,6 +3779,38 @@
           operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
           return AS2 (and%B0,%2,%h0);
         }
+
+      /* use 32-bit ops on registers when there are no sign issues.. */
+      if (REG_P (operands[0]))
+        {
+          if (!(INTVAL (operands[2]) & ~0x7fff))
+            return AS2 (and%L0,%2,%k0);
+        }
     }
+
+  if (REG_P (operands[0])
+      && i386_aligned_p (operands[2]))
+    {
+      CC_STATUS_INIT;
+
+      /* If op[2] is constant, we should zero-extend it and */
+      /* make a note that op[0] has been zero-extended, so  */
+      /* that we could use 32-bit ops on it forthwith, but  */
+      /* there is no such reg-note available. Instead we do */
+      /* a sign extension as that can result in shorter asm */
+      operands[2] = i386_sext16_if_const (operands[2]);
+      return AS2 (and%L0,%k2,%k0);
+    }
+
+  /* Use a 32-bit word with the upper bits set, invalidate CC */
+  if (GET_CODE (operands[2]) == CONST_INT
+      && i386_aligned_p (operands[0]))
+    {
+      HOST_WIDE_INT val = INTVAL (operands[2]);
+
+      CC_STATUS_INIT;
+      val |= ~0xffff;
+      if (val != INTVAL (operands[2]))
+        operands[2] = GEN_INT (val);
+      return AS2 (and%L0,%k2,%k0);
+    }
   return AS2 (and%W0,%2,%0);
...
@@ -3685,7 +3849,10 @@
 ;;- Bit set (inclusive or) instructions

-;; ??? What if we only change one byte of an offsettable memory reference?
+;; This optimizes known byte-wide operations to memory, and in some cases
+;; to QI registers.. Note that we don't want to use the QI registers too
+;; aggressively, because often the 32-bit register instruction is the same
+;; size, and likely to be faster on PentiumPro.
 (define_insn "iorsi3"
   [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
         (ior:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
...
@@ -3693,29 +3860,76 @@
""
""
"*
"*
{
{
if (GET_CODE (operands[2]) == CONST_INT
HOST_WIDE_INT intval;
&& ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])
))
switch (GET_CODE (operands[2]
))
{
{
if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
case CONST_INT:
&& (INTVAL (operands[2]) & ~0xff) == 0)
{
CC_STATUS_INIT;
if (INTVAL (operands[2]) == 0xff)
if (REG_P (operands[0]) && ! QI_REG_P (operands[0]))
return AS2 (mov%B0,%2,%b0);
break;
/* don't try to optimize volatile accesses */
if (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
break;
intval = INTVAL (operands[2]);
if ((intval & ~0xff) == 0)
{
if (REG_P (operands[0]))
{
/* Do low byte access only for %eax or when high bit is set */
if (REGNO (operands[0]) != 0 && !(intval & 0x80))
break;
}
return AS2 (or%B0,%2,%b0);
byte_or_operation:
CC_STATUS_INIT;
if (intval != INTVAL (operands[2]))
operands[2] = GEN_INT (intval);
if (intval == 0xff)
return AS2 (mov%B0,%2,%b0);
return AS2 (or%B0,%2,%b0);
}
}
if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0)
/* second byte? */
if ((intval & ~0xff00) == 0)
{
{
CC_STATUS_INIT;
intval >>= 8;
operands[2] = GEN_INT (INTVAL (operands[2]) >> 8);
if (INTVAL (operands[2]) == 0xff)
if (REG_P (operands[0]))
return AS2 (mov%B0,%2,%h0);
{
CC_STATUS_INIT;
operands[2] = GEN_INT (intval);
if (intval == 0xff)
return AS2 (mov%B0,%2,%h0);
return AS2 (or%B0,%2,%h0);
return AS2 (or%B0,%2,%h0);
}
operands[0] = adj_offsettable_operand (operands[0], 1);
goto byte_or_operation;
}
if (REG_P (operands[0]))
break;
/* third byte? */
if ((intval & ~0xff0000) == 0)
{
intval >>= 16;
operands[0] = adj_offsettable_operand (operands[0], 2);
goto byte_or_operation;
}
/* fourth byte? */
if ((intval & ~0xff000000) == 0)
{
intval = (intval >> 24) & 0xff;
operands[0] = adj_offsettable_operand (operands[0], 3);
goto byte_or_operation;
}
}
}
}
...
@@ -3729,38 +3943,77 @@
""
""
"*
"*
{
{
if (GET_CODE (operands[2]) == CONST_INT
HOST_WIDE_INT intval;
&& ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])
))
switch (GET_CODE (operands[2]
))
{
{
/* Can we ignore the upper byte? */
case CONST_INT:
if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
&& (INTVAL (operands[2]) & 0xff00) == 0)
{
CC_STATUS_INIT;
if (INTVAL (operands[2]) & 0xffff0000)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
if (INTVAL (operands[2]) == 0xff)
if (REG_P (operands[0]) && ! QI_REG_P (operands[0]))
return AS2 (mov%B0,%2,%b0);
break;
/* don't try to optimize volatile accesses */
if (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
break;
intval = 0xffff & INTVAL (operands[2]);
if ((intval & 0xff00) == 0)
{
if (REG_P (operands[0]))
{
/* Do low byte access only for %eax or when high bit is set */
if (REGNO (operands[0]) != 0 && !(intval & 0x80))
break;
}
byte_or_operation:
CC_STATUS_INIT;
if (intval == 0xff)
return AS2 (mov%B0,%2,%b0);
return AS2 (or%B0,%2,%b0);
return AS2 (or%B0,%2,%b0);
}
}
/* Can we ignore the lower byte? */
/* high byte? */
/* ??? what about offsettable memory references? */
if ((intval & 0xff) == 0)
if (QI_REG_P (operands[0])
&& (INTVAL (operands[2]) & 0xff) == 0)
{
{
CC_STATUS_INIT
;
intval >>= 8
;
operands[2] = GEN_INT (
(INTVAL (operands[2]) >> 8) & 0xff
);
operands[2] = GEN_INT (
intval
);
if (INTVAL (operands[2]) == 0xff)
if (REG_P (operands[0]))
return AS2 (mov%B0,%2,%h0);
{
CC_STATUS_INIT;
if (intval == 0xff)
return AS2 (mov%B0,%2,%h0);
return AS2 (or%B0,%2,%h0);
}
operands[0] = adj_offsettable_operand (operands[0], 1);
return AS2 (or%B0,%2,%h0)
;
goto byte_or_operation
;
}
}
}
}
if (REG_P (operands[0])
&& i386_aligned_p (operands[2]))
{
CC_STATUS_INIT;
operands[2] = i386_sext16_if_const (operands[2]);
return AS2 (or%L0,%k2,%k0);
}
if (GET_CODE (operands[2]) == CONST_INT
&& i386_aligned_p (operands[0]))
{
CC_STATUS_INIT;
intval = 0xffff & INTVAL (operands[2]);
if (intval != INTVAL (operands[2]))
operands[2] = GEN_INT (intval);
return AS2 (or%L0,%2,%k0);
}
return AS2 (or%W0,%2,%0);
return AS2 (or%W0,%2,%0);
}")
}")
...
@@ -3773,7 +4026,6 @@
 ;;- xor instructions

-;; ??? What if we only change one byte of an offsettable memory reference?
 (define_insn "xorsi3"
   [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
         (xor:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
...
@@ -3781,29 +4033,76 @@
""
""
"*
"*
{
{
if (GET_CODE (operands[2]) == CONST_INT
HOST_WIDE_INT intval;
&& ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])
))
switch (GET_CODE (operands[2]
))
{
{
if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
case CONST_INT:
&& (INTVAL (operands[2]) & ~0xff) == 0)
{
CC_STATUS_INIT;
if (INTVAL (operands[2]) == 0xff
)
if (REG_P (operands[0]) && ! QI_REG_P (operands[0])
)
return AS1 (not%B0,%b0)
;
break
;
return AS2 (xor%B0,%2,%b0);
/* don't try to optimize volatile accesses */
if (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
break;
intval = INTVAL (operands[2]);
if ((intval & ~0xff) == 0)
{
if (REG_P (operands[0]))
{
/* Do low byte access only for %eax or when high bit is set */
if (REGNO (operands[0]) != 0 && !(intval & 0x80))
break;
}
byte_xor_operation:
CC_STATUS_INIT;
if (intval == 0xff)
return AS1 (not%B0,%b0);
if (intval != INTVAL (operands[2]))
operands[2] = GEN_INT (intval);
return AS2 (xor%B0,%2,%b0);
}
}
if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0)
/* second byte? */
if ((intval & ~0xff00) == 0)
{
{
CC_STATUS_INIT;
intval >>= 8;
operands[2] = GEN_INT (INTVAL (operands[2]) >> 8);
if (INTVAL (operands[2]) == 0xff)
if (REG_P (operands[0]))
return AS1 (not%B0,%h0);
{
CC_STATUS_INIT;
if (intval == 0xff)
return AS1 (not%B0,%h0);
return AS2 (xor%B0,%2,%h0);
operands[2] = GEN_INT (intval);
return AS2 (xor%B0,%2,%h0);
}
operands[0] = adj_offsettable_operand (operands[0], 1);
goto byte_xor_operation;
}
if (REG_P (operands[0]))
break;
/* third byte? */
if ((intval & ~0xff0000) == 0)
{
intval >>= 16;
operands[0] = adj_offsettable_operand (operands[0], 2);
goto byte_xor_operation;
}
/* fourth byte? */
if ((intval & ~0xff000000) == 0)
{
intval = (intval >> 24) & 0xff;
operands[0] = adj_offsettable_operand (operands[0], 3);
goto byte_xor_operation;
}
}
}
}
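
[Editor's note: one detail worth noting in the byte path above: xor with 0xff is a complement, and not needs no immediate byte, so the emitted form is one byte shorter. Encodings for %bl, shown for illustration:

    xorb $0xff,%bl   /* 80 f3 ff -- 3 bytes */
    notb %bl         /* f6 d3    -- 2 bytes */

Unlike xor, not also leaves EFLAGS untouched, so the cc status must be invalidated either way, which is what the CC_STATUS_INIT before the return does.]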
...
@@ -3849,6 +4148,25 @@
         }
     }

+  if (REG_P (operands[0])
+      && i386_aligned_p (operands[2]))
+    {
+      CC_STATUS_INIT;
+      operands[2] = i386_sext16_if_const (operands[2]);
+      return AS2 (xor%L0,%k2,%k0);
+    }
+
+  if (GET_CODE (operands[2]) == CONST_INT
+      && i386_aligned_p (operands[0]))
+    {
+      HOST_WIDE_INT intval;
+
+      CC_STATUS_INIT;
+      intval = 0xffff & INTVAL (operands[2]);
+      if (intval != INTVAL (operands[2]))
+        operands[2] = GEN_INT (intval);
+      return AS2 (xor%L0,%2,%k0);
+    }
   return AS2 (xor%W0,%2,%0);
 }")
...