Commit b1ec3c92 authored Jul 06, 1992 by Charles Hannum
entered into RCS
From-SVN: r1472
parent 5f4f0e22
Showing 5 changed files with 245 additions and 230 deletions
gcc/explow.c       +37  -40
gcc/expmed.c       +122 -116
gcc/global.c       +40  -31
gcc/local-alloc.c  +7   -6
gcc/optabs.c       +39  -37
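Nearly every hunk below applies the same three substitutions: int variables that hold CONST_INT contents become HOST_WIDE_INT, calls of the form gen_rtx (CONST_INT, VOIDmode, c) become GEN_INT (c), and rtx arguments passed as a literal 0 are spelled NULL_RTX. For orientation only, here is a minimal, self-contained C sketch of the new idiom; the typedef, struct, and macro below are illustrative stand-ins and assumptions, not the real GCC definitions (in GCC, GEN_INT wraps gen_rtx and HOST_WIDE_INT is the widest host integer type).

/* Minimal stand-alone sketch of the idiom this commit converts to.
   Everything here is a stand-in chosen so the sketch compiles on its own.  */
#include <stdio.h>

typedef long long HOST_WIDE_INT;                /* assumed widest host integer */
#define HOST_BITS_PER_WIDE_INT (8 * (int) sizeof (HOST_WIDE_INT))

struct rtx_sketch { HOST_WIDE_INT val; };       /* stand-in for a CONST_INT rtx */

static struct rtx_sketch
gen_const_int (HOST_WIDE_INT c)
{
  struct rtx_sketch r;
  r.val = c;
  return r;
}

/* New style: one argument, already widened to HOST_WIDE_INT.  */
#define GEN_INT(C) gen_const_int ((HOST_WIDE_INT) (C))

int
main (void)
{
  /* A value that does not fit in a 32-bit int but fits in HOST_WIDE_INT.  */
  HOST_WIDE_INT c = (HOST_WIDE_INT) 1 << 40;
  struct rtx_sketch x = GEN_INT (c);    /* was gen_rtx (CONST_INT, VOIDmode, c) */

  printf ("%lld stored in a %d-bit HOST_WIDE_INT\n",
          x.val, HOST_BITS_PER_WIDE_INT);
  return 0;
}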
gcc/explow.c

@@ -29,12 +29,14 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
 #include "insn-flags.h"
 #include "insn-codes.h"
 
-/* Return an rtx for the sum of X and the integer C.  */
+/* Return an rtx for the sum of X and the integer C.
+   This fucntion should be used via the `plus_constant' macro.  */
 
 rtx
-plus_constant (x, c)
+plus_constant_wide (x, c)
      register rtx x;
-     register int c;
+     register HOST_WIDE_INT c;
 {
   register RTX_CODE code;
   register enum machine_mode mode;
@@ -51,15 +53,15 @@ plus_constant (x, c)
   switch (code)
     {
     case CONST_INT:
-      return gen_rtx (CONST_INT, VOIDmode, (INTVAL (x) + c));
+      return GEN_INT (INTVAL (x) + c);
 
     case CONST_DOUBLE:
       {
-	int l1 = CONST_DOUBLE_LOW (x);
-	int h1 = CONST_DOUBLE_HIGH (x);
-	int l2 = c;
-	int h2 = c < 0 ? ~0 : 0;
-	int lv, hv;
+	HOST_WIDE_INT l1 = CONST_DOUBLE_LOW (x);
+	HOST_WIDE_INT h1 = CONST_DOUBLE_HIGH (x);
+	HOST_WIDE_INT l2 = c;
+	HOST_WIDE_INT h2 = c < 0 ? ~0 : 0;
+	HOST_WIDE_INT lv, hv;
 
 	add_double (l1, h1, l2, h2, &lv, &hv);
@@ -117,7 +119,7 @@ plus_constant (x, c)
     }
 
   if (c != 0)
-    x = gen_rtx (PLUS, mode, x, gen_rtx (CONST_INT, VOIDmode, c));
+    x = gen_rtx (PLUS, mode, x, GEN_INT (c));
 
   if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
     return x;
@@ -127,12 +129,14 @@ plus_constant (x, c)
   return x;
 }
 
-/* This is the same as `plus_constant', except that it handles LO_SUM.  */
+/* This is the same as `plus_constant', except that it handles LO_SUM.
+   This function should be used via the `plus_constant_for_output' macro.  */
 
 rtx
-plus_constant_for_output (x, c)
+plus_constant_for_output_wide (x, c)
      register rtx x;
-     register int c;
+     register HOST_WIDE_INT c;
 {
   register RTX_CODE code = GET_CODE (x);
   register enum machine_mode mode = GET_MODE (x);
@@ -239,7 +243,7 @@ expr_size (exp)
      tree exp;
 {
   return expand_expr (size_in_bytes (TREE_TYPE (exp)),
-		      0, TYPE_MODE (sizetype), 0);
+		      NULL_RTX, TYPE_MODE (sizetype), 0);
 }
 
 /* Return a copy of X in which all memory references
@@ -377,15 +381,15 @@ memory_address (mode, x)
	  rtx y = eliminate_constant_term (x, &constant_term);
	  if (constant_term == const0_rtx
	      || ! memory_address_p (mode, y))
-	    return force_operand (x, 0);
+	    return force_operand (x, NULL_RTX);
 
	  y = gen_rtx (PLUS, GET_MODE (x), copy_to_reg (y), constant_term);
	  if (! memory_address_p (mode, y))
-	    return force_operand (x, 0);
+	    return force_operand (x, NULL_RTX);
	  return y;
	}
 
       if (GET_CODE (x) == MULT || GET_CODE (x) == MINUS)
-	return force_operand (x, 0);
+	return force_operand (x, NULL_RTX);
 
       /* If we have a register that's an invalid address,
	 it must be a hard reg of the wrong class.  Copy it to a pseudo.  */
@@ -408,7 +412,7 @@ memory_address (mode, x)
       if (general_operand (x, Pmode))
	return force_reg (Pmode, x);
       else
-	return force_operand (x, 0);
+	return force_operand (x, NULL_RTX);
     }
   return x;
 }
@@ -550,7 +554,7 @@ force_reg (mode, x)
      and that X can be substituted for it.  */
   if (CONSTANT_P (x))
     {
-      rtx note = find_reg_note (insn, REG_EQUAL, 0);
+      rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
 
       if (note)
	XEXP (note, 0) = x;
@@ -662,16 +666,13 @@ round_push (size)
     {
       int new = (INTVAL (size) + align - 1) / align * align;
       if (INTVAL (size) != new)
-	size = gen_rtx (CONST_INT, VOIDmode, new);
+	size = GEN_INT (new);
     }
   else
     {
-      size = expand_divmod (0, CEIL_DIV_EXPR, Pmode, size,
-			    gen_rtx (CONST_INT, VOIDmode, align),
-			    0, 1);
-      size = expand_mult (Pmode, size,
-			  gen_rtx (CONST_INT, VOIDmode, align), 0, 1);
+      size = expand_divmod (0, CEIL_DIV_EXPR, Pmode, size, GEN_INT (align),
+			    NULL_RTX, 1);
+      size = expand_mult (Pmode, size, GEN_INT (align), NULL_RTX, 1);
     }
 #endif /* STACK_BOUNDARY */
   return size;
@@ -867,14 +868,12 @@ allocate_dynamic_stack_space (size, target, known_align)
   if (known_align % BIGGEST_ALIGNMENT != 0)
     {
       if (GET_CODE (size) == CONST_INT)
-	size = gen_rtx (CONST_INT, VOIDmode,
-			(INTVAL (size) + (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1)));
+	size = GEN_INT (INTVAL (size) + (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1));
       else
	size = expand_binop (Pmode, add_optab, size,
-			     gen_rtx (CONST_INT, VOIDmode,
-				      BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1),
-			     0, 1, OPTAB_LIB_WIDEN);
+			     GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1),
+			     NULL_RTX, 1, OPTAB_LIB_WIDEN);
     }
 #endif
@@ -887,9 +886,9 @@ allocate_dynamic_stack_space (size, target, known_align)
     {
       rtx dynamic_offset
	= expand_binop (Pmode, sub_optab, virtual_stack_dynamic_rtx,
-			stack_pointer_rtx, 0, 1, OPTAB_LIB_WIDEN);
+			stack_pointer_rtx, NULL_RTX, 1, OPTAB_LIB_WIDEN);
 
       size = expand_binop (Pmode, add_optab, size, dynamic_offset,
-			   0, 1, OPTAB_LIB_WIDEN);
+			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
     }
 #endif /* SETJMP_VIA_SAVE_AREA */
@@ -953,14 +952,12 @@ allocate_dynamic_stack_space (size, target, known_align)
   if (known_align % BIGGEST_ALIGNMENT != 0)
     {
       target = expand_divmod (0, CEIL_DIV_EXPR, Pmode, target,
-			      gen_rtx (CONST_INT, VOIDmode,
-				       BIGGEST_ALIGNMENT / BITS_PER_UNIT),
-			      0, 1);
+			      GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT),
+			      NULL_RTX, 1);
       target = expand_mult (Pmode, target,
-			    gen_rtx (CONST_INT, VOIDmode,
-				     BIGGEST_ALIGNMENT / BITS_PER_UNIT),
-			    0, 1);
+			    GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT),
+			    NULL_RTX, 1);
     }
 #endif
gcc/expmed.c

@@ -62,25 +62,24 @@ init_expmed ()
   /* This is "some random pseudo register" for purposes of calling recog
      to see what insns exist.  */
   rtx reg = gen_rtx (REG, word_mode, FIRST_PSEUDO_REGISTER);
-  rtx pow2 = gen_rtx (CONST_INT, VOIDmode, 32);
+  rtx pow2 = GEN_INT (32);
   rtx lea;
-  int i, dummy;
+  HOST_WIDE_INT i;
+  int dummy;
 
   add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);
   shift_cost = rtx_cost (gen_rtx (LSHIFT, word_mode, reg,
				  /* Using a constant gives better
				     estimate of typical costs.
				     1 or 2 might have quirks.  */
-				  gen_rtx (CONST_INT, VOIDmode, 3)), SET);
+				  GEN_INT (3)), SET);
   mult_cost = rtx_cost (gen_rtx (MULT, word_mode, reg, reg), SET);
   negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg), SET);
 
   /* 999999 is chosen to avoid any plausible faster special case.  */
   mult_is_very_cheap
-    = (rtx_cost (gen_rtx (MULT, word_mode, reg,
-			  gen_rtx (CONST_INT, VOIDmode, 999999)), SET)
-       < rtx_cost (gen_rtx (LSHIFT, word_mode, reg,
-			    gen_rtx (CONST_INT, VOIDmode, 7)), SET));
+    = (rtx_cost (gen_rtx (MULT, word_mode, reg, GEN_INT (999999)), SET)
+       < rtx_cost (gen_rtx (LSHIFT, word_mode, reg, GEN_INT (7)), SET));
 
   sdiv_pow2_cheap
     = rtx_cost (gen_rtx (DIV, word_mode, reg, pow2), SET) <= 2 * add_cost;
@@ -92,8 +91,7 @@ init_expmed ()
     {
       lea = gen_rtx (SET, VOIDmode, reg,
		     gen_rtx (PLUS, word_mode,
-			      gen_rtx (MULT, word_mode, reg,
-				       gen_rtx (CONST_INT, VOIDmode, i)),
+			      gen_rtx (MULT, word_mode, reg, GEN_INT (i)),
			      reg));
       /* Using 0 as second argument is not quite right,
	 but what else is there to do?  */
@@ -118,19 +116,19 @@ negate_rtx (mode, x)
 {
   if (GET_CODE (x) == CONST_INT)
     {
-      int val = - INTVAL (x);
-      if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_INT)
+      HOST_WIDE_INT val = - INTVAL (x);
+      if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_WIDE_INT)
	{
	  /* Sign extend the value from the bits that are significant.  */
-	  if (val & (1 << (GET_MODE_BITSIZE (mode) - 1)))
-	    val |= (-1) << GET_MODE_BITSIZE (mode);
+	  if (val & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
+	    val |= (HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (mode);
	  else
-	    val &= (1 << GET_MODE_BITSIZE (mode)) - 1;
+	    val &= ((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (mode)) - 1;
	}
-      return gen_rtx (CONST_INT, VOIDmode, val);
+      return GEN_INT (val);
     }
   else
-    return expand_unop (GET_MODE (x), neg_optab, x, 0, 0);
+    return expand_unop (GET_MODE (x), neg_optab, x, NULL_RTX, 0);
 }
 
 /* Generate code to store value from rtx VALUE
@@ -425,10 +423,7 @@ store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
	    (value1, maxmode)))
	value1 = force_reg (maxmode, value1);
 
-      pat = gen_insv (xop0,
-		      gen_rtx (CONST_INT, VOIDmode, bitsize),
-		      gen_rtx (CONST_INT, VOIDmode, xbitpos),
-		      value1);
+      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
       if (pat)
	emit_insn (pat);
       else
@@ -546,15 +541,16 @@ store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
   if (GET_CODE (value) == CONST_INT)
     {
-      register int v = INTVAL (value);
+      register HOST_WIDE_INT v = INTVAL (value);
 
-      if (bitsize < HOST_BITS_PER_INT)
-	v &= (1 << bitsize) - 1;
+      if (bitsize < HOST_BITS_PER_WIDE_INT)
+	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
 
       if (v == 0)
	all_zero = 1;
-      else if ((bitsize < HOST_BITS_PER_INT && v == (1 << bitsize) - 1)
-	       || (bitsize == HOST_BITS_PER_INT && v == -1))
+      else if ((bitsize < HOST_BITS_PER_WIDE_INT
+		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
+	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;
 
       value = lshift_value (mode, value, bitpos, bitsize);
@@ -587,10 +583,10 @@ store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
       if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
-			      0, 1, OPTAB_LIB_WIDEN);
+			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
       if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
-			      build_int_2 (bitpos, 0), 0, 1);
+			      build_int_2 (bitpos, 0), NULL_RTX, 1);
     }
 
   /* Now clear the chosen bits in OP0,
@@ -655,35 +651,33 @@ store_split_bit_field (op0, bitsize, bitpos, value, align)
   /* PART1 gets the more significant part.  */
   if (GET_CODE (value) == CONST_INT)
     {
-      part1 = gen_rtx (CONST_INT, VOIDmode,
-		       (unsigned) (INTVAL (value)) >> bitsize_2);
-      part2 = gen_rtx (CONST_INT, VOIDmode,
-		       (unsigned) (INTVAL (value)) & ((1 << bitsize_2) - 1));
+      part1 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsize_2);
+      part2 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value))
+		       & (((HOST_WIDE_INT) 1 << bitsize_2) - 1));
     }
   else
     {
       part1 = extract_fixed_bit_field (word_mode, value, 0, bitsize_1,
-				       BITS_PER_WORD - bitsize, 0, 1,
-				       BITS_PER_WORD);
+				       BITS_PER_WORD - bitsize, NULL_RTX, 1,
+				       BITS_PER_WORD);
       part2 = extract_fixed_bit_field (word_mode, value, 0, bitsize_2,
-				       BITS_PER_WORD - bitsize_2, 0, 1,
-				       BITS_PER_WORD);
+				       BITS_PER_WORD - bitsize_2, NULL_RTX, 1,
+				       BITS_PER_WORD);
     }
 #else
   /* PART1 gets the less significant part.  */
   if (GET_CODE (value) == CONST_INT)
     {
-      part1 = gen_rtx (CONST_INT, VOIDmode,
-		       (unsigned) (INTVAL (value)) & ((1 << bitsize_1) - 1));
-      part2 = gen_rtx (CONST_INT, VOIDmode,
-		       (unsigned) (INTVAL (value)) >> bitsize_1);
+      part1 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value))
+		       & (((HOST_WIDE_INT) 1 << bitsize_1) - 1));
+      part2 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsize_1);
     }
   else
     {
       part1 = extract_fixed_bit_field (word_mode, value, 0, bitsize_1, 0,
-				       0, 1, BITS_PER_WORD);
+				       NULL_RTX, 1, BITS_PER_WORD);
       part2 = extract_fixed_bit_field (word_mode, value, 0, bitsize_2,
-				       bitsize_1, 0, 1, BITS_PER_WORD);
+				       bitsize_1, NULL_RTX, 1, BITS_PER_WORD);
     }
 #endif
@@ -989,8 +983,8 @@ extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
	    (xtarget, maxmode)))
	xtarget = gen_reg_rtx (maxmode);
 
-      bitsize_rtx = gen_rtx (CONST_INT, VOIDmode, bitsize);
-      bitpos_rtx = gen_rtx (CONST_INT, VOIDmode, xbitpos);
+      bitsize_rtx = GEN_INT (bitsize);
+      bitpos_rtx = GEN_INT (xbitpos);
       pat = gen_extzv (protect_from_queue (xtarget, 1),
		       xop0, bitsize_rtx, bitpos_rtx);
@@ -1121,8 +1115,8 @@ extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
	    (xtarget, maxmode)))
	xtarget = gen_reg_rtx (maxmode);
 
-      bitsize_rtx = gen_rtx (CONST_INT, VOIDmode, bitsize);
-      bitpos_rtx = gen_rtx (CONST_INT, VOIDmode, xbitpos);
+      bitsize_rtx = GEN_INT (bitsize);
+      bitpos_rtx = GEN_INT (xbitpos);
       pat = gen_extv (protect_from_queue (xtarget, 1),
		      xop0, bitsize_rtx, bitpos_rtx);
@@ -1327,23 +1321,25 @@ mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
 {
-  int masklow, maskhigh;
+  HOST_WIDE_INT masklow, maskhigh;
 
-  if (bitpos < HOST_BITS_PER_INT)
-    masklow = -1 << bitpos;
+  if (bitpos < HOST_BITS_PER_WIDE_INT)
+    masklow = (HOST_WIDE_INT) -1 << bitpos;
   else
     masklow = 0;
 
-  if (bitpos + bitsize < HOST_BITS_PER_INT)
-    masklow &= (unsigned) -1 >> (HOST_BITS_PER_INT - bitpos - bitsize);
+  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
+    masklow &= ((unsigned HOST_WIDE_INT) -1
+		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
 
-  if (bitpos <= HOST_BITS_PER_INT)
+  if (bitpos <= HOST_BITS_PER_WIDE_INT)
     maskhigh = -1;
   else
-    maskhigh = -1 << (bitpos - HOST_BITS_PER_INT);
+    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
 
-  if (bitpos + bitsize > HOST_BITS_PER_INT)
-    maskhigh &= (unsigned) -1 >> (2 * HOST_BITS_PER_INT - bitpos - bitsize);
+  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
+    maskhigh &= ((unsigned HOST_WIDE_INT) -1
+		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
   else
     maskhigh = 0;
@@ -1365,21 +1361,21 @@ lshift_value (mode, value, bitpos, bitsize)
     rtx value;
     int bitpos, bitsize;
 {
-  unsigned v = INTVAL (value);
-  int low, high;
+  unsigned HOST_WIDE_INT v = INTVAL (value);
+  HOST_WIDE_INT low, high;
 
-  if (bitsize < HOST_BITS_PER_INT)
-    v &= ~(-1 << bitsize);
+  if (bitsize < HOST_BITS_PER_WIDE_INT)
+    v &= ~((HOST_WIDE_INT) -1 << bitsize);
 
-  if (bitpos < HOST_BITS_PER_INT)
+  if (bitpos < HOST_BITS_PER_WIDE_INT)
     {
       low = v << bitpos;
-      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_INT - bitpos)) : 0);
+      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
     }
   else
     {
       low = 0;
-      high = v << (bitpos - HOST_BITS_PER_INT);
+      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
     }
 
   return immed_double_const (low, high, mode);
@@ -1417,7 +1413,8 @@ extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
	       : operand_subword_force (op0, offset, GET_MODE (op0)));
   part1 = extract_fixed_bit_field (word_mode, word,
				   GET_CODE (op0) == MEM ? offset : 0,
-				   bitsize_1, bitpos % unit, 0, 1, align);
+				   bitsize_1, bitpos % unit, NULL_RTX,
+				   1, align);
 
   /* Offset op0 by 1 word to get to the following one.  */
   if (GET_CODE (op0) == SUBREG)
@@ -1433,7 +1430,7 @@ extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
				   (GET_CODE (op0) == MEM
				    ? CEIL (offset + 1, UNITS_PER_WORD) * UNITS_PER_WORD
				    : 0),
-				   bitsize_2, 0, 0, 1, align);
+				   bitsize_2, 0, NULL_RTX, 1, align);
 
   /* Shift the more significant part up to fit above the other part.  */
 #if BYTES_BIG_ENDIAN
@@ -1446,7 +1443,7 @@ extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
   /* Combine the two parts with bitwise or.  This works
      because we extracted both parts as unsigned bit fields.  */
-  result = expand_binop (word_mode, ior_optab, part1, part2, 0, 1,
+  result = expand_binop (word_mode, ior_optab, part1, part2, NULL_RTX, 1,
			 OPTAB_LIB_WIDEN);
 
   /* Unsigned bit field: we are done.  */
@@ -1454,9 +1451,10 @@ extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     return result;
   /* Signed bit field: sign-extend with two arithmetic shifts.  */
   result = expand_shift (LSHIFT_EXPR, word_mode, result,
-			 build_int_2 (BITS_PER_WORD - bitsize, 0), 0, 0);
+			 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
   return expand_shift (RSHIFT_EXPR, word_mode, result,
-		       build_int_2 (BITS_PER_WORD - bitsize, 0), 0, 0);
+		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
 }
 
 /* Add INC into TARGET.  */
@@ -1510,7 +1508,7 @@ expand_shift (code, mode, shifted, amount, target, unsignedp)
     and shifted in the other direction; but that does not work
     on all machines.  */
 
-  op1 = expand_expr (amount, 0, VOIDmode, 0);
+  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
 
   if (op1 == const0_rtx)
     return shifted;
@@ -1624,8 +1622,7 @@ expand_shift (code, mode, shifted, amount, target, unsignedp)
	  /* WIDTH gets the width of the bit field to extract:
	     wordsize minus # bits to shift by.  */
	  if (GET_CODE (xop1) == CONST_INT)
-	    width = gen_rtx (CONST_INT, VOIDmode,
-			     (GET_MODE_BITSIZE (mode) - INTVAL (op1)));
+	    width = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
	  else
	    {
	      /* Now get the width in the proper mode.  */
@@ -1633,9 +1630,8 @@ expand_shift (code, mode, shifted, amount, target, unsignedp)
				  TREE_UNSIGNED (TREE_TYPE (amount)));
 
	      width = expand_binop (length_mode, sub_optab,
-				    gen_rtx (CONST_INT, VOIDmode,
-					     GET_MODE_BITSIZE (mode)),
-				    width, 0, 0, OPTAB_LIB_WIDEN);
+				    GEN_INT (GET_MODE_BITSIZE (mode)),
+				    width, NULL_RTX, 0, OPTAB_LIB_WIDEN);
	    }
 
	  /* If this machine's extzv insists on a register for
@@ -1700,13 +1696,15 @@ struct algorithm
 static struct algorithm
 synth_mult (t, add_cost, shift_cost, max_cost)
-     unsigned int t;
+     unsigned HOST_WIDE_INT t;
     int add_cost, shift_cost;
     int max_cost;
 {
   int m, n;
-  struct algorithm *best_alg = (struct algorithm *) alloca (sizeof (struct algorithm));
-  struct algorithm *alg_in = (struct algorithm *) alloca (sizeof (struct algorithm));
+  struct algorithm *best_alg
+    = (struct algorithm *) alloca (sizeof (struct algorithm));
+  struct algorithm *alg_in
+    = (struct algorithm *) alloca (sizeof (struct algorithm));
   unsigned int cost;
 
   /* No matter what happens, we want to return a valid algorithm.  */
@@ -1756,13 +1754,13 @@ synth_mult (t, add_cost, shift_cost, max_cost)
   for (m = floor_log2 (t) - 1; m >= 2; m--)
     {
-      int m_exp_2 = 1 << m;
-      int d;
+      HOST_WIDE_INT m_exp_2 = (HOST_WIDE_INT) 1 << m;
+      HOST_WIDE_INT d;
 
       d = m_exp_2 + 1;
       if (t % d == 0)
	{
-	  int q = t / d;
+	  HOST_WIDE_INT q = t / d;
 
	  cost = add_cost + shift_cost * 2;
@@ -1791,7 +1789,7 @@ synth_mult (t, add_cost, shift_cost, max_cost)
       d = m_exp_2 - 1;
       if (t % d == 0)
	{
-	  int q = t / d;
+	  HOST_WIDE_INT q = t / d;
 
	  cost = add_cost + shift_cost * 2;
@@ -1821,8 +1819,8 @@ synth_mult (t, add_cost, shift_cost, max_cost)
   /* Try load effective address instructions, i.e. do a*3, a*5, a*9.  */
   {
-    int q;
-    int w;
+    HOST_WIDE_INT q;
+    HOST_WIDE_INT w;
 
     q = t & -t;			/* get out lsb */
     w = (t - q) & -(t - q);	/* get out next lsb */
@@ -1861,8 +1859,8 @@ synth_mult (t, add_cost, shift_cost, max_cost)
     1-bit.  */
   {
-    int q;
-    int w;
+    HOST_WIDE_INT q;
+    HOST_WIDE_INT w;
 
     q = t & -t;			/* get out lsb */
     for (w = q; (w & t) != 0; w <<= 1)
@@ -1958,7 +1956,7 @@ expand_mult (mode, op0, op1, target, unsignedp)
     {
       if ((CONST_DOUBLE_HIGH (op1) == 0 && CONST_DOUBLE_LOW (op1) >= 0)
	  || (CONST_DOUBLE_HIGH (op1) == -1 && CONST_DOUBLE_LOW (op1) < 0))
-	const_op1 = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (op1));
+	const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
     }
 
   /* We used to test optimize here, on the grounds that it's better to
@@ -1970,7 +1968,7 @@ expand_mult (mode, op0, op1, target, unsignedp)
      struct algorithm alg;
      struct algorithm neg_alg;
      int negate = 0;
-      int absval = INTVAL (op1);
+      HOST_WIDE_INT absval = INTVAL (op1);
      rtx last;
 
      /* Try to do the computation two ways: multiply by the negative of OP1
@@ -2015,8 +2013,7 @@ expand_mult (mode, op0, op1, target, unsignedp)
	  if (alg.op[0] != alg_add)
	    abort ();
	  accum = expand_shift (LSHIFT_EXPR, mode, op0,
-				build_int_2 (log, 0), 0, 0);
+				build_int_2 (log, 0), NULL_RTX, 0);
	}
 
      while (++opno < alg.ops)
@@ -2035,7 +2032,7 @@ expand_mult (mode, op0, op1, target, unsignedp)
	      if (factors_seen)
		{
		  tem = expand_shift (LSHIFT_EXPR, mode, op0,
-				      build_int_2 (log, 0), 0, 0);
+				      build_int_2 (log, 0), NULL_RTX, 0);
		  accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
					 accum);
		}
@@ -2054,7 +2051,7 @@ expand_mult (mode, op0, op1, target, unsignedp)
	      if (factors_seen)
		{
		  tem = expand_shift (LSHIFT_EXPR, mode, op0,
-				      build_int_2 (log, 0), 0, 0);
+				      build_int_2 (log, 0), NULL_RTX, 0);
		  accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
					 accum);
		}
@@ -2073,11 +2070,11 @@ expand_mult (mode, op0, op1, target, unsignedp)
	    case alg_compound:
	      factors_seen = 1;
	      tem = expand_shift (LSHIFT_EXPR, mode, accum,
-				  build_int_2 (log, 0), 0, 0);
+				  build_int_2 (log, 0), NULL_RTX, 0);
 
	      log = floor_log2 (alg.coeff[opno + 1]);
	      accum = expand_shift (LSHIFT_EXPR, mode, accum,
-				    build_int_2 (log, 0), 0, 0);
+				    build_int_2 (log, 0), NULL_RTX, 0);
	      opno++;
	      if (alg.op[opno] == alg_add)
		accum = force_operand (gen_rtx (PLUS, mode, tem, accum),
@@ -2105,9 +2102,7 @@ expand_mult (mode, op0, op1, target, unsignedp)
	  REG_NOTES (last)
	    = gen_rtx (EXPR_LIST, REG_EQUAL,
		       gen_rtx (MULT, mode, op0,
-				negate ? gen_rtx (CONST_INT,
-						  VOIDmode, absval)
-				: op1),
+				negate ? GEN_INT (absval) : op1),
		       REG_NOTES (last));
	}
@@ -2285,7 +2280,8 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
		     which will screw up mem refs for autoincrements.  */
		  op0 = force_reg (compute_mode, op0);
		}
-	      emit_cmp_insn (adjusted_op0, const0_rtx, GE, 0, compute_mode, 0, 0);
+	      emit_cmp_insn (adjusted_op0, const0_rtx, GE, NULL_RTX,
+			     compute_mode, 0, 0);
	      emit_jump_insn (gen_bge (label));
	      expand_inc (adjusted_op0, plus_constant (op1, -1));
	      emit_label (label);
@@ -2306,7 +2302,8 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
		     which will screw up mem refs for autoincrements.  */
		  op0 = force_reg (compute_mode, op0);
		}
-	      emit_cmp_insn (adjusted_op0, const0_rtx, GE, 0, compute_mode, 0, 0);
+	      emit_cmp_insn (adjusted_op0, const0_rtx, GE, NULL_RTX,
+			     compute_mode, 0, 0);
	      emit_jump_insn (gen_bge (label));
	      expand_dec (adjusted_op0, op1);
	      expand_inc (adjusted_op0, const1_rtx);
@@ -2331,7 +2328,8 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
	      if (! unsignedp)
		{
		  label = gen_label_rtx ();
-		  emit_cmp_insn (adjusted_op0, const0_rtx, LE, 0, compute_mode, 0, 0);
+		  emit_cmp_insn (adjusted_op0, const0_rtx, LE, NULL_RTX,
+				 compute_mode, 0, 0);
		  emit_jump_insn (gen_ble (label));
		}
	      expand_inc (adjusted_op0, op1);
@@ -2343,7 +2341,7 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
	    {
	      adjusted_op0 = expand_binop (compute_mode, add_optab,
					   adjusted_op0, plus_constant (op1, -1),
-					   0, 0, OPTAB_LIB_WIDEN);
+					   NULL_RTX, 0, OPTAB_LIB_WIDEN);
	    }
	  mod_insn_no_good = 1;
	  break;
@@ -2361,11 +2359,12 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
	  if (log < 0)
	    {
	      op1 = expand_shift (RSHIFT_EXPR, compute_mode, op1,
-				  integer_one_node, 0, 0);
+				  integer_one_node, NULL_RTX, 0);
	      if (! unsignedp)
		{
		  rtx label = gen_label_rtx ();
-		  emit_cmp_insn (adjusted_op0, const0_rtx, GE, 0, compute_mode, 0, 0);
+		  emit_cmp_insn (adjusted_op0, const0_rtx, GE, NULL_RTX,
+				 compute_mode, 0, 0);
		  emit_jump_insn (gen_bge (label));
		  expand_unop (compute_mode, neg_optab, op1, op1, 0);
		  emit_label (label);
@@ -2374,7 +2373,7 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
	    }
	  else
	    {
-	      op1 = gen_rtx (CONST_INT, VOIDmode, (1 << log) / 2);
+	      op1 = GEN_INT (((HOST_WIDE_INT) 1 << log) / 2);
	      expand_inc (adjusted_op0, op1);
	    }
	  mod_insn_no_good = 1;
@@ -2386,8 +2385,7 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
	  /* Try to produce the remainder directly */
	  if (log >= 0)
	    result = expand_binop (compute_mode, and_optab, adjusted_op0,
-				   gen_rtx (CONST_INT, VOIDmode,
-					    (1 << log) - 1),
+				   GEN_INT (((HOST_WIDE_INT) 1 << log) - 1),
				   target, 1, OPTAB_LIB_WIDEN);
	  else
	    {
@@ -2403,7 +2401,7 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
	      if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
					 adjusted_op0, op1,
-					 0, result, unsignedp))
+					 NULL_RTX, result, unsignedp))
		result = 0;
	    }
	}
@@ -2421,12 +2419,14 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
	   and a remainder subroutine would be ok,
	   don't use a divide subroutine.  */
	result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
-				    adjusted_op0, op1, 0, unsignedp, OPTAB_WIDEN);
+				    adjusted_op0, op1, NULL_RTX, unsignedp,
+				    OPTAB_WIDEN);
      else
	{
	  /* Try a quotient insn, but not a library call.  */
	  result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
-				      adjusted_op0, op1, rem_flag ? 0 : target,
+				      adjusted_op0, op1,
+				      rem_flag ? NULL_RTX : target,
				      unsignedp, OPTAB_WIDEN);
	  if (result == 0)
	    {
@@ -2435,14 +2435,15 @@ expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
	      result = gen_reg_rtx (mode);
 
	      if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
					 adjusted_op0, op1,
-					 result, 0, unsignedp))
+					 result, NULL_RTX, unsignedp))
		result = 0;
	    }
 
	  /* If still no luck, use a library call.  */
	  if (result == 0)
	    result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
-					adjusted_op0, op1, rem_flag ? 0 : target,
+					adjusted_op0, op1,
+					rem_flag ? NULL_RTX : target,
					unsignedp, OPTAB_LIB_WIDEN);
	}
@@ -2611,7 +2612,7 @@ expand_and (op0, op1, target)
   if (mode != VOIDmode)
     tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
   else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
-    tem = gen_rtx (CONST_INT, VOIDmode, INTVAL (op0) & INTVAL (op1));
+    tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
   else
     abort ();
@@ -2697,8 +2698,9 @@ emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
   if (op1 == const0_rtx && (code == LT || code == GE)
       && GET_MODE_CLASS (mode) == MODE_INT
       && (normalizep || STORE_FLAG_VALUE == 1
-	  || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT
-	      && STORE_FLAG_VALUE == 1 << (GET_MODE_BITSIZE (mode) - 1))))
+	  || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	      && (STORE_FLAG_VALUE
+		  == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
     {
       rtx subtarget = target;
@@ -2742,7 +2744,8 @@ emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
   emit_queue ();
   last = get_last_insn ();
 
-  comparison = compare_from_rtx (op0, op1, code, unsignedp, mode, 0, 0);
+  comparison
+    = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
   if (GET_CODE (comparison) == CONST_INT)
     return (comparison == const0_rtx ? const0_rtx
	    : normalizep == 1 ? const1_rtx
@@ -2773,9 +2776,10 @@ emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
	{
	  convert_move (target, subtarget,
			(GET_MODE_BITSIZE (compare_mode)
-			 <= HOST_BITS_PER_INT)
+			 <= HOST_BITS_PER_WIDE_INT)
			&& 0 == (STORE_FLAG_VALUE
-				 & (1 << (GET_MODE_BITSIZE (compare_mode) - 1))));
+				 & ((HOST_WIDE_INT) 1
+				    << (GET_MODE_BITSIZE (compare_mode) - 1))));
	  op0 = target;
	  compare_mode = target_mode;
	}
@@ -2798,9 +2802,10 @@ emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
      /* We don't want to use STORE_FLAG_VALUE < 0 below since this
	 makes it hard to use a value of just the sign bit due to
	 ANSI integer constant typing rules.  */
-      else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_INT
-	       && (STORE_FLAG_VALUE
-		   & (1 << (GET_MODE_BITSIZE (compare_mode) - 1))))
+      else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
+	       && (STORE_FLAG_VALUE
+		   & ((HOST_WIDE_INT) 1
+		      << (GET_MODE_BITSIZE (compare_mode) - 1))))
	op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
			    size_int (GET_MODE_BITSIZE (compare_mode) - 1),
			    subtarget, normalizep == 1);
@@ -2874,8 +2879,9 @@ emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
   if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
     normalizep = STORE_FLAG_VALUE;
 
-  else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT
-	   && STORE_FLAG_VALUE == 1 << (GET_MODE_BITSIZE (mode) - 1))
+  else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	   && (STORE_FLAG_VALUE
+	       == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
     ;
   else
     return 0;
gcc/global.c

@@ -96,6 +96,13 @@ static int *allocno_size;
 static int *reg_may_share;
 
+/* Define the number of bits in each element of `conflicts' and what
+   type that element has.  We use the largest integer format on the
+   host machine.  */
+
+#define INT_BITS HOST_BITS_PER_WIDE_INT
+#define INT_TYPE HOST_WIDE_INT
+
 /* max_allocno by max_allocno array of bits,
   recording whether two allocno's conflict (can't go in the same
   hardware register).
@@ -103,7 +110,7 @@ static int *reg_may_share;
   `conflicts' is not symmetric; a conflict between allocno's i and j
   is recorded either in element i,j or in element j,i.  */
 
-static int *conflicts;
+static INT_TYPE *conflicts;
 
 /* Number of ints require to hold max_allocno bits.
   This is the length of a row in `conflicts'.  */
@@ -114,11 +121,11 @@ static int allocno_row_words;
 #define CONFLICTP(I, J) \
 (conflicts[(I) * allocno_row_words + (J) / INT_BITS]	\
-  & (1 << ((J) % INT_BITS)))
+  & ((INT_TYPE) 1 << ((J) % INT_BITS)))
 
 #define SET_CONFLICT(I, J) \
 (conflicts[(I) * allocno_row_words + (J) / INT_BITS]	\
-  |= (1 << ((J) % INT_BITS)))
+  |= ((INT_TYPE) 1 << ((J) % INT_BITS)))
 
 /* Set of hard regs currently live (during scan of all insns).  */
@@ -194,21 +201,19 @@ static int local_reg_live_length[FIRST_PSEUDO_REGISTER];
 /* Bit mask for allocnos live at current point in the scan.  */
 
-static int *allocnos_live;
-
-#define INT_BITS HOST_BITS_PER_INT
+static INT_TYPE *allocnos_live;
 
 /* Test, set or clear bit number I in allocnos_live,
   a bit vector indexed by allocno.  */
 
 #define ALLOCNO_LIVE_P(I) \
-  (allocnos_live[(I) / INT_BITS] & (1 << ((I) % INT_BITS)))
+  (allocnos_live[(I) / INT_BITS] & ((INT_TYPE) 1 << ((I) % INT_BITS)))
 
 #define SET_ALLOCNO_LIVE(I) \
-  (allocnos_live[(I) / INT_BITS] |= (1 << ((I) % INT_BITS)))
+  (allocnos_live[(I) / INT_BITS] |= ((INT_TYPE) 1 << ((I) % INT_BITS)))
 
 #define CLEAR_ALLOCNO_LIVE(I) \
-  (allocnos_live[(I) / INT_BITS] &= ~(1 << ((I) % INT_BITS)))
+  (allocnos_live[(I) / INT_BITS] &= ~((INT_TYPE) 1 << ((I) % INT_BITS)))
 
 /* This is turned off because it doesn't work right for DImode.
   (And it is only used for DImode, so the other cases are worthless.)
@@ -457,10 +462,12 @@ global_alloc (file)
   allocno_row_words = (max_allocno + INT_BITS - 1) / INT_BITS;
 
-  conflicts = (int *) alloca (max_allocno * allocno_row_words * sizeof (int));
-  bzero (conflicts, max_allocno * allocno_row_words * sizeof (int));
+  conflicts = (INT_TYPE *) alloca (max_allocno * allocno_row_words
+				   * sizeof (INT_TYPE));
+  bzero (conflicts, max_allocno * allocno_row_words * sizeof (INT_TYPE));
 
-  allocnos_live = (int *) alloca (allocno_row_words * sizeof (int));
+  allocnos_live
+    = (INT_TYPE *) alloca (allocno_row_words * sizeof (INT_TYPE));
 
   /* If there is work to be done (at least one reg to allocate),
      perform global conflict analysis and allocate the regs.  */
@@ -534,7 +541,7 @@ global_alloc (file)
	  if (reg_renumber[allocno_reg[allocno_order[i]]] >= 0)
	    continue;
	}
-      if (! reg_preferred_or_nothing (allocno_reg[allocno_order[i]]))
+      if (reg_alternate_class (allocno_reg[allocno_order[i]]) != NO_REGS)
	find_reg (allocno_order[i], HARD_CONST (0), 1, 0, 0);
     }
 }
@@ -593,7 +600,7 @@ global_conflicts ()
   for (b = 0; b < n_basic_blocks; b++)
     {
-      bzero (allocnos_live, allocno_row_words * sizeof (int));
+      bzero (allocnos_live, allocno_row_words * sizeof (INT_TYPE));
 
       /* Initialize table of registers currently live
	 to the state at the beginning of this basic block.
@@ -609,7 +616,8 @@ global_conflicts ()
	 are explicitly marked in basic_block_live_at_start.  */
 
       {
-	register int offset, bit;
+	register int offset;
+	REGSET_ELT_TYPE bit;
	register regset old = basic_block_live_at_start[b];
	int ax = 0;
@@ -620,7 +628,7 @@ global_conflicts ()
 #endif
	for (offset = 0, i = 0; offset < regset_size; offset++)
	  if (old[offset] == 0)
-	    i += HOST_BITS_PER_INT;
+	    i += REGSET_ELT_BITS;
	  else
	    for (bit = 1; bit; bit <<= 1, i++)
	      {
@@ -699,7 +707,7 @@ global_conflicts ()
 #ifdef AUTO_INC_DEC
	  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	    if (REG_NOTE_KIND (link) == REG_INC)
-	      mark_reg_store (XEXP (link, 0), 0);
+	      mark_reg_store (XEXP (link, 0), NULL_RTX);
 #endif
 
	  /* If INSN has multiple outputs, then any reg that dies here
@@ -857,8 +865,8 @@ prune_preferences ()
   LOSERS, if non-zero, is a HARD_REG_SET indicating registers that cannot
   be used for this allocation.
 
-   If ALL_REGS_P is zero, consider only the preferred class of ALLOCNO's reg.
-   Otherwise ignore that preferred class.
+   If ALT_REGS_P is zero, consider only the preferred class of ALLOCNO's reg.
+   Otherwise ignore that preferred class and use the alternate class.
 
   If ACCEPT_CALL_CLOBBERED is nonzero, accept a call-clobbered hard reg that
   will have to be saved and restored at calls.
@@ -869,10 +877,10 @@ prune_preferences ()
   If not, do nothing.  */
 
 static void
-find_reg (allocno, losers, all_regs_p, accept_call_clobbered, retrying)
+find_reg (allocno, losers, alt_regs_p, accept_call_clobbered, retrying)
     int allocno;
     HARD_REG_SET losers;
-     int all_regs_p;
+     int alt_regs_p;
     int accept_call_clobbered;
    int retrying;
 {
@@ -882,8 +890,9 @@ find_reg (allocno, losers, all_regs_p, accept_call_clobbered, retrying)
 #endif
   HARD_REG_SET used, used1, used2;
 
-  enum reg_class class
-    = all_regs_p ? ALL_REGS : reg_preferred_class (allocno_reg[allocno]);
+  enum reg_class class = (alt_regs_p
+			  ? reg_alternate_class (allocno_reg[allocno])
+			  : reg_preferred_class (allocno_reg[allocno]));
   enum machine_mode mode = PSEUDO_REGNO_MODE (allocno_reg[allocno]);
 
   if (accept_call_clobbered)
@@ -1042,7 +1051,7 @@ find_reg (allocno, losers, all_regs_p, accept_call_clobbered, retrying)
       && CALLER_SAVE_PROFITABLE (allocno_n_refs[allocno],
				 allocno_calls_crossed[allocno]))
     {
-      find_reg (allocno, losers, all_regs_p, 1, retrying);
+      find_reg (allocno, losers, alt_regs_p, 1, retrying);
       if (reg_renumber[allocno_reg[allocno]] >= 0)
	{
	  caller_save_needed = 1;
@@ -1146,7 +1155,7 @@ retry_global_alloc (regno, forbidden_regs)
      if (N_REG_CLASSES > 1)
	find_reg (allocno, forbidden_regs, 0, 0, 1);
 
      if (reg_renumber[regno] < 0
-	  && ! reg_preferred_or_nothing (regno))
+	  && reg_alternate_class (regno) != NO_REGS)
	find_reg (allocno, forbidden_regs, 1, 0, 1);
 
      /* If we found a register, modify the RTL for the register to
@@ -1563,13 +1572,13 @@ mark_elimination (from, to)
   int i;
 
   for (i = 0; i < n_basic_blocks; i++)
-    if ((basic_block_live_at_start[i][from / HOST_BITS_PER_INT]
-	 & (1 << (from % HOST_BITS_PER_INT))) != 0)
+    if ((basic_block_live_at_start[i][from / REGSET_ELT_BITS]
+	 & ((REGSET_ELT_TYPE) 1 << (from % REGSET_ELT_BITS))) != 0)
      {
-	basic_block_live_at_start[i][from / HOST_BITS_PER_INT]
-	  &= ~ (1 << (from % HOST_BITS_PER_INT));
-	basic_block_live_at_start[i][to / HOST_BITS_PER_INT]
-	  |= (1 << (to % HOST_BITS_PER_INT));
+	basic_block_live_at_start[i][from / REGSET_ELT_BITS]
+	  &= ~ ((REGSET_ELT_TYPE) 1 << (from % REGSET_ELT_BITS));
+	basic_block_live_at_start[i][to / REGSET_ELT_BITS]
+	  |= ((REGSET_ELT_TYPE) 1 << (to % REGSET_ELT_BITS));
      }
 }
gcc/local-alloc.c

@@ -922,7 +922,7 @@ update_equiv_regs ()
	  || reg_n_sets[regno] != 1)
	continue;
 
-      note = find_reg_note (insn, REG_EQUAL, 0);
+      note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
 
      /* Record this insn as initializing this register.  */
      reg_equiv_init_insn[regno] = insn;
@@ -947,7 +947,7 @@ update_equiv_regs ()
	 MEM remains unchanged for the life of the register, add a REG_EQUIV
	 note.  */
 
-      note = find_reg_note (insn, REG_EQUIV, 0);
+      note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
 
      if (note == 0 && reg_basic_block[regno] >= 0
	  && GET_CODE (SET_SRC (set)) == MEM
@@ -1155,11 +1155,12 @@ block_alloc (b)
      if (GET_CODE (PATTERN (insn)) == CLOBBER
	  && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
-	  && (link = find_reg_note (insn, REG_LIBCALL, 0)) != 0
+	  && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
	  && GET_CODE (XEXP (link, 0)) == INSN
	  && (set = single_set (XEXP (link, 0))) != 0
	  && SET_DEST (set) == r0 && SET_SRC (set) == r0
-	  && (note = find_reg_note (XEXP (link, 0), REG_EQUAL, 0)) != 0)
+	  && (note
+	      = find_reg_note (XEXP (link, 0), REG_EQUAL, NULL_RTX)) != 0)
	{
	  if (r1 = XEXP (note, 0), GET_CODE (r1) == REG
	      /* Check that we have such a sequence.  */
@@ -1245,7 +1246,7 @@ block_alloc (b)
      /* If this is an insn that has a REG_RETVAL note pointing at a
	 CLOBBER insn, we have reached the end of a REG_NO_CONFLICT
	 block, so clear any register number that combined within it.  */
-      if ((note = find_reg_note (insn, REG_RETVAL, 0)) != 0
+      if ((note = find_reg_note (insn, REG_RETVAL, NULL_RTX)) != 0
	  && GET_CODE (XEXP (note, 0)) == INSN
	  && GET_CODE (PATTERN (XEXP (note, 0))) == CLOBBER)
	no_conflict_combined_regno = -1;
@@ -1988,7 +1989,7 @@ no_conflict_p (insn, r0, r1)
     rtx insn, r0, r1;
 {
   int ok = 0;
-  rtx note = find_reg_note (insn, REG_LIBCALL, 0);
+  rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
   rtx p, last;
 
   /* If R1 is a hard register, return 0 since we handle this case
View file @
b1ec3c92
...
...
@@ -402,8 +402,8 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
&&
!
add_equal_note
(
pat
,
temp
,
binoptab
->
code
,
xop0
,
xop1
))
{
delete_insns_since
(
last
);
return
expand_binop
(
mode
,
binoptab
,
op0
,
op1
,
0
,
unsignedp
,
methods
);
return
expand_binop
(
mode
,
binoptab
,
op0
,
op1
,
NULL_RTX
,
unsignedp
,
methods
);
}
emit_insn
(
pat
);
...
...
@@ -454,7 +454,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
else
xop1
=
convert_to_mode
(
wider_mode
,
xop1
,
unsignedp
);
temp
=
expand_binop
(
wider_mode
,
binoptab
,
xop0
,
xop1
,
0
,
temp
=
expand_binop
(
wider_mode
,
binoptab
,
xop0
,
xop1
,
NULL_RTX
,
unsignedp
,
OPTAB_DIRECT
);
if
(
temp
)
{
...
...
@@ -711,18 +711,18 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
&&
smul_widen_optab
->
handlers
[(
int
)
mode
].
insn_code
!=
CODE_FOR_nothing
)
{
rtx
wordm1
=
gen_rtx
(
CONST_INT
,
VOIDmode
,
BITS_PER_WORD
-
1
);
rtx
wordm1
=
GEN_INT
(
BITS_PER_WORD
-
1
);
product
=
expand_binop
(
mode
,
smul_widen_optab
,
op0_low
,
op1_low
,
target
,
1
,
OPTAB_DIRECT
);
op0_xhigh
=
expand_binop
(
word_mode
,
lshr_optab
,
op0_low
,
wordm1
,
0
,
1
,
OPTAB_DIRECT
);
NULL_RTX
,
1
,
OPTAB_DIRECT
);
if
(
op0_xhigh
)
op0_xhigh
=
expand_binop
(
word_mode
,
add_optab
,
op0_high
,
op0_xhigh
,
op0_xhigh
,
0
,
OPTAB_DIRECT
);
else
{
op0_xhigh
=
expand_binop
(
word_mode
,
ashr_optab
,
op0_low
,
wordm1
,
0
,
0
,
OPTAB_DIRECT
);
NULL_RTX
,
0
,
OPTAB_DIRECT
);
if
(
op0_xhigh
)
op0_xhigh
=
expand_binop
(
word_mode
,
sub_optab
,
op0_high
,
op0_xhigh
,
op0_xhigh
,
0
,
...
...
@@ -730,14 +730,14 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
}
op1_xhigh
=
expand_binop
(
word_mode
,
lshr_optab
,
op1_low
,
wordm1
,
0
,
1
,
OPTAB_DIRECT
);
NULL_RTX
,
1
,
OPTAB_DIRECT
);
if
(
op1_xhigh
)
op1_xhigh
=
expand_binop
(
word_mode
,
add_optab
,
op1_high
,
op1_xhigh
,
op1_xhigh
,
0
,
OPTAB_DIRECT
);
else
{
op1_xhigh
=
expand_binop
(
word_mode
,
ashr_optab
,
op1_low
,
wordm1
,
0
,
0
,
OPTAB_DIRECT
);
NULL_RTX
,
0
,
OPTAB_DIRECT
);
if
(
op1_xhigh
)
op1_xhigh
=
expand_binop
(
word_mode
,
sub_optab
,
op1_high
,
op1_xhigh
,
op1_xhigh
,
0
,
...
...
@@ -759,8 +759,8 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
{
rtx
product_piece
;
rtx
product_high
=
operand_subword
(
product
,
high
,
1
,
mode
);
rtx
temp
=
expand_binop
(
word_mode
,
binoptab
,
op0_low
,
op1_xhigh
,
0
,
0
,
OPTAB_DIRECT
);
rtx
temp
=
expand_binop
(
word_mode
,
binoptab
,
op0_low
,
op1_xhigh
,
NULL_RTX
,
0
,
OPTAB_DIRECT
);
if
(
temp
)
{
...
...
@@ -770,8 +770,8 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
if
(
product_piece
!=
product_high
)
emit_move_insn
(
product_high
,
product_piece
);
temp
=
expand_binop
(
word_mode
,
binoptab
,
op1_low
,
op0_xhigh
,
0
,
0
,
OPTAB_DIRECT
);
temp
=
expand_binop
(
word_mode
,
binoptab
,
op1_low
,
op0_xhigh
,
NULL_RTX
,
0
,
OPTAB_DIRECT
);
product_piece
=
expand_binop
(
word_mode
,
add_optab
,
temp
,
product_high
,
product_high
,
...
...
@@ -879,7 +879,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
else
xop1
=
convert_to_mode
(
wider_mode
,
xop1
,
unsignedp
);
temp
=
expand_binop
(
wider_mode
,
binoptab
,
xop0
,
xop1
,
0
,
temp
=
expand_binop
(
wider_mode
,
binoptab
,
xop0
,
xop1
,
NULL_RTX
,
unsignedp
,
methods
);
if
(
temp
)
{
...
...
@@ -1152,10 +1152,10 @@ expand_unop (mode, unoptab, op0, target, unsignedp)
if
(
pat
)
{
if
(
GET_CODE
(
pat
)
==
SEQUENCE
&&
!
add_equal_note
(
pat
,
temp
,
unoptab
->
code
,
xop0
,
0
))
&&
!
add_equal_note
(
pat
,
temp
,
unoptab
->
code
,
xop0
,
NULL_RTX
))
{
delete_insns_since
(
last
);
return
expand_unop
(
mode
,
unoptab
,
op0
,
0
,
unsignedp
);
return
expand_unop
(
mode
,
unoptab
,
op0
,
NULL_RTX
,
unsignedp
);
}
emit_insn
(
pat
);
...
...
@@ -1186,7 +1186,8 @@ expand_unop (mode, unoptab, op0, target, unsignedp)
else
xop0
=
convert_to_mode
(
wider_mode
,
xop0
,
unsignedp
);
temp
=
expand_unop
(
wider_mode
,
unoptab
,
xop0
,
0
,
unsignedp
);
temp
=
expand_unop
(
wider_mode
,
unoptab
,
xop0
,
NULL_RTX
,
unsignedp
);
if
(
temp
)
{
...
...
@@ -1233,7 +1234,7 @@ expand_unop (mode, unoptab, op0, target, unsignedp)
insns
=
get_insns
();
end_sequence
();
emit_no_conflict_block
(
insns
,
target
,
op0
,
0
,
emit_no_conflict_block
(
insns
,
target
,
op0
,
NULL_RTX
,
gen_rtx
(
unoptab
->
code
,
mode
,
op0
));
return
target
;
}
...
...
@@ -1282,7 +1283,8 @@ expand_unop (mode, unoptab, op0, target, unsignedp)
else
xop0
=
convert_to_mode
(
wider_mode
,
xop0
,
unsignedp
);
temp
=
expand_unop
(
wider_mode
,
unoptab
,
xop0
,
0
,
unsignedp
);
temp
=
expand_unop
(
wider_mode
,
unoptab
,
xop0
,
NULL_RTX
,
unsignedp
);
if
(
temp
)
{
...
...
@@ -1341,7 +1343,7 @@ emit_unop_insn (icode, target, op0, code)
pat
=
GEN_FCN
(
icode
)
(
temp
,
op0
);
if
(
GET_CODE
(
pat
)
==
SEQUENCE
&&
code
!=
UNKNOWN
)
add_equal_note
(
pat
,
temp
,
code
,
op0
,
0
);
add_equal_note
(
pat
,
temp
,
code
,
op0
,
NULL_RTX
);
emit_insn
(
pat
);
...
...
@@ -1645,9 +1647,9 @@ emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align)
enum
machine_mode
result_mode
=
insn_operand_mode
[(
int
)
CODE_FOR_cmpstrqi
][
0
];
rtx
result
=
gen_reg_rtx
(
result_mode
);
emit_insn
(
gen_cmpstrqi
(
result
,
x
,
y
,
size
,
gen_rtx
(
CONST_INT
,
VOIDmode
,
align
)));
emit_cmp_insn
(
result
,
const0_rtx
,
comparison
,
0
,
result_mode
,
0
,
0
);
emit_insn
(
gen_cmpstrqi
(
result
,
x
,
y
,
size
,
GEN_INT
(
align
)));
emit_cmp_insn
(
result
,
const0_rtx
,
comparison
,
NULL_RTX
,
result_mode
,
0
,
0
);
}
else
#endif
...
...
@@ -1659,9 +1661,9 @@ emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align)
enum
machine_mode
result_mode
=
insn_operand_mode
[(
int
)
CODE_FOR_cmpstrhi
][
0
];
rtx
result
=
gen_reg_rtx
(
result_mode
);
emit_insn
(
gen_cmpstrhi
(
result
,
x
,
y
,
size
,
gen_rtx
(
CONST_INT
,
VOIDmode
,
align
)));
emit_cmp_insn
(
result
,
const0_rtx
,
comparison
,
0
,
result_mode
,
0
,
0
);
emit_insn
(
gen_cmpstrhi
(
result
,
x
,
y
,
size
,
GEN_INT
(
align
)));
emit_cmp_insn
(
result
,
const0_rtx
,
comparison
,
NULL_RTX
,
result_mode
,
0
,
0
);
}
else
#endif
...
...
@@ -1673,8 +1675,9 @@ emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align)
rtx
result
=
gen_reg_rtx
(
result_mode
);
emit_insn
(
gen_cmpstrsi
(
result
,
x
,
y
,
convert_to_mode
(
SImode
,
size
,
1
),
gen_rtx
(
CONST_INT
,
VOIDmode
,
align
)));
emit_cmp_insn
(
result
,
const0_rtx
,
comparison
,
0
,
result_mode
,
0
,
0
);
GEN_INT
(
align
)));
emit_cmp_insn
(
result
,
const0_rtx
,
comparison
,
NULL_RTX
,
result_mode
,
0
,
0
);
}
else
#endif
...
...
@@ -1691,7 +1694,7 @@ emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align)
size
,
Pmode
);
#endif
emit_cmp_insn
(
hard_libcall_value
(
TYPE_MODE
(
integer_type_node
)),
const0_rtx
,
comparison
,
0
,
const0_rtx
,
comparison
,
NULL_RTX
,
TYPE_MODE
(
integer_type_node
),
0
,
0
);
}
return
;
...
...
@@ -1752,7 +1755,7 @@ emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align)
{
x
=
convert_to_mode
(
wider_mode
,
x
,
unsignedp
);
y
=
convert_to_mode
(
wider_mode
,
y
,
unsignedp
);
emit_cmp_insn
(
x
,
y
,
comparison
,
0
,
emit_cmp_insn
(
x
,
y
,
comparison
,
NULL_RTX
,
wider_mode
,
unsignedp
,
align
);
return
;
}
...
...
@@ -1778,7 +1781,7 @@ emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align)
there is still a value that can represent the result "less than". */
emit_cmp_insn
(
hard_libcall_value
(
SImode
),
const1_rtx
,
comparison
,
0
,
SImode
,
unsignedp
,
0
);
comparison
,
NULL_RTX
,
SImode
,
unsignedp
,
0
);
return
;
}
...
...
@@ -1949,7 +1952,7 @@ emit_float_lib_cmp (x, y, comparison)
SImode
,
2
,
x
,
mode
,
y
,
mode
);
emit_cmp_insn
(
hard_libcall_value
(
SImode
),
const0_rtx
,
comparison
,
0
,
SImode
,
0
,
0
);
NULL_RTX
,
SImode
,
0
,
0
);
}
/* Generate code to indirectly jump to a location given in the rtx LOC. */
...
...
@@ -2866,7 +2869,7 @@ expand_float (to, from, unsignedp)
correct its value by 2**bitwidth. */
do_pending_stack_adjust
();
emit_cmp_insn
(
from
,
const0_rtx
,
GE
,
0
,
GET_MODE
(
from
),
0
,
0
);
emit_cmp_insn
(
from
,
const0_rtx
,
GE
,
NULL_RTX
,
GET_MODE
(
from
),
0
,
0
);
emit_jump_insn
(
gen_bge
(
label
));
/* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
Rather than setting up a dconst_dot_5, let's hope SCO
...
...
@@ -3039,7 +3042,7 @@ expand_fix (to, from, unsignedp)
We only need to check all real modes, since we know we didn't find
anything with a wider integer mode. */
if
(
unsignedp
&&
GET_MODE_BITSIZE
(
GET_MODE
(
to
))
<=
HOST_BITS_PER_INT
)
if
(
unsignedp
&&
GET_MODE_BITSIZE
(
GET_MODE
(
to
))
<=
HOST_BITS_PER_
WIDE_
INT
)
for
(
fmode
=
GET_MODE
(
from
);
fmode
!=
VOIDmode
;
fmode
=
GET_MODE_WIDER_MODE
(
fmode
))
/* Make sure we won't lose significant bits doing this. */
...
...
@@ -3066,7 +3069,7 @@ expand_fix (to, from, unsignedp)
/* See if we need to do the subtraction. */
do_pending_stack_adjust
();
emit_cmp_insn
(
from
,
limit
,
GE
,
0
,
GET_MODE
(
from
),
0
,
0
);
emit_cmp_insn
(
from
,
limit
,
GE
,
NULL_RTX
,
GET_MODE
(
from
),
0
,
0
);
emit_jump_insn
(
gen_bge
(
lab1
));
/* If not, do the signed "fix" and branch around fixup code. */
...
...
@@ -3079,11 +3082,10 @@ expand_fix (to, from, unsignedp)
will often generate better code. */
emit_label
(
lab1
);
target
=
expand_binop
(
GET_MODE
(
from
),
sub_optab
,
from
,
limit
,
0
,
0
,
OPTAB_LIB_WIDEN
);
NULL_RTX
,
0
,
OPTAB_LIB_WIDEN
);
expand_fix
(
to
,
target
,
0
);
target
=
expand_binop
(
GET_MODE
(
to
),
xor_optab
,
to
,
gen_rtx
(
CONST_INT
,
VOIDmode
,
1
<<
(
bitsize
-
1
)),
GEN_INT
((
HOST_WIDE_INT
)
1
<<
(
bitsize
-
1
)),
to
,
1
,
OPTAB_LIB_WIDEN
);
if
(
target
!=
to
)
...
...