Commit 832a3292
authored Feb 29, 2004 by Kazu Hirata, committed by Kazu Hirata, Feb 29, 2004
* config/sh/sh.c: Fix formatting.
From-SVN: r78646
parent 508ea1c5
Showing 2 changed files with 227 additions and 219 deletions (+227 -219)

gcc/ChangeLog         +4   -0
gcc/config/sh/sh.c    +223 -219
gcc/ChangeLog
2004-02-28  Kazu Hirata  <kazu@cs.umass.edu>

	* config/sh/sh.c: Fix formatting.

2004-02-28  Kazu Hirata  <kazu@cs.umass.edu>

	* config/sh/sh.c: Convert to ISO-C.

2004-02-28  Andrew Pinski  <pinskia@physics.uc.edu>
...
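The two sh.c entries above describe mechanical cleanups: the earlier commit converted old-style (K&R) function definitions to ISO C prototypes, and this commit fixes the formatting to GNU style (return type on its own line, a space before the argument list). A minimal sketch of what that kind of conversion looks like, using a hypothetical helper rather than actual sh.c code:

/* Hypothetical illustration, not code from sh.c: the same helper written
   as an old-style (K&R) definition and as an ISO C definition in GNU
   formatting.  */

typedef struct rtx_def *rtx;   /* stand-in for GCC's rtx, for this sketch only */

/* Old-style definition of the kind removed by "Convert to ISO-C".  */
static int
shift_cost_old (insn, count)
     rtx insn;
     int count;
{
  return insn && count > 0 ? count : 0;
}

/* Equivalent ISO C definition in the formatting this commit enforces.  */
static int
shift_cost_new (rtx insn, int count)
{
  return insn && count > 0 ? count : 0;
}

int
main (void)
{
  return shift_cost_old ((rtx) 0, 1) + shift_cost_new ((rtx) 0, 2);
}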
gcc/config/sh/sh.c
...
@@ -1324,7 +1324,7 @@ output_branch (int logic, rtx insn, rtx *operands)
      output_asm_insn ("bra\t%l0", &op0);
      fprintf (asm_out_file, "\tnop\n");
-     (*targetm.asm_out.internal_label)(asm_out_file, "LF", label);
+     (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
      return "";
    }
...
@@ -1567,7 +1567,7 @@ shift_insns_rtx (rtx insn)
    case ASHIFT:
      return shift_insns[shift_count];
    default:
      abort ();
    }
}
...
@@ -1682,7 +1682,7 @@ addsubcosts (rtx x)
      /* Fall through. */
    default:
      return 5;
    }

  /* Any other constant requires a 2 cycle pc-relative load plus an
...
@@ -2089,7 +2089,7 @@ shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
    mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
  else
    mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
  /* Can this be expressed as a right shift / left shift pair? */
  lsb = ((mask ^ (mask - 1)) >> 1) + 1;
  right = exact_log2 (lsb);
  mask2 = ~(mask + lsb - 1);
...
@@ -2103,15 +2103,15 @@ shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
      int late_right = exact_log2 (lsb2);
      best_cost = shift_insns[left + late_right] + shift_insns[late_right];
    }
- /* Try to use zero extend */
+ /* Try to use zero extend. */
  if (mask2 == ~(lsb2 - 1))
    {
      int width, first;
      for (width = 8; width <= 16; width += 8)
        {
          /* Can we zero-extend right away? */
          if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
            {
              cost = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
...
@@ -2143,7 +2143,7 @@ shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
                  best_len = cost;
                  if (attrp)
                    attrp[2] = first;
                }
            }
        }
    }
...
@@ -2164,7 +2164,7 @@ shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
        }
    }
  /* Try to use a scratch register to hold the AND operand. */
  can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
  for (i = 0; i <= 2; i++)
    {
      if (i > right)
...
@@ -2229,7 +2229,7 @@ gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
  unsigned HOST_WIDE_INT mask;
  int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
  int right, total_shift;
  void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;

  right = attributes[0];
  total_shift = INTVAL (left_rtx) + right;
...
@@ -2246,10 +2246,10 @@ gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
  if (first < 0)
    {
      emit_insn ((mask << right) <= 0xff
                 ? gen_zero_extendqisi2 (dest,
                                         gen_lowpart (QImode, source))
                 : gen_zero_extendhisi2 (dest,
                                         gen_lowpart (HImode, source)));
      source = dest;
    }
  if (source != dest)
...
@@ -2269,8 +2269,8 @@ gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
    }
  if (first >= 0)
    emit_insn (mask <= 0xff
               ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
               : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
  if (total_shift > 0)
    {
      operands[2] = GEN_INT (total_shift);
...
@@ -2284,8 +2284,8 @@ gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
      /* If the topmost bit that matters is set, set the topmost bits
         that don't matter. This way, we might be able to get a shorter
         signed constant. */
      if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
        mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
    case 2:
      /* Don't expand fine-grained when combining, because that will
         make the pattern fail. */
...
@@ -2502,8 +2502,8 @@ gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
          gen_shifty_hi_op (ASHIFT, operands);
        }
      emit_insn (kind & 1
                 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
                 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
      if (kind <= 2)
        {
          if (shift2)
...
@@ -2824,7 +2824,7 @@ dump_table (rtx scan)
                {
                  lab = XEXP (ref, 0);
                  emit_insn_before (gen_consttable_window_end (lab),
                                    align_insn);
                }
              delete_insn (align_insn);
              align_insn = NULL_RTX;
...
@@ -3722,7 +3722,7 @@ barrier_align (rtx barrier_or_label)
         the table to the minimum for proper code alignment. */
      return ((TARGET_SMALLCODE
               || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
                   <= (unsigned) 1 << (CACHE_LOG - 2)))
              ? 1 << TARGET_SHMEDIA : align_jumps_log);
    }
...
@@ -4813,7 +4813,7 @@ push_regs (HARD_REG_SET *mask, int interrupt_handler)
      HARD_REG_SET unsaved;

      push (FPSCR_REG);
      COMPL_HARD_REG_SET (unsaved, *mask);
      fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
      skip_fpscr = 1;
    }
...
@@ -5103,8 +5103,8 @@ sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
        && ! (current_function_needs_context && i == STATIC_CHAIN_REGNUM)
        && ! (current_function_calls_eh_return
              && (i == EH_RETURN_STACKADJ_REGNO
                  || ((unsigned) i <= EH_RETURN_DATA_REGNO (0)
                      && (unsigned) i >= EH_RETURN_DATA_REGNO (3)))))
      schedule->temps[tmpx++] = i;
  entry->reg = -1;
  entry->mode = VOIDmode;
...
@@ -6003,7 +6003,7 @@ sh_builtin_saveregs (void)
      regno = first_floatreg;
      if (regno & 1)
        {
          emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
          mem = gen_rtx_MEM (SFmode, fpregs);
          set_mem_alias_set (mem, alias_set);
          emit_move_insn (mem,
...
@@ -6016,7 +6016,7 @@ sh_builtin_saveregs (void)
        {
          rtx mem;

          emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
          mem = gen_rtx_MEM (SFmode, fpregs);
          set_mem_alias_set (mem, alias_set);
          emit_move_insn (mem,
...
@@ -6402,11 +6402,11 @@ sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
                                           BASE_ARG_REG (mode)
                                           + (ROUND_REG (*ca, mode) ^ 1)),
                                      const0_rtx);
      rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
                                  gen_rtx_REG (SFmode,
                                               BASE_ARG_REG (mode)
                                               + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
                                  GEN_INT (4));
      return gen_rtx_PARALLEL (SCmode, gen_rtvec (2, r1, r2));
    }
...
@@ -6473,144 +6473,144 @@ void
sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
                         tree type, int named)
{
  if (ca->force_mem)
    ca->force_mem = 0;
  else if (TARGET_SH5)
    {
      tree type2 = (ca->byref && type
                    ? TREE_TYPE (type)
                    : type);
      enum machine_mode mode2 = (ca->byref && type
                                 ? TYPE_MODE (type2)
                                 : mode);
      int dwords = ((ca->byref
                     ? ca->byref
                     : mode2 == BLKmode
                     ? int_size_in_bytes (type2)
                     : GET_MODE_SIZE (mode2)) + 7) / 8;
      int numregs = MIN (dwords, NPARM_REGS (SImode)
                         - ca->arg_count[(int) SH_ARG_INT]);

      if (numregs)
        {
          ca->arg_count[(int) SH_ARG_INT] += numregs;
          if (TARGET_SHCOMPACT
              && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
            {
              ca->call_cookie
                |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
                                        - numregs, 1);
              /* N.B. We want this also for outgoing. */
              ca->stack_regs += numregs;
            }
          else if (ca->byref)
            {
              if (! ca->outgoing)
                ca->stack_regs += numregs;
              ca->byref_regs += numregs;
              ca->byref = 0;
              do
                ca->call_cookie
                  |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
                                          - numregs, 2);
              while (--numregs);
              ca->call_cookie
                |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
                                        - 1, 1);
            }
          else if (dwords > numregs)
            {
              int pushregs = numregs;

              if (TARGET_SHCOMPACT)
                ca->stack_regs += numregs;
              while (pushregs < NPARM_REGS (SImode) - 1
                     && (CALL_COOKIE_INT_REG_GET
                         (ca->call_cookie,
                          NPARM_REGS (SImode) - pushregs)
                         == 1))
                {
                  ca->call_cookie
                    &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
                                              - pushregs, 1);
                  pushregs++;
                }
              if (numregs == NPARM_REGS (SImode))
                ca->call_cookie
                  |= CALL_COOKIE_INT_REG (0, 1)
                  | CALL_COOKIE_STACKSEQ (numregs - 1);
              else
                ca->call_cookie
                  |= CALL_COOKIE_STACKSEQ (numregs);
            }
        }
      if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
          && (named || ! ca->prototype_p))
        {
          if (mode2 == SFmode && ca->free_single_fp_reg)
            ca->free_single_fp_reg = 0;
          else if (ca->arg_count[(int) SH_ARG_FLOAT]
                   < NPARM_REGS (SFmode))
            {
              int numfpregs
                = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
                       NPARM_REGS (SFmode)
                       - ca->arg_count[(int) SH_ARG_FLOAT]);

              ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;

              if (TARGET_SHCOMPACT && ! ca->prototype_p)
                {
                  if (ca->outgoing && numregs > 0)
                    do
                      {
                        ca->call_cookie
                          |= (CALL_COOKIE_INT_REG
                              (ca->arg_count[(int) SH_ARG_INT]
                               - numregs + ((numfpregs - 2) / 2),
                               4 + (ca->arg_count[(int) SH_ARG_FLOAT]
                                    - numfpregs) / 2));
                      }
                    while (numfpregs -= 2);
                }
              else if (mode2 == SFmode && (named)
                       && (ca->arg_count[(int) SH_ARG_FLOAT]
                           < NPARM_REGS (SFmode)))
                ca->free_single_fp_reg
                  = FIRST_FP_PARM_REG - numfpregs
                  + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
            }
        }

      return;
    }

  if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
    {
      /* Note that we've used the skipped register. */
      if (mode == SFmode && ca->free_single_fp_reg)
        {
          ca->free_single_fp_reg = 0;
          return;
        }
      /* When we have a DF after an SF, there's an SF register that get
         skipped in order to align the DF value. We note this skipped
         register, because the next SF value will use it, and not the
         SF that follows the DF. */
      if (mode == DFmode
          && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
        {
          ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
                                    + BASE_ARG_REG (mode));
        }
    }

  if (! (TARGET_SH4 || ca->renesas_abi)
      || PASS_IN_REG_P (*ca, mode, type))
    (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
     = (ROUND_REG (*ca, mode)
        + (mode == BLKmode
           ? ROUND_ADVANCE (int_size_in_bytes (type))
           : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
}

/* The Renesas calling convention doesn't quite fit into this scheme since
...
@@ -7252,7 +7252,7 @@ and_operand (rtx op, enum machine_mode mode)
      && mode == DImode
      && GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_J16 (INTVAL (op)))
    return 1;

  return 0;
}
...
@@ -7429,7 +7429,8 @@ equality_comparison_operator (rtx op, enum machine_mode mode)
          && (GET_CODE (op) == EQ || GET_CODE (op) == NE));
}

-int greater_comparison_operator (rtx op, enum machine_mode mode)
+int
+greater_comparison_operator (rtx op, enum machine_mode mode)
{
  if (mode != VOIDmode && GET_MODE (op) == mode)
    return 0;
...
@@ -7445,7 +7446,8 @@ int greater_comparison_operator (rtx op, enum machine_mode mode)
    }
}

-int less_comparison_operator (rtx op, enum machine_mode mode)
+int
+less_comparison_operator (rtx op, enum machine_mode mode)
{
  if (mode != VOIDmode && GET_MODE (op) == mode)
    return 0;
...
@@ -7508,7 +7510,7 @@ mextr_bit_offset (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
  if (GET_CODE (op) != CONST_INT)
    return 0;
  i = INTVAL (op);
  return i >= 1 * 8 && i <= 7 * 8 && (i & 7) == 0;
}

int
...
@@ -7571,7 +7573,7 @@ sh_rep_vec (rtx v, enum machine_mode mode)
  if (GET_MODE_UNIT_SIZE (mode) == 1)
    {
      y = XVECEXP (v, 0, i);
      for (i -= 2; i >= 0; i -= 2)
        if (! rtx_equal_p (XVECEXP (v, 0, i + 1), x)
            || ! rtx_equal_p (XVECEXP (v, 0, i), y))
          return 0;
...
@@ -7791,7 +7793,7 @@ void
expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
{
  emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
                        get_fpscr_rtx ()));
}

/* ??? gcc does flow analysis strictly after common subexpression
...
@@ -8029,7 +8031,7 @@ nonpic_symbol_mentioned_p (rtx x)
          || XINT (x, 1) == UNSPEC_GOTTPOFF
          || XINT (x, 1) == UNSPEC_DTPOFF
          || XINT (x, 1) == UNSPEC_PLT))
    return 0;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
...
@@ -8182,15 +8184,14 @@ int
sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
                         unsigned int new_reg)
{
  /* Interrupt functions can only use registers that have already been
     saved by the prologue, even if they would normally be
     call-clobbered. */
  if (sh_cfun_interrupt_handler_p () && ! regs_ever_live[new_reg])
    return 0;

  return 1;
}

/* Function to update the integer COST
...
@@ -8266,7 +8267,7 @@ sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
          && get_attr_type (insn) == TYPE_DYN_SHIFT
          && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
          && reg_overlap_mentioned_p (SET_DEST (PATTERN (dep_insn)),
                                      XEXP (SET_SRC (single_set (insn)),
                                            1)))
        cost++;
      /* When an LS group instruction with a latency of less than
...
@@ -8338,7 +8339,7 @@ sh_pr_n_sets (void)
/* This Function returns nonzero if the DFA based scheduler interface
   is to be used. At present this is supported for the SH4 only. */
static int
-sh_use_dfa_interface(void)
+sh_use_dfa_interface (void)
{
  if (TARGET_HARD_SH4)
    return 1;
...
@@ -8349,7 +8350,7 @@ sh_use_dfa_interface(void)
/* This function returns "2" to indicate dual issue for the SH4
   processor. To be used by the DFA pipeline description. */
static int
sh_issue_rate (void)
{
  if (TARGET_SUPERSCALAR)
    return 2;
...
@@ -8467,12 +8468,15 @@ swap_reorder (rtx *a, int n)
  a[i + 1] = insn;
}

#define SCHED_REORDER(READY, N_READY) \
-  do { if ((N_READY) == 2) \
-        swap_reorder (READY, N_READY); \
-      else if ((N_READY) > 2) \
-        qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); } \
-  while (0)
+  do \
+    { \
+      if ((N_READY) == 2) \
+        swap_reorder (READY, N_READY); \
+      else if ((N_READY) > 2) \
+        qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
+    } \
+  while (0)

/* Sort the ready list READY by ascending priority, using the SCHED_REORDER
   macro. */
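The hunk above only reflows the macro body; both versions keep the statements inside a do { ... } while (0) wrapper. As a side note (not part of this commit), a multi-statement macro is written that way so it expands to exactly one statement and composes safely with an unbraced if/else. A minimal sketch with a hypothetical macro:

#include <stdio.h>

/* Hypothetical macro, for illustration only: the do { ... } while (0)
   wrapper makes the expansion a single statement.  */
#define REPORT_AND_RESET(counter)            \
  do                                         \
    {                                        \
      printf ("count = %d\n", (counter));    \
      (counter) = 0;                         \
    }                                        \
  while (0)

int
main (void)
{
  int hits = 3;

  if (hits > 0)
    REPORT_AND_RESET (hits);   /* expands as one statement, so the else below still binds to the if */
  else
    printf ("no hits\n");

  return hits;
}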
...
@@ -8631,22 +8635,22 @@ sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
                  int *sort_p)
{
  if (reload_completed)
    return 0;

  if (skip_cycles)
    {
      if ((clock_var - last_clock_var) < MAX_SKIPS)
        {
          *sort_p = 0;
          return 1;
        }
      /* If this is the last cycle we are skipping, allow reordering of R. */
      if ((clock_var - last_clock_var) == MAX_SKIPS)
        {
          *sort_p = 1;
          return 1;
        }
    }

  skip_cycles = 0;
...
@@ -9195,16 +9199,16 @@ sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
{
  if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
    {
      if (TARGET_LITTLE_ENDIAN)
        {
          if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
            return reg_classes_intersect_p (DF_REGS, class);
        }
      else
        {
          if (GET_MODE_SIZE (from) < 8)
            return reg_classes_intersect_p (DF_HI_REGS, class);
        }
    }
  return 0;
}
...
@@ -9252,15 +9256,15 @@ sh_register_move_cost (enum machine_mode mode,
    return 4;

  if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
    return 9;

  if ((REGCLASS_HAS_FP_REG (dstclass)
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (REGCLASS_HAS_GENERAL_REG (dstclass)
          && REGCLASS_HAS_FP_REG (srcclass)))
    return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
            * ((GET_MODE_SIZE (mode) + 7) / 8U));

  if ((dstclass == FPUL_REGS
       && REGCLASS_HAS_GENERAL_REG (srcclass))
...
@@ -9430,7 +9434,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
        abort (); /* FIXME */
      emit_load_ptr (scratch0, offset_addr);

      if (Pmode != ptr_mode)
        scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
      emit_insn (gen_add2_insn (this, scratch0));
    }
...
@@ -9469,7 +9473,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
  sh_reorg ();

  if (optimize > 0 && flag_delayed_branch)
    dbr_schedule (insns, dump_file);
  shorten_branches (insns);
  final_start_function (insns, file, 1);
  final (insns, file, 1, 0);
...