f5d8c9f4
Commit
f5d8c9f4
authored
Dec 13, 1999
by
Bernd Schmidt
Committed by
Bernd Schmidt
Dec 13, 1999
Simplify reload register allocation

From-SVN: r30890
parent 2cf4028a
Showing 6 changed files with 391 additions and 1105 deletions:

  gcc/ChangeLog       +43  -0
  gcc/hard-reg-set.h   +5  -1
  gcc/regclass.c       +8  -0
  gcc/reload.c         +0  -64
  gcc/reload.h         +3  -19
  gcc/reload1.c      +332  -1021
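Before the per-file diffs, it may help to see the core of the change in isolation. The old code counted per-class "needs" for register groups and non-groups and then tried to satisfy the counts; the new code gives every hard register a spill cost derived from the reference counts of the pseudos living in it, and each reload simply takes the cheapest usable run of consecutive registers. The following is a hedged, self-contained sketch of that selection loop; NREGS, not_usable, and main are made-up stand-ins, and the cost arrays are only modelled on spill_cost/spill_add_cost, not the real reload1.c data structures:

#include <limits.h>
#include <stdio.h>

#define NREGS 8                   /* hypothetical number of hard registers */

static int spill_cost[NREGS];     /* cost of kicking out the pseudo in reg r */
static int spill_add_cost[NREGS]; /* extra cost for trailing regs of a run */
static int not_usable[NREGS];     /* 1 if reg r may not be used for spilling */

/* Return the cheapest run of WIDTH consecutive usable registers, or -1.
   This mirrors the loop structure of the new find_reg: the first reg of a
   run is charged spill_cost, the rest spill_add_cost.  */
static int
find_cheapest_run (int width)
{
  int best_reg = -1, best_cost = INT_MAX;
  for (int r = 0; r + width <= NREGS; r++)
    {
      int cost = spill_cost[r];
      int ok = !not_usable[r];
      for (int j = 1; j < width; j++)
        {
          cost += spill_add_cost[r + j];
          ok &= !not_usable[r + j];
        }
      if (ok && cost < best_cost)
        {
          best_cost = cost;
          best_reg = r;
        }
    }
  return best_reg;
}

int
main (void)
{
  spill_cost[0] = 5;  /* r0 holds a heavily used pseudo */
  not_usable[3] = 1;  /* r3 is live across the insn */
  printf ("single reg: r%d\n", find_cheapest_run (1));  /* prints r1 */
  printf ("pair:       r%d\n", find_cheapest_run (2));  /* prints r1 */
  return 0;
}

In the real find_reg in gcc/reload1.c below, "usable" additionally excludes registers outside the reload's class, registers already claimed by conflicting reloads, and bad_spill_regs.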
gcc/ChangeLog

1999-12-10  Bernd Schmidt  <bernds@cygnus.co.uk>

	* hard-reg-set.h (inv_reg_alloc_order): Declare if REG_ALLOC_ORDER
	is defined.
	* regclass.c (inv_reg_alloc_order): New array.
	(regclass_init): If REG_ALLOC_ORDER is defined, initialize it.
	* reload.h (struct insn_chain): Delete fields group_size,
	group_mode, counted_for_groups, counted_for_nongroups.  Add fields
	rld and n_reloads.
	* reload.c (push_secondary_reload): Don't set nongroup field of
	new reloads.
	(push_reload): Likewise.
	(find_reloads): Delete code to compute nongroup fields.
	* reload1.c (reload_insn_firstobj): New static variable.
	(pseudos_counted, spilled_pseudos): Now of type regset_head.  All
	users changed.
	(calculate_needs, find_tworeg_group, find_group, possible_group_p,
	count_possible_groups, modes_equiv_for_class_p, new_spill_reg,
	dump_needs, maybe_mark_pseudo_spilled, hard_reg_use_compare):
	Delete functions.
	(count_pseudo, select_reload_regs, copy_reloads, find_reg): New
	functions.
	(struct hard_reg_n_uses): Deleted.
	(potential_reload_regs): Deleted.
	(init_reload): Initialize spilled_pseudos and pseudos_counted.
	(reload): Don't try to allocate reload registers if we already
	know we have to make another pass.  Call select_reload_regs.
	Free memory starting with reload_firstobj when starting another
	pass.  Don't allocate spilled_pseudos.
	(calculate_needs_all_insns): Call copy_reloads for an insn that
	needs reloads; don't call calculate_needs.
	(spill_cost): New static array.
	(used_spill_regs_local): New static variable.
	(order_regs_for_reload): Rewrite to lose hard_reg_n_uses and the
	code to compute potential_reload_regs.
	(find_reload_regs): Completely rewritten to use find_reg.
	(allocate_reload_reg): Don't test counted_for_groups or
	counted_for_nongroups.  Lose NOERROR arg and code to give an
	error; all callers changed.
	(choose_reload_regs): Add fallback code that uses the existing
	register allocation from find_reload_regs.

Mon Dec 13 00:54:14 1999  Philippe De Muyter  <phdm@macqel.be>

	* flow.c (create_edge_list): Cast xmalloc return value.

...
gcc/hard-reg-set.h
@@ -429,10 +429,14 @@ extern HARD_REG_SET call_fixed_reg_set;

 extern char global_regs[FIRST_PSEUDO_REGISTER];

+#ifdef REG_ALLOC_ORDER
 /* Table of register numbers in the order in which to try to use them.  */
-#ifdef REG_ALLOC_ORDER /* Avoid undef symbol in certain broken linkers.  */
 extern int reg_alloc_order[FIRST_PSEUDO_REGISTER];
+
+/* The inverse of reg_alloc_order.  */
+extern int inv_reg_alloc_order[FIRST_PSEUDO_REGISTER];
 #endif

 /* For each reg class, a HARD_REG_SET saying which registers are in it.  */
gcc/regclass.c
@@ -118,6 +118,9 @@ char global_regs[FIRST_PSEUDO_REGISTER];

 /* Table of register numbers in the order in which to try to use them.  */
 #ifdef REG_ALLOC_ORDER
 int reg_alloc_order[FIRST_PSEUDO_REGISTER] = REG_ALLOC_ORDER;
+
+/* The inverse of reg_alloc_order.  */
+int inv_reg_alloc_order[FIRST_PSEUDO_REGISTER];
 #endif

 /* For each reg class, a HARD_REG_SET saying which registers are in it.  */

@@ -251,6 +254,11 @@ init_reg_sets ()

   /* Do any additional initialization regsets may need */
   INIT_ONCE_REG_SET ();
+
+#ifdef REG_ALLOC_ORDER
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    inv_reg_alloc_order[reg_alloc_order[i]] = i;
+#endif
 }

 /* After switches have been processed, which perhaps alter
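The inverse table exists so that code can ask "which of these two registers comes earlier in REG_ALLOC_ORDER?" in O(1) instead of scanning the order array. A toy illustration of the same inversion, on a hypothetical 4-register target whose contents are made up:

#include <stdio.h>

/* Hypothetical allocation order: prefer r2, then r0, r3, r1.  */
static const int reg_alloc_order[4] = { 2, 0, 3, 1 };
static int inv_reg_alloc_order[4];

/* Invert the permutation once, exactly as init_reg_sets now does.  */
static void
init_inverse (void)
{
  for (int i = 0; i < 4; i++)
    inv_reg_alloc_order[reg_alloc_order[i]] = i;
}

/* O(1) preference test: a lower inverse index means "try me first".  */
static int
prefer (int a, int b)
{
  return inv_reg_alloc_order[a] < inv_reg_alloc_order[b] ? a : b;
}

int
main (void)
{
  init_inverse ();
  printf ("prefer r0 or r1? r%d\n", prefer (0, 1));  /* prints r0 */
  return 0;
}

This is exactly how the new find_reg breaks cost ties between candidate spill registers when REG_ALLOC_ORDER is defined.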
gcc/reload.c
@@ -465,7 +465,6 @@ push_secondary_reload (in_p, x, opnum, optional, reload_class, reload_mode,
       rld[t_reload].outmode = ! in_p ? t_mode : VOIDmode;
       rld[t_reload].reg_rtx = 0;
       rld[t_reload].optional = optional;
-      rld[t_reload].nongroup = 0;
       rld[t_reload].inc = 0;
       /* Maybe we could combine these, but it seems too tricky.  */
       rld[t_reload].nocombine = 1;

@@ -535,7 +534,6 @@ push_secondary_reload (in_p, x, opnum, optional, reload_class, reload_mode,
       rld[s_reload].outmode = ! in_p ? mode : VOIDmode;
       rld[s_reload].reg_rtx = 0;
       rld[s_reload].optional = optional;
-      rld[s_reload].nongroup = 0;
       rld[s_reload].inc = 0;
       /* Maybe we could combine these, but it seems too tricky.  */
       rld[s_reload].nocombine = 1;

@@ -1246,7 +1244,6 @@ push_reload (in, out, inloc, outloc, class,
       rld[i].outmode = outmode;
       rld[i].reg_rtx = 0;
       rld[i].optional = optional;
-      rld[i].nongroup = 0;
       rld[i].inc = 0;
       rld[i].nocombine = 0;
       rld[i].in_reg = inloc ? *inloc : 0;

@@ -4119,67 +4116,6 @@ find_reloads (insn, replace, ind_levels, live_known, reload_reg_p)
     abort ();
 #endif

-  /* Set which reloads must use registers not used in any group.  Start
-     with those that conflict with a group and then include ones that
-     conflict with ones that are already known to conflict with a group.  */
-
-  changed = 0;
-  for (i = 0; i < n_reloads; i++)
-    {
-      enum machine_mode mode = rld[i].inmode;
-      enum reg_class class = rld[i].class;
-      int size;
-
-      if (GET_MODE_SIZE (rld[i].outmode) > GET_MODE_SIZE (mode))
-	mode = rld[i].outmode;
-      size = CLASS_MAX_NREGS (class, mode);
-
-      if (size == 1)
-	for (j = 0; j < n_reloads; j++)
-	  if ((CLASS_MAX_NREGS (rld[j].class,
-				(GET_MODE_SIZE (rld[j].outmode)
-				 > GET_MODE_SIZE (rld[j].inmode))
-				? rld[j].outmode : rld[j].inmode)
-	       > 1)
-	      && ! rld[j].optional
-	      && (rld[j].in != 0 || rld[j].out != 0 || rld[j].secondary_p)
-	      && reloads_conflict (i, j)
-	      && reg_classes_intersect_p (class, rld[j].class))
-	    {
-	      rld[i].nongroup = 1;
-	      changed = 1;
-	      break;
-	    }
-    }
-
-  while (changed)
-    {
-      changed = 0;
-
-      for (i = 0; i < n_reloads; i++)
-	{
-	  enum machine_mode mode = rld[i].inmode;
-	  enum reg_class class = rld[i].class;
-	  int size;
-
-	  if (GET_MODE_SIZE (rld[i].outmode) > GET_MODE_SIZE (mode))
-	    mode = rld[i].outmode;
-	  size = CLASS_MAX_NREGS (class, mode);
-
-	  if (! rld[i].nongroup && size == 1)
-	    for (j = 0; j < n_reloads; j++)
-	      if (rld[j].nongroup
-		  && reloads_conflict (i, j)
-		  && reg_classes_intersect_p (class, rld[j].class))
-		{
-		  rld[i].nongroup = 1;
-		  changed = 1;
-		  break;
-		}
-	}
-    }
-
   /* Compute reload_mode and reload_nregs.  */
   for (i = 0; i < n_reloads; i++)
     {
gcc/reload.h
@@ -231,25 +231,9 @@ struct insn_chain
   regset live_before;
   regset live_after;

-  /* For each class, size of group of consecutive regs
-     that is needed for the reloads of this class.  */
-  char group_size[N_REG_CLASSES];
-  /* For each class, the machine mode which requires consecutive
-     groups of regs of that class.
-     If two different modes ever require groups of one class,
-     they must be the same size and equally restrictive for that class,
-     otherwise we can't handle the complexity.  */
-  enum machine_mode group_mode[N_REG_CLASSES];
-
-  /* Indicates if a register was counted against the need for
-     groups.  0 means it can count against max_nongroup instead.  */
-  HARD_REG_SET counted_for_groups;
-
-  /* Indicates if a register was counted against the need for
-     non-groups.  0 means it can become part of a new group.
-     During choose_reload_regs, 1 here means don't use this reg
-     as part of a group, even if it seems to be otherwise ok.  */
-  HARD_REG_SET counted_for_nongroups;
+  /* Copies of the global variables computed by find_reloads.  */
+  struct reload *rld;
+  int n_reloads;

   /* Indicates which registers have already been used for spills.  */
   HARD_REG_SET used_spill_regs;
gcc/reload1.c
@@ -215,13 +215,6 @@ static HARD_REG_SET used_spill_regs;
    a round-robin fashion.  */
 static int last_spill_reg;

-/* Describes order of preference for putting regs into spill_regs.
-   Contains the numbers of all the hard regs, in order most preferred first.
-   This order is different for each function.
-   It is set up by order_regs_for_reload.
-   Empty elements at the end contain -1.  */
-static short potential_reload_regs[FIRST_PSEUDO_REGISTER];
-
 /* Nonzero if indirect addressing is supported on the machine; this means
    that spilling (REG n) does not require reloading it into a register in
    order to do (MEM (REG n)) or (MEM (PLUS (REG n) (CONST_INT c))).  The
@@ -245,7 +238,11 @@ static rtx spill_stack_slot[FIRST_PSEUDO_REGISTER];
 static int spill_stack_slot_width[FIRST_PSEUDO_REGISTER];

 /* Record which pseudos needed to be spilled.  */
-static regset spilled_pseudos;
+static regset_head spilled_pseudos;
+
+/* Used for communication between order_regs_for_reload and count_pseudo.
+   Used to avoid counting one pseudo twice.  */
+static regset_head pseudos_counted;

 /* First uid used by insns created by reload in this function.
    Used in find_equiv_reg.  */
@@ -275,9 +272,13 @@ struct obstack reload_obstack;
 char *reload_startobj;

 /* The point after all insn_chain structures.  Used to quickly deallocate
-   memory used while processing one insn.  */
+   memory allocated in copy_reloads during calculate_needs_all_insns.  */
 char *reload_firstobj;

+/* This points before all local rtl generated by register elimination.
+   Used to quickly free all memory after processing one insn.  */
+static char *reload_insn_firstobj;
+
 #define obstack_chunk_alloc xmalloc
 #define obstack_chunk_free free
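The two watermarks give reload a cheap stack-like discipline on one obstack: reload_firstobj marks the start of a whole pass's allocations, reload_insn_firstobj the start of one insn's scratch data, and obstack_free releases everything allocated after a given mark. A self-contained sketch of that pattern using the standard GNU obstack API (malloc stands in for gcc's xmalloc; the sizes are arbitrary):

#include <obstack.h>
#include <stdlib.h>

#define obstack_chunk_alloc malloc
#define obstack_chunk_free free

int
main (void)
{
  struct obstack ob;
  obstack_init (&ob);

  /* A zero-size allocation records a watermark, the way reload_firstobj
     and reload_insn_firstobj do.  */
  char *firstobj = obstack_alloc (&ob, 0);
  obstack_alloc (&ob, 128);           /* long-lived per-pass data */

  char *insn_firstobj = obstack_alloc (&ob, 0);
  obstack_alloc (&ob, 64);            /* scratch data for one insn */

  /* Freeing back to a watermark releases everything allocated after it.  */
  obstack_free (&ob, insn_firstobj);  /* drop only the per-insn scratch */
  obstack_free (&ob, firstobj);       /* drop the whole pass's data */

  obstack_free (&ob, NULL);           /* release the obstack itself */
  return 0;
}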
@@ -365,32 +366,17 @@ static int (*offsets_at)[NUM_ELIMINABLE_REGS];
 static int num_labels;

-struct hard_reg_n_uses
-{
-  int regno;
-  unsigned int uses;
-};
-
 static void maybe_fix_stack_asms	PROTO((void));
+static void copy_reloads		PROTO((struct insn_chain *));
 static void calculate_needs_all_insns	PROTO((int));
-static void calculate_needs		PROTO((struct insn_chain *));
-static void find_reload_regs		PROTO((struct insn_chain *chain,
-					       FILE *));
-static void find_tworeg_group		PROTO((struct insn_chain *, int,
-					       FILE *));
-static void find_group			PROTO((struct insn_chain *, int,
-					       FILE *));
-static int possible_group_p		PROTO((struct insn_chain *, int));
-static void count_possible_groups	PROTO((struct insn_chain *, int));
-static int modes_equiv_for_class_p	PROTO((enum machine_mode,
-					       enum machine_mode,
-					       enum reg_class));
+static int find_reg			PROTO((struct insn_chain *, int,
+					       FILE *));
+static void find_reload_regs		PROTO((struct insn_chain *, FILE *));
+static void select_reload_regs		PROTO((FILE *));
 static void delete_caller_save_insns	PROTO((void));

 static void spill_failure		PROTO((rtx));
-static void new_spill_reg		PROTO((struct insn_chain *, int, int,
-					       int, FILE *));
-static void maybe_mark_pseudo_spilled	PROTO((int));
+static void count_spilled_pseudo	PROTO((int, int, int));
 static void delete_dead_insn		PROTO((rtx));
 static void alter_reg			PROTO((int, int));
 static void set_label_offsets		PROTO((rtx, rtx, int));
@@ -409,8 +395,7 @@ static void spill_hard_reg PROTO((int, FILE *, int));
 static int finish_spills	PROTO((int, FILE *));
 static void ior_hard_reg_set	PROTO((HARD_REG_SET *, HARD_REG_SET *));
 static void scan_paradoxical_subregs	PROTO((rtx));
-static int hard_reg_use_compare	PROTO((const PTR, const PTR));
-static void count_pseudo	PROTO((struct hard_reg_n_uses *, int));
+static void count_pseudo	PROTO((int));
 static void order_regs_for_reload	PROTO((struct insn_chain *));
 static void reload_as_needed	PROTO((int));
 static void forget_old_reloads_1	PROTO((rtx, rtx, void *));
@@ -420,10 +405,12 @@ static void mark_reload_reg_in_use PROTO((int, int, enum reload_type,
 static void clear_reload_reg_in_use	PROTO((int, int, enum reload_type,
					       enum machine_mode));
 static int reload_reg_free_p		PROTO((int, int, enum reload_type));
-static int reload_reg_free_for_value_p	PROTO((int, int, enum reload_type, rtx,
-					       rtx, int, int));
+static int reload_reg_free_for_value_p	PROTO((int, int, enum reload_type,
+					       rtx, rtx, int, int));
 static int reload_reg_reaches_end_p	PROTO((int, int, enum reload_type));
-static int allocate_reload_reg		PROTO((struct insn_chain *, int, int,
-					       int));
+static int allocate_reload_reg		PROTO((struct insn_chain *, int, int));
 static void failed_reload		PROTO((rtx, int));
 static int set_reload_reg		PROTO((int, int));
 static void choose_reload_regs_init	PROTO((struct insn_chain *, rtx *));
 static void choose_reload_regs		PROTO((struct insn_chain *));
 static void merge_assigned_reloads	PROTO((rtx));
@@ -509,6 +496,9 @@ init_reload ()
   /* Initialize obstack for our rtl allocation.  */
   gcc_obstack_init (&reload_obstack);
   reload_startobj = (char *) obstack_alloc (&reload_obstack, 0);
+
+  INIT_REG_SET (&spilled_pseudos);
+  INIT_REG_SET (&pseudos_counted);
 }

 /* List of insn chains that are currently unused.  */
@@ -817,8 +807,6 @@ reload (first, global, dumpfile)
   /* Initialize to -1, which means take the first spill register.  */
   last_spill_reg = -1;

-  spilled_pseudos = ALLOCA_REG_SET ();
-
   /* Spill any hard regs that we know we can't eliminate.  */
   CLEAR_HARD_REG_SET (used_spill_regs);
   for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
@@ -842,7 +830,6 @@ reload (first, global, dumpfile)
     {
       int something_changed;
       int did_spill;
-      struct insn_chain *chain;

       HOST_WIDE_INT starting_frame_size;
@@ -928,7 +915,7 @@ reload (first, global, dumpfile)
       calculate_needs_all_insns (global);

-      CLEAR_REG_SET (spilled_pseudos);
+      CLEAR_REG_SET (&spilled_pseudos);
       did_spill = 0;

       something_changed = 0;
@@ -961,23 +948,25 @@ reload (first, global, dumpfile)
	    }
	}

       CLEAR_HARD_REG_SET (used_spill_regs);
-      /* Try to satisfy the needs for each insn.  */
-      for (chain = insns_need_reload; chain != 0;
-	   chain = chain->next_need_reload)
-	find_reload_regs (chain, dumpfile);
-
-      if (failure)
-	goto failed;
-
-      if (insns_need_reload != 0 || did_spill)
-	something_changed |= finish_spills (global, dumpfile);
+      /* No point in trying to select reload registers if we know we're
+	 going to re-run everything again.  */
+      if (! something_changed)
+	{
+	  select_reload_regs (dumpfile);
+	  if (failure)
+	    goto failed;
+
+	  if (insns_need_reload != 0 || did_spill)
+	    something_changed |= finish_spills (global, dumpfile);
+	}

       if (! something_changed)
	break;

       if (caller_save_needed)
	delete_caller_save_insns ();
+
+      obstack_free (&reload_obstack, reload_firstobj);
     }

 /* If global-alloc was run, notify it of any register eliminations we have
@@ -1049,6 +1038,7 @@ reload (first, global, dumpfile)
      and we decide not to abort about it.  */
 failed:

+  CLEAR_REG_SET (&spilled_pseudos);
   reload_in_progress = 0;

   /* Now eliminate all pseudo regs by modifying them into
@@ -1194,8 +1184,6 @@ reload (first, global, dumpfile)
   free (pseudo_previous_regs);
   free (pseudo_forbidden_regs);

-  FREE_REG_SET (spilled_pseudos);
-
   CLEAR_HARD_REG_SET (used_spill_regs);
   for (i = 0; i < n_spills; i++)
     SET_HARD_REG_BIT (used_spill_regs, spill_regs[i]);
@@ -1320,6 +1308,19 @@ maybe_fix_stack_asms ()
 #endif
 }

+/* Copy the global variables n_reloads and rld into the corresponding elts
+   of CHAIN.  */
+static void
+copy_reloads (chain)
+     struct insn_chain *chain;
+{
+  chain->n_reloads = n_reloads;
+  chain->rld
+    = (struct reload *) obstack_alloc (&reload_obstack,
+				       n_reloads * sizeof (struct reload));
+  memcpy (chain->rld, rld, n_reloads * sizeof (struct reload));
+  reload_insn_firstobj = (char *) obstack_alloc (&reload_obstack, 0);
+}
+
 /* Walk the chain of insns, and determine for each whether it needs reloads
    and/or eliminations.  Build the corresponding insns_need_reload list, and
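copy_reloads snapshots the find_reloads globals into per-insn storage so that find_reload_regs can restore them, insn by insn, long after find_reloads has moved on. A stripped-down sketch of the snapshot/restore pair; struct reload is reduced to a stub, the array sizes are hypothetical, malloc stands in for the obstack, and error handling is omitted:

#include <stdlib.h>
#include <string.h>

/* Stub stand-ins for the reload globals (the real struct reload is far
   bigger).  */
struct reload { int in, out, nregs, regno; };
static struct reload rld[30];
static int n_reloads;

struct insn_chain_stub { struct reload *rld; int n_reloads; };

/* Snapshot the globals into CHAIN, as copy_reloads does.  */
static void
snapshot (struct insn_chain_stub *chain)
{
  chain->n_reloads = n_reloads;
  chain->rld = malloc (n_reloads * sizeof (struct reload));
  memcpy (chain->rld, rld, n_reloads * sizeof (struct reload));
}

/* Restore them when this insn is processed again, as the new
   find_reload_regs does before choosing registers.  */
static void
restore (struct insn_chain_stub *chain)
{
  n_reloads = chain->n_reloads;
  memcpy (rld, chain->rld, n_reloads * sizeof (struct reload));
}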
@@ -1333,11 +1334,13 @@ calculate_needs_all_insns (global)
   something_needs_elimination = 0;

+  reload_insn_firstobj = (char *) obstack_alloc (&reload_obstack, 0);
   for (chain = reload_insn_chain; chain != 0; chain = chain->next)
     {
       rtx insn = chain->insn;

-      /* Clear out the shortcuts, in case they were set last time through.  */
+      /* Clear out the shortcuts.  */
+      chain->n_reloads = 0;
       chain->need_elim = 0;
       chain->need_reload = 0;
       chain->need_operand_change = 0;
@@ -1407,7 +1410,7 @@ calculate_needs_all_insns (global)
	  /* Discard any register replacements done.  */
	  if (did_elimination)
	    {
-	      obstack_free (&reload_obstack, reload_firstobj);
+	      obstack_free (&reload_obstack, reload_insn_firstobj);
	      PATTERN (insn) = old_body;
	      INSN_CODE (insn) = old_code;
	      REG_NOTES (insn) = old_notes;
@@ -1418,611 +1421,332 @@ calculate_needs_all_insns (global)
	  if (n_reloads != 0)
	    {
+	      copy_reloads (chain);
	      *pprev_reload = chain;
	      pprev_reload = &chain->next_need_reload;
-
-	      calculate_needs (chain);
	    }
	}
    }
   *pprev_reload = 0;
 }

-/* Compute the most additional registers needed by one instruction,
-   given by CHAIN.  Collect information separately for each class of regs.
-
-   To compute the number of reload registers of each class needed for an
-   insn, we must simulate what choose_reload_regs can do.  We do this by
-   splitting an insn into an "input" and an "output" part.  RELOAD_OTHER
-   reloads are used in both.  The input part uses those reloads,
-   RELOAD_FOR_INPUT reloads, which must be live over the entire input section
-   of reloads, and the maximum of all the RELOAD_FOR_INPUT_ADDRESS and
-   RELOAD_FOR_OPERAND_ADDRESS reloads, which conflict with the inputs.
-
-   The registers needed for output are RELOAD_OTHER and RELOAD_FOR_OUTPUT,
-   which are live for the entire output portion, and the maximum of all the
-   RELOAD_FOR_OUTPUT_ADDRESS reloads for each operand.
-
-   The total number of registers needed is the maximum of the
-   inputs and outputs.  */
-
-static void
-calculate_needs (chain)
-     struct insn_chain *chain;
-{
-  int i;
-
-  /* Each `struct needs' corresponds to one RELOAD_... type.  */
-  struct {
-    struct needs other;
-    struct needs input;
-    struct needs output;
-    struct needs insn;
-    struct needs other_addr;
-    struct needs op_addr;
-    struct needs op_addr_reload;
-    struct needs in_addr[MAX_RECOG_OPERANDS];
-    struct needs in_addr_addr[MAX_RECOG_OPERANDS];
-    struct needs out_addr[MAX_RECOG_OPERANDS];
-    struct needs out_addr_addr[MAX_RECOG_OPERANDS];
-  } insn_needs;
-
-  bzero ((char *) chain->group_size, sizeof chain->group_size);
-  for (i = 0; i < N_REG_CLASSES; i++)
-    chain->group_mode[i] = VOIDmode;
-  bzero ((char *) &insn_needs, sizeof insn_needs);
-
-  /* Count each reload once in every class
-     containing the reload's own class.  */
-
-  for (i = 0; i < n_reloads; i++)
-    {
-      register enum reg_class *p;
-      enum reg_class class = rld[i].class;
-      int size;
-      enum machine_mode mode;
-      struct needs *this_needs;
-
-      /* Don't count the dummy reloads, for which one of the
-	 regs mentioned in the insn can be used for reloading.
-	 Don't count optional reloads.
-	 Don't count reloads that got combined with others.  */
-      if (rld[i].reg_rtx != 0
-	  || rld[i].optional != 0
-	  || (rld[i].out == 0 && rld[i].in == 0
-	      && ! rld[i].secondary_p))
-	continue;
-
-      mode = rld[i].mode;
-      size = rld[i].nregs;
-
-      /* Decide which time-of-use to count this reload for.  */
-      switch (rld[i].when_needed)
-	{
-	case RELOAD_OTHER:
-	  this_needs = &insn_needs.other;
-	  break;
-	case RELOAD_FOR_INPUT:
-	  this_needs = &insn_needs.input;
-	  break;
-	case RELOAD_FOR_OUTPUT:
-	  this_needs = &insn_needs.output;
-	  break;
-	case RELOAD_FOR_INSN:
-	  this_needs = &insn_needs.insn;
-	  break;
-	case RELOAD_FOR_OTHER_ADDRESS:
-	  this_needs = &insn_needs.other_addr;
-	  break;
-	case RELOAD_FOR_INPUT_ADDRESS:
-	  this_needs = &insn_needs.in_addr[rld[i].opnum];
-	  break;
-	case RELOAD_FOR_INPADDR_ADDRESS:
-	  this_needs = &insn_needs.in_addr_addr[rld[i].opnum];
-	  break;
-	case RELOAD_FOR_OUTPUT_ADDRESS:
-	  this_needs = &insn_needs.out_addr[rld[i].opnum];
-	  break;
-	case RELOAD_FOR_OUTADDR_ADDRESS:
-	  this_needs = &insn_needs.out_addr_addr[rld[i].opnum];
-	  break;
-	case RELOAD_FOR_OPERAND_ADDRESS:
-	  this_needs = &insn_needs.op_addr;
-	  break;
-	case RELOAD_FOR_OPADDR_ADDR:
-	  this_needs = &insn_needs.op_addr_reload;
-	  break;
-	default:
-	  abort ();
-	}
-
-      if (size > 1)
-	{
-	  enum machine_mode other_mode, allocate_mode;
-
-	  /* Count number of groups needed separately from
-	     number of individual regs needed.  */
-	  this_needs->groups[(int) class]++;
-	  p = reg_class_superclasses[(int) class];
-	  while (*p != LIM_REG_CLASSES)
-	    this_needs->groups[(int) *p++]++;
-
-	  /* Record size and mode of a group of this class.  */
-	  /* If more than one size group is needed,
-	     make all groups the largest needed size.  */
-	  if (chain->group_size[(int) class] < size)
-	    {
-	      other_mode = chain->group_mode[(int) class];
-	      allocate_mode = mode;
-
-	      chain->group_size[(int) class] = size;
-	      chain->group_mode[(int) class] = mode;
-	    }
-	  else
-	    {
-	      other_mode = mode;
-	      allocate_mode = chain->group_mode[(int) class];
-	    }
-
-	  /* Crash if two dissimilar machine modes both need
-	     groups of consecutive regs of the same class.  */
-	  if (other_mode != VOIDmode && other_mode != allocate_mode
-	      && ! modes_equiv_for_class_p (allocate_mode, other_mode, class))
-	    fatal_insn ("Two dissimilar machine modes both need groups of consecutive regs of the same class",
-			chain->insn);
-	}
-      else if (size == 1)
-	{
-	  this_needs->regs[(unsigned char) rld[i].nongroup][(int) class] += 1;
-	  p = reg_class_superclasses[(int) class];
-	  while (*p != LIM_REG_CLASSES)
-	    this_needs->regs[(unsigned char) rld[i].nongroup][(int) *p++] += 1;
-	}
-      else
-	abort ();
-    }
-
-  /* All reloads have been counted for this insn;
-     now merge the various times of use.
-     This sets insn_needs, etc., to the maximum total number
-     of registers needed at any point in this insn.  */
-
-  for (i = 0; i < N_REG_CLASSES; i++)
-    {
-      int j, in_max, out_max;
-
-      /* Compute normal and nongroup needs.  */
-      for (j = 0; j <= 1; j++)
-	{
-	  int k;
-	  for (in_max = 0, out_max = 0, k = 0; k < reload_n_operands; k++)
-	    {
-	      in_max = MAX (in_max,
-			    (insn_needs.in_addr[k].regs[j][i]
-			     + insn_needs.in_addr_addr[k].regs[j][i]));
-	      out_max = MAX (out_max, insn_needs.out_addr[k].regs[j][i]);
-	      out_max = MAX (out_max, insn_needs.out_addr_addr[k].regs[j][i]);
-	    }
-
-	  /* RELOAD_FOR_INSN reloads conflict with inputs, outputs,
-	     and operand addresses but not things used to reload
-	     them.  Similarly, RELOAD_FOR_OPERAND_ADDRESS reloads
-	     don't conflict with things needed to reload inputs or
-	     outputs.  */
-
-	  in_max = MAX (MAX (insn_needs.op_addr.regs[j][i],
-			     insn_needs.op_addr_reload.regs[j][i]),
-			in_max);
-	  out_max = MAX (out_max, insn_needs.insn.regs[j][i]);
-
-	  insn_needs.input.regs[j][i]
-	    = MAX (insn_needs.input.regs[j][i]
-		   + insn_needs.op_addr.regs[j][i]
-		   + insn_needs.insn.regs[j][i],
-		   in_max + insn_needs.input.regs[j][i]);
-
-	  insn_needs.output.regs[j][i] += out_max;
-	  insn_needs.other.regs[j][i]
-	    += MAX (MAX (insn_needs.input.regs[j][i],
-			 insn_needs.output.regs[j][i]),
-		    insn_needs.other_addr.regs[j][i]);
-	}
-
-      /* Now compute group needs.  */
-      for (in_max = 0, out_max = 0, j = 0; j < reload_n_operands; j++)
-	{
-	  in_max = MAX (in_max, insn_needs.in_addr[j].groups[i]);
-	  in_max = MAX (in_max, insn_needs.in_addr_addr[j].groups[i]);
-	  out_max = MAX (out_max, insn_needs.out_addr[j].groups[i]);
-	  out_max = MAX (out_max, insn_needs.out_addr_addr[j].groups[i]);
-	}
-
-      in_max = MAX (MAX (insn_needs.op_addr.groups[i],
-			 insn_needs.op_addr_reload.groups[i]),
-		    in_max);
-      out_max = MAX (out_max, insn_needs.insn.groups[i]);
-
-      insn_needs.input.groups[i]
-	= MAX (insn_needs.input.groups[i]
-	       + insn_needs.op_addr.groups[i]
-	       + insn_needs.insn.groups[i],
-	       in_max + insn_needs.input.groups[i]);
-
-      insn_needs.output.groups[i] += out_max;
-      insn_needs.other.groups[i]
-	+= MAX (MAX (insn_needs.input.groups[i],
-		     insn_needs.output.groups[i]),
-		insn_needs.other_addr.groups[i]);
-    }
-
-  /* Record the needs for later.  */
-  chain->need = insn_needs.other;
-}
+/* Comparison function for qsort to decide which of two reloads
+   should be handled first.  *P1 and *P2 are the reload numbers.  */
+
+static int
+reload_reg_class_lower (r1p, r2p)
+     const PTR r1p;
+     const PTR r2p;
+{
+  register int r1 = *(short *) r1p, r2 = *(short *) r2p;
+  register int t;
+
+  /* Consider required reloads before optional ones.  */
+  t = rld[r1].optional - rld[r2].optional;
+  if (t != 0)
+    return t;
+
+  /* Count all solitary classes before non-solitary ones.  */
+  t = ((reg_class_size[(int) rld[r2].class] == 1)
+       - (reg_class_size[(int) rld[r1].class] == 1));
+  if (t != 0)
+    return t;
+
+  /* Aside from solitaires, consider all multi-reg groups first.  */
+  t = rld[r2].nregs - rld[r1].nregs;
+  if (t != 0)
+    return t;
+
+  /* Consider reloads in order of increasing reg-class number.  */
+  t = (int) rld[r1].class - (int) rld[r2].class;
+  if (t != 0)
+    return t;
+
+  /* If reloads are equally urgent, sort by reload number,
+     so that the results of qsort leave nothing to chance.  */
+  return r1 - r2;
+}
+
+/* The cost of spilling each hard reg.  */
+static int spill_cost[FIRST_PSEUDO_REGISTER];
+
+/* When spilling multiple hard registers, we use SPILL_COST for the first
+   spilled hard reg and SPILL_ADD_COST for subsequent regs.  SPILL_ADD_COST
+   only the first hard reg for a multi-reg pseudo.  */
+static int spill_add_cost[FIRST_PSEUDO_REGISTER];
+
+/* Update the spill cost arrays, considering that pseudo REG is live.  */
+
+static void
+count_pseudo (reg)
+     int reg;
+{
+  int n_refs = REG_N_REFS (reg);
+  int r = reg_renumber[reg];
+  int nregs;
+
+  if (REGNO_REG_SET_P (&pseudos_counted, reg)
+      || REGNO_REG_SET_P (&spilled_pseudos, reg))
+    return;
+
+  SET_REGNO_REG_SET (&pseudos_counted, reg);
+
+  if (r < 0)
+    abort ();
+
+  spill_add_cost[r] += n_refs;
+
+  nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));
+  while (nregs-- > 0)
+    spill_cost[r + nregs] += n_refs;
+}
+
+/* Calculate the SPILL_COST and SPILL_ADD_COST arrays and determine the
+   contents of BAD_SPILL_REGS for the insn described by CHAIN.  */
+
+static void
+order_regs_for_reload (chain)
+     struct insn_chain *chain;
+{
+  register int i, j;
+
+  COPY_HARD_REG_SET (bad_spill_regs, bad_spill_regs_global);
+
+  memset (spill_cost, 0, sizeof spill_cost);
+  memset (spill_add_cost, 0, sizeof spill_add_cost);
+
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    {
+      /* Test the various reasons why we can't use a register for
+	 spilling in this insn.  */
+      if (fixed_regs[i]
+	  || REGNO_REG_SET_P (chain->live_before, i)
+	  || REGNO_REG_SET_P (chain->live_after, i))
+	SET_HARD_REG_BIT (bad_spill_regs, i);
+    }
+
+  /* Now find out which pseudos are allocated to it, and update
+     hard_reg_n_uses.  */
+  CLEAR_REG_SET (&pseudos_counted);
+
+  EXECUTE_IF_SET_IN_REG_SET
+    (chain->live_before, FIRST_PSEUDO_REGISTER, j,
+     {
+       count_pseudo (j);
+     });
+  EXECUTE_IF_SET_IN_REG_SET
+    (chain->live_after, FIRST_PSEUDO_REGISTER, j,
+     {
+       count_pseudo (j);
+     });
+  CLEAR_REG_SET (&pseudos_counted);
+}
+
+/* Vector of reload-numbers showing the order in which the reloads should
+   be processed.  */
+static short reload_order[MAX_RELOADS];
+
+/* This is used to keep track of the spill regs used in one insn.  */
+static HARD_REG_SET used_spill_regs_local;

-/* Find a group of exactly 2 registers.
-
-   First try to fill out the group by spilling a single register which
-   would allow completion of the group.
-
-   Then try to create a new group from a pair of registers, neither of
-   which are explicitly used.
-
-   Then try to create a group from any pair of registers.  */
-
-static void
-find_tworeg_group (chain, class, dumpfile)
-     struct insn_chain *chain;
-     int class;
-     FILE *dumpfile;
-{
-  int i;
-  /* First, look for a register that will complete a group.  */
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    {
-      int j, other;
-
-      j = potential_reload_regs[i];
-      if (j >= 0 && ! TEST_HARD_REG_BIT (bad_spill_regs, j)
-	  && ((j > 0 && (other = j - 1, spill_reg_order[other] >= 0)
-	       && TEST_HARD_REG_BIT (reg_class_contents[class], j)
-	       && TEST_HARD_REG_BIT (reg_class_contents[class], other)
-	       && HARD_REGNO_MODE_OK (other, chain->group_mode[class])
-	       && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, other)
-	       /* We don't want one part of another group.
-		  We could get "two groups" that overlap!  */
-	       && ! TEST_HARD_REG_BIT (chain->counted_for_groups, other))
-	      || (j < FIRST_PSEUDO_REGISTER - 1
-		  && (other = j + 1, spill_reg_order[other] >= 0)
-		  && TEST_HARD_REG_BIT (reg_class_contents[class], j)
-		  && TEST_HARD_REG_BIT (reg_class_contents[class], other)
-		  && HARD_REGNO_MODE_OK (j, chain->group_mode[class])
-		  && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, other)
-		  && ! TEST_HARD_REG_BIT (chain->counted_for_groups, other))))
-	{
-	  register enum reg_class *p;
-
-	  /* We have found one that will complete a group,
-	     so count off one group as provided.  */
-	  chain->need.groups[class]--;
-	  p = reg_class_superclasses[class];
-	  while (*p != LIM_REG_CLASSES)
-	    {
-	      if (chain->group_size [(int) *p] <= chain->group_size [class])
-		chain->need.groups[(int) *p]--;
-	      p++;
-	    }
-
-	  /* Indicate both these regs are part of a group.  */
-	  SET_HARD_REG_BIT (chain->counted_for_groups, j);
-	  SET_HARD_REG_BIT (chain->counted_for_groups, other);
-	  break;
-	}
-    }
-  /* We can't complete a group, so start one.  */
-  if (i == FIRST_PSEUDO_REGISTER)
-    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-      {
-	int j, k;
-	j = potential_reload_regs[i];
-	/* Verify that J+1 is a potential reload reg.  */
-	for (k = 0; k < FIRST_PSEUDO_REGISTER; k++)
-	  if (potential_reload_regs[k] == j + 1)
-	    break;
-	if (j >= 0 && j + 1 < FIRST_PSEUDO_REGISTER
-	    && k < FIRST_PSEUDO_REGISTER
-	    && spill_reg_order[j] < 0 && spill_reg_order[j + 1] < 0
-	    && TEST_HARD_REG_BIT (reg_class_contents[class], j)
-	    && TEST_HARD_REG_BIT (reg_class_contents[class], j + 1)
-	    && HARD_REGNO_MODE_OK (j, chain->group_mode[class])
-	    && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, j + 1)
-	    && ! TEST_HARD_REG_BIT (bad_spill_regs, j + 1))
-	  break;
-      }
-
-  /* I should be the index in potential_reload_regs
-     of the new reload reg we have found.  */
-  new_spill_reg (chain, i, class, 0, dumpfile);
-}
+/* We decided to spill hard register SPILLED, which has a size of
+   SPILLED_NREGS.  Determine how pseudo REG, which is live during the insn,
+   is affected.  We will add it to SPILLED_PSEUDOS if necessary, and we will
+   update SPILL_COST/SPILL_ADD_COST.  */
+
+static void
+count_spilled_pseudo (spilled, spilled_nregs, reg)
+     int spilled, spilled_nregs, reg;
+{
+  int r = reg_renumber[reg];
+  int nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));
+
+  if (REGNO_REG_SET_P (&spilled_pseudos, reg)
+      || spilled + spilled_nregs <= r || r + nregs <= spilled)
+    return;
+
+  SET_REGNO_REG_SET (&spilled_pseudos, reg);
+
+  spill_add_cost[r] -= REG_N_REFS (reg);
+  while (nregs-- > 0)
+    spill_cost[r + nregs] -= REG_N_REFS (reg);
+}

-/* Find a group of more than 2 registers.
-   Look for a sufficient sequence of unspilled registers, and spill them all
-   at once.  */
-
-static void
-find_group (chain, class, dumpfile)
-     struct insn_chain *chain;
-     int class;
-     FILE *dumpfile;
-{
-  int i;
-
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    {
-      int j = potential_reload_regs[i];
-
-      if (j >= 0
-	  && j + chain->group_size[class] <= FIRST_PSEUDO_REGISTER
-	  && HARD_REGNO_MODE_OK (j, chain->group_mode[class]))
-	{
-	  int k;
-	  /* Check each reg in the sequence.  */
-	  for (k = 0; k < chain->group_size[class]; k++)
-	    if (! (spill_reg_order[j + k] < 0
-		   && ! TEST_HARD_REG_BIT (bad_spill_regs, j + k)
-		   && TEST_HARD_REG_BIT (reg_class_contents[class], j + k)))
-	      break;
-	  /* We got a full sequence, so spill them all.  */
-	  if (k == chain->group_size[class])
-	    {
-	      register enum reg_class *p;
-	      for (k = 0; k < chain->group_size[class]; k++)
-		{
-		  int idx;
-		  SET_HARD_REG_BIT (chain->counted_for_groups, j + k);
-		  for (idx = 0; idx < FIRST_PSEUDO_REGISTER; idx++)
-		    if (potential_reload_regs[idx] == j + k)
-		      break;
-		  new_spill_reg (chain, idx, class, 0, dumpfile);
-		}
-
-	      /* We have found one that will complete a group,
-		 so count off one group as provided.  */
-	      chain->need.groups[class]--;
-	      p = reg_class_superclasses[class];
-	      while (*p != LIM_REG_CLASSES)
-		{
-		  if (chain->group_size [(int) *p]
-		      <= chain->group_size [class])
-		    chain->need.groups[(int) *p]--;
-		  p++;
-		}
-	      return;
-	    }
-	}
-    }
-  /* There are no groups left.  */
-  spill_failure (chain->insn);
-  failure = 1;
-}
+/* Find reload register to use for reload number ORDER.  */
+
+static int
+find_reg (chain, order, dumpfile)
+     struct insn_chain *chain;
+     int order;
+     FILE *dumpfile;
+{
+  int rnum = reload_order[order];
+  struct reload *rl = rld + rnum;
+  int best_cost = INT_MAX;
+  int best_reg = -1;
+  int i, j;
+  HARD_REG_SET not_usable;
+  HARD_REG_SET used_by_other_reload;
+
+  COPY_HARD_REG_SET (not_usable, bad_spill_regs);
+  IOR_HARD_REG_SET (not_usable, bad_spill_regs_global);
+  IOR_COMPL_HARD_REG_SET (not_usable, reg_class_contents[rl->class]);
+
+  CLEAR_HARD_REG_SET (used_by_other_reload);
+  for (i = 0; i < order; i++)
+    {
+      int other = reload_order[i];
+      if (rld[other].regno >= 0 && reloads_conflict (other, rnum))
+	for (j = 0; j < rld[other].nregs; j++)
+	  SET_HARD_REG_BIT (used_by_other_reload, rld[other].regno + j);
+    }
+
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    {
+      int regno = i;
+      if (! TEST_HARD_REG_BIT (not_usable, regno)
+	  && ! TEST_HARD_REG_BIT (used_by_other_reload, regno)
+	  && HARD_REGNO_MODE_OK (regno, rl->mode))
+	{
+	  int this_cost = spill_cost[regno];
+	  int ok = 1;
+	  int this_nregs = HARD_REGNO_NREGS (regno, rl->mode);
+
+	  for (j = 1; j < this_nregs; j++)
+	    {
+	      this_cost += spill_add_cost[regno + j];
+	      if ((TEST_HARD_REG_BIT (not_usable, regno + j))
+		  || TEST_HARD_REG_BIT (used_by_other_reload, regno + j))
+		ok = 0;
+	    }
+	  if (! ok)
+	    continue;
+	  if (rl->in && GET_CODE (rl->in) == REG && REGNO (rl->in) == regno)
+	    this_cost--;
+	  if (rl->out && GET_CODE (rl->out) == REG && REGNO (rl->out) == regno)
+	    this_cost--;
+	  if (this_cost < best_cost
+	      /* Among registers with equal cost, prefer caller-saved ones, or
+		 use REG_ALLOC_ORDER if it is defined.  */
+	      || (this_cost == best_cost
+#ifdef REG_ALLOC_ORDER
+		  && (inv_reg_alloc_order[regno]
+		      < inv_reg_alloc_order[best_reg])
+#else
+		  && call_used_regs[regno] && ! call_used_regs[best_reg]
+#endif
+		  ))
+	    {
+	      best_reg = regno;
+	      best_cost = this_cost;
+	    }
+	}
+    }
+  if (best_reg == -1)
+    return 0;
+  if (dumpfile)
+    fprintf (dumpfile, "Using reg %d for reload %d\n", best_reg, rnum);
+  rl->nregs = HARD_REGNO_NREGS (best_reg, rl->mode);
+  rl->regno = best_reg;
+
+  EXECUTE_IF_SET_IN_REG_SET
+    (chain->live_before, FIRST_PSEUDO_REGISTER, j,
+     {
+       count_spilled_pseudo (best_reg, rl->nregs, j);
+     });
+  EXECUTE_IF_SET_IN_REG_SET
+    (chain->live_after, FIRST_PSEUDO_REGISTER, j,
+     {
+       count_spilled_pseudo (best_reg, rl->nregs, j);
+     });
+
+  for (i = 0; i < rl->nregs; i++)
+    {
+      if (spill_cost[best_reg + i] != 0
+	  || spill_add_cost[best_reg + i] != 0)
+	abort ();
+      SET_HARD_REG_BIT (used_spill_regs_local, best_reg + i);
+    }
+  return 1;
+}

-/* If pseudo REG conflicts with one of our reload registers, mark it as
-   spilled.  */
-static void
-maybe_mark_pseudo_spilled (reg)
-     int reg;
-{
-  int i;
-  int r = reg_renumber[reg];
-  int nregs;
-
-  if (r < 0)
-    abort ();
-  nregs = HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (reg));
-  for (i = 0; i < n_spills; i++)
-    if (r <= spill_regs[i] && r + nregs > spill_regs[i])
-      {
-	SET_REGNO_REG_SET (spilled_pseudos, reg);
-	return;
-      }
-}
-
-/* Find more reload regs to satisfy the remaining need of an insn, which
-   is given by CHAIN.
-   Do it by ascending class number, since otherwise a reg
-   might be spilled for a big class and might fail to count
-   for a smaller class even though it belongs to that class.
-
-   Count spilled regs in `spills', and add entries to
-   `spill_regs' and `spill_reg_order'.
-
-   ??? Note there is a problem here.
-   When there is a need for a group in a high-numbered class,
-   and also need for non-group regs that come from a lower class,
-   the non-group regs are chosen first.  If there aren't many regs,
-   they might leave no room for a group.
-
-   This was happening on the 386.  To fix it, we added the code
-   that calls possible_group_p, so that the lower class won't
-   break up the last possible group.
-
-   Really fixing the problem would require changes above
-   in counting the regs already spilled, and in choose_reload_regs.
-   It might be hard to avoid introducing bugs there.  */
-
-static void
-find_reload_regs (chain, dumpfile)
-     struct insn_chain *chain;
-     FILE *dumpfile;
-{
-  int i, class;
-  short *group_needs = chain->need.groups;
-  short *simple_needs = chain->need.regs[0];
-  short *nongroup_needs = chain->need.regs[1];
-
-  if (dumpfile)
-    fprintf (dumpfile, "Spilling for insn %d.\n", INSN_UID (chain->insn));
-
-  /* Compute the order of preference for hard registers to spill.
-     Store them by decreasing preference in potential_reload_regs.  */
-
-  order_regs_for_reload (chain);
-
-  /* So far, no hard regs have been spilled.  */
-  n_spills = 0;
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    spill_reg_order[i] = -1;
-
-  CLEAR_HARD_REG_SET (chain->used_spill_regs);
-  CLEAR_HARD_REG_SET (chain->counted_for_groups);
-  CLEAR_HARD_REG_SET (chain->counted_for_nongroups);
-
-  for (class = 0; class < N_REG_CLASSES; class++)
-    {
-      /* First get the groups of registers.
-	 If we got single registers first, we might fragment
-	 possible groups.  */
-      while (group_needs[class] > 0)
-	{
-	  /* If any single spilled regs happen to form groups,
-	     count them now.  Maybe we don't really need
-	     to spill another group.  */
-	  count_possible_groups (chain, class);
-
-	  if (group_needs[class] <= 0)
-	    break;
-
-	  /* Groups of size 2, the only groups used on most machines,
-	     are treated specially.  */
-	  if (chain->group_size[class] == 2)
-	    find_tworeg_group (chain, class, dumpfile);
-	  else
-	    find_group (chain, class, dumpfile);
-	  if (failure)
-	    return;
-	}
-
-      /* Now similarly satisfy all need for single registers.  */
-
-      while (simple_needs[class] > 0 || nongroup_needs[class] > 0)
-	{
-	  /* If we spilled enough regs, but they weren't counted
-	     against the non-group need, see if we can count them now.
-	     If so, we can avoid some actual spilling.  */
-	  if (simple_needs[class] <= 0 && nongroup_needs[class] > 0)
-	    for (i = 0; i < n_spills; i++)
-	      {
-		int regno = spill_regs[i];
-		if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
-		    && ! TEST_HARD_REG_BIT (chain->counted_for_groups, regno)
-		    && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups,
-					    regno)
-		    && nongroup_needs[class] > 0)
-		  {
-		    register enum reg_class *p;
-
-		    SET_HARD_REG_BIT (chain->counted_for_nongroups, regno);
-		    nongroup_needs[class]--;
-		    p = reg_class_superclasses[class];
-		    while (*p != LIM_REG_CLASSES)
-		      nongroup_needs[(int) *p++]--;
-		  }
-	      }
-
-	  if (simple_needs[class] <= 0 && nongroup_needs[class] <= 0)
-	    break;
-
-	  /* Consider the potential reload regs that aren't
-	     yet in use as reload regs, in order of preference.
-	     Find the most preferred one that's in this class.  */
-
-	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-	    {
-	      int regno = potential_reload_regs[i];
-	      if (regno >= 0
-		  && TEST_HARD_REG_BIT (reg_class_contents[class], regno)
-		  /* If this reg will not be available for groups,
-		     pick one that does not foreclose possible groups.
-		     This is a kludge, and not very general,
-		     but it should be sufficient to make the 386 work,
-		     and the problem should not occur on machines with
-		     more registers.  */
-		  && (nongroup_needs[class] == 0
-		      || possible_group_p (chain, regno)))
-		break;
-	    }
-
-	  /* If we couldn't get a register, try to get one even if we
-	     might foreclose possible groups.  This may cause problems
-	     later, but that's better than aborting now, since it is
-	     possible that we will, in fact, be able to form the needed
-	     group even with this allocation.  */
-
-	  if (i >= FIRST_PSEUDO_REGISTER
-	      && asm_noperands (chain->insn) < 0)
-	    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-	      if (potential_reload_regs[i] >= 0
-		  && TEST_HARD_REG_BIT (reg_class_contents[class],
-					potential_reload_regs[i]))
-		break;
-
-	  /* I should be the index in potential_reload_regs
-	     of the new reload reg we have found.  */
-
-	  new_spill_reg (chain, i, class, 1, dumpfile);
-	  if (failure)
-	    return;
-	}
-    }
-
-  /* We know which hard regs to use, now mark the pseudos that live in them
-     as needing to be kicked out.  */
-  EXECUTE_IF_SET_IN_REG_SET
-    (chain->live_before, FIRST_PSEUDO_REGISTER, i,
-     {
-       maybe_mark_pseudo_spilled (i);
-     });
-  EXECUTE_IF_SET_IN_REG_SET
-    (chain->live_after, FIRST_PSEUDO_REGISTER, i,
-     {
-       maybe_mark_pseudo_spilled (i);
-     });
-
-  IOR_HARD_REG_SET (used_spill_regs, chain->used_spill_regs);
-}
+/* Find more reload regs to satisfy the remaining need of an insn, which
+   is given by CHAIN.
+   Do it by ascending class number, since otherwise a reg
+   might be spilled for a big class and might fail to count
+   for a smaller class even though it belongs to that class.  */
+
+static void
+find_reload_regs (chain, dumpfile)
+     struct insn_chain *chain;
+     FILE *dumpfile;
+{
+  int i;
+
+  /* In order to be certain of getting the registers we need,
+     we must sort the reloads into order of increasing register class.
+     Then our grabbing of reload registers will parallel the process
+     that provided the reload registers.  */
+  for (i = 0; i < chain->n_reloads; i++)
+    {
+      /* Show whether this reload already has a hard reg.  */
+      if (chain->rld[i].reg_rtx)
+	{
+	  int regno = REGNO (chain->rld[i].reg_rtx);
+	  chain->rld[i].regno = regno;
+	  chain->rld[i].nregs
+	    = HARD_REGNO_NREGS (regno, GET_MODE (chain->rld[i].reg_rtx));
+	}
+      else
+	chain->rld[i].regno = -1;
+      reload_order[i] = i;
+    }
+
+  n_reloads = chain->n_reloads;
+  memcpy (rld, chain->rld, n_reloads * sizeof (struct reload));
+
+  CLEAR_HARD_REG_SET (used_spill_regs_local);
+
+  if (dumpfile)
+    fprintf (dumpfile, "Spilling for insn %d.\n", INSN_UID (chain->insn));
+
+  qsort (reload_order, n_reloads, sizeof (short), reload_reg_class_lower);
+
+  /* Compute the order of preference for hard registers to spill.  */
+
+  order_regs_for_reload (chain);
+
+  for (i = 0; i < n_reloads; i++)
+    {
+      int r = reload_order[i];
+      /* Ignore reloads that got marked inoperative.  */
+      if ((rld[r].out != 0 || rld[r].in != 0 || rld[r].secondary_p)
+	  && ! rld[r].optional
+	  && rld[r].regno == -1)
+	if (! find_reg (chain, i, dumpfile))
+	  {
+	    spill_failure (chain->insn);
+	    failure = 1;
+	    return;
+	  }
+    }
+
+  COPY_HARD_REG_SET (chain->used_spill_regs, used_spill_regs_local);
+  IOR_HARD_REG_SET (used_spill_regs, used_spill_regs_local);
+
+  memcpy (chain->rld, rld, n_reloads * sizeof (struct reload));
+}

-void
-dump_needs (chain, dumpfile)
-     struct insn_chain *chain;
-     FILE *dumpfile;
-{
-  static const char * const reg_class_names[] = REG_CLASS_NAMES;
-  int i;
-  struct needs *n = &chain->need;
-
-  for (i = 0; i < N_REG_CLASSES; i++)
-    {
-      if (n->regs[i][0] > 0)
-	fprintf (dumpfile,
-		 ";; Need %d reg%s of class %s.\n",
-		 n->regs[i][0], n->regs[i][0] == 1 ? "" : "s",
-		 reg_class_names[i]);
-      if (n->regs[i][1] > 0)
-	fprintf (dumpfile,
-		 ";; Need %d nongroup reg%s of class %s.\n",
-		 n->regs[i][1], n->regs[i][1] == 1 ? "" : "s",
-		 reg_class_names[i]);
-      if (n->groups[i] > 0)
-	fprintf (dumpfile,
-		 ";; Need %d group%s (%smode) of class %s.\n",
-		 n->groups[i], n->groups[i] == 1 ? "" : "s",
-		 GET_MODE_NAME (chain->group_mode[i]),
-		 reg_class_names[i]);
-    }
-}
+static void
+select_reload_regs (dumpfile)
+     FILE *dumpfile;
+{
+  struct insn_chain *chain;
+
+  /* Try to satisfy the needs for each insn.  */
+  for (chain = insns_need_reload; chain != 0;
+       chain = chain->next_need_reload)
+    find_reload_regs (chain, dumpfile);
+}

 /* Delete all insns that were inserted by emit_caller_save_insns during
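The ordering imposed by reload_reg_class_lower is what makes the greedy find_reg loop safe: required reloads before optional ones, single-register ("solitary") classes first, then wider multi-reg groups, then increasing class number, with the reload number as a deterministic tie-break. Here is a runnable toy demonstrating the same precedence rules on a made-up reload table; the struct and its field names (rclass, class_size) are simplified stand-ins, not the real gcc types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical reload table: optional flag, class size, width, class no.  */
struct toy_reload { int optional, class_size, nregs, rclass; };
static struct toy_reload rld[] = {
  { 1, 4, 1, 2 },   /* optional: sorted last */
  { 0, 1, 1, 0 },   /* solitary class: sorted first */
  { 0, 4, 2, 1 },   /* multi-reg group: before single regs */
  { 0, 4, 1, 1 },   /* plain single-reg reload */
};

/* Same precedence rules as reload_reg_class_lower.  */
static int
cmp (const void *p1, const void *p2)
{
  int r1 = *(const short *) p1, r2 = *(const short *) p2, t;
  if ((t = rld[r1].optional - rld[r2].optional) != 0)
    return t;
  if ((t = (rld[r2].class_size == 1) - (rld[r1].class_size == 1)) != 0)
    return t;
  if ((t = rld[r2].nregs - rld[r1].nregs) != 0)
    return t;
  if ((t = rld[r1].rclass - rld[r2].rclass) != 0)
    return t;
  return r1 - r2;   /* deterministic tie-break */
}

int
main (void)
{
  short order[4] = { 0, 1, 2, 3 };
  qsort (order, 4, sizeof (short), cmp);
  for (int i = 0; i < 4; i++)
    printf ("%d ", order[i]);   /* prints: 1 2 3 0 */
  printf ("\n");
  return 0;
}

Because higher-numbered classes in gcc are generally supersets of lower-numbered ones, allocating in this order means the narrow, constrained reloads grab their registers before the permissive ones can squat on them.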
@@ -2064,162 +1788,6 @@ delete_caller_save_insns ()
     }
 }

-/* Nonzero if, after spilling reg REGNO for non-groups,
-   it will still be possible to find a group if we still need one.  */
-
-static int
-possible_group_p (chain, regno)
-     struct insn_chain *chain;
-     int regno;
-{
-  int i;
-  int class = (int) NO_REGS;
-
-  for (i = 0; i < (int) N_REG_CLASSES; i++)
-    if (chain->need.groups[i] > 0)
-      {
-	class = i;
-	break;
-      }
-
-  if (class == (int) NO_REGS)
-    return 1;
-
-  /* Consider each pair of consecutive registers.  */
-  for (i = 0; i < FIRST_PSEUDO_REGISTER - 1; i++)
-    {
-      /* Ignore pairs that include reg REGNO.  */
-      if (i == regno || i + 1 == regno)
-	continue;
-
-      /* Ignore pairs that are outside the class that needs the group.
-	 ??? Here we fail to handle the case where two different classes
-	 independently need groups.  But this never happens with our
-	 current machine descriptions.  */
-      if (! (TEST_HARD_REG_BIT (reg_class_contents[class], i)
-	     && TEST_HARD_REG_BIT (reg_class_contents[class], i + 1)))
-	continue;
-
-      /* A pair of consecutive regs we can still spill does the trick.  */
-      if (spill_reg_order[i] < 0 && spill_reg_order[i + 1] < 0
-	  && ! TEST_HARD_REG_BIT (bad_spill_regs, i)
-	  && ! TEST_HARD_REG_BIT (bad_spill_regs, i + 1))
-	return 1;
-
-      /* A pair of one already spilled and one we can spill does it
-	 provided the one already spilled is not otherwise reserved.  */
-      if (spill_reg_order[i] < 0
-	  && ! TEST_HARD_REG_BIT (bad_spill_regs, i)
-	  && spill_reg_order[i + 1] >= 0
-	  && ! TEST_HARD_REG_BIT (chain->counted_for_groups, i + 1)
-	  && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, i + 1))
-	return 1;
-      if (spill_reg_order[i + 1] < 0
-	  && ! TEST_HARD_REG_BIT (bad_spill_regs, i + 1)
-	  && spill_reg_order[i] >= 0
-	  && ! TEST_HARD_REG_BIT (chain->counted_for_groups, i)
-	  && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, i))
-	return 1;
-    }
-
-  return 0;
-}
-
-/* Count any groups of CLASS that can be formed from the registers recently
-   spilled.  */
-
-static void
-count_possible_groups (chain, class)
-     struct insn_chain *chain;
-     int class;
-{
-  HARD_REG_SET new;
-  int i, j;
-
-  /* Now find all consecutive groups of spilled registers
-     and mark each group off against the need for such groups.
-     But don't count them against ordinary need, yet.  */
-
-  if (chain->group_size[class] == 0)
-    return;
-
-  CLEAR_HARD_REG_SET (new);
-
-  /* Make a mask of all the regs that are spill regs in class I.  */
-  for (i = 0; i < n_spills; i++)
-    {
-      int regno = spill_regs[i];
-      if (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
-	  && ! TEST_HARD_REG_BIT (chain->counted_for_groups, regno)
-	  && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups, regno))
-	SET_HARD_REG_BIT (new, regno);
-    }
-
-  /* Find each consecutive group of them.  */
-  for (i = 0; i < FIRST_PSEUDO_REGISTER && chain->need.groups[class] > 0; i++)
-    if (TEST_HARD_REG_BIT (new, i)
-	&& i + chain->group_size[class] <= FIRST_PSEUDO_REGISTER
-	&& HARD_REGNO_MODE_OK (i, chain->group_mode[class]))
-      {
-	for (j = 1; j < chain->group_size[class]; j++)
-	  if (! TEST_HARD_REG_BIT (new, i + j))
-	    break;
-
-	if (j == chain->group_size[class])
-	  {
-	    /* We found a group.  Mark it off against this class's need for
-	       groups, and against each superclass too.  */
-	    register enum reg_class *p;
-
-	    chain->need.groups[class]--;
-	    p = reg_class_superclasses[class];
-	    while (*p != LIM_REG_CLASSES)
-	      {
-		if (chain->group_size [(int) *p] <= chain->group_size [class])
-		  chain->need.groups[(int) *p]--;
-		p++;
-	      }
-
-	    /* Don't count these registers again.  */
-	    for (j = 0; j < chain->group_size[class]; j++)
-	      SET_HARD_REG_BIT (chain->counted_for_groups, i + j);
-	  }
-
-	/* Skip to the last reg in this group.  When i is incremented above,
-	   it will then point to the first reg of the next possible group.  */
-	i += j - 1;
-      }
-}
-
-/* ALLOCATE_MODE is a register mode that needs to be reloaded.  OTHER_MODE is
-   another mode that needs to be reloaded for the same register class CLASS.
-   If any reg in CLASS allows ALLOCATE_MODE but not OTHER_MODE, fail.
-   ALLOCATE_MODE will never be smaller than OTHER_MODE.
-
-   This code used to also fail if any reg in CLASS allows OTHER_MODE but not
-   ALLOCATE_MODE.  This test is unnecessary, because we will never try to put
-   something of mode ALLOCATE_MODE into an OTHER_MODE register.  Testing this
-   causes unnecessary failures on machines requiring alignment of register
-   groups when the two modes are different sizes, because the larger mode has
-   more strict alignment rules than the smaller mode.  */
-
-static int
-modes_equiv_for_class_p (allocate_mode, other_mode, class)
-     enum machine_mode allocate_mode, other_mode;
-     enum reg_class class;
-{
-  register int regno;
-  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
-    {
-      if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno)
-	  && HARD_REGNO_MODE_OK (regno, allocate_mode)
-	  && ! HARD_REGNO_MODE_OK (regno, other_mode))
-	return 0;
-    }
-  return 1;
-}
-
 /* Handle the failure to find a register to spill.
    INSN should be one of the insns which needed this particular spill reg.  */
@@ -2233,83 +1801,6 @@ spill_failure (insn)
   fatal_insn ("Unable to find a register to spill.", insn);
 }

-/* Add a new register to the tables of available spill-registers.
-   CHAIN is the insn for which the register will be used; we decrease the
-   needs of that insn.
-   I is the index of this register in potential_reload_regs.
-   CLASS is the regclass whose need is being satisfied.
-   NONGROUP is 0 if this register is part of a group.
-   DUMPFILE is the same as the one that `reload' got.  */
-
-static void
-new_spill_reg (chain, i, class, nongroup, dumpfile)
-     struct insn_chain *chain;
-     int i;
-     int class;
-     int nongroup;
-     FILE *dumpfile;
-{
-  register enum reg_class *p;
-  int regno = potential_reload_regs[i];
-
-  if (i >= FIRST_PSEUDO_REGISTER)
-    {
-      spill_failure (chain->insn);
-      failure = 1;
-      return;
-    }
-
-  if (TEST_HARD_REG_BIT (bad_spill_regs, regno))
-    {
-      static const char * const reg_class_names[] = REG_CLASS_NAMES;
-
-      if (asm_noperands (PATTERN (chain->insn)) < 0)
-	{
-	  /* The error message is still correct - we know only that it wasn't
-	     an asm statement that caused the problem, but one of the global
-	     registers declared by the users might have screwed us.  */
-	  error ("fixed or forbidden register %d (%s) was spilled for class %s.",
-		 regno, reg_names[regno], reg_class_names[class]);
-	  error ("This may be due to a compiler bug or to impossible asm");
-	  error ("statements or clauses.");
-	  fatal_insn ("This is the instruction:", chain->insn);
-	}
-      error_for_asm (chain->insn, "Invalid `asm' statement:");
-      error_for_asm (chain->insn,
-		     "fixed or forbidden register %d (%s) was spilled for class %s.",
-		     regno, reg_names[regno], reg_class_names[class]);
-      failure = 1;
-      return;
-    }
-
-  /* Make reg REGNO an additional reload reg.  */
-
-  potential_reload_regs[i] = -1;
-  spill_regs[n_spills] = regno;
-  spill_reg_order[regno] = n_spills;
-  if (dumpfile)
-    fprintf (dumpfile, "Spilling reg %d.\n", regno);
-  SET_HARD_REG_BIT (chain->used_spill_regs, regno);
-
-  /* Clear off the needs we just satisfied.  */
-
-  chain->need.regs[0][class]--;
-  p = reg_class_superclasses[class];
-  while (*p != LIM_REG_CLASSES)
-    chain->need.regs[0][(int) *p++]--;
-
-  if (nongroup && chain->need.regs[1][class] > 0)
-    {
-      SET_HARD_REG_BIT (chain->counted_for_nongroups, regno);
-      chain->need.regs[1][class]--;
-      p = reg_class_superclasses[class];
-      while (*p != LIM_REG_CLASSES)
-	chain->need.regs[1][(int) *p++]--;
-    }
-
-  n_spills++;
-}
-
 /* Delete an unneeded INSN and any previous insns who sole purpose is loading
    data that is dead in INSN.  */
@@ -3927,7 +3418,7 @@ spill_hard_reg (regno, dumpfile, cant_eliminate)
	    + HARD_REGNO_NREGS (reg_renumber[i],
				PSEUDO_REGNO_MODE (i))
	    > regno))
-      SET_REGNO_REG_SET (spilled_pseudos, i);
+      SET_REGNO_REG_SET (&spilled_pseudos, i);
 }

 /* I'm getting weird preprocessor errors if I use IOR_HARD_REG_SET
@@ -3979,7 +3470,7 @@ finish_spills (global, dumpfile)
     spill_reg_order[i] = -1;

   for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
-    if (REGNO_REG_SET_P (spilled_pseudos, i))
+    if (REGNO_REG_SET_P (&spilled_pseudos, i))
      {
	/* Record the current hard register the pseudo is allocated to in
	   pseudo_previous_regs so we avoid reallocating it to the same
@@ -4030,7 +3521,7 @@ finish_spills (global, dumpfile)
	  IOR_HARD_REG_SET (forbidden, pseudo_previous_regs[i]);
	  retry_global_alloc (i, forbidden);
	  if (reg_renumber[i] >= 0)
-	    CLEAR_REGNO_REG_SET (spilled_pseudos, i);
+	    CLEAR_REGNO_REG_SET (&spilled_pseudos, i);
	}
    }
@@ -4042,8 +3533,8 @@ finish_spills (global, dumpfile)
      HARD_REG_SET used_by_pseudos;
      HARD_REG_SET used_by_pseudos2;

-      AND_COMPL_REG_SET (chain->live_before, spilled_pseudos);
-      AND_COMPL_REG_SET (chain->live_after, spilled_pseudos);
+      AND_COMPL_REG_SET (chain->live_before, &spilled_pseudos);
+      AND_COMPL_REG_SET (chain->live_after, &spilled_pseudos);

      /* Mark any unallocated hard regs as available for spills.  That
	 makes inheritance work somewhat better.  */
@@ -4148,146 +3639,6 @@ scan_paradoxical_subregs (x)
}
}
static
int
hard_reg_use_compare
(
p1p
,
p2p
)
const
PTR
p1p
;
const
PTR
p2p
;
{
const
struct
hard_reg_n_uses
*
p1
=
(
const
struct
hard_reg_n_uses
*
)
p1p
;
const
struct
hard_reg_n_uses
*
p2
=
(
const
struct
hard_reg_n_uses
*
)
p2p
;
int
bad1
=
TEST_HARD_REG_BIT
(
bad_spill_regs
,
p1
->
regno
);
int
bad2
=
TEST_HARD_REG_BIT
(
bad_spill_regs
,
p2
->
regno
);
if
(
bad1
&&
bad2
)
return
p1
->
regno
-
p2
->
regno
;
if
(
bad1
)
return
1
;
if
(
bad2
)
return
-
1
;
if
(
p1
->
uses
>
p2
->
uses
)
return
1
;
if
(
p1
->
uses
<
p2
->
uses
)
return
-
1
;
/* If regs are equally good, sort by regno,
so that the results of qsort leave nothing to chance. */
return
p1
->
regno
-
p2
->
regno
;
}
/* Used for communication between order_regs_for_reload and count_pseudo.
Used to avoid counting one pseudo twice. */
static
regset
pseudos_counted
;
/* Update the costs in N_USES, considering that pseudo REG is live. */
static
void
count_pseudo
(
n_uses
,
reg
)
struct
hard_reg_n_uses
*
n_uses
;
int
reg
;
{
int
r
=
reg_renumber
[
reg
];
int
nregs
;
if
(
REGNO_REG_SET_P
(
pseudos_counted
,
reg
))
return
;
SET_REGNO_REG_SET
(
pseudos_counted
,
reg
);
if
(
r
<
0
)
abort
();
nregs
=
HARD_REGNO_NREGS
(
r
,
PSEUDO_REGNO_MODE
(
reg
));
while
(
nregs
--
>
0
)
n_uses
[
r
++
].
uses
+=
REG_N_REFS
(
reg
);
}
-/* Choose the order to consider regs for use as reload registers
-   based on how much trouble would be caused by spilling one.
-   Store them in order of decreasing preference in potential_reload_regs.  */
-
-static void
-order_regs_for_reload (chain)
-     struct insn_chain *chain;
-{
-  register int i;
-  register int o = 0;
-  struct hard_reg_n_uses hard_reg_n_uses[FIRST_PSEUDO_REGISTER];
-
-  pseudos_counted = ALLOCA_REG_SET ();
-
-  COPY_HARD_REG_SET (bad_spill_regs, bad_spill_regs_global);
-
-  /* Count number of uses of each hard reg by pseudo regs allocated to it
-     and then order them by decreasing use.  */
-
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    {
-      hard_reg_n_uses[i].regno = i;
-      hard_reg_n_uses[i].uses = 0;
-
-      /* Test the various reasons why we can't use a register for
-	 spilling in this insn.  */
-      if (fixed_regs[i]
-	  || REGNO_REG_SET_P (chain->live_before, i)
-	  || REGNO_REG_SET_P (chain->live_after, i))
-	SET_HARD_REG_BIT (bad_spill_regs, i);
-    }
-
-  /* Now compute hard_reg_n_uses.  */
-  CLEAR_REG_SET (pseudos_counted);
-
-  EXECUTE_IF_SET_IN_REG_SET
-    (chain->live_before, FIRST_PSEUDO_REGISTER, i,
-     {
-       count_pseudo (hard_reg_n_uses, i);
-     });
-  EXECUTE_IF_SET_IN_REG_SET
-    (chain->live_after, FIRST_PSEUDO_REGISTER, i,
-     {
-       count_pseudo (hard_reg_n_uses, i);
-     });
-
-  FREE_REG_SET (pseudos_counted);
-
-  /* Prefer registers not so far used, for use in temporary loading.
-     Among them, if REG_ALLOC_ORDER is defined, use that order.
-     Otherwise, prefer registers not preserved by calls.  */
-
-#ifdef REG_ALLOC_ORDER
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    {
-      int regno = reg_alloc_order[i];
-
-      if (hard_reg_n_uses[regno].uses == 0
-	  && ! TEST_HARD_REG_BIT (bad_spill_regs, regno))
-	potential_reload_regs[o++] = regno;
-    }
-#else
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    {
-      if (hard_reg_n_uses[i].uses == 0 && call_used_regs[i]
-	  && ! TEST_HARD_REG_BIT (bad_spill_regs, i))
-	potential_reload_regs[o++] = i;
-    }
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    {
-      if (hard_reg_n_uses[i].uses == 0 && ! call_used_regs[i]
-	  && ! TEST_HARD_REG_BIT (bad_spill_regs, i))
-	potential_reload_regs[o++] = i;
-    }
-#endif
-
-  qsort (hard_reg_n_uses, FIRST_PSEUDO_REGISTER,
-	 sizeof hard_reg_n_uses[0], hard_reg_use_compare);
-
-  /* Now add the regs that are already used,
-     preferring those used less often.  The fixed and otherwise forbidden
-     registers will be at the end of this list.  */
-
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    if (hard_reg_n_uses[i].uses != 0
-	&& ! TEST_HARD_REG_BIT (bad_spill_regs, hard_reg_n_uses[i].regno))
-      potential_reload_regs[o++] = hard_reg_n_uses[i].regno;
-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
-    if (TEST_HARD_REG_BIT (bad_spill_regs, hard_reg_n_uses[i].regno))
-      potential_reload_regs[o++] = hard_reg_n_uses[i].regno;
-}
 /* Reload pseudo-registers into hard regs around each insn as needed.
    Additional register load insns are output before the insn that needs it
    and perhaps store insns after insns that modify the reloaded pseudo reg.
...
...
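The net effect of order_regs_for_reload above is a three-tier preference order in potential_reload_regs: usable registers with no pseudo references first, then used registers by increasing use count, and the bad_spill_regs last. A self-contained sketch of that tiering over invented data; a simple counting scan stands in for the qsort:

    #include <stdio.h>

    #define NREGS 6

    int
    main (void)
    {
      int uses[NREGS] = { 0, 3, 0, 1, 2, 0 };  /* pseudo references */
      int bad[NREGS]  = { 0, 0, 1, 0, 0, 0 };  /* forbidden spill regs */
      int order[NREGS];
      int i, j, o = 0;

      /* Tier 1: unused registers we are allowed to spill.  */
      for (i = 0; i < NREGS; i++)
        if (uses[i] == 0 && !bad[i])
          order[o++] = i;

      /* Tier 2: used registers, by increasing use count (3 is the
         largest use count occurring in this example).  */
      for (j = 1; j <= 3; j++)
        for (i = 0; i < NREGS; i++)
          if (uses[i] == j && !bad[i])
            order[o++] = i;

      /* Tier 3: forbidden registers, kept only as a last resort.  */
      for (i = 0; i < NREGS; i++)
        if (bad[i])
          order[o++] = i;

      for (i = 0; i < NREGS; i++)
        printf ("preference %d: reg %d\n", i, order[i]);
      return 0;
    }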
@@ -4626,43 +3977,6 @@ forget_old_reloads_1 (x, ignored, data)
 	reg_last_reload_reg[regno + nr] = 0;
 }
-/* Comparison function for qsort to decide which of two reloads
-   should be handled first.  *P1 and *P2 are the reload numbers.  */
-
-static int
-reload_reg_class_lower (r1p, r2p)
-     const PTR r1p;
-     const PTR r2p;
-{
-  register int r1 = *(const short *) r1p, r2 = *(const short *) r2p;
-  register int t;
-
-  /* Consider required reloads before optional ones.  */
-  t = rld[r1].optional - rld[r2].optional;
-  if (t != 0)
-    return t;
-
-  /* Count all solitary classes before non-solitary ones.  */
-  t = ((reg_class_size[(int) rld[r2].class] == 1)
-       - (reg_class_size[(int) rld[r1].class] == 1));
-  if (t != 0)
-    return t;
-
-  /* Aside from solitaires, consider all multi-reg groups first.  */
-  t = rld[r2].nregs - rld[r1].nregs;
-  if (t != 0)
-    return t;
-
-  /* Consider reloads in order of increasing reg-class number.  */
-  t = (int) rld[r1].class - (int) rld[r2].class;
-  if (t != 0)
-    return t;
-
-  /* If reloads are equally urgent, sort by reload number,
-     so that the results of qsort leave nothing to chance.  */
-  return r1 - r2;
-}
 
 /* The following HARD_REG_SETs indicate when each hard register is
    used for a reload of various parts of the current insn.  */
...
...
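Note that reload_reg_class_lower above is applied to an array of reload numbers, so qsort rearranges only the processing order while the reload records stay put. A minimal sketch of sorting through such an index array, with an invented two-field record standing in for GCC's struct reload:

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_reload { int optional; int nregs; };

    static struct fake_reload rld_sketch[] = {
      { 1, 1 },  /* reload 0: optional, one register */
      { 0, 2 },  /* reload 1: required, two-register group */
      { 0, 1 },  /* reload 2: required, one register */
    };

    static int
    order_compare (const void *r1p, const void *r2p)
    {
      int r1 = *(const short *) r1p, r2 = *(const short *) r2p;
      int t;

      /* Required reloads before optional ones.  */
      t = rld_sketch[r1].optional - rld_sketch[r2].optional;
      if (t != 0)
        return t;
      /* Wider groups first.  */
      t = rld_sketch[r2].nregs - rld_sketch[r1].nregs;
      if (t != 0)
        return t;
      /* Deterministic tie-break by reload number.  */
      return r1 - r2;
    }

    int
    main (void)
    {
      short order[] = { 0, 1, 2 };
      int i;

      qsort (order, 3, sizeof (short), order_compare);
      for (i = 0; i < 3; i++)
        printf ("handle reload %d\n", order[i]);
      return 0;
    }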
@@ -5240,10 +4554,6 @@ reloads_conflict (r1, r2)
     }
 }
 
-/* Vector of reload-numbers showing the order in which the reloads should
-   be processed.  */
-short reload_order[MAX_RELOADS];
-
 /* Indexed by reload number, 1 if incoming value
    inherited from previous insns.  */
 char reload_inherited[MAX_RELOADS];
...
...
@@ -5575,15 +4885,14 @@ set_reload_reg (i, r)
    Set rld[R].reg_rtx to the register allocated.
-   If NOERROR is nonzero, we return 1 if successful,
-   or 0 if we couldn't find a spill reg and
-   we didn't change anything.  */
+   We return 1 if successful, or 0 if we couldn't find a spill reg and
+   we didn't change anything.  */
 static int
-allocate_reload_reg (chain, r, last_reload, noerror)
+allocate_reload_reg (chain, r, last_reload)
      struct insn_chain *chain;
      int r;
      int last_reload;
-     int noerror;
 {
   rtx insn = chain->insn;
   int i, pass, count;
...
...
@@ -5620,17 +4929,9 @@ allocate_reload_reg (chain, r, last_reload, noerror)
       /* I is the index in spill_regs.
 	 We advance it round-robin between insns to use all spill regs
 	 equally, so that inherited reloads have a chance
-	 of leapfrogging each other.  Don't do this, however, when we have
-	 group needs and failure would be fatal; if we only have a relatively
-	 small number of spill registers, and more than one of them has
-	 group needs, then by starting in the middle, we may end up
-	 allocating the first one in such a way that we are not left with
-	 sufficient groups to handle the rest.  */
-      if (noerror || ! force_group)
-	i = last_spill_reg;
-      else
-	i = -1;
+	 of leapfrogging each other.  */
+      i = last_spill_reg;
 
       for (count = 0; count < n_spills; count++)
 	{
...
...
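The round-robin scan described in the comment above starts each search just past last_spill_reg and wraps around, so successive insns spread their reloads over all spill registers rather than always grabbing the first free one. A standalone sketch; N_SPILLS and the is_free () test are invented stand-ins for the real availability checks:

    #include <stdio.h>

    #define N_SPILLS 4

    static int last_spill_reg = -1;

    static int
    is_free (int i)
    {
      return i != 1;  /* pretend spill reg 1 is permanently busy */
    }

    static int
    pick_spill_reg (void)
    {
      int i = last_spill_reg, count;

      for (count = 0; count < N_SPILLS; count++)
        {
          if (++i >= N_SPILLS)
            i = 0;              /* wrap around */
          if (is_free (i))
            {
              last_spill_reg = i;
              return i;
            }
        }
      return -1;                /* no spill register available */
    }

    int
    main (void)
    {
      int n;

      for (n = 0; n < 6; n++)
        printf ("insn %d gets spill reg %d\n", n, pick_spill_reg ());
      return 0;
    }

Successive calls return 0, 2, 3, 0, 2, 3: the busy register is skipped and the starting position rotates between insns.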
@@ -5679,19 +4980,14 @@ allocate_reload_reg (chain, r, last_reload, noerror)
 	      break;
 	    }
 
 	  /* Otherwise check that as many consecutive regs as we need
-	     are available here.
-	     Also, don't use for a group registers that are
-	     needed for nongroups.  */
-	  if (! TEST_HARD_REG_BIT (chain->counted_for_nongroups, regnum))
+	     are available here.  */
 	  while (nr > 1)
 	    {
 	      int regno = regnum + nr - 1;
 	      if (! (TEST_HARD_REG_BIT (reg_class_contents[class], regno)
 		     && spill_reg_order[regno] >= 0
 		     && reload_reg_free_p (regno, rld[r].opnum,
-					   rld[r].when_needed)
-		     && ! TEST_HARD_REG_BIT (chain->counted_for_nongroups,
-					     regno)))
+					   rld[r].when_needed)))
 		break;
 	      nr--;
 	    }
...
...
@@ -5706,24 +5002,13 @@ allocate_reload_reg (chain, r, last_reload, noerror)
     }
 
   /* We should have found a spill register by now.  */
-  if (count == n_spills)
-    {
-      if (noerror)
-	return 0;
-      goto failure;
-    }
-
-  if (set_reload_reg (i, r))
-    return 1;
-
-  /* The reg is not OK.  */
-  if (noerror)
-    return 0;
-
- failure:
-  failed_reload (insn, r);
+  if (count >= n_spills)
+    return 0;
 
   /* I is the index in SPILL_REG_RTX of the reload register we are to
      allocate.  Get an rtx for it and find its register number.  */
-  return 1;
+  return set_reload_reg (i, r);
 }
/* Initialize all the tables needed to allocate reload registers.
...
...
@@ -5797,8 +5082,7 @@ choose_reload_regs (chain)
   register int i, j;
-  int max_group_size = 1;
-  enum reg_class group_class = NO_REGS;
-  int inheritance;
-  int pass;
+  int pass, win, inheritance;
   rtx save_reload_reg_rtx[MAX_RELOADS];
...
...
@@ -5833,7 +5117,7 @@ choose_reload_regs (chain)
      Using inheritance when not optimizing leads to paradoxes
      with fp on the 68k: fp numbers (not NaNs) fail to be equal to themselves
      because one side of the comparison might be inherited.  */
+  win = 0;
   for (inheritance = optimize > 0; inheritance >= 0; inheritance--)
     {
       choose_reload_regs_init (chain, save_reload_reg_rtx);
...
...
@@ -5891,7 +5175,7 @@ choose_reload_regs (chain)
 		 || rld[reload_order[i]].secondary_p)
 		&& ! rld[reload_order[i]].optional
 		&& rld[reload_order[i]].reg_rtx == 0)
-	      allocate_reload_reg (chain, reload_order[i], 0, inheritance);
+	      allocate_reload_reg (chain, reload_order[i], 0);
 #endif
/* First see if this pseudo is already available as reloaded
...
...
@@ -6248,7 +5532,7 @@ choose_reload_regs (chain)
 	  if (i == n_reloads)
 	    continue;
 
-	  allocate_reload_reg (chain, r, j == n_reloads - 1, inheritance);
+	  allocate_reload_reg (chain, r, j == n_reloads - 1);
 #endif
}
...
...
@@ -6267,17 +5551,44 @@ choose_reload_regs (chain)
 	  if (rld[r].reg_rtx != 0 || rld[r].optional)
 	    continue;
 
-	  if (! allocate_reload_reg (chain, r, j == n_reloads - 1, inheritance))
+	  if (! allocate_reload_reg (chain, r, j == n_reloads - 1))
 	    break;
 	}
 
       /* If that loop got all the way, we have won.  */
       if (j == n_reloads)
+	{
+	  win = 1;
 	  break;
+	}
 
       /* Loop around and try without any inheritance.  */
     }
 
+  if (! win)
+    {
+      /* First undo everything done by the failed attempt
+	 to allocate with inheritance.  */
+      choose_reload_regs_init (chain, save_reload_reg_rtx);
+
+      /* Some sanity tests to verify that the reloads found in the first
+	 pass are identical to the ones we have now.  */
+      if (chain->n_reloads != n_reloads)
+	abort ();
+      for (i = 0; i < n_reloads; i++)
+	{
+	  if (chain->rld[i].regno < 0 || chain->rld[i].reg_rtx != 0)
+	    continue;
+	  if (chain->rld[i].when_needed != rld[i].when_needed)
+	    abort ();
+	  for (j = 0; j < n_spills; j++)
+	    if (spill_regs[j] == chain->rld[i].regno)
+	      if (! set_reload_reg (j, i))
+		failed_reload (chain->insn, i);
+	}
+    }
+
   /* If we thought we could inherit a reload, because it seemed that
      nothing else wanted the same reload register earlier in the insn,
      verify that assumption, now that all reloads have been assigned.
...
...
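The control flow added to choose_reload_regs above first attempts allocation with inheritance, retries without it, and only when both passes fail does it fall back to the assignment find_reload_regs recorded in chain->rld. A minimal sketch of that try-then-fall-back shape; the try_allocate () stub is invented and stands in for the whole inner allocation loop:

    #include <stdio.h>

    static int
    try_allocate (int inheritance)
    {
      /* Pretend allocation succeeds only without inheritance.  */
      return inheritance == 0;
    }

    int
    main (void)
    {
      int inheritance, win = 0;

      for (inheritance = 1; inheritance >= 0; inheritance--)
        {
          if (try_allocate (inheritance))
            {
              win = 1;
              break;
            }
          /* Loop around and retry without inheritance.  */
        }

      if (!win)
        printf ("falling back to the precomputed allocation\n");
      else
        printf ("allocated %s inheritance\n",
                inheritance ? "with" : "without");
      return 0;
    }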