Commit 8b3686ed authored Apr 05, 1992 by Richard Kenner
*** empty log message ***
From-SVN: r688
parent 2aa8f23f
Showing 3 changed files with 152 additions and 24 deletions

gcc/cse.c     +134  -24
gcc/flags.h    +12   -0
gcc/toplev.c    +6   -0
gcc/cse.c
@@ -2858,7 +2858,7 @@ simplify_unary_operation (code, mode, op, op_mode)
 	  break;
 	case ABS:
-	  if (REAL_VALUES_LESS (d, 0.0))
+	  if (REAL_VALUE_NEGATIVE (d))
 	    d = REAL_VALUE_NEGATE (d);
 	  break;
@@ -3860,7 +3860,9 @@ simplify_relational_operation (code, mode, op0, op1)
       if (CONSTANT_P (op0) && op1 == const0_rtx)
 	return const0_rtx;
 #endif
-      if (NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
+      if (NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx
+	  /* On some machines, the ap reg can be 0 sometimes.  */
+	  && op0 != arg_pointer_rtx)
 	return const0_rtx;
       break;
     }
@@ -3871,7 +3873,9 @@ simplify_relational_operation (code, mode, op0, op1)
       if (CONSTANT_P (op0) && op1 == const0_rtx)
 	return const_true_rtx;
 #endif
-      if (NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
+      if (NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx
+	  /* On some machines, the ap reg can be 0 sometimes.  */
+	  && op0 != arg_pointer_rtx)
 	return const_true_rtx;
       break;
@@ -6577,6 +6581,65 @@ cse_around_loop (loop_start)
     }
 }
 
+/* Variable used for communications between the next two routines.  */
+
+static struct write_data skipped_writes_memory;
+
+/* Process one SET of an insn that was skipped.  We ignore CLOBBERs
+   since they are done elsewhere.  This function is called via note_stores.  */
+
+static void
+invalidate_skipped_set (dest, set)
+     rtx set;
+     rtx dest;
+{
+  if (GET_CODE (set) == CLOBBER
+#ifdef HAVE_cc0
+      || dest == cc0_rtx
+#endif
+      || dest == pc_rtx)
+    return;
+
+  if (GET_CODE (dest) == MEM)
+    note_mem_written (dest, &skipped_writes_memory);
+
+  if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+      || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
+    invalidate (dest);
+}
+
+/* Invalidate all insns from START up to the end of the function or the
+   next label.  This called when we wish to CSE around a block that is
+   conditionally executed.  */
+
+static void
+invalidate_skipped_block (start)
+     rtx start;
+{
+  rtx insn;
+  int i;
+  static struct write_data init = {0, 0, 0, 0};
+  static struct write_data everything = {0, 1, 1, 1};
+
+  for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
+       insn = NEXT_INSN (insn))
+    {
+      if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+	continue;
+
+      skipped_writes_memory = init;
+
+      if (GET_CODE (insn) == CALL_INSN)
+	{
+	  invalidate_for_call ();
+	  skipped_writes_memory = everything;
+	}
+
+      note_stores (PATTERN (insn), invalidate_skipped_set);
+      invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
+    }
+}
+
 /* Used for communication between the following two routines; contains a
    value to be checked for modification.  */
@@ -6706,7 +6769,7 @@ cse_set_around_loop (x, insn, loop_start)
    The branch path indicates which branches should be followed.  If a non-zero
    path size is specified, the block should be rescanned and a different set
    of branches will be taken.  The branch path is only used if
-   FLAG_CSE_FOLLOW_JUMPS is non-zero.
+   FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
 
    DATA is a pointer to a struct cse_basic_block_data, defined below, that is
    used to describe the block.  It is filled in with the information about
@@ -6732,17 +6795,20 @@ struct cse_basic_block_data {
   struct branch_path {
     /* The branch insn. */
     rtx branch;
-    /* Whether it should be taken or not. */
-    enum taken {TAKEN, NOT_TAKEN} status;
+    /* Whether it should be taken or not.  AROUND is the same as taken
+       except that it is used when the destination label is not preceded
+       by a BARRIER.  */
+    enum taken {TAKEN, NOT_TAKEN, AROUND} status;
   } path[PATHLENGTH];
 };
 
 void
-cse_end_of_basic_block (insn, data, follow_jumps, after_loop)
+cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
      rtx insn;
      struct cse_basic_block_data *data;
      int follow_jumps;
      int after_loop;
+     int skip_blocks;
 {
   rtx p = insn, q;
   int nsets = 0;
@@ -6757,7 +6823,7 @@ cse_end_of_basic_block (insn, data, follow_jumps, after_loop)
      at least one branch must have been taken if PATH_SIZE is non-zero.  */
   while (path_size > 0)
     {
-      if (data->path[path_size - 1].status == TAKEN)
+      if (data->path[path_size - 1].status != NOT_TAKEN)
 	{
 	  data->path[path_size - 1].status = NOT_TAKEN;
 	  break;
@@ -6802,15 +6868,15 @@ cse_end_of_basic_block (insn, data, follow_jumps, after_loop)
 	nsets += 1;
 
       if (INSN_CUID (p) > high_cuid)
 	high_cuid = INSN_CUID (p);
       if (INSN_CUID (p) < low_cuid)
 	low_cuid = INSN_CUID (p);
 
       /* See if this insn is in our branch path.  If it is and we are to
 	 take it, do so.  */
       if (path_entry < path_size && data->path[path_entry].branch == p)
 	{
-	  if (data->path[path_entry].status == TAKEN)
+	  if (data->path[path_entry].status != NOT_TAKEN)
 	    p = JUMP_LABEL (p);
 
 	  /* Point to next entry in path, if any.  */
@@ -6820,8 +6886,14 @@ cse_end_of_basic_block (insn, data, follow_jumps, after_loop)
       /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
	  was specified, we haven't reached our maximum path length, there are
	  insns following the target of the jump, this is the only use of the
-	  jump label, and the target label is preceded by a BARRIER.  */
-      else if (follow_jumps && path_size < PATHLENGTH - 1
+	  jump label, and the target label is preceded by a BARRIER.
+
+	  Alternatively, we can follow the jump if it branches around a
+	  block of code and there are no other branches into the block.
+	  In this case invalidate_skipped_block will be called to invalidate any
+	  registers set in the block when following the jump.  */
+
+      else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
 	       && GET_CODE (p) == JUMP_INSN
 	       && GET_CODE (PATTERN (p)) == SET
 	       && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
@@ -6837,7 +6909,7 @@ cse_end_of_basic_block (insn, data, follow_jumps, after_loop)
 	  /* If we ran into a BARRIER, this code is an extension of the
 	     basic block when the branch is taken.  */
-	  if (q != 0 && GET_CODE (q) == BARRIER)
+	  if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
 	    {
 	      /* Don't allow ourself to keep walking around an
 		 always-executed loop.  */
@@ -6865,8 +6937,40 @@ cse_end_of_basic_block (insn, data, follow_jumps, after_loop)
 	      /* Mark block so we won't scan it again later.  */
 	      PUT_MODE (NEXT_INSN (p), QImode);
 	    }
+	  /* Detect a branch around a block of code.  */
+	  else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
+	    {
+	      register rtx tmp;
+
+	      if (next_real_insn (q) == next_real_insn (insn))
+		break;
+
+	      for (i = 0; i < path_entry; i++)
+		if (data->path[i].branch == p)
+		  break;
+
+	      if (i != path_entry)
+		break;
+
+	      /* This is no_labels_between_p (p, q) with an added check for
+		 reaching the end of a function (in case Q precedes P).  */
+	      for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
+		if (GET_CODE (tmp) == CODE_LABEL)
+		  break;
+
+	      if (tmp == q)
+		{
+		  data->path[path_entry].branch = p;
+		  data->path[path_entry++].status = AROUND;
+
+		  path_size = path_entry;
+
+		  p = JUMP_LABEL (p);
+		  /* Mark block so we won't scan it again later.  */
+		  PUT_MODE (NEXT_INSN (p), QImode);
+		}
+	    }
 	}
 
       p = NEXT_INSN (p);
     }
@@ -6878,7 +6982,7 @@ cse_end_of_basic_block (insn, data, follow_jumps, after_loop)
   /* If all jumps in the path are not taken, set our path length to zero
      so a rescan won't be done.  */
   for (i = path_size - 1; i >= 0; i--)
-    if (data->path[i].status == TAKEN)
+    if (data->path[i].status != NOT_TAKEN)
       break;
 
   if (i == -1)
@@ -6998,9 +7102,8 @@ cse_main (f, nregs, after_loop, file)
   insn = f;
   while (insn)
     {
-      int tem;
-
-      cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop);
+      cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
+			      flag_cse_skip_blocks);
 
       /* If this basic block was already processed or has no sets, skip it.  */
       if (val.nsets == 0 || GET_MODE (insn) == QImode)
@@ -7042,7 +7145,8 @@ cse_main (f, nregs, after_loop, file)
	 us a new branch path to investigate.  */
       cse_jumps_altered = 0;
       temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
-      if (cse_jumps_altered == 0 || flag_cse_follow_jumps == 0)
+      if (cse_jumps_altered == 0
+	  || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
	insn = temp;
 
       cse_jumps_altered |= old_cse_jumps_altered;
@@ -7116,9 +7220,14 @@ cse_basic_block (from, to, next_branch, around_loop)
...
@@ -7116,9 +7220,14 @@ cse_basic_block (from, to, next_branch, around_loop)
to be taken, do so. */
to be taken, do so. */
if
(
next_branch
->
branch
==
insn
)
if
(
next_branch
->
branch
==
insn
)
{
{
if
(
next_branch
++->
status
==
TAKEN
)
enum
taken
status
=
next_branch
++->
status
;
if
(
status
!=
NOT_TAKEN
)
{
{
record_jump_equiv
(
insn
,
1
);
if
(
status
==
TAKEN
)
record_jump_equiv
(
insn
,
1
);
else
invalidate_skipped_block
(
NEXT_INSN
(
insn
));
/* Set the last insn as the jump insn; it doesn't affect cc0.
/* Set the last insn as the jump insn; it doesn't affect cc0.
Then follow this branch. */
Then follow this branch. */
#ifdef HAVE_cc0
#ifdef HAVE_cc0
@@ -7200,7 +7309,7 @@ cse_basic_block (from, to, next_branch, around_loop)
	  to_usage = 0;
	  val.path_size = 0;
-	  cse_end_of_basic_block (insn, &val, 0, 0);
+	  cse_end_of_basic_block (insn, &val, 0, 0, 0);
 
	  /* If the tables we allocated have enough space left
	     to handle all the SETs in the next basic block,
@@ -7230,7 +7339,8 @@ cse_basic_block (from, to, next_branch, around_loop)
      we can cse into the loop.  Don't do this if we changed the jump
      structure of a loop unless we aren't going to be following jumps.  */
-  if ((cse_jumps_altered == 0 || flag_cse_follow_jumps == 0)
+  if ((cse_jumps_altered == 0
+       || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
       && around_loop && to != 0
       && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
       && GET_CODE (PREV_INSN (to)) == JUMP_INSN
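The heart of the new skip_blocks handling above is the test that decides whether a conditional jump merely branches around a block: the block may be skipped only if no CODE_LABEL lies between the jump and its target, so no other path can enter the region being invalidated. The standalone C program below is a minimal sketch of that scan over a simplified insn chain; the insn struct, the insn_kind enum, and branches_around_block are invented for illustration and are not GCC's rtl representation.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for GCC's insn chain.  */
enum insn_kind { INSN, JUMP_INSN, CODE_LABEL, BARRIER };

struct insn {
  enum insn_kind kind;
  struct insn *next;    /* NEXT_INSN analogue */
  struct insn *target;  /* JUMP_LABEL analogue, for jumps */
};

/* Return 1 if JUMP branches forward around a block with no label in
   between, so nothing else can jump into the skipped region.  This
   mirrors the "no_labels_between_p with an end-of-chain check" loop
   added to cse_end_of_basic_block; it is not the real implementation.  */
static int
branches_around_block (struct insn *jump)
{
  struct insn *tmp;

  if (jump->kind != JUMP_INSN || jump->target == NULL)
    return 0;

  for (tmp = jump->next; tmp && tmp != jump->target; tmp = tmp->next)
    if (tmp->kind == CODE_LABEL)
      return 0;                  /* another path can enter the block */

  return tmp == jump->target;    /* 0 if we fell off the end of the chain */
}

int
main (void)
{
  /* jump -> insn -> insn -> label (target): the jump skips two insns.  */
  struct insn label = { CODE_LABEL, NULL, NULL };
  struct insn b = { INSN, &label, NULL };
  struct insn a = { INSN, &b, NULL };
  struct insn jump = { JUMP_INSN, &a, &label };

  printf ("skippable block: %d\n", branches_around_block (&jump));
  return 0;
}

When the check succeeds, the real code records the branch with status AROUND so that cse_basic_block later calls invalidate_skipped_block on the skipped insns instead of recording a jump equivalence.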
gcc/flags.h
@@ -192,6 +192,11 @@ extern int flag_unroll_all_loops;
 extern int flag_cse_follow_jumps;
 
+/* Nonzero for -fcse-skip-blocks:
+   have cse follow a branch around a block.  */
+
+extern int flag_cse_skip_blocks;
+
 /* Nonzero for -fexpensive-optimizations:
    perform miscellaneous relatively-expensive optimizations.  */
 extern int flag_expensive_optimizations;
@@ -220,6 +225,13 @@ extern int flag_no_peephole;
 extern int flag_volatile;
 
+/* Nonzero allows GCC to violate some IEEE or ANSI rules regarding math
+   operations in the interest of optimization.  For example it allows
+   GCC to assume arguments to sqrt are nonnegative numbers, allowing
+   faster code for sqrt to be generated.  */
+
+extern int flag_fast_math;
+
 /* Nonzero means make functions that look like good inline candidates
    go inline.  */
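The flag_fast_math comment above explains the trade-off by example: if sqrt arguments may be assumed nonnegative, the compiler can drop the domain check (and errno bookkeeping) that a conforming call otherwise implies. The program below is only a user-level illustration of that difference under stated assumptions; checked_sqrt is an invented helper, not how GCC implements the flag.

#include <errno.h>
#include <math.h>
#include <stdio.h>

/* Illustrative only: a square root that performs the domain check a
   strictly conforming call must account for, versus the bare call that
   a fast-math style assumption (argument is nonnegative) permits.  */
static double
checked_sqrt (double x)
{
  if (x < 0.0)
    {
      errno = EDOM;     /* report the domain error */
      return nan ("");
    }
  return sqrt (x);
}

int
main (void)
{
  double x = 2.0;

  /* Under the fast-math assumption the branch above can be omitted and
     the call expanded directly, e.g. to a hardware sqrt instruction.  */
  printf ("checked: %f  unchecked: %f\n", checked_sqrt (x), sqrt (x));
  return 0;
}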
gcc/toplev.c
@@ -293,6 +293,10 @@ int flag_float_store = 0;
 int flag_cse_follow_jumps;
 
+/* Nonzero for -fcse-skip-blocks:
+   have cse follow a branch around a block.  */
+
+int flag_cse_skip_blocks;
 
 /* Nonzero for -fexpensive-optimizations:
    perform miscellaneous relatively-expensive optimizations.  */
 int flag_expensive_optimizations;
@@ -451,6 +455,7 @@ struct { char *string; int *variable; int on_value;} f_options[] =
   {"defer-pop", &flag_defer_pop, 1},
   {"omit-frame-pointer", &flag_omit_frame_pointer, 1},
   {"cse-follow-jumps", &flag_cse_follow_jumps, 1},
+  {"cse-skip-blocks", &flag_cse_skip_blocks, 1},
   {"expensive-optimizations", &flag_expensive_optimizations, 1},
   {"thread-jumps", &flag_thread_jumps, 1},
   {"strength-reduce", &flag_strength_reduce, 1},
@@ -2593,6 +2598,7 @@ main (argc, argv, envp)
   if (optimize >= 2)
     {
       flag_cse_follow_jumps = 1;
+      flag_cse_skip_blocks = 1;
       flag_expensive_optimizations = 1;
       flag_strength_reduce = 1;
       flag_rerun_cse_after_loop = 1;
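Taken together, the toplev.c hunks show the whole pattern for wiring up a new -f option: define the int flag, add a {string, variable, on_value} row to f_options, and turn the flag on at -O2. The program below imitates that table-driven lookup in a self-contained sketch; parse_f_option and my_options are invented names, and the real option loop in toplev.c handles more cases (for example a "no-" prefix).

#include <stdio.h>
#include <string.h>

/* Flag variables, as a new one would be defined in toplev.c.  */
static int flag_cse_follow_jumps;
static int flag_cse_skip_blocks;

/* Table-driven -f handling in the style of f_options: each entry names
   the option, the variable it controls, and the value stored when the
   option is seen on the command line.  */
static struct { const char *string; int *variable; int on_value; }
my_options[] =
{
  {"cse-follow-jumps", &flag_cse_follow_jumps, 1},
  {"cse-skip-blocks", &flag_cse_skip_blocks, 1},
};

/* Invented helper: accept "-fNAME" and set the matching flag.
   Returns 1 on success, 0 for an unrecognized option.  */
static int
parse_f_option (const char *arg)
{
  size_t i;

  if (strncmp (arg, "-f", 2) != 0)
    return 0;

  for (i = 0; i < sizeof my_options / sizeof my_options[0]; i++)
    if (strcmp (arg + 2, my_options[i].string) == 0)
      {
        *my_options[i].variable = my_options[i].on_value;
        return 1;
      }
  return 0;
}

int
main (void)
{
  parse_f_option ("-fcse-skip-blocks");
  printf ("cse-skip-blocks = %d, cse-follow-jumps = %d\n",
          flag_cse_skip_blocks, flag_cse_follow_jumps);
  return 0;
}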