Commit bd4b6b0d
authored Jan 12, 2016 by Jeff Law, committed by Jeff Law on Jan 12, 2016

* tree-ssa-threadupdate.c: Various whitespace and typo fixes.
From-SVN: r232297
parent 26b5ace7

Showing 2 changed files with 43 additions and 39 deletions

gcc/ChangeLog                  +4  -0
gcc/tree-ssa-threadupdate.c    +39 -39
gcc/ChangeLog  View file @ bd4b6b0d

2016-01-12  Jeff Law  <law@redhat.com>

        * tree-ssa-threadupdate.c: Various whitespace and typo fixes.

2016-01-12  Olivier Hainque  <hainque@adacore.com>

        * gcc.c (spec_undefvar_allowed): New global.
...
gcc/tree-ssa-threadupdate.c  View file @ bd4b6b0d
...
@@ -353,7 +353,7 @@ lookup_redirection_data (edge e, enum insert_option insert)
  struct redirection_data *elt;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->path = path;
...
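For readers unfamiliar with lookup_redirection_data: it builds a candidate element and then probes a hash table to see whether an equivalent threading path is already registered. A rough, self-contained sketch of that find-or-insert shape, using standard containers and invented names (redirection_entry, lookup_or_insert) rather than GCC's actual hash table types:

#include <string>
#include <unordered_map>
#include <vector>

/* Invented stand-in for GCC's redirection_data: the threading path is
   reduced to a list of block indices, plus a counter of incoming edges
   that want to reuse the same duplicated path.  */
struct redirection_entry
{
  std::vector<int> path;
  int incoming_edges;
};

static std::unordered_map<std::string, redirection_entry> table;

/* Build a key from PATH, then either find the existing entry or insert
   a fresh one -- the same find-or-insert shape as lookup_redirection_data.  */
static redirection_entry &
lookup_or_insert (const std::vector<int> &path)
{
  std::string key;
  for (int bb : path)
    key += std::to_string (bb) + ",";

  auto it = table.find (key);
  if (it == table.end ())
    it = table.emplace (key, redirection_entry { path, 0 }).first;

  it->second.incoming_edges++;
  return it->second;
}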
@@ -635,21 +635,21 @@ any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
   are not part of any jump threading path, but add profile counts along
   the path.

-  In the aboe example, after all jump threading is complete, we will
+  In the above example, after all jump threading is complete, we will
   end up with the following control flow:

             A          B            C
             |          |            |
           Ea|          |Eb          |Ec
             |          |            |
             v          v            v
            Ja          J            Jc
            / \        / \Eon'      / \
       Eona/   \   ---/---\--------   \Eonc
          /     \ /  /     \           \
         v       v  v       v           v
        Sona    Soff       Son         Sonc
          \                 /\          /
           \___________    /  \  _____/
                        \  /    \/
                         vv      v
...
@@ -793,19 +793,19 @@ compute_path_counts (struct redirection_data *rd,
         coming into the path that will contribute to the count flowing
         into the path successor.  */
      if (has_joiner && epath != elast)
        {
          /* Look for other incoming edges after joiner.  */
          FOR_EACH_EDGE (ein, ei, epath->dest->preds)
            {
              if (ein != epath
                  /* Ignore in edges from blocks we have duplicated for a
                     threading path, which have duplicated edge counts until
                     they are redirected by an invocation of this routine.  */
                  && !bitmap_bit_p (local_info->duplicate_blocks,
                                    ein->src->index))
                nonpath_count += ein->count;
            }
        }
      if (cur_count < path_out_count)
        path_out_count = cur_count;
      if (epath->count < min_path_count)
...
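The FOR_EACH_EDGE loop above sums the profile counts on edges that reach the path successor from outside the threading path, skipping blocks already duplicated for some other path. A self-contained sketch of the same accumulation, with simplified stand-in types (fake_edge and sum_nonpath_counts are invented for illustration, not GCC structures):

#include <set>
#include <vector>

/* Invented stand-in for an edge: the index of its source block and its
   profile count.  */
struct fake_edge
{
  int src_index;
  long count;
};

/* Sum the counts of predecessor edges that are neither the path edge
   itself nor sourced from a block already duplicated for a threading
   path -- the same accumulation performed for nonpath_count above.  */
static long
sum_nonpath_counts (const std::vector<fake_edge> &preds, int path_src,
                    const std::set<int> &duplicate_blocks)
{
  long nonpath_count = 0;
  for (const fake_edge &ein : preds)
    if (ein.src_index != path_src
        && duplicate_blocks.count (ein.src_index) == 0)
      nonpath_count += ein.count;
  return nonpath_count;
}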
@@ -827,14 +827,14 @@ compute_path_counts (struct redirection_data *rd,
     difference between elast->count and nonpath_count.  Otherwise the edge
     counts after threading will not be sane.  */
  if (has_joiner && path_out_count < elast->count - nonpath_count)
    {
      path_out_count = elast->count - nonpath_count;
      /* But neither can we go above the minimum count along the path
         we are duplicating.  This can be an issue due to profile
         insanities coming in to this pass.  */
      if (path_out_count > min_path_count)
        path_out_count = min_path_count;
    }

  *path_in_count_ptr = path_in_count;
  *path_out_count_ptr = path_out_count;
...
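The adjustment above raises path_out_count to at least elast->count - nonpath_count when a joiner is present, but never above the minimum count seen along the duplicated path. A small worked example of that arithmetic, with invented numbers and variable names mirroring the hunk:

#include <cstdio>

int
main ()
{
  /* Invented values for illustration only.  */
  long elast_count = 100;     /* count on the last edge of the path  */
  long nonpath_count = 30;    /* counts entering from outside the path  */
  long min_path_count = 60;   /* smallest count along the duplicated path  */
  long path_out_count = 50;   /* initial estimate  */
  bool has_joiner = true;

  if (has_joiner && path_out_count < elast_count - nonpath_count)
    {
      /* Raise the estimate so the successor keeps a sane count...  */
      path_out_count = elast_count - nonpath_count;   /* now 70  */
      /* ...but never exceed the minimum count along the path.  */
      if (path_out_count > min_path_count)
        path_out_count = min_path_count;              /* clamped to 60  */
    }

  printf ("path_out_count = %ld\n", path_out_count);  /* prints 60  */
  return 0;
}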
@@ -1268,17 +1268,17 @@ ssa_fix_duplicate_block_edges (struct redirection_data *rd,
             thread path (path_in_freq).  If we had a joiner, it would have
             been updated at the end of that handling to the edge frequency
             along the duplicated joiner path edge.  */
          update_profile (epath, NULL, path_out_count, path_out_count,
                          cur_path_freq);
        }

      /* Increment the index into the duplicated path when we processed
         a duplicated block.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
          || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
        {
          count++;
        }
    }

  /* Now walk orig blocks and update their probabilities, since the
...
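The index above is advanced only for path entries whose type marks a block that was actually duplicated. A stripped-down version of that walk, using an invented stand-in enum instead of GCC's jump_thread_edge type field (the two constant names are taken from the hunk; EDGE_NO_COPY is made up):

#include <vector>

/* Invented stand-in for the edge-copy classification on a threading path.  */
enum copy_type
{
  EDGE_NO_COPY,
  EDGE_COPY_SRC_BLOCK,
  EDGE_COPY_SRC_JOINER_BLOCK
};

/* Count the path entries whose type marks a duplicated block, echoing
   the increment of the duplicated-block index in the hunk above.  */
static unsigned
count_duplicated_blocks (const std::vector<copy_type> &path)
{
  unsigned count = 0;
  for (copy_type t : path)
    if (t == EDGE_COPY_SRC_JOINER_BLOCK || t == EDGE_COPY_SRC_BLOCK)
      count++;
  return count;
}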
@@ -2383,7 +2383,7 @@ valid_jump_thread_path (vec<jump_thread_edge *> *path)
      struct loop *loop = e->dest->loop_father;

      if (e->dest != (*path)[j + 1]->e->src)
        return false;

      /* If we're threading through the loop latch back into the
         same loop and the destination does not dominate the loop
...
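The test above rejects a path whose consecutive edges are not actually connected: each edge's destination must be the source of the next edge. A minimal sketch of that invariant over simplified edges (simple_edge and path_is_connected are invented names, not GCC's):

#include <cstddef>
#include <vector>

/* Invented minimal edge: source and destination block indices.  */
struct simple_edge
{
  int src;
  int dest;
};

/* A threading path is only coherent if every edge ends where the next
   one begins, which is what the e->dest != (*path)[j + 1]->e->src test
   above enforces.  */
static bool
path_is_connected (const std::vector<simple_edge> &path)
{
  for (std::size_t j = 0; j + 1 < path.size (); j++)
    if (path[j].dest != path[j + 1].src)
      return false;
  return true;
}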
@@ -2705,7 +2705,7 @@ register_jump_thread (vec<jump_thread_edge *> *path)
  for (unsigned int i = 0; i < path->length (); i++)
    {
      if ((*path)[i]->e == NULL)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file,
...
@@ -2715,7 +2715,7 @@ register_jump_thread (vec<jump_thread_edge *> *path)
          delete_jump_thread_path (path);
          return;
        }

      /* Only the FSM threader is allowed to thread across
         backedges in the CFG.  */
...