Commit 242d8792 authored Jan 26, 2004 by Richard Henderson
Revert accidental commit.
From-SVN: r76675
parent 6d4b0a5d
Showing 3 changed files with 30 additions and 158 deletions

gcc/ggc-common.c   +1   -7
gcc/ggc-page.c    +29 -149
gcc/ggc.h          +0   -2
gcc/ggc-common.c
...
@@ -147,11 +147,6 @@ ggc_realloc (void *x, size_t size)
     return ggc_alloc (size);
 
   old_size = ggc_get_size (x);
-
-#ifndef ENABLE_GC_ALWAYS_COLLECT
-  /* In completely-anal-checking mode, never re-use memory.  This maximizes
-     the chance of catching the user retaining a pointer to the old block.
-     Otherwise, we get to consume the power-of-two overhead we had before.  */
   if (size <= old_size)
     {
       /* Mark the unwanted memory as unaccessible.  We also need to make
...
@@ -169,7 +164,6 @@ ggc_realloc (void *x, size_t size)
       VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
       return x;
     }
-#endif
 
   r = ggc_alloc (size);
...
@@ -182,7 +176,7 @@ ggc_realloc (void *x, size_t size)
   memcpy (r, x, old_size);
 
   /* The old object is not supposed to be used anymore.  */
-  ggc_free (x);
+  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));
 
   return r;
 }
...
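For orientation, a minimal sketch of the grow-copy-release pattern that ggc_realloc implements, using malloc/free and a size-header word as stand-ins for the GC allocator and its page-table size lookup. The names my_alloc, my_get_size and my_realloc are hypothetical, not part of GCC, and error handling is omitted.

#include <stdlib.h>
#include <string.h>

/* Each block carries its usable size in a header word, standing in for
   GCC's page-table lookup in ggc_get_size.  */
static void *my_alloc (size_t size)
{
  size_t *p = malloc (sizeof (size_t) + size);
  *p = size;
  return p + 1;
}

static size_t my_get_size (void *x)
{
  return ((size_t *) x)[-1];
}

static void *my_realloc (void *x, size_t size)
{
  size_t old_size;
  void *r;

  if (x == NULL)
    return my_alloc (size);

  old_size = my_get_size (x);
  if (size <= old_size)
    return x;                  /* the existing bucket is already big enough */

  r = my_alloc (size);
  memcpy (r, x, old_size);     /* copy only the bytes that existed before */
  free ((size_t *) x - 1);     /* the old object must not be used anymore */
  return r;
}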
gcc/ggc-page.c
...
@@ -401,17 +401,6 @@ static struct globals
      zero otherwise.  We allocate them all together, to enable a
      better runtime data access pattern.  */
   unsigned long **save_in_use;
 
-#ifdef ENABLE_GC_ALWAYS_COLLECT
-  /* List of free objects to be verified as actually free on the
-     next collection.  */
-  struct free_object
-  {
-    void *object;
-    struct free_object *next;
-  } *free_object_list;
-#endif
-
 #ifdef GATHER_STATISTICS
   struct
   {
...
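The free_object_list removed here is a classic singly-linked bookkeeping list: each node records one pointer handed to ggc_free, so a later collection can verify it really was unreachable. A minimal sketch of the push operation; remember_free and the file-scope list head are hypothetical stand-ins for the ENABLE_GC_ALWAYS_COLLECT code, and allocation failure is not checked.

#include <stdlib.h>

struct free_object
{
  void *object;
  struct free_object *next;
};

static struct free_object *free_object_list;

/* Record P so a later collection can verify it really was unreachable.  */
static void remember_free (void *p)
{
  struct free_object *fo = malloc (sizeof (struct free_object));
  fo->object = p;
  fo->next = free_object_list;   /* push onto the head of the list */
  free_object_list = fo;
}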
@@ -905,7 +894,7 @@ adjust_depth (void)
 
 /* For a page that is no longer needed, put it on the free page list.  */
 
-static void
+static inline void
 free_page (page_entry *entry)
 {
   if (GGC_DEBUG_LEVEL >= 2)
...
@@ -1060,19 +1049,16 @@ ggc_alloc_zone (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED)
 void *
 ggc_alloc (size_t size)
 {
-  size_t order, word, bit, object_offset, object_size;
+  unsigned order, word, bit, object_offset;
   struct page_entry *entry;
   void *result;
 
   if (size <= 256)
-    {
-      order = size_lookup [size];
-      object_size = OBJECT_SIZE (order);
-    }
+    order = size_lookup [size];
   else
     {
       order = 9;
-      while (size > (object_size = OBJECT_SIZE (order)))
+      while (size > OBJECT_SIZE (order))
        order++;
     }
...
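The restored code maps a request size to a bucket "order" in two steps: a table lookup for small sizes, and a linear scan from order 9 (512 bytes) upward otherwise. A self-contained sketch of the same idea, assuming a plain power-of-two size table; GCC's real OBJECT_SIZE table also contains a few non-power-of-two buckets, and size_lookup is precomputed rather than scanned.

#include <stddef.h>

#define NUM_ORDERS 16
static const size_t object_size_table[NUM_ORDERS] = {
  1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
  1024, 2048, 4096, 8192, 16384, 32768
};
#define OBJECT_SIZE(order) object_size_table[order]

/* Map SIZE (assumed <= 32768 here) to the smallest bucket that fits it.  */
static unsigned size_to_order (size_t size)
{
  unsigned order;

  if (size <= 256)
    {
      /* Stand-in for GCC's precomputed size_lookup[] table.  */
      for (order = 0; OBJECT_SIZE (order) < size; order++)
        continue;
    }
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }
  return order;
}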
@@ -1135,7 +1121,7 @@ ggc_alloc (size_t size)
       /* Next time, try the next bit.  */
       entry->next_bit_hint = hint + 1;
 
-      object_offset = hint * object_size;
+      object_offset = hint * OBJECT_SIZE (order);
     }
 
   /* Set the in-use bit.  */
...
@@ -1163,16 +1149,16 @@ ggc_alloc (size_t size)
      exact same semantics in presence of memory bugs, regardless of
      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
      handle to avoid handle leak.  */
-  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
+  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
 
   /* `Poison' the entire allocated object, including any padding at
      the end.  */
-  memset (result, 0xaf, object_size);
+  memset (result, 0xaf, OBJECT_SIZE (order));
 
   /* Make the bytes after the end of the object unaccessible.  Discard the
      handle to avoid handle leak.  */
   VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
-                                            object_size - size));
+                                            OBJECT_SIZE (order) - size));
 #endif
 
   /* Tell Valgrind that the memory is there, but its content isn't
...
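A sketch of the ENABLE_GC_CHECKING idea in plain C: poison the whole bucket with a recognizable byte (0xaf) so reads of uninitialized memory are easy to spot in a debugger, and treat the bytes from the requested size up to the bucket size as off-limits padding. The real code uses Valgrind client requests (VALGRIND_MAKE_WRITABLE / VALGRIND_MAKE_NOACCESS) to actually enforce the padding ban; checked_alloc is a hypothetical name and error handling is omitted.

#include <stdlib.h>
#include <string.h>

static void *checked_alloc (size_t size, size_t bucket_size)
{
  void *result = malloc (bucket_size);
  memset (result, 0xaf, bucket_size);  /* poison object and padding alike */
  /* Bytes at (char *) result + size .. bucket_size are padding; the real
     allocator tells Valgrind to flag any touch of them as an error.  */
  return result;
}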
@@ -1182,39 +1168,37 @@ ggc_alloc (size_t size)
   /* Keep track of how many bytes are being allocated.  This
      information is used in deciding when to collect.  */
-  G.allocated += object_size;
+  G.allocated += OBJECT_SIZE (order);
 
 #ifdef GATHER_STATISTICS
   {
-    size_t overhead = object_size - size;
-
-    G.stats.total_overhead += overhead;
-    G.stats.total_allocated += object_size;
-    G.stats.total_overhead_per_order[order] += overhead;
-    G.stats.total_allocated_per_order[order] += object_size;
+    G.stats.total_overhead += OBJECT_SIZE (order) - size;
+    G.stats.total_allocated += OBJECT_SIZE (order);
+    G.stats.total_overhead_per_order[order] += OBJECT_SIZE (order) - size;
+    G.stats.total_allocated_per_order[order] += OBJECT_SIZE (order);
 
-    if (size <= 32)
-      {
-        G.stats.total_overhead_under32 += overhead;
-        G.stats.total_allocated_under32 += object_size;
-      }
-    if (size <= 64)
-      {
-        G.stats.total_overhead_under64 += overhead;
-        G.stats.total_allocated_under64 += object_size;
-      }
-    if (size <= 128)
-      {
-        G.stats.total_overhead_under128 += overhead;
-        G.stats.total_allocated_under128 += object_size;
-      }
+    if (size <= 32){
+      G.stats.total_overhead_under32 += OBJECT_SIZE (order) - size;
+      G.stats.total_allocated_under32 += OBJECT_SIZE (order);
+    }
+    if (size <= 64){
+      G.stats.total_overhead_under64 += OBJECT_SIZE (order) - size;
+      G.stats.total_allocated_under64 += OBJECT_SIZE (order);
+    }
+    if (size <= 128){
+      G.stats.total_overhead_under128 += OBJECT_SIZE (order) - size;
+      G.stats.total_allocated_under128 += OBJECT_SIZE (order);
+    }
   }
 #endif
 
   if (GGC_DEBUG_LEVEL >= 3)
     fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
-            (unsigned long) size, (unsigned long) object_size, result,
+            (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
             (void *) entry);
 
   return result;
...
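The bookkeeping restored by this hunk tracks, per allocation, the gap between what the caller asked for and the bucket actually handed out. A reduced sketch; struct alloc_stats and record_alloc are hypothetical stand-ins for GCC's G.stats fields.

#include <stddef.h>

struct alloc_stats
{
  size_t total_allocated;
  size_t total_overhead;
};

static struct alloc_stats stats;

static void record_alloc (size_t size, size_t bucket_size)
{
  stats.total_allocated += bucket_size;
  stats.total_overhead += bucket_size - size;  /* wasted slack in the bucket */
}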
@@ -1295,78 +1279,6 @@ ggc_get_size (const void *p)
   page_entry *pe = lookup_page_table_entry (p);
   return OBJECT_SIZE (pe->order);
 }
-
-/* Release the memory for object P.  */
-
-void
-ggc_free (void *p)
-{
-  page_entry *pe = lookup_page_table_entry (p);
-  size_t order = pe->order;
-  size_t size = OBJECT_SIZE (order);
-
-  if (GGC_DEBUG_LEVEL >= 3)
-    fprintf (G.debug_file,
-            "Freeing object, actual size=%lu, at %p on %p\n",
-            (unsigned long) size, p, (void *) pe);
-
-#ifdef ENABLE_GC_CHECKING
-  /* Poison the data, to indicate the data is garbage.  */
-  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
-  memset (p, 0xa5, size);
-#endif
-  /* Let valgrind know the object is free.  */
-  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
-
-#ifdef ENABLE_GC_ALWAYS_COLLECT
-  /* In the completely-anal-checking mode, we do *not* immediately free
-     the data, but instead verify that the data is *actually* not
-     reachable the next time we collect.  */
-  {
-    struct free_object *fo = xmalloc (sizeof (struct free_object));
-    fo->object = p;
-    fo->next = G.free_object_list;
-    G.free_object_list = fo;
-  }
-#else
-  {
-    unsigned int bit_offset, word, bit;
-
-    G.allocated -= size;
-
-    /* Mark the object not-in-use.  */
-    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
-    word = bit_offset / HOST_BITS_PER_LONG;
-    bit = bit_offset % HOST_BITS_PER_LONG;
-    pe->in_use_p[word] &= ~(1UL << bit);
-
-    if (pe->num_free_objects++ == 0)
-      {
-        /* If the page is completely full, then it's supposed to
-           be after all pages that aren't.  Since we've freed one
-           object from a page that was full, we need to move the
-           page to the head of the list.  */
-
-        page_entry *p, *q;
-        for (q = NULL, p = G.pages[order]; ; q = p, p = p->next)
-          if (p == pe)
-            break;
-        if (q && q->num_free_objects == 0)
-          {
-            p = pe->next;
-            q->next = p;
-            if (!p)
-              G.page_tails[order] = q;
-            pe->next = G.pages[order];
-            G.pages[order] = pe;
-          }
-
-        /* Reset the hint bit to point to the only free object.  */
-        pe->next_bit_hint = bit_offset;
-      }
-  }
-#endif
-}
 
 /* Subroutine of init_ggc which computes the pair of numbers used to
    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
...
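The core of the removed ggc_free is plain bitmap arithmetic: every object on a page owns one bit in in_use_p[], and freeing clears it. A self-contained sketch of just that step; in the real code bit_offset comes from OFFSET_TO_BIT, here it is simply a parameter.

#include <limits.h>
#include <stddef.h>

#define HOST_BITS_PER_LONG (CHAR_BIT * sizeof (unsigned long))

static void mark_not_in_use (unsigned long *in_use_p, size_t bit_offset)
{
  size_t word = bit_offset / HOST_BITS_PER_LONG;  /* which long holds the bit */
  size_t bit = bit_offset % HOST_BITS_PER_LONG;   /* position inside that long */
  in_use_p[word] &= ~(1UL << bit);                /* clear the in-use bit */
}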
@@ -1876,8 +1788,6 @@ ggc_collect (void)
   timevar_push (TV_GC);
   if (!quiet_flag)
     fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
-  if (GGC_DEBUG_LEVEL >= 2)
-    fprintf (G.debug_file, "BEGIN COLLECTING\n");
 
   /* Zero the total allocated bytes.  This will be recalculated in the
      sweep phase.  */
...
@@ -1899,42 +1809,12 @@ ggc_collect (void)
   sweep_pages ();
 
-#ifdef ENABLE_GC_ALWAYS_COLLECT
-  /* Validate that the reportedly free objects actually are.  */
-  {
-    struct free_object *f, *n;
-    for (f = G.free_object_list; f ; f = n)
-      {
-        page_entry *pe = lookup_page_table_entry (f->object);
-
-        /* If the page entry is null, that means the entire page is free.
-           Otherwise, we have to examine the in-use bit for the object.  */
-        if (pe != NULL)
-          {
-            size_t bit, word;
-            bit = OFFSET_TO_BIT ((char *) f->object - pe->page, pe->order);
-            word = bit / HOST_BITS_PER_LONG;
-            bit = bit % HOST_BITS_PER_LONG;
-
-            if (pe->in_use_p[word] & (1UL << bit))
-              abort ();
-          }
-
-        n = f->next;
-        free (f);
-      }
-    G.free_object_list = NULL;
-  }
-#endif
-
   G.allocated_last_gc = G.allocated;
 
   timevar_pop (TV_GC);
 
   if (!quiet_flag)
     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
-  if (GGC_DEBUG_LEVEL >= 2)
-    fprintf (G.debug_file, "END COLLECTING\n");
 }
 
 /* Print allocation statistics.  */
...
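The validation loop removed above walks the deferred-free list after marking and asserts that every object on it is now dead. A simplified sketch of the same traversal; is_in_use is a hypothetical callback standing in for the page-table lookup plus in_use_p bit test.

#include <stdlib.h>

struct free_object { void *object; struct free_object *next; };

static void validate_free_objects (struct free_object *list,
                                   int (*is_in_use) (void *))
{
  struct free_object *f, *n;

  for (f = list; f; f = n)
    {
      if (is_in_use (f->object))
        abort ();   /* something still reaches a "freed" object: a GC bug */
      n = f->next;
      free (f);     /* the bookkeeping node itself is ordinary malloc memory */
    }
}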
gcc/ggc.h
...
@@ -223,8 +223,6 @@ extern void *ggc_alloc_cleared_zone (size_t, struct alloc_zone *);
 extern void *ggc_realloc (void *, size_t);
 /* Like ggc_alloc_cleared, but performs a multiplication.  */
 extern void *ggc_calloc (size_t, size_t);
-/* Free a block.  To be used when known for certain it's not reachable.  */
-extern void ggc_free (void *);
 
 #define ggc_alloc_rtx(CODE) \
   ((rtx) ggc_alloc_typed (gt_ggc_e_7rtx_def, RTX_SIZE (CODE)))
...
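Per its comment, ggc_calloc is "like ggc_alloc_cleared, but performs a multiplication". A defensive sketch of such a wrapper, with an overflow guard added for illustration (the 2004 implementation may simply multiply); xalloc_cleared and xcalloc_like are hypothetical stand-ins, not GCC's functions.

#include <stdlib.h>

static void *xalloc_cleared (size_t size)
{
  return calloc (1, size);       /* stand-in for ggc_alloc_cleared */
}

static void *xcalloc_like (size_t nmemb, size_t elt_size)
{
  if (elt_size != 0 && nmemb > (size_t) -1 / elt_size)
    return NULL;                 /* nmemb * elt_size would overflow */
  return xalloc_cleared (nmemb * elt_size);
}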