Commit 08cee789 by Daniel Jacobowitz

ggc-zone.c: Rewritten.

	* ggc-zone.c: Rewritten.

	* alloc-pool.c (dump_alloc_pool_statistics): Don't print statistics
	if there are none.
	* configure.ac: Define GGC_ZONE for --with-gc=zone.
	* config.in: Regenerated.
	* configure: Regenerated.
	* gengtype.c (write_types_process_field, write_func_for_structure):
	Pass new argument to gt_pch_note_object.
	* ggc-common.c (struct ptr_data): Add TYPE.
	(gt_pch_note_object): Take TYPE argument and save it.
	(call_count): Update call to ggc_pch_count_object.
	(call_alloc): Update call to ggc_pch_alloc_object.
	(gt_pch_save): Call ggc_pch_prepare_write after padding the PCH
	file.
	* ggc-none.c (rtl_zone, garbage_zone, ggc_alloc_zone_stat): Delete.
	* ggc-page.c (rtl_zone, tree_zone, garbage_zone)
	(ggc_alloc_zone_stat): Delete.
	(ggc_pch_count_object, ggc_pch_alloc_object): Add TYPE argument.
	* ggc.h (gt_pch_note_object, ggc_pch_count_object)
	(ggc_pch_alloc_object): Update prototypes.
	(garbage_zone): Delete.
	(tree_zone, rtl_zone, ggc_alloc_zone_stat, ggc_alloc_zone): Move to
	GGC_ZONE conditional.  Update.  Change tree_zone and rtl_zone into
	pointers.
	(tree_id_zone): New variable.
	(ggc_alloc_cleared_zone): Remove unused.
	(ggc_alloc_zone): Define.
	(ggc_alloc_rtvec, ggc_alloc_tree): Update to use ggc_alloc_zone.
	* rtl.c (rtx_alloc_stat, shallow_copy_rtx_stat): Use
	ggc_alloc_zone_stat.
	* stringpool.c (gt_pch_n_S): Update call to gt_pch_note_object.
	* tree.c (copy_node_stat, make_tree_binfo_stat, make_tree_vec_stat)
	(tree_cons_stat, build1_stat): Update call to ggc_alloc_zone_stat.
	(make_node_stat): Likewise.  Use tree_id_zone.

From-SVN: r96381
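For orientation before the per-file diffs: the ggc.h entries above move the zone globals under a GGC_ZONE conditional and route the rtvec/tree allocation macros through ggc_alloc_zone. A minimal sketch of that arrangement, reconstructed from the ChangeLog and the ggc-zone.c diff below (an assumption for illustration, not text copied from the commit):

    /* Hypothetical reconstruction of the ggc.h shape this commit describes.  */
    #ifdef GGC_ZONE
    extern struct alloc_zone rtl_zone;      /* Structure objects, matching the
                                               ggc-zone.c definitions below.  */
    extern struct alloc_zone tree_zone;
    extern struct alloc_zone tree_id_zone;  /* New zone used by make_node_stat.  */
    # define ggc_alloc_zone(SIZE, ZONE) ggc_alloc_zone_stat (SIZE, ZONE MEM_STAT_INFO)
    #else
    # define ggc_alloc_zone(SIZE, ZONE) ggc_alloc (SIZE)  /* Zone is ignored.  */
    #endif
    #define ggc_alloc_tree(LENGTH) \
      ((union tree_node *) ggc_alloc_zone (LENGTH, &tree_zone))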
--- a/gcc/alloc-pool.c
+++ b/gcc/alloc-pool.c
@@ -333,6 +333,9 @@ void dump_alloc_pool_statistics (void)
#ifdef GATHER_STATISTICS
struct output_info info;
if (!alloc_pool_hash)
return;
fprintf (stderr, "\nAlloc-pool Kind Pools Allocated Peak Leak\n");
fprintf (stderr, "-------------------------------------------------------------\n");
info.count = 0;
--- a/gcc/config.in
+++ b/gcc/config.in
@@ -79,6 +79,9 @@
this is either `int' or `gid_t'. */
#undef GETGROUPS_T
/* Define if the zone collector is in use */
#undef GGC_ZONE
/* Define to 1 if you have the `alphasort' function. */
#undef HAVE_ALPHASORT
--- a/gcc/configure
+++ b/gcc/configure
@@ -14959,9 +14959,17 @@ fi
if test "${with_gc+set}" = set; then
withval="$with_gc"
case "$withval" in
page | zone)
page)
GGC=ggc-$withval
;;
zone)
GGC=ggc-$withval
cat >>confdefs.h <<\_ACEOF
#define GGC_ZONE 1
_ACEOF
;;
*)
{ { echo "$as_me:$LINENO: error: $withval is an invalid option to --with-gc" >&5
echo "$as_me: error: $withval is an invalid option to --with-gc" >&2;}
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -3027,9 +3027,13 @@ AC_ARG_WITH(gc,
[ --with-gc={page,zone} choose the garbage collection mechanism to use
with the compiler],
[case "$withval" in
page | zone)
page)
GGC=ggc-$withval
;;
zone)
GGC=ggc-$withval
AC_DEFINE(GGC_ZONE, 1, [Define if the zone collector is in use])
;;
*)
AC_MSG_ERROR([$withval is an invalid option to --with-gc])
;;
--- a/gcc/gengtype.c
+++ b/gcc/gengtype.c
@@ -1928,6 +1928,21 @@ write_types_process_field (type_p f, const struct walk_type_data *d)
}
else
oprintf (d->of, ", gt_%sa_%s", wtd->param_prefix, d->prev_val[0]);
if (f->u.p->kind == TYPE_PARAM_STRUCT
&& f->u.p->u.s.line.file != NULL)
{
oprintf (d->of, ", gt_e_");
output_mangled_typename (d->of, f);
}
else if (UNION_OR_STRUCT_P (f)
&& f->u.p->u.s.line.file != NULL)
{
oprintf (d->of, ", gt_ggc_e_");
output_mangled_typename (d->of, f);
}
else
oprintf (d->of, ", gt_types_enum_last");
}
oprintf (d->of, ");\n");
if (d->reorder_fn && wtd->reorder_note_routine)
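Taken together, these oprintf calls give every generated gt_pch_note_object call a gt_types_enum as a new final argument. For a plain structure the emitted code would look roughly like this (the shape is inferred from the format strings above, not captured from real gengtype output):

    /* Hypothetical generated PCH walker fragment, e.g. for rtx_def:  */
    if (gt_pch_note_object (x, x, gt_pch_p_7rtx_def, gt_ggc_e_7rtx_def))
      {
        /* ... note each pointer field of the object ...  */
      }
    /* Types without a usable enumerator get gt_types_enum_last instead.  */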
@@ -2035,6 +2050,21 @@ write_func_for_structure (type_p orig_s, type_p s, type_p *param,
{
oprintf (d.of, ", x, gt_%s_", wtd->param_prefix);
output_mangled_typename (d.of, orig_s);
if (orig_s->u.p->kind == TYPE_PARAM_STRUCT
&& orig_s->u.p->u.s.line.file != NULL)
{
oprintf (d.of, ", gt_e_");
output_mangled_typename (d.of, orig_s);
}
else if (UNION_OR_STRUCT_P (orig_s)
&& orig_s->u.s.line.file != NULL)
{
oprintf (d.of, ", gt_ggc_e_");
output_mangled_typename (d.of, orig_s);
}
else
oprintf (d.of, ", gt_types_enum_last");
}
oprintf (d.of, "))\n");
}
@@ -2045,6 +2075,21 @@ write_func_for_structure (type_p orig_s, type_p s, type_p *param,
{
oprintf (d.of, ", xlimit, gt_%s_", wtd->param_prefix);
output_mangled_typename (d.of, orig_s);
if (orig_s->u.p->kind == TYPE_PARAM_STRUCT
&& orig_s->u.p->u.s.line.file != NULL)
{
oprintf (d.of, ", gt_e_");
output_mangled_typename (d.of, orig_s);
}
else if (UNION_OR_STRUCT_P (orig_s)
&& orig_s->u.s.line.file != NULL)
{
oprintf (d.of, ", gt_ggc_e_");
output_mangled_typename (d.of, orig_s);
}
else
oprintf (d.of, ", gt_types_enum_last");
}
oprintf (d.of, "))\n");
oprintf (d.of, " xlimit = (");
@@ -2070,6 +2115,21 @@ write_func_for_structure (type_p orig_s, type_p s, type_p *param,
{
oprintf (d.of, ", xprev, gt_%s_", wtd->param_prefix);
output_mangled_typename (d.of, orig_s);
if (orig_s->u.p->kind == TYPE_PARAM_STRUCT
&& orig_s->u.p->u.s.line.file != NULL)
{
oprintf (d.of, ", gt_e_");
output_mangled_typename (d.of, orig_s);
}
else if (UNION_OR_STRUCT_P (orig_s)
&& orig_s->u.s.line.file != NULL)
{
oprintf (d.of, ", gt_ggc_e_");
output_mangled_typename (d.of, orig_s);
}
else
oprintf (d.of, ", gt_types_enum_last");
}
oprintf (d.of, ");\n");
oprintf (d.of, " }\n");
--- a/gcc/ggc-common.c
+++ b/gcc/ggc-common.c
@@ -244,6 +244,7 @@ struct ptr_data
gt_handle_reorder reorder_fn;
size_t size;
void *new_addr;
enum gt_types_enum type;
};
#define POINTER_HASH(x) (hashval_t)((long)x >> 3)
@@ -252,7 +253,8 @@ struct ptr_data
int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
gt_note_pointers note_ptr_fn)
gt_note_pointers note_ptr_fn,
enum gt_types_enum type)
{
struct ptr_data **slot;
@@ -277,6 +279,7 @@ gt_pch_note_object (void *obj, void *note_ptr_cookie,
(*slot)->size = strlen (obj) + 1;
else
(*slot)->size = ggc_get_size (obj);
(*slot)->type = type;
return 1;
}
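Callers without useful type information pass the gt_types_enum_last sentinel. A sketch of the updated string case (assumed from the stringpool.c entry in the ChangeLog; the actual body is not shown in this excerpt):

    int
    gt_pch_n_S (const void *x)
    {
      /* Strings have no type enumerator, so pass the sentinel value.  */
      return gt_pch_note_object ((void *) x, (void *) x, &gt_pch_p_S,
                                 gt_types_enum_last);
    }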
@@ -330,7 +333,9 @@ call_count (void **slot, void *state_p)
struct ptr_data *d = (struct ptr_data *)*slot;
struct traversal_state *state = (struct traversal_state *)state_p;
ggc_pch_count_object (state->d, d->obj, d->size, d->note_ptr_fn == gt_pch_p_S);
ggc_pch_count_object (state->d, d->obj, d->size,
d->note_ptr_fn == gt_pch_p_S,
d->type);
state->count++;
return 1;
}
@@ -341,7 +346,9 @@ call_alloc (void **slot, void *state_p)
struct ptr_data *d = (struct ptr_data *)*slot;
struct traversal_state *state = (struct traversal_state *)state_p;
d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size, d->note_ptr_fn == gt_pch_p_S);
d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
d->note_ptr_fn == gt_pch_p_S,
d->type);
state->ptrs[state->ptrs_i++] = d;
return 1;
}
@@ -476,8 +483,6 @@ gt_pch_save (FILE *f)
write_pch_globals (gt_ggc_rtab, &state);
write_pch_globals (gt_pch_cache_rtab, &state);
ggc_pch_prepare_write (state.d, state.f);
/* Pad the PCH file so that the mmapped area starts on an allocation
granularity (usually page) boundary. */
{
@@ -496,6 +501,8 @@ gt_pch_save (FILE *f)
&& fseek (state.f, mmi.offset, SEEK_SET) != 0)
fatal_error ("can't write padding to PCH file: %m");
ggc_pch_prepare_write (state.d, state.f);
/* Actually write out the objects. */
for (i = 0; i < state.count; i++)
{
--- a/gcc/ggc-none.c
+++ b/gcc/ggc-none.c
@@ -33,9 +33,6 @@
#include "coretypes.h"
#include "ggc.h"
struct alloc_zone *rtl_zone = NULL;
struct alloc_zone *garbage_zone = NULL;
void *
ggc_alloc_typed_stat (enum gt_types_enum ARG_UNUSED (gte), size_t size
MEM_STAT_DECL)
@@ -50,13 +47,6 @@ ggc_alloc_stat (size_t size MEM_STAT_DECL)
}
void *
ggc_alloc_zone_stat (size_t size, struct alloc_zone * ARG_UNUSED (zone)
MEM_STAT_DECL)
{
return xmalloc (size);
}
void *
ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL)
{
return xcalloc (size, 1);
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -495,9 +495,6 @@ static void move_ptes_to_front (int, int);
void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
struct alloc_zone *rtl_zone = NULL;
struct alloc_zone *tree_zone = NULL;
struct alloc_zone *garbage_zone = NULL;
/* Push an entry onto G.depth. */
@@ -1052,15 +1049,6 @@ ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
return ggc_alloc_stat (size PASS_MEM_STAT);
}
/* Zone allocation function. Does nothing special in this collector. */
void *
ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED
MEM_STAT_DECL)
{
return ggc_alloc_stat (size PASS_MEM_STAT);
}
/* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
void *
@@ -2119,7 +2107,8 @@ init_ggc_pch (void)
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
size_t size, bool is_string ATTRIBUTE_UNUSED)
size_t size, bool is_string ATTRIBUTE_UNUSED,
enum gt_types_enum type ATTRIBUTE_UNUSED)
{
unsigned order;
@@ -2162,7 +2151,8 @@ ggc_pch_this_base (struct ggc_pch_data *d, void *base)
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
size_t size, bool is_string ATTRIBUTE_UNUSED)
size_t size, bool is_string ATTRIBUTE_UNUSED,
enum gt_types_enum type ATTRIBUTE_UNUSED)
{
unsigned order;
char *result;
--- a/gcc/ggc-zone.c
+++ b/gcc/ggc-zone.c
/* "Bag-of-pages" zone garbage collector for the GNU compiler.
Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
Free Software Foundation, Inc.
Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
(dberlin@dberlin.org)
Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
(dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
<dan@codesourcery.com>.
This file is part of GCC.
@@ -51,6 +52,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
#ifdef HAVE_MMAP_ANON
@@ -64,56 +66,58 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
# define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif
#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
# define MAP_FAILED -1
# endif
# define USING_MMAP
#endif
#ifndef USING_MMAP
#error "Zone collector requires mmap"
#error Zone collector requires mmap
#endif
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif
/* NOTES:
/* FUTURE NOTES:
If we track inter-zone pointers, we can mark single zones at a
time.
If we have a zone where we guarantee no inter-zone pointers, we
could mark that zone separately.
The garbage zone should not be marked, and we should return 1 in
ggc_set_mark for any object in the garbage zone, which cuts off
marking quickly. */
/* Strategy:
This garbage-collecting allocator segregates objects into zones.
It also segregates objects into "large" and "small" bins. Large
objects are greater or equal to page size.
Pages for small objects are broken up into chunks, each of which
are described by a struct alloc_chunk. One can walk over all
chunks on the page by adding the chunk size to the chunk's data
address. The free space for a page exists in the free chunk bins.
objects are greater than page size.
Each page-entry also has a context depth, which is used to track
pushing and popping of allocation contexts. Only objects allocated
in the current (highest-numbered) context may be collected.
Pages for small objects are broken up into chunks. The page has
a bitmap which marks the start position of each chunk (whether
allocated or free). Free chunks are on one of the zone's free
lists and contain a pointer to the next free chunk. Chunks in
most of the free lists have a fixed size determined by the
free list. Chunks in the "other" sized free list have their size
stored right after their chain pointer.
Empty pages (of all sizes) are kept on a single page cache list,
and are considered first when new pages are required; they are
deallocated at the start of the next collection if they haven't
been recycled by then. */
been recycled by then. The free page list is currently per-zone. */
/* Define GGC_DEBUG_LEVEL to print debugging information.
0: No debugging output.
@@ -127,81 +131,88 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif
#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
/* This structure manages small chunks. When the chunk is free, it's
linked with other chunks via free_next. When the chunk is allocated,
the data starts at u. Large chunks are allocated one at a time to
their own page, and so don't come in here.
The "type" field is a placeholder for a future change to do
generational collection. At present it is 0 when free and
1 when allocated. */
/* This structure manages small free chunks. The SIZE field is only
initialized if the chunk is in the "other" sized free list. Large
chunks are allocated one at a time to their own page, and so don't
come in here. */
struct alloc_chunk {
#ifdef COOKIE_CHECKING
unsigned int magic;
#endif
unsigned int type:1;
unsigned int mark:1;
unsigned char large;
unsigned short size;
/* Right now, on 32-bit hosts we don't have enough room to save the
typecode unless we make the one remaining flag into a bitfield.
There's a performance cost to that, so we don't do it until we're
ready to use the type information for something. */
union {
struct alloc_chunk *next_free;
char data[1];
unsigned int size;
};
/* Make sure the data is sufficiently aligned. */
HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
long double align_d;
#else
double align_d;
/* The size of the fixed-size portion of a small page descriptor. */
#define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
/* The collector's idea of the page size. This must be a power of two
no larger than the system page size, because pages must be aligned
to this amount and are tracked at this granularity in the page
table. We choose a size at compile time for efficiency.
We could make a better guess at compile time if PAGE_SIZE is a
constant in system headers, and PAGE_SHIFT is defined... */
#define GGC_PAGE_SIZE 4096
#define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT 12
#if 0
/* Alternative definitions which use the runtime page size. */
#define GGC_PAGE_SIZE G.pagesize
#define GGC_PAGE_MASK G.page_mask
#define GGC_PAGE_SHIFT G.lg_pagesize
#endif
} u;
};
#define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
/* The size of a small page managed by the garbage collector. This
must currently be GGC_PAGE_SIZE, but with a few changes could
be any multiple of it to reduce certain kinds of overhead. */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE
/* We maintain several bins of free lists for chunks for very small
objects. We never exhaustively search other bins -- if we don't
find one of the proper size, we allocate from the "larger" bin. */
/* Free bin information. These numbers may be in need of re-tuning.
In general, decreasing the number of free bins would seem to
increase the time it takes to allocate... */
/* Decreasing the number of free bins increases the time it takes to allocate.
Similar with increasing max_free_bin_size without increasing num_free_bins.
/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
today. */
After much histogramming of allocation sizes and time spent on gc,
on a PowerPC G4 7450 - 667 mhz, and a Pentium 4 - 2.8ghz,
these were determined to be the optimal values. */
#define NUM_FREE_BINS 64
#define MAX_FREE_BIN_SIZE (64 * sizeof (void *))
#define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define FREE_BIN_DELTA MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
/* Marker used as chunk->size for a large object. Should correspond
to the size of the bitfield above. */
#define LARGE_OBJECT_SIZE 0x7fff
/* Allocation and marking parameters. */
/* The smallest allocatable unit to keep track of. */
#define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
/* The smallest markable unit. If we require each allocated object
to contain at least two allocatable units, we can use half as many
bits for the mark bitmap. But this adds considerable complexity
to sweeping. */
#define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
#define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
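A worked sizing example to make the bitmap overhead concrete (illustrative only; it assumes MAX_ALIGNMENT is 8 and 32-bit alloc_type/mark_type words, both of which are target-dependent):

    /* Alloc bits per page : GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT = 4096 / 8 = 512,
       i.e. 512 / 32 = 16 alloc_type words per page.
       BYTES_PER_MARK_WORD = 8 * 8 * sizeof (mark_type) = 256, so one mark word
       covers 256 bytes and a page likewise needs 4096 / 256 = 16 mark words.  */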
/* We use this structure to determine the alignment required for
allocations. For power-of-two sized allocations, that's not a
problem, but it does matter for odd-sized allocations. */
allocations.
There are several things wrong with this estimation of alignment.
The maximum alignment for a structure is often less than the
maximum alignment for a basic data type; for instance, on some
targets long long must be aligned to sizeof (int) in a structure
and sizeof (long long) in a variable. i386-linux is one example;
Darwin is another (sometimes, depending on the compiler in use).
Also, long double is not included. Nothing in GCC uses long
double, so we assume that this is OK. On powerpc-darwin, adding
long double would bring the maximum alignment up to 16 bytes,
and until we need long double (or to vectorize compiler operations)
that's painfully wasteful. This will need to change, some day. */
struct max_alignment {
char c;
union {
HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
long double d;
#else
double d;
#endif
} u;
};
@@ -209,43 +220,128 @@ struct max_alignment {
#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* Compute the smallest nonnegative number which when added to X gives
a multiple of F. */
#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
/* Compute the smallest multiple of F that is >= X. */
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
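A quick sanity check of the rounding macros (illustrative; assumes CEIL (x, f) is the usual ((x) + (f) - 1) / (f)):

    /* ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8) == 7 - 1 == 6,
       and 10 + 6 == 16 is a multiple of 8.
       ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 2 * 8 == 16.  */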
/* Types to use for the allocation and mark bitmaps. It might be
a good idea to add ffsl to libiberty and use unsigned long
instead; that could speed us up where long is wider than int. */
typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)
/* A page_entry records the status of an allocation page. */
/* A page_entry records the status of an allocation page. This is the
common data between all three kinds of pages - small, large, and
PCH. */
typedef struct page_entry
{
/* The next page-entry with objects of the same size, or NULL if
this is the last page-entry. */
struct page_entry *next;
/* The address at which the memory is allocated. */
char *page;
/* The number of bytes allocated. (This will always be a multiple
of the host system page size.) */
size_t bytes;
/* The zone that this page entry belongs to. */
struct alloc_zone *zone;
#ifdef GATHER_STATISTICS
/* How many collections we've survived. */
size_t survived;
/* The address at which the memory is allocated. */
char *page;
/* Context depth of this page. */
unsigned short context_depth;
#endif
/* Does this page contain small objects, or one large object? */
bool large_p;
/* The zone that this page entry belongs to. */
struct alloc_zone *zone;
/* Is this page part of the loaded PCH? */
bool pch_p;
} page_entry;
/* Additional data needed for small pages. */
struct small_page_entry
{
struct page_entry common;
/* The next small page entry, or NULL if this is the last. */
struct small_page_entry *next;
/* If currently marking this zone, a pointer to the mark bits
for this page. If we aren't currently marking this zone,
this pointer may be stale (pointing to freed memory). */
mark_type *mark_bits;
/* The allocation bitmap. This array extends far enough to have
one bit for every BYTES_PER_ALLOC_BIT bytes in the page. */
alloc_type alloc_bits[1];
};
/* Additional data needed for large pages. */
struct large_page_entry
{
struct page_entry common;
/* The next large page entry, or NULL if this is the last. */
struct large_page_entry *next;
/* The number of bytes allocated, not including the page entry. */
size_t bytes;
/* The previous page in the list, so that we can unlink this one. */
struct large_page_entry *prev;
/* During marking, is this object marked? */
bool mark_p;
};
/* A two-level tree is used to look up the page-entry for a given
pointer. Two chunks of the pointer's bits are extracted to index
the first and second levels of the tree, as follows:
msb +---------------+--------------+--------------+---------------------+ lsb
    | pointer bits  | PAGE_L1_BITS | PAGE_L2_BITS | HOST_PAGE_SIZE_BITS |
    |   above 32    |              |              |    (page offset)    |
    +---------------+--------------+--------------+---------------------+
The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
pages are aligned on system page boundaries. The next most
significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
index values in the lookup table, respectively.
For 32-bit architectures and the settings below, there are no
leftover bits. For architectures with wider pointers, the lookup
tree points to a list of pages, which must be scanned to find the
correct one. */
#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
#define LOOKUP_L1(p) \
(((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
#define LOOKUP_L2(p) \
(((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
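A worked example of the two index macros (illustrative, using the 32-bit settings above):

    /* With GGC_PAGE_SHIFT == 12 and PAGE_L1_BITS == 8, PAGE_L2_BITS == 12.
       For p == 0xdeadbeef:
         LOOKUP_L1 (p) == (0xdeadbeef >> 24) & 0xff  == 0xde
         LOOKUP_L2 (p) == (0xdeadbeef >> 12) & 0xfff == 0xadb
       so the page entry lives at base[0xde][0xadb].  */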
#if HOST_BITS_PER_PTR <= 32
/* On 32-bit hosts, we use a two level page table, as pictured above. */
typedef page_entry **page_table[PAGE_L1_SIZE];
#else
/* On 64-bit hosts, we use the same two level page tables plus a linked
list that disambiguates the top 32-bits. There will almost always be
exactly one entry in the list. */
typedef struct page_table_chain
{
struct page_table_chain *next;
size_t high_bits;
page_entry **table[PAGE_L1_SIZE];
} *page_table;
#endif
/* The global variables. */
static struct globals
@@ -253,52 +349,77 @@ static struct globals
/* The linked list of zones. */
struct alloc_zone *zones;
/* The system's page size. */
/* Lookup table for associating allocation pages with object addresses. */
page_table lookup;
/* The system's page size, and related constants. */
size_t pagesize;
size_t lg_pagesize;
size_t page_mask;
/* The size to allocate for a small page entry. This includes
the size of the structure and the size of the allocation
bitmap. */
size_t small_page_overhead;
/* A file descriptor open to /dev/zero for reading. */
#if defined (HAVE_MMAP_DEV_ZERO)
/* A file descriptor open to /dev/zero for reading. */
int dev_zero_fd;
#endif
/* Allocate pages in chunks of this size, to throttle calls to memory
allocation routines. The first page is used, the rest go onto the
free list. */
size_t quire_size;
/* The file descriptor for debugging output. */
FILE *debug_file;
} G;
/* The zone allocation structure. */
/* A zone allocation structure. There is one of these for every
distinct allocation zone. */
struct alloc_zone
{
/* Name of the zone. */
const char *name;
/* Linked list of pages in a zone. */
page_entry *pages;
/* The most recent free chunk is saved here, instead of in the linked
free list, to decrease list manipulation. It is most likely that we
will want this one. */
char *cached_free;
size_t cached_free_size;
/* Linked lists of free storage. Slots 1 ... NUM_FREE_BINS have chunks of size
FREE_BIN_DELTA. All other chunks are in slot 0. */
struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
/* Bytes currently allocated. */
/* The highest bin index which might be non-empty. It may turn out
to be empty, in which case we have to search downwards. */
size_t high_free_bin;
/* Bytes currently allocated in this zone. */
size_t allocated;
/* Bytes currently allocated at the end of the last collection. */
size_t allocated_last_gc;
/* Linked list of the small pages in this zone. */
struct small_page_entry *pages;
/* Total amount of memory mapped. */
size_t bytes_mapped;
/* Doubly linked list of large pages in this zone. */
struct large_page_entry *large_pages;
/* Bit N set if any allocations have been done at context depth N. */
unsigned long context_depth_allocations;
/* If we are currently marking this zone, a pointer to the mark bits. */
mark_type *mark_bits;
/* Bit N set if any collections have been done at context depth N. */
unsigned long context_depth_collections;
/* Name of the zone. */
const char *name;
/* The current depth in the context stack. */
unsigned short context_depth;
/* The number of small pages currently allocated in this zone. */
size_t n_small_pages;
/* Bytes allocated at the end of the last collection. */
size_t allocated_last_gc;
/* Total amount of memory mapped. */
size_t bytes_mapped;
/* A cache of free system pages. */
page_entry *free_pages;
struct small_page_entry *free_pages;
/* Next zone in the linked list of zones. */
struct alloc_zone *next_zone;
@@ -333,48 +454,310 @@ struct alloc_zone
#endif
} main_zone;
struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;
/* Some default zones. */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;
static int always_collect;
/* The PCH zone does not need a normal zone structure, and it does
not live on the linked list of zones. */
struct pch_zone
{
/* The start of the PCH zone. NULL if there is none. */
char *page;
/* Allocate pages in chunks of this size, to throttle calls to memory
allocation routines. The first page is used, the rest go onto the
free list. This cannot be larger than HOST_BITS_PER_INT for the
in_use bitmask for page_group. */
#define GGC_QUIRE_SIZE 16
/* The end of the PCH zone. NULL if there is none. */
char *end;
/* The size of the PCH zone. 0 if there is none. */
size_t bytes;
/* The allocation bitmap for the PCH zone. */
alloc_type *alloc_bits;
/* If we are currently marking, the mark bitmap for the PCH zone.
When it is first read in, we could avoid marking the PCH,
because it will not contain any pointers to GC memory outside
of the PCH; however, the PCH is currently mapped as writable,
so we must mark it in case new pointers are added. */
mark_type *mark_bits;
} pch_zone;
static int ggc_allocated_p (const void *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct page_entry * alloc_small_page ( struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);
/* Traverse the page table and find the entry for a page.
Die (probably) if the object wasn't allocated via GC. */
static inline page_entry *
lookup_page_table_entry (const void *p)
{
page_entry ***base;
size_t L1, L2;
#if HOST_BITS_PER_PTR <= 32
base = &G.lookup[0];
#else
page_table table = G.lookup;
size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
while (table->high_bits != high_bits)
table = table->next;
base = &table->table[0];
#endif
/* Extract the level 1 and 2 indices. */
L1 = LOOKUP_L1 (p);
L2 = LOOKUP_L2 (p);
/* Returns nonzero if P was allocated in GC'able memory. */
return base[L1][L2];
}
/* Set the page table entry for the page that starts at P. If ENTRY
is NULL, clear the entry. */
static inline int
ggc_allocated_p (const void *p)
static void
set_page_table_entry (void *p, page_entry *entry)
{
struct alloc_chunk *chunk;
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
gcc_assert (chunk->magic == CHUNK_MAGIC);
page_entry ***base;
size_t L1, L2;
#if HOST_BITS_PER_PTR <= 32
base = &G.lookup[0];
#else
page_table table;
size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
for (table = G.lookup; table; table = table->next)
if (table->high_bits == high_bits)
goto found;
/* Not found -- allocate a new table. */
table = xcalloc (1, sizeof(*table));
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
found:
base = &table->table[0];
#endif
if (chunk->type == 1)
return true;
return false;
/* Extract the level 1 and 2 indices. */
L1 = LOOKUP_L1 (p);
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
base[L1][L2] = entry;
}
/* Find the page table entry associated with OBJECT. */
static inline struct page_entry *
zone_get_object_page (const void *object)
{
return lookup_page_table_entry (object);
}
/* Find which element of the alloc_bits array OBJECT should be
recorded in. */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
return (((size_t) object & (GGC_PAGE_SIZE - 1))
/ (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}
/* Find which bit of the appropriate word in the alloc_bits array
OBJECT should be recorded in. */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
return (((size_t) object / BYTES_PER_ALLOC_BIT)
% (8 * sizeof (alloc_type)));
}
/* Find which element of the mark_bits array OBJECT should be recorded
in. */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
return (((size_t) object & (GGC_PAGE_SIZE - 1))
/ (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}
/* Find which bit of the appropriate word in the mark_bits array
OBJECT should be recorded in. */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
return (((size_t) object / BYTES_PER_MARK_BIT)
% (8 * sizeof (mark_type)));
}
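To see the word/bit arithmetic in action (illustrative; again assumes BYTES_PER_ALLOC_BIT == 8 and 32-bit bitmap words):

    /* For an object starting at page offset 0x130 (304 bytes):
         bit index = 304 / 8  == 38
         word      = 38 / 32  == 1
         bit       = 38 % 32  == 6
       so its start is recorded as alloc_bits[1] & (1 << 6); the mark bit
       occupies the same position in the page's mark bitmap.  */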
/* Set the allocation bit corresponding to OBJECT in its page's
bitmap. Used to split this object from the preceding one. */
static inline void
zone_set_object_alloc_bit (const void *object)
{
struct small_page_entry *page
= (struct small_page_entry *) zone_get_object_page (object);
unsigned int start_word = zone_get_object_alloc_word (object);
unsigned int start_bit = zone_get_object_alloc_bit (object);
page->alloc_bits[start_word] |= 1L << start_bit;
}
/* Clear the allocation bit corresponding to OBJECT in PAGE's
bitmap. Used to coalesce this object with the preceding
one. */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
const void *object)
{
unsigned int start_word = zone_get_object_alloc_word (object);
unsigned int start_bit = zone_get_object_alloc_bit (object);
/* Would xor be quicker? */
page->alloc_bits[start_word] &= ~(1L << start_bit);
}
/* Find the size of the object which starts at START_WORD and
START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
Helper function for ggc_get_size and zone_find_object_size. */
static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
size_t start_word, size_t start_bit,
size_t max_size)
{
size_t size;
alloc_type alloc_word;
int indx;
/* Load the first word. */
alloc_word = alloc_bits[start_word++];
/* If that was the last bit in this word, we'll want to continue
with the next word. Otherwise, handle the rest of this word. */
if (start_bit)
{
indx = alloc_ffs (alloc_word >> start_bit);
if (indx)
/* indx is 1-based. We started at the bit after the object's
start, but we also ended at the bit after the object's end.
It cancels out. */
return indx * BYTES_PER_ALLOC_BIT;
/* The extra 1 accounts for the starting unit, before start_bit. */
size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;
if (size >= max_size)
return max_size;
alloc_word = alloc_bits[start_word++];
}
else
size = BYTES_PER_ALLOC_BIT;
while (alloc_word == 0)
{
size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
if (size >= max_size)
return max_size;
alloc_word = alloc_bits[start_word++];
}
indx = alloc_ffs (alloc_word);
return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}
/* Find the size of OBJECT on small page PAGE. */
static inline size_t
zone_find_object_size (struct small_page_entry *page,
const void *object)
{
const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
unsigned int start_word = zone_get_object_alloc_word (object_midptr);
unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
size_t max_size = (page->common.page + SMALL_PAGE_SIZE
- (char *) object);
return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
max_size);
}
/* Allocate the mark bits for every zone, and set the pointers on each
page. */
static void
zone_allocate_marks (void)
{
struct alloc_zone *zone;
for (zone = G.zones; zone; zone = zone->next_zone)
{
struct small_page_entry *page;
mark_type *cur_marks;
size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
size_t n = 0;
#endif
mark_words_per_page
= (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
mark_words = zone->n_small_pages * mark_words_per_page;
zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
mark_words);
cur_marks = zone->mark_bits;
for (page = zone->pages; page; page = page->next)
{
page->mark_bits = cur_marks;
cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
n++;
#endif
}
#ifdef ENABLE_CHECKING
gcc_assert (n == zone->n_small_pages);
#endif
}
/* We don't collect the PCH zone, but we do have to mark it
(for now). */
if (pch_zone.bytes)
pch_zone.mark_bits
= (mark_type *) xcalloc (sizeof (mark_type),
CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}
/* After marking and sweeping, release the memory used for mark bits. */
static void
zone_free_marks (void)
{
struct alloc_zone *zone;
for (zone = G.zones; zone; zone = zone->next_zone)
if (zone->mark_bits)
{
free (zone->mark_bits);
zone->mark_bits = NULL;
}
if (pch_zone.bytes)
{
free (pch_zone.mark_bits);
pch_zone.mark_bits = NULL;
}
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
@@ -392,7 +775,6 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);
if (page == (char *) MAP_FAILED)
{
@@ -402,24 +784,23 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
/* Remember that we allocated this memory. */
zone->bytes_mapped += size;
/* Pretend we don't have access to the allocated pages. We'll enable
access to smaller pieces of the area in ggc_alloc. Discard the
handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
return page;
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
and return an entry for it. */
/* Allocate a new page for allocating small objects in ZONE, and
return an entry for it. */
static inline struct page_entry *
static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
struct page_entry *entry;
char *page;
page = NULL;
struct small_page_entry *entry;
/* Check the list of free pages for one we can use. */
entry = zone->free_pages;
@@ -427,80 +808,82 @@ alloc_small_page (struct alloc_zone *zone)
{
/* Recycle the allocated memory from this page ... */
zone->free_pages = entry->next;
page = entry->page;
}
#ifdef USING_MMAP
else
{
/* We want just one page. Allocate a bunch of them and put the
extras on the freelist. (Can only do this optimization with
mmap for backing store.) */
struct page_entry *e, *f = zone->free_pages;
struct small_page_entry *e, *f = zone->free_pages;
int i;
char *page;
page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);
/* This loop counts down so that the chain will be in ascending
memory order. */
for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
for (i = G.quire_size - 1; i >= 1; i--)
{
e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
e->bytes = G.pagesize;
e->page = page + (i << G.lg_pagesize);
e = xcalloc (1, G.small_page_overhead);
e->common.page = page + (i << GGC_PAGE_SHIFT);
e->common.zone = zone;
e->next = f;
f = e;
set_page_table_entry (e->common.page, &e->common);
}
zone->free_pages = f;
entry = xcalloc (1, G.small_page_overhead);
entry->common.page = page;
entry->common.zone = zone;
set_page_table_entry (page, &entry->common);
}
#endif
if (entry == NULL)
entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
entry->next = 0;
entry->bytes = G.pagesize;
entry->page = page;
entry->context_depth = zone->context_depth;
entry->large_p = false;
entry->zone = zone;
zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
zone->n_small_pages++;
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
"Allocating %s page at %p, data %p-%p\n", entry->zone->name,
(PTR) entry, page, page + G.pagesize - 1);
"Allocating %s page at %p, data %p-%p\n",
entry->common.zone->name, (PTR) entry, entry->common.page,
entry->common.page + SMALL_PAGE_SIZE - 1);
return entry;
}
/* Compute the smallest multiple of F that is >= X. */
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
/* Allocate a large page of size SIZE in ZONE. */
static inline struct page_entry *
static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
struct page_entry *entry;
struct large_page_entry *entry;
char *page;
size = ROUND_UP (size, 1024);
page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
size_t needed_size;
needed_size = size + sizeof (struct large_page_entry);
page = xmalloc (needed_size);
entry->next = 0;
entry = (struct large_page_entry *) page;
entry->next = NULL;
entry->common.page = page + sizeof (struct large_page_entry);
entry->common.large_p = true;
entry->common.pch_p = false;
entry->common.zone = zone;
#ifdef GATHER_STATISTICS
entry->common.survived = 0;
#endif
entry->mark_p = false;
entry->bytes = size;
entry->page = page;
entry->context_depth = zone->context_depth;
entry->large_p = true;
entry->zone = zone;
zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
entry->prev = NULL;
set_page_table_entry (entry->common.page, &entry->common);
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
"Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
(PTR) entry, page, page + size - 1);
"Allocating %s large page at %p, data %p-%p\n",
entry->common.zone->name, (PTR) entry, entry->common.page,
entry->common.page + SMALL_PAGE_SIZE - 1);
return entry;
}
@@ -509,27 +892,41 @@ alloc_large_page (size_t size, struct alloc_zone *zone)
/* For a page that is no longer needed, put it on the free page list. */
static inline void
free_page (page_entry *entry)
free_small_page (struct small_page_entry *entry)
{
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
"Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
entry->page, entry->page + entry->bytes - 1);
"Deallocating %s page at %p, data %p-%p\n",
entry->common.zone->name, (PTR) entry,
entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
gcc_assert (!entry->common.large_p);
if (entry->large_p)
{
free (entry->page);
VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
}
else
{
/* Mark the page as inaccessible. Discard the handle to
avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->common.page,
SMALL_PAGE_SIZE));
entry->next = entry->zone->free_pages;
entry->zone->free_pages = entry;
}
entry->next = entry->common.zone->free_pages;
entry->common.zone->free_pages = entry;
entry->common.zone->n_small_pages--;
}
/* Release a large page that is no longer needed. */
static inline void
free_large_page (struct large_page_entry *entry)
{
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
"Deallocating %s page at %p, data %p-%p\n",
entry->common.zone->name, (PTR) entry,
entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
gcc_assert (entry->common.large_p);
set_page_table_entry (entry->common.page, NULL);
free (entry);
}
/* Release the free page cache to the system. */
@@ -538,7 +935,7 @@ static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
page_entry *p, *next;
struct small_page_entry *p, *next;
char *start;
size_t len;
@@ -547,17 +944,17 @@ release_pages (struct alloc_zone *zone)
while (p)
{
start = p->page;
start = p->common.page;
next = p->next;
len = p->bytes;
free (p);
len = SMALL_PAGE_SIZE;
set_page_table_entry (p->common.page, NULL);
p = next;
while (p && p->page == start + len)
while (p && p->common.page == start + len)
{
next = p->next;
len += p->bytes;
free (p);
len += SMALL_PAGE_SIZE;
set_page_table_entry (p->common.page, NULL);
p = next;
}
@@ -569,74 +966,157 @@
#endif
}
/* Place CHUNK of size SIZE on the free list for ZONE. */
/* Place the block at PTR of size SIZE on the free list for ZONE. */
static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
size_t bin = 0;
bin = SIZE_BIN_DOWN (size);
gcc_assert (bin);
gcc_assert (bin != 0);
if (bin > NUM_FREE_BINS)
{
bin = 0;
#ifdef COOKIE_CHECKING
gcc_assert (chunk->magic == CHUNK_MAGIC || chunk->magic == DEADCHUNK_MAGIC);
chunk->magic = DEADCHUNK_MAGIC;
#endif
chunk->u.next_free = zone->free_chunks[bin];
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
chunk->size = size;
chunk->next_free = zone->free_chunks[bin];
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk),
size - sizeof (struct alloc_chunk)));
}
else
{
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk *)));
chunk->next_free = zone->free_chunks[bin];
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk *),
size - sizeof (struct alloc_chunk *)));
}
zone->free_chunks[bin] = chunk;
if (bin > zone->high_free_bin)
zone->high_free_bin = bin;
if (GGC_DEBUG_LEVEL >= 3)
fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
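Bin selection under the new scheme, for concreteness (illustrative; assumes FREE_BIN_DELTA == MAX_ALIGNMENT == 8 and NUM_FREE_BINS == 64):

    /* SIZE_BIN_DOWN (40)  == 5:  an exact-size bin, no size field needed.
       SIZE_BIN_DOWN (520) == 65: exceeds NUM_FREE_BINS, so the chunk goes
       into the "other" bin 0 and stores its size after its next_free
       pointer, as free_chunk above does.  */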
/* Allocate a chunk of memory of SIZE bytes. */
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE. */
static void *
ggc_alloc_zone_1 (size_t orig_size, struct alloc_zone *zone,
short type ATTRIBUTE_UNUSED
void *
ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
MEM_STAT_DECL)
{
size_t bin = 0;
size_t lsize = 0;
struct page_entry *entry;
struct alloc_chunk *chunk, *lchunk, **pp;
size_t bin;
size_t csize;
struct small_page_entry *entry;
struct alloc_chunk *chunk, **pp;
void *result;
size_t size = orig_size;
/* Align size, so that we're assured of aligned allocations. */
if (size < FREE_BIN_DELTA)
size = FREE_BIN_DELTA;
/* Make sure that zero-sized allocations get a unique and freeable
pointer. */
if (size == 0)
size = MAX_ALIGNMENT;
else
size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
/* Large objects are handled specially. */
if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
/* Try to allocate the object from several different sources. Each
of these cases is responsible for setting RESULT and SIZE to
describe the allocated block, before jumping to FOUND. If a
chunk is split, the allocate bit for the new chunk should also be
set.
Large objects are handled specially. However, they'll just fail
the next couple of conditions, so we can wait to check for them
below. The large object case is relatively rare (< 1%), so this
is a win. */
/* First try to split the last chunk we allocated. For best
fragmentation behavior it would be better to look for a
free bin of the appropriate size for a small object. However,
we're unlikely (1% - 7%) to find one, and this gives better
locality behavior anyway. This case handles the lion's share
of all calls to this function. */
if (size <= zone->cached_free_size)
{
size = ROUND_UP (size, 1024);
entry = alloc_large_page (size, zone);
entry->survived = 0;
entry->next = entry->zone->pages;
entry->zone->pages = entry;
result = zone->cached_free;
chunk = (struct alloc_chunk *) entry->page;
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
chunk->large = 1;
chunk->size = CEIL (size, 1024);
zone->cached_free_size -= size;
if (zone->cached_free_size)
{
zone->cached_free += size;
zone_set_object_alloc_bit (zone->cached_free);
}
goto found;
}
/* First look for a tiny object already segregated into its own
size bucket. */
bin = SIZE_BIN_UP (size);
if (bin <= NUM_FREE_BINS)
/* Next, try to find a free bin of the exactly correct size. */
/* We want to round SIZE up, rather than down, but we know it's
already aligned to at least FREE_BIN_DELTA, so we can just
shift. */
bin = SIZE_BIN_DOWN (size);
if (bin <= NUM_FREE_BINS
&& (chunk = zone->free_chunks[bin]) != NULL)
{
chunk = zone->free_chunks[bin];
if (chunk)
/* We have a chunk of the right size. Pull it off the free list
and use it. */
zone->free_chunks[bin] = chunk->next_free;
/* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
== FREE_BIN_DELTA. */
result = chunk;
/* The allocation bits are already set correctly. HIGH_FREE_BIN
may now be wrong, if this was the last chunk in the high bin.
Rather than fixing it up now, wait until we need to search
the free bins. */
goto found;
}
/* Next, if there wasn't a chunk of the ideal size, look for a chunk
to split. We can find one in the too-big bin, or in the largest
sized bin with a chunk in it. Try the largest normal-sized bin
first. */
if (zone->high_free_bin > bin)
{
zone->free_chunks[bin] = chunk->u.next_free;
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
/* Find the highest numbered free bin. It will be at or below
the watermark. */
while (zone->high_free_bin > bin
&& zone->free_chunks[zone->high_free_bin] == NULL)
zone->high_free_bin--;
if (zone->high_free_bin > bin)
{
size_t tbin = zone->high_free_bin;
chunk = zone->free_chunks[tbin];
/* Remove the chunk from its previous bin. */
zone->free_chunks[tbin] = chunk->next_free;
result = (char *) chunk;
/* Save the rest of the chunk for future allocation. */
if (zone->cached_free_size)
free_chunk (zone->cached_free, zone->cached_free_size, zone);
chunk = (struct alloc_chunk *) ((char *) result + size);
zone->cached_free = (char *) chunk;
zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;
/* Mark the new free chunk as an object, so that we can
find the size of the newly allocated object. */
zone_set_object_alloc_bit (chunk);
/* HIGH_FREE_BIN may now be wrong, if this was the last
chunk in the high bin. Rather than fixing it up now,
wait until we need to search the free bins. */
goto found;
}
}
@@ -647,85 +1127,113 @@ ggc_alloc_zone_1 (size_t orig_size, struct alloc_zone *zone,
chunk = *pp;
while (chunk && chunk->size < size)
{
pp = &chunk->u.next_free;
pp = &chunk->next_free;
chunk = *pp;
}
/* Failing that, allocate new storage. */
if (!chunk)
if (chunk)
{
entry = alloc_small_page (zone);
entry->next = entry->zone->pages;
entry->zone->pages = entry;
/* Remove the chunk from its previous bin. */
*pp = chunk->next_free;
chunk = (struct alloc_chunk *) entry->page;
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
chunk->size = G.pagesize - CHUNK_OVERHEAD;
chunk->large = 0;
}
else
result = (char *) chunk;
/* Save the rest of the chunk for future allocation, if there's any
left over. */
csize = chunk->size;
if (csize > size)
{
*pp = chunk->u.next_free;
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
chunk->large = 0;
if (zone->cached_free_size)
free_chunk (zone->cached_free, zone->cached_free_size, zone);
chunk = (struct alloc_chunk *) ((char *) result + size);
zone->cached_free = (char *) chunk;
zone->cached_free_size = csize - size;
/* Mark the new free chunk as an object. */
zone_set_object_alloc_bit (chunk);
}
goto found;
}
/* Release extra memory from a chunk that's too big. */
lsize = chunk->size - size;
if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
/* Handle large allocations. We could choose any threshold between
GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
GGC_PAGE_SIZE. It can't be smaller, because then it wouldn't
be guaranteed to have a unique entry in the lookup table. Large
allocations will always fall through to here. */
if (size > GGC_PAGE_SIZE)
{
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
chunk->size = size;
struct large_page_entry *entry = alloc_large_page (size, zone);
lsize -= CHUNK_OVERHEAD;
lchunk = (struct alloc_chunk *)(chunk->u.data + size);
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
lchunk->magic = CHUNK_MAGIC;
#ifdef GATHER_STATISTICS
entry->common.survived = 0;
#endif
lchunk->type = 0;
lchunk->mark = 0;
lchunk->size = lsize;
lchunk->large = 0;
free_chunk (lchunk, lsize, zone);
lsize = 0;
entry->next = zone->large_pages;
if (zone->large_pages)
zone->large_pages->prev = entry;
zone->large_pages = entry;
result = entry->common.page;
goto found;
}
/* Failing everything above, allocate a new small page. */
entry = alloc_small_page (zone);
entry->next = zone->pages;
zone->pages = entry;
/* Mark the first chunk in the new page. */
entry->alloc_bits[0] = 1;
result = entry->common.page;
if (size < SMALL_PAGE_SIZE)
{
if (zone->cached_free_size)
free_chunk (zone->cached_free, zone->cached_free_size, zone);
zone->cached_free = (char *) result + size;
zone->cached_free_size = SMALL_PAGE_SIZE - size;
/* Mark the new free chunk as an object. */
zone_set_object_alloc_bit (zone->cached_free);
}
/* Calculate the object's address. */
found:
#ifdef COOKIE_CHECKING
chunk->magic = CHUNK_MAGIC;
#endif
chunk->type = 1;
chunk->mark = 0;
/* We could save TYPE in the chunk, but we don't use that for
anything yet. */
result = chunk->u.data;
anything yet. If we wanted to, we could do it by adding it
either before the beginning of the chunk or after its end,
and adjusting the size and pointer appropriately. */
#ifdef ENABLE_GC_CHECKING
/* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
exact same semantics in presence of memory bugs, regardless of
ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
/* We'll probably write to this after we return. */
prefetchw (result);
#ifdef ENABLE_GC_CHECKING
/* `Poison' the entire allocated object. */
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
memset (result, 0xaf, size);
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
size - orig_size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
inaccessible. */
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
zone->allocated += size;
#ifdef GATHER_STATISTICS
ggc_record_overhead (orig_size, size + CHUNK_OVERHEAD - orig_size PASS_MEM_STAT);
ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);
{
size_t object_size = size + CHUNK_OVERHEAD;
size_t object_size = size;
size_t overhead = object_size - orig_size;
zone->stats.total_overhead += overhead;
@@ -750,8 +1258,8 @@ ggc_alloc_zone_1 (size_t orig_size, struct alloc_zone *zone,
#endif
if (GGC_DEBUG_LEVEL >= 3)
fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
(void *)chunk, (unsigned long) size, result);
fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
(unsigned long) size, result);
return result;
}
@@ -766,16 +1274,16 @@ ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
switch (gte)
{
case gt_ggc_e_14lang_tree_node:
return ggc_alloc_zone_1 (size, tree_zone, gte PASS_MEM_STAT);
return ggc_alloc_zone_stat (size, &tree_zone PASS_MEM_STAT);
case gt_ggc_e_7rtx_def:
return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);
return ggc_alloc_zone_stat (size, &rtl_zone PASS_MEM_STAT);
case gt_ggc_e_9rtvec_def:
return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);
return ggc_alloc_zone_stat (size, &rtl_zone PASS_MEM_STAT);
default:
return ggc_alloc_zone_1 (size, &main_zone, gte PASS_MEM_STAT);
return ggc_alloc_zone_stat (size, &main_zone PASS_MEM_STAT);
}
}
@@ -784,23 +1292,15 @@ ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
return ggc_alloc_zone_1 (size, &main_zone, -1 PASS_MEM_STAT);
}
/* Zone allocation allocates into the specified zone. */
void *
ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone MEM_STAT_DECL)
{
return ggc_alloc_zone_1 (size, zone, -1 PASS_MEM_STAT);
return ggc_alloc_zone_stat (size, &main_zone PASS_MEM_STAT);
}
/* Poison the chunk. */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
memset ((CHUNK)->u.data, 0xa5, (SIZE))
#define poison_region(PTR, SIZE) \
memset ((PTR), 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#define poison_region(PTR, SIZE)
#endif
/* Free the object at P. */
@@ -808,30 +1308,96 @@ ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone MEM_STAT_DECL)
void
ggc_free (void *p)
{
struct alloc_chunk *chunk;
struct page_entry *page;
#ifdef GATHER_STATISTICS
ggc_free_overhead (p);
#endif
poison_region (p, ggc_get_size (p));
page = zone_get_object_page (p);
if (page->large_p)
{
struct large_page_entry *large_page
= (struct large_page_entry *) page;
/* Remove the page from the linked list. */
if (large_page->prev)
large_page->prev->next = large_page->next;
else
{
gcc_assert (large_page->common.zone->large_pages == large_page);
large_page->common.zone->large_pages = large_page->next;
}
if (large_page->next)
large_page->next->prev = large_page->prev;
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
large_page->common.zone->allocated -= large_page->bytes;
/* Poison the chunk. */
poison_chunk (chunk, ggc_get_size (p));
/* Release the memory associated with this object. */
free_large_page (large_page);
}
else if (page->pch_p)
/* Don't do anything. We won't allocate a new object from the
PCH zone so there's no point in releasing anything. */
;
else
{
size_t size = ggc_get_size (p);
page->zone->allocated -= size;
/* Add the chunk to the free list. We don't bother with coalescing,
since we are likely to want a chunk of this size again. */
free_chunk (p, size, page->zone);
}
}
/* If P is not marked, mark it and return false. Otherwise return true.
P must have been allocated by the GC allocator; it mustn't point to
static objects, stack variables, or memory allocated with malloc. */
int
ggc_set_mark (const void *p)
{
struct alloc_chunk *chunk;
int
ggc_set_mark (const void *p)
{
struct page_entry *page;
const char *ptr = (const char *) p;
page = zone_get_object_page (p);
if (page->pch_p)
{
size_t mark_word, mark_bit, offset;
offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
mark_word = offset / (8 * sizeof (mark_type));
mark_bit = offset % (8 * sizeof (mark_type));
if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
return 1;
pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
}
else if (page->large_p)
{
struct large_page_entry *large_page
= (struct large_page_entry *) page;
if (large_page->mark_p)
return 1;
large_page->mark_p = true;
}
else
{
struct small_page_entry *small_page
= (struct small_page_entry *) page;
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
gcc_assert (chunk->magic == CHUNK_MAGIC);
#endif
if (chunk->mark)
if (small_page->mark_bits[zone_get_object_mark_word (p)]
& (1 << zone_get_object_mark_bit (p)))
return 1;
chunk->mark = 1;
small_page->mark_bits[zone_get_object_mark_word (p)]
|= (1 << zone_get_object_mark_bit (p));
}
if (GGC_DEBUG_LEVEL >= 4)
fprintf (G.debug_file, "Marking %p\n", p);
@@ -846,13 +1412,36 @@ ggc_set_mark (const void *p)
int
ggc_marked_p (const void *p)
{
struct alloc_chunk *chunk;
struct page_entry *page;
const char *ptr = (const char *) p;
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
gcc_assert (chunk->magic == CHUNK_MAGIC);
#endif
return chunk->mark;
page = zone_get_object_page (p);
if (page->pch_p)
{
size_t mark_word, mark_bit, offset;
offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
mark_word = offset / (8 * sizeof (mark_type));
mark_bit = offset % (8 * sizeof (mark_type));
return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
}
if (page->large_p)
{
struct large_page_entry *large_page
= (struct large_page_entry *) page;
return large_page->mark_p;
}
else
{
struct small_page_entry *small_page
= (struct small_page_entry *) page;
return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
& (1 << zone_get_object_mark_bit (p)));
}
}
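The word/bit arithmetic is identical in ggc_set_mark and ggc_marked_p: the object's byte offset is scaled down by BYTES_PER_MARK_BIT, then split into a word index and a bit index. A standalone sketch of that indexing, with assumed illustrative constants (the real values are defined earlier in ggc-zone.c):

#include <stdio.h>

typedef unsigned int mark_type;
#define BYTES_PER_MARK_BIT 4  /* assumed value, for illustration only */

int
main (void)
{
  mark_type mark_bits[32] = { 0 };
  size_t byte_offset = 0x120;  /* object 288 bytes into the region */
  size_t offset = byte_offset / BYTES_PER_MARK_BIT;
  size_t mark_word = offset / (8 * sizeof (mark_type));
  size_t mark_bit = offset % (8 * sizeof (mark_type));

  mark_bits[mark_word] |= (mark_type) 1 << mark_bit;  /* mark it */
  printf ("offset 0x%zx -> word %zu, bit %zu\n",
          byte_offset, mark_word, mark_bit);  /* word 2, bit 8 */
  return 0;
}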
/* Return the size of the gc-able object P. */
@@ -860,33 +1449,64 @@ ggc_marked_p (const void *p)
size_t
ggc_get_size (const void *p)
{
struct alloc_chunk *chunk;
struct page_entry *page;
const char *ptr = (const char *) p;
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
gcc_assert (chunk->magic == CHUNK_MAGIC);
#endif
if (chunk->large)
return chunk->size * 1024;
page = zone_get_object_page (p);
if (page->pch_p)
{
size_t alloc_word, alloc_bit, offset, max_size;
offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
alloc_word = offset / (8 * sizeof (alloc_type));
alloc_bit = offset % (8 * sizeof (alloc_type));
max_size = pch_zone.bytes - (ptr - pch_zone.page);
return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
max_size);
}
return chunk->size;
if (page->large_p)
return ((struct large_page_entry *)page)->bytes;
else
return zone_find_object_size ((struct small_page_entry *) page, p);
}
/* Initialize the ggc-zone-mmap allocator. */
void
init_ggc (void)
{
/* The allocation size must be greater than BYTES_PER_MARK_BIT, and
a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
the current assumptions to hold. */
gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);
/* Set up the main zone by hand. */
main_zone.name = "Main zone";
G.zones = &main_zone;
/* Allocate the default zones. */
rtl_zone = new_ggc_zone ("RTL zone");
tree_zone = new_ggc_zone ("Tree zone");
garbage_zone = new_ggc_zone ("Garbage zone");
new_ggc_zone_1 (&rtl_zone, "RTL zone");
new_ggc_zone_1 (&tree_zone, "Tree zone");
new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");
G.pagesize = getpagesize();
G.lg_pagesize = exact_log2 (G.pagesize);
G.page_mask = ~(G.pagesize - 1);
/* Require the system page size to be a multiple of GGC_PAGE_SIZE. */
gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);
/* Allocate 16 system pages at a time. */
G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
/* Calculate the size of the allocation bitmap and other overhead. */
/* Right now we allocate bits for the page header and bitmap. These
are wasted, but a little tricky to eliminate. */
G.small_page_overhead
= PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
/* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
#ifdef HAVE_MMAP_DEV_ZERO
G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
gcc_assert (G.dev_zero_fd != -1);
@@ -906,7 +1526,7 @@ init_ggc (void)
hork badly if we tried to use it. */
{
char *p = alloc_anon (NULL, G.pagesize, &main_zone);
struct page_entry *e;
struct small_page_entry *e;
if ((size_t)p & (G.pagesize - 1))
{
/* How losing. Discard this one and try another. If we still
@@ -916,25 +1536,39 @@ init_ggc (void)
gcc_assert (!((size_t)p & (G.pagesize - 1)));
}
if (GGC_PAGE_SIZE == G.pagesize)
{
/* We have a good page, might as well hold onto it... */
e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
e->bytes = G.pagesize;
e->page = p;
e = xcalloc (1, G.small_page_overhead);
e->common.page = p;
e->common.zone = &main_zone;
e->next = main_zone.free_pages;
set_page_table_entry (e->common.page, &e->common);
main_zone.free_pages = e;
}
else
{
munmap (p, G.pagesize);
}
}
#endif
}
/* Start a new GGC zone. */
struct alloc_zone *
new_ggc_zone (const char * name)
static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
new_zone->name = name;
new_zone->next_zone = G.zones->next_zone;
G.zones->next_zone = new_zone;
}
struct alloc_zone *
new_ggc_zone (const char * name)
{
struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
new_ggc_zone_1 (new_zone, name);
return new_zone;
}
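Splitting out new_ggc_zone_1 lets the permanent zones (rtl_zone, tree_zone, tree_id_zone) be statically allocated and simply threaded onto the zone list, while new_ggc_zone keeps the old heap-allocating interface for dynamic zones such as the PCH zone. A condensed, self-contained sketch of that arrangement, with the zone structure pared down to the two fields involved:

#include <stdio.h>
#include <stdlib.h>

struct alloc_zone { const char *name; struct alloc_zone *next_zone; };

static struct alloc_zone main_zone = { "Main zone", NULL };
static struct alloc_zone rtl_zone;  /* static storage, as in the patch */

static void
new_ggc_zone_1 (struct alloc_zone *z, const char *name)
{
  z->name = name;
  z->next_zone = main_zone.next_zone;
  main_zone.next_zone = z;
}

static struct alloc_zone *
new_ggc_zone (const char *name)
{
  struct alloc_zone *z = calloc (1, sizeof *z);
  new_ggc_zone_1 (z, name);
  return z;
}

int
main (void)
{
  struct alloc_zone *z;
  new_ggc_zone_1 (&rtl_zone, "RTL zone");  /* no heap allocation */
  new_ggc_zone ("PCH zone");               /* heap-allocated */
  for (z = &main_zone; z; z = z->next_zone)
    printf ("%s\n", z->name);
  return 0;
}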
@@ -952,58 +1586,7 @@ destroy_ggc_zone (struct alloc_zone * dead_zone)
gcc_assert (z);
/* z is dead, baby. z is dead. */
z->dead= true;
}
/* Increment the `GC context'. Objects allocated in an outer context
are never freed, eliminating the need to register their roots. */
void
ggc_push_context (void)
{
struct alloc_zone *zone;
for (zone = G.zones; zone; zone = zone->next_zone)
++(zone->context_depth);
/* Die on wrap. */
gcc_assert (main_zone.context_depth < HOST_BITS_PER_LONG);
}
/* Decrement the `GC context'. All objects allocated since the
previous ggc_push_context are migrated to the outer context. */
static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
unsigned long omask;
unsigned depth;
page_entry *p;
depth = --(zone->context_depth);
omask = (unsigned long)1 << (depth + 1);
if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
return;
zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
zone->context_depth_allocations &= omask - 1;
zone->context_depth_collections &= omask - 1;
/* Any remaining pages in the popped context are lowered to the new
current context; i.e. objects allocated in the popped context and
left over are imported into the previous context. */
for (p = zone->pages; p != NULL; p = p->next)
if (p->context_depth > depth)
p->context_depth = depth;
}
/* Pop all the zone contexts. */
void
ggc_pop_context (void)
{
struct alloc_zone *zone;
for (zone = G.zones; zone; zone = zone->next_zone)
ggc_pop_context_1 (zone);
z->dead = true;
}
/* Free all empty pages and objects within a page for a given zone */
@@ -1011,111 +1594,168 @@ ggc_pop_context (void)
static void
sweep_pages (struct alloc_zone *zone)
{
page_entry **pp, *p, *next;
struct alloc_chunk *chunk, *last_free, *end;
size_t last_free_size, allocated = 0;
struct large_page_entry **lpp, *lp, *lnext;
struct small_page_entry **spp, *sp, *snext;
char *last_free;
size_t allocated = 0;
bool nomarksinpage;
/* First, reset the free_chunks lists, since we are going to
re-free free chunks in hopes of coalescing them into large chunks. */
memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
pp = &zone->pages;
for (p = zone->pages; p ; p = next)
{
next = p->next;
zone->high_free_bin = 0;
zone->cached_free = NULL;
zone->cached_free_size = 0;
/* Large pages are all or none affairs. Either they are
completely empty, or they are completely full.
XXX: Should we bother to increment allocated. */
if (p->large_p)
/* Large pages are all or none affairs. Either they are completely
empty, or they are completely full. */
lpp = &zone->large_pages;
for (lp = zone->large_pages; lp != NULL; lp = lnext)
{
if (((struct alloc_chunk *)p->page)->mark == 1)
gcc_assert (lp->common.large_p);
lnext = lp->next;
#ifdef GATHER_STATISTICS
/* This page has now survived another collection. */
lp->common.survived++;
#endif
if (lp->mark_p)
{
((struct alloc_chunk *)p->page)->mark = 0;
allocated += p->bytes - CHUNK_OVERHEAD;
pp = &p->next;
lp->mark_p = false;
allocated += lp->bytes;
lpp = &lp->next;
}
else
{
*pp = next;
*lpp = lnext;
#ifdef ENABLE_GC_CHECKING
/* Poison the page. */
memset (p->page, 0xb5, p->bytes);
memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
free_page (p);
if (lp->prev)
lp->prev->next = lp->next;
if (lp->next)
lp->next->prev = lp->prev;
free_large_page (lp);
}
continue;
}
spp = &zone->pages;
for (sp = zone->pages; sp != NULL; sp = snext)
{
char *object, *last_object;
char *end;
alloc_type *alloc_word_p;
mark_type *mark_word_p;
gcc_assert (!sp->common.large_p);
snext = sp->next;
#ifdef GATHER_STATISTICS
/* This page has now survived another collection. */
p->survived++;
sp->common.survived++;
#endif
/* Which leaves full and partial pages. Step through all chunks,
consolidate those that are free and insert them into the free
lists. Note that consolidation slows down collection
slightly. */
/* Step through all chunks, consolidate those that are free and
insert them into the free lists. Note that consolidation
slows down collection slightly. */
chunk = (struct alloc_chunk *)p->page;
end = (struct alloc_chunk *)(p->page + G.pagesize);
last_object = object = sp->common.page;
end = sp->common.page + SMALL_PAGE_SIZE;
last_free = NULL;
last_free_size = 0;
nomarksinpage = true;
mark_word_p = sp->mark_bits;
alloc_word_p = sp->alloc_bits;
gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
object = sp->common.page;
do
{
prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
if (chunk->mark || p->context_depth < zone->context_depth)
{
unsigned int i, n;
alloc_type alloc_word;
mark_type mark_word;
alloc_word = *alloc_word_p++;
mark_word = *mark_word_p++;
if (mark_word)
nomarksinpage = false;
/* There ought to be some way to do this without looping... */
i = 0;
while ((n = alloc_ffs (alloc_word)) != 0)
{
/* Extend the current state for n - 1 bits. We can't
shift alloc_word by n, even though it isn't used in the
loop, in case only the highest bit was set. */
alloc_word >>= n - 1;
mark_word >>= n - 1;
object += BYTES_PER_MARK_BIT * (n - 1);
if (mark_word & 1)
{
if (last_free)
{
last_free->type = 0;
last_free->size = last_free_size;
last_free->mark = 0;
poison_chunk (last_free, last_free_size);
free_chunk (last_free, last_free_size, zone);
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
object
- last_free));
poison_region (last_free, object - last_free);
free_chunk (last_free, object - last_free, zone);
last_free = NULL;
}
if (chunk->mark)
{
allocated += chunk->size;
}
chunk->mark = 0;
else
allocated += object - last_object;
last_object = object;
}
else
{
if (last_free)
if (last_free == NULL)
{
last_free_size += CHUNK_OVERHEAD + chunk->size;
last_free = object;
allocated += object - last_object;
}
else
{
last_free = chunk;
last_free_size = chunk->size;
zone_clear_object_alloc_bit (sp, object);
}
/* Shift to just after the alloc bit we handled. */
alloc_word >>= 1;
mark_word >>= 1;
object += BYTES_PER_MARK_BIT;
i += n;
}
chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
}
while (chunk < end);
while (object < end);
if (nomarksinpage)
{
*pp = next;
*spp = snext;
#ifdef ENABLE_GC_CHECKING
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (sp->common.page, SMALL_PAGE_SIZE));
/* Poison the page. */
memset (p->page, 0xb5, p->bytes);
memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
free_page (p);
free_small_page (sp);
continue;
}
else if (last_free)
{
last_free->type = 0;
last_free->size = last_free_size;
last_free->mark = 0;
poison_chunk (last_free, last_free_size);
free_chunk (last_free, last_free_size, zone);
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
object - last_free));
poison_region (last_free, object - last_free);
free_chunk (last_free, object - last_free, zone);
}
pp = &p->next;
else
allocated += object - last_object;
spp = &sp->next;
}
zone->allocated = allocated;
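The alloc_ffs loop above is the core of the rewritten sweep: rather than walking per-object chunk headers, it scans the allocation bitmap one word at a time and jumps directly to each set bit. A standalone sketch of that walk, using POSIX ffs as a stand-in for alloc_ffs:

#include <stdio.h>
#include <strings.h>  /* ffs */

int
main (void)
{
  unsigned int alloc_word = 0x112;  /* object starts at bits 1, 4 and 8 */
  unsigned int bit_pos = 0;
  int n;

  while ((n = ffs (alloc_word)) != 0)
    {
      /* Advance to the set bit.  Shifting by n - 1 rather than n
         avoids undefined behavior when only the top bit is set.  */
      alloc_word >>= n - 1;
      bit_pos += n - 1;
      printf ("object starts at bit %u\n", bit_pos);
      alloc_word >>= 1;  /* step past the bit just handled */
      bit_pos += 1;
    }
  return 0;
}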
@@ -1129,6 +1769,31 @@ sweep_pages (struct alloc_zone *zone)
static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
#if 0
/* */
{
int i;
for (i = 0; i < NUM_FREE_BINS + 1; i++)
{
struct alloc_chunk *chunk;
int n, tot;
n = 0;
tot = 0;
chunk = zone->free_chunks[i];
while (chunk)
{
n++;
tot += chunk->size;
chunk = chunk->next_free;
}
fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
i, n, tot);
}
}
/* */
#endif
if (!quiet_flag)
fprintf (stderr, " {%s GC %luk -> ",
zone->name, (unsigned long) zone->allocated / 1024);
@@ -1141,11 +1806,15 @@ ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
reuse in the interim. */
release_pages (zone);
/* Indicate that we've seen collections at this context depth. */
zone->context_depth_collections
= ((unsigned long)1 << (zone->context_depth + 1)) - 1;
if (need_marking)
{
zone_allocate_marks ();
ggc_mark_roots ();
#ifdef GATHER_STATISTICS
ggc_prune_overhead_list ();
#endif
}
sweep_pages (zone);
zone->was_collected = true;
zone->allocated_last_gc = zone->allocated;
@@ -1155,6 +1824,7 @@ ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
return true;
}
#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
collections. */
@@ -1163,46 +1833,22 @@ calculate_average_page_survival (struct alloc_zone *zone)
{
float count = 0.0;
float survival = 0.0;
page_entry *p;
struct small_page_entry *p;
struct large_page_entry *lp;
for (p = zone->pages; p; p = p->next)
{
count += 1.0;
survival += p->survived;
survival += p->common.survived;
}
return survival/count;
}
/* Check the magic cookies all of the chunks contain, to make sure we
aren't doing anything stupid, like stomping on alloc_chunk
structures. */
static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
page_entry *p;
struct alloc_zone *zone;
for (zone = G.zones; zone; zone = zone->next_zone)
{
for (p = zone->pages; p; p = p->next)
{
if (!p->large_p)
for (lp = zone->large_pages; lp; lp = lp->next)
{
struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
do
{
gcc_assert (chunk->magic == CHUNK_MAGIC
|| chunk->magic == DEADCHUNK_MAGIC);
chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
}
while (chunk < end);
}
}
count += 1.0;
survival += lp->common.survived;
}
#endif
return survival/count;
}
#endif
/* Top level collection routine. */
void
@@ -1210,12 +1856,10 @@ ggc_collect (void)
{
struct alloc_zone *zone;
bool marked = false;
float f;
timevar_push (TV_GC);
check_cookies ();
if (!always_collect)
if (!ggc_force_collect)
{
float allocated_last_gc = 0, allocated = 0, min_expand;
@@ -1258,12 +1902,12 @@
for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
{
check_cookies ();
zone->was_collected = false;
marked |= ggc_collect_1 (zone, !marked);
}
}
#ifdef GATHER_STATISTICS
/* Print page survival stats, if someone wants them. */
if (GGC_DEBUG_LEVEL >= 2)
{
@@ -1271,47 +1915,16 @@ ggc_collect (void)
{
if (zone->was_collected)
{
f = calculate_average_page_survival (zone);
float f = calculate_average_page_survival (zone);
printf ("Average page survival in zone `%s' is %f\n",
zone->name, f);
}
}
}
#endif
/* Since we don't mark zone at a time right now, marking in any
zone means marking in every zone. So we have to clear all the
marks in all the zones that weren't collected already. */
if (marked)
{
page_entry *p;
for (zone = G.zones; zone; zone = zone->next_zone)
{
if (zone->was_collected)
continue;
for (p = zone->pages; p; p = p->next)
{
if (!p->large_p)
{
struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
do
{
prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
if (chunk->mark || p->context_depth < zone->context_depth)
{
chunk->mark = 0;
}
chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
}
while (chunk < end);
}
else
{
((struct alloc_chunk *)p->page)->mark = 0;
}
}
}
}
zone_free_marks ();
/* Free dead zones. */
for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
@@ -1349,17 +1962,18 @@ ggc_print_statistics (void)
struct alloc_zone *zone;
struct ggc_statistics stats;
size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
size_t pte_overhead, i;
/* Clear the statistics. */
memset (&stats, 0, sizeof (stats));
/* Make sure collection will really occur, in all zones. */
always_collect = 1;
/* Make sure collection will really occur. */
ggc_force_collect = true;
/* Collect and print the statistics common across collectors. */
ggc_print_common_statistics (stderr, &stats);
always_collect = 0;
ggc_force_collect = false;
/* Release free pages so that we will not count the bytes allocated
there as part of the total allocated memory. */
@@ -1375,48 +1989,43 @@ ggc_print_statistics (void)
"Zone", "Allocated", "Used", "Overhead");
for (zone = G.zones; zone; zone = zone->next_zone)
{
page_entry *p;
size_t allocated;
size_t in_use;
size_t overhead;
struct large_page_entry *large_page;
size_t overhead, allocated, in_use;
/* Skip empty entries. */
if (!zone->pages)
/* Skip empty zones. */
if (!zone->pages && !zone->large_pages)
continue;
overhead = allocated = in_use = 0;
/* Figure out the total number of bytes allocated for objects of
this size, and how many of them are actually in use. Also figure
out how much memory the page table is using. */
for (p = zone->pages; p; p = p->next)
{
struct alloc_chunk *chunk;
allocated = in_use = 0;
/* We've also allocated sizeof (page_entry), but it's not in the
"managed" area... */
allocated += p->bytes;
overhead += sizeof (page_entry);
overhead = sizeof (struct alloc_zone);
if (p->large_p)
for (large_page = zone->large_pages; large_page != NULL;
large_page = large_page->next)
{
in_use += p->bytes - CHUNK_OVERHEAD;
chunk = (struct alloc_chunk *) p->page;
overhead += CHUNK_OVERHEAD;
gcc_assert (chunk->type && !chunk->mark);
continue;
allocated += large_page->bytes;
in_use += large_page->bytes;
overhead += sizeof (struct large_page_entry);
}
for (chunk = (struct alloc_chunk *) p->page;
(char *) chunk < (char *) p->page + p->bytes;
chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size))
/* There's no easy way to walk through the small pages finding
used and unused objects. Instead, add all the pages, and
subtract out the free list. */
allocated += GGC_PAGE_SIZE * zone->n_small_pages;
in_use += GGC_PAGE_SIZE * zone->n_small_pages;
overhead += G.small_page_overhead * zone->n_small_pages;
for (i = 0; i <= NUM_FREE_BINS; i++)
{
overhead += CHUNK_OVERHEAD;
if (chunk->type)
in_use += chunk->size;
gcc_assert (!chunk->mark);
struct alloc_chunk *chunk = zone->free_chunks[i];
while (chunk)
{
in_use -= ggc_get_size (chunk);
chunk = chunk->next_free;
}
}
fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
zone->name,
SCALE (allocated), LABEL (allocated),
@@ -1430,6 +2039,30 @@ ggc_print_statistics (void)
total_bytes_mapped += zone->bytes_mapped;
}
/* Count the size of the page table as best we can. */
#if HOST_BITS_PER_PTR <= 32
pte_overhead = sizeof (G.lookup);
for (i = 0; i < PAGE_L1_SIZE; i++)
if (G.lookup[i])
pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
{
struct page_table_chain *table = G.lookup;
pte_overhead = 0;
while (table)
{
pte_overhead += sizeof (*table);
for (i = 0; i < PAGE_L1_SIZE; i++)
if (table->table[i])
pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
table = table->next;
}
}
#endif
fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
"", "", SCALE (pte_overhead), LABEL (pte_overhead));
total_overhead += pte_overhead;
fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
SCALE (total_allocated), LABEL(total_allocated),
@@ -1485,14 +2118,34 @@ ggc_print_statistics (void)
#endif
}
/* Precompiled header support. */
/* For precompiled headers, we sort objects based on their type. We
also sort various objects into their own buckets; currently this
covers strings and IDENTIFIER_NODE trees. The choices of how
to sort buckets have not yet been tuned. */
#define NUM_PCH_BUCKETS (gt_types_enum_last + 3)
#define OTHER_BUCKET (gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
#define STRING_BUCKET (gt_types_enum_last + 2)
struct ggc_pch_ondisk
{
size_t total;
size_t type_totals[NUM_PCH_BUCKETS];
};
struct ggc_pch_data
{
struct ggc_pch_ondisk
{
unsigned total;
} d;
struct ggc_pch_ondisk d;
size_t base;
size_t written;
size_t orig_base;
size_t alloc_size;
alloc_type *alloc_bits;
size_t type_bases[NUM_PCH_BUCKETS];
size_t start_offset;
};
/* Initialize the PCH data structure. */
@@ -1503,18 +2156,40 @@ init_ggc_pch (void)
return xcalloc (sizeof (struct ggc_pch_data), 1);
}
/* Return which of the page-aligned buckets the object at X, with type
TYPE, should be sorted into in the PCH. Strings will have
IS_STRING set and TYPE will be gt_types_enum_last. Other objects
of unknown type will also have TYPE equal to gt_types_enum_last. */
static int
pch_bucket (void *x, enum gt_types_enum type,
bool is_string)
{
/* Sort identifiers into their own bucket, to improve locality
when searching the identifier hash table. */
if (type == gt_ggc_e_14lang_tree_node
&& TREE_CODE ((tree) x) == IDENTIFIER_NODE)
return IDENTIFIER_BUCKET;
else if (type == gt_types_enum_last)
{
if (is_string)
return STRING_BUCKET;
return OTHER_BUCKET;
}
return type;
}
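The bucket space is therefore the gt_types_enum values followed by three synthetic buckets. A tiny sketch of the numbering, using a stand-in value for gt_types_enum_last:

#include <stdio.h>

enum { types_enum_last = 5 };  /* stand-in for gt_types_enum_last */
#define OTHER_BUCKET      (types_enum_last + 0)
#define IDENTIFIER_BUCKET (types_enum_last + 1)
#define STRING_BUCKET     (types_enum_last + 2)
#define NUM_PCH_BUCKETS   (types_enum_last + 3)

int
main (void)
{
  /* Typed objects fall in buckets [0, types_enum_last); the three
     special buckets follow at the end.  */
  printf ("other=%d identifier=%d string=%d total=%d\n",
          OTHER_BUCKET, IDENTIFIER_BUCKET, STRING_BUCKET, NUM_PCH_BUCKETS);
  return 0;
}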
/* Add the size of object X to the size of the PCH data. */
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
size_t size, bool is_string)
size_t size, bool is_string, enum gt_types_enum type)
{
if (!is_string)
{
d->d.total += size + CHUNK_OVERHEAD;
}
else
d->d.total += size;
/* NOTE: Right now we don't need to align up the size of any objects.
Strings can be unaligned, and everything else is allocated to a
MAX_ALIGNMENT boundary already. */
d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}
/* Return the total size of the PCH data. */
@@ -1522,95 +2197,181 @@ ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
return d->d.total;
enum gt_types_enum i;
size_t alloc_size, total_size;
total_size = 0;
for (i = 0; i < NUM_PCH_BUCKETS; i++)
{
d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
total_size += d->d.type_totals[i];
}
d->d.total = total_size;
/* Include the size of the allocation bitmap. */
alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
d->alloc_size = alloc_size;
return d->d.total + alloc_size;
}
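Each bucket total is first padded to a page boundary; the bitmap then needs one bit per BYTES_PER_ALLOC_BIT bytes of payload, rounded up to MAX_ALIGNMENT. A worked sketch of that arithmetic under assumed constants (4-byte allocation granularity, 16-byte alignment, 4K pages); CEIL and ROUND_UP follow their conventional definitions:

#include <stdio.h>

#define CEIL(x, y)     (((x) + (y) - 1) / (y))
#define ROUND_UP(x, y) (CEIL (x, y) * (y))

#define GGC_PAGE_SIZE       4096  /* assumed */
#define BYTES_PER_ALLOC_BIT 4     /* assumed */
#define MAX_ALIGNMENT       16    /* assumed */

int
main (void)
{
  size_t bucket_total = 100000;  /* hypothetical bucket payload */
  size_t total = ROUND_UP (bucket_total, GGC_PAGE_SIZE);      /* 102400 */
  size_t alloc_size = CEIL (total, BYTES_PER_ALLOC_BIT * 8);  /* 3200 */
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);          /* 3200 */
  printf ("padded total %zu, bitmap %zu bytes\n", total, alloc_size);
  return 0;
}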
/* Set the base address for the objects in the PCH file. */
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
d->base = (size_t) base;
int i;
size_t base = (size_t) base_;
d->base = d->orig_base = base;
for (i = 0; i < NUM_PCH_BUCKETS; i++)
{
d->type_bases[i] = base;
base += d->d.type_totals[i];
}
if (d->alloc_bits == NULL)
d->alloc_bits = xcalloc (1, d->alloc_size);
}
/* Allocate a place for object X of size SIZE in the PCH file. */
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
size_t size, bool is_string)
size_t size, bool is_string,
enum gt_types_enum type)
{
size_t alloc_word, alloc_bit;
char *result;
result = (char *)d->base;
if (!is_string)
{
struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
if (chunk->large)
d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
else
d->base += chunk->size + CHUNK_OVERHEAD;
return result + CHUNK_OVERHEAD;
}
else
{
d->base += size;
int bucket = pch_bucket (x, type, is_string);
/* Record the start of the object in the allocation bitmap. We
can't assert that the allocation bit is previously clear, because
strings may violate the invariant that they are at least
BYTES_PER_ALLOC_BIT long. This is harmless - ggc_get_size
should not be called for strings. */
alloc_word = ((d->type_bases[bucket] - d->orig_base)
/ (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
alloc_bit = ((d->type_bases[bucket] - d->orig_base)
/ BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
d->alloc_bits[alloc_word] |= 1L << alloc_bit;
/* Place the object at the current pointer for this bucket. */
result = (char *) d->type_bases[bucket];
d->type_bases[bucket] += size;
return result;
}
}
/* Prepare to write out the PCH data to file F. */
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
FILE *f ATTRIBUTE_UNUSED)
ggc_pch_prepare_write (struct ggc_pch_data *d,
FILE *f)
{
/* Nothing to do. */
/* We seek around a lot while writing. Record where the end
of the padding in the PCH file is, so that we can
locate each object's offset. */
d->start_offset = ftell (f);
}
/* Write out object X of SIZE to file F. */
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
size_t size, bool is_string)
ggc_pch_write_object (struct ggc_pch_data *d,
FILE *f, void *x, void *newx,
size_t size, bool is_string ATTRIBUTE_UNUSED)
{
if (!is_string)
{
struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
size = ggc_get_size (x);
if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
fatal_error ("can't write PCH file: %m");
d->written += size + CHUNK_OVERHEAD;
}
else
{
if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
fatal_error ("can't seek PCH file: %m");
if (fwrite (x, size, 1, f) != 1)
fatal_error ("can't write PCH file: %m");
d->written += size;
}
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
/* Write out the allocation bitmap. */
if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
fatal_error ("can't seek PCH file: %m");
if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
fatal_error ("can't write PCH fle: %m");
/* Done with the PCH, so write out our footer. */
if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
fatal_error ("can't write PCH file: %m");
free (d->alloc_bits);
free (d);
}
/* The PCH file from F has been mapped at ADDR. Read in any
additional data from the file and set up the GC state. */
void
ggc_pch_read (FILE *f, void *addr)
{
struct ggc_pch_ondisk d;
struct page_entry *entry;
struct alloc_zone *pch_zone;
size_t alloc_size;
struct alloc_zone *zone;
struct page_entry *pch_page;
char *p;
if (fread (&d, sizeof (d), 1, f) != 1)
fatal_error ("can't read PCH file: %m");
entry = xcalloc (1, sizeof (struct page_entry));
entry->bytes = d.total;
entry->page = addr;
entry->context_depth = 0;
pch_zone = new_ggc_zone ("PCH zone");
entry->zone = pch_zone;
entry->next = entry->zone->pages;
entry->zone->pages = entry;
alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
pch_zone.bytes = d.total;
pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
pch_zone.page = (char *) addr;
pch_zone.end = (char *) pch_zone.alloc_bits;
/* We've just read in a PCH file. So, every object that used to be
allocated is now free. */
for (zone = G.zones; zone; zone = zone->next_zone)
{
struct small_page_entry *page, *next_page;
struct large_page_entry *large_page, *next_large_page;
zone->allocated = 0;
/* Clear the zone's free chunk list. */
memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
zone->high_free_bin = 0;
zone->cached_free = NULL;
zone->cached_free_size = 0;
/* Move all the small pages onto the free list. */
for (page = zone->pages; page != NULL; page = next_page)
{
next_page = page->next;
memset (page->alloc_bits, 0,
G.small_page_overhead - PAGE_OVERHEAD);
free_small_page (page);
}
/* Discard all the large pages. */
for (large_page = zone->large_pages; large_page != NULL;
large_page = next_large_page)
{
next_large_page = large_page->next;
free_large_page (large_page);
}
zone->pages = NULL;
zone->large_pages = NULL;
}
/* Allocate the dummy page entry for the PCH, and set all pages
mapped into the PCH to reference it. */
pch_page = xcalloc (1, sizeof (struct page_entry));
pch_page->page = pch_zone.page;
pch_page->pch_p = true;
for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
set_page_table_entry (p, pch_page);
}
@@ -54,7 +54,8 @@ typedef void (*gt_handle_reorder) (void *, void *, gt_pointer_operator,
void *);
/* Used by the gt_pch_n_* routines. Register an object in the hash table. */
extern int gt_pch_note_object (void *, void *, gt_note_pointers);
extern int gt_pch_note_object (void *, void *, gt_note_pointers,
enum gt_types_enum);
/* Used by the gt_pch_n_* routines. Register that an object has a reorder
function. */
@@ -169,9 +170,10 @@ extern struct ggc_pch_data *init_ggc_pch (void);
/* The second parameter and third parameters give the address and size
of an object. Update the ggc_pch_data structure with as much of
that information as is necessary. The last argument should be true
that information as is necessary. The bool argument should be true
if the object is a string. */
extern void ggc_pch_count_object (struct ggc_pch_data *, void *, size_t, bool);
extern void ggc_pch_count_object (struct ggc_pch_data *, void *, size_t, bool,
enum gt_types_enum);
/* Return the total size of the data to be written to hold all
the objects previously passed to ggc_pch_count_object. */
@@ -183,8 +185,9 @@ extern void ggc_pch_this_base (struct ggc_pch_data *, void *);
/* Assuming that the objects really do end up at the address
passed to ggc_pch_this_base, return the address of this object.
The last argument should be true if the object is a string. */
extern char *ggc_pch_alloc_object (struct ggc_pch_data *, void *, size_t, bool);
The bool argument should be true if the object is a string. */
extern char *ggc_pch_alloc_object (struct ggc_pch_data *, void *, size_t, bool,
enum gt_types_enum);
/* Write out any initial information required. */
extern void ggc_pch_prepare_write (struct ggc_pch_data *, FILE *);
@@ -203,30 +206,18 @@ extern void ggc_pch_read (FILE *, void *);
/* Allocation. */
/* For single pass garbage. */
extern struct alloc_zone *garbage_zone;
/* For regular rtl allocations. */
extern struct alloc_zone *rtl_zone;
/* For regular tree allocations. */
extern struct alloc_zone *tree_zone;
/* When set, ggc_collect will do collection. */
extern bool ggc_force_collect;
/* The internal primitive. */
extern void *ggc_alloc_stat (size_t MEM_STAT_DECL);
#define ggc_alloc(s) ggc_alloc_stat (s MEM_STAT_INFO)
/* Allocate an object into the specified allocation zone. */
extern void *ggc_alloc_zone_stat (size_t, struct alloc_zone * MEM_STAT_DECL);
#define ggc_alloc_zone(s,z) ggc_alloc_zone_stat (s,z MEM_STAT_INFO)
/* Allocate an object of the specified type and size. */
extern void *ggc_alloc_typed_stat (enum gt_types_enum, size_t MEM_STAT_DECL);
#define ggc_alloc_typed(s,z) ggc_alloc_typed_stat (s,z MEM_STAT_INFO)
/* Like ggc_alloc, but allocates cleared memory. */
extern void *ggc_alloc_cleared_stat (size_t MEM_STAT_DECL);
#define ggc_alloc_cleared(s) ggc_alloc_cleared_stat (s MEM_STAT_INFO)
/* Like ggc_alloc_zone, but allocates cleared memory. */
extern void *ggc_alloc_cleared_zone (size_t, struct alloc_zone * MEM_STAT_DECL);
#define ggc_alloc_cleared_zone(s,z) ggc_alloc_cleared_stat (s,z MEM_STAT_INFO)
/* Resize a block. */
extern void *ggc_realloc_stat (void *, size_t MEM_STAT_DECL);
#define ggc_realloc(s,z) ggc_realloc_stat (s,z MEM_STAT_INFO)
@@ -250,10 +241,10 @@ extern void dump_ggc_loc_statistics (void);
#define GGC_CNEWVAR(T, S) ((T *) ggc_alloc_cleared ((S)))
#define ggc_alloc_rtvec(NELT) \
((rtvec) ggc_alloc_typed (gt_ggc_e_9rtvec_def, sizeof (struct rtvec_def) \
+ ((NELT) - 1) * sizeof (rtx)))
((rtvec) ggc_alloc_zone (sizeof (struct rtvec_def) + ((NELT) - 1) \
* sizeof (rtx), &rtl_zone))
#define ggc_alloc_tree(LENGTH) ((tree) ggc_alloc_zone (LENGTH, tree_zone))
#define ggc_alloc_tree(LENGTH) ((tree) ggc_alloc_zone (LENGTH, &tree_zone))
#define htab_create_ggc(SIZE, HASH, EQ, DEL) \
htab_create_alloc (SIZE, HASH, EQ, DEL, ggc_calloc, NULL)
@@ -309,4 +300,29 @@ extern int ggc_min_expand_heuristic (void);
extern int ggc_min_heapsize_heuristic (void);
extern void init_ggc_heuristics (void);
/* Zone collection. */
#if defined (GGC_ZONE) && !defined (GENERATOR_FILE)
/* For regular rtl allocations. */
extern struct alloc_zone rtl_zone;
/* For regular tree allocations. */
extern struct alloc_zone tree_zone;
/* For IDENTIFIER_NODE allocations. */
extern struct alloc_zone tree_id_zone;
/* Allocate an object into the specified allocation zone. */
extern void *ggc_alloc_zone_stat (size_t, struct alloc_zone * MEM_STAT_DECL);
# define ggc_alloc_zone(s,z) ggc_alloc_zone_stat (s,z MEM_STAT_INFO)
#else
# define ggc_alloc_zone(s, z) ggc_alloc (s)
# ifdef GATHER_STATISTICS
# define ggc_alloc_zone_stat(s, z, n, l, f) ggc_alloc_stat (s, n, l, f)
# else
# define ggc_alloc_zone_stat(s, z) ggc_alloc_stat (s)
# endif
#endif
#endif
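With this conditional, call sites can name a zone unconditionally: under a non-zone collector the zone argument simply drops out during preprocessing. A self-contained sketch of the fallback pattern (local stand-in names, not the real GCC macros):

#include <stdio.h>
#include <stdlib.h>

struct alloc_zone { const char *name; };

static void *plain_alloc (size_t s) { return malloc (s); }

#ifdef MY_ZONES  /* stand-in for GGC_ZONE */
static void *
zone_alloc (size_t s, struct alloc_zone *z)
{
  printf ("allocating in %s\n", z->name);
  return malloc (s);
}
# define my_alloc_zone(s, z) zone_alloc (s, z)
#else
# define my_alloc_zone(s, z) plain_alloc (s)  /* zone argument ignored */
#endif

int
main (void)
{
  struct alloc_zone rtl = { "RTL zone" };
  void *p = my_alloc_zone (64, &rtl);  /* compiles either way */
  printf ("got %p (zone struct: %s)\n", p, rtl.name);
  free (p);
  return 0;
}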
@@ -174,8 +174,7 @@ rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL)
{
rtx rt;
rt = (rtx) ggc_alloc_typed_stat (gt_ggc_e_7rtx_def,
RTX_SIZE (code) PASS_MEM_STAT);
rt = (rtx) ggc_alloc_zone_stat (RTX_SIZE (code), &rtl_zone PASS_MEM_STAT);
/* We want to clear everything up to the FLD array. Normally, this
is one int, but we don't want to assume that and it isn't very
@@ -309,8 +308,8 @@ shallow_copy_rtx_stat (rtx orig MEM_STAT_DECL)
{
rtx copy;
copy = (rtx) ggc_alloc_typed_stat (gt_ggc_e_7rtx_def,
RTX_SIZE (GET_CODE (orig)) PASS_MEM_STAT);
copy = (rtx) ggc_alloc_zone_stat (RTX_SIZE (GET_CODE (orig)),
&rtl_zone PASS_MEM_STAT);
memcpy (copy, orig, RTX_SIZE (GET_CODE (orig)));
return copy;
}
......
@@ -197,7 +197,8 @@ gt_pch_p_S (void *obj ATTRIBUTE_UNUSED, void *x ATTRIBUTE_UNUSED,
void
gt_pch_n_S (const void *x)
{
gt_pch_note_object ((void *)x, (void *)x, &gt_pch_p_S);
gt_pch_note_object ((void *)x, (void *)x, &gt_pch_p_S,
gt_types_enum_last);
}
/* Handle saving and restoring the string pool for PCH. */
......
@@ -341,7 +341,10 @@ make_node_stat (enum tree_code code MEM_STAT_DECL)
tree_node_sizes[(int) kind] += length;
#endif
t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT);
if (code == IDENTIFIER_NODE)
t = ggc_alloc_zone_stat (length, &tree_id_zone PASS_MEM_STAT);
else
t = ggc_alloc_zone_stat (length, &tree_zone PASS_MEM_STAT);
memset (t, 0, length);
@@ -425,7 +428,7 @@ copy_node_stat (tree node MEM_STAT_DECL)
gcc_assert (code != STATEMENT_LIST);
length = tree_size (node);
t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT);
t = ggc_alloc_zone_stat (length, &tree_zone PASS_MEM_STAT);
memcpy (t, node, length);
TREE_CHAIN (t) = 0;
@@ -910,7 +913,7 @@ make_tree_binfo_stat (unsigned base_binfos MEM_STAT_DECL)
tree_node_sizes[(int) binfo_kind] += length;
#endif
t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT);
t = ggc_alloc_zone_stat (length, &tree_zone PASS_MEM_STAT);
memset (t, 0, offsetof (struct tree_binfo, base_binfos));
@@ -935,7 +938,7 @@ make_tree_vec_stat (int len MEM_STAT_DECL)
tree_node_sizes[(int) vec_kind] += length;
#endif
t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT);
t = ggc_alloc_zone_stat (length, &tree_zone PASS_MEM_STAT);
memset (t, 0, length);
@@ -1408,7 +1411,7 @@ tree_cons_stat (tree purpose, tree value, tree chain MEM_STAT_DECL)
tree node;
node = ggc_alloc_zone_stat (sizeof (struct tree_list),
tree_zone PASS_MEM_STAT);
&tree_zone PASS_MEM_STAT);
memset (node, 0, sizeof (struct tree_common));
@@ -2502,7 +2505,7 @@ build1_stat (enum tree_code code, tree type, tree node MEM_STAT_DECL)
gcc_assert (TREE_CODE_LENGTH (code) == 1);
t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT);
t = ggc_alloc_zone_stat (length, &tree_zone PASS_MEM_STAT);
memset (t, 0, sizeof (struct tree_common));
......