/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
   2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif


/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
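
/* A worked example (hypothetical numbers, not a requirement of the code):
   with 4096-byte system pages G.lg_pagesize is 12, so PAGE_L2_BITS is
   32 - 8 - 12 = 12 and each second-level table covers 4096 pages.  For an
   address such as p == 0xbfffe234, LOOKUP_L1 (p) extracts the top eight
   bits, 0xbf, and LOOKUP_L2 (p) extracts the next twelve bits,
   (0xbfffe234 >> 12) & 0xfff == 0xffe; the low twelve bits are the offset
   within the page and are ignored.  */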

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
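
/* A sketch of the arithmetic, assuming MAX_ALIGNMENT is 8 (so that 24
   bytes is one of the extra orders below) and 32-bit size_t wrap-around:
   for 24-byte objects the odd factor is 3, so DIV_SHIFT is 3 and DIV_MULT
   is 0xaaaaaaab, the inverse of 3 modulo 2^32.  An offset of 48 then gives
   48 * 0xaaaaaaab == 16 (mod 2^32) and 16 >> 3 == 2, which is indeed
   48 / 24.  */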

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))


/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
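
/* For example, ROUND_UP_VALUE (20, 8) is 4 and ROUND_UP (20, 8) is 24.  */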

/* The Ith entry is the number of objects on a page or order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.   The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by that given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
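
/* An illustrative instance (actual values depend on the host page size
   and HOST_BITS_PER_LONG): with 64-bit longs, a 4096-byte page of 32-byte
   objects holds 128 objects, and alloc_page requests BITMAP_SIZE (128 + 1)
   to cover the one-past-the-end sentinel bit, i.e. CEIL (129, 64) * 8
   == 24 bytes.  */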

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 256
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
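
/* As the table stands here, size_lookup[13] is 4, so a 13-byte request is
   served from the 16-byte order.  init_ggc later rewrites entries for the
   extra orders; assuming MAX_ALIGNMENT is 8, requests of 17..24 bytes then
   fall into the 24-byte extra order rather than the 32-byte power-of-two
   order.  */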

/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
		      MEM_STAT_DECL)
{
  return ggc_internal_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
		       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
	G.stats.total_overhead_under32 += overhead;
	G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
	G.stats.total_overhead_under64 += overhead;
	G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
	G.stats.total_overhead_under128 += overhead;
	G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}

/* If P is not marked, marks it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
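
/* A small worked example (illustrative, assuming 64-bit longs): if
   OFFSET_TO_BIT yields bit 75 for P, then WORD is 75 / 64 == 1 and MASK is
   1UL << (75 % 64) == 1UL << 11, so the mark lives in bit 11 of the second
   in_use_p word.  */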

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
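
/* For illustration only: a free-standing sketch of the exact-division
   trick above, assuming 32-bit unsigned arithmetic.  It is not part of
   the collector and is kept out of the build.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned int size = 24, inv, shift = 0, offset = 48;

  /* Strip the power-of-two factor; it becomes the shift count.  */
  while (size % 2 == 0)
    {
      shift++;
      size >>= 1;
    }

  /* Newton iteration for the inverse of the odd part modulo 2^32.  */
  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv * size);

  /* 48 is a multiple of 24, so this prints 2.  */
  printf ("%u\n", (offset * inv) >> shift);
  return 0;
}
#endif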

/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize);
	gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
	continue;

      for (o = size_lookup[i]; o == size_lookup [i]; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }
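  /* For instance (added remark), a merged in_use_p word of binary 1011
     clears three objects from num_free_objects via the bit-counting
     loop above.  */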

  gcc_assert (p->num_free_objects < num_objects);
}

/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

	  /* The data should be page-aligned.  */
	  gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
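/* A small worked example of the sentinel arithmetic above (added remark):
   for a page holding 100 objects with 64-bit longs, the one-past-the-end
   bit is bit 100 % 64 == 36 of word 100 / 64 == 1, so the bitmap always
   contains at least one set bit after the marks are cleared.  */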

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
             allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      /* If P was the first page in the list, then NEXT
		 becomes the new first page in the list, otherwise
		 splice P out of the forward pointers.  */
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Splice P out of the back pointers too.  */
	      if (next)
		next->prev = previous;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  p->prev = G.page_tails[order];
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;

		  /* And update the backpointer in NEXT if necessary.  */
		  if (next)
		    next->prev = previous;

		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;

	      /* Update the backchain in the next node if it exists.  */
	      if (p->next)
		p->next->prev = previous;

	      /* Move P to the head of the list.  */
	      p->next = G.pages[order];
	      p->prev = NULL;
	      G.pages[order]->prev = p;

	      /* Update the head pointer.  */
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
	        G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics are kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
								 size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

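  /* Illustrative threshold (added remark): if GGC_MIN_HEAPSIZE is 4096
     (kB) and GGC_MIN_EXPAND is 30, a collection cannot be triggered
     before roughly 4 MB + 30% = ~5.2 MB of live allocation, unless
     ggc_force_collect is set.  */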
  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
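/* For example (added remark), a value of 3 MB (3145728 bytes) is below
   the 10 MB cutoff, so SCALE yields 3072 and STAT_LABEL yields 'k',
   printing "3072k"; values under 10 kB are printed in plain bytes.  */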

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL(G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                        %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                       %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          fprintf (stderr, "Total Overhead  page size %7lu:     %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu:     %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}

struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}
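/* Example (added remark): with a 4096-byte page size, three 64-byte
   objects in an order contribute ROUND_UP (192, 4096) == 4096 bytes to
   the total, since every order's PCH region is padded out to a page
   boundary.  */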

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}


char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
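/* Note (added remark): sizes not covered by the size_lookup table fall
   back to a linear scan that starts at order 10 and walks upward until
   an order with a large enough OBJECT_SIZE is found.  */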

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE(order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof(emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }
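  /* Illustrative padding case (added remark): writing a 29-byte string
     into an order whose OBJECT_SIZE is 32 appends 3 bytes from
     emptyBytes, keeping every object in the PCH image at a multiple of
     OBJECT_SIZE (order).  */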

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
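/* For example (added remark), with 2 old page-table entries and 3
   entries just read from a PCH, the reordered by_depth becomes
   [new0, new1, new2, old0, old1], and push_depth (3) records where the
   old (depth 1) entries begin.  */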

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays, later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}

struct alloc_zone
{
  int dummy;
};

struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;