/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
   2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "plugin.h"
#include "vec.h"
#include "timevar.h"
37

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

/* Forward declarations of the local helpers defined below.  */
static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

61 62 63
/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
64
ggc_htab_delete (void **slot, void *info)
65
{
66
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;
67 68

  if (! (*r->marked_p) (*slot))
69 70 71
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);
72 73 74 75

  return 1;
}

76 77 78

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
DEF_VEC_P(const_ggc_root_tab_t);
DEF_VEC_ALLOC_P(const_ggc_root_tab_t, heap);
static VEC(const_ggc_root_tab_t, heap) *extra_root_vec;

/* Dynamically register a new GGC root table RT. This is useful for
   plugins. */

H.J. Lu committed
89
void
90 91
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
92 93
  if (rt)
    VEC_safe_push (const_ggc_root_tab_t, heap, extra_root_vec, rt);
94 95
}

96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131
/* This extra vector of dynamically registered cache_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC cache
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
DEF_VEC_P(const_ggc_cache_tab_t);
DEF_VEC_ALLOC_P(const_ggc_cache_tab_t, heap);
static VEC(const_ggc_cache_tab_t, heap) *extra_cache_vec;

/* Dynamically register a new GGC cache table CT. This is useful for
   plugins. */

void
ggc_register_cache_tab (const struct ggc_cache_tab* ct)
{
  if (ct)
    VEC_safe_push (const_ggc_cache_tab_t, heap, extra_cache_vec, ct);
}

/* Scan a hash table that has objects which are to be deleted if they are not
   already marked.  CTP is a NULL-base-terminated array of cache table
   entries.  */

static void
ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
{
  const struct ggc_cache_tab *entry;

  for (entry = ctp; entry->base != NULL; entry++)
    {
      if (*entry->base == NULL)
	continue;
      /* Mark the table itself, sweep its unmarked elements, then mark
	 the entry array (which the sweep may have touched).  */
      ggc_set_mark (*entry->base);
      htab_traverse_noresize (*entry->base, ggc_htab_delete,
			      CONST_CAST (void *, (const void *) entry));
      ggc_set_mark ((*entry->base)->entries);
    }
}
132

133 134 135 136 137 138 139 140 141 142 143 144
/* Mark all the roots in the table RT, a NULL-base-terminated array of
   root descriptors; each descriptor covers NELT pointers spaced STRIDE
   bytes apart.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t elt;

  for ( ; rt->base != NULL; rt++)
    for (elt = 0; elt < rt->nelt; elt++)
      {
	void **root = (void **) ((char *) rt->base + rt->stride * elt);
	(*rt->cb) (*root);
      }
}

145 146
/* Iterate through all registered roots and mark each element.  Called
   by the collector at the start of the mark phase.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  const struct ggc_cache_tab *const *ct;
  const_ggc_cache_tab_t ctp;
  size_t i;

  /* Clear the deletable roots first: anything they pointed to may be
     reclaimed unless reachable some other way.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark the statically registered roots, then the ones added
     dynamically (e.g. by plugins).  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (const_ggc_root_tab_t, extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    ggc_scan_cache_tab (*ct);

  FOR_EACH_VEC_ELT (const_ggc_cache_tab_t, extra_cache_vec, i, ctp)
    ggc_scan_cache_tab (ctp);

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

184 185
/* Allocate a block of memory, then clear it.  */
void *
186
ggc_internal_cleared_alloc_stat (size_t size MEM_STAT_DECL)
187
{
188
  void *buf = ggc_internal_alloc_stat (size PASS_MEM_STAT);
189 190
  memset (buf, 0, size);
  return buf;
191 192
}

193 194
/* Resize a block of memory, possibly re-allocating it.  X may be NULL,
   in which case this degenerates to a plain allocation.  Shrinking is
   done in place; growing allocates a new block and frees the old one.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc_stat (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

239
void *
240 241
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
242
{
243 244 245 246 247 248 249 250 251 252 253
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_alloc_cleared_htab ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  Allocates a cleared
   array of C pointers; N must be the size of one pointer.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (n == sizeof (PTR *));
  return ggc_internal_cleared_vec_alloc (sizeof (PTR *), c);
}

256
/* These are for splay_tree_new_ggc.  */
257
void *
258 259
ggc_splay_alloc (enum gt_types_enum obj_type ATTRIBUTE_UNUSED, int sz,
		 void *nl)
260
{
261
  gcc_assert (!nl);
262
  return ggc_internal_alloc (sz);
263 264 265
}

void
266
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
267
{
268
  gcc_assert (!nl);
269 270
}

271
/* Print statistics that are independent of the collector in use.  */
/* SCALE reduces a byte count X to a displayable magnitude; LABEL picks
   the matching unit suffix (' ' bytes, 'k' kilobytes, 'M' megabytes).  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
278 279

void
280 281
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
282 283 284 285 286 287 288 289
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

290 291 292 293 294 295 296 297 298 299
  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

/* Hash table mapping each registered object to its ptr_data record.  */
static htab_t saving_htab;

/* Bookkeeping for one object registered for PCH output.  */
struct ptr_data
{
  void *obj;			/* The object itself.  */
  void *note_ptr_cookie;	/* Cookie passed back to note_ptr_fn.  */
  gt_note_pointers note_ptr_fn;	/* Walks the pointers inside the object.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Size of the object in bytes.  */
  void *new_addr;		/* Address the object will occupy in the PCH.  */
  enum gt_types_enum type;	/* Static type tag.  */
};

/* Hash a pointer by dropping the low-order (alignment) bits.  */
#define POINTER_HASH(x) (hashval_t)((long)x >> 3)

/* Register an object in the hash table.  OBJ is the object, TYPE its
   static type tag, NOTE_PTR_FN the routine that walks its internal
   pointers (with NOTE_PTR_COOKIE passed back to it).  Returns 1 if the
   object was newly registered, 0 if it was NULL, the "value 1"
   sentinel, or already known.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn,
		    enum gt_types_enum type)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
			      INSERT);
  if (*slot != NULL)
    {
      /* Re-registration must agree with the original registration.  */
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  /* gt_pch_p_S marks a C string: its size is not recorded by the
     allocator, so measure it directly.  */
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  (*slot)->type = type;
  return 1;
}

/* Register an object in the hash table.  */

void
350 351
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
352 353
{
  struct ptr_data *data;
354

355 356 357
  if (obj == NULL || obj == (void *) 1)
    return;

358 359
  data = (struct ptr_data *)
    htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
360
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);
361

362 363 364 365 366 367
  data->reorder_fn = reorder_fn;
}

/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
368
saving_htab_hash (const void *p)
369
{
370
  return POINTER_HASH (((const struct ptr_data *)p)->obj);
371 372 373
}

static int
374
saving_htab_eq (const void *p1, const void *p2)
375
{
376
  return ((const struct ptr_data *)p1)->obj == p2;
377 378 379 380
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH output file.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of registered objects.  */
  struct ptr_data **ptrs;	/* Array of all registered objects.  */
  size_t ptrs_i;		/* Next free index into PTRS.  */
};

/* Callbacks for htab_traverse.  */

static int
393
call_count (void **slot, void *state_p)
394 395 396
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;
397

398 399 400
  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S,
			d->type);
401 402 403 404 405
  state->count++;
  return 1;
}

static int
406
call_alloc (void **slot, void *state_p)
407 408 409
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;
410

411 412 413
  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S,
				      d->type);
414 415 416 417 418 419 420
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
421
compare_ptr_data (const void *p1_p, const void *p2_p)
422
{
423 424
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
425 426 427 428 429 430 431
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

/* Callbacks for note_ptr_fn.  */

static void
432
relocate_ptrs (void *ptr_p, void *state_p)
433 434
{
  void **ptr = (void **)ptr_p;
435
  struct traversal_state *state ATTRIBUTE_UNUSED
436 437 438 439 440
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;
441

442 443
  result = (struct ptr_data *)
    htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
444
  gcc_assert (result);
445 446 447 448 449
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  Each root pointer
   is replaced in the output by the address its referent will have in
   the PCH image; NULL and the "value 1" sentinel are written as-is.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can%'t write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can%'t write PCH file: %m");
	    }
	}
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;	/* File offset where the mmapped area begins.  */
  size_t size;		/* Size of the mmapped area.  */
  void *preferred_base;	/* Address the image was laid out to run at.  */
};

/* Write out the state of the compiler to F.  Registers every reachable
   GC object, assigns each a position in the PCH image, writes the
   scalar roots and (relocated) pointer roots, then streams the objects
   themselves after an alignment pad.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  /* Walk all roots; the pchw callbacks register every reachable object
     in saving_htab via gt_pch_note_object.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  htab_traverse (saving_htab, call_alloc, &state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

  /* Actually write out the objects.  Each object is copied aside,
     relocated in place, written, and then restored from the copy so
     the running compiler's data is left untouched.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      /* Strings are written verbatim, so nothing to undo for them.  */
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}

/* Read the state of the compiler back in from F, the mirror image of
   gt_pch_save: scalars, then pointer roots, then the mmap_info header,
   then the object image mapped (or read) at its preferred address.
   Aborts with fatal_error if the image cannot be placed where it was
   written to run.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can%'t read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  /* Ask the host to map (or otherwise place) the image at the address
     it was written for.  <0: impossible; 0: memory allocated, caller
     reads the data; >0: data already mapped.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
663

664 665 666
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */
667

668 669 670 671 672 673
void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}
674

675 676 677 678 679 680 681 682 683 684 685 686 687
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so
   relocation of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  if (xmalloc (size) == base)
    return 0;
  return -1;
}
688

689 690 691 692 693 694 695 696 697 698
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS.   Return the
   alignment required for allocating virtual memory. Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}

699 700 701
#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
702
   wherever it will.  If it worked, that's our spot, if not we're likely
703
   to be in trouble.  */
704

705 706 707 708
void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;
709

710 711 712 713
  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
714
    munmap ((caddr_t) ret, size);
715

716 717
  return ret;
}
718

719
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  /* Only an exact placement counts as success; MAP_FIXED is deliberately
     not used, so the kernel may have put the mapping elsewhere.  */
  return addr == base ? 1 : -1;
}
743
#endif /* HAVE_MMAP_FILE */
744

745 746
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

747
/* Modify the bound based on rlimits.  LIMIT is a candidate memory bound
   in bytes; return it reduced to any effective address-space (or data)
   rlimit.  Falls through unchanged when getrlimit is unavailable.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

780
/* Heuristic to set a default for GGC_MIN_EXPAND: a percentage computed
   from physical RAM (bounded by rlimits).  */
static int
ggc_min_expand_heuristic (void)
{
  double expand = ggc_rlimit_bound (physmem_total ());

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  expand /= 1024*1024*1024;
  expand *= 70;
  expand = MIN (expand, 70);
  expand += 30;

  return expand;
}

/* Heuristic to set a default for GGC_MIN_HEAPSIZE, in kilobytes:
   roughly RAM/8, clamped to [4M, 128M] and further reduced to stay
   within RSS and data-size rlimits.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
 {
   struct rlimit rlim;
   if (getrlimit (RLIMIT_RSS, &rlim) == 0
       && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
     phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
 }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
837
#endif
838 839

/* Install machine-dependent defaults for the GGC tuning parameters,
   unless a GC-checking build forces collection at every opportunity.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}
847 848 849 850 851 852 853 854 855 856 857 858

#ifdef GATHER_STATISTICS

/* Datastructure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;	/* Source file of the allocation site.  */
  int line;		/* Source line of the allocation site.  */
  const char *function;	/* Enclosing function name.  */
  int times;		/* Number of allocations made here.  */
  size_t allocated;	/* Total bytes requested.  */
  size_t overhead;	/* Total allocator-overhead bytes.  */
  size_t freed;		/* Bytes released via explicit ggc_free.  */
  size_t collected;	/* Bytes reclaimed by the collector.  */
};

/* Hashtable used for statistics.  */
static htab_t loc_hash;

/* Hash table helpers functions.  */
static hashval_t
hash_descriptor (const void *p)
{
870
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p;
871 872 873 874 875 876 877

  return htab_hash_pointer (d->function) | d->line;
}

static int
eq_descriptor (const void *p1, const void *p2)
{
878 879
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p1;
  const struct loc_descriptor *const d2 = (const struct loc_descriptor *) p2;
880 881 882 883 884

  return (d->file == d2->file && d->line == d2->line
	  && d->function == d2->function);
}

885 886 887 888 889 890 891 892 893 894 895 896 897
/* Hashtable converting address of allocated field to loc descriptor.  */
static htab_t ptr_hash;
struct ptr_hash_entry
{
  void *ptr;			/* The tracked allocation.  */
  struct loc_descriptor *loc;	/* Its allocation site.  */
  size_t size;			/* Allocated size including overhead.  */
};

/* Hash table helpers functions.  */
static hashval_t
hash_ptr (const void *p)
{
898
  const struct ptr_hash_entry *const d = (const struct ptr_hash_entry *) p;
899 900 901 902 903 904 905

  return htab_hash_pointer (d->ptr);
}

static int
eq_ptr (const void *p1, const void *p2)
{
906
  const struct ptr_hash_entry *const p = (const struct ptr_hash_entry *) p1;
907 908 909 910

  return (p->ptr == p2);
}

911 912 913 914 915 916 917 918 919 920 921 922 923
/* Return descriptor for given call site, create new one if needed.
   NAME/LINE/FUNCTION identify the allocation site; the table is
   created lazily on first use.  */
static struct loc_descriptor *
loc_descriptor (const char *name, int line, const char *function)
{
  struct loc_descriptor loc;
  struct loc_descriptor **slot;

  /* Build a stack key just for the lookup.  */
  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash)
    loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);

  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, INSERT);
  if (*slot)
    return *slot;
  /* First allocation from this site: create a zeroed descriptor.  */
  *slot = XCNEW (struct loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}

934 935
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
936
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
937
		     const char *name, int line, const char *function)
938 939
{
  struct loc_descriptor *loc = loc_descriptor (name, line, function);
940
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
941 942 943 944 945 946 947 948
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
949
  gcc_assert (!*slot);
950
  *slot = p;
951 952 953 954 955 956

  loc->times++;
  loc->allocated+=allocated;
  loc->overhead+=overhead;
}

957 958 959 960 961
/* Helper function for ggc_prune_overhead_list.  See if SLOT is still
   marked and remove it from the hashtable if it is not, charging its
   size to the allocation site's "collected" total.  */
static int
ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ptr_hash_entry *entry = (struct ptr_hash_entry *) *slot;

  if (ggc_marked_p (entry->ptr))
    return 1;

  /* The object did not survive the collection.  */
  entry->loc->collected += entry->size;
  htab_clear_slot (ptr_hash, slot);
  free (entry);
  return 1;
}

/* After live values has been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  /* ptr_hash is created lazily by ggc_record_overhead; if no allocation
     was ever recorded, htab_traverse would dereference a null pointer.  */
  if (ptr_hash)
    htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
}

/* Notice that the pointer has been freed.  */
981 982
void
ggc_free_overhead (void *ptr)
983 984 985
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
					NO_INSERT);
986 987 988 989 990 991 992
  struct ptr_hash_entry *p;
  /* The pointer might be not found if a PCH read happened between allocation
     and ggc_free () call.  FIXME: account memory properly in the presence of
     PCH. */
  if (!slot)
      return;
  p = (struct ptr_hash_entry *) *slot;
993 994 995 996 997
  p->loc->freed += p->size;
  htab_clear_slot (ptr_hash, slot);
  free (p);
}

998 999
/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
1000
final_cmp_statistic (const void *loc1, const void *loc2)
1001
{
1002 1003 1004 1005
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
1006 1007
  long diff;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
1008
	  (l2->allocated + l2->overhead - l2->freed));
1009 1010 1011 1012 1013 1014 1015
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
1016 1017 1018 1019
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
1020 1021 1022 1023 1024 1025 1026 1027 1028
  long diff;

  diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
	  (l2->allocated + l2->overhead - l2->freed - l2->collected));
  if (diff)
    return diff > 0 ? 1 : diff < 0 ? -1 : 0;
  diff =  ((long)(l1->allocated + l1->overhead - l1->freed) -
	   (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1029 1030 1031
}

/* Collect array of the descriptors from hashtable.  */
static struct loc_descriptor **loc_array;

/* htab_traverse callback: append the descriptor stored in *SLOT to
   loc_array at index *(int *)B and advance the index.  Returns 1 so the
   traversal always continues.  */
static int
add_statistics (void **slot, void *b)
{
  int *index_p = (int *) b;

  loc_array[(*index_p)++] = (struct loc_descriptor *) *slot;
  return 1;
}

/* Dump per-site memory statistics.  */
#endif
1044
void
1045
dump_ggc_loc_statistics (bool final ATTRIBUTE_UNUSED)
1046 1047 1048 1049
{
#ifdef GATHER_STATISTICS
  int nentries = 0;
  char s[4096];
1050
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
1051 1052
  int i;

1053 1054 1055
  ggc_force_collect = true;
  ggc_collect ();

1056
  loc_array = XCNEWVEC (struct loc_descriptor *, loc_hash->n_elements);
1057
  fprintf (stderr, "-------------------------------------------------------\n");
1058 1059
  fprintf (stderr, "\n%-48s %10s       %10s       %10s       %10s       %10s\n",
	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
1060 1061
  fprintf (stderr, "-------------------------------------------------------\n");
  htab_traverse (loc_hash, add_statistics, &nentries);
1062 1063
  qsort (loc_array, nentries, sizeof (*loc_array),
	 final ? final_cmp_statistic : cmp_statistic);
1064 1065 1066
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
1067 1068 1069 1070
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082
      overhead += d->overhead;
    }
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      if (d->allocated)
	{
	  const char *s1 = d->file;
	  const char *s2;
	  while ((s2 = strstr (s1, "gcc/")))
	    s1 = s2 + 4;
	  sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094
	  s[48] = 0;
	  fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
		   (long)d->collected,
		   (d->collected) * 100.0 / collected,
		   (long)d->freed,
		   (d->freed) * 100.0 / freed,
		   (long)(d->allocated + d->overhead - d->freed - d->collected),
		   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
		   / (allocated + overhead - freed - collected),
		   (long)d->overhead,
		   d->overhead * 100.0 / overhead,
		   (long)d->times);
1095 1096
	}
    }
1097 1098 1099 1100 1101 1102
  fprintf (stderr, "%-48s %10ld       %10ld       %10ld       %10ld       %10ld\n",
	   "Total", (long)collected, (long)freed,
	   (long)(allocated + overhead - freed - collected), (long)overhead,
	   (long)times);
  fprintf (stderr, "%-48s %10s       %10s       %10s       %10s       %10s\n",
	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
1103
  fprintf (stderr, "-------------------------------------------------------\n");
1104
  ggc_force_collect = false;
1105 1106
#endif
}