/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
Jason Merrill committed
30

31
/* NOTE(review): when _Unwind_Find_FDE is already defined (presumably by a
   wrapper such as unwind-dw2-fde-glibc.c that #includes this file after
   doing its own setup), skip the includes here -- confirm against the
   includer.  */
#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "dwarf2.h"
#include "unwind.h"
/* We provide base_from_object below, so tell unwind-pe.h not to supply
   the context-based base_of_encoded_value machinery.  */
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif
43

44 45 46 47 48 49
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   it's pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

/* Mutex protecting both object lists above.  Use the static initializer
   when the gthread port provides one; otherwise the mutex is set up
   lazily via init_object_mutex_once.  */
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
Kazu Hirata committed
58
static void
59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((uword *) begin == 0 || *(uword *) begin == 0)
    return;

  /* Fill in placeholders; pc_begin, count and the real encoding are
     computed lazily by init_object the first time this object is
     searched.  */
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Push onto the unclassified list; classification is deferred to
     the first _Unwind_Find_FDE call.  */
  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

/* Register the .eh_frame data at BEGIN for OB, with no text- or
   data-relative base addresses.  */
void
__register_frame_info (void *begin, struct object *ob)
{
  void *no_base = 0;

  __register_frame_info_bases (begin, ob, no_base, no_base);
}

void
110 111
__register_frame (void *begin)
{
112 113 114
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
115
  if (*(uword *) begin == 0)
116 117
    return;

118
  ob = malloc (sizeof (struct object));
Kazu Hirata committed
119
  __register_frame_info (begin, ob);
120 121 122 123 124 125 126
}

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

/* Like __register_frame_info_bases, but BEGIN is a null-terminated
   table of pointers to unwind-info blocks.  */
void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;	/* Mark BEGIN as a table, not a single block.  */
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

/* Table-of-entries variant of __register_frame_info: no base
   addresses are supplied.  */
void
__register_frame_info_table (void *begin, struct object *ob)
{
  void *no_base = 0;

  __register_frame_info_table_bases (begin, ob, no_base, no_base);
}

void
154 155
__register_frame_table (void *begin)
{
156
  struct object *ob = malloc (sizeof (struct object));
157 158 159 160
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
161 162 163 164 165 166 167 168
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll abort.

   Therefore, declare a new deregistration entry point that does the
Kazu Hirata committed
169
   exact same thing, but will resolve to the same library as
170
   implements __register_frame_info_bases.  */
171 172

/* Unlink and return the object whose unwind info begins at BEGIN.
   Aborts if BEGIN was never registered; returns NULL only for an
   empty .eh_frame (which was never registered in the first place).
   The returned pointer is the caller's to free.  */
void *
__deregister_frame_info_bases (void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((uword *) begin == 0 || *(uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* The object may still be unclassified...  */
  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  /* ...or already classified.  A sorted object points at the malloc'd
     sort array (keyed by orig_data); an unsorted one still points at
     BEGIN directly.  */
  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

  /* Deregistering something never registered is a hard error.  Drop
     the lock first so abort-time handlers can still unwind others.  */
  __gthread_mutex_unlock (&object_mutex);
  abort ();

 out:
  __gthread_mutex_unlock (&object_mutex);
  return (void *) ob;
}

222 223 224 225 226 227
/* Compatibility entry point; see the glibc note above.  Simply
   forwards to the bases variant.  */
void *
__deregister_frame_info (void *begin)
{
  void *ob = __deregister_frame_info_bases (begin);
  return ob;
}

228 229 230
void
__deregister_frame (void *begin)
{
231
  /* If .eh_frame is empty, we haven't registered.  */
232
  if (*(uword *) begin != 0)
233
    free (__deregister_frame_info (begin));
234 235 236
}


237 238 239 240 241 242 243 244 245 246 247 248 249
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
250
    case DW_EH_PE_aligned:
251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    }
  abort ();
}

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _Unwind_Word utmp;
  _Unwind_Sword stmp;

  /* Without a 'z' augmentation there is no augmentation data, hence
     no 'R' record; the default encoding is a raw absolute pointer.  */
  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen (aug) + 1;		/* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  p++;					/* Skip return address column.  */

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */
  /* Walk the remaining augmentation characters in lockstep with the
     augmentation data they describe, until we hit 'R'.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}

/* Return the pointer encoding used by FDE F, as recorded in its CIE.  */
static inline int
get_fde_encoding (struct dwarf_fde *f)
{
  struct dwarf_cie *cie = get_cie (f);
  return get_cie_encoding (cie);
}


313 314 315 316
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time. But the linkers are not yet prepared for this.)  */

317 318
/* Comparison routines.  Three variants of increasing complexity.  */

319
static int
320 321 322
fde_unencoded_compare (struct object *ob __attribute__((unused)),
		       fde *x, fde *y)
{
323 324 325 326
  _Unwind_Ptr x_ptr = *(_Unwind_Ptr *) x->pc_begin;
  _Unwind_Ptr y_ptr = *(_Unwind_Ptr *) y->pc_begin;

  if (x_ptr > y_ptr)
327
    return 1;
328
  if (x_ptr < y_ptr)
329 330
    return -1;
  return 0;
331 332
}

333
static int
334 335 336 337 338 339 340 341
fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

342 343 344 345 346
  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
347 348
}

349
static int
350 351 352 353 354 355 356 357 358 359 360 361 362
fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
				x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
				y->pc_begin, &y_ptr);

363 364 365 366 367
  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
368 369
}

370
typedef int (*fde_compare_t) (struct object *, fde *, fde *);
371 372


373 374 375 376 377 378 379 380 381 382 383
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

384
/* Scratch state for the sort: LINEAR receives every fde in input
   order; ERRATIC is working space for the out-of-order entries.
   Either pointer may be NULL if its allocation failed.  */
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
389

390
/* Allocate the scratch vectors for sorting COUNT fdes.  Returns
   nonzero iff at least ACCU->linear could be allocated; ERRATIC may
   still be NULL on success, in which case end_fde_sort falls back to
   heapsorting the linear array alone.  Returns zero for COUNT == 0.  */
static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
	accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}
Jason Merrill committed
408

409
static inline void
410
fde_insert (struct fde_accumulator *accu, fde *this_fde)
411
{
412 413
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
414 415 416 417
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
418
   length) into LINEAR and the erratic one into ERRATIC. This is O(N).
Kazu Hirata committed
419

420 421 422 423 424 425 426
   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries.  We construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
427

428
/* Partition LINEAR into its longest in-order subsequence (kept in
   LINEAR) and the remaining out-of-order entries (moved to ERRATIC).
   The chain of "still in sequence" back-pointers is overlaid onto the
   ERRATIC array to avoid a separate allocation; see the block comment
   above.  */
static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *linear, struct fde_vector *erratic)
{
  /* MARKER's address serves as the end-of-chain sentinel; it must be
     distinct from every &linear->array[i].  NOTE(review): being
     static, this is shared across calls -- presumably safe because
     callers serialize on object_mutex; confirm.  */
  static fde *marker;
  size_t count = linear->count;
  fde **chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct. Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  if (sizeof (fde *) != sizeof (fde **))
    abort ();

  for (i = 0; i < count; i++)
    {
      fde **probe;

      /* Pop chain entries that compare greater than the new element;
	 they are not part of the monotone subsequence ending here.  */
      for (probe = chain_end;
	   probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
	   probe = chain_end)
	{
	  chain_end = (fde **) erratic->array[probe - linear->array];
	  erratic->array[probe - linear->array] = NULL;
	}
      erratic->array[i] = (fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497
/* Exchange two fde pointers; #undef'd at the end of frame_heapsort.  */
#define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.
   A[LO..HI) is the region being heapified; I is a parent index and
   J its larger child.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, fde **a,
		int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      /* Pick the larger of the two children.  */
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
	++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
	{
	  SWAP (a[i], a[j]);
	  i = j;
	}
      else
	break;
    }
}

498 499
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

532
/* Merge V1 and V2, both sorted, and put the result into V1.
   Works back-to-front in place: V1's storage must have room for
   v1->count + v2->count entries (guaranteed because both vectors
   were sized for the full count in start_fde_sort).  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
	{
	  /* Shift V1 entries greater than FDE2 up, then drop FDE2 into
	     the gap at its final position i1+i2.  */
	  i2--;
	  fde2 = v2->array[i2];
	  while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
	    {
	      v1->array[i1+i2] = v1->array[i1-1];
	      i1--;
	    }
	  v1->array[i1+i2] = fde2;
	}
      while (i2 > 0);
      v1->count += v2->count;
    }
}

560 561
/* Finish the sort started by start_fde_sort: split the accumulated
   entries, heapsort the erratic part, and merge it back into LINEAR,
   which ends up fully sorted.  COUNT is a consistency check against
   the number of fdes actually inserted.  */
static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  if (accu->linear && accu->linear->count != count)
    abort ();

  /* Pick the cheapest comparison routine the object's encodings allow.  */
  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      if (accu->linear->count + accu->erratic->count != count)
	abort ();
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}

592

Kazu Hirata committed
593
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  (Caching LAST_CIE avoids re-parsing the
	 CIE for runs of FDEs sharing one.)  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  base = base_from_object (encoding, ob);
	  if (ob->s.b.encoding == DW_EH_PE_omit)
	    ob->s.b.encoding = encoding;
	  else if (ob->s.b.encoding != encoding)
	    ob->s.b.mixed_encoding = 1;
	}

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
				    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (1L << (mask << 3)) - 1;
      else
	mask = -1;

      if ((pc_begin & mask) == 0)
	continue;

      /* Track the lowest code address covered by any fde.  */
      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
	ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

652 653
/* Walk the fde table beginning at THIS_FDE and insert every real
   (non-CIE, non-deleted) fde into ACCU for sorting.  Mirrors the
   filtering done by classify_object_over_fdes.  */
static void
add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  /* A zero pc_begin marks a deleted link-once function.  */
	  if (*(_Unwind_Ptr *) this_fde->pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (1L << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}

710 711 712 713
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  /* A cached nonzero count means a previous call already classified
     the object (but failed to allocate the sort arrays).  */
  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    count += classify_object_over_fdes (ob, *p);
	}
      else
	count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  /* No memory for the sort arrays: leave the object unsorted and let
     search_object fall back to a linear scan.  */
  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

765 766 767 768 769 770
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  Returns the matching fde, or NULL.  */

static fde *
linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  pc_begin = ((_Unwind_Ptr *) this_fde->pc_begin)[0];
	  pc_range = ((_Unwind_Ptr *) this_fde->pc_begin)[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const char *p;

	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (1L << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      /* Unsigned compare: matches iff pc_begin <= pc < pc_begin + pc_range.  */
      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

/* Variant for objects whose FDEs all use raw absolute pointers.  */
static inline fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  /* Invariant: any match lies in array[lo..hi).  */
  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;

      pc_begin = ((void **) f->pc_begin)[0];
      pc_range = ((uaddr *) f->pc_begin)[1];

      if (pc < pc_begin)
	hi = i;
      else if (pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
Teemu Torma committed
864

865 866 867 868 869 870 871
/* Binary-search variant for objects with one (non-absptr) pointer
   encoding shared by every FDE.  */
static inline fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  /* Invariant: any match lies in array[lo..hi).  */
  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const char *p;

      /* pc_range follows pc_begin and uses the same value format
	 without any base adjustment.  */
      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
					&pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

/* Binary-search variant for objects whose FDEs use differing
   encodings; each probe re-derives the encoding from the FDE's CIE.  */
static inline fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  /* Invariant: any match lies in array[lo..hi).  */
  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
					base_from_object (encoding, ob),
					f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
925

926 927 928 929 930 931
/* Find the FDE in OB covering PC, sorting the object's fde array
   first if possible.  Returns NULL if PC is not covered.  Caller
   holds object_mutex.  */
static fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
	 that we've not processed this object before.  A quick range
	 check is in order.  */
      if (pc < ob->pc_begin)
	return NULL;
    }

  if (ob->s.b.sorted)
    {
      /* Dispatch to the cheapest binary search the encodings allow.  */
      if (ob->s.b.mixed_encoding)
	return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	return binary_search_unencoded_fdes (ob, pc);
      else
	return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow labourious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
	{
	  fde **p;
	  for (p = ob->u.array; *p ; p++)
	    {
	      fde *f = linear_search_fdes (ob, *p, pc);
	      if (f)
		return f;
	    }
	  return NULL;
	}
      else
	return linear_search_fdes (ob, ob->u.single, pc);
    }
}

/* Public entry point: locate the FDE covering PC across all
   registered objects, filling in BASES (tbase/dbase/func) for the
   matching object.  Returns NULL if PC is not covered.  */
fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;

      /* OB still points at the object that produced F; fill in the
	 base addresses the unwinder needs to decode its entries.  */
      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, (_Unwind_Ptr *)&bases->func);
    }

  return f;
}