/* An expandable hash table datatype.
   Copyright (C) 1999-2019 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov (vmakarov@cygnus.com).

This file is part of the libiberty library.
Libiberty is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.

Libiberty is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with libiberty; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor,
Boston, MA 02110-1301, USA.  */

/* This package implements basic hash table functionality.  It is possible
   to search for an entry, create an entry and destroy an entry.

   Elements in the table are generic pointers.

   The size of the table is not fixed; if the occupancy of the table
   grows too high the hash table will be expanded.

   The abstract data implementation is based on generalized Algorithm D
   from Knuth's book "The Art of Computer Programming".  The hash table is
   expanded by creating a new hash table and transferring the elements from
   the old table to the new table.  */
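
/* A minimal usage sketch (an editorial illustration, not compiled in):
   create a table keyed by pointer identity, insert an element, look it up
   again, and destroy the table.  The helper name "example_usage" is
   hypothetical.  */
#if 0
static void
example_usage (void *item)
{
  htab_t table = htab_create (13, htab_hash_pointer, htab_eq_pointer, NULL);
  void **slot = htab_find_slot (table, item, INSERT);

  *slot = item;				/* Publish the new entry.  */
  if (htab_find (table, item) != item)
    abort ();				/* The lookup should find what we stored.  */
  htab_delete (table);
}
#endif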

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <sys/types.h>

#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif

#include <stdio.h>

#include "libiberty.h"
#include "ansidecl.h"
#include "hashtab.h"

#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

static unsigned int higher_prime_index (unsigned long);
static hashval_t htab_mod_1 (hashval_t, hashval_t, hashval_t, int);
static hashval_t htab_mod (hashval_t, htab_t);
static hashval_t htab_mod_m2 (hashval_t, htab_t);
static hashval_t hash_pointer (const void *);
static int eq_pointer (const void *, const void *);
static int htab_expand (htab_t);
static PTR *find_empty_slot_for_expand (htab_t, hashval_t);

/* At some point, we could make these be NULL, and modify the
   hash-table routines to handle NULL specially; that would avoid
   function-call overhead for the common case of hashing pointers.  */
htab_hash htab_hash_pointer = hash_pointer;
htab_eq htab_eq_pointer = eq_pointer;

/* Table of primes and multiplicative inverses.

   Note that these are not minimally reduced inverses.  Unlike when generating
   code to divide by a constant, we want to be able to use the same algorithm
   all the time.  All of these inverses (are implied to) have bit 32 set.

   For the record, here's the function that computed the table; it's a 
   vastly simplified version of the function of the same name from gcc.  */

#if 0
unsigned int
ceil_log2 (unsigned int x)
{
  int i;
  for (i = 31; i >= 0 ; --i)
    if (x > (1u << i))
      return i+1;
  abort ();
}

unsigned int
choose_multiplier (unsigned int d, unsigned int *mlp, unsigned char *shiftp)
{
  unsigned long long mhigh;
  double nx;
  int lgup, post_shift;
  int pow, pow2;
  int n = 32, precision = 32;

  lgup = ceil_log2 (d);
  pow = n + lgup;
  pow2 = n + lgup - precision;

  nx = ldexp (1.0, pow) + ldexp (1.0, pow2);
  mhigh = nx / d;

  *shiftp = lgup - 1;
  *mlp = mhigh;
  return mhigh >> 32;
}
#endif

struct prime_ent
{
  hashval_t prime;
  hashval_t inv;
  hashval_t inv_m2;	/* inverse of prime-2 */
  hashval_t shift;
};

static struct prime_ent const prime_tab[] = {
  {          7, 0x24924925, 0x9999999b, 2 },
  {         13, 0x3b13b13c, 0x745d1747, 3 },
  {         31, 0x08421085, 0x1a7b9612, 4 },
  {         61, 0x0c9714fc, 0x15b1e5f8, 5 },
  {        127, 0x02040811, 0x0624dd30, 6 },
  {        251, 0x05197f7e, 0x073260a5, 7 },
  {        509, 0x01824366, 0x02864fc8, 8 },
  {       1021, 0x00c0906d, 0x014191f7, 9 },
  {       2039, 0x0121456f, 0x0161e69e, 10 },
  {       4093, 0x00300902, 0x00501908, 11 },
  {       8191, 0x00080041, 0x00180241, 12 },
  {      16381, 0x000c0091, 0x00140191, 13 },
  {      32749, 0x002605a5, 0x002a06e6, 14 },
  {      65521, 0x000f00e2, 0x00110122, 15 },
  {     131071, 0x00008001, 0x00018003, 16 },
  {     262139, 0x00014002, 0x0001c004, 17 },
  {     524287, 0x00002001, 0x00006001, 18 },
  {    1048573, 0x00003001, 0x00005001, 19 },
  {    2097143, 0x00004801, 0x00005801, 20 },
  {    4194301, 0x00000c01, 0x00001401, 21 },
  {    8388593, 0x00001e01, 0x00002201, 22 },
  {   16777213, 0x00000301, 0x00000501, 23 },
  {   33554393, 0x00001381, 0x00001481, 24 },
  {   67108859, 0x00000141, 0x000001c1, 25 },
  {  134217689, 0x000004e1, 0x00000521, 26 },
  {  268435399, 0x00000391, 0x000003b1, 27 },
  {  536870909, 0x00000019, 0x00000029, 28 },
  { 1073741789, 0x0000008d, 0x00000095, 29 },
  { 2147483647, 0x00000003, 0x00000007, 30 },
  /* Avoid "decimal constant so large it is unsigned" for 4294967291.  */
  { 0xfffffffb, 0x00000006, 0x00000008, 31 }
};

/* The following function returns an index into the above table of the
   nearest prime number which is greater than N, and near a power of two. */

static unsigned int
higher_prime_index (unsigned long n)
{
  unsigned int low = 0;
  unsigned int high = sizeof(prime_tab) / sizeof(prime_tab[0]);

  while (low != high)
    {
      unsigned int mid = low + (high - low) / 2;
      if (n > prime_tab[mid].prime)
	low = mid + 1;
      else
	high = mid;
    }

  /* If we've run out of primes, abort.  */
  if (n > prime_tab[low].prime)
    {
      fprintf (stderr, "Cannot find prime bigger than %lu\n", n);
      abort ();
    }

  return low;
}
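
/* For example (an editorial note), higher_prime_index (100) returns 4,
   the index of the prime 127 in prime_tab above.  */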

/* Returns non-zero if P1 and P2 are equal.  */

static int
eq_pointer (const PTR p1, const PTR p2)
{
  return p1 == p2;
}


/* The parens around the function names in the next two definitions
   are essential in order to prevent macro expansions of the name.
   The bodies, however, are expanded as expected, so they are not
   recursive definitions.  */

/* Return the current size of given hash table.  */

#define htab_size(htab)  ((htab)->size)

size_t
(htab_size) (htab_t htab)
{
  return htab_size (htab);
}

/* Return the current number of elements in given hash table. */

#define htab_elements(htab)  ((htab)->n_elements - (htab)->n_deleted)

size_t
(htab_elements) (htab_t htab)
{
  return htab_elements (htab);
}

/* Return X % Y.  */

static inline hashval_t
htab_mod_1 (hashval_t x, hashval_t y, hashval_t inv, int shift)
{
  /* The multiplicative inverses computed above are for 32-bit types, and
     require that we be able to compute a highpart multiply.  */
#ifdef UNSIGNED_64BIT_TYPE
  __extension__ typedef UNSIGNED_64BIT_TYPE ull;
  if (sizeof (hashval_t) * CHAR_BIT <= 32)
    {
      hashval_t t1, t2, t3, t4, q, r;

      t1 = ((ull)x * inv) >> 32;
      t2 = x - t1;
      t3 = t2 >> 1;
      t4 = t1 + t3;
      q  = t4 >> shift;
      r  = x - (q * y);

      return r;
    }
#endif

  /* Otherwise just use the native division routines.  */
  return x % y;
}
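
/* As a worked example of the scheme above (an editorial illustration): for
   y = 7 the table stores inv = 0x24924925 and shift = 2.  Taking x = 100,
   t1 = ((unsigned long long) 100 * 0x24924925) >> 32 = 14, t2 = 86, t3 = 43,
   t4 = 57, q = 57 >> 2 = 14 and r = 100 - 14 * 7 = 2, which agrees with
   100 % 7.  */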

/* Compute the primary hash for HASH given HTAB's current size.  */

static inline hashval_t
htab_mod (hashval_t hash, htab_t htab)
{
  const struct prime_ent *p = &prime_tab[htab->size_prime_index];
  return htab_mod_1 (hash, p->prime, p->inv, p->shift);
}

/* Compute the secondary hash for HASH given HTAB's current size.  */

static inline hashval_t
htab_mod_m2 (hashval_t hash, htab_t htab)
{
  const struct prime_ent *p = &prime_tab[htab->size_prime_index];
  return 1 + htab_mod_1 (hash, p->prime - 2, p->inv_m2, p->shift);
}

/* This function creates a table whose length is slightly larger than the
   given source length.  The created hash table is initialized as empty
   (all the hash table entries are HTAB_EMPTY_ENTRY).  The function
   returns the created hash table, or NULL if memory allocation fails.  */

htab_t
htab_create_alloc (size_t size, htab_hash hash_f, htab_eq eq_f,
                   htab_del del_f, htab_alloc alloc_f, htab_free free_f)
{
  return htab_create_typed_alloc (size, hash_f, eq_f, del_f, alloc_f, alloc_f,
				  free_f);
}

/* As above, but uses the variants of ALLOC_F and FREE_F which accept
   an extra argument.  */

htab_t
htab_create_alloc_ex (size_t size, htab_hash hash_f, htab_eq eq_f,
		      htab_del del_f, void *alloc_arg,
		      htab_alloc_with_arg alloc_f,
		      htab_free_with_arg free_f)
{
  htab_t result;
  unsigned int size_prime_index;

  size_prime_index = higher_prime_index (size);
  size = prime_tab[size_prime_index].prime;

  result = (htab_t) (*alloc_f) (alloc_arg, 1, sizeof (struct htab));
  if (result == NULL)
    return NULL;
  result->entries = (PTR *) (*alloc_f) (alloc_arg, size, sizeof (PTR));
  if (result->entries == NULL)
    {
      if (free_f != NULL)
	(*free_f) (alloc_arg, result);
      return NULL;
    }
  result->size = size;
  result->size_prime_index = size_prime_index;
  result->hash_f = hash_f;
  result->eq_f = eq_f;
  result->del_f = del_f;
  result->alloc_arg = alloc_arg;
  result->alloc_with_arg_f = alloc_f;
  result->free_with_arg_f = free_f;
  return result;
}

/*

@deftypefn Supplemental htab_t htab_create_typed_alloc (size_t @var{size}, @
htab_hash @var{hash_f}, htab_eq @var{eq_f}, htab_del @var{del_f}, @
htab_alloc @var{alloc_tab_f}, htab_alloc @var{alloc_f}, @
htab_free @var{free_f})

This function creates a hash table that uses two different allocators,
@var{alloc_tab_f} and @var{alloc_f}, to allocate the table itself and its
entries respectively.  This is useful when variables of different types
need to be allocated with different allocators.

The created hash table is slightly larger than @var{size} and it is
initially empty (all the hash table entries are @code{HTAB_EMPTY_ENTRY}).
The function returns the created hash table, or @code{NULL} if memory
allocation fails.

@end deftypefn

*/

htab_t
htab_create_typed_alloc (size_t size, htab_hash hash_f, htab_eq eq_f,
			 htab_del del_f, htab_alloc alloc_tab_f,
			 htab_alloc alloc_f, htab_free free_f)
{
  htab_t result;
  unsigned int size_prime_index;

  size_prime_index = higher_prime_index (size);
  size = prime_tab[size_prime_index].prime;

  result = (htab_t) (*alloc_tab_f) (1, sizeof (struct htab));
  if (result == NULL)
    return NULL;
  result->entries = (PTR *) (*alloc_f) (size, sizeof (PTR));
  if (result->entries == NULL)
    {
      if (free_f != NULL)
	(*free_f) (result);
      return NULL;
    }
  result->size = size;
  result->size_prime_index = size_prime_index;
  result->hash_f = hash_f;
  result->eq_f = eq_f;
  result->del_f = del_f;
  result->alloc_f = alloc_f;
  result->free_f = free_f;
  return result;
}
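
/* A small usage sketch (an editorial illustration, not compiled in): the
   table header is allocated with xcalloc, which aborts on failure, while
   the entry array uses plain calloc, whose failure htab_create_typed_alloc
   reports by returning NULL.  */
#if 0
  htab_t table = htab_create_typed_alloc (128, htab_hash_pointer,
					  htab_eq_pointer, NULL,
					  xcalloc, calloc, free);
#endif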


/* Update the function pointers and allocation parameter in the htab_t.  */

void
htab_set_functions_ex (htab_t htab, htab_hash hash_f, htab_eq eq_f,
                       htab_del del_f, PTR alloc_arg,
                       htab_alloc_with_arg alloc_f, htab_free_with_arg free_f)
{
  htab->hash_f = hash_f;
  htab->eq_f = eq_f;
  htab->del_f = del_f;
  htab->alloc_arg = alloc_arg;
  htab->alloc_with_arg_f = alloc_f;
  htab->free_with_arg_f = free_f;
}

/* These functions exist solely for backward compatibility.  */

#undef htab_create
htab_t
htab_create (size_t size, htab_hash hash_f, htab_eq eq_f, htab_del del_f)
{
  return htab_create_alloc (size, hash_f, eq_f, del_f, xcalloc, free);
}

htab_t
htab_try_create (size_t size, htab_hash hash_f, htab_eq eq_f, htab_del del_f)
{
  return htab_create_alloc (size, hash_f, eq_f, del_f, calloc, free);
}

/* This function frees all memory allocated for the given hash table.
   Naturally the hash table must already exist.  */

void
htab_delete (htab_t htab)
{
  size_t size = htab_size (htab);
  PTR *entries = htab->entries;
  int i;

  if (htab->del_f)
    for (i = size - 1; i >= 0; i--)
      if (entries[i] != HTAB_EMPTY_ENTRY && entries[i] != HTAB_DELETED_ENTRY)
	(*htab->del_f) (entries[i]);

  if (htab->free_f != NULL)
    {
      (*htab->free_f) (entries);
      (*htab->free_f) (htab);
    }
  else if (htab->free_with_arg_f != NULL)
    {
      (*htab->free_with_arg_f) (htab->alloc_arg, entries);
      (*htab->free_with_arg_f) (htab->alloc_arg, htab);
    }
}

/* This function clears all entries in the given hash table.  */

void
htab_empty (htab_t htab)
{
  size_t size = htab_size (htab);
  PTR *entries = htab->entries;
  int i;

  if (htab->del_f)
    for (i = size - 1; i >= 0; i--)
      if (entries[i] != HTAB_EMPTY_ENTRY && entries[i] != HTAB_DELETED_ENTRY)
	(*htab->del_f) (entries[i]);

  /* Instead of clearing a megabyte, downsize the table.  */
  if (size > 1024*1024 / sizeof (PTR))
    {
      int nindex = higher_prime_index (1024 / sizeof (PTR));
      int nsize = prime_tab[nindex].prime;

      if (htab->free_f != NULL)
	(*htab->free_f) (htab->entries);
      else if (htab->free_with_arg_f != NULL)
	(*htab->free_with_arg_f) (htab->alloc_arg, htab->entries);
      if (htab->alloc_with_arg_f != NULL)
	htab->entries = (PTR *) (*htab->alloc_with_arg_f) (htab->alloc_arg, nsize,
						           sizeof (PTR *));
      else
	htab->entries = (PTR *) (*htab->alloc_f) (nsize, sizeof (PTR *));
      htab->size = nsize;
      htab->size_prime_index = nindex;
    }
  else
    memset (entries, 0, size * sizeof (PTR));
  htab->n_deleted = 0;
  htab->n_elements = 0;
}

/* Similar to htab_find_slot, but without several unwanted side effects:
    - Does not call htab->eq_f when it finds an existing entry.
    - Does not change the count of elements/searches/collisions in the
      hash table.
   This function also assumes there are no deleted entries in the table.
   HASH is the hash value for the element to be inserted.  */

static PTR *
find_empty_slot_for_expand (htab_t htab, hashval_t hash)
{
  hashval_t index = htab_mod (hash, htab);
  size_t size = htab_size (htab);
  PTR *slot = htab->entries + index;
  hashval_t hash2;

  if (*slot == HTAB_EMPTY_ENTRY)
    return slot;
  else if (*slot == HTAB_DELETED_ENTRY)
    abort ();

  hash2 = htab_mod_m2 (hash, htab);
  for (;;)
    {
      index += hash2;
      if (index >= size)
	index -= size;

      slot = htab->entries + index;
      if (*slot == HTAB_EMPTY_ENTRY)
	return slot;
      else if (*slot == HTAB_DELETED_ENTRY)
	abort ();
    }
}

/* The following function changes the size of the memory allocated for the
   entries and reinserts the table elements.  The occupancy of the table
   after the call will be about 50%.  Naturally the hash table must already
   exist.  Remember also that the table entries are relocated.  If memory
   allocation failures are allowed, this function will return zero,
   indicating that the table could not be expanded.  If all goes well, it
   will return a non-zero value.  */

static int
htab_expand (htab_t htab)
{
  PTR *oentries;
  PTR *olimit;
  PTR *p;
  PTR *nentries;
  size_t nsize, osize, elts;
  unsigned int oindex, nindex;

  oentries = htab->entries;
  oindex = htab->size_prime_index;
  osize = htab->size;
  olimit = oentries + osize;
  elts = htab_elements (htab);

  /* Resize only when the table, after removal of deleted elements, would
     be either too full or too empty.  */
  if (elts * 2 > osize || (elts * 8 < osize && osize > 32))
    {
      nindex = higher_prime_index (elts * 2);
      nsize = prime_tab[nindex].prime;
    }
  else
    {
      nindex = oindex;
      nsize = osize;
    }

  if (htab->alloc_with_arg_f != NULL)
    nentries = (PTR *) (*htab->alloc_with_arg_f) (htab->alloc_arg, nsize,
						  sizeof (PTR *));
  else
    nentries = (PTR *) (*htab->alloc_f) (nsize, sizeof (PTR *));
  if (nentries == NULL)
    return 0;
  htab->entries = nentries;
  htab->size = nsize;
  htab->size_prime_index = nindex;
  htab->n_elements -= htab->n_deleted;
  htab->n_deleted = 0;

  p = oentries;
  do
    {
      PTR x = *p;

      if (x != HTAB_EMPTY_ENTRY && x != HTAB_DELETED_ENTRY)
	{
	  PTR *q = find_empty_slot_for_expand (htab, (*htab->hash_f) (x));

	  *q = x;
	}

      p++;
    }
  while (p < olimit);

  if (htab->free_f != NULL)
    (*htab->free_f) (oentries);
  else if (htab->free_with_arg_f != NULL)
    (*htab->free_with_arg_f) (htab->alloc_arg, oentries);
  return 1;
}

/* This function searches for a hash table entry equal to the given
   element.  It cannot be used to insert or delete an element.  */

PTR
htab_find_with_hash (htab_t htab, const PTR element, hashval_t hash)
{
  hashval_t index, hash2;
  size_t size;
  PTR entry;

  htab->searches++;
  size = htab_size (htab);
  index = htab_mod (hash, htab);

  entry = htab->entries[index];
  if (entry == HTAB_EMPTY_ENTRY
      || (entry != HTAB_DELETED_ENTRY && (*htab->eq_f) (entry, element)))
    return entry;

  hash2 = htab_mod_m2 (hash, htab);
  for (;;)
    {
      htab->collisions++;
      index += hash2;
      if (index >= size)
	index -= size;

      entry = htab->entries[index];
      if (entry == HTAB_EMPTY_ENTRY
	  || (entry != HTAB_DELETED_ENTRY && (*htab->eq_f) (entry, element)))
	return entry;
    }
}

/* Like htab_find_with_hash, but compute the hash value from the
   element.  */

PTR
htab_find (htab_t htab, const PTR element)
{
  return htab_find_with_hash (htab, element, (*htab->hash_f) (element));
}

/* This function searches for a hash table slot containing an entry
   equal to the given element.  To delete an entry, call this with
   insert=NO_INSERT, then call htab_clear_slot on the slot returned
   (possibly after doing some checks).  To insert an entry, call this
   with insert=INSERT, then write the value you want into the returned
   slot.  When inserting an entry, NULL may be returned if memory
   allocation fails.  */

PTR *
htab_find_slot_with_hash (htab_t htab, const PTR element,
                          hashval_t hash, enum insert_option insert)
{
  PTR *first_deleted_slot;
  hashval_t index, hash2;
  size_t size;
  PTR entry;

  size = htab_size (htab);
  if (insert == INSERT && size * 3 <= htab->n_elements * 4)
    {
      if (htab_expand (htab) == 0)
	return NULL;
      size = htab_size (htab);
    }

  index = htab_mod (hash, htab);

  htab->searches++;
  first_deleted_slot = NULL;

  entry = htab->entries[index];
  if (entry == HTAB_EMPTY_ENTRY)
    goto empty_entry;
  else if (entry == HTAB_DELETED_ENTRY)
    first_deleted_slot = &htab->entries[index];
  else if ((*htab->eq_f) (entry, element))
    return &htab->entries[index];
      
  hash2 = htab_mod_m2 (hash, htab);
  for (;;)
    {
      htab->collisions++;
      index += hash2;
      if (index >= size)
	index -= size;
      
      entry = htab->entries[index];
      if (entry == HTAB_EMPTY_ENTRY)
	goto empty_entry;
      else if (entry == HTAB_DELETED_ENTRY)
	{
	  if (!first_deleted_slot)
	    first_deleted_slot = &htab->entries[index];
	}
      else if ((*htab->eq_f) (entry, element))
	return &htab->entries[index];
    }

 empty_entry:
  if (insert == NO_INSERT)
    return NULL;

  if (first_deleted_slot)
    {
      htab->n_deleted--;
      *first_deleted_slot = HTAB_EMPTY_ENTRY;
      return first_deleted_slot;
    }

  htab->n_elements++;
  return &htab->entries[index];
}
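
/* A small usage sketch (an editorial illustration, not compiled in): the
   deletion pattern described above, NO_INSERT followed by htab_clear_slot.
   The helper name "example_remove" is hypothetical.  */
#if 0
static void
example_remove (htab_t table, void *elt)
{
  void **slot
    = htab_find_slot_with_hash (table, elt, (*table->hash_f) (elt), NO_INSERT);

  if (slot != NULL)
    htab_clear_slot (table, slot);	/* Runs del_f and marks the slot deleted.  */
}
#endif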

/* Like htab_find_slot_with_hash, but compute the hash value from the
   element.  */

PTR *
htab_find_slot (htab_t htab, const PTR element, enum insert_option insert)
{
  return htab_find_slot_with_hash (htab, element, (*htab->hash_f) (element),
				   insert);
}

/* This function deletes an element with the given value from the hash
   table (the hash is computed from the element).  If there is no matching
   element in the hash table, this function does nothing.  */

void
htab_remove_elt (htab_t htab, PTR element)
{
  htab_remove_elt_with_hash (htab, element, (*htab->hash_f) (element));
}


/* This function deletes an element with the given value from the hash
   table.  If there is no matching element in the hash table, this
   function does nothing.  */

void
htab_remove_elt_with_hash (htab_t htab, PTR element, hashval_t hash)
{
  PTR *slot;

  slot = htab_find_slot_with_hash (htab, element, hash, NO_INSERT);
  if (slot == NULL)
    return;

  if (htab->del_f)
    (*htab->del_f) (*slot);

  *slot = HTAB_DELETED_ENTRY;
  htab->n_deleted++;
}

/* This function clears a specified slot in a hash table.  It is
   useful when you've already done the lookup and don't want to do it
   again.  */

void
htab_clear_slot (htab_t htab, PTR *slot)
{
  if (slot < htab->entries || slot >= htab->entries + htab_size (htab)
      || *slot == HTAB_EMPTY_ENTRY || *slot == HTAB_DELETED_ENTRY)
    abort ();

  if (htab->del_f)
    (*htab->del_f) (*slot);

  *slot = HTAB_DELETED_ENTRY;
  htab->n_deleted++;
}

/* This function scans over the entire hash table calling
   CALLBACK for each live entry.  If CALLBACK returns false,
   the iteration stops.  INFO is passed as CALLBACK's second
   argument.  */

void
htab_traverse_noresize (htab_t htab, htab_trav callback, PTR info)
{
  PTR *slot;
  PTR *limit;
  
  slot = htab->entries;
  limit = slot + htab_size (htab);

  do
    {
      PTR x = *slot;

      if (x != HTAB_EMPTY_ENTRY && x != HTAB_DELETED_ENTRY)
	if (!(*callback) (slot, info))
	  break;
    }
  while (++slot < limit);
}

/* Like htab_traverse_noresize, but does resize the table when it is
   too empty, to improve the efficiency of subsequent calls.  */

void
htab_traverse (htab_t htab, htab_trav callback, PTR info)
{
  size_t size = htab_size (htab);
  if (htab_elements (htab) * 8 < size && size > 32)
    htab_expand (htab);

  htab_traverse_noresize (htab, callback, info);
}
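
/* A small usage sketch (an editorial illustration, not compiled in): a
   traversal callback that counts the live entries through INFO.  The names
   "count_entry" and "count_live_entries" are hypothetical.  */
#if 0
static int
count_entry (void **slot ATTRIBUTE_UNUSED, void *info)
{
  ++*(size_t *) info;
  return 1;			/* Non-zero means continue the traversal.  */
}

static size_t
count_live_entries (htab_t table)
{
  size_t n = 0;

  htab_traverse (table, count_entry, &n);
  return n;
}
#endif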

/* Return the ratio of collisions to searches over all work with the
   given hash table.  */

double
htab_collisions (htab_t htab)
{
  if (htab->searches == 0)
    return 0.0;

  return (double) htab->collisions / (double) htab->searches;
}

/* Hash P as a null-terminated string.

   Copied from gcc/hashtable.c.  Zack had the following to say with respect
   to applicability, though note that unlike hashtable.c, this hash table
   implementation re-hashes rather than chain buckets.

   http://gcc.gnu.org/ml/gcc-patches/2001-08/msg01021.html
   From: Zack Weinberg <zackw@panix.com>
   Date: Fri, 17 Aug 2001 02:15:56 -0400

   I got it by extracting all the identifiers from all the source code
   I had lying around in mid-1999, and testing many recurrences of
   the form "H_n = H_{n-1} * K + c_n * L + M" where K, L, M were either
   prime numbers or the appropriate identity.  This was the best one.
   I don't remember exactly what constituted "best", except I was
   looking at bucket-length distributions mostly.
   
   So it should be very good at hashing identifiers, but might not be
   as good at arbitrary strings.
   
   I'll add that it thoroughly trounces the hash functions recommended
   for this use at http://burtleburtle.net/bob/hash/index.html, both
   on speed and bucket distribution.  I haven't tried it against the
   function they just started using for Perl's hashes.  */

hashval_t
htab_hash_string (const PTR p)
{
  const unsigned char *str = (const unsigned char *) p;
  hashval_t r = 0;
  unsigned char c;

  while ((c = *str++) != 0)
    r = r * 67 + c - 113;

  return r;
}
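
/* A small usage sketch (an editorial illustration, not compiled in): pairing
   htab_hash_string with a caller-supplied string equality predicate.  The
   helper name "eq_string" is hypothetical.  */
#if 0
static int
eq_string (const void *p1, const void *p2)
{
  return strcmp ((const char *) p1, (const char *) p2) == 0;
}

/* ... htab_t table = htab_create (31, htab_hash_string, eq_string, free); ... */
#endif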

/* DERIVED FROM:
--------------------------------------------------------------------
lookup2.c, by Bob Jenkins, December 1996, Public Domain.
hash(), hash2(), hash3, and mix() are externally useful functions.
Routines to test the hash are included if SELF_TEST is defined.
You can use this free for any purpose.  It has no warranty.
--------------------------------------------------------------------
*/

/*
--------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.
For every delta with one or two bit set, and the deltas of all three
  high bits or all three low bits, whether the original value of a,b,c
  is almost all zero or is uniformly distributed,
* If mix() is run forward or backward, at least 32 bits in a,b,c
  have at least 1/4 probability of changing.
* If mix() is run forward, every bit of c will change between 1/3 and
  2/3 of the time.  (Well, 22/100 and 78/100 for some 2-bit deltas.)
mix() was built out of 36 single-cycle latency instructions in a 
  structure that could support 2x parallelism, like so:
      a -= b; 
      a -= c; x = (c>>13);
      b -= c; a ^= x;
      b -= a; x = (a<<8);
      c -= a; b ^= x;
      c -= b; x = (b>>13);
      ...
  Unfortunately, superscalar Pentiums and Sparcs can't take advantage 
  of that parallelism.  They've also turned some of those single-cycle
  latency instructions into multi-cycle latency instructions.  Still,
  this is the fastest good hash I could find.  There were about 2^^68
  to choose from.  I only looked at a billion or so.
--------------------------------------------------------------------
*/
/* same, but slower, works on systems that might have 8 byte hashval_t's */
#define mix(a,b,c) \
{ \
  a -= b; a -= c; a ^= (c>>13); \
  b -= c; b -= a; b ^= (a<< 8); \
  c -= a; c -= b; c ^= ((b&0xffffffff)>>13); \
  a -= b; a -= c; a ^= ((c&0xffffffff)>>12); \
  b -= c; b -= a; b = (b ^ (a<<16)) & 0xffffffff; \
  c -= a; c -= b; c = (c ^ (b>> 5)) & 0xffffffff; \
  a -= b; a -= c; a = (a ^ (c>> 3)) & 0xffffffff; \
  b -= c; b -= a; b = (b ^ (a<<10)) & 0xffffffff; \
  c -= a; c -= b; c = (c ^ (b>>15)) & 0xffffffff; \
}

/*
--------------------------------------------------------------------
hash() -- hash a variable-length key into a 32-bit value
  k     : the key (the unaligned variable-length array of bytes)
  len   : the length of the key, counting by bytes
  level : can be any 4-byte value
Returns a 32-bit value.  Every bit of the key affects every bit of
the return value.  Every 1-bit and 2-bit delta achieves avalanche.
About 36+6len instructions.

The best hash table sizes are powers of 2.  There is no need to do
mod a prime (mod is sooo slow!).  If you need less than 32 bits,
use a bitmask.  For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.

If you are hashing n strings (ub1 **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = hash( k[i], len[i], h);

By Bob Jenkins, 1996.  bob_jenkins@burtleburtle.net.  You may use this
code any way you wish, private, educational, or commercial.  It's free.

See http://burtleburtle.net/bob/hash/evahash.html
Use for hash table lookup, or anything where one collision in 2^32 is
acceptable.  Do NOT use for cryptographic purposes.
--------------------------------------------------------------------
*/

hashval_t
iterative_hash (const PTR k_in /* the key */,
                register size_t  length /* the length of the key */,
                register hashval_t initval /* the previous hash, or
                                              an arbitrary value */)
{
  register const unsigned char *k = (const unsigned char *)k_in;
  register hashval_t a,b,c,len;

  /* Set up the internal state */
  len = length;
  a = b = 0x9e3779b9;  /* the golden ratio; an arbitrary value */
  c = initval;           /* the previous hash value */

  /*---------------------------------------- handle most of the key */
#ifndef WORDS_BIGENDIAN
  /* On a little-endian machine, if the data is 4-byte aligned we can hash
     by word for better speed.  This gives nondeterministic results on
     big-endian machines.  */
  if (sizeof (hashval_t) == 4 && (((size_t)k)&3) == 0)
    while (len >= 12)    /* aligned */
      {
	a += *(hashval_t *)(k+0);
	b += *(hashval_t *)(k+4);
	c += *(hashval_t *)(k+8);
	mix(a,b,c);
	k += 12; len -= 12;
      }
  else /* unaligned */
#endif
    while (len >= 12)
      {
	a += (k[0] +((hashval_t)k[1]<<8) +((hashval_t)k[2]<<16) +((hashval_t)k[3]<<24));
	b += (k[4] +((hashval_t)k[5]<<8) +((hashval_t)k[6]<<16) +((hashval_t)k[7]<<24));
	c += (k[8] +((hashval_t)k[9]<<8) +((hashval_t)k[10]<<16)+((hashval_t)k[11]<<24));
	mix(a,b,c);
	k += 12; len -= 12;
      }

  /*------------------------------------- handle the last 11 bytes */
  c += length;
  switch(len)              /* all the case statements fall through */
    {
    case 11: c+=((hashval_t)k[10]<<24);	/* fall through */
    case 10: c+=((hashval_t)k[9]<<16);	/* fall through */
    case 9 : c+=((hashval_t)k[8]<<8);	/* fall through */
      /* the first byte of c is reserved for the length */
    case 8 : b+=((hashval_t)k[7]<<24);	/* fall through */
    case 7 : b+=((hashval_t)k[6]<<16);	/* fall through */
    case 6 : b+=((hashval_t)k[5]<<8);	/* fall through */
    case 5 : b+=k[4];			/* fall through */
    case 4 : a+=((hashval_t)k[3]<<24);	/* fall through */
    case 3 : a+=((hashval_t)k[2]<<16);	/* fall through */
    case 2 : a+=((hashval_t)k[1]<<8);	/* fall through */
    case 1 : a+=k[0];
      /* case 0: nothing left to add */
    }
  mix(a,b,c);
  /*-------------------------------------------- report the result */
  return c;
}

/* Returns a hash code for pointer P. Simplified version of evahash */

static hashval_t
hash_pointer (const PTR p)
{
  intptr_t v = (intptr_t) p;
  unsigned a, b, c;

  a = b = 0x9e3779b9;
  a += v >> (sizeof (intptr_t) * CHAR_BIT / 2);
  b += v & (((intptr_t) 1 << (sizeof (intptr_t) * CHAR_BIT / 2)) - 1);
  c = 0x42135234;
  mix (a, b, c);
  return c;
}