/*-------------------------------------------------------------------------
 *
 * dynahash.c
 *	  dynamic hash tables
 *
 * dynahash.c supports both local-to-a-backend hash tables and hash tables in
 * shared memory.  For shared hash tables, it is the caller's responsibility
 * to provide appropriate access interlocking.  The simplest convention is
 * that a single LWLock protects the whole hash table.  Searches (HASH_FIND or
 * hash_seq_search) need only shared lock, but any update requires exclusive
 * lock.  For heavily-used shared tables, the single-lock approach creates a
 * concurrency bottleneck, so we also support "partitioned" locking wherein
 * there are multiple LWLocks guarding distinct subsets of the table.  To use
 * a hash table in partitioned mode, the HASH_PARTITION flag must be given
 * to hash_create.  This prevents any attempt to split buckets on-the-fly.
 * Therefore, each hash bucket chain operates independently, and no fields
 * of the hash header change after init except nentries and freeList.
 * (A partitioned table uses multiple copies of those fields, guarded by
 * spinlocks, for additional concurrency.)
 * This lets any subset of the hash buckets be treated as a separately
 * lockable partition.  We expect callers to use the low-order bits of a
 * lookup key's hash value as a partition number --- this will work because
 * of the way calc_bucket() maps hash values to bucket numbers.
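 *
 * Illustrative sketch (not part of the original comment): a caller with a
 * 16-partition table would pick the partition lock from the hash value's
 * low-order bits before searching, roughly
 *
 *		hashcode = get_hash_value(htab, &key);
 *		partition = hashcode % 16;				-- low-order bits
 *		LWLockAcquire(partition_locks[partition], LW_SHARED);
 *
 * where partition_locks[] is a hypothetical caller-managed lock array.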
 *
 * For hash tables in shared memory, the memory allocator function should
 * match malloc's semantics of returning NULL on failure.  For hash tables
 * in local memory, we typically use palloc() which will throw error on
 * failure.  The code in this file has to cope with both cases.
 *
 * dynahash.c provides support for these types of lookup keys:
 *
 * 1. Null-terminated C strings (truncated if necessary to fit in keysize),
 * compared as though by strcmp().  This is the default behavior.
 *
 * 2. Arbitrary binary data of size keysize, compared as though by memcmp().
 * (Caller must ensure there are no undefined padding bits in the keys!)
 * This is selected by specifying HASH_BLOBS flag to hash_create.
 *
 * 3. More complex key behavior can be selected by specifying user-supplied
 * hashing, comparison, and/or key-copying functions.  At least a hashing
 * function must be supplied; comparison defaults to memcmp() and key copying
 * to memcpy() when a user-defined hashing function is selected.
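 *
 * For example (hypothetical caller code, not from this file), a custom hash
 * function for case 3 is supplied via the HASH_FUNCTION flag, leaving
 * comparison and key copying at their memcmp/memcpy defaults:
 *
 *		ctl.keysize = sizeof(MyKey);
 *		ctl.entrysize = sizeof(MyEntry);
 *		ctl.hash = my_hash_fn;			-- a caller-provided HashValueFunc
 *		htab = hash_create("t", 64, &ctl, HASH_ELEM | HASH_FUNCTION);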
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/hash/dynahash.c
 *
 *-------------------------------------------------------------------------
 */

/*
 * Original comments:
 *
 * Dynamic hashing, after CACM April 1988 pp 446-457, by Per-Ake Larson.
 * Coded into C, with minor code improvements, and with hsearch(3) interface,
 * by ejp@ausmelb.oz, Jul 26, 1988: 13:16;
 * also, hcreate/hdestroy routines added to simulate hsearch(3).
 *
 * These routines simulate hsearch(3) and family, with the important
 * difference that the hash table is dynamic - can grow indefinitely
 * beyond its original size (as supplied to hcreate()).
 *
 * Performance appears to be comparable to that of hsearch(3).
 * The 'source-code' options referred to in hsearch(3)'s 'man' page
 * are not implemented; otherwise functionality is identical.
 *
 * Compilation controls:
 * HASH_DEBUG controls some informative traces, mainly for debugging.
 * HASH_STATISTICS causes HashAccesses and HashCollisions to be maintained;
 * when combined with HASH_DEBUG, these are displayed by hdestroy().
 *
 * Problems & fixes to ejp@ausmelb.oz.  WARNING: relies on pre-processor
 * concatenation property, in probably unnecessary code 'optimization'.
 *
 * Modified margo@postgres.berkeley.edu February 1990
 *		added multiple table interface
 * Modified by sullivan@postgres.berkeley.edu April 1990
 *		changed ctl structure for shared memory
 */

#include "postgres.h"

#include <limits.h>

#include "access/xact.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
#include "utils/memutils.h"

/*
 * Constants
 *
 * A hash table has a top-level "directory", each of whose entries points
 * to a "segment" of ssize bucket headers.  The maximum number of hash
 * buckets is thus dsize * ssize (but dsize may be expansible).  Of course,
 * the number of records in the table can be larger, but we don't want a
 * whole lot of records per bucket or performance goes down.
 *
 * In a hash table allocated in shared memory, the directory cannot be
 * expanded because it must stay at a fixed address.  The directory size
 * should be selected using hash_select_dirsize (and you'd better have
 * a good idea of the maximum number of entries!).  For non-shared hash
 * tables, the initial directory size can be left at the default.
 */
#define DEF_SEGSIZE			   256
#define DEF_SEGSIZE_SHIFT	   8	/* must be log2(DEF_SEGSIZE) */
#define DEF_DIRSIZE			   256
#define DEF_FFACTOR			   1	/* default fill factor */

/* Number of freelists to be used for a partitioned hash table. */
#define NUM_FREELISTS			32

/* A hash bucket is a linked list of HASHELEMENTs */
typedef HASHELEMENT *HASHBUCKET;

/* A hash segment is an array of bucket headers */
typedef HASHBUCKET *HASHSEGMENT;

/*
 * Per-freelist data.
 *
 * In a partitioned hash table, each freelist is associated with a specific
 * set of hashcodes, as determined by the FREELIST_IDX() macro below.
 * nentries tracks the number of live hashtable entries having those hashcodes
 * (NOT the number of entries in the freelist, as you might expect).
 *
 * The coverage of a freelist might be more or less than one partition, so it
 * needs its own lock rather than relying on caller locking.  Relying on that
 * wouldn't work even if the coverage was the same, because of the occasional
 * need to "borrow" entries from another freelist; see get_hash_entry().
 *
 * Using an array of FreeListData instead of separate arrays of mutexes,
 * nentries and freeLists helps to reduce sharing of cache lines between
 * different mutexes.
 */
typedef struct
{
	slock_t		mutex;			/* spinlock for this freelist */
	long		nentries;		/* number of entries in associated buckets */
	HASHELEMENT *freeList;		/* chain of free elements */
} FreeListData;

/*
 * Header structure for a hash table --- contains all changeable info
 *
 * In a shared-memory hash table, the HASHHDR is in shared memory, while
 * each backend has a local HTAB struct.  For a non-shared table, there isn't
 * any functional difference between HASHHDR and HTAB, but we separate them
 * anyway to share code between shared and non-shared tables.
 */
struct HASHHDR
{
	/*
	 * The freelist can become a point of contention in high-concurrency hash
	 * tables, so we use an array of freelists, each with its own mutex and
	 * nentries count, instead of just a single one.  Although the freelists
	 * normally operate independently, we will scavenge entries from freelists
	 * other than a hashcode's default freelist when necessary.
	 *
	 * If the hash table is not partitioned, only freeList[0] is used and its
	 * spinlock is not used at all; callers' locking is assumed sufficient.
	 */
	FreeListData freeList[NUM_FREELISTS];

	/* These fields can change, but not in a partitioned table */
	/* Also, dsize can't change in a shared table, even if unpartitioned */
	long		dsize;			/* directory size */
	long		nsegs;			/* number of allocated segments (<= dsize) */
	uint32		max_bucket;		/* ID of maximum bucket in use */
	uint32		high_mask;		/* mask to modulo into entire table */
	uint32		low_mask;		/* mask to modulo into lower half of table */

	/* These fields are fixed at hashtable creation */
	Size		keysize;		/* hash key length in bytes */
	Size		entrysize;		/* total user element size in bytes */
	long		num_partitions; /* # partitions (must be power of 2), or 0 */
	long		ffactor;		/* target fill factor */
	long		max_dsize;		/* 'dsize' limit if directory is fixed size */
	long		ssize;			/* segment size --- must be power of 2 */
	int			sshift;			/* segment shift = log2(ssize) */
	int			nelem_alloc;	/* number of entries to allocate at once */

#ifdef HASH_STATISTICS

	/*
	 * Count statistics here.  NB: stats code doesn't bother with mutex, so
	 * counts could be corrupted a bit in a partitioned table.
	 */
	long		accesses;
	long		collisions;
#endif
};

#define IS_PARTITIONED(hctl)  ((hctl)->num_partitions != 0)

#define FREELIST_IDX(hctl, hashcode) \
	(IS_PARTITIONED(hctl) ? (hashcode) % NUM_FREELISTS : 0)
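
/*
 * Worked example (illustrative, not from the original source): in a
 * partitioned table, a hashcode of 0x12345678 selects freelist
 * 0x12345678 % 32 = 24, while an unpartitioned table always uses
 * freeList[0].
 */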

/*
 * Top control structure for a hashtable --- in a shared table, each backend
 * has its own copy (OK since no fields change at runtime)
 */
struct HTAB
{
	HASHHDR    *hctl;			/* => shared control information */
	HASHSEGMENT *dir;			/* directory of segment starts */
	HashValueFunc hash;			/* hash function */
	HashCompareFunc match;		/* key comparison function */
	HashCopyFunc keycopy;		/* key copying function */
	HashAllocFunc alloc;		/* memory allocator */
	MemoryContext hcxt;			/* memory context if default allocator used */
	char	   *tabname;		/* table name (for error messages) */
	bool		isshared;		/* true if table is in shared memory */
	bool		isfixed;		/* if true, don't enlarge */

	/* freezing a shared table isn't allowed, so we can keep state here */
	bool		frozen;			/* true = no more inserts allowed */

	/* We keep local copies of these fixed values to reduce contention */
	Size		keysize;		/* hash key length in bytes */
	long		ssize;			/* segment size --- must be power of 2 */
	int			sshift;			/* segment shift = log2(ssize) */
};

/*
 * Key (also entry) part of a HASHELEMENT
 */
#define ELEMENTKEY(helem)  (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))

/*
 * Obtain element pointer given pointer to key
 */
#define ELEMENT_FROM_KEY(key)  \
	((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))

/*
 * Fast MOD arithmetic, assuming that y is a power of 2 !
 */
#define MOD(x,y)			   ((x) & ((y)-1))
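
/*
 * For instance (illustrative): MOD(13, 8) = 13 & 7 = 5, the same as 13 % 8
 * but computed with a single AND --- which is why ssize and the bucket
 * masks must be powers of 2.
 */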

#ifdef HASH_STATISTICS
static long hash_accesses,
			hash_collisions,
			hash_expansions;
#endif


/*
 * Private function prototypes
 */
static void *DynaHashAlloc(Size size);
static HASHSEGMENT seg_alloc(HTAB *hashp);
static bool element_alloc(HTAB *hashp, int nelem, int freelist_idx);
static bool dir_realloc(HTAB *hashp);
static bool expand_table(HTAB *hashp);
static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx);
static void hdefault(HTAB *hashp);
static int	choose_nelem_alloc(Size entrysize);
static bool init_htab(HTAB *hashp, long nelem);
static void hash_corrupted(HTAB *hashp);
static long next_pow2_long(long num);
static int	next_pow2_int(long num);
static void register_seq_scan(HTAB *hashp);
static void deregister_seq_scan(HTAB *hashp);
static bool has_seq_scans(HTAB *hashp);


/*
 * memory allocation support
 */
static MemoryContext CurrentDynaHashCxt = NULL;

static void *
DynaHashAlloc(Size size)
{
	Assert(MemoryContextIsValid(CurrentDynaHashCxt));
	return MemoryContextAlloc(CurrentDynaHashCxt, size);
}


/*
 * HashCompareFunc for string keys
 *
 * Because we copy keys with strlcpy(), they will be truncated at keysize-1
 * bytes, so we can only compare that many ... hence strncmp is almost but
 * not quite the right thing.
 */
static int
string_compare(const char *key1, const char *key2, Size keysize)
{
	return strncmp(key1, key2, keysize - 1);
}

/************************** CREATE ROUTINES **********************/

/*
 * hash_create -- create a new dynamic hash table
 *
 *	tabname: a name for the table (for debugging purposes)
 *	nelem: maximum number of elements expected
 *	*info: additional table parameters, as indicated by flags
 *	flags: bitmask indicating which parameters to take from *info
 *
 * Note: for a shared-memory hashtable, nelem needs to be a pretty good
 * estimate, since we can't expand the table on the fly.  But an unshared
 * hashtable can be expanded on-the-fly, so it's better for nelem to be
 * on the small side and let the table grow if it's exceeded.  An overly
 * large nelem will penalize hash_seq_search speed without buying much.
 */
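
/*
 * Illustrative sketch (hypothetical caller code, not part of this file):
 * creating a local table mapping Oid keys to "MyEntry" entries, where the
 * entry struct's first field is the Oid key:
 *
 *		HASHCTL		ctl;
 *		HTAB	   *htab;
 *
 *		MemSet(&ctl, 0, sizeof(ctl));
 *		ctl.keysize = sizeof(Oid);
 *		ctl.entrysize = sizeof(MyEntry);
 *		htab = hash_create("My lookup table", 128, &ctl,
 *						   HASH_ELEM | HASH_BLOBS);
 */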

HTAB *
hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
	HTAB	   *hashp;
	HASHHDR    *hctl;

	/*
	 * For shared hash tables, we have a local hash header (HTAB struct) that
	 * we allocate in TopMemoryContext; all else is in shared memory.
	 *
	 * For non-shared hash tables, everything including the hash header is in
	 * a memory context created specially for the hash table --- this makes
	 * hash_destroy very simple.  The memory context is made a child of either
	 * a context specified by the caller, or TopMemoryContext if nothing is
	 * specified.
	 */
	if (flags & HASH_SHARED_MEM)
	{
		/* Set up to allocate the hash header */
		CurrentDynaHashCxt = TopMemoryContext;
	}
	else
	{
		/* Create the hash table's private memory context */
		if (flags & HASH_CONTEXT)
			CurrentDynaHashCxt = info->hcxt;
		else
			CurrentDynaHashCxt = TopMemoryContext;
		CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
												   "dynahash",
												   ALLOCSET_DEFAULT_SIZES);
	}

	/* Initialize the hash header, plus a copy of the table name */
	hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) + 1);
	MemSet(hashp, 0, sizeof(HTAB));

	hashp->tabname = (char *) (hashp + 1);
	strcpy(hashp->tabname, tabname);

	/* If we have a private context, label it with hashtable's name */
	if (!(flags & HASH_SHARED_MEM))
		MemoryContextSetIdentifier(CurrentDynaHashCxt, hashp->tabname);

	/*
	 * Select the appropriate hash function (see comments at head of file).
	 */
	if (flags & HASH_FUNCTION)
		hashp->hash = info->hash;
	else if (flags & HASH_BLOBS)
	{
		/* We can optimize hashing for common key sizes */
		Assert(flags & HASH_ELEM);
		if (info->keysize == sizeof(uint32))
			hashp->hash = uint32_hash;
		else
			hashp->hash = tag_hash;
	}
	else
		hashp->hash = string_hash;	/* default hash function */

	/*
	 * If you don't specify a match function, it defaults to string_compare if
	 * you used string_hash (either explicitly or by default) and to memcmp
	 * otherwise.
	 *
	 * Note: explicitly specifying string_hash is deprecated, because this
	 * might not work for callers in loadable modules on some platforms due to
	 * referencing a trampoline instead of the string_hash function proper.
	 * Just let it default, eh?
	 */
	if (flags & HASH_COMPARE)
		hashp->match = info->match;
	else if (hashp->hash == string_hash)
		hashp->match = (HashCompareFunc) string_compare;
	else
		hashp->match = memcmp;

	/*
	 * Similarly, the key-copying function defaults to strlcpy or memcpy.
	 */
	if (flags & HASH_KEYCOPY)
		hashp->keycopy = info->keycopy;
	else if (hashp->hash == string_hash)
		hashp->keycopy = (HashCopyFunc) strlcpy;
	else
		hashp->keycopy = memcpy;

	/* And select the entry allocation function, too. */
	if (flags & HASH_ALLOC)
		hashp->alloc = info->alloc;
	else
		hashp->alloc = DynaHashAlloc;

	if (flags & HASH_SHARED_MEM)
	{
		/*
		 * ctl structure and directory are preallocated for shared memory
		 * tables.  Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
		 * well.
		 */
		hashp->hctl = info->hctl;
		hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
		hashp->hcxt = NULL;
		hashp->isshared = true;

		/* hash table already exists, we're just attaching to it */
		if (flags & HASH_ATTACH)
		{
			/* make local copies of some heavily-used values */
			hctl = hashp->hctl;
			hashp->keysize = hctl->keysize;
			hashp->ssize = hctl->ssize;
			hashp->sshift = hctl->sshift;

			return hashp;
		}
	}
	else
	{
		/* setup hash table defaults */
		hashp->hctl = NULL;
		hashp->dir = NULL;
		hashp->hcxt = CurrentDynaHashCxt;
		hashp->isshared = false;
	}

	if (!hashp->hctl)
	{
		hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
		if (!hashp->hctl)
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory")));
	}

	hashp->frozen = false;

	hdefault(hashp);

	hctl = hashp->hctl;

	if (flags & HASH_PARTITION)
	{
		/* Doesn't make sense to partition a local hash table */
		Assert(flags & HASH_SHARED_MEM);

		/*
		 * The number of partitions had better be a power of 2. Also, it must
		 * be less than INT_MAX (see init_htab()), so call the int version of
		 * next_pow2.
		 */
		Assert(info->num_partitions == next_pow2_int(info->num_partitions));

		hctl->num_partitions = info->num_partitions;
	}

	if (flags & HASH_SEGMENT)
	{
		hctl->ssize = info->ssize;
		hctl->sshift = my_log2(info->ssize);
		/* ssize had better be a power of 2 */
		Assert(hctl->ssize == (1L << hctl->sshift));
	}
	if (flags & HASH_FFACTOR)
		hctl->ffactor = info->ffactor;

	/*
	 * SHM hash tables have fixed directory size passed by the caller.
	 */
	if (flags & HASH_DIRSIZE)
	{
		hctl->max_dsize = info->max_dsize;
		hctl->dsize = info->dsize;
	}

	/*
	 * hash table now allocates space for key and data but you have to say
	 * how much space to allocate
	 */
	if (flags & HASH_ELEM)
	{
		Assert(info->entrysize >= info->keysize);
		hctl->keysize = info->keysize;
		hctl->entrysize = info->entrysize;
	}

	/* make local copies of heavily-used constant fields */
	hashp->keysize = hctl->keysize;
	hashp->ssize = hctl->ssize;
	hashp->sshift = hctl->sshift;

	/* Build the hash directory structure */
	if (!init_htab(hashp, nelem))
		elog(ERROR, "failed to initialize hash table \"%s\"", hashp->tabname);

	/*
	 * For a shared hash table, preallocate the requested number of elements.
	 * This reduces problems with run-time out-of-shared-memory conditions.
	 *
	 * For a non-shared hash table, preallocate the requested number of
	 * elements if it's less than our chosen nelem_alloc.  This avoids wasting
	 * space if the caller correctly estimates a small table size.
	 */
	if ((flags & HASH_SHARED_MEM) ||
		nelem < hctl->nelem_alloc)
	{
		int			i,
					freelist_partitions,
					nelem_alloc,
					nelem_alloc_first;

		/*
		 * If hash table is partitioned, give each freelist an equal share of
		 * the initial allocation.  Otherwise only freeList[0] is used.
		 */
		if (IS_PARTITIONED(hashp->hctl))
			freelist_partitions = NUM_FREELISTS;
		else
			freelist_partitions = 1;

		nelem_alloc = nelem / freelist_partitions;
		if (nelem_alloc <= 0)
			nelem_alloc = 1;

		/*
		 * Make sure we'll allocate all the requested elements; freeList[0]
		 * gets the excess if the request isn't divisible by NUM_FREELISTS.
		 */
		if (nelem_alloc * freelist_partitions < nelem)
			nelem_alloc_first =
				nelem - nelem_alloc * (freelist_partitions - 1);
		else
			nelem_alloc_first = nelem_alloc;

		for (i = 0; i < freelist_partitions; i++)
		{
			int			temp = (i == 0) ? nelem_alloc_first : nelem_alloc;

			if (!element_alloc(hashp, temp, i))
				ereport(ERROR,
						(errcode(ERRCODE_OUT_OF_MEMORY),
						 errmsg("out of memory")));
		}
	}

	if (flags & HASH_FIXED_SIZE)
		hashp->isfixed = true;
	return hashp;
}

/*
 * Set default HASHHDR parameters.
 */
static void
hdefault(HTAB *hashp)
{
	HASHHDR    *hctl = hashp->hctl;

	MemSet(hctl, 0, sizeof(HASHHDR));

	hctl->dsize = DEF_DIRSIZE;
	hctl->nsegs = 0;

	/* rather pointless defaults for key & entry size */
	hctl->keysize = sizeof(char *);
	hctl->entrysize = 2 * sizeof(char *);

	hctl->num_partitions = 0;	/* not partitioned */

	hctl->ffactor = DEF_FFACTOR;

	/* table has no fixed maximum size */
	hctl->max_dsize = NO_MAX_DSIZE;

	hctl->ssize = DEF_SEGSIZE;
	hctl->sshift = DEF_SEGSIZE_SHIFT;

#ifdef HASH_STATISTICS
	hctl->accesses = hctl->collisions = 0;
#endif
}

/*
 * Given the user-specified entry size, choose nelem_alloc, ie, how many
 * elements to add to the hash table when we need more.
 */
static int
choose_nelem_alloc(Size entrysize)
{
	int			nelem_alloc;
	Size		elementSize;
	Size		allocSize;

	/* Each element has a HASHELEMENT header plus user data. */
	/* NB: this had better match element_alloc() */
	elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);

	/*
	 * The idea here is to choose nelem_alloc at least 32, but round up so
	 * that the allocation request will be a power of 2 or just less. This
	 * makes little difference for hash tables in shared memory, but for hash
	 * tables managed by palloc, the allocation request will be rounded up to
	 * a power of 2 anyway.  If we fail to take this into account, we'll waste
	 * as much as half the allocated space.
	 */
	allocSize = 32 * 4;			/* assume elementSize at least 8 */
	do
	{
		allocSize <<= 1;
		nelem_alloc = allocSize / elementSize;
	} while (nelem_alloc < 32);

	return nelem_alloc;
}
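
/*
 * Worked example (illustrative, not from the original source): if
 * MAXALIGN(sizeof(HASHELEMENT)) is 16 and entrysize rounds up to 56, then
 * elementSize = 72.  allocSize doubles 128 -> 256 -> ... -> 4096; at 4096,
 * nelem_alloc = 4096 / 72 = 56 >= 32, so each expansion requests 56
 * elements (4032 bytes) --- just under a power of 2, as intended.
 */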

/*
 * Compute derived fields of hctl and build the initial directory/segment
 * arrays
 */
static bool
init_htab(HTAB *hashp, long nelem)
{
	HASHHDR    *hctl = hashp->hctl;
	HASHSEGMENT *segp;
	int			nbuckets;
	int			nsegs;
	int			i;

	/*
	 * initialize mutexes if it's a partitioned table
	 */
	if (IS_PARTITIONED(hctl))
		for (i = 0; i < NUM_FREELISTS; i++)
			SpinLockInit(&(hctl->freeList[i].mutex));

	/*
	 * Divide number of elements by the fill factor to determine a desired
	 * number of buckets.  Allocate space for the next greater power of two
	 * number of buckets
	 */
	nbuckets = next_pow2_int((nelem - 1) / hctl->ffactor + 1);

	/*
	 * In a partitioned table, nbuckets must be at least equal to
	 * num_partitions; were it less, keys with apparently different partition
	 * numbers would map to the same bucket, breaking partition independence.
	 * (Normally nbuckets will be much bigger; this is just a safety check.)
	 */
	while (nbuckets < hctl->num_partitions)
		nbuckets <<= 1;

	hctl->max_bucket = hctl->low_mask = nbuckets - 1;
	hctl->high_mask = (nbuckets << 1) - 1;

	/*
	 * Figure number of directory segments needed, round up to a power of 2
	 */
	nsegs = (nbuckets - 1) / hctl->ssize + 1;
	nsegs = next_pow2_int(nsegs);

	/*
	 * Make sure directory is big enough. If pre-allocated directory is too
	 * small, choke (caller screwed up).
	 */
	if (nsegs > hctl->dsize)
	{
		if (!(hashp->dir))
			hctl->dsize = nsegs;
		else
			return false;
	}

	/* Allocate a directory */
	if (!(hashp->dir))
	{
		CurrentDynaHashCxt = hashp->hcxt;
		hashp->dir = (HASHSEGMENT *)
			hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
		if (!hashp->dir)
			return false;
	}

	/* Allocate initial segments */
	for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
	{
		*segp = seg_alloc(hashp);
		if (*segp == NULL)
			return false;
	}

	/* Choose number of entries to allocate at a time */
	hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);

#ifdef HASH_DEBUG
	fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n",
			"TABLE POINTER   ", hashp,
			"DIRECTORY SIZE  ", hctl->dsize,
			"SEGMENT SIZE    ", hctl->ssize,
			"SEGMENT SHIFT   ", hctl->sshift,
			"FILL FACTOR     ", hctl->ffactor,
			"MAX BUCKET      ", hctl->max_bucket,
			"HIGH MASK       ", hctl->high_mask,
			"LOW MASK        ", hctl->low_mask,
			"NSEGS           ", hctl->nsegs);
#endif
	return true;
}

/*
 * Estimate the space needed for a hashtable containing the given number
 * of entries of given size.
 * NOTE: this is used to estimate the footprint of hashtables in shared
 * memory; therefore it does not count HTAB which is in local memory.
 * NB: assumes that all hash structure parameters have default values!
 */
Size
hash_estimate_size(long num_entries, Size entrysize)
{
	Size		size;
	long		nBuckets,
				nSegments,
				nDirEntries,
				nElementAllocs,
				elementSize,
				elementAllocCnt;

	/* estimate number of buckets wanted */
	nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
	/* # of segments needed for nBuckets */
	nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
	/* directory entries */
	nDirEntries = DEF_DIRSIZE;
	while (nDirEntries < nSegments)
		nDirEntries <<= 1;		/* dir_alloc doubles dsize at each call */

	/* fixed control info */
	size = MAXALIGN(sizeof(HASHHDR));	/* but not HTAB, per above */
	/* directory */
	size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
	/* segments */
	size = add_size(size, mul_size(nSegments,
								   MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
	/* elements --- allocated in groups of choose_nelem_alloc() entries */
	elementAllocCnt = choose_nelem_alloc(entrysize);
	nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
	elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
	size = add_size(size,
					mul_size(nElementAllocs,
							 mul_size(elementAllocCnt, elementSize)));

	return size;
}

/*
 * Select an appropriate directory size for a hashtable with the given
 * maximum number of entries.
 * This is only needed for hashtables in shared memory, whose directories
 * cannot be expanded dynamically.
 * NB: assumes that all hash structure parameters have default values!
 *
 * XXX this had better agree with the behavior of init_htab()...
 */
long
hash_select_dirsize(long num_entries)
{
	long		nBuckets,
				nSegments,
				nDirEntries;

	/* estimate number of buckets wanted */
	nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
	/* # of segments needed for nBuckets */
	nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
	/* directory entries */
	nDirEntries = DEF_DIRSIZE;
	while (nDirEntries < nSegments)
		nDirEntries <<= 1;		/* dir_alloc doubles dsize at each call */

	return nDirEntries;
}
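
/*
 * Worked example (illustrative): for num_entries = 100000 with default
 * parameters, nBuckets = 131072, nSegments = 131072 / 256 = 512, and since
 * 512 exceeds DEF_DIRSIZE (256) the function returns 512.
 */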

/*
 * Compute the required initial memory allocation for a shared-memory
 * hashtable with the given parameters.  We need space for the HASHHDR
 * and for the (non expansible) directory.
 */
Size
hash_get_shared_size(HASHCTL *info, int flags)
{
	Assert(flags & HASH_DIRSIZE);
	Assert(info->dsize == info->max_dsize);
	return sizeof(HASHHDR) + info->dsize * sizeof(HASHSEGMENT);
}


/********************** DESTROY ROUTINES ************************/

void
hash_destroy(HTAB *hashp)
{
	if (hashp != NULL)
	{
		/* allocation method must be one we know how to free, too */
		Assert(hashp->alloc == DynaHashAlloc);
		/* so this hashtable must have its own context */
		Assert(hashp->hcxt != NULL);

		hash_stats("destroy", hashp);

		/*
		 * Free everything by destroying the hash table's memory context.
		 */
		MemoryContextDelete(hashp->hcxt);
	}
}

void
hash_stats(const char *where, HTAB *hashp)
{
#ifdef HASH_STATISTICS
	fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
			where, hashp->hctl->accesses, hashp->hctl->collisions);

	fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
			hash_get_num_entries(hashp), (long) hashp->hctl->keysize,
			hashp->hctl->max_bucket, hashp->hctl->nsegs);
	fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
			where, hash_accesses, hash_collisions);
	fprintf(stderr, "hash_stats: total expansions %ld\n",
			hash_expansions);
#endif
}

/*******************************SEARCH ROUTINES *****************************/


/*
 * get_hash_value -- exported routine to calculate a key's hash value
 *
 * We export this because for partitioned tables, callers need to compute
 * the partition number (from the low-order bits of the hash value) before
 * searching.
 */
uint32
get_hash_value(HTAB *hashp, const void *keyPtr)
{
	return hashp->hash(keyPtr, hashp->keysize);
}
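
/*
 * Illustrative sketch of the intended calling pattern for a partitioned
 * table (the lock-choosing helper is hypothetical):
 *
 *		hashcode = get_hash_value(htab, &key);
 *		lock = MyPartitionLock(hashcode);	-- maps low-order bits to an LWLock
 *		LWLockAcquire(lock, LW_SHARED);
 *		entry = hash_search_with_hash_value(htab, &key, hashcode,
 *											HASH_FIND, NULL);
 *		...
 *		LWLockRelease(lock);
 */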

/* Convert a hash value to a bucket number */
static inline uint32
calc_bucket(HASHHDR *hctl, uint32 hash_val)
{
	uint32		bucket;

	bucket = hash_val & hctl->high_mask;
	if (bucket > hctl->max_bucket)
		bucket = bucket & hctl->low_mask;

	return bucket;
}
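
/*
 * Worked example (illustrative): with max_bucket = 5, low_mask = 3 and
 * high_mask = 7, a hash value of 6 gives 6 & 7 = 6, which exceeds
 * max_bucket, so it is reduced to 6 & 3 = 2; a hash value of 5 maps
 * straight to bucket 5.  This is the standard linear-hashing address
 * calculation.
 */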

/*
 * hash_search -- look up key in table and perform action
 * hash_search_with_hash_value -- same, with key's hash value already computed
 *
 * action is one of:
 *		HASH_FIND: look up key in table
 *		HASH_ENTER: look up key in table, creating entry if not present
 *		HASH_ENTER_NULL: same, but return NULL if out of memory
 *		HASH_REMOVE: look up key in table, remove entry if present
 *
 * Return value is a pointer to the element found/entered/removed if any,
 * or NULL if no match was found.  (NB: in the case of the REMOVE action,
 * the result is a dangling pointer that shouldn't be dereferenced!)
 *
 * HASH_ENTER will normally ereport a generic "out of memory" error if
 * it is unable to create a new entry.  The HASH_ENTER_NULL operation is
 * the same except it will return NULL if out of memory.  Note that
 * HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
 * since palloc internally ereports on out-of-memory.
 *
 * If foundPtr isn't NULL, then *foundPtr is set true if we found an
 * existing entry in the table, false otherwise.  This is needed in the
 * HASH_ENTER case, but is redundant with the return value otherwise.
 *
 * For hash_search_with_hash_value, the hashvalue parameter must have been
 * calculated with get_hash_value().
 */
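
/*
 * Typical usage sketch (illustrative; "MyEntry" is a hypothetical entry
 * struct whose first field is the key):
 *
 *		bool		found;
 *		MyEntry    *entry;
 *
 *		entry = (MyEntry *) hash_search(htab, &key, HASH_ENTER, &found);
 *		if (!found)
 *			entry->count = 0;		-- initialize fields of a new entry
 *		entry->count++;
 */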

void *
hash_search(HTAB *hashp,
			const void *keyPtr,
			HASHACTION action,
			bool *foundPtr)
{
	return hash_search_with_hash_value(hashp,
									   keyPtr,
									   hashp->hash(keyPtr, hashp->keysize),
									   action,
									   foundPtr);
}

void *
hash_search_with_hash_value(HTAB *hashp,
							const void *keyPtr,
							uint32 hashvalue,
							HASHACTION action,
							bool *foundPtr)
{
	HASHHDR    *hctl = hashp->hctl;
	int			freelist_idx = FREELIST_IDX(hctl, hashvalue);
	Size		keysize;
	uint32		bucket;
	long		segment_num;
	long		segment_ndx;
	HASHSEGMENT segp;
	HASHBUCKET	currBucket;
	HASHBUCKET *prevBucketPtr;
	HashCompareFunc match;

#ifdef HASH_STATISTICS
	hash_accesses++;
	hctl->accesses++;
#endif

	/*
	 * If inserting, check if it is time to split a bucket.
	 *
	 * NOTE: failure to expand table is not a fatal error, it just means we
	 * have to run at higher fill factor than we wanted.  However, if we're
	 * using the palloc allocator then it will throw error anyway on
	 * out-of-memory, so we must do this before modifying the table.
	 */
	if (action == HASH_ENTER || action == HASH_ENTER_NULL)
	{
		/*
		 * Can't split if running in partitioned mode, nor if frozen, nor if
		 * table is the subject of any active hash_seq_search scans.  Strange
		 * order of these tests is to try to check cheaper conditions first.
		 */
		if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
			hctl->freeList[0].nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
			!has_seq_scans(hashp))
			(void) expand_table(hashp);
	}

	/*
	 * Do the initial lookup
	 */
	bucket = calc_bucket(hctl, hashvalue);

	segment_num = bucket >> hashp->sshift;
	segment_ndx = MOD(bucket, hashp->ssize);

	segp = hashp->dir[segment_num];

	if (segp == NULL)
		hash_corrupted(hashp);

	prevBucketPtr = &segp[segment_ndx];
	currBucket = *prevBucketPtr;

	/*
	 * Follow collision chain looking for matching key
	 */
	match = hashp->match;		/* save one fetch in inner loop */
	keysize = hashp->keysize;	/* ditto */

	while (currBucket != NULL)
	{
		if (currBucket->hashvalue == hashvalue &&
			match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
			break;
		prevBucketPtr = &(currBucket->link);
		currBucket = *prevBucketPtr;
#ifdef HASH_STATISTICS
		hash_collisions++;
		hctl->collisions++;
#endif
	}

	if (foundPtr)
		*foundPtr = (bool) (currBucket != NULL);

	/*
	 * OK, now what?
	 */
	switch (action)
	{
		case HASH_FIND:
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);
			return NULL;

		case HASH_REMOVE:
			if (currBucket != NULL)
			{
				/* if partitioned, must lock to touch nentries and freeList */
				if (IS_PARTITIONED(hctl))
					SpinLockAcquire(&(hctl->freeList[freelist_idx].mutex));

				/* delete the record from the appropriate nentries counter. */
				Assert(hctl->freeList[freelist_idx].nentries > 0);
				hctl->freeList[freelist_idx].nentries--;

				/* remove record from hash bucket's chain. */
				*prevBucketPtr = currBucket->link;

				/* add the record to the appropriate freelist. */
				currBucket->link = hctl->freeList[freelist_idx].freeList;
				hctl->freeList[freelist_idx].freeList = currBucket;

				if (IS_PARTITIONED(hctl))
					SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

				/*
				 * better hope the caller is synchronizing access to this
				 * element, because someone else is going to reuse it the next
				 * time something is added to the table
				 */
				return (void *) ELEMENTKEY(currBucket);
			}
			return NULL;

		case HASH_ENTER_NULL:
			/* ENTER_NULL does not work with palloc-based allocator */
			Assert(hashp->alloc != DynaHashAlloc);

			/* FALL THRU */

		case HASH_ENTER:
			/* Return existing element if found, else create one */
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);

			/* disallow inserts if frozen */
			if (hashp->frozen)
				elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
					 hashp->tabname);

			currBucket = get_hash_entry(hashp, freelist_idx);
			if (currBucket == NULL)
			{
				/* out of memory */
				if (action == HASH_ENTER_NULL)
					return NULL;
				/* report a generic message */
				if (hashp->isshared)
					ereport(ERROR,
							(errcode(ERRCODE_OUT_OF_MEMORY),
							 errmsg("out of shared memory")));
				else
					ereport(ERROR,
							(errcode(ERRCODE_OUT_OF_MEMORY),
							 errmsg("out of memory")));
			}

			/* link into hashbucket chain */
			*prevBucketPtr = currBucket;
			currBucket->link = NULL;

			/* copy key into record */
			currBucket->hashvalue = hashvalue;
			hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);

			/*
			 * Caller is expected to fill the data field on return.  DO NOT
			 * insert any code that could possibly throw error here, as doing
			 * so would leave the table entry incomplete and hence corrupt the
			 * caller's data structure.
			 */
			return (void *) ELEMENTKEY(currBucket);
	}

	elog(ERROR, "unrecognized hash action code: %d", (int) action);

	return NULL;				/* keep compiler quiet */
}

/*
 * hash_update_hash_key -- change the hash key of an existing table entry
 *
 * This is equivalent to removing the entry, making a new entry, and copying
 * over its data, except that the entry never goes to the table's freelist.
 * Therefore this cannot suffer an out-of-memory failure, even if there are
 * other processes operating in other partitions of the hashtable.
 *
 * Returns true if successful, false if the requested new hash key is already
 * present.  Throws error if the specified entry pointer isn't actually a
 * table member.
 *
 * NB: currently, there is no special case for old and new hash keys being
 * identical, which means we'll report false for that situation.  This is
 * preferable for existing uses.
 *
 * NB: for a partitioned hashtable, caller must hold lock on both relevant
 * partitions, if the new hash key would belong to a different partition.
 */
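
/*
 * Usage sketch (illustrative):
 *
 *		if (!hash_update_hash_key(htab, entry, &newkey))
 *			elog(ERROR, "key already exists");	-- hypothetical handling
 */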

bool
hash_update_hash_key(HTAB *hashp,
					 void *existingEntry,
					 const void *newKeyPtr)
{
	HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry);
	HASHHDR    *hctl = hashp->hctl;
	uint32		newhashvalue;
	Size		keysize;
	uint32		bucket;
	uint32		newbucket;
	long		segment_num;
	long		segment_ndx;
	HASHSEGMENT segp;
	HASHBUCKET	currBucket;
	HASHBUCKET *prevBucketPtr;
	HASHBUCKET *oldPrevPtr;
	HashCompareFunc match;

#ifdef HASH_STATISTICS
	hash_accesses++;
	hctl->accesses++;
#endif

	/* disallow updates if frozen */
	if (hashp->frozen)
		elog(ERROR, "cannot update in frozen hashtable \"%s\"",
			 hashp->tabname);

	/*
	 * Lookup the existing element using its saved hash value.  We need to do
	 * this to be able to unlink it from its hash chain, but as a side benefit
	 * we can verify the validity of the passed existingEntry pointer.
	 */
	bucket = calc_bucket(hctl, existingElement->hashvalue);

	segment_num = bucket >> hashp->sshift;
	segment_ndx = MOD(bucket, hashp->ssize);

	segp = hashp->dir[segment_num];

	if (segp == NULL)
		hash_corrupted(hashp);

	prevBucketPtr = &segp[segment_ndx];
	currBucket = *prevBucketPtr;

	while (currBucket != NULL)
	{
		if (currBucket == existingElement)
			break;
		prevBucketPtr = &(currBucket->link);
		currBucket = *prevBucketPtr;
	}

	if (currBucket == NULL)
		elog(ERROR, "hash_update_hash_key argument is not in hashtable \"%s\"",
			 hashp->tabname);

	oldPrevPtr = prevBucketPtr;

	/*
	 * Now perform the equivalent of a HASH_ENTER operation to locate the hash
	 * chain we want to put the entry into.
	 */
	newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);

	newbucket = calc_bucket(hctl, newhashvalue);

	segment_num = newbucket >> hashp->sshift;
	segment_ndx = MOD(newbucket, hashp->ssize);

	segp = hashp->dir[segment_num];

	if (segp == NULL)
		hash_corrupted(hashp);

	prevBucketPtr = &segp[segment_ndx];
	currBucket = *prevBucketPtr;

	/*
	 * Follow collision chain looking for matching key
	 */
	match = hashp->match;		/* save one fetch in inner loop */
	keysize = hashp->keysize;	/* ditto */

	while (currBucket != NULL)
	{
		if (currBucket->hashvalue == newhashvalue &&
			match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0)
			break;
		prevBucketPtr = &(currBucket->link);
		currBucket = *prevBucketPtr;
	}

	if (currBucket != NULL)
		return false;			/* collision with an existing entry */

	currBucket = existingElement;

	/*
	 * If old and new hash values belong to the same bucket, we need not
	 * change any chain links, and indeed should not since this simplistic
	 * update will corrupt the list if currBucket is the last element.  (We
	 * cannot fall out earlier, however, since we need to scan the bucket to
	 * check for duplicate keys.)
	 */
	if (bucket != newbucket)
	{
		/* OK to remove record from old hash bucket's chain. */
		*oldPrevPtr = currBucket->link;

		/* link into new hashbucket chain */
		*prevBucketPtr = currBucket;
		currBucket->link = NULL;
	}

	/* copy new key into record */
	currBucket->hashvalue = newhashvalue;
	hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize);

	/* rest of record is untouched */

	return true;
}

/*
 * Allocate a new hashtable entry if possible; return NULL if out of memory.
 * (Or, if the underlying space allocator throws error for out-of-memory,
 * we won't return at all.)
 */
static HASHBUCKET
get_hash_entry(HTAB *hashp, int freelist_idx)
{
	HASHHDR    *hctl = hashp->hctl;
	HASHBUCKET	newElement;

	for (;;)
	{
		/* if partitioned, must lock to touch nentries and freeList */
		if (IS_PARTITIONED(hctl))
			SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);

		/* try to get an entry from the freelist */
		newElement = hctl->freeList[freelist_idx].freeList;

		if (newElement != NULL)
			break;

		if (IS_PARTITIONED(hctl))
			SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

		/*
		 * No free elements in this freelist.  In a partitioned table, there
		 * might be entries in other freelists, but to reduce contention we
		 * prefer to first try to get another chunk of buckets from the main
		 * shmem allocator.  If that fails, though, we *MUST* root through all
		 * the other freelists before giving up.  There are multiple callers
		 * that assume that they can allocate every element in the initially
		 * requested table size, or that deleting an element guarantees they
		 * can insert a new element, even if shared memory is entirely full.
		 * Failing because the needed element is in a different freelist is
		 * not acceptable.
		 */
		if (!element_alloc(hashp, hctl->nelem_alloc, freelist_idx))
		{
			int			borrow_from_idx;

			if (!IS_PARTITIONED(hctl))
				return NULL;	/* out of memory */

			/* try to borrow element from another freelist */
			borrow_from_idx = freelist_idx;
			for (;;)
			{
				borrow_from_idx = (borrow_from_idx + 1) % NUM_FREELISTS;
				if (borrow_from_idx == freelist_idx)
					break;		/* examined all freelists, fail */

				SpinLockAcquire(&(hctl->freeList[borrow_from_idx].mutex));
				newElement = hctl->freeList[borrow_from_idx].freeList;

				if (newElement != NULL)
				{
					hctl->freeList[borrow_from_idx].freeList = newElement->link;
					SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));

					/* careful: count the new element in its proper freelist */
					SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
					hctl->freeList[freelist_idx].nentries++;
					SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

					return newElement;
				}

				SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
			}

			/* no elements available to borrow either, so out of memory */
			return NULL;
		}
	}

	/* remove entry from freelist, bump nentries */
	hctl->freeList[freelist_idx].freeList = newElement->link;
	hctl->freeList[freelist_idx].nentries++;

	if (IS_PARTITIONED(hctl))
		SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

	return newElement;
}

/*
 * hash_get_num_entries -- get the number of entries in a hashtable
 */
long
hash_get_num_entries(HTAB *hashp)
{
	int			i;
	long		sum = hashp->hctl->freeList[0].nentries;

	/*
	 * We currently don't bother with acquiring the mutexes; it's only
	 * sensible to call this function if you've got lock on all partitions of
	 * the table.
	 */
	if (IS_PARTITIONED(hashp->hctl))
	{
		for (i = 1; i < NUM_FREELISTS; i++)
			sum += hashp->hctl->freeList[i].nentries;
	}

	return sum;
}

/*
 * hash_seq_init/_search/_term
 *			Sequentially search through hash table and return
 *			all the elements one by one, return NULL when no more.
 *
 * hash_seq_term should be called if and only if the scan is abandoned before
 * completion; if hash_seq_search returns NULL then it has already done the
 * end-of-scan cleanup.
 *
 * NOTE: caller may delete the returned element before continuing the scan.
 * However, deleting any other element while the scan is in progress is
 * UNDEFINED (it might be the one that curIndex is pointing at!).  Also,
 * if elements are added to the table while the scan is in progress, it is
 * unspecified whether they will be visited by the scan or not.
 *
 * NOTE: it is possible to use hash_seq_init/hash_seq_search without any
 * worry about hash_seq_term cleanup, if the hashtable is first locked against
 * further insertions by calling hash_freeze.
 *
 * NOTE: to use this with a partitioned hashtable, caller had better hold
 * at least shared lock on all partitions of the table throughout the scan!
 * We can cope with insertions or deletions by our own backend, but *not*
 * with concurrent insertions or deletions by another.
 */
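
/*
 * Typical scan loop (an illustrative sketch, assuming a caller-defined
 * entry type "MyEntry"):
 *
 *		HASH_SEQ_STATUS status;
 *		MyEntry    *entry;
 *
 *		hash_seq_init(&status, htab);
 *		while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
 *			process_one(entry);		-- hypothetical per-entry work
 *
 * No hash_seq_term() call is needed here because the scan runs to
 * completion; abandon the scan early and you must call hash_seq_term().
 */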

void
hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
{
	status->hashp = hashp;
	status->curBucket = 0;
	status->curEntry = NULL;
	if (!hashp->frozen)
		register_seq_scan(hashp);
}

void *
hash_seq_search(HASH_SEQ_STATUS *status)
{
	HTAB	   *hashp;
	HASHHDR    *hctl;
	uint32		max_bucket;
	long		ssize;
	long		segment_num;
	long		segment_ndx;
	HASHSEGMENT segp;
	uint32		curBucket;
	HASHELEMENT *curElem;

	if ((curElem = status->curEntry) != NULL)
	{
		/* Continuing scan of curBucket... */
		status->curEntry = curElem->link;
		if (status->curEntry == NULL)	/* end of this bucket */
			++status->curBucket;
		return (void *) ELEMENTKEY(curElem);
	}

	/*
	 * Search for next nonempty bucket starting at curBucket.
	 */
	curBucket = status->curBucket;
	hashp = status->hashp;
	hctl = hashp->hctl;
	ssize = hashp->ssize;
	max_bucket = hctl->max_bucket;

	if (curBucket > max_bucket)
	{
		hash_seq_term(status);
		return NULL;			/* search is done */
	}

	/*
	 * first find the right segment in the table directory.
	 */
	segment_num = curBucket >> hashp->sshift;
	segment_ndx = MOD(curBucket, ssize);

	segp = hashp->dir[segment_num];

	/*
	 * Pick up the first item in this bucket's chain.  If chain is not empty
	 * we can begin searching it.  Otherwise we have to advance to find the
	 * next nonempty bucket.  We try to optimize that case since searching a
	 * near-empty hashtable has to iterate this loop a lot.
	 */
	while ((curElem = segp[segment_ndx]) == NULL)
	{
		/* empty bucket, advance to next */
		if (++curBucket > max_bucket)
		{
			status->curBucket = curBucket;
			hash_seq_term(status);
			return NULL;		/* search is done */
		}
		if (++segment_ndx >= ssize)
		{
			segment_num++;
			segment_ndx = 0;
			segp = hashp->dir[segment_num];
		}
	}

	/* Begin scan of curBucket... */
	status->curEntry = curElem->link;
	if (status->curEntry == NULL)	/* end of this bucket */
		++curBucket;
	status->curBucket = curBucket;
	return (void *) ELEMENTKEY(curElem);
}

void
hash_seq_term(HASH_SEQ_STATUS *status)
{
	if (!status->hashp->frozen)
		deregister_seq_scan(status->hashp);
}

/*
 * hash_freeze
 *			Freeze a hashtable against future insertions (deletions are
 *			still allowed)
 *
 * The reason for doing this is that by preventing any more bucket splits,
 * we no longer need to worry about registering hash_seq_search scans,
 * and thus caller need not be careful about ensuring hash_seq_term gets
 * called at the right times.
 *
 * Multiple calls to hash_freeze() are allowed, but you can't freeze a table
 * with active scans (since hash_seq_term would then do the wrong thing).
 */
void
hash_freeze(HTAB *hashp)
{
	if (hashp->isshared)
		elog(ERROR, "cannot freeze shared hashtable \"%s\"", hashp->tabname);
	if (!hashp->frozen && has_seq_scans(hashp))
		elog(ERROR, "cannot freeze hashtable \"%s\" because it has active scans",
			 hashp->tabname);
	hashp->frozen = true;
}

/********************************* UTILITIES ************************/

/*
 * Expand the table by adding one more hash bucket.
 */
static bool
expand_table(HTAB *hashp)
{
	HASHHDR    *hctl = hashp->hctl;
	HASHSEGMENT old_seg,
				new_seg;
	long		old_bucket,
				new_bucket;
	long		new_segnum,
				new_segndx;
	long		old_segnum,
				old_segndx;
	HASHBUCKET *oldlink,
			   *newlink;
	HASHBUCKET	currElement,
				nextElement;

	Assert(!IS_PARTITIONED(hctl));

#ifdef HASH_STATISTICS
	hash_expansions++;
#endif

	new_bucket = hctl->max_bucket + 1;
	new_segnum = new_bucket >> hashp->sshift;
	new_segndx = MOD(new_bucket, hashp->ssize);

	if (new_segnum >= hctl->nsegs)
	{
		/* Allocate new segment if necessary -- could fail if dir full */
		if (new_segnum >= hctl->dsize)
			if (!dir_realloc(hashp))
				return false;
		if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
			return false;
		hctl->nsegs++;
	}

	/* OK, we created a new bucket */
	hctl->max_bucket++;

	/*
	 * *Before* changing masks, find old bucket corresponding to same hash
	 * values; values in that bucket may need to be relocated to new bucket.
	 * Note that new_bucket is certainly larger than low_mask at this point,
	 * so we can skip the first step of the regular hash mask calc.
	 */
	old_bucket = (new_bucket & hctl->low_mask);

	/*
	 * If we crossed a power of 2, readjust masks.
	 */
	if ((uint32) new_bucket > hctl->high_mask)
	{
		hctl->low_mask = hctl->high_mask;
		hctl->high_mask = (uint32) new_bucket | hctl->low_mask;
	}

	/*
	 * Relocate records to the new bucket.  NOTE: because of the way the hash
	 * masking is done in calc_bucket, only one old bucket can need to be
	 * split at this point.  With a different way of reducing the hash value,
	 * that might not be true!
	 */
	old_segnum = old_bucket >> hashp->sshift;
	old_segndx = MOD(old_bucket, hashp->ssize);

	old_seg = hashp->dir[old_segnum];
	new_seg = hashp->dir[new_segnum];

	oldlink = &old_seg[old_segndx];
	newlink = &new_seg[new_segndx];

	for (currElement = *oldlink;
		 currElement != NULL;
		 currElement = nextElement)
	{
		nextElement = currElement->link;
		if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
		{
			*oldlink = currElement;
			oldlink = &currElement->link;
		}
		else
		{
			*newlink = currElement;
			newlink = &currElement->link;
		}
	}
	/* don't forget to terminate the rebuilt hash chains... */
	*oldlink = NULL;
	*newlink = NULL;

	return true;
}

static bool
dir_realloc(HTAB *hashp)
{
	HASHSEGMENT *p;
	HASHSEGMENT *old_p;
	long		new_dsize;
	long		old_dirsize;
	long		new_dirsize;

	if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
		return false;

	/* Reallocate directory */
	new_dsize = hashp->hctl->dsize << 1;
	old_dirsize = hashp->hctl->dsize * sizeof(HASHSEGMENT);
	new_dirsize = new_dsize * sizeof(HASHSEGMENT);

	old_p = hashp->dir;
	CurrentDynaHashCxt = hashp->hcxt;
	p = (HASHSEGMENT *) hashp->alloc((Size) new_dirsize);

	if (p != NULL)
	{
		memcpy(p, old_p, old_dirsize);
		MemSet(((char *) p) + old_dirsize, 0, new_dirsize - old_dirsize);
		hashp->dir = p;
		hashp->hctl->dsize = new_dsize;

		/* XXX assume the allocator is palloc, so we know how to free */
		Assert(hashp->alloc == DynaHashAlloc);
		pfree(old_p);

		return true;
	}

	return false;
}

static HASHSEGMENT
seg_alloc(HTAB *hashp)
{
	HASHSEGMENT segp;

	CurrentDynaHashCxt = hashp->hcxt;
	segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->ssize);

	if (!segp)
		return NULL;

	MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->ssize);

	return segp;
}

/*
 * allocate some new elements and link them into the indicated free list
 */
static bool
element_alloc(HTAB *hashp, int nelem, int freelist_idx)
{
	HASHHDR    *hctl = hashp->hctl;
	Size		elementSize;
	HASHELEMENT *firstElement;
	HASHELEMENT *tmpElement;
	HASHELEMENT *prevElement;
	int			i;

	if (hashp->isfixed)
		return false;

	/* Each element has a HASHELEMENT header plus user data. */
	elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);

	CurrentDynaHashCxt = hashp->hcxt;
	firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);

	if (!firstElement)
		return false;

	/* prepare to link all the new entries into the freelist */
	prevElement = NULL;
	tmpElement = firstElement;
	for (i = 0; i < nelem; i++)
	{
		tmpElement->link = prevElement;
		prevElement = tmpElement;
		tmpElement = (HASHELEMENT *) (((char *) tmpElement) + elementSize);
	}

	/* if partitioned, must lock to touch freeList */
	if (IS_PARTITIONED(hctl))
		SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);

	/* freelist could be nonempty if two backends did this concurrently */
	firstElement->link = hctl->freeList[freelist_idx].freeList;
	hctl->freeList[freelist_idx].freeList = prevElement;

	if (IS_PARTITIONED(hctl))
		SpinLockRelease(&hctl->freeList[freelist_idx].mutex);

	return true;
}

/* complain when we have detected a corrupted hashtable */
static void
hash_corrupted(HTAB *hashp)
{
	/*
	 * If the corruption is in a shared hashtable, we'd better force a
	 * systemwide restart.  Otherwise, just shut down this one backend.
	 */
	if (hashp->isshared)
		elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
	else
		elog(FATAL, "hash table \"%s\" corrupted", hashp->tabname);
}

/* calculate ceil(log base 2) of num */
int
my_log2(long num)
{
	int			i;
	long		limit;

	/* guard against too-large input, which would put us into infinite loop */
	if (num > LONG_MAX / 2)
		num = LONG_MAX / 2;

	for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
		;
	return i;
}

/* calculate first power of 2 >= num, bounded to what will fit in a long */
static long
next_pow2_long(long num)
{
	/* my_log2's internal range check is sufficient */
	return 1L << my_log2(num);
}

/* calculate first power of 2 >= num, bounded to what will fit in an int */
static int
next_pow2_int(long num)
{
	if (num > INT_MAX / 2)
		num = INT_MAX / 2;
	return 1 << my_log2(num);
}
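
/*
 * For example (illustrative): my_log2(1000) = 10, since 2^10 = 1024 is the
 * first power of 2 >= 1000; hence next_pow2_long(1000) = 1024 and
 * next_pow2_int(1000) = 1024.
 */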

/************************* SEQ SCAN TRACKING ************************/

/*
 * We track active hash_seq_search scans here.  The need for this mechanism
 * comes from the fact that a scan will get confused if a bucket split occurs
 * while it's in progress: it might visit entries twice, or even miss some
 * entirely (if it's partway through the same bucket that splits).  Hence
 * we want to inhibit bucket splits if there are any active scans on the
 * table being inserted into.  This is a fairly rare case in current usage,
 * so just postponing the split until the next insertion seems sufficient.
 *
 * Given present usages of the function, only a few scans are likely to be
 * open concurrently; so a finite-size stack of open scans seems sufficient,
 * and we don't worry that linear search is too slow.  Note that we do
 * allow multiple scans of the same hashtable to be open concurrently.
 *
 * This mechanism can support concurrent scan and insertion in a shared
 * hashtable if it's the same backend doing both.  It would fail otherwise,
 * but locking reasons seem to preclude any such scenario anyway, so we don't
 * worry.
 *
 * This arrangement is reasonably robust if a transient hashtable is deleted
 * without notifying us.  The absolute worst case is we might inhibit splits
 * in another table created later at exactly the same address.  We will give
 * a warning at transaction end for reference leaks, so any bugs leading to
 * lack of notification should be easy to catch.
 */

#define MAX_SEQ_SCANS 100

static HTAB *seq_scan_tables[MAX_SEQ_SCANS];	/* tables being scanned */
static int	seq_scan_level[MAX_SEQ_SCANS];	/* subtransaction nest level */
static int	num_seq_scans = 0;


/* Register a table as having an active hash_seq_search scan */
static void
register_seq_scan(HTAB *hashp)
{
	if (num_seq_scans >= MAX_SEQ_SCANS)
		elog(ERROR, "too many active hash_seq_search scans, cannot start one on \"%s\"",
			 hashp->tabname);
	seq_scan_tables[num_seq_scans] = hashp;
	seq_scan_level[num_seq_scans] = GetCurrentTransactionNestLevel();
	num_seq_scans++;
}

/* Deregister an active scan */
static void
deregister_seq_scan(HTAB *hashp)
{
	int			i;

	/* Search backward since it's most likely at the stack top */
	for (i = num_seq_scans - 1; i >= 0; i--)
	{
		if (seq_scan_tables[i] == hashp)
		{
			seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
			seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
			num_seq_scans--;
			return;
		}
	}
	elog(ERROR, "no hash_seq_search scan for hash table \"%s\"",
		 hashp->tabname);
}

/* Check if a table has any active scan */
static bool
has_seq_scans(HTAB *hashp)
{
	int			i;

	for (i = 0; i < num_seq_scans; i++)
	{
		if (seq_scan_tables[i] == hashp)
			return true;
	}
	return false;
}

/* Clean up any open scans at end of transaction */
void
AtEOXact_HashTables(bool isCommit)
{
	/*
	 * During abort cleanup, open scans are expected; just silently clean 'em
	 * out.  An open scan at commit means someone forgot a hash_seq_term()
	 * call, so complain.
	 *
	 * Note: it's tempting to try to print the tabname here, but refrain for
	 * fear of touching deallocated memory.  This isn't a user-facing message
	 * anyway, so it needn't be pretty.
	 */
	if (isCommit)
	{
		int			i;

		for (i = 0; i < num_seq_scans; i++)
		{
			elog(WARNING, "leaked hash_seq_search scan for hash table %p",
				 seq_scan_tables[i]);
		}
	}
	num_seq_scans = 0;
}

/* Clean up any open scans at end of subtransaction */
void
AtEOSubXact_HashTables(bool isCommit, int nestDepth)
{
	int			i;

	/*
	 * Search backward to make cleanup easy.  Note we must check all entries,
	 * not only those at the end of the array, because deletion technique
	 * doesn't keep them in order.
	 */
	for (i = num_seq_scans - 1; i >= 0; i--)
	{
		if (seq_scan_level[i] >= nestDepth)
		{
			if (isCommit)
				elog(WARNING, "leaked hash_seq_search scan for hash table %p",
					 seq_scan_tables[i]);
			seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
			seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
			num_seq_scans--;
		}
	}
}