1 /*-------------------------------------------------------------------------
4 * Hash table page management code for the Postgres hash access method
6 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/access/hash/hashpage.c
14 * Postgres hash pages look like ordinary relation pages. The opaque
15 * data at high addresses includes information about the page, including
16 * whether a page is an overflow page or a true bucket, the bucket
17 * number, and the block numbers of the preceding and following pages in the bucket chain.
20 * The first page in a hash relation, page zero, is special -- it stores
21 * information describing the hash table; it is referred to as the
22 * "meta page." Pages one and higher store the actual data.
24 * There are also bitmap pages, which are not manipulated here;
27 *-------------------------------------------------------------------------
31 #include "access/hash.h"
32 #include "miscadmin.h"
33 #include "storage/lmgr.h"
34 #include "storage/smgr.h"
37 static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks);
39 static void _hash_splitbucket(Relation rel, Buffer metabuf,
40 Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, uint32 maxbucket,
44 uint32 highmask, uint32 lowmask);
45 static void _hash_splitbucket_guts(Relation rel, Buffer metabuf,
46 Bucket obucket, Bucket nbucket, Buffer obuf,
47 Buffer nbuf, HTAB *htab, uint32 maxbucket,
48 uint32 highmask, uint32 lowmask);
52 * We use high-concurrency locking on hash indexes (see README for an overview
53 * of the locking rules). However, we can skip taking lmgr locks when the
54 * index is local to the current backend (ie, either temp or new in the
55 * current transaction). No one else can see it, so there's no reason to
56 * take locks. We still take buffer-level locks, but not lmgr locks.
58 #define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
62 * _hash_getbuf() -- Get a buffer by block number for read or write.
64 * 'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
65 * 'flags' is a bitwise OR of the allowed page types.
67 * This must be used only to fetch pages that are expected to be valid
68 * already. _hash_checkpage() is applied using the given flags.
70 * When this routine returns, the appropriate lock is set on the
71 * requested buffer and its reference count has been incremented
72 * (ie, the buffer is "locked and pinned").
74 * P_NEW is disallowed because this routine can only be used
75 * to access pages that are known to be before the filesystem EOF.
76 * Extending the index should be done with _hash_getnewbuf.
79 _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
84 elog(ERROR, "hash AM does not use P_NEW");
86 buf = ReadBuffer(rel, blkno);
88 if (access != HASH_NOLOCK)
89 LockBuffer(buf, access);
91 /* ref count and lock type are correct */
93 _hash_checkpage(rel, buf, flags);
99 * _hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup.
101 * We read the page and try to acquire a cleanup lock. If we get it,
102 * we return the buffer; otherwise, we return InvalidBuffer.
105 _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
110 elog(ERROR, "hash AM does not use P_NEW");
112 buf = ReadBuffer(rel, blkno);
114 if (!ConditionalLockBufferForCleanup(buf))
117 return InvalidBuffer;
120 /* ref count and lock type are correct */
122 _hash_checkpage(rel, buf, flags);
128 * _hash_getinitbuf() -- Get and initialize a buffer by block number.
130 * This must be used only to fetch pages that are known to be before
131 * the index's filesystem EOF, but are to be filled from scratch.
132 * _hash_pageinit() is applied automatically. Otherwise it has
133 * effects similar to _hash_getbuf() with access = HASH_WRITE.
135 * When this routine returns, a write lock is set on the
136 * requested buffer and its reference count has been incremented
137 * (ie, the buffer is "locked and pinned").
139 * P_NEW is disallowed because this routine can only be used
140 * to access pages that are known to be before the filesystem EOF.
141 * Extending the index should be done with _hash_getnewbuf.
144 _hash_getinitbuf(Relation rel, BlockNumber blkno)
149 elog(ERROR, "hash AM does not use P_NEW");
151 buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
154 /* ref count and lock type are correct */
156 /* initialize the page */
157 _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
163 * _hash_getnewbuf() -- Get a new page at the end of the index.
165 * This has the same API as _hash_getinitbuf, except that we are adding
166 * a page to the index, and hence expect the page to be past the
167 * logical EOF. (However, we have to support the case where it isn't,
168 * since a prior try might have crashed after extending the filesystem
169 * EOF but before updating the metapage to reflect the added page.)
171 * It is caller's responsibility to ensure that only one process can
172 * extend the index at a time. In practice, this function is called
173 * only while holding write lock on the metapage, because adding a page
174 * is always associated with an update of metapage data.
177 _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
179 BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
183 elog(ERROR, "hash AM does not use P_NEW");
185 elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
186 RelationGetRelationName(rel));
188 /* smgr insists we use P_NEW to extend the relation */
189 if (blkno == nblocks)
191 buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
192 if (BufferGetBlockNumber(buf) != blkno)
193 elog(ERROR, "unexpected hash relation size: %u, should be %u",
194 BufferGetBlockNumber(buf), blkno);
195 LockBuffer(buf, HASH_WRITE);
199 buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
203 /* ref count and lock type are correct */
205 /* initialize the page */
206 _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
212 * _hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
214 * This is identical to _hash_getbuf() but also allows a buffer access
215 * strategy to be specified. We use this for VACUUM operations.
218 _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
219 int access, int flags,
220 BufferAccessStrategy bstrategy)
225 elog(ERROR, "hash AM does not use P_NEW");
227 buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
229 if (access != HASH_NOLOCK)
230 LockBuffer(buf, access);
232 /* ref count and lock type are correct */
234 _hash_checkpage(rel, buf, flags);
240 * _hash_relbuf() -- release a locked buffer.
242 * Lock and pin (refcount) are both dropped.
245 _hash_relbuf(Relation rel, Buffer buf)
247 UnlockReleaseBuffer(buf);
251 * _hash_dropbuf() -- release an unlocked buffer.
253 * This is used to unpin a buffer on which we hold no lock.
256 _hash_dropbuf(Relation rel, Buffer buf)
262 * _hash_dropscanbuf() -- release buffers used in scan.
264 * This routine unpins the buffers used during a scan on which we hold no lock.
268 _hash_dropscanbuf(Relation rel, HashScanOpaque so)
270 /* release pin we hold on primary bucket page */
271 if (BufferIsValid(so->hashso_bucket_buf) &&
272 so->hashso_bucket_buf != so->hashso_curbuf)
273 _hash_dropbuf(rel, so->hashso_bucket_buf);
274 so->hashso_bucket_buf = InvalidBuffer;
276 /* release pin we hold on primary bucket page of bucket being split */
277 if (BufferIsValid(so->hashso_split_bucket_buf) &&
278 so->hashso_split_bucket_buf != so->hashso_curbuf)
279 _hash_dropbuf(rel, so->hashso_split_bucket_buf);
280 so->hashso_split_bucket_buf = InvalidBuffer;
282 /* release any pin we still hold */
283 if (BufferIsValid(so->hashso_curbuf))
284 _hash_dropbuf(rel, so->hashso_curbuf);
285 so->hashso_curbuf = InvalidBuffer;
287 /* reset split scan */
288 so->hashso_buc_populated = false;
289 so->hashso_buc_split = false;
293 * _hash_wrtbuf() -- write a hash page to disk.
295 * This routine releases the lock held on the buffer and our refcount
296 * for it. It is an error to call _hash_wrtbuf() without a write lock
297 * and a pin on the buffer.
299 * NOTE: this routine should go away when/if hash indexes are WAL-ified.
300 * The correct sequence of operations is to mark the buffer dirty, then
301 * write the WAL record, then release the lock and pin; so marking dirty
302 * can't be combined with releasing.
305 _hash_wrtbuf(Relation rel, Buffer buf)
307 MarkBufferDirty(buf);
308 UnlockReleaseBuffer(buf);
312 * _hash_chgbufaccess() -- Change the lock type on a buffer, without
313 * dropping our pin on it.
315 * from_access and to_access may be HASH_READ, HASH_WRITE, or HASH_NOLOCK,
316 * the last indicating that no buffer-level lock is held or wanted.
318 * When from_access == HASH_WRITE, we assume the buffer is dirty and tell
319 * bufmgr it must be written out. If the caller wants to release a write
320 * lock on a page that's not been modified, it's okay to pass from_access
321 * as HASH_READ (a bit ugly, but handy in some places).
324 _hash_chgbufaccess(Relation rel, Buffer buf, int from_access, int to_access)
329 if (from_access == HASH_WRITE)
330 MarkBufferDirty(buf);
331 if (from_access != HASH_NOLOCK)
332 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
333 if (to_access != HASH_NOLOCK)
334 LockBuffer(buf, to_access);
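/*
 * Illustrative usage (editor's note, not part of the original code): a call
 * like _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK) simply drops a
 * shared lock while keeping the pin; (HASH_WRITE, HASH_NOLOCK) marks the
 * buffer dirty before dropping the exclusive lock; and (HASH_NOLOCK,
 * HASH_WRITE) takes an exclusive lock on a buffer we had only pinned.
 * These cases follow directly from the three tests above.
 */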
339 * _hash_metapinit() -- Initialize the metadata page of a hash index,
340 * the initial buckets, and the initial bitmap page.
342 * The initial number of buckets is dependent on num_tuples, an estimate
343 * of the number of tuples to be loaded into the index initially. The
344 * chosen number of buckets is returned.
346 * We are fairly cavalier about locking here, since we know that no one else
347 * could be accessing this index. In particular the rule about not holding
348 * multiple buffer locks is ignored.
351 _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
354 HashPageOpaque pageopaque;
363 uint32 log2_num_buckets;
367 if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
368 elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
369 RelationGetRelationName(rel));
372 * Determine the target fill factor (in tuples per bucket) for this index.
373 * The idea is to make the fill factor correspond to pages about as full
374 * as the user-settable fillfactor parameter says. We can compute it
375 * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
377 data_width = sizeof(uint32);
378 item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
379 sizeof(ItemIdData); /* include the line pointer */
380 ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
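/*
 * Worked example (editor's illustration, assuming 8 kB pages, an 8-byte
 * MAXALIGN, and the default fillfactor of 75): item_width = MAXALIGN(8) +
 * MAXALIGN(4) + 4 = 20 bytes and the target page usage is 8192 * 75 / 100 =
 * 6144 bytes, so ffactor works out to roughly 307 tuples per bucket page.
 */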
381 /* keep to a sane range */
386 * Choose the number of initial bucket pages to match the fill factor
387 * given the estimated number of tuples. We round up the result to the
388 * next power of 2, however, and always force at least 2 bucket pages. The
389 * upper limit is determined by considerations explained in
390 * _hash_expandtable().
392 dnumbuckets = num_tuples / ffactor;
393 if (dnumbuckets <= 2.0)
395 else if (dnumbuckets >= (double) 0x40000000)
396 num_buckets = 0x40000000;
398 num_buckets = ((uint32) 1) << _hash_log2((uint32) dnumbuckets);
400 log2_num_buckets = _hash_log2(num_buckets);
401 Assert(num_buckets == (((uint32) 1) << log2_num_buckets));
402 Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS);
405 * We initialize the metapage, the first N bucket pages, and the first
406 * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
407 * calls to occur. This ensures that the smgr level has the right idea of
408 * the physical index length.
410 metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
411 pg = BufferGetPage(metabuf);
413 pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
414 pageopaque->hasho_prevblkno = InvalidBlockNumber;
415 pageopaque->hasho_nextblkno = InvalidBlockNumber;
416 pageopaque->hasho_bucket = -1;
417 pageopaque->hasho_flag = LH_META_PAGE;
418 pageopaque->hasho_page_id = HASHO_PAGE_ID;
420 metap = HashPageGetMeta(pg);
422 metap->hashm_magic = HASH_MAGIC;
423 metap->hashm_version = HASH_VERSION;
424 metap->hashm_ntuples = 0;
425 metap->hashm_nmaps = 0;
426 metap->hashm_ffactor = ffactor;
427 metap->hashm_bsize = HashGetMaxBitmapSize(pg);
428 /* find largest bitmap array size that will fit in page size */
429 for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
431 if ((1 << i) <= metap->hashm_bsize)
435 metap->hashm_bmsize = 1 << i;
436 metap->hashm_bmshift = i + BYTE_TO_BIT;
437 Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
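/*
 * Worked example (assuming the default 8 kB block size): hashm_bsize is the
 * page size less the page header and hash opaque area, roughly 8152 bytes,
 * so the loop above stops at i = 12. That makes hashm_bmsize 4096 bytes and
 * hashm_bmshift 15, i.e. each bitmap page can track 2^15 = 32768 overflow
 * pages.
 */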
440 * Label the index with its primary hash support function's OID. This is
441 * pretty useless for normal operation (in fact, hashm_procid is not used
442 * anywhere), but it might be handy for forensic purposes so we keep it.
444 metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
447 * We initialize the index with N buckets, 0 .. N-1, occupying physical
448 * blocks 1 to N. The first freespace bitmap page is in block N+1. Since
449 * N is a power of 2, we can set the masks this way:
451 metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1;
452 metap->hashm_highmask = (num_buckets << 1) - 1;
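/*
 * Example (continuing the 512-bucket illustration above): hashm_maxbucket
 * and hashm_lowmask become 511 (0x1FF) and hashm_highmask becomes 1023
 * (0x3FF); buckets 0..511 occupy blocks 1..512, and the first bitmap page
 * goes in block 513.
 */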
454 MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
455 MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
457 /* Set up mapping for one spare page after the initial splitpoints */
458 metap->hashm_spares[log2_num_buckets] = 1;
459 metap->hashm_ovflpoint = log2_num_buckets;
460 metap->hashm_firstfree = 0;
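/*
 * Editor's note: roughly speaking, hashm_spares[] records how many
 * non-bucket (overflow and bitmap) pages precede the bucket pages of each
 * later splitpoint, and BUCKET_TO_BLKNO adds that count when translating a
 * bucket number into a block number. In the 512-bucket illustration,
 * spares[9] = 1 accounts for the bitmap page at block 513, so bucket 512
 * (the first bucket of the next splitpoint) will map to block 514.
 */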
463 * Release buffer lock on the metapage while we initialize buckets.
464 * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
465 * won't accomplish anything. It's a bad idea to hold buffer locks for
466 * long intervals in any case, since that can block the bgwriter.
468 _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
471 * Initialize the first N buckets
473 for (i = 0; i < num_buckets; i++)
475 /* Allow interrupts, in case N is huge */
476 CHECK_FOR_INTERRUPTS();
478 buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), forkNum);
479 pg = BufferGetPage(buf);
480 pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
481 pageopaque->hasho_prevblkno = InvalidBlockNumber;
482 pageopaque->hasho_nextblkno = InvalidBlockNumber;
483 pageopaque->hasho_bucket = i;
484 pageopaque->hasho_flag = LH_BUCKET_PAGE;
485 pageopaque->hasho_page_id = HASHO_PAGE_ID;
486 _hash_wrtbuf(rel, buf);
489 /* Now reacquire buffer lock on metapage */
490 _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
493 * Initialize first bitmap page
495 _hash_initbitmap(rel, metap, num_buckets + 1, forkNum);
498 _hash_wrtbuf(rel, metabuf);
504 * _hash_pageinit() -- Initialize a new hash index page.
507 _hash_pageinit(Page page, Size size)
509 Assert(PageIsNew(page));
510 PageInit(page, size, sizeof(HashPageOpaqueData));
514 * Attempt to expand the hash table by creating one new bucket.
516 * This will silently do nothing if we don't get a cleanup lock on the old or new bucket.
519 * It also completes any pending split and removes tuples left over in the old
520 * bucket from a previous split.
522 * The caller must hold a pin, but no lock, on the metapage buffer.
523 * The buffer is returned in the same state.
526 _hash_expandtable(Relation rel, Buffer metabuf)
532 BlockNumber start_oblkno;
533 BlockNumber start_nblkno;
537 HashPageOpaque oopaque;
545 * Write-lock the meta page. It used to be necessary to acquire a
546 * heavyweight lock to begin a split, but that is no longer required.
548 _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
550 _hash_checkpage(rel, metabuf, LH_META_PAGE);
551 metap = HashPageGetMeta(BufferGetPage(metabuf));
554 * Check to see if split is still needed; someone else might have already
555 * done one while we waited for the lock.
557 * Make sure this stays in sync with _hash_doinsert()
559 if (metap->hashm_ntuples <=
560 (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
564 * Can't split anymore if maxbucket has reached its maximum possible value.
567 * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
568 * the calculation maxbucket+1 mustn't overflow). Currently we restrict
569 * to half that because of overflow looping in _hash_log2() and
570 * insufficient space in hashm_spares[]. It's moot anyway because an
571 * index with 2^32 buckets would certainly overflow BlockNumber and hence
572 * _hash_alloc_buckets() would fail, but if we supported buckets smaller
573 * than a disk block then this would be an independent constraint.
575 * If you change this, see also the maximum initial number of buckets in _hash_metapinit().
578 if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
582 * Determine which bucket is to be split, and attempt to take cleanup lock
583 * on the old bucket. If we can't get the lock, give up.
585 * The cleanup lock protects us not only against other backends, but
586 * against our own backend as well.
588 * The cleanup lock is mainly to protect the split from concurrent
589 * inserts. See src/backend/access/hash/README, Lock Definitions for
590 * further details. Due to this locking restriction, if there is any
591 * pending scan, the split will give up, which is not ideal but is harmless.
593 new_bucket = metap->hashm_maxbucket + 1;
595 old_bucket = (new_bucket & metap->hashm_lowmask);
597 start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
599 buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
603 opage = BufferGetPage(buf_oblkno);
604 oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
607 * We want to finish any incomplete split of the old bucket before starting
608 * a new one: there is no apparent benefit to deferring it, and handling
609 * incomplete splits that span multiple buckets would complicate the code,
610 * especially if a new split were to fail as well. We don't need to consider
611 * the new bucket when completing the split here, because a re-split of the
612 * new bucket cannot start while a split from the old bucket is still pending.
614 if (H_BUCKET_BEING_SPLIT(oopaque))
617 * Copy bucket mapping info now; refer to the comment in the code below
618 * where we copy this information before calling _hash_splitbucket to see why this is okay.
621 maxbucket = metap->hashm_maxbucket;
622 highmask = metap->hashm_highmask;
623 lowmask = metap->hashm_lowmask;
626 * Release the locks on the metapage and old_bucket before completing the split.
629 _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
630 _hash_chgbufaccess(rel, buf_oblkno, HASH_READ, HASH_NOLOCK);
632 _hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
635 /* release the pin on the old buffer and retry the expansion */
636 _hash_dropbuf(rel, buf_oblkno);
642 * Clean up the tuples remaining from the previous split. This operation
643 * requires a cleanup lock, and we already have one on the old bucket, so
644 * let's do it. We also don't want to allow further splits from the bucket
645 * until the garbage from the previous split has been cleaned up. This has
646 * two advantages: first, it helps avoid bloat due to leftover garbage; and
647 * second, during cleanup of a bucket we can always be sure that the
648 * garbage tuples belong to the most recently split bucket. If instead we
649 * allowed cleanup of a bucket after the metapage had been updated to show
650 * the new split but before the actual split, the cleanup operation could
651 * not decide whether a tuple had been moved to the newly created bucket,
652 * and might end up deleting such tuples.
654 if (H_NEEDS_SPLIT_CLEANUP(oopaque))
657 * Copy bucket mapping info now; refer to the comment in code below
658 * where we copy this information before calling _hash_splitbucket
659 * to see why this is okay.
661 maxbucket = metap->hashm_maxbucket;
662 highmask = metap->hashm_highmask;
663 lowmask = metap->hashm_lowmask;
665 /* Release the metapage lock. */
666 _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
668 hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
669 maxbucket, highmask, lowmask, NULL, NULL, true,
672 _hash_dropbuf(rel, buf_oblkno);
678 * There shouldn't be any active scan on the new bucket.
680 * Note: it is safe to compute the new bucket's blkno here, even though we
681 * may still need to update the BUCKET_TO_BLKNO mapping. This is because
682 * the current value of hashm_spares[hashm_ovflpoint] correctly shows
683 * where we are going to put a new splitpoint's worth of buckets.
685 start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
688 * If the split point is increasing (hashm_maxbucket's log base 2
689 * increases), we need to allocate a new batch of bucket pages.
691 spare_ndx = _hash_log2(new_bucket + 1);
692 if (spare_ndx > metap->hashm_ovflpoint)
694 Assert(spare_ndx == metap->hashm_ovflpoint + 1);
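/*
 * Illustrative example: if buckets 0..7 exist and hashm_ovflpoint is 3,
 * creating bucket 8 gives spare_ndx = _hash_log2(9) = 4, so a whole new
 * splitpoint's worth of bucket pages (8 of them) is allocated below; the
 * later splits that create buckets 9..15 then reuse that allocation.
 */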
697 * The number of buckets in the new splitpoint is equal to the total
698 * number already in existence, i.e. new_bucket. Currently this maps
699 * one-to-one to blocks required, but someday we may need a more
700 * complicated calculation here.
702 if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
704 /* can't split due to BlockNumber overflow */
705 _hash_relbuf(rel, buf_oblkno);
711 * Physically allocate the new bucket's primary page. We want to do this
712 * before changing the metapage's mapping info, in case we can't get the
713 * disk space. Ideally, we wouldn't need to check for a cleanup lock on the
714 * new bucket, since no other backend can find this bucket until the meta
715 * page is updated. However, it is good to be consistent with the old bucket's locking.
717 buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
718 if (!IsBufferCleanupOK(buf_nblkno))
720 _hash_relbuf(rel, buf_oblkno);
721 _hash_relbuf(rel, buf_nblkno);
727 * Okay to proceed with split. Update the metapage bucket mapping info.
729 * Since we are scribbling on the metapage data right in the shared
730 * buffer, any failure in this next little bit leaves us with a big
731 * problem: the metapage is effectively corrupt but could get written back
732 * to disk. We don't really expect any failure, but just to be sure,
733 * establish a critical section.
735 START_CRIT_SECTION();
737 metap->hashm_maxbucket = new_bucket;
739 if (new_bucket > metap->hashm_highmask)
741 /* Starting a new doubling */
742 metap->hashm_lowmask = metap->hashm_highmask;
743 metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
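/*
 * Example (editor's note): if buckets 0..7 exist with lowmask = 3 and
 * highmask = 7, creating bucket 8 starts a new doubling: lowmask becomes 7
 * and highmask becomes 8 | 7 = 15, so hash keys are now masked with 15,
 * falling back to 7 for buckets that have not been split yet.
 */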
747 * If the split point is increasing (hashm_maxbucket's log base 2
748 * increases), we need to adjust the hashm_spares[] array and
749 * hashm_ovflpoint so that future overflow pages will be created beyond
750 * this new batch of bucket pages.
752 if (spare_ndx > metap->hashm_ovflpoint)
754 metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
755 metap->hashm_ovflpoint = spare_ndx;
758 /* Done mucking with metapage */
762 * Copy bucket mapping info now; this saves re-accessing the meta page
763 * inside _hash_splitbucket's inner loop. Note that once we drop the
764 * metapage lock, other splits could begin, so these values might be out of
765 * date before _hash_splitbucket finishes. That's okay, since all it
766 * needs is to tell which of these two buckets to map hashkeys into.
768 maxbucket = metap->hashm_maxbucket;
769 highmask = metap->hashm_highmask;
770 lowmask = metap->hashm_lowmask;
772 /* Write out the metapage and drop lock, but keep pin */
773 _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
775 /* Relocate records to the new bucket */
776 _hash_splitbucket(rel, metabuf,
777 old_bucket, new_bucket,
778 buf_oblkno, buf_nblkno,
779 maxbucket, highmask, lowmask);
783 /* We get here if we decide not to split or fail to acquire the old bucket lock */
786 /* We didn't write the metapage, so just drop lock */
787 _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
792 * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
794 * This does not need to initialize the new bucket pages; we'll do that as
795 * each one is used by _hash_expandtable(). But we have to extend the logical
796 * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
797 * sync with ours, so that we don't get complaints from smgr.
799 * We do this by writing a page of zeroes at the end of the splitpoint range.
800 * We expect that the filesystem will ensure that the intervening pages read
801 * as zeroes too. On many filesystems this "hole" will not be allocated
802 * immediately, which means that the index file may end up more fragmented
803 * than if we forced it all to be allocated now; but since we don't scan
804 * hash indexes sequentially anyway, that probably doesn't matter.
806 * XXX It's annoying that this code is executed with the metapage lock held.
807 * We need to interlock against _hash_getovflpage() adding a new overflow page
808 * concurrently, but it'd likely be better to use LockRelationForExtension
809 * for the purpose. OTOH, adding a splitpoint is a very infrequent operation,
810 * so it may not be worth worrying about.
812 * Returns TRUE if successful, or FALSE if allocation failed due to
813 * BlockNumber overflow.
816 _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
818 BlockNumber lastblock;
819 char zerobuf[BLCKSZ];
821 lastblock = firstblock + nblocks - 1;
824 * Check for overflow in block number calculation; if so, we cannot extend the index anymore.
827 if (lastblock < firstblock || lastblock == InvalidBlockNumber)
830 MemSet(zerobuf, 0, sizeof(zerobuf));
832 RelationOpenSmgr(rel);
833 smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);
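/*
 * Illustrative example (hypothetical numbers): with firstblock = 10 and
 * nblocks = 8, lastblock is 17; writing a single zeroed page at block 17
 * extends the relation through the whole splitpoint, and blocks 10..16 are
 * expected to read back as zeroes until they are actually initialized.
 */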
840 * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
842 * We are splitting a bucket that consists of a base bucket page and zero
843 * or more overflow (bucket chain) pages. We must relocate tuples that
844 * belong in the new bucket, and compress out any free space in the old bucket.
847 * The caller must hold cleanup locks on both buckets to ensure that
848 * no one else is trying to access them (see README).
850 * The caller must hold a pin, but no lock, on the metapage buffer.
851 * The buffer is returned in the same state. (The metapage is only
852 * touched if it becomes necessary to add or remove overflow pages.)
854 * Split needs to retain pin on primary bucket pages of both old and new
855 * buckets till end of operation. This is to prevent vacuum from starting
856 * while a split is in progress.
858 * In addition, the caller must have created the new bucket's base page,
859 * which is passed in buffer nbuf, pinned and write-locked. That lock and
860 * pin are released here. (The API is set up this way because we must do
861 * _hash_getnewbuf() before releasing the metapage write lock. So instead of
862 * passing the new bucket's start block number, we pass an actual buffer.)
865 _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, uint32 maxbucket, uint32 highmask, uint32 lowmask)
877 HashPageOpaque oopaque;
878 HashPageOpaque nopaque;
880 opage = BufferGetPage(obuf);
881 oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
884 * Mark the old bucket to indicate that a split is in progress. At the
885 * end of the operation, we clear the split-in-progress flag.
887 oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
889 npage = BufferGetPage(nbuf);
892 * Initialize the new bucket's primary page and mark it to indicate that a
893 * split is in progress.
895 nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
896 nopaque->hasho_prevblkno = InvalidBlockNumber;
897 nopaque->hasho_nextblkno = InvalidBlockNumber;
898 nopaque->hasho_bucket = nbucket;
899 nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED;
900 nopaque->hasho_page_id = HASHO_PAGE_ID;
902 _hash_splitbucket_guts(rel, metabuf, obucket,
903 nbucket, obuf, nbuf, NULL,
904 maxbucket, highmask, lowmask);
906 /* all done, now release the locks and pins on primary buckets. */
907 _hash_relbuf(rel, obuf);
908 _hash_relbuf(rel, nbuf);
912 * _hash_splitbucket_guts -- Helper function to perform the split operation
914 * This routine is used to partition the tuples between the old and new
915 * buckets and to finish incomplete split operations. To finish a previously
916 * interrupted split, the caller must supply htab. If htab is set, we skip
917 * moving any tuple whose TID is already present in htab; if htab is NULL,
918 * all tuples that belong to the new bucket are moved.
920 * Caller needs to lock and unlock the old and new primary buckets.
923 _hash_splitbucket_guts(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask)
938 HashPageOpaque oopaque;
939 HashPageOpaque nopaque;
942 opage = BufferGetPage(obuf);
943 oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
946 npage = BufferGetPage(nbuf);
947 nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
950 * Partition the tuples in the old bucket between the old bucket and the
951 * new bucket, advancing along the old bucket's overflow bucket chain and
952 * adding overflow pages to the new bucket as needed. Outer loop iterates
953 * once per page in old bucket.
958 OffsetNumber ooffnum;
959 OffsetNumber omaxoffnum;
961 /* Scan each tuple in old page */
962 omaxoffnum = PageGetMaxOffsetNumber(opage);
963 for (ooffnum = FirstOffsetNumber;
964 ooffnum <= omaxoffnum;
965 ooffnum = OffsetNumberNext(ooffnum))
972 /* skip dead tuples */
973 if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
977 * Before inserting a tuple, probe the hash table containing TIDs of
978 * tuples belonging to the new bucket; if we find a match, skip that
979 * tuple. Otherwise, fetch the item's hash key (conveniently stored
980 * in the item) and determine which bucket it now belongs to.
983 itup = (IndexTuple) PageGetItem(opage,
984 PageGetItemId(opage, ooffnum));
987 (void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
992 bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
993 maxbucket, highmask, lowmask);
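/*
 * Editor's note on the mapping: _hash_hashkey2bucket (see hashutil.c) masks
 * the hash key with highmask and, if the result exceeds maxbucket, masks it
 * again with lowmask. For instance, with maxbucket = 10, highmask = 15 and
 * lowmask = 7, a key whose low bits are 13 maps to bucket 13 & 7 = 5, while
 * a key whose low bits are 9 maps to bucket 9.
 */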
995 if (bucket == nbucket)
1000 * Make a copy of the index tuple, as we have to scribble on it.
1002 new_itup = CopyIndexTuple(itup);
1005 * Mark the index tuple as moved by split; such tuples are
1006 * skipped by scans while a split is in progress for the bucket.
1008 new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
1011 * Insert the tuple into the new bucket. If it doesn't fit on
1012 * the current page in the new bucket, we must allocate a new
1013 * overflow page and place the tuple on that page instead.
1015 itemsz = IndexTupleDSize(*new_itup);
1016 itemsz = MAXALIGN(itemsz);
1018 if (PageGetFreeSpace(npage) < itemsz)
1020 /* write out nbuf and drop lock, but keep pin */
1021 _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
1022 /* chain to a new overflow page */
1023 nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
1024 npage = BufferGetPage(nbuf);
1025 nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1029 * Insert tuple on new page, using _hash_pgaddtup to ensure
1030 * correct ordering by hashkey. This is a tad inefficient
1031 * since we may have to shuffle itempointers repeatedly.
1032 * Possible future improvement: accumulate all the items for
1033 * the new page and qsort them before insertion.
1035 (void) _hash_pgaddtup(rel, nbuf, itemsz, new_itup);
1043 * the tuple stays on this page, so nothing to do.
1045 Assert(bucket == obucket);
1049 oblkno = oopaque->hasho_nextblkno;
1051 /* retain the pin on the old primary bucket */
1052 if (obuf == bucket_obuf)
1053 _hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
1055 _hash_relbuf(rel, obuf);
1057 /* Exit loop if no more overflow pages in old bucket */
1058 if (!BlockNumberIsValid(oblkno))
1061 /* Else, advance to next old page */
1062 obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
1063 opage = BufferGetPage(obuf);
1064 oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1068 * We're at the end of the old bucket chain, so we're done partitioning
1069 * the tuples. Mark the old and new buckets to indicate the split is finished.
1072 * To avoid deadlocks due to locking order of buckets, first lock the old
1073 * bucket and then the new bucket.
1075 if (nbuf == bucket_nbuf)
1076 _hash_chgbufaccess(rel, bucket_nbuf, HASH_WRITE, HASH_NOLOCK);
1078 _hash_wrtbuf(rel, nbuf);
1080 _hash_chgbufaccess(rel, bucket_obuf, HASH_NOLOCK, HASH_WRITE);
1081 opage = BufferGetPage(bucket_obuf);
1082 oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1084 _hash_chgbufaccess(rel, bucket_nbuf, HASH_NOLOCK, HASH_WRITE);
1085 npage = BufferGetPage(bucket_nbuf);
1086 nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1088 oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
1089 nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
1092 * After the split is finished, mark the old bucket to indicate that it
1093 * contains deletable tuples. Vacuum will clear the split-cleanup flag
1094 * after deleting such tuples.
1096 oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
1099 * Now write the buffers. We don't release the locks here, as the caller
1100 * is responsible for releasing them.
1102 MarkBufferDirty(bucket_obuf);
1103 MarkBufferDirty(bucket_nbuf);
1107 * _hash_finish_split() -- Finish the previously interrupted split operation
1109 * To complete the split operation, we build a hash table of the TIDs
1110 * already present in the new bucket; the split then uses it to skip tuples
1111 * that were moved before the split operation was interrupted.
1113 * The caller must hold a pin, but no lock, on the metapage and the old
1114 * bucket's primary page buffer. The buffers are returned in the same state.
1115 * (The metapage is only touched if it becomes necessary to add or remove overflow pages.)
1119 _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
1120 uint32 maxbucket, uint32 highmask, uint32 lowmask)
1124 Buffer bucket_nbuf = InvalidBuffer;
1128 BlockNumber bucket_nblkno;
1129 HashPageOpaque npageopaque;
1133 /* Initialize the hash table used to track TIDs */
1134 memset(&hash_ctl, 0, sizeof(hash_ctl));
1135 hash_ctl.keysize = sizeof(ItemPointerData);
1136 hash_ctl.entrysize = sizeof(ItemPointerData);
1137 hash_ctl.hcxt = CurrentMemoryContext;
1140 tidhtab = hash_create("bucket ctids",
1141 256, /* arbitrary initial size */
1142 &hash_ctl,
1143 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1145 bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
1148 * Scan the new bucket and build a hash table of its TIDs.
1152 OffsetNumber noffnum;
1153 OffsetNumber nmaxoffnum;
1155 nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
1156 LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
1158 /* Remember the primary bucket buffer so we can acquire a cleanup lock on it later. */
1159 if (nblkno == bucket_nblkno)
1162 npage = BufferGetPage(nbuf);
1163 npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1165 /* Scan each tuple in new page */
1166 nmaxoffnum = PageGetMaxOffsetNumber(npage);
1167 for (noffnum = FirstOffsetNumber;
1168 noffnum <= nmaxoffnum;
1169 noffnum = OffsetNumberNext(noffnum))
1173 /* Fetch the item's TID and insert it into the hash table. */
1174 itup = (IndexTuple) PageGetItem(npage,
1175 PageGetItemId(npage, noffnum));
1177 (void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
1182 nblkno = npageopaque->hasho_nextblkno;
1185 * Release our lock without modifying the buffer, making sure to
1186 * retain the pin on the primary bucket.
1188 if (nbuf == bucket_nbuf)
1189 _hash_chgbufaccess(rel, nbuf, HASH_READ, HASH_NOLOCK);
1191 _hash_relbuf(rel, nbuf);
1193 /* Exit loop if no more overflow pages in new bucket */
1194 if (!BlockNumberIsValid(nblkno))
1199 * Conditionally get the cleanup lock on the old and new buckets to perform
1200 * the split operation. If we don't get the cleanup locks, silently give
1201 * up; the next insertion on the old bucket will try again to complete the split.
1204 if (!ConditionalLockBufferForCleanup(obuf))
1206 hash_destroy(tidhtab);
1209 if (!ConditionalLockBufferForCleanup(bucket_nbuf))
1211 _hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
1212 hash_destroy(tidhtab);
1216 npage = BufferGetPage(bucket_nbuf);
1217 npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1218 nbucket = npageopaque->hasho_bucket;
1220 _hash_splitbucket_guts(rel, metabuf, obucket,
1221 nbucket, obuf, bucket_nbuf, tidhtab,
1222 maxbucket, highmask, lowmask);
1224 _hash_relbuf(rel, bucket_nbuf);
1225 _hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
1226 hash_destroy(tidhtab);