/*-------------------------------------------------------------------------
 *
 * hashpage.c
 *	  Hash table page management code for the Postgres hash access method
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/hash/hashpage.c
 *
 * NOTES
 *	  Postgres hash pages look like ordinary relation pages.  The opaque
 *	  data at high addresses includes information about the page including
 *	  whether a page is an overflow page or a true bucket, the bucket
 *	  number, and the block numbers of the preceding and following pages
 *	  in the same bucket.
 *
 *	  The first page in a hash relation, page zero, is special -- it stores
 *	  information describing the hash table; it is referred to as the
 *	  "meta page."  Pages one and higher store the actual data.
 *
 *	  There are also bitmap pages, which are not manipulated here;
 *	  see hashovfl.c.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/hash.h"
#include "access/hash_xlog.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
					uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
				  Bucket obucket, Bucket nbucket,
				  Buffer obuf,
				  Buffer nbuf,
				  HTAB *htab,
				  uint32 maxbucket,
				  uint32 highmask, uint32 lowmask);
static void log_split_page(Relation rel, Buffer buf);
/*
 * We use high-concurrency locking on hash indexes (see README for an overview
 * of the locking rules).  However, we can skip taking lmgr locks when the
 * index is local to the current backend (ie, either temp or new in the
 * current transaction).  No one else can see it, so there's no reason to
 * take locks.  We still take buffer-level locks, but not lmgr locks.
 */
#define USELOCKING(rel)		(!RELATION_IS_LOCAL(rel))
/*
 *	_hash_getbuf() -- Get a buffer by block number for read or write.
 *
 *		'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
 *		'flags' is a bitwise OR of the allowed page types.
 *
 *		This must be used only to fetch pages that are expected to be valid
 *		already.  _hash_checkpage() is applied using the given flags.
 *
 *		When this routine returns, the appropriate lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
 *		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBuffer(rel, blkno);

	if (access != HASH_NOLOCK)
		LockBuffer(buf, access);

	/* ref count and lock type are correct */

	_hash_checkpage(rel, buf, flags);

	return buf;
}
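/*
 * Illustrative usage sketch (not part of the original file): callers pair
 * _hash_getbuf with _hash_relbuf, for example to inspect an overflow page:
 *
 *		buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
 *		page = BufferGetPage(buf);
 *		... examine the page ...
 *		_hash_relbuf(rel, buf);		(drops both the lock and the pin)
 */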
/*
 *	_hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup.
 *
 *		We read the page and try to acquire a cleanup lock.  If we get it,
 *		we return the buffer; otherwise, we return InvalidBuffer.
 */
Buffer
_hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBuffer(rel, blkno);

	if (!ConditionalLockBufferForCleanup(buf))
	{
		ReleaseBuffer(buf);
		return InvalidBuffer;
	}

	/* ref count and lock type are correct */

	_hash_checkpage(rel, buf, flags);

	return buf;
}
/*
 *	_hash_getinitbuf() -- Get and initialize a buffer by block number.
 *
 *		This must be used only to fetch pages that are known to be before
 *		the index's filesystem EOF, but are to be filled from scratch.
 *		_hash_pageinit() is applied automatically.  Otherwise it has
 *		effects similar to _hash_getbuf() with access = HASH_WRITE.
 *
 *		When this routine returns, a write lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
 *		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getinitbuf(Relation rel, BlockNumber blkno)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
							 NULL);

	/* ref count and lock type are correct */

	/* initialize the page */
	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

	return buf;
}
/*
 *	_hash_initbuf() -- Initialize a buffer with bucket number and flags.
 */
void
_hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
			  bool initpage)
{
	HashPageOpaque pageopaque;
	Page		page;

	page = BufferGetPage(buf);

	/* initialize the page */
	if (initpage)
		_hash_pageinit(page, BufferGetPageSize(buf));

	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);

	/*
	 * Set hasho_prevblkno with current hashm_maxbucket.  This value will be
	 * used to validate cached HashMetaPageData.  See
	 * _hash_getbucketbuf_from_hashkey().
	 */
	pageopaque->hasho_prevblkno = max_bucket;
	pageopaque->hasho_nextblkno = InvalidBlockNumber;
	pageopaque->hasho_bucket = num_bucket;
	pageopaque->hasho_flag = flag;
	pageopaque->hasho_page_id = HASHO_PAGE_ID;
}
/*
 *	_hash_getnewbuf() -- Get a new page at the end of the index.
 *
 *		This has the same API as _hash_getinitbuf, except that we are adding
 *		a page to the index, and hence expect the page to be past the
 *		logical EOF.  (However, we have to support the case where it isn't,
 *		since a prior try might have crashed after extending the filesystem
 *		EOF but before updating the metapage to reflect the added page.)
 *
 *		It is caller's responsibility to ensure that only one process can
 *		extend the index at a time.  In practice, this function is called
 *		only while holding write lock on the metapage, because adding a page
 *		is always associated with an update of metapage data.
 */
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
{
	BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");
	if (blkno > nblocks)
		elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
			 RelationGetRelationName(rel));

	/* smgr insists we use P_NEW to extend the relation */
	if (blkno == nblocks)
	{
		buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
		if (BufferGetBlockNumber(buf) != blkno)
			elog(ERROR, "unexpected hash relation size: %u, should be %u",
				 BufferGetBlockNumber(buf), blkno);
		LockBuffer(buf, HASH_WRITE);
	}
	else
	{
		buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
								 NULL);
	}

	/* ref count and lock type are correct */

	/* initialize the page */
	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

	return buf;
}
/*
 *	_hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
 *
 *		This is identical to _hash_getbuf() but also allows a buffer access
 *		strategy to be specified.  We use this for VACUUM operations.
 */
Buffer
_hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
						   int access, int flags,
						   BufferAccessStrategy bstrategy)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);

	if (access != HASH_NOLOCK)
		LockBuffer(buf, access);

	/* ref count and lock type are correct */

	_hash_checkpage(rel, buf, flags);

	return buf;
}
/*
 *	_hash_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.
 */
void
_hash_relbuf(Relation rel, Buffer buf)
{
	UnlockReleaseBuffer(buf);
}

/*
 *	_hash_dropbuf() -- release an unlocked buffer.
 *
 * This is used to unpin a buffer on which we hold no lock.
 */
void
_hash_dropbuf(Relation rel, Buffer buf)
{
	ReleaseBuffer(buf);
}
/*
 *	_hash_dropscanbuf() -- release buffers used in scan.
 *
 * This routine unpins the buffers used during scan on which we
 * hold no lock.
 */
void
_hash_dropscanbuf(Relation rel, HashScanOpaque so)
{
	/* release pin we hold on primary bucket page */
	if (BufferIsValid(so->hashso_bucket_buf) &&
		so->hashso_bucket_buf != so->hashso_curbuf)
		_hash_dropbuf(rel, so->hashso_bucket_buf);
	so->hashso_bucket_buf = InvalidBuffer;

	/* release pin we hold on primary bucket page of bucket being split */
	if (BufferIsValid(so->hashso_split_bucket_buf) &&
		so->hashso_split_bucket_buf != so->hashso_curbuf)
		_hash_dropbuf(rel, so->hashso_split_bucket_buf);
	so->hashso_split_bucket_buf = InvalidBuffer;

	/* release any pin we still hold */
	if (BufferIsValid(so->hashso_curbuf))
		_hash_dropbuf(rel, so->hashso_curbuf);
	so->hashso_curbuf = InvalidBuffer;

	/* reset split scan */
	so->hashso_buc_populated = false;
	so->hashso_buc_split = false;
}
/*
 *	_hash_init() -- Initialize the metadata page of a hash index,
 *				the initial buckets, and the initial bitmap page.
 *
 * The initial number of buckets is dependent on num_tuples, an estimate
 * of the number of tuples to be loaded into the index initially.  The
 * chosen number of buckets is returned.
 *
 * We are fairly cavalier about locking here, since we know that no one else
 * could be accessing this index.  In particular the rule about not holding
 * multiple buffer locks is ignored.
 */
uint32
_hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
{
	Buffer		metabuf;
	Buffer		buf;
	Buffer		bitmapbuf;
	Page		pg;
	HashMetaPage metap;
	RegProcedure procid;
	int32		data_width;
	int32		item_width;
	int32		ffactor;
	uint32		num_buckets;
	uint32		i;

	/* safety check */
	if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
		elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
			 RelationGetRelationName(rel));

	/*
	 * Determine the target fill factor (in tuples per bucket) for this index.
	 * The idea is to make the fill factor correspond to pages about as full
	 * as the user-settable fillfactor parameter says.  We can compute it
	 * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
	 */
	data_width = sizeof(uint32);
	item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
		sizeof(ItemIdData);		/* include the line pointer */
	ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
	/* keep to a sane range */
	if (ffactor < 10)
		ffactor = 10;
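	/*
	 * Worked example (illustrative, not from the original source): with 8 kB
	 * pages, 8-byte MAXALIGN, and the default fillfactor of 75, item_width is
	 * MAXALIGN(8) + MAXALIGN(4) + 4 = 20 bytes, so ffactor comes out to about
	 * (8192 * 0.75) / 20 = 307 tuples per bucket page.
	 */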
	procid = index_getprocid(rel, 1, HASHPROC);

	/*
	 * We initialize the metapage, the first N bucket pages, and the first
	 * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
	 * calls to occur.  This ensures that the smgr level has the right idea of
	 * the physical index length.
	 *
	 * Critical section not required, because on error the creation of the
	 * whole relation will be rolled back.
	 */
	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
	_hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
	MarkBufferDirty(metabuf);

	pg = BufferGetPage(metabuf);
	metap = HashPageGetMeta(pg);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		xl_hash_init_meta_page xlrec;
		XLogRecPtr	recptr;

		xlrec.num_tuples = num_tuples;
		xlrec.procid = metap->hashm_procid;
		xlrec.ffactor = metap->hashm_ffactor;

		XLogBeginInsert();
		XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage);
		XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);

		PageSetLSN(BufferGetPage(metabuf), recptr);
	}

	num_buckets = metap->hashm_maxbucket + 1;
	/*
	 * Release buffer lock on the metapage while we initialize buckets.
	 * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
	 * won't accomplish anything.  It's a bad idea to hold buffer locks for
	 * long intervals in any case, since that can block the bgwriter.
	 */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

	/*
	 * Initialize and WAL Log the first N buckets
	 */
	for (i = 0; i < num_buckets; i++)
	{
		BlockNumber blkno;

		/* Allow interrupts, in case N is huge */
		CHECK_FOR_INTERRUPTS();

		blkno = BUCKET_TO_BLKNO(metap, i);
		buf = _hash_getnewbuf(rel, blkno, forkNum);
		_hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
		MarkBufferDirty(buf);

		if (RelationNeedsWAL(rel))
			log_newpage(&rel->rd_node,
						forkNum,
						blkno,
						BufferGetPage(buf),
						true);

		_hash_relbuf(rel, buf);
	}

	/* Now reacquire buffer lock on metapage */
	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
	/*
	 * Initialize bitmap page
	 */
	bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
	_hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
	MarkBufferDirty(bitmapbuf);

	/* add the new bitmap page to the metapage's list of bitmaps */
	/* metapage already has a write lock */
	if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("out of overflow pages in hash index \"%s\"",
						RelationGetRelationName(rel))));

	metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;

	metap->hashm_nmaps++;
	MarkBufferDirty(metabuf);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		xl_hash_init_bitmap_page xlrec;
		XLogRecPtr	recptr;

		xlrec.bmsize = metap->hashm_bmsize;

		XLogBeginInsert();
		XLogRegisterData((char *) &xlrec, SizeOfHashInitBitmapPage);
		XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);

		/*
		 * This is safe only because nobody else can be modifying the index at
		 * this stage; it's only visible to the transaction that is creating
		 * it.
		 */
		XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);

		PageSetLSN(BufferGetPage(bitmapbuf), recptr);
		PageSetLSN(BufferGetPage(metabuf), recptr);
	}

	/* all done */
	_hash_relbuf(rel, bitmapbuf);
	_hash_relbuf(rel, metabuf);

	return num_buckets;
}
/*
 *	_hash_init_metabuffer() -- Initialize the metadata page of a hash index.
 */
void
_hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
					  uint16 ffactor, bool initpage)
{
	HashMetaPage metap;
	HashPageOpaque pageopaque;
	Page		page;
	double		dnumbuckets;
	uint32		num_buckets;
	uint32		spare_index;
	uint32		i;

	/*
	 * Choose the number of initial bucket pages to match the fill factor
	 * given the estimated number of tuples.  We round up the result to the
	 * total number of buckets which has to be allocated before using its
	 * _hashm_spare element.  However always force at least 2 bucket pages.
	 * The upper limit is determined by considerations explained in
	 * _hash_expandtable().
	 */
	dnumbuckets = num_tuples / ffactor;
	if (dnumbuckets <= 2.0)
		num_buckets = 2;
	else if (dnumbuckets >= (double) 0x40000000)
		num_buckets = 0x40000000;
	else
		num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));

	spare_index = _hash_spareindex(num_buckets);
	Assert(spare_index < HASH_MAX_SPLITPOINTS);
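	/*
	 * Worked example (illustrative, not from the original source): an
	 * estimate of 100,000 tuples with ffactor = 307 gives dnumbuckets of
	 * about 326; _hash_spareindex(326) is splitpoint 9, and
	 * _hash_get_totalbuckets(9) rounds the initial allocation up to 512
	 * bucket pages.
	 */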
	page = BufferGetPage(buf);
	if (initpage)
		_hash_pageinit(page, BufferGetPageSize(buf));

	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
	pageopaque->hasho_prevblkno = InvalidBlockNumber;
	pageopaque->hasho_nextblkno = InvalidBlockNumber;
	pageopaque->hasho_bucket = -1;
	pageopaque->hasho_flag = LH_META_PAGE;
	pageopaque->hasho_page_id = HASHO_PAGE_ID;

	metap = HashPageGetMeta(page);

	metap->hashm_magic = HASH_MAGIC;
	metap->hashm_version = HASH_VERSION;
	metap->hashm_ntuples = 0;
	metap->hashm_nmaps = 0;
	metap->hashm_ffactor = ffactor;
	metap->hashm_bsize = HashGetMaxBitmapSize(page);
	/* find largest bitmap array size that will fit in page size */
	for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
	{
		if ((1 << i) <= metap->hashm_bsize)
			break;
	}
	Assert(i > 0);
	metap->hashm_bmsize = 1 << i;
	metap->hashm_bmshift = i + BYTE_TO_BIT;
	Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
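	/*
	 * Illustrative numbers (assuming 8 kB pages, not from the original
	 * source): hashm_bsize is BLCKSZ minus page header and special-space
	 * overhead, roughly 8152 bytes, so the loop settles on hashm_bmsize =
	 * 4096 bytes = 32768 bits.  Each bitmap page can therefore track the
	 * status of 32768 overflow pages, and hashm_bmshift becomes
	 * 12 + BYTE_TO_BIT = 15.
	 */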
	/*
	 * Label the index with its primary hash support function's OID.  This is
	 * pretty useless for normal operation (in fact, hashm_procid is not used
	 * anywhere), but it might be handy for forensic purposes so we keep it.
	 */
	metap->hashm_procid = procid;

	/*
	 * We initialize the index with N buckets, 0 .. N-1, occupying physical
	 * blocks 1 to N.  The first freespace bitmap page is in block N+1.
	 */
	metap->hashm_maxbucket = num_buckets - 1;

	/*
	 * Set highmask as next immediate ((2 ^ x) - 1), which should be
	 * sufficient to cover num_buckets.
	 */
	metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
	metap->hashm_lowmask = (metap->hashm_highmask >> 1);
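	/*
	 * For example (illustrative, not from the original source): with
	 * num_buckets = 512, _hash_log2(513) = 10, so hashm_highmask = 1023 and
	 * hashm_lowmask = 511.  A hash value is first masked with highmask, and
	 * remapped with lowmask when the result exceeds hashm_maxbucket.
	 */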
	MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
	MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

	/* Set up mapping for one spare page after the initial splitpoints */
	metap->hashm_spares[spare_index] = 1;
	metap->hashm_ovflpoint = spare_index;
	metap->hashm_firstfree = 0;

	/*
	 * Set pd_lower just past the end of the metadata.  This is needed so that
	 * xloginsert.c can log a compact full-page image of the metapage.
	 */
	((PageHeader) page)->pd_lower =
		((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
}
/*
 *	_hash_pageinit() -- Initialize a new hash index page.
 */
void
_hash_pageinit(Page page, Size size)
{
	PageInit(page, size, sizeof(HashPageOpaqueData));
}
/*
 * Attempt to expand the hash table by creating one new bucket.
 *
 * This will silently do nothing if we don't get cleanup lock on old or
 * new bucket.
 *
 * Complete the pending splits and remove the tuples from old bucket,
 * if there are any left over from the previous split.
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.
 */
void
_hash_expandtable(Relation rel, Buffer metabuf)
{
	HashMetaPage metap;
	Bucket		old_bucket;
	Bucket		new_bucket;
	uint32		spare_ndx;
	BlockNumber start_oblkno;
	BlockNumber start_nblkno;
	Buffer		buf_nblkno;
	Buffer		buf_oblkno;
	Page		opage;
	Page		npage;
	HashPageOpaque oopaque;
	HashPageOpaque nopaque;
	uint32		maxbucket;
	uint32		highmask;
	uint32		lowmask;
	bool		metap_update_masks = false;
	bool		metap_update_splitpoint = false;

restart_expand:

	/*
	 * Write-lock the meta page.  It used to be necessary to acquire a
	 * heavyweight lock to begin a split, but that is no longer required.
	 */
	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

	_hash_checkpage(rel, metabuf, LH_META_PAGE);
	metap = HashPageGetMeta(BufferGetPage(metabuf));

	/*
	 * Check to see if split is still needed; someone else might have already
	 * done one while we waited for the lock.
	 *
	 * Make sure this stays in sync with _hash_doinsert()
	 */
	if (metap->hashm_ntuples <=
		(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
		goto fail;

	/*
	 * Can't split anymore if maxbucket has reached its maximum possible
	 * value.
	 *
	 * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
	 * the calculation maxbucket+1 mustn't overflow).  Currently we restrict
	 * to half that because of overflow looping in _hash_log2() and
	 * insufficient space in hashm_spares[].  It's moot anyway because an
	 * index with 2^32 buckets would certainly overflow BlockNumber and hence
	 * _hash_alloc_buckets() would fail, but if we supported buckets smaller
	 * than a disk block then this would be an independent constraint.
	 *
	 * If you change this, see also the maximum initial number of buckets in
	 * _hash_init().
	 */
	if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
		goto fail;
	/*
	 * Determine which bucket is to be split, and attempt to take cleanup lock
	 * on the old bucket.  If we can't get the lock, give up.
	 *
	 * The cleanup lock protects us not only against other backends, but
	 * against our own backend as well.
	 *
	 * The cleanup lock is mainly to protect the split from concurrent
	 * inserts.  See src/backend/access/hash/README, Lock Definitions for
	 * further details.  Due to this locking restriction, if there is any
	 * pending scan, the split will give up, which is not good but harmless.
	 */
	new_bucket = metap->hashm_maxbucket + 1;

	old_bucket = (new_bucket & metap->hashm_lowmask);

	start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
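	/*
	 * For example (illustrative, not from the original source): if
	 * hashm_maxbucket is 10 (highmask 15, lowmask 7), then new_bucket is 11
	 * and old_bucket is 11 & 7 = 3; bucket 3's tuples will be redistributed
	 * between buckets 3 and 11.
	 */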
	buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
	if (!buf_oblkno)
		goto fail;

	opage = BufferGetPage(buf_oblkno);
	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

	/*
	 * We want to finish any pending split from this bucket, as there is no
	 * apparent benefit in not doing so, and finishing a split that involves
	 * multiple buckets would complicate the code, considering the case where
	 * the new split also fails.  We don't need to consider the new bucket for
	 * completing the split here, as it is not possible that a re-split of the
	 * new bucket starts while there is still a pending split from the old
	 * bucket.
	 */
	if (H_BUCKET_BEING_SPLIT(oopaque))
	{
		/*
		 * Copy bucket mapping info now; refer to the comment in code below
		 * where we copy this information before calling _hash_splitbucket to
		 * see why this is okay.
		 */
		maxbucket = metap->hashm_maxbucket;
		highmask = metap->hashm_highmask;
		lowmask = metap->hashm_lowmask;

		/*
		 * Release the lock on metapage and old_bucket, before completing the
		 * split.
		 */
		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
		LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);

		_hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
						   highmask, lowmask);

		/* release the pin on old buffer and retry for expand. */
		_hash_dropbuf(rel, buf_oblkno);

		goto restart_expand;
	}
	/*
	 * Clean the tuples remaining from the previous split.  This operation
	 * requires a cleanup lock, and we already have one on the old bucket, so
	 * let's do it.  We also don't want to allow further splits from the
	 * bucket till the garbage of the previous split is cleaned.  This has two
	 * advantages: first, it helps in avoiding the bloat due to garbage, and
	 * second, during cleanup of the bucket we are always sure that the
	 * garbage tuples belong to the most recently split bucket.  On the
	 * contrary, if we allowed cleanup of the bucket after the meta page is
	 * updated to indicate the new split and before the actual split, the
	 * cleanup operation wouldn't be able to decide whether a tuple has been
	 * moved to the newly created bucket, and could end up deleting such
	 * tuples.
	 */
	if (H_NEEDS_SPLIT_CLEANUP(oopaque))
	{
		/*
		 * Copy bucket mapping info now; refer to the comment in code below
		 * where we copy this information before calling _hash_splitbucket to
		 * see why this is okay.
		 */
		maxbucket = metap->hashm_maxbucket;
		highmask = metap->hashm_highmask;
		lowmask = metap->hashm_lowmask;

		/* Release the metapage lock. */
		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

		hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
						  maxbucket, highmask, lowmask, NULL, NULL, true,
						  NULL, NULL);

		_hash_dropbuf(rel, buf_oblkno);

		goto restart_expand;
	}
	/*
	 * There shouldn't be any active scan on new bucket.
	 *
	 * Note: it is safe to compute the new bucket's blkno here, even though we
	 * may still need to update the BUCKET_TO_BLKNO mapping.  This is because
	 * the current value of hashm_spares[hashm_ovflpoint] correctly shows
	 * where we are going to put a new splitpoint's worth of buckets.
	 */
	start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

	/*
	 * If the split point is increasing we need to allocate a new batch of
	 * bucket pages.
	 */
	spare_ndx = _hash_spareindex(new_bucket + 1);
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		uint32		buckets_to_add;

		Assert(spare_ndx == metap->hashm_ovflpoint + 1);

		/*
		 * We treat allocation of buckets as a separate WAL-logged action.
		 * Even if we fail after this operation, we won't leak bucket pages;
		 * rather, the next split will consume this space.  In any case, even
		 * without failure we don't use all the space in one split operation.
		 */
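		/*
		 * Illustrative example (not from the original source): under the
		 * phased splitpoint scheme, larger doublings are allocated a quarter
		 * at a time.  When bucket 512 is about to be created, spare_ndx
		 * advances to phase 10 and only pages for buckets 512..639 are
		 * allocated here (buckets_to_add = 128), rather than the whole
		 * doubling up to 1024.
		 */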
		buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
		if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
		{
			/* can't split due to BlockNumber overflow */
			_hash_relbuf(rel, buf_oblkno);
			goto fail;
		}
	}

	/*
	 * Physically allocate the new bucket's primary page.  We want to do this
	 * before changing the metapage's mapping info, in case we can't get the
	 * disk space.  Ideally, we don't need to check for cleanup lock on new
	 * bucket as no other backend could find this bucket unless meta page is
	 * updated.  However, it is good to be consistent with old bucket locking.
	 */
	buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
	if (!IsBufferCleanupOK(buf_nblkno))
	{
		_hash_relbuf(rel, buf_oblkno);
		_hash_relbuf(rel, buf_nblkno);
		goto fail;
	}
	/*
	 * Since we are scribbling on the pages in the shared buffers, establish a
	 * critical section.  Any failure in this next code leaves us with a big
	 * problem: the metapage is effectively corrupt but could get written back
	 * to disk.
	 */
	START_CRIT_SECTION();

	/*
	 * Okay to proceed with split.  Update the metapage bucket mapping info.
	 */
	metap->hashm_maxbucket = new_bucket;

	if (new_bucket > metap->hashm_highmask)
	{
		/* Starting a new doubling */
		metap->hashm_lowmask = metap->hashm_highmask;
		metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
		metap_update_masks = true;
	}

	/*
	 * If the split point is increasing we need to adjust the hashm_spares[]
	 * array and hashm_ovflpoint so that future overflow pages will be created
	 * beyond this new batch of bucket pages.
	 */
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
		metap->hashm_ovflpoint = spare_ndx;
		metap_update_splitpoint = true;
	}

	MarkBufferDirty(metabuf);
	/*
	 * Copy bucket mapping info now; this saves re-accessing the meta page
	 * inside _hash_splitbucket's inner loop.  Note that once we drop the
	 * split lock, other splits could begin, so these values might be out of
	 * date before _hash_splitbucket finishes.  That's okay, since all it
	 * needs is to tell which of these two buckets to map hashkeys into.
	 */
	maxbucket = metap->hashm_maxbucket;
	highmask = metap->hashm_highmask;
	lowmask = metap->hashm_lowmask;

	opage = BufferGetPage(buf_oblkno);
	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

	/*
	 * Mark the old bucket to indicate that split is in progress.  (At
	 * operation end, we will clear the split-in-progress flag.)  Also, for a
	 * primary bucket page, hasho_prevblkno stores the number of buckets that
	 * existed as of the last split, so we must update that value here.
	 */
	oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
	oopaque->hasho_prevblkno = maxbucket;

	MarkBufferDirty(buf_oblkno);

	npage = BufferGetPage(buf_nblkno);

	/*
	 * Initialize the new bucket's primary page and mark it to indicate that
	 * split is in progress.
	 */
	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
	nopaque->hasho_prevblkno = maxbucket;
	nopaque->hasho_nextblkno = InvalidBlockNumber;
	nopaque->hasho_bucket = new_bucket;
	nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED;
	nopaque->hasho_page_id = HASHO_PAGE_ID;

	MarkBufferDirty(buf_nblkno);
	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		xl_hash_split_allocate_page xlrec;
		XLogRecPtr	recptr;

		xlrec.new_bucket = maxbucket;
		xlrec.old_bucket_flag = oopaque->hasho_flag;
		xlrec.new_bucket_flag = nopaque->hasho_flag;
		xlrec.flags = 0;

		XLogBeginInsert();

		XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
		XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
		XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);

		if (metap_update_masks)
		{
			xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
			XLogRegisterBufData(2, (char *) &metap->hashm_lowmask, sizeof(uint32));
			XLogRegisterBufData(2, (char *) &metap->hashm_highmask, sizeof(uint32));
		}

		if (metap_update_splitpoint)
		{
			xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
			XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint,
								sizeof(uint32));
			XLogRegisterBufData(2,
								(char *) &metap->hashm_spares[metap->hashm_ovflpoint],
								sizeof(uint32));
		}

		XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);

		PageSetLSN(BufferGetPage(buf_oblkno), recptr);
		PageSetLSN(BufferGetPage(buf_nblkno), recptr);
		PageSetLSN(BufferGetPage(metabuf), recptr);
	}

	END_CRIT_SECTION();
	/* drop lock, but keep pin */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

	/* Relocate records to the new bucket */
	_hash_splitbucket(rel, metabuf,
					  old_bucket, new_bucket,
					  buf_oblkno, buf_nblkno, NULL,
					  maxbucket, highmask, lowmask);

	/* all done, now release the locks and pins on primary buckets. */
	_hash_relbuf(rel, buf_oblkno);
	_hash_relbuf(rel, buf_nblkno);

	return;

	/* Here if we decide not to split or fail to acquire old bucket lock */
fail:

	/* We didn't write the metapage, so just drop lock */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}
/*
 *	_hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
 *
 * This does not need to initialize the new bucket pages; we'll do that as
 * each one is used by _hash_expandtable().  But we have to extend the logical
 * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
 * sync with ours, so that we don't get complaints from smgr.
 *
 * We do this by writing a page of zeroes at the end of the splitpoint range.
 * We expect that the filesystem will ensure that the intervening pages read
 * as zeroes too.  On many filesystems this "hole" will not be allocated
 * immediately, which means that the index file may end up more fragmented
 * than if we forced it all to be allocated now; but since we don't scan
 * hash indexes sequentially anyway, that probably doesn't matter.
 *
 * XXX It's annoying that this code is executed with the metapage lock held.
 * We need to interlock against _hash_addovflpage() adding a new overflow page
 * concurrently, but it'd likely be better to use LockRelationForExtension
 * for the purpose.  OTOH, adding a splitpoint is a very infrequent operation,
 * so it may not be worth worrying about.
 *
 * Returns TRUE if successful, or FALSE if allocation failed due to
 * BlockNumber overflow.
 */
static bool
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
{
	BlockNumber lastblock;
	char		zerobuf[BLCKSZ];
	Page		page;
	HashPageOpaque ovflopaque;

	lastblock = firstblock + nblocks - 1;

	/*
	 * Check for overflow in block number calculation; if so, we cannot extend
	 * the index anymore.
	 */
	if (lastblock < firstblock || lastblock == InvalidBlockNumber)
		return false;

	page = (Page) zerobuf;

	/*
	 * Initialize the page.  Just zeroing the page won't work; see
	 * _hash_freeovflpage for similar usage.  We take care to make the special
	 * space valid for the benefit of tools such as pageinspect.
	 */
	_hash_pageinit(page, BLCKSZ);

	ovflopaque = (HashPageOpaque) PageGetSpecialPointer(page);

	ovflopaque->hasho_prevblkno = InvalidBlockNumber;
	ovflopaque->hasho_nextblkno = InvalidBlockNumber;
	ovflopaque->hasho_bucket = -1;
	ovflopaque->hasho_flag = LH_UNUSED_PAGE;
	ovflopaque->hasho_page_id = HASHO_PAGE_ID;

	if (RelationNeedsWAL(rel))
		log_newpage(&rel->rd_node,
					MAIN_FORKNUM,
					lastblock,
					zerobuf,
					true);

	RelationOpenSmgr(rel);
	smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);

	return true;
}
/*
 * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
 *
 * This routine is used to partition the tuples between old and new bucket and
 * is also used to finish incomplete split operations.  To finish a previously
 * interrupted split operation, the caller needs to fill htab.  If htab is
 * set, then we skip the movement of tuples that exist in htab; otherwise a
 * NULL value of htab indicates movement of all the tuples that belong to the
 * new bucket.
 *
 * We are splitting a bucket that consists of a base bucket page and zero
 * or more overflow (bucket chain) pages.  We must relocate tuples that
 * belong in the new bucket.
 *
 * The caller must hold cleanup locks on both buckets to ensure that
 * no one else is trying to access them (see README).
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.  (The metapage is only
 * touched if it becomes necessary to add or remove overflow pages.)
 *
 * Split needs to retain pin on primary bucket pages of both old and new
 * buckets till end of operation.  This is to prevent vacuum from starting
 * while a split is in progress.
 *
 * In addition, the caller must have created the new bucket's base page,
 * which is passed in buffer nbuf, pinned and write-locked.  That lock and
 * pin are released here.  (The API is set up this way because we must do
 * _hash_getnewbuf() before releasing the metapage write lock.  So instead of
 * passing the new bucket's start block number, we pass an actual buffer.)
 */
static void
_hash_splitbucket(Relation rel,
				  Buffer metabuf,
				  Bucket obucket,
				  Bucket nbucket,
				  Buffer obuf,
				  Buffer nbuf,
				  HTAB *htab,
				  uint32 maxbucket,
				  uint32 highmask,
				  uint32 lowmask)
{
	Buffer		bucket_obuf;
	Buffer		bucket_nbuf;
	Page		opage;
	Page		npage;
	HashPageOpaque oopaque;
	HashPageOpaque nopaque;
	OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
	IndexTuple	itups[MaxIndexTuplesPerPage];
	Size		all_tups_size = 0;
	int			i;
	uint16		nitups = 0;

	bucket_obuf = obuf;
	opage = BufferGetPage(obuf);
	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

	bucket_nbuf = nbuf;
	npage = BufferGetPage(nbuf);
	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
	/*
	 * Partition the tuples in the old bucket between the old bucket and the
	 * new bucket, advancing along the old bucket's overflow bucket chain and
	 * adding overflow pages to the new bucket as needed.  Outer loop iterates
	 * once per page in old bucket.
	 */
	for (;;)
	{
		BlockNumber oblkno;
		OffsetNumber ooffnum;
		OffsetNumber omaxoffnum;

		/* Scan each tuple in old page */
		omaxoffnum = PageGetMaxOffsetNumber(opage);
		for (ooffnum = FirstOffsetNumber;
			 ooffnum <= omaxoffnum;
			 ooffnum = OffsetNumberNext(ooffnum))
		{
			IndexTuple	itup;
			Size		itemsz;
			Bucket		bucket;
			bool		found = false;

			/* skip dead tuples */
			if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
				continue;

			/*
			 * Before inserting a tuple, probe the hash table containing TIDs
			 * of tuples belonging to new bucket; if we find a match, then
			 * skip that tuple, else fetch the item's hash key (conveniently
			 * stored in the item) and determine which bucket it now belongs
			 * in.
			 */
			itup = (IndexTuple) PageGetItem(opage,
											PageGetItemId(opage, ooffnum));

			if (htab)
				(void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);

			if (found)
				continue;

			bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
										  maxbucket, highmask, lowmask);
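			/*
			 * For example (illustrative, not from the original source): with
			 * maxbucket = 11, highmask = 15, and lowmask = 7, a tuple whose
			 * hash key ends in binary 1011 maps to bucket 11 (the new
			 * bucket), while one ending in 0011 maps to bucket 3 and stays
			 * put.
			 */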
			if (bucket == nbucket)
			{
				IndexTuple	new_itup;

				/*
				 * make a copy of index tuple as we have to scribble on it.
				 */
				new_itup = CopyIndexTuple(itup);

				/*
				 * mark the index tuple as moved by split, such tuples are
				 * skipped by scan if there is split in progress for a bucket.
				 */
				new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;

				/*
				 * insert the tuple into the new bucket.  if it doesn't fit on
				 * the current page in the new bucket, we must allocate a new
				 * overflow page and place the tuple on that page instead.
				 */
				itemsz = IndexTupleDSize(*new_itup);
				itemsz = MAXALIGN(itemsz);

				if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
				{
					/*
					 * Change the shared buffer state in critical section,
					 * otherwise any error could make it unrecoverable.
					 */
					START_CRIT_SECTION();

					_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
					MarkBufferDirty(nbuf);
					/* log the split operation before releasing the lock */
					log_split_page(rel, nbuf);

					END_CRIT_SECTION();

					/* drop lock, but keep pin */
					LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);

					/* be tidy */
					for (i = 0; i < nitups; i++)
						pfree(itups[i]);
					nitups = 0;
					all_tups_size = 0;

					/* chain to a new overflow page */
					nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
					npage = BufferGetPage(nbuf);
					nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
				}

				itups[nitups++] = new_itup;
				all_tups_size += itemsz;
			}
			else
			{
				/*
				 * the tuple stays on this page, so nothing to do.
				 */
				Assert(bucket == obucket);
			}
		}

		oblkno = oopaque->hasho_nextblkno;

		/* retain the pin on the old primary bucket */
		if (obuf == bucket_obuf)
			LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
		else
			_hash_relbuf(rel, obuf);

		/* Exit loop if no more overflow pages in old bucket */
		if (!BlockNumberIsValid(oblkno))
		{
			/*
			 * Change the shared buffer state in critical section, otherwise
			 * any error could make it unrecoverable.
			 */
			START_CRIT_SECTION();

			_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
			MarkBufferDirty(nbuf);
			/* log the split operation before releasing the lock */
			log_split_page(rel, nbuf);

			END_CRIT_SECTION();

			if (nbuf == bucket_nbuf)
				LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
			else
				_hash_relbuf(rel, nbuf);

			/* be tidy */
			for (i = 0; i < nitups; i++)
				pfree(itups[i]);
			break;
		}

		/* Else, advance to next old page */
		obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
		opage = BufferGetPage(obuf);
		oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
	}
	/*
	 * We're at the end of the old bucket chain, so we're done partitioning
	 * the tuples.  Mark the old and new buckets to indicate split is
	 * finished.
	 *
	 * To avoid deadlocks due to locking order of buckets, first lock the old
	 * bucket and then the new bucket.
	 */
	LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
	opage = BufferGetPage(bucket_obuf);
	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

	LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
	npage = BufferGetPage(bucket_nbuf);
	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);

	START_CRIT_SECTION();
	oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
	nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;

	/*
	 * After the split is finished, mark the old bucket to indicate that it
	 * contains deletable tuples.  Vacuum will clear split-cleanup flag after
	 * deleting such tuples.
	 */
	oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;

	/*
	 * Now write the buffers; we don't release the locks here, as the caller
	 * is responsible for releasing them.
	 */
	MarkBufferDirty(bucket_obuf);
	MarkBufferDirty(bucket_nbuf);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr;
		xl_hash_split_complete xlrec;

		xlrec.old_bucket_flag = oopaque->hasho_flag;
		xlrec.new_bucket_flag = nopaque->hasho_flag;

		XLogBeginInsert();

		XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete);

		XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
		XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);

		PageSetLSN(BufferGetPage(bucket_obuf), recptr);
		PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
	}

	END_CRIT_SECTION();
}
/*
 *	_hash_finish_split() -- Finish the previously interrupted split operation
 *
 * To complete the split operation, we build a hash table of the TIDs already
 * present in the new bucket; the split operation then uses it to skip tuples
 * that were already moved before the split was interrupted.
 *
 * The caller must hold a pin, but no lock, on the metapage and old bucket's
 * primary page buffer.  The buffers are returned in the same state.  (The
 * metapage is only touched if it becomes necessary to add or remove overflow
 * pages.)
 */
void
_hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
				   uint32 maxbucket, uint32 highmask, uint32 lowmask)
{
	HASHCTL		hash_ctl;
	HTAB	   *tidhtab;
	Buffer		bucket_nbuf = InvalidBuffer;
	Buffer		nbuf;
	Page		npage;
	BlockNumber nblkno;
	BlockNumber bucket_nblkno;
	HashPageOpaque npageopaque;
	Bucket		nbucket;
	bool		found;

	/* Initialize hash tables used to track TIDs */
	memset(&hash_ctl, 0, sizeof(hash_ctl));
	hash_ctl.keysize = sizeof(ItemPointerData);
	hash_ctl.entrysize = sizeof(ItemPointerData);
	hash_ctl.hcxt = CurrentMemoryContext;

	tidhtab =
		hash_create("bucket ctids",
					256,		/* arbitrary initial size */
					&hash_ctl,
					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
	/*
	 * Scan the new bucket and build hash table of TIDs
	 */
	for (;;)
	{
		OffsetNumber noffnum;
		OffsetNumber nmaxoffnum;

		nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
							LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);

		/* remember the primary bucket buffer to acquire cleanup lock on it. */
		if (nblkno == bucket_nblkno)
			bucket_nbuf = nbuf;

		npage = BufferGetPage(nbuf);
		npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);

		/* Scan each tuple in new page */
		nmaxoffnum = PageGetMaxOffsetNumber(npage);
		for (noffnum = FirstOffsetNumber;
			 noffnum <= nmaxoffnum;
			 noffnum = OffsetNumberNext(noffnum))
		{
			IndexTuple	itup;

			/* Fetch the item's TID and insert it in hash table. */
			itup = (IndexTuple) PageGetItem(npage,
											PageGetItemId(npage, noffnum));

			(void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);

			Assert(!found);
		}

		nblkno = npageopaque->hasho_nextblkno;

		/*
		 * Release our lock without modifying the buffer, and make sure to
		 * retain the pin on the primary bucket page.
		 */
		if (nbuf == bucket_nbuf)
			LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
		else
			_hash_relbuf(rel, nbuf);

		/* Exit loop if no more overflow pages in new bucket */
		if (!BlockNumberIsValid(nblkno))
			break;
	}
	/*
	 * Conditionally get the cleanup lock on old and new buckets to perform
	 * the split operation.  If we don't get the cleanup locks, silently give
	 * up; the next insertion on the old bucket will try again to complete the
	 * split.
	 */
	if (!ConditionalLockBufferForCleanup(obuf))
	{
		hash_destroy(tidhtab);
		return;
	}
	if (!ConditionalLockBufferForCleanup(bucket_nbuf))
	{
		LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
		hash_destroy(tidhtab);
		return;
	}

	npage = BufferGetPage(bucket_nbuf);
	npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
	nbucket = npageopaque->hasho_bucket;

	_hash_splitbucket(rel, metabuf, obucket,
					  nbucket, obuf, bucket_nbuf, tidhtab,
					  maxbucket, highmask, lowmask);

	_hash_relbuf(rel, bucket_nbuf);
	LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
	hash_destroy(tidhtab);
}
/*
 *	log_split_page() -- Log the split operation
 *
 *	We log the split operation when the new page in new bucket gets full,
 *	so we log the entire page.
 *
 *	'buf' must be locked by the caller which is also responsible for unlocking
 *	it.
 */
static void
log_split_page(Relation rel, Buffer buf)
{
	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr;

		XLogBeginInsert();

		XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);

		PageSetLSN(BufferGetPage(buf), recptr);
	}
}
/*
 *	_hash_getcachedmetap() -- Returns cached metapage data.
 *
 *	If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
 *	the metapage.  If not set, we'll set it before returning if we have to
 *	refresh the cache, and return with a pin but no lock on it; caller is
 *	responsible for releasing the pin.
 *
 *	We refresh the cache if it's not initialized yet or force_refresh is true.
 */
HashMetaPage
_hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
{
	Page		page;

	Assert(metabuf);
	if (force_refresh || rel->rd_amcache == NULL)
	{
		char	   *cache = NULL;

		/*
		 * It's important that we don't set rd_amcache to an invalid value.
		 * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
		 * install a pointer to the newly-allocated storage in the actual
		 * relcache entry until both have succeeded.
		 */
		if (rel->rd_amcache == NULL)
			cache = MemoryContextAlloc(rel->rd_indexcxt,
									   sizeof(HashMetaPageData));

		/* Read the metapage. */
		if (BufferIsValid(*metabuf))
			LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
		else
			*metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
									LH_META_PAGE);
		page = BufferGetPage(*metabuf);

		/* Populate the cache. */
		if (rel->rd_amcache == NULL)
			rel->rd_amcache = cache;
		memcpy(rel->rd_amcache, HashPageGetMeta(page),
			   sizeof(HashMetaPageData));

		/* Release metapage lock, but keep the pin. */
		LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
	}

	return (HashMetaPage) rel->rd_amcache;
}
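/*
 * Illustrative call pattern (not part of the original file): a caller that
 * wants the cached metapage contents can do
 *
 *		Buffer		metabuf = InvalidBuffer;
 *		HashMetaPage metap = _hash_getcachedmetap(rel, &metabuf, false);
 *
 *		... use metap->hashm_maxbucket etc. ...
 *
 *		if (BufferIsValid(metabuf))
 *			_hash_dropbuf(rel, metabuf);
 *
 * metabuf comes back pinned only if the cache had to be (re)filled.
 */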
/*
 *	_hash_getbucketbuf_from_hashkey() -- Get the bucket's buffer for the given
 *										 hashkey.
 *
 *	Bucket pages do not move or get removed once they are allocated.  This
 *	gives us an opportunity to use the previously saved metapage contents to
 *	reach the target bucket buffer, instead of reading from the metapage every
 *	time.  This saves one buffer access every time we want to reach the target
 *	bucket buffer, which is a very helpful saving in bufmgr traffic and
 *	contention.
 *
 *	The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
 *	bucket buffer has to be locked for reading or writing.
 *
 *	The out parameter cachedmetap is set with metapage contents used for
 *	hashkey to bucket buffer mapping.  Some callers need this info to reach
 *	the old bucket in case of bucket split; see _hash_doinsert().
 */
Buffer
_hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access,
								HashMetaPage *cachedmetap)
{
	HashMetaPage metap;
	Buffer		buf;
	Buffer		metabuf = InvalidBuffer;
	Page		page;
	Bucket		bucket;
	BlockNumber blkno;
	HashPageOpaque opaque;

	/* We read from target bucket buffer, hence locking is a must. */
	Assert(access == HASH_READ || access == HASH_WRITE);

	metap = _hash_getcachedmetap(rel, &metabuf, false);
	Assert(metap != NULL);
	/*
	 * Loop until we get a lock on the correct target bucket.
	 */
	for (;;)
	{
		/*
		 * Compute the target bucket number, and convert to block number.
		 */
		bucket = _hash_hashkey2bucket(hashkey,
									  metap->hashm_maxbucket,
									  metap->hashm_highmask,
									  metap->hashm_lowmask);

		blkno = BUCKET_TO_BLKNO(metap, bucket);

		/* Fetch the primary bucket page for the bucket */
		buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
		page = BufferGetPage(buf);
		opaque = (HashPageOpaque) PageGetSpecialPointer(page);
		Assert(opaque->hasho_bucket == bucket);
		Assert(opaque->hasho_prevblkno != InvalidBlockNumber);

		/*
		 * If this bucket hasn't been split, we're done.
		 */
		if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
			break;

		/* Drop lock on this buffer, update cached metapage, and retry. */
		_hash_relbuf(rel, buf);
		metap = _hash_getcachedmetap(rel, &metabuf, true);
		Assert(metap != NULL);
	}

	if (BufferIsValid(metabuf))
		_hash_dropbuf(rel, metabuf);

	if (cachedmetap)
		*cachedmetap = metap;

	return buf;
}
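/*
 * Illustrative note (not part of the original source): the staleness check
 * above works because _hash_initbuf() and _hash_expandtable() store the
 * then-current hashm_maxbucket in hasho_prevblkno of every primary bucket
 * page.  If our cached metapage says maxbucket is 10 but the fetched page
 * was last split when maxbucket was 20, then 20 > 10 reveals the cache is
 * stale, so we refresh it and recompute the target bucket.
 */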