/*-------------------------------------------------------------------------
 *
 * hashpage.c
 *	  Hash table page management code for the Postgres hash access method
 *
 * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/hash/hashpage.c
 *
 * NOTES
 *	  Postgres hash pages look like ordinary relation pages.  The opaque
 *	  data at high addresses includes information about the page including
 *	  whether a page is an overflow page or a true bucket, the bucket
 *	  number, and the block numbers of the preceding and following pages
 *	  in the same bucket.
 *
 *	  The first page in a hash relation, page zero, is special -- it stores
 *	  information describing the hash table; it is referred to as the
 *	  "meta page."  Pages one and higher store the actual data.
 *
 *	  There are also bitmap pages, which are not manipulated here;
 *	  see hashovfl.c.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/hash.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"

static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
                    uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
                  Bucket obucket, Bucket nbucket,
                  BlockNumber start_oblkno,
                  BlockNumber start_nblkno,
                  uint32 maxbucket,
                  uint32 highmask, uint32 lowmask);

/*
 * We use high-concurrency locking on hash indexes (see README for an overview
 * of the locking rules).  However, we can skip taking lmgr locks when the
 * index is local to the current backend (ie, either temp or new in the
 * current transaction).  No one else can see it, so there's no reason to
 * take locks.  We still take buffer-level locks, but not lmgr locks.
 */
#define USELOCKING(rel)     (!RELATION_IS_LOCAL(rel))

/*
 * _hash_getlock() -- Acquire an lmgr lock.
 *
 * 'whichlock' should be zero to acquire the split-control lock, or the
 * block number of a bucket's primary bucket page to acquire the per-bucket
 * lock.  (See README for details of the use of these locks.)
 *
 * 'access' must be HASH_SHARE or HASH_EXCLUSIVE.
 */
void
_hash_getlock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        LockPage(rel, whichlock, access);
}

/*
 * _hash_try_getlock() -- Acquire an lmgr lock, but only if it's free.
 *
 * Same as above except we return FALSE without blocking if lock isn't free.
 */
bool
_hash_try_getlock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        return ConditionalLockPage(rel, whichlock, access);
    else
        return true;
}

/*
 * _hash_droplock() -- Release an lmgr lock.
 */
void
_hash_droplock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        UnlockPage(rel, whichlock, access);
}
/*
 *	_hash_getbuf() -- Get a buffer by block number for read or write.
 *
 *		'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
 *		'flags' is a bitwise OR of the allowed page types.
 *
 *		This must be used only to fetch pages that are expected to be valid
 *		already.  _hash_checkpage() is applied using the given flags.
 *
 *		When this routine returns, the appropriate lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
 *		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
{
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBuffer(rel, blkno);

    if (access != HASH_NOLOCK)
        LockBuffer(buf, access);

    /* ref count and lock type are correct */

    _hash_checkpage(rel, buf, flags);

    return buf;
}
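
/*
 * Illustrative usage (not part of the original file): a routine walking a
 * bucket chain typically fetches each page read-only and accepts either
 * page type, roughly
 *
 *		buf = _hash_getbuf(rel, blkno, HASH_READ,
 *						   LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
 *
 * while maintenance code passes HASH_WRITE and a single expected page type.
 */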
/*
 *	_hash_getinitbuf() -- Get and initialize a buffer by block number.
 *
 *		This must be used only to fetch pages that are known to be before
 *		the index's filesystem EOF, but are to be filled from scratch.
 *		_hash_pageinit() is applied automatically.  Otherwise it has
 *		effects similar to _hash_getbuf() with access = HASH_WRITE.
 *
 *		When this routine returns, a write lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
 *		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getinitbuf(Relation rel, BlockNumber blkno)
{
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO, NULL);

    LockBuffer(buf, HASH_WRITE);

    /* ref count and lock type are correct */

    /* initialize the page */
    _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

    return buf;
}
/*
 *	_hash_getnewbuf() -- Get a new page at the end of the index.
 *
 *		This has the same API as _hash_getinitbuf, except that we are adding
 *		a page to the index, and hence expect the page to be past the
 *		logical EOF.  (However, we have to support the case where it isn't,
 *		since a prior try might have crashed after extending the filesystem
 *		EOF but before updating the metapage to reflect the added page.)
 *
 *		It is caller's responsibility to ensure that only one process can
 *		extend the index at a time.
 */
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
{
    BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");
    if (blkno > nblocks)
        elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
             RelationGetRelationName(rel));

    /* smgr insists we use P_NEW to extend the relation */
    if (blkno == nblocks)
    {
        buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
        if (BufferGetBlockNumber(buf) != blkno)
            elog(ERROR, "unexpected hash relation size: %u, should be %u",
                 BufferGetBlockNumber(buf), blkno);
    }
    else
        buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO, NULL);

    LockBuffer(buf, HASH_WRITE);

    /* ref count and lock type are correct */

    /* initialize the page */
    _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

    return buf;
}
/*
 *	_hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
 *
 *		This is identical to _hash_getbuf() but also allows a buffer access
 *		strategy to be specified.  We use this for VACUUM operations.
 */
Buffer
_hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
                           int access, int flags,
                           BufferAccessStrategy bstrategy)
{
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);

    if (access != HASH_NOLOCK)
        LockBuffer(buf, access);

    /* ref count and lock type are correct */

    _hash_checkpage(rel, buf, flags);

    return buf;
}
/*
 *	_hash_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.
 */
void
_hash_relbuf(Relation rel, Buffer buf)
{
    UnlockReleaseBuffer(buf);
}

/*
 *	_hash_dropbuf() -- release an unlocked buffer.
 *
 * This is used to unpin a buffer on which we hold no lock.
 */
void
_hash_dropbuf(Relation rel, Buffer buf)
{
    ReleaseBuffer(buf);
}

/*
 *	_hash_wrtbuf() -- write a hash page to disk.
 *
 *		This routine releases the lock held on the buffer and our refcount
 *		for it.  It is an error to call _hash_wrtbuf() without a write lock
 *		and a pin on the buffer.
 *
 * NOTE: this routine should go away when/if hash indexes are WAL-ified.
 * The correct sequence of operations is to mark the buffer dirty, then
 * write the WAL record, then release the lock and pin; so marking dirty
 * can't be combined with releasing.
 */
void
_hash_wrtbuf(Relation rel, Buffer buf)
{
    MarkBufferDirty(buf);
    UnlockReleaseBuffer(buf);
}
/*
 * _hash_chgbufaccess() -- Change the lock type on a buffer, without
 *			dropping our pin on it.
 *
 * from_access and to_access may be HASH_READ, HASH_WRITE, or HASH_NOLOCK,
 * the last indicating that no buffer-level lock is held or wanted.
 *
 * When from_access == HASH_WRITE, we assume the buffer is dirty and tell
 * bufmgr it must be written out.  If the caller wants to release a write
 * lock on a page that's not been modified, it's okay to pass from_access
 * as HASH_READ (a bit ugly, but handy in some places).
 */
void
_hash_chgbufaccess(Relation rel,
                   Buffer buf,
                   int from_access,
                   int to_access)
{
    if (from_access == HASH_WRITE)
        MarkBufferDirty(buf);
    if (from_access != HASH_NOLOCK)
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    if (to_access != HASH_NOLOCK)
        LockBuffer(buf, to_access);
}
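
/*
 * Illustrative usage (not in the original file): the typical idiom in this
 * file is to drop the buffer lock while keeping the pin, do some work that
 * must not hold the lock, then reacquire it, e.g.
 *
 *		_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
 *		... work without the metapage lock ...
 *		_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
 *
 * Passing HASH_WRITE as from_access also marks the buffer dirty.
 */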
/*
 *	_hash_metapinit() -- Initialize the metadata page of a hash index,
 *				the initial buckets, and the initial bitmap page.
 *
 * The initial number of buckets is dependent on num_tuples, an estimate
 * of the number of tuples to be loaded into the index initially.  The
 * chosen number of buckets is returned.
 *
 * We are fairly cavalier about locking here, since we know that no one else
 * could be accessing this index.  In particular the rule about not holding
 * multiple buffer locks is ignored.
 */
uint32
_hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
{
    HashMetaPage metap;
    HashPageOpaque pageopaque;
    Buffer      metabuf;
    Buffer      buf;
    Page        pg;
    int32       data_width;
    int32       item_width;
    int32       ffactor;
    double      dnumbuckets;
    uint32      num_buckets;
    uint32      log2_num_buckets;
    uint32      i;

    /* safety check */
    if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
        elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
             RelationGetRelationName(rel));

    /*
     * Determine the target fill factor (in tuples per bucket) for this index.
     * The idea is to make the fill factor correspond to pages about as full
     * as the user-settable fillfactor parameter says.  We can compute it
     * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
     */
    data_width = sizeof(uint32);
    item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
        sizeof(ItemIdData);     /* include the line pointer */
    ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
    /* keep to a sane range */
    if (ffactor < 10)
        ffactor = 10;
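
    /*
     * Worked example (illustrative, not in the original source): with 8 kB
     * pages, the default hash fillfactor of 75, and 8-byte MAXALIGN, the
     * target page usage is about 6144 bytes and item_width comes out to
     * 8 + 8 + 4 = 20 bytes, giving ffactor of roughly 307 tuples per bucket.
     */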
    /*
     * Choose the number of initial bucket pages to match the fill factor
     * given the estimated number of tuples.  We round up the result to the
     * next power of 2, however, and always force at least 2 bucket pages. The
     * upper limit is determined by considerations explained in
     * _hash_expandtable().
     */
    dnumbuckets = num_tuples / ffactor;
    if (dnumbuckets <= 2.0)
        num_buckets = 2;
    else if (dnumbuckets >= (double) 0x40000000)
        num_buckets = 0x40000000;
    else
        num_buckets = ((uint32) 1) << _hash_log2((uint32) dnumbuckets);

    log2_num_buckets = _hash_log2(num_buckets);
    Assert(num_buckets == (((uint32) 1) << log2_num_buckets));
    Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS);
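
    /*
     * Worked example (illustrative): with ffactor around 307 and num_tuples
     * = 100000, dnumbuckets is about 325.7, which _hash_log2 rounds up to
     * 2^9, so the index starts with num_buckets = 512 and
     * log2_num_buckets = 9.
     */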
    /*
     * We initialize the metapage, the first N bucket pages, and the first
     * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
     * calls to occur.  This ensures that the smgr level has the right idea of
     * the physical index length.
     */
    metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
    pg = BufferGetPage(metabuf);

    pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
    pageopaque->hasho_prevblkno = InvalidBlockNumber;
    pageopaque->hasho_nextblkno = InvalidBlockNumber;
    pageopaque->hasho_bucket = -1;
    pageopaque->hasho_flag = LH_META_PAGE;
    pageopaque->hasho_page_id = HASHO_PAGE_ID;

    metap = HashPageGetMeta(pg);

    metap->hashm_magic = HASH_MAGIC;
    metap->hashm_version = HASH_VERSION;
    metap->hashm_ntuples = 0;
    metap->hashm_nmaps = 0;
    metap->hashm_ffactor = ffactor;
    metap->hashm_bsize = HashGetMaxBitmapSize(pg);
    /* find largest bitmap array size that will fit in page size */
    for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
    {
        if ((1 << i) <= metap->hashm_bsize)
            break;
    }
    Assert(i > 0);
    metap->hashm_bmsize = 1 << i;
    metap->hashm_bmshift = i + BYTE_TO_BIT;
    Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));

    /*
     * Label the index with its primary hash support function's OID.  This is
     * pretty useless for normal operation (in fact, hashm_procid is not used
     * anywhere), but it might be handy for forensic purposes so we keep it.
     */
    metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);

    /*
     * We initialize the index with N buckets, 0 .. N-1, occupying physical
     * blocks 1 to N.  The first freespace bitmap page is in block N+1. Since
     * N is a power of 2, we can set the masks this way:
     */
    metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1;
    metap->hashm_highmask = (num_buckets << 1) - 1;
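
    /*
     * Illustrative layout for num_buckets = 4: maxbucket = lowmask = 3
     * (binary 011) and highmask = 7 (binary 111); buckets 0..3 occupy
     * blocks 1..4 and the first bitmap page goes in block 5.
     */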
    MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
    MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

    /* Set up mapping for one spare page after the initial splitpoints */
    metap->hashm_spares[log2_num_buckets] = 1;
    metap->hashm_ovflpoint = log2_num_buckets;
    metap->hashm_firstfree = 0;
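
    /*
     * Illustrative example (not in the original source): continuing the
     * num_buckets = 4 case, hashm_spares[2] = 1 records that one non-bucket
     * page (the bitmap page in block 5) precedes the next splitpoint, so
     * BUCKET_TO_BLKNO will later map bucket 4 to block 6 rather than 5.
     */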
    /*
     * Release buffer lock on the metapage while we initialize buckets.
     * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
     * won't accomplish anything.  It's a bad idea to hold buffer locks for
     * long intervals in any case, since that can block the bgwriter.
     */
    _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);

    /*
     * Initialize the first N buckets
     */
    for (i = 0; i < num_buckets; i++)
    {
        /* Allow interrupts, in case N is huge */
        CHECK_FOR_INTERRUPTS();

        buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), forkNum);
        pg = BufferGetPage(buf);
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
        pageopaque->hasho_prevblkno = InvalidBlockNumber;
        pageopaque->hasho_nextblkno = InvalidBlockNumber;
        pageopaque->hasho_bucket = i;
        pageopaque->hasho_flag = LH_BUCKET_PAGE;
        pageopaque->hasho_page_id = HASHO_PAGE_ID;
        _hash_wrtbuf(rel, buf);
    }

    /* Now reacquire buffer lock on metapage */
    _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);

    /*
     * Initialize first bitmap page
     */
    _hash_initbitmap(rel, metap, num_buckets + 1, forkNum);

    /* all done */
    _hash_wrtbuf(rel, metabuf);

    return num_buckets;
}
/*
 *	_hash_pageinit() -- Initialize a new hash index page.
 */
void
_hash_pageinit(Page page, Size size)
{
    Assert(PageIsNew(page));
    PageInit(page, size, sizeof(HashPageOpaqueData));
}
/*
 * Attempt to expand the hash table by creating one new bucket.
 *
 * This will silently do nothing if it cannot get the needed locks.
 *
 * The caller should hold no locks on the hash index.
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.
 */
void
_hash_expandtable(Relation rel, Buffer metabuf)
{
    HashMetaPage metap;
    Bucket      old_bucket;
    Bucket      new_bucket;
    uint32      spare_ndx;
    BlockNumber start_oblkno;
    BlockNumber start_nblkno;
    uint32      maxbucket;
    uint32      highmask;
    uint32      lowmask;

    /*
     * Obtain the page-zero lock to assert the right to begin a split (see
     * README).
     *
     * Note: deadlock should be impossible here. Our own backend could only be
     * holding bucket sharelocks due to stopped indexscans; those will not
     * block other holders of the page-zero lock, who are only interested in
     * acquiring bucket sharelocks themselves.  Exclusive bucket locks are
     * only taken here and in hashbulkdelete, and neither of these operations
     * needs any additional locks to complete.  (If, due to some flaw in this
     * reasoning, we manage to deadlock anyway, it's okay to error out; the
     * index will be left in a consistent state.)
     */
    _hash_getlock(rel, 0, HASH_EXCLUSIVE);

    /* Write-lock the meta page */
    _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);

    _hash_checkpage(rel, metabuf, LH_META_PAGE);
    metap = HashPageGetMeta(BufferGetPage(metabuf));

    /*
     * Check to see if split is still needed; someone else might have already
     * done one while we waited for the lock.
     *
     * Make sure this stays in sync with _hash_doinsert()
     */
    if (metap->hashm_ntuples <=
        (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
        goto fail;
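
    /*
     * Illustrative check (not in the original source): with hashm_ffactor =
     * 307 and four buckets, the test above gives up unless hashm_ntuples
     * exceeds 307 * 4 = 1228; _hash_doinsert applies the same test before
     * calling us.
     */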
    /*
     * Can't split anymore if maxbucket has reached its maximum possible
     * value.
     *
     * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
     * the calculation maxbucket+1 mustn't overflow).  Currently we restrict
     * to half that because of overflow looping in _hash_log2() and
     * insufficient space in hashm_spares[].  It's moot anyway because an
     * index with 2^32 buckets would certainly overflow BlockNumber and hence
     * _hash_alloc_buckets() would fail, but if we supported buckets smaller
     * than a disk block then this would be an independent constraint.
     *
     * If you change this, see also the maximum initial number of buckets in
     * _hash_metapinit().
     */
    if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
        goto fail;

    /*
     * Determine which bucket is to be split, and attempt to lock the old
     * bucket.  If we can't get the lock, give up.
     *
     * The lock protects us against other backends, but not against our own
     * backend.  Must check for active scans separately.
     */
    new_bucket = metap->hashm_maxbucket + 1;

    old_bucket = (new_bucket & metap->hashm_lowmask);

    start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
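
    /*
     * Illustrative example: with lowmask = 3, creating new_bucket 5 means
     * old_bucket = 5 & 3 = 1, so the tuples currently in bucket 1 are the
     * ones to be redistributed between buckets 1 and 5.
     */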
    if (_hash_has_active_scan(rel, old_bucket))
        goto fail;

    if (!_hash_try_getlock(rel, start_oblkno, HASH_EXCLUSIVE))
        goto fail;

    /*
     * Likewise lock the new bucket (should never fail).
     *
     * Note: it is safe to compute the new bucket's blkno here, even though we
     * may still need to update the BUCKET_TO_BLKNO mapping.  This is because
     * the current value of hashm_spares[hashm_ovflpoint] correctly shows
     * where we are going to put a new splitpoint's worth of buckets.
     */
    start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

    if (_hash_has_active_scan(rel, new_bucket))
        elog(ERROR, "scan in progress on supposedly new bucket");

    if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE))
        elog(ERROR, "could not get lock on supposedly new bucket");

    /*
     * If the split point is increasing (hashm_maxbucket's log base 2
     * increases), we need to allocate a new batch of bucket pages.
     */
    spare_ndx = _hash_log2(new_bucket + 1);
    if (spare_ndx > metap->hashm_ovflpoint)
    {
        Assert(spare_ndx == metap->hashm_ovflpoint + 1);

        /*
         * The number of buckets in the new splitpoint is equal to the total
         * number already in existence, i.e. new_bucket.  Currently this maps
         * one-to-one to blocks required, but someday we may need a more
         * complicated calculation here.
         */
        if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
        {
            /* can't split due to BlockNumber overflow */
            _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
            _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);
            goto fail;
        }
    }
    /*
     * Okay to proceed with split.  Update the metapage bucket mapping info.
     *
     * Since we are scribbling on the metapage data right in the shared
     * buffer, any failure in this next little bit leaves us with a big
     * problem: the metapage is effectively corrupt but could get written back
     * to disk.  We don't really expect any failure, but just to be sure,
     * establish a critical section.
     */
    START_CRIT_SECTION();

    metap->hashm_maxbucket = new_bucket;

    if (new_bucket > metap->hashm_highmask)
    {
        /* Starting a new doubling */
        metap->hashm_lowmask = metap->hashm_highmask;
        metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
    }
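
    /*
     * Illustrative example: when new_bucket = 8 exceeds the old highmask of
     * 7, lowmask becomes 7 and highmask becomes 8 | 7 = 15, i.e. both masks
     * double for the next round of splits.
     */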
    /*
     * If the split point is increasing (hashm_maxbucket's log base 2
     * increases), we need to adjust the hashm_spares[] array and
     * hashm_ovflpoint so that future overflow pages will be created beyond
     * this new batch of bucket pages.
     */
    if (spare_ndx > metap->hashm_ovflpoint)
    {
        metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
        metap->hashm_ovflpoint = spare_ndx;
    }

    /* Done mucking with metapage */
    END_CRIT_SECTION();

    /*
     * Copy bucket mapping info now; this saves re-accessing the meta page
     * inside _hash_splitbucket's inner loop.  Note that once we drop the
     * split lock, other splits could begin, so these values might be out of
     * date before _hash_splitbucket finishes.  That's okay, since all it
     * needs is to tell which of these two buckets to map hashkeys into.
     */
    maxbucket = metap->hashm_maxbucket;
    highmask = metap->hashm_highmask;
    lowmask = metap->hashm_lowmask;

    /* Write out the metapage and drop lock, but keep pin */
    _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);

    /* Release split lock; okay for other splits to occur now */
    _hash_droplock(rel, 0, HASH_EXCLUSIVE);

    /* Relocate records to the new bucket */
    _hash_splitbucket(rel, metabuf, old_bucket, new_bucket,
                      start_oblkno, start_nblkno,
                      maxbucket, highmask, lowmask);

    /* Release bucket locks, allowing others to access them */
    _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
    _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);

    return;

    /* Here if decide not to split or fail to acquire old bucket lock */
fail:

    /* We didn't write the metapage, so just drop lock */
    _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);

    /* Release split lock */
    _hash_droplock(rel, 0, HASH_EXCLUSIVE);
}
/*
 * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
 *
 * This does not need to initialize the new bucket pages; we'll do that as
 * each one is used by _hash_expandtable().  But we have to extend the logical
 * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
 * sync with ours, so that we don't get complaints from smgr.
 *
 * We do this by writing a page of zeroes at the end of the splitpoint range.
 * We expect that the filesystem will ensure that the intervening pages read
 * as zeroes too.  On many filesystems this "hole" will not be allocated
 * immediately, which means that the index file may end up more fragmented
 * than if we forced it all to be allocated now; but since we don't scan
 * hash indexes sequentially anyway, that probably doesn't matter.
 *
 * XXX It's annoying that this code is executed with the metapage lock held.
 * We need to interlock against _hash_getovflpage() adding a new overflow page
 * concurrently, but it'd likely be better to use LockRelationForExtension
 * for the purpose.  OTOH, adding a splitpoint is a very infrequent operation,
 * so it may not be worth worrying about.
 *
 * Returns TRUE if successful, or FALSE if allocation failed due to
 * BlockNumber overflow.
 */
static bool
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
{
    BlockNumber lastblock;
    char        zerobuf[BLCKSZ];

    lastblock = firstblock + nblocks - 1;

    /*
     * Check for overflow in block number calculation; if so, we cannot extend
     * the index anymore.
     */
    if (lastblock < firstblock || lastblock == InvalidBlockNumber)
        return false;

    MemSet(zerobuf, 0, sizeof(zerobuf));

    RelationOpenSmgr(rel);
    smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);

    return true;
}
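
/*
 * Illustrative example (not in the original source): in a freshly built
 * 4-bucket index with no overflow pages yet, starting splitpoint 3 calls
 * _hash_alloc_buckets(rel, 6, 4); lastblock is then 9, so blocks 6..9 are
 * reserved for buckets 4..7 and only block 9 is physically written here.
 */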
/*
 * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
 *
 * We are splitting a bucket that consists of a base bucket page and zero
 * or more overflow (bucket chain) pages.  We must relocate tuples that
 * belong in the new bucket, and compress out any free space in the old
 * bucket.
 *
 * The caller must hold exclusive locks on both buckets to ensure that
 * no one else is trying to access them (see README).
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.  (The metapage is only
 * touched if it becomes necessary to add or remove overflow pages.)
 */
static void
_hash_splitbucket(Relation rel,
                  Buffer metabuf,
                  Bucket obucket,
                  Bucket nbucket,
                  BlockNumber start_oblkno,
                  BlockNumber start_nblkno,
                  uint32 maxbucket,
                  uint32 highmask,
                  uint32 lowmask)
{
    BlockNumber oblkno;
    BlockNumber nblkno;
    Buffer      obuf;
    Buffer      nbuf;
    Page        opage;
    Page        npage;
    HashPageOpaque oopaque;
    HashPageOpaque nopaque;

    /*
     * It should be okay to simultaneously write-lock pages from each bucket,
     * since no one else can be trying to acquire buffer lock on pages of
     * either bucket.
     */
    oblkno = start_oblkno;
    obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_BUCKET_PAGE);
    opage = BufferGetPage(obuf);
    oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

    nblkno = start_nblkno;
    nbuf = _hash_getnewbuf(rel, nblkno, MAIN_FORKNUM);
    npage = BufferGetPage(nbuf);

    /* initialize the new bucket's primary page */
    nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
    nopaque->hasho_prevblkno = InvalidBlockNumber;
    nopaque->hasho_nextblkno = InvalidBlockNumber;
    nopaque->hasho_bucket = nbucket;
    nopaque->hasho_flag = LH_BUCKET_PAGE;
    nopaque->hasho_page_id = HASHO_PAGE_ID;

    /*
     * Partition the tuples in the old bucket between the old bucket and the
     * new bucket, advancing along the old bucket's overflow bucket chain and
     * adding overflow pages to the new bucket as needed.  Outer loop iterates
     * once per page in old bucket.
     */
    for (;;)
    {
        OffsetNumber ooffnum;
        OffsetNumber omaxoffnum;
        OffsetNumber deletable[MaxOffsetNumber];
        int         ndeletable = 0;

        /* Scan each tuple in old page */
        omaxoffnum = PageGetMaxOffsetNumber(opage);
        for (ooffnum = FirstOffsetNumber;
             ooffnum <= omaxoffnum;
             ooffnum = OffsetNumberNext(ooffnum))
        {
            IndexTuple  itup;
            Size        itemsz;
            Bucket      bucket;

            /*
             * Fetch the item's hash key (conveniently stored in the item) and
             * determine which bucket it now belongs in.
             */
            itup = (IndexTuple) PageGetItem(opage,
                                            PageGetItemId(opage, ooffnum));
            bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
                                          maxbucket, highmask, lowmask);
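
            /*
             * Note (added for clarity): _hash_hashkey2bucket (hashutil.c)
             * masks the hash key with highmask and, if the result exceeds
             * maxbucket (a bucket that does not exist yet), masks it again
             * with lowmask; so each tuple lands in either obucket or nbucket.
             */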
            if (bucket == nbucket)
            {
                /*
                 * insert the tuple into the new bucket.  if it doesn't fit on
                 * the current page in the new bucket, we must allocate a new
                 * overflow page and place the tuple on that page instead.
                 */
                itemsz = IndexTupleDSize(*itup);
                itemsz = MAXALIGN(itemsz);

                if (PageGetFreeSpace(npage) < itemsz)
                {
                    /* write out nbuf and drop lock, but keep pin */
                    _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
                    /* chain to a new overflow page */
                    nbuf = _hash_addovflpage(rel, metabuf, nbuf);
                    npage = BufferGetPage(nbuf);
                    /* we don't need nblkno or nopaque within the loop */
                }

                /*
                 * Insert tuple on new page, using _hash_pgaddtup to ensure
                 * correct ordering by hashkey.  This is a tad inefficient
                 * since we may have to shuffle itempointers repeatedly.
                 * Possible future improvement: accumulate all the items for
                 * the new page and qsort them before insertion.
                 */
                (void) _hash_pgaddtup(rel, nbuf, itemsz, itup);

                /*
                 * Mark tuple for deletion from old page.
                 */
                deletable[ndeletable++] = ooffnum;
            }
            else
            {
                /*
                 * the tuple stays on this page, so nothing to do.
                 */
                Assert(bucket == obucket);
            }
        }

        oblkno = oopaque->hasho_nextblkno;

        /*
         * Done scanning this old page.  If we moved any tuples, delete them
         * from the old page.
         */
        if (ndeletable > 0)
        {
            PageIndexMultiDelete(opage, deletable, ndeletable);
            _hash_wrtbuf(rel, obuf);
        }
        else
            _hash_relbuf(rel, obuf);

        /* Exit loop if no more overflow pages in old bucket */
        if (!BlockNumberIsValid(oblkno))
            break;

        /* Else, advance to next old page */
        obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
        opage = BufferGetPage(obuf);
        oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
    }

    /*
     * We're at the end of the old bucket chain, so we're done partitioning
     * the tuples.  Before quitting, call _hash_squeezebucket to ensure the
     * tuples remaining in the old bucket (including the overflow pages) are
     * packed as tightly as possible.  The new bucket is already tight.
     */
    _hash_wrtbuf(rel, nbuf);

    _hash_squeezebucket(rel, obucket, start_oblkno, NULL);
}