/*-------------------------------------------------------------------------
 *
 * hashpage.c
 *	  Hash table page management code for the Postgres hash access method
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/hash/hashpage.c
 *
 * NOTES
 *	  Postgres hash pages look like ordinary relation pages.  The opaque
 *	  data at high addresses includes information about the page including
 *	  whether a page is an overflow page or a true bucket, the bucket
 *	  number, and the block numbers of the preceding and following pages
 *	  in the same bucket.
 *
 *	  The first page in a hash relation, page zero, is special -- it stores
 *	  information describing the hash table; it is referred to as the
 *	  "meta page."  Pages one and higher store the actual data.
 *
 *	  There are also bitmap pages, which are not manipulated here;
 *	  see hashovfl.c.
 *
 *-------------------------------------------------------------------------
 */
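
/*
 * For orientation: the per-page "opaque data" mentioned above is the special
 * space struct declared in access/hash.h.  Roughly (see that header for the
 * authoritative definition), it carries:
 *
 *		BlockNumber hasho_prevblkno;	previous page in this bucket's chain
 *		BlockNumber hasho_nextblkno;	next page in this bucket's chain
 *		Bucket		hasho_bucket;		bucket number this page belongs to
 *		uint16		hasho_flag;			page type (LH_META_PAGE, LH_BUCKET_PAGE, ...)
 *		uint16		hasho_page_id;		HASHO_PAGE_ID, for page identification
 *
 * The hasho_xxx assignments throughout this file fill in these fields.
 */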

#include "postgres.h"

#include "access/hash.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"


static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
					uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
				  Bucket obucket, Bucket nbucket,
				  BlockNumber start_oblkno,
				  Buffer nbuf,
				  uint32 maxbucket,
				  uint32 highmask, uint32 lowmask);

/*
 * We use high-concurrency locking on hash indexes (see README for an overview
 * of the locking rules).  However, we can skip taking lmgr locks when the
 * index is local to the current backend (ie, either temp or new in the
 * current transaction).  No one else can see it, so there's no reason to
 * take locks.  We still take buffer-level locks, but not lmgr locks.
 */
#define USELOCKING(rel)		(!RELATION_IS_LOCAL(rel))

/*
 * _hash_getlock() -- Acquire an lmgr lock.
 *
 * 'whichlock' should be the block number of a bucket's primary bucket page to
 * acquire the per-bucket lock.  (See README for details of the use of these
 * locks.)
 *
 * 'access' must be HASH_SHARE or HASH_EXCLUSIVE.
 */
void
_hash_getlock(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))
		LockPage(rel, whichlock, access);
}

/*
 * _hash_try_getlock() -- Acquire an lmgr lock, but only if it's free.
 *
 * Same as above except we return FALSE without blocking if lock isn't free.
 */
bool
_hash_try_getlock(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))
		return ConditionalLockPage(rel, whichlock, access);
	else
		return true;
}

/*
 * _hash_droplock() -- Release an lmgr lock.
 */
void
_hash_droplock(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))
		UnlockPage(rel, whichlock, access);
}

/*
 *	_hash_getbuf() -- Get a buffer by block number for read or write.
 *
 *		'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
 *		'flags' is a bitwise OR of the allowed page types.
 *
 *		This must be used only to fetch pages that are expected to be valid
 *		already.  _hash_checkpage() is applied using the given flags.
 *
 *		When this routine returns, the appropriate lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
 *		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBuffer(rel, blkno);

	if (access != HASH_NOLOCK)
		LockBuffer(buf, access);

	/* ref count and lock type are correct */

	_hash_checkpage(rel, buf, flags);

	return buf;
}

/*
 *	_hash_getinitbuf() -- Get and initialize a buffer by block number.
 *
 *		This must be used only to fetch pages that are known to be before
 *		the index's filesystem EOF, but are to be filled from scratch.
 *		_hash_pageinit() is applied automatically.  Otherwise it has
 *		effects similar to _hash_getbuf() with access = HASH_WRITE.
 *
 *		When this routine returns, a write lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
 *		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getinitbuf(Relation rel, BlockNumber blkno)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
							 NULL);

	/* ref count and lock type are correct */

	/* initialize the page */
	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

	return buf;
}

/*
 *	_hash_getnewbuf() -- Get a new page at the end of the index.
 *
 *		This has the same API as _hash_getinitbuf, except that we are adding
 *		a page to the index, and hence expect the page to be past the
 *		logical EOF.  (However, we have to support the case where it isn't,
 *		since a prior try might have crashed after extending the filesystem
 *		EOF but before updating the metapage to reflect the added page.)
 *
 *		It is caller's responsibility to ensure that only one process can
 *		extend the index at a time.  In practice, this function is called
 *		only while holding write lock on the metapage, because adding a page
 *		is always associated with an update of metapage data.
 */
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
{
	BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");
	if (blkno > nblocks)
		elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
			 RelationGetRelationName(rel));

	/* smgr insists we use P_NEW to extend the relation */
	if (blkno == nblocks)
	{
		buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
		if (BufferGetBlockNumber(buf) != blkno)
			elog(ERROR, "unexpected hash relation size: %u, should be %u",
				 BufferGetBlockNumber(buf), blkno);
		LockBuffer(buf, HASH_WRITE);
	}
	else
	{
		buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
								 NULL);
	}

	/* ref count and lock type are correct */

	/* initialize the page */
	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

	return buf;
}

/*
 *	_hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
 *
 *		This is identical to _hash_getbuf() but also allows a buffer access
 *		strategy to be specified.  We use this for VACUUM operations.
 */
Buffer
_hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
						   int access, int flags,
						   BufferAccessStrategy bstrategy)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);

	if (access != HASH_NOLOCK)
		LockBuffer(buf, access);

	/* ref count and lock type are correct */

	_hash_checkpage(rel, buf, flags);

	return buf;
}

/*
 *	_hash_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.
 */
void
_hash_relbuf(Relation rel, Buffer buf)
{
	UnlockReleaseBuffer(buf);
}

/*
 *	_hash_dropbuf() -- release an unlocked buffer.
 *
 * This is used to unpin a buffer on which we hold no lock.
 */
void
_hash_dropbuf(Relation rel, Buffer buf)
{
	ReleaseBuffer(buf);
}

/*
 *	_hash_wrtbuf() -- write a hash page to disk.
 *
 *		This routine releases the lock held on the buffer and our refcount
 *		for it.  It is an error to call _hash_wrtbuf() without a write lock
 *		and a pin on the buffer.
 *
 *		NOTE: this routine should go away when/if hash indexes are WAL-ified.
 *		The correct sequence of operations is to mark the buffer dirty, then
 *		write the WAL record, then release the lock and pin; so marking dirty
 *		can't be combined with releasing.
 */
void
_hash_wrtbuf(Relation rel, Buffer buf)
{
	MarkBufferDirty(buf);
	UnlockReleaseBuffer(buf);
}
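
/*
 * Rough illustration of the ordering constraint in the NOTE above
 * (hypothetical, since hash indexes are not WAL-logged in this code): a
 * WAL-aware caller would have to perform the steps separately, e.g.
 *
 *		MarkBufferDirty(buf);
 *		... build and insert the WAL record describing the change ...
 *		UnlockReleaseBuffer(buf);
 *
 * which is why dirtying and releasing could no longer stay combined in a
 * single helper such as this one.
 */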

/*
 * _hash_chgbufaccess() -- Change the lock type on a buffer, without
 *			dropping our pin on it.
 *
 * from_access and to_access may be HASH_READ, HASH_WRITE, or HASH_NOLOCK,
 * the last indicating that no buffer-level lock is held or wanted.
 *
 * When from_access == HASH_WRITE, we assume the buffer is dirty and tell
 * bufmgr it must be written out.  If the caller wants to release a write
 * lock on a page that's not been modified, it's okay to pass from_access
 * as HASH_READ (a bit ugly, but handy in some places).
 */
void
_hash_chgbufaccess(Relation rel,
				   Buffer buf,
				   int from_access,
				   int to_access)
{
	if (from_access == HASH_WRITE)
		MarkBufferDirty(buf);
	if (from_access != HASH_NOLOCK)
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	if (to_access != HASH_NOLOCK)
		LockBuffer(buf, to_access);
}

/*
 *	_hash_metapinit() -- Initialize the metadata page of a hash index,
 *				the initial buckets, and the initial bitmap page.
 *
 * The initial number of buckets is dependent on num_tuples, an estimate
 * of the number of tuples to be loaded into the index initially.  The
 * chosen number of buckets is returned.
 *
 * We are fairly cavalier about locking here, since we know that no one else
 * could be accessing this index.  In particular the rule about not holding
 * multiple buffer locks is ignored.
 */
uint32
_hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
{
	HashMetaPage metap;
	HashPageOpaque pageopaque;
	Buffer		metabuf;
	Buffer		buf;
	Page		pg;
	int32		data_width;
	int32		item_width;
	int32		ffactor;
	double		dnumbuckets;
	uint32		num_buckets;
	uint32		log2_num_buckets;
	uint32		i;

	/* safety check */
	if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
		elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
			 RelationGetRelationName(rel));

	/*
	 * Determine the target fill factor (in tuples per bucket) for this index.
	 * The idea is to make the fill factor correspond to pages about as full
	 * as the user-settable fillfactor parameter says.  We can compute it
	 * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
	 */
	data_width = sizeof(uint32);
	item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
		sizeof(ItemIdData);		/* include the line pointer */
	ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
	/* keep to a sane range */
	if (ffactor < 10)
		ffactor = 10;
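
	/*
	 * Worked example (approximate; assumes 8K pages, the default fillfactor
	 * of HASH_DEFAULT_FILLFACTOR = 75, and 8-byte MAXALIGN): item_width =
	 * 8 + 8 + 4 = 20 bytes, the target page usage is about 6144 bytes, so
	 * ffactor comes out around 300 tuples per bucket page.
	 */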

	/*
	 * Choose the number of initial bucket pages to match the fill factor
	 * given the estimated number of tuples.  We round up the result to the
	 * next power of 2, however, and always force at least 2 bucket pages. The
	 * upper limit is determined by considerations explained in
	 * _hash_expandtable().
	 */
	dnumbuckets = num_tuples / ffactor;
	if (dnumbuckets <= 2.0)
		num_buckets = 2;
	else if (dnumbuckets >= (double) 0x40000000)
		num_buckets = 0x40000000;
	else
		num_buckets = ((uint32) 1) << _hash_log2((uint32) dnumbuckets);
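
	/*
	 * For example, with num_tuples = 100000 and ffactor = 300, dnumbuckets
	 * is about 333, which rounds up to num_buckets = 512.
	 */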

	log2_num_buckets = _hash_log2(num_buckets);
	Assert(num_buckets == (((uint32) 1) << log2_num_buckets));
	Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS);

	/*
	 * We initialize the metapage, the first N bucket pages, and the first
	 * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
	 * calls to occur.  This ensures that the smgr level has the right idea of
	 * the physical index length.
	 */
	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
	pg = BufferGetPage(metabuf);

	pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
	pageopaque->hasho_prevblkno = InvalidBlockNumber;
	pageopaque->hasho_nextblkno = InvalidBlockNumber;
	pageopaque->hasho_bucket = -1;
	pageopaque->hasho_flag = LH_META_PAGE;
	pageopaque->hasho_page_id = HASHO_PAGE_ID;

	metap = HashPageGetMeta(pg);

	metap->hashm_magic = HASH_MAGIC;
	metap->hashm_version = HASH_VERSION;
	metap->hashm_ntuples = 0;
	metap->hashm_nmaps = 0;
	metap->hashm_ffactor = ffactor;
	metap->hashm_bsize = HashGetMaxBitmapSize(pg);
	/* find largest bitmap array size that will fit in page size */
	for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
	{
		if ((1 << i) <= metap->hashm_bsize)
			break;
	}
	Assert(i > 0);
	metap->hashm_bmsize = 1 << i;
	metap->hashm_bmshift = i + BYTE_TO_BIT;
	Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
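
	/*
	 * With 8K pages, for example, hashm_bsize comes out a bit over 8100
	 * bytes, so the loop above settles on i = 12: hashm_bmsize = 4096 bytes
	 * and hashm_bmshift = 15, i.e. one bitmap page tracks 4096 * 8 = 32768
	 * overflow pages.
	 */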

	/*
	 * Label the index with its primary hash support function's OID.  This is
	 * pretty useless for normal operation (in fact, hashm_procid is not used
	 * anywhere), but it might be handy for forensic purposes so we keep it.
	 */
	metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);

	/*
	 * We initialize the index with N buckets, 0 .. N-1, occupying physical
	 * blocks 1 to N.  The first freespace bitmap page is in block N+1. Since
	 * N is a power of 2, we can set the masks this way:
	 */
	metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1;
	metap->hashm_highmask = (num_buckets << 1) - 1;
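
	/*
	 * For instance, num_buckets = 4 gives maxbucket = lowmask = 3 (binary
	 * 011) and highmask = 7 (binary 111).
	 */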

	MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
	MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

	/* Set up mapping for one spare page after the initial splitpoints */
	metap->hashm_spares[log2_num_buckets] = 1;
	metap->hashm_ovflpoint = log2_num_buckets;
	metap->hashm_firstfree = 0;

	/*
	 * Release buffer lock on the metapage while we initialize buckets.
	 * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
	 * won't accomplish anything.  It's a bad idea to hold buffer locks for
	 * long intervals in any case, since that can block the bgwriter.
	 */
	_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);

	/*
	 * Initialize the first N buckets
	 */
	for (i = 0; i < num_buckets; i++)
	{
		/* Allow interrupts, in case N is huge */
		CHECK_FOR_INTERRUPTS();

		buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), forkNum);
		pg = BufferGetPage(buf);
		pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
		pageopaque->hasho_prevblkno = InvalidBlockNumber;
		pageopaque->hasho_nextblkno = InvalidBlockNumber;
		pageopaque->hasho_bucket = i;
		pageopaque->hasho_flag = LH_BUCKET_PAGE;
		pageopaque->hasho_page_id = HASHO_PAGE_ID;
		_hash_wrtbuf(rel, buf);
	}

	/* Now reacquire buffer lock on metapage */
	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);

	/*
	 * Initialize first bitmap page
	 */
	_hash_initbitmap(rel, metap, num_buckets + 1, forkNum);

	/* all done */
	_hash_wrtbuf(rel, metabuf);

	return num_buckets;
}

/*
 *	_hash_pageinit() -- Initialize a new hash index page.
 */
void
_hash_pageinit(Page page, Size size)
{
	Assert(PageIsNew(page));
	PageInit(page, size, sizeof(HashPageOpaqueData));
}

/*
 * Attempt to expand the hash table by creating one new bucket.
 *
 * This will silently do nothing if it cannot get the needed locks.
 *
 * The caller should hold no locks on the hash index.
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.
 */
void
_hash_expandtable(Relation rel, Buffer metabuf)
{
	HashMetaPage metap;
	Bucket		old_bucket;
	Bucket		new_bucket;
	uint32		spare_ndx;
	BlockNumber start_oblkno;
	BlockNumber start_nblkno;
	Buffer		buf_nblkno;
	uint32		maxbucket;
	uint32		highmask;
	uint32		lowmask;

	/*
	 * Write-lock the meta page.  It used to be necessary to acquire a
	 * heavyweight lock to begin a split, but that is no longer required.
	 */
	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);

	_hash_checkpage(rel, metabuf, LH_META_PAGE);
	metap = HashPageGetMeta(BufferGetPage(metabuf));

	/*
	 * Check to see if split is still needed; someone else might have already
	 * done one while we waited for the lock.
	 *
	 * Make sure this stays in sync with _hash_doinsert()
	 */
	if (metap->hashm_ntuples <=
		(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
		goto fail;

	/*
	 * Can't split anymore if maxbucket has reached its maximum possible
	 * value.
	 *
	 * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
	 * the calculation maxbucket+1 mustn't overflow).  Currently we restrict
	 * to half that because of overflow looping in _hash_log2() and
	 * insufficient space in hashm_spares[].  It's moot anyway because an
	 * index with 2^32 buckets would certainly overflow BlockNumber and hence
	 * _hash_alloc_buckets() would fail, but if we supported buckets smaller
	 * than a disk block then this would be an independent constraint.
	 *
	 * If you change this, see also the maximum initial number of buckets in
	 * _hash_metapinit().
	 */
	if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
		goto fail;

	/*
	 * Determine which bucket is to be split, and attempt to lock the old
	 * bucket.  If we can't get the lock, give up.
	 *
	 * The lock protects us against other backends, but not against our own
	 * backend.  Must check for active scans separately.
	 */
	new_bucket = metap->hashm_maxbucket + 1;

	old_bucket = (new_bucket & metap->hashm_lowmask);

	start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);

	if (_hash_has_active_scan(rel, old_bucket))
		goto fail;

	if (!_hash_try_getlock(rel, start_oblkno, HASH_EXCLUSIVE))
		goto fail;

	/*
	 * Likewise lock the new bucket (should never fail).
	 *
	 * Note: it is safe to compute the new bucket's blkno here, even though we
	 * may still need to update the BUCKET_TO_BLKNO mapping.  This is because
	 * the current value of hashm_spares[hashm_ovflpoint] correctly shows
	 * where we are going to put a new splitpoint's worth of buckets.
	 */
	start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

	if (_hash_has_active_scan(rel, new_bucket))
		elog(ERROR, "scan in progress on supposedly new bucket");

	if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE))
		elog(ERROR, "could not get lock on supposedly new bucket");

	/*
	 * If the split point is increasing (hashm_maxbucket's log base 2
	 * increases), we need to allocate a new batch of bucket pages.
	 */
	spare_ndx = _hash_log2(new_bucket + 1);
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		Assert(spare_ndx == metap->hashm_ovflpoint + 1);

		/*
		 * The number of buckets in the new splitpoint is equal to the total
		 * number already in existence, i.e. new_bucket.  Currently this maps
		 * one-to-one to blocks required, but someday we may need a more
		 * complicated calculation here.
		 */
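
		/*
		 * For example, going from buckets 0-3 to bucket 4 raises the
		 * splitpoint from 2 to 3, so room is reserved for new_bucket = 4
		 * more bucket pages (buckets 4-7) in one go.
		 */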
		if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
		{
			/* can't split due to BlockNumber overflow */
			_hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
			_hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);
			goto fail;
		}
	}

	/*
	 * Physically allocate the new bucket's primary page.  We want to do this
	 * before changing the metapage's mapping info, in case we can't get the
	 * disk space.
	 */
	buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);

	/*
	 * Okay to proceed with split.  Update the metapage bucket mapping info.
	 *
	 * Since we are scribbling on the metapage data right in the shared
	 * buffer, any failure in this next little bit leaves us with a big
	 * problem: the metapage is effectively corrupt but could get written back
	 * to disk.  We don't really expect any failure, but just to be sure,
	 * establish a critical section.
	 */
	START_CRIT_SECTION();

	metap->hashm_maxbucket = new_bucket;

	if (new_bucket > metap->hashm_highmask)
	{
		/* Starting a new doubling */
		metap->hashm_lowmask = metap->hashm_highmask;
		metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
	}
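
	/*
	 * E.g. if highmask is currently 3 (buckets 0-3 addressable), creating
	 * bucket 4 starts a doubling: lowmask becomes 3 and highmask becomes
	 * 4 | 3 = 7.
	 */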

	/*
	 * If the split point is increasing (hashm_maxbucket's log base 2
	 * increases), we need to adjust the hashm_spares[] array and
	 * hashm_ovflpoint so that future overflow pages will be created beyond
	 * this new batch of bucket pages.
	 */
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
		metap->hashm_ovflpoint = spare_ndx;
	}

	/* Done mucking with metapage */
	END_CRIT_SECTION();

	/*
	 * Copy bucket mapping info now; this saves re-accessing the meta page
	 * inside _hash_splitbucket's inner loop.  Note that once we drop the
	 * split lock, other splits could begin, so these values might be out of
	 * date before _hash_splitbucket finishes.  That's okay, since all it
	 * needs is to tell which of these two buckets to map hashkeys into.
	 */
	maxbucket = metap->hashm_maxbucket;
	highmask = metap->hashm_highmask;
	lowmask = metap->hashm_lowmask;

	/* Write out the metapage and drop lock, but keep pin */
	_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);

	/* Relocate records to the new bucket */
	_hash_splitbucket(rel, metabuf,
					  old_bucket, new_bucket,
					  start_oblkno, buf_nblkno,
					  maxbucket, highmask, lowmask);

	/* Release bucket locks, allowing others to access them */
	_hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
	_hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);

	return;

	/* Here if decide not to split or fail to acquire old bucket lock */
fail:

	/* We didn't write the metapage, so just drop lock */
	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
}

/*
 * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
 *
 * This does not need to initialize the new bucket pages; we'll do that as
 * each one is used by _hash_expandtable().  But we have to extend the logical
 * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
 * sync with ours, so that we don't get complaints from smgr.
 *
 * We do this by writing a page of zeroes at the end of the splitpoint range.
 * We expect that the filesystem will ensure that the intervening pages read
 * as zeroes too.  On many filesystems this "hole" will not be allocated
 * immediately, which means that the index file may end up more fragmented
 * than if we forced it all to be allocated now; but since we don't scan
 * hash indexes sequentially anyway, that probably doesn't matter.
 *
 * XXX It's annoying that this code is executed with the metapage lock held.
 * We need to interlock against _hash_getovflpage() adding a new overflow page
 * concurrently, but it'd likely be better to use LockRelationForExtension
 * for the purpose.  OTOH, adding a splitpoint is a very infrequent operation,
 * so it may not be worth worrying about.
 *
 * Returns TRUE if successful, or FALSE if allocation failed due to
 * BlockNumber overflow.
 */
static bool
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
{
	BlockNumber lastblock;
	char		zerobuf[BLCKSZ];

	lastblock = firstblock + nblocks - 1;

	/*
	 * Check for overflow in block number calculation; if so, we cannot extend
	 * the index anymore.
	 */
	if (lastblock < firstblock || lastblock == InvalidBlockNumber)
		return false;

	MemSet(zerobuf, 0, sizeof(zerobuf));

	RelationOpenSmgr(rel);
	smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);

	return true;
}

/*
 * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
 *
 * We are splitting a bucket that consists of a base bucket page and zero
 * or more overflow (bucket chain) pages.  We must relocate tuples that
 * belong in the new bucket, and compress out any free space in the old
 * bucket.
 *
 * The caller must hold exclusive locks on both buckets to ensure that
 * no one else is trying to access them (see README).
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.  (The metapage is only
 * touched if it becomes necessary to add or remove overflow pages.)
 *
 * In addition, the caller must have created the new bucket's base page,
 * which is passed in buffer nbuf, pinned and write-locked.  That lock and
 * pin are released here.  (The API is set up this way because we must do
 * _hash_getnewbuf() before releasing the metapage write lock.  So instead of
 * passing the new bucket's start block number, we pass an actual buffer.)
 */
static void
_hash_splitbucket(Relation rel,
				  Buffer metabuf,
				  Bucket obucket,
				  Bucket nbucket,
				  BlockNumber start_oblkno,
				  Buffer nbuf,
				  uint32 maxbucket,
				  uint32 highmask,
				  uint32 lowmask)
{
	Buffer		obuf;
	Page		opage;
	Page		npage;
	HashPageOpaque oopaque;
	HashPageOpaque nopaque;

	/*
	 * It should be okay to simultaneously write-lock pages from each bucket,
	 * since no one else can be trying to acquire buffer lock on pages of
	 * either bucket.
	 */
	obuf = _hash_getbuf(rel, start_oblkno, HASH_WRITE, LH_BUCKET_PAGE);
	opage = BufferGetPage(obuf);
	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

	npage = BufferGetPage(nbuf);

	/* initialize the new bucket's primary page */
	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
	nopaque->hasho_prevblkno = InvalidBlockNumber;
	nopaque->hasho_nextblkno = InvalidBlockNumber;
	nopaque->hasho_bucket = nbucket;
	nopaque->hasho_flag = LH_BUCKET_PAGE;
	nopaque->hasho_page_id = HASHO_PAGE_ID;

	/*
	 * Partition the tuples in the old bucket between the old bucket and the
	 * new bucket, advancing along the old bucket's overflow bucket chain and
	 * adding overflow pages to the new bucket as needed.  Outer loop iterates
	 * once per page in old bucket.
	 */
	for (;;)
	{
		BlockNumber oblkno;
		OffsetNumber ooffnum;
		OffsetNumber omaxoffnum;
		OffsetNumber deletable[MaxOffsetNumber];
		int			ndeletable = 0;

		/* Scan each tuple in old page */
		omaxoffnum = PageGetMaxOffsetNumber(opage);
		for (ooffnum = FirstOffsetNumber;
			 ooffnum <= omaxoffnum;
			 ooffnum = OffsetNumberNext(ooffnum))
		{
			IndexTuple	itup;
			Size		itemsz;
			Bucket		bucket;

			/* skip dead tuples */
			if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
				continue;

			/*
			 * Fetch the item's hash key (conveniently stored in the item) and
			 * determine which bucket it now belongs in.
			 */
			itup = (IndexTuple) PageGetItem(opage,
											PageGetItemId(opage, ooffnum));
			bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
										  maxbucket, highmask, lowmask);
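
			/*
			 * (_hash_hashkey2bucket, in hashutil.c, essentially masks the
			 * hash key with highmask and, if the result exceeds maxbucket,
			 * masks with lowmask instead, so every key maps to an existing
			 * bucket.)
			 */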

			if (bucket == nbucket)
			{
				/*
				 * insert the tuple into the new bucket.  if it doesn't fit on
				 * the current page in the new bucket, we must allocate a new
				 * overflow page and place the tuple on that page instead.
				 *
				 * XXX we have a problem here if we fail to get space for a
				 * new overflow page: we'll error out leaving the bucket split
				 * only partially complete, meaning the index is corrupt,
				 * since searches may fail to find entries they should find.
				 */
				itemsz = IndexTupleDSize(*itup);
				itemsz = MAXALIGN(itemsz);

				if (PageGetFreeSpace(npage) < itemsz)
				{
					/* write out nbuf and drop lock, but keep pin */
					_hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
					/* chain to a new overflow page */
					nbuf = _hash_addovflpage(rel, metabuf, nbuf);
					npage = BufferGetPage(nbuf);
					/* we don't need nopaque within the loop */
				}

				/*
				 * Insert tuple on new page, using _hash_pgaddtup to ensure
				 * correct ordering by hashkey.  This is a tad inefficient
				 * since we may have to shuffle itempointers repeatedly.
				 * Possible future improvement: accumulate all the items for
				 * the new page and qsort them before insertion.
				 */
				(void) _hash_pgaddtup(rel, nbuf, itemsz, itup);

				/*
				 * Mark tuple for deletion from old page.
				 */
				deletable[ndeletable++] = ooffnum;
			}
			else
			{
				/*
				 * the tuple stays on this page, so nothing to do.
				 */
				Assert(bucket == obucket);
			}
		}

		oblkno = oopaque->hasho_nextblkno;

		/*
		 * Done scanning this old page.  If we moved any tuples, delete them
		 * from the old page.
		 */
		if (ndeletable > 0)
		{
			PageIndexMultiDelete(opage, deletable, ndeletable);
			_hash_wrtbuf(rel, obuf);
		}
		else
			_hash_relbuf(rel, obuf);

		/* Exit loop if no more overflow pages in old bucket */
		if (!BlockNumberIsValid(oblkno))
			break;

		/* Else, advance to next old page */
		obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
		opage = BufferGetPage(obuf);
		oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
	}

	/*
	 * We're at the end of the old bucket chain, so we're done partitioning
	 * the tuples.  Before quitting, call _hash_squeezebucket to ensure the
	 * tuples remaining in the old bucket (including the overflow pages) are
	 * packed as tightly as possible.  The new bucket is already tight.
	 */
	_hash_wrtbuf(rel, nbuf);

	_hash_squeezebucket(rel, obucket, start_oblkno, NULL);
}