/*-------------------------------------------------------------------------
 *
 * hashpage.c
 *    Hash table page management code for the Postgres hash access method
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.53 2005/11/06 19:29:00 tgl Exp $
 *
 * NOTES
 *    Postgres hash pages look like ordinary relation pages.  The opaque
 *    data at high addresses includes information about the page including
 *    whether a page is an overflow page or a true bucket, the bucket
 *    number, and the block numbers of the preceding and following pages
 *    in the bucket chain.
 *
 *    The first page in a hash relation, page zero, is special -- it stores
 *    information describing the hash table; it is referred to as the
 *    "meta page."  Pages one and higher store the actual data.
 *
 *    There are also bitmap pages, which are not manipulated here;
 *    see hashovfl.c.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/hash.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"


static void _hash_splitbucket(Relation rel, Buffer metabuf,
                  Bucket obucket, Bucket nbucket,
                  BlockNumber start_oblkno,
                  BlockNumber start_nblkno,
                  uint32 maxbucket,
                  uint32 highmask, uint32 lowmask);
/*
 * We use high-concurrency locking on hash indexes (see README for an overview
 * of the locking rules).  However, we can skip taking lmgr locks when the
 * index is local to the current backend (ie, either temp or new in the
 * current transaction).  No one else can see it, so there's no reason to
 * take locks.  We still take buffer-level locks, but not lmgr locks.
 */
#define USELOCKING(rel)        (!RELATION_IS_LOCAL(rel))
/*
 * _hash_getlock() -- Acquire an lmgr lock.
 *
 * 'whichlock' should be zero to acquire the split-control lock, or the
 * block number of a bucket's primary bucket page to acquire the per-bucket
 * lock.  (See README for details of the use of these locks.)
 *
 * 'access' must be HASH_SHARE or HASH_EXCLUSIVE.
 */
void
_hash_getlock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        LockPage(rel, whichlock, access);
}
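/*
 * For illustration (a hypothetical caller, not code from this file): the
 * per-bucket lock is taken by passing the bucket's primary page number,
 *
 *     _hash_getlock(rel, BUCKET_TO_BLKNO(metap, bucket), HASH_SHARE);
 *     ... scan the bucket ...
 *     _hash_droplock(rel, BUCKET_TO_BLKNO(metap, bucket), HASH_SHARE);
 *
 * while passing zero takes the split-control lock, as _hash_expandtable
 * does below.
 */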
/*
 * _hash_try_getlock() -- Acquire an lmgr lock, but only if it's free.
 *
 * Same as above except we return FALSE without blocking if lock isn't free.
 */
bool
_hash_try_getlock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        return ConditionalLockPage(rel, whichlock, access);
    else
        return true;
}

/*
 * _hash_droplock() -- Release an lmgr lock.
 */
void
_hash_droplock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        UnlockPage(rel, whichlock, access);
}
/*
 * _hash_getbuf() -- Get a buffer by block number for read or write.
 *
 * 'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
 *
 * When this routine returns, the appropriate lock is set on the
 * requested buffer and its reference count has been incremented
 * (ie, the buffer is "locked and pinned").
 *
 * XXX P_NEW is not used because, unlike the tree structures, we
 * need the bucket blocks to be at certain block numbers.
 *
 * All call sites should call either _hash_pageinit or _hash_checkpage
 * on the returned page, depending on whether the block is expected
 * to be new or not.
 */
Buffer
_hash_getbuf(Relation rel, BlockNumber blkno, int access)
{
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBuffer(rel, blkno);

    if (access != HASH_NOLOCK)
        LockBuffer(buf, access);

    /* ref count and lock type are correct */
    return buf;
}
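/*
 * For illustration, a read-only access to an existing page typically pairs
 * this routine with _hash_checkpage and _hash_relbuf, roughly:
 *
 *     buf = _hash_getbuf(rel, blkno, HASH_READ);
 *     _hash_checkpage(rel, buf, LH_BUCKET_PAGE);
 *     page = BufferGetPage(buf);
 *     ... examine the page ...
 *     _hash_relbuf(rel, buf);
 */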
/*
 * _hash_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.  Note that either read or
 * write lock can be dropped this way, but if we modified the buffer,
 * this is NOT the right way to release a write lock.
 */
void
_hash_relbuf(Relation rel, Buffer buf)
{
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    ReleaseBuffer(buf);
}

/*
 * _hash_dropbuf() -- release an unlocked buffer.
 *
 * This is used to unpin a buffer on which we hold no lock.  It is assumed
 * that the buffer is not dirty.
 */
void
_hash_dropbuf(Relation rel, Buffer buf)
{
    ReleaseBuffer(buf);
}

/*
 * _hash_wrtbuf() -- write a hash page to disk.
 *
 * This routine releases the lock held on the buffer and our refcount
 * for it.  It is an error to call _hash_wrtbuf() without a write lock
 * and a pin on the buffer.
 *
 * NOTE: actually, the buffer manager just marks the shared buffer page
 * dirty here; the real I/O happens later.  This is okay since we are not
 * relying on write ordering anyway.  The WAL mechanism is responsible for
 * guaranteeing correctness after a crash.
 */
void
_hash_wrtbuf(Relation rel, Buffer buf)
{
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    WriteBuffer(buf);
}
/*
 * _hash_wrtnorelbuf() -- write a hash page to disk, but do not release
 *                        our reference or lock.
 *
 * It is an error to call _hash_wrtnorelbuf() without a write lock
 * and a pin on the buffer.
 */
void
_hash_wrtnorelbuf(Relation rel, Buffer buf)
{
    WriteNoReleaseBuffer(buf);
}

/*
 * _hash_chgbufaccess() -- Change the lock type on a buffer, without
 *                         dropping our pin on it.
 *
 * from_access and to_access may be HASH_READ, HASH_WRITE, or HASH_NOLOCK,
 * the last indicating that no buffer-level lock is held or wanted.
 *
 * When from_access == HASH_WRITE, we assume the buffer is dirty and tell
 * bufmgr it must be written out.  If the caller wants to release a write
 * lock on a page that's not been modified, it's okay to pass from_access
 * as HASH_READ (a bit ugly, but handy in some places).
 */
void
_hash_chgbufaccess(Relation rel,
                   Buffer buf,
                   int from_access,
                   int to_access)
{
    if (from_access != HASH_NOLOCK)
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    if (from_access == HASH_WRITE)
        WriteNoReleaseBuffer(buf);
    if (to_access != HASH_NOLOCK)
        LockBuffer(buf, to_access);
}
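/*
 * For illustration, two patterns that recur below: after modifying a page,
 * write it out and drop the lock while keeping the pin,
 *
 *     _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
 *
 * and later re-acquire a write lock on the still-pinned buffer,
 *
 *     _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
 */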
/*
 * _hash_metapinit() -- Initialize the metadata page of a hash index,
 *                      the two buckets that we begin with and the initial
 *                      bitmap page.
 *
 * We are fairly cavalier about locking here, since we know that no one else
 * could be accessing this index.  In particular the rule about not holding
 * multiple buffer locks is ignored.
 */
_hash_metapinit(Relation rel)
    HashPageOpaque pageopaque;

    if (RelationGetNumberOfBlocks(rel) != 0)
        elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
             RelationGetRelationName(rel));
    /*
     * Determine the target fill factor (tuples per bucket) for this index.
     * The idea is to make the fill factor correspond to pages about 3/4ths
     * full.  We can compute it exactly if the index datatype is fixed-width,
     * but for var-width there's some guessing involved.
     */
    data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid,
                                 RelationGetDescr(rel)->attrs[0]->atttypmod);
    item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) +
        sizeof(ItemIdData);     /* include the line pointer */
    ffactor = (BLCKSZ * 3 / 4) / item_width;
    /* keep to a sane range */
    if (ffactor < 10)
        ffactor = 10;
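    /*
     * For example, with the default 8K block size and a fixed-width 4-byte
     * key, assuming an 8-byte HashItemData header, 4-byte line pointers and
     * 8-byte MAXALIGN, item_width works out to 8 + 8 + 4 = 20 bytes and
     * ffactor to 6144 / 20 = 307 tuples per bucket; the exact figure varies
     * with alignment and key width.
     */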
    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
    pg = BufferGetPage(metabuf);
    _hash_pageinit(pg, BufferGetPageSize(metabuf));

    pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
    pageopaque->hasho_prevblkno = InvalidBlockNumber;
    pageopaque->hasho_nextblkno = InvalidBlockNumber;
    pageopaque->hasho_bucket = -1;
    pageopaque->hasho_flag = LH_META_PAGE;
    pageopaque->hasho_filler = HASHO_FILL;

    metap = (HashMetaPage) pg;

    metap->hashm_magic = HASH_MAGIC;
    metap->hashm_version = HASH_VERSION;
    metap->hashm_ntuples = 0;
    metap->hashm_nmaps = 0;
    metap->hashm_ffactor = ffactor;
    metap->hashm_bsize = BufferGetPageSize(metabuf);
    /* find largest bitmap array size that will fit in page size */
    for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
    {
        if ((1 << i) <= (metap->hashm_bsize -
                         (MAXALIGN(sizeof(PageHeaderData)) +
                          MAXALIGN(sizeof(HashPageOpaqueData)))))
            break;
    }
    metap->hashm_bmsize = 1 << i;
    metap->hashm_bmshift = i + BYTE_TO_BIT;
    Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
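    /*
     * For example, with an 8192-byte page and roughly 40 bytes taken by the
     * page header plus the hash opaque data, the largest power of two that
     * fits is 4096, so hashm_bmsize is 4096 bytes and hashm_bmshift is
     * 12 + BYTE_TO_BIT = 15, meaning each bitmap page can track 2^15 = 32768
     * overflow pages.
     */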
    metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);

    /*
     * We initialize the index with two buckets, 0 and 1, occupying physical
     * blocks 1 and 2.  The first freespace bitmap page is in block 3.
     */
    metap->hashm_maxbucket = metap->hashm_lowmask = 1;  /* nbuckets - 1 */
    metap->hashm_highmask = 3;  /* (nbuckets << 1) - 1 */

    MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
    MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

    metap->hashm_spares[1] = 1; /* the first bitmap page is the only spare */
    metap->hashm_ovflpoint = 1;
    metap->hashm_firstfree = 0;
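    /*
     * With this initial state, BUCKET_TO_BLKNO (see hash.h) maps bucket 0 to
     * block 1 and bucket 1 to block 2; since hashm_spares[1] = 1 accounts
     * for the bitmap page at block 3, the next doubling (buckets 2 and 3)
     * will be placed at blocks 4 and 5.
     */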
    /*
     * Initialize the first two buckets
     */
    for (i = 0; i <= 1; i++)
    {
        buf = _hash_getbuf(rel, BUCKET_TO_BLKNO(metap, i), HASH_WRITE);
        pg = BufferGetPage(buf);
        _hash_pageinit(pg, BufferGetPageSize(buf));
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
        pageopaque->hasho_prevblkno = InvalidBlockNumber;
        pageopaque->hasho_nextblkno = InvalidBlockNumber;
        pageopaque->hasho_bucket = i;
        pageopaque->hasho_flag = LH_BUCKET_PAGE;
        pageopaque->hasho_filler = HASHO_FILL;
        _hash_wrtbuf(rel, buf);
    }
    /*
     * Initialize first bitmap page.  Can't do this until we create the first
     * two buckets, else smgr will complain.
     */
    _hash_initbitmap(rel, metap, 3);

    _hash_wrtbuf(rel, metabuf);

/*
 * _hash_pageinit() -- Initialize a new hash index page.
 */
void
_hash_pageinit(Page page, Size size)
{
    Assert(PageIsNew(page));
    PageInit(page, size, sizeof(HashPageOpaqueData));
}

/*
 * Attempt to expand the hash table by creating one new bucket.
 *
 * This will silently do nothing if it cannot get the needed locks.
 *
 * The caller should hold no locks on the hash index.
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.
 */
_hash_expandtable(Relation rel, Buffer metabuf)
    BlockNumber start_oblkno;
    BlockNumber start_nblkno;
    /*
     * Obtain the page-zero lock to assert the right to begin a split (see
     * README).
     *
     * Note: deadlock should be impossible here.  Our own backend could only be
     * holding bucket sharelocks due to stopped indexscans; those will not
     * block other holders of the page-zero lock, who are only interested in
     * acquiring bucket sharelocks themselves.  Exclusive bucket locks are
     * only taken here and in hashbulkdelete, and neither of these operations
     * needs any additional locks to complete.  (If, due to some flaw in this
     * reasoning, we manage to deadlock anyway, it's okay to error out; the
     * index will be left in a consistent state.)
     */
    _hash_getlock(rel, 0, HASH_EXCLUSIVE);

    /* Write-lock the meta page */
    _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);

    _hash_checkpage(rel, metabuf, LH_META_PAGE);
    metap = (HashMetaPage) BufferGetPage(metabuf);
    /*
     * Check to see if split is still needed; someone else might have already
     * done one while we waited for the lock.
     *
     * Make sure this stays in sync with _hash_doinsert()
     */
    if (metap->hashm_ntuples <=
        (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
        goto fail;
    /*
     * Determine which bucket is to be split, and attempt to lock the old
     * bucket.  If we can't get the lock, give up.
     *
     * The lock protects us against other backends, but not against our own
     * backend.  Must check for active scans separately.
     *
     * Ideally we would lock the new bucket too before proceeding, but if we are
     * about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
     * correct yet.  For simplicity we update the metapage first and then
     * lock.  This should be okay because no one else should be trying to lock
     * the new bucket yet...
     */
    new_bucket = metap->hashm_maxbucket + 1;
    old_bucket = (new_bucket & metap->hashm_lowmask);

    start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
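    /*
     * For example, if the table currently has 6 buckets (hashm_maxbucket = 5,
     * lowmask = 3, highmask = 7), the new bucket is 6 and the bucket to be
     * split is 6 & 3 = 2: bucket 2 currently holds exactly the tuples whose
     * hash values will map to bucket 2 or bucket 6 once 8 buckets exist.
     */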
    if (_hash_has_active_scan(rel, old_bucket))
        goto fail;

    if (!_hash_try_getlock(rel, start_oblkno, HASH_EXCLUSIVE))
        goto fail;
    /*
     * Okay to proceed with split.  Update the metapage bucket mapping info.
     *
     * Since we are scribbling on the metapage data right in the shared buffer,
     * any failure in this next little bit leaves us with a big problem: the
     * metapage is effectively corrupt but could get written back to disk.  We
     * don't really expect any failure, but just to be sure, establish a
     * critical section.
     */
    START_CRIT_SECTION();
    metap->hashm_maxbucket = new_bucket;

    if (new_bucket > metap->hashm_highmask)
    {
        /* Starting a new doubling */
        metap->hashm_lowmask = metap->hashm_highmask;
        metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
    }
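    /*
     * For example, when new_bucket = 4 the table grows past highmask = 3:
     * lowmask becomes 3 and highmask becomes 4 | 3 = 7, so bucket numbers
     * are now taken from the low three bits of the hash value instead of
     * the low two.
     */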
    /*
     * If the split point is increasing (hashm_maxbucket's log base 2
     * increases), we need to adjust the hashm_spares[] array and
     * hashm_ovflpoint so that future overflow pages will be created beyond
     * this new batch of bucket pages.
     *
     * XXX should initialize new bucket pages to prevent out-of-order page
     * creation?  Don't wanna do it right here though.
     */
    spare_ndx = _hash_log2(metap->hashm_maxbucket + 1);
    if (spare_ndx > metap->hashm_ovflpoint)
    {
        Assert(spare_ndx == metap->hashm_ovflpoint + 1);
        metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
        metap->hashm_ovflpoint = spare_ndx;
    }
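    /*
     * Continuing the example above: when bucket 4 is created,
     * _hash_log2(5) = 3 exceeds the previous hashm_ovflpoint of 2, so
     * hashm_spares[3] starts out equal to hashm_spares[2]; overflow pages
     * allocated from now on are counted in spares[3] and therefore live
     * beyond the block numbers reserved for buckets 4 through 7.
     */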
    /* now we can compute the new bucket's primary block number */
    start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

    Assert(!_hash_has_active_scan(rel, new_bucket));

    if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE))
        elog(PANIC, "could not get lock on supposedly new bucket");

    /* Done mucking with metapage */
    END_CRIT_SECTION();
    /*
     * Copy bucket mapping info now; this saves re-accessing the meta page
     * inside _hash_splitbucket's inner loop.  Note that once we drop the
     * split lock, other splits could begin, so these values might be out of
     * date before _hash_splitbucket finishes.  That's okay, since all it
     * needs is to tell which of these two buckets to map hashkeys into.
     */
    maxbucket = metap->hashm_maxbucket;
    highmask = metap->hashm_highmask;
    lowmask = metap->hashm_lowmask;

    /* Write out the metapage and drop lock, but keep pin */
    _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);

    /* Release split lock; okay for other splits to occur now */
    _hash_droplock(rel, 0, HASH_EXCLUSIVE);
    /* Relocate records to the new bucket */
    _hash_splitbucket(rel, metabuf, old_bucket, new_bucket,
                      start_oblkno, start_nblkno,
                      maxbucket, highmask, lowmask);

    /* Release bucket locks, allowing others to access them */
    _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
    _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);

    return;

    /* Here if decide not to split or fail to acquire old bucket lock */
fail:
    /* We didn't write the metapage, so just drop lock */
    _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);

    /* Release split lock */
    _hash_droplock(rel, 0, HASH_EXCLUSIVE);
/*
 * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
 *
 * We are splitting a bucket that consists of a base bucket page and zero
 * or more overflow (bucket chain) pages.  We must relocate tuples that
 * belong in the new bucket, and compress out any free space in the old
 * bucket.
 *
 * The caller must hold exclusive locks on both buckets to ensure that
 * no one else is trying to access them (see README).
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.  (The metapage is only
 * touched if it becomes necessary to add or remove overflow pages.)
 */
static void
_hash_splitbucket(Relation rel,
                  Buffer metabuf,
                  Bucket obucket,
                  Bucket nbucket,
                  BlockNumber start_oblkno,
                  BlockNumber start_nblkno,
                  uint32 maxbucket,
                  uint32 highmask,
                  uint32 lowmask)
    HashPageOpaque oopaque;
    HashPageOpaque nopaque;
    OffsetNumber ooffnum;
    OffsetNumber noffnum;
    OffsetNumber omaxoffnum;
    TupleDesc   itupdesc = RelationGetDescr(rel);
    /*
     * It should be okay to simultaneously write-lock pages from each bucket,
     * since no one else can be trying to acquire buffer lock on pages of
     * either bucket.
     */
    oblkno = start_oblkno;
    obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
    _hash_checkpage(rel, obuf, LH_BUCKET_PAGE);
    opage = BufferGetPage(obuf);
    oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

    nblkno = start_nblkno;
    nbuf = _hash_getbuf(rel, nblkno, HASH_WRITE);
    npage = BufferGetPage(nbuf);

    /* initialize the new bucket's primary page */
    _hash_pageinit(npage, BufferGetPageSize(nbuf));
    nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
    nopaque->hasho_prevblkno = InvalidBlockNumber;
    nopaque->hasho_nextblkno = InvalidBlockNumber;
    nopaque->hasho_bucket = nbucket;
    nopaque->hasho_flag = LH_BUCKET_PAGE;
    nopaque->hasho_filler = HASHO_FILL;
    /*
     * Partition the tuples in the old bucket between the old bucket and the
     * new bucket, advancing along the old bucket's overflow bucket chain and
     * adding overflow pages to the new bucket as needed.
     */
    ooffnum = FirstOffsetNumber;
    omaxoffnum = PageGetMaxOffsetNumber(opage);

    /*
     * at each iteration through this loop, each of these variables should
     * be up-to-date: obuf opage oopaque ooffnum omaxoffnum
     */

    /* check if we're at the end of the page */
    if (ooffnum > omaxoffnum)
    {
        /* at end of page, but check for an(other) overflow page */
        oblkno = oopaque->hasho_nextblkno;
        if (!BlockNumberIsValid(oblkno))
            break;

        /*
         * we ran out of tuples on this particular page, but we have more
         * overflow pages; advance to next page.
         */
        _hash_wrtbuf(rel, obuf);

        obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
        _hash_checkpage(rel, obuf, LH_OVERFLOW_PAGE);
        opage = BufferGetPage(obuf);
        oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
        ooffnum = FirstOffsetNumber;
        omaxoffnum = PageGetMaxOffsetNumber(opage);
    }

    /*
     * Re-hash the tuple to determine which bucket it now belongs in.
     *
     * It is annoying to call the hash function while holding locks, but
     * releasing and relocking the page for each tuple is unappealing too.
     */
    hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
    itup = &(hitem->hash_itup);
    datum = index_getattr(itup, 1, itupdesc, &null);

    bucket = _hash_hashkey2bucket(_hash_datum2hashkey(rel, datum),
                                  maxbucket, highmask, lowmask);
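    /*
     * _hash_hashkey2bucket (see hashutil.c) masks the hash value with
     * highmask and, if the result exceeds maxbucket, falls back to lowmask;
     * given the way obucket and nbucket were chosen, each tuple therefore
     * maps either back to obucket or to nbucket, never anywhere else.
     */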
    if (bucket == nbucket)
    {
        /*
         * insert the tuple into the new bucket.  if it doesn't fit on the
         * current page in the new bucket, we must allocate a new overflow
         * page and place the tuple on that page instead.
         */
        itemsz = IndexTupleDSize(hitem->hash_itup)
            + (sizeof(HashItemData) - sizeof(IndexTupleData));

        itemsz = MAXALIGN(itemsz);

        if (PageGetFreeSpace(npage) < itemsz)
        {
            /* write out nbuf and drop lock, but keep pin */
            _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
            /* chain to a new overflow page */
            nbuf = _hash_addovflpage(rel, metabuf, nbuf);
            _hash_checkpage(rel, nbuf, LH_OVERFLOW_PAGE);
            npage = BufferGetPage(nbuf);
            /* we don't need nopaque within the loop */
        }

        noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
        if (PageAddItem(npage, (Item) hitem, itemsz, noffnum, LP_USED)
            == InvalidOffsetNumber)
            elog(ERROR, "failed to add index item to \"%s\"",
                 RelationGetRelationName(rel));
        /*
         * now delete the tuple from the old bucket.  after this section
         * of code, 'ooffnum' will actually point to the ItemId to which
         * we would point if we had advanced it before the deletion
         * (PageIndexTupleDelete repacks the ItemId array).  this also
         * means that 'omaxoffnum' is exactly one less than it used to be,
         * so we really can just decrement it instead of calling
         * PageGetMaxOffsetNumber.
         */
        PageIndexTupleDelete(opage, ooffnum);
        omaxoffnum = OffsetNumberPrev(omaxoffnum);
    }
    else
    {
        /*
         * the tuple stays on this page.  we didn't move anything, so we
         * didn't delete anything and therefore we don't have to change
         * 'omaxoffnum'.
         */
        Assert(bucket == obucket);
        ooffnum = OffsetNumberNext(ooffnum);
    }
    /*
     * We're at the end of the old bucket chain, so we're done partitioning
     * the tuples.  Before quitting, call _hash_squeezebucket to ensure the
     * tuples remaining in the old bucket (including the overflow pages) are
     * packed as tightly as possible.  The new bucket is already tight.
     */
    _hash_wrtbuf(rel, obuf);
    _hash_wrtbuf(rel, nbuf);

    _hash_squeezebucket(rel, obucket, start_oblkno);