* hashpage.c
* Hash table page management code for the Postgres hash access method
*
- * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.78 2008/10/31 15:04:59 heikki Exp $
+ * src/backend/access/hash/hashpage.c
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
*/
#include "postgres.h"
-#include "access/genam.h"
#include "access/hash.h"
#include "miscadmin.h"
-#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
-#include "utils/lsyscache.h"
static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
/*
* _hash_getlock() -- Acquire an lmgr lock.
*
- * 'whichlock' should be zero to acquire the split-control lock, or the
- * block number of a bucket's primary bucket page to acquire the per-bucket
- * lock. (See README for details of the use of these locks.)
+ * 'whichlock' should be the block number of a bucket's primary bucket page to
+ * acquire the per-bucket lock. (See README for details of the use of these
+ * locks.)
*
* 'access' must be HASH_SHARE or HASH_EXCLUSIVE.
*/
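/*
 * (Minimal sketch of this helper's body, for reference: the heavyweight
 * page lock is taken only when USELOCKING(rel) is true, i.e. when the
 * index is visible to other backends.)
 */
void
_hash_getlock(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))
		LockPage(rel, whichlock, access);
}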
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
* extend the index at a time.
*/
Buffer
-_hash_getnewbuf(Relation rel, BlockNumber blkno)
+_hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
{
- BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
+ BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
Buffer buf;
if (blkno == P_NEW)
/* smgr insists we use P_NEW to extend the relation */
if (blkno == nblocks)
{
- buf = ReadBuffer(rel, P_NEW);
+ buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
if (BufferGetBlockNumber(buf) != blkno)
elog(ERROR, "unexpected hash relation size: %u, should be %u",
BufferGetBlockNumber(buf), blkno);
}
else
- buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO, NULL);
+ buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO, NULL);
LockBuffer(buf, HASH_WRITE);
* multiple buffer locks is ignored.
*/
uint32
-_hash_metapinit(Relation rel, double num_tuples)
+_hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
{
HashMetaPage metap;
HashPageOpaque pageopaque;
uint32 i;
/* safety check */
- if (RelationGetNumberOfBlocks(rel) != 0)
+ if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
RelationGetRelationName(rel));
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
/*
* Choose the number of initial bucket pages to match the fill factor
* given the estimated number of tuples. We round up the result to the
- * next power of 2, however, and always force at least 2 bucket pages.
- * The upper limit is determined by considerations explained in
+ * next power of 2, however, and always force at least 2 bucket pages. The
+ * upper limit is determined by considerations explained in
* _hash_expandtable().
*/
dnumbuckets = num_tuples / ffactor;
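/*
 * For illustration (hypothetical numbers): if dnumbuckets comes out to,
 * say, 1250.0, it is rounded up to the next power of 2 and we start with
 * 2048 bucket pages; a result of 2.0 or less is forced up to the 2-bucket
 * minimum described above.
 */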
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
- * calls to occur. This ensures that the smgr level has the right idea of
+ * calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
- metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
+ metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
pg = BufferGetPage(metabuf);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
/*
* We initialize the index with N buckets, 0 .. N-1, occupying physical
- * blocks 1 to N. The first freespace bitmap page is in block N+1.
- * Since N is a power of 2, we can set the masks this way:
+ * blocks 1 to N. The first freespace bitmap page is in block N+1. Since
+ * N is a power of 2, we can set the masks this way:
*/
metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1;
metap->hashm_highmask = (num_buckets << 1) - 1;
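/*
 * Worked example: with num_buckets = 4 this yields maxbucket = lowmask = 3
 * (binary 011) and highmask = 7 (binary 111).
 */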
/*
* Release buffer lock on the metapage while we initialize buckets.
* Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
- * won't accomplish anything. It's a bad idea to hold buffer locks
- * for long intervals in any case, since that can block the bgwriter.
+ * won't accomplish anything. It's a bad idea to hold buffer locks for
+ * long intervals in any case, since that can block the bgwriter.
*/
_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
/* Allow interrupts, in case N is huge */
CHECK_FOR_INTERRUPTS();
- buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i));
+ buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), forkNum);
pg = BufferGetPage(buf);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
pageopaque->hasho_prevblkno = InvalidBlockNumber;
/*
* Initialize first bitmap page
*/
- _hash_initbitmap(rel, metap, num_buckets + 1);
+ _hash_initbitmap(rel, metap, num_buckets + 1, forkNum);
/* all done */
_hash_wrtbuf(rel, metabuf);
uint32 lowmask;
/*
- * Obtain the page-zero lock to assert the right to begin a split (see
- * README).
- *
- * Note: deadlock should be impossible here. Our own backend could only be
- * holding bucket sharelocks due to stopped indexscans; those will not
- * block other holders of the page-zero lock, who are only interested in
- * acquiring bucket sharelocks themselves. Exclusive bucket locks are
- * only taken here and in hashbulkdelete, and neither of these operations
- * needs any additional locks to complete. (If, due to some flaw in this
- * reasoning, we manage to deadlock anyway, it's okay to error out; the
- * index will be left in a consistent state.)
+ * Write-lock the meta page. It used to be necessary to acquire a
+ * heavyweight lock to begin a split, but that is no longer required.
*/
- _hash_getlock(rel, 0, HASH_EXCLUSIVE);
-
- /* Write-lock the meta page */
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
_hash_checkpage(rel, metabuf, LH_META_PAGE);
* _hash_alloc_buckets() would fail, but if we supported buckets smaller
* than a disk block then this would be an independent constraint.
*
- * If you change this, see also the maximum initial number of buckets
- * in _hash_metapinit().
+ * If you change this, see also the maximum initial number of buckets in
+ * _hash_metapinit().
*/
if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
goto fail;
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
}
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
+ * date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
/* Write out the metapage and drop lock, but keep pin */
_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
- /* Release split lock; okay for other splits to occur now */
- _hash_droplock(rel, 0, HASH_EXCLUSIVE);
-
/* Relocate records to the new bucket */
_hash_splitbucket(rel, metabuf, old_bucket, new_bucket,
start_oblkno, start_nblkno,
/* We didn't write the metapage, so just drop lock */
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
-
- /* Release split lock */
- _hash_droplock(rel, 0, HASH_EXCLUSIVE);
}
MemSet(zerobuf, 0, sizeof(zerobuf));
RelationOpenSmgr(rel);
- smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, rel->rd_istemp);
+ smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);
return true;
}
uint32 highmask,
uint32 lowmask)
{
- Bucket bucket;
- Buffer obuf;
- Buffer nbuf;
BlockNumber oblkno;
BlockNumber nblkno;
- HashPageOpaque oopaque;
- HashPageOpaque nopaque;
- IndexTuple itup;
- Size itemsz;
- OffsetNumber ooffnum;
- OffsetNumber noffnum;
- OffsetNumber omaxoffnum;
+ Buffer obuf;
+ Buffer nbuf;
Page opage;
Page npage;
+ HashPageOpaque oopaque;
+ HashPageOpaque nopaque;
/*
* It should be okay to simultaneously write-lock pages from each bucket,
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
nblkno = start_nblkno;
- nbuf = _hash_getnewbuf(rel, nblkno);
+ nbuf = _hash_getnewbuf(rel, nblkno, MAIN_FORKNUM);
npage = BufferGetPage(nbuf);
/* initialize the new bucket's primary page */
/*
* Partition the tuples in the old bucket between the old bucket and the
* new bucket, advancing along the old bucket's overflow bucket chain and
- * adding overflow pages to the new bucket as needed.
+ * adding overflow pages to the new bucket as needed. Outer loop iterates
+ * once per page in old bucket.
*/
- ooffnum = FirstOffsetNumber;
- omaxoffnum = PageGetMaxOffsetNumber(opage);
for (;;)
{
- /*
- * at each iteration through this loop, each of these variables should
- * be up-to-date: obuf opage oopaque ooffnum omaxoffnum
- */
-
- /* check if we're at the end of the page */
- if (ooffnum > omaxoffnum)
+ OffsetNumber ooffnum;
+ OffsetNumber omaxoffnum;
+ OffsetNumber deletable[MaxOffsetNumber];
+ int ndeletable = 0;
+
+ /* Scan each tuple in old page */
+ omaxoffnum = PageGetMaxOffsetNumber(opage);
+ for (ooffnum = FirstOffsetNumber;
+ ooffnum <= omaxoffnum;
+ ooffnum = OffsetNumberNext(ooffnum))
{
- /* at end of page, but check for an(other) overflow page */
- oblkno = oopaque->hasho_nextblkno;
- if (!BlockNumberIsValid(oblkno))
- break;
+ IndexTuple itup;
+ Size itemsz;
+ Bucket bucket;
/*
- * we ran out of tuples on this particular page, but we have more
- * overflow pages; advance to next page.
+ * Fetch the item's hash key (conveniently stored in the item) and
+ * determine which bucket it now belongs in.
*/
- _hash_wrtbuf(rel, obuf);
+ itup = (IndexTuple) PageGetItem(opage,
+ PageGetItemId(opage, ooffnum));
+ bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
+ maxbucket, highmask, lowmask);
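+ /*
+ * (For reference: _hash_hashkey2bucket masks the hash key with highmask
+ * and, if the result exceeds maxbucket, re-masks it with lowmask, so each
+ * tuple maps to either obucket or nbucket.)
+ */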
- obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
- opage = BufferGetPage(obuf);
- oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
- ooffnum = FirstOffsetNumber;
- omaxoffnum = PageGetMaxOffsetNumber(opage);
- continue;
+ if (bucket == nbucket)
+ {
+ /*
+ * insert the tuple into the new bucket. if it doesn't fit on
+ * the current page in the new bucket, we must allocate a new
+ * overflow page and place the tuple on that page instead.
+ */
+ itemsz = IndexTupleDSize(*itup);
+ itemsz = MAXALIGN(itemsz);
+
+ if (PageGetFreeSpace(npage) < itemsz)
+ {
+ /* write out nbuf and drop lock, but keep pin */
+ _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
+ /* chain to a new overflow page */
+ nbuf = _hash_addovflpage(rel, metabuf, nbuf);
+ npage = BufferGetPage(nbuf);
+ /* we don't need nblkno or nopaque within the loop */
+ }
+
+ /*
+ * Insert tuple on new page, using _hash_pgaddtup to ensure
+ * correct ordering by hashkey. This is a tad inefficient
+ * since we may have to shuffle itempointers repeatedly.
+ * Possible future improvement: accumulate all the items for
+ * the new page and qsort them before insertion.
+ */
+ (void) _hash_pgaddtup(rel, nbuf, itemsz, itup);
+
+ /*
+ * Mark tuple for deletion from old page.
+ */
+ deletable[ndeletable++] = ooffnum;
+ }
+ else
+ {
+ /*
+ * the tuple stays on this page, so nothing to do.
+ */
+ Assert(bucket == obucket);
+ }
}
+ oblkno = oopaque->hasho_nextblkno;
+
/*
- * Fetch the item's hash key (conveniently stored in the item)
- * and determine which bucket it now belongs in.
+ * Done scanning this old page. If we moved any tuples, delete them
+ * from the old page.
*/
- itup = (IndexTuple) PageGetItem(opage, PageGetItemId(opage, ooffnum));
- bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
- maxbucket, highmask, lowmask);
-
- if (bucket == nbucket)
+ if (ndeletable > 0)
{
- /*
- * insert the tuple into the new bucket. if it doesn't fit on the
- * current page in the new bucket, we must allocate a new overflow
- * page and place the tuple on that page instead.
- */
- itemsz = IndexTupleDSize(*itup);
- itemsz = MAXALIGN(itemsz);
-
- if (PageGetFreeSpace(npage) < itemsz)
- {
- /* write out nbuf and drop lock, but keep pin */
- _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
- /* chain to a new overflow page */
- nbuf = _hash_addovflpage(rel, metabuf, nbuf);
- npage = BufferGetPage(nbuf);
- /* we don't need nopaque within the loop */
- }
-
- noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
- if (PageAddItem(npage, (Item) itup, itemsz, noffnum, false, false)
- == InvalidOffsetNumber)
- elog(ERROR, "failed to add index item to \"%s\"",
- RelationGetRelationName(rel));
-
- /*
- * now delete the tuple from the old bucket. after this section
- * of code, 'ooffnum' will actually point to the ItemId to which
- * we would point if we had advanced it before the deletion
- * (PageIndexTupleDelete repacks the ItemId array). this also
- * means that 'omaxoffnum' is exactly one less than it used to be,
- * so we really can just decrement it instead of calling
- * PageGetMaxOffsetNumber.
- */
- PageIndexTupleDelete(opage, ooffnum);
- omaxoffnum = OffsetNumberPrev(omaxoffnum);
+ PageIndexMultiDelete(opage, deletable, ndeletable);
+ _hash_wrtbuf(rel, obuf);
}
else
- {
- /*
- * the tuple stays on this page. we didn't move anything, so we
- * didn't delete anything and therefore we don't have to change
- * 'omaxoffnum'.
- */
- Assert(bucket == obucket);
- ooffnum = OffsetNumberNext(ooffnum);
- }
+ _hash_relbuf(rel, obuf);
+
+ /* Exit loop if no more overflow pages in old bucket */
+ if (!BlockNumberIsValid(oblkno))
+ break;
+
+ /* Else, advance to next old page */
+ obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
+ opage = BufferGetPage(obuf);
+ oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
}
/*
* We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/
- _hash_wrtbuf(rel, obuf);
_hash_wrtbuf(rel, nbuf);
_hash_squeezebucket(rel, obucket, start_oblkno, NULL);