* hashpage.c
* Hash table page management code for the Postgres hash access method
*
- * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.50 2005/06/09 18:23:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.58 2006/07/02 02:23:18 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
#include "access/genam.h"
#include "access/hash.h"
+#include "catalog/index.h"
+#include "miscadmin.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
* (ie, the buffer is "locked and pinned").
*
* XXX P_NEW is not used because, unlike the tree structures, we
- * need the bucket blocks to be at certain block numbers. we must
- * depend on the caller to call _hash_pageinit on the block if it
- * knows that this is a new block.
+ * need the bucket blocks to be at certain block numbers.
+ *
+ * All call sites should call either _hash_pageinit or _hash_checkpage
+ * on the returned page, depending on whether the block is expected
+ * to be new or not.
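+ *
+ * For illustration, a typical caller fetching an existing bucket page
+ * might do
+ *
+ *		buf = _hash_getbuf(rel, blkno, HASH_READ);
+ *		_hash_checkpage(rel, buf, LH_BUCKET_PAGE);
+ *		page = BufferGetPage(buf);
+ *
+ * whereas a caller allocating a fresh block would apply _hash_pageinit
+ * to the returned page instead.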
*/
Buffer
_hash_getbuf(Relation rel, BlockNumber blkno, int access)
/*
* _hash_relbuf() -- release a locked buffer.
*
- * Lock and pin (refcount) are both dropped. Note that either read or
- * write lock can be dropped this way, but if we modified the buffer,
- * this is NOT the right way to release a write lock.
+ * Lock and pin (refcount) are both dropped.
*/
void
_hash_relbuf(Relation rel, Buffer buf)
{
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
}
/*
* _hash_dropbuf() -- release an unlocked buffer.
*
- * This is used to unpin a buffer on which we hold no lock. It is assumed
- * that the buffer is not dirty.
+ * This is used to unpin a buffer on which we hold no lock.
*/
void
_hash_dropbuf(Relation rel, Buffer buf)
* for it. It is an error to call _hash_wrtbuf() without a write lock
* and a pin on the buffer.
*
- * NOTE: actually, the buffer manager just marks the shared buffer page
- * dirty here; the real I/O happens later. This is okay since we are not
- * relying on write ordering anyway. The WAL mechanism is responsible for
- * guaranteeing correctness after a crash.
+ * NOTE: this routine should go away when/if hash indexes are WAL-ified.
+ * The correct sequence of operations is to mark the buffer dirty, then
+ * write the WAL record, then release the lock and pin; so marking dirty
+ * can't be combined with releasing.
*/
void
_hash_wrtbuf(Relation rel, Buffer buf)
{
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
-}
-
-/*
- * _hash_wrtnorelbuf() -- write a hash page to disk, but do not release
- * our reference or lock.
- *
- * It is an error to call _hash_wrtnorelbuf() without a write lock
- * and a pin on the buffer.
- *
- * See above NOTE.
- */
-void
-_hash_wrtnorelbuf(Relation rel, Buffer buf)
-{
- WriteNoReleaseBuffer(buf);
+ MarkBufferDirty(buf);
+ UnlockReleaseBuffer(buf);
}
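+
+/*
+ * For illustration, the WAL-correct sequence sketched in the NOTE above
+ * would look roughly like:
+ *
+ *		MarkBufferDirty(buf);
+ *		... insert WAL record(s) describing the change ...
+ *		UnlockReleaseBuffer(buf);
+ */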
/*
int from_access,
int to_access)
{
+ if (from_access == HASH_WRITE)
+ MarkBufferDirty(buf);
if (from_access != HASH_NOLOCK)
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- if (from_access == HASH_WRITE)
- WriteNoReleaseBuffer(buf);
-
if (to_access != HASH_NOLOCK)
LockBuffer(buf, to_access);
}
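+
+/*
+ * For illustration, a caller can drop just the lock while keeping the
+ * pin, then re-lock the buffer later:
+ *
+ *		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ *		... work that must not hold the metapage lock ...
+ *		_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ */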
RelationGetRelationName(rel));
/*
- * Determine the target fill factor (tuples per bucket) for this
- * index. The idea is to make the fill factor correspond to pages
- * about 3/4ths full. We can compute it exactly if the index datatype
- * is fixed-width, but for var-width there's some guessing involved.
+ * Determine the target fill factor (tuples per bucket) for this index.
+ * The idea is to make the fill factor correspond to pages about as full
+ * as the user-settable fillfactor parameter says.  We can compute it
+ * exactly if the index datatype is fixed-width, but for var-width there's
+ * some guessing involved.
*/
data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid,
- RelationGetDescr(rel)->attrs[0]->atttypmod);
- item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) +
+ RelationGetDescr(rel)->attrs[0]->atttypmod);
+ item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
sizeof(ItemIdData); /* include the line pointer */
- ffactor = (BLCKSZ * 3 / 4) / item_width;
+ ffactor = BLCKSZ * IndexGetFillFactor(rel) / 100 / item_width;
/* keep to a sane range */
if (ffactor < 10)
ffactor = 10;
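+
+	/*
+	 * Illustrative arithmetic (assuming BLCKSZ = 8192 and MAXALIGN of 8):
+	 * for an int4 key, data_width = 4, so item_width = MAXALIGN(8) +
+	 * MAXALIGN(4) + 4 = 20; with the default hash fillfactor of 75 this
+	 * gives ffactor = 8192 * 75 / 100 / 20 = 307.
+	 */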
metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
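+	/* (HASHPROC identifies support proc 1, the key datatype's hash function) */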
/*
- * We initialize the index with two buckets, 0 and 1, occupying
- * physical blocks 1 and 2. The first freespace bitmap page is in
- * block 3.
+ * We initialize the index with two buckets, 0 and 1, occupying physical
+ * blocks 1 and 2. The first freespace bitmap page is in block 3.
*/
metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */
metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */
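+
+	/*
+	 * For illustration, given these masks _hash_hashkey2bucket() maps a
+	 * hash value to a bucket essentially as
+	 *
+	 *		bucket = hashkey & highmask;
+	 *		if (bucket > maxbucket)
+	 *			bucket = bucket & lowmask;
+	 *
+	 * so with only buckets 0 and 1, a key hashing to 2 or 3 wraps down to
+	 * bucket 0 or 1 respectively.
+	 */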
}
/*
- * Initialize first bitmap page. Can't do this until we create the
- * first two buckets, else smgr will complain.
+ * Initialize first bitmap page. Can't do this until we create the first
+ * two buckets, else smgr will complain.
*/
_hash_initbitmap(rel, metap, 3);
* Obtain the page-zero lock to assert the right to begin a split (see
* README).
*
- * Note: deadlock should be impossible here. Our own backend could only
- * be holding bucket sharelocks due to stopped indexscans; those will
- * not block other holders of the page-zero lock, who are only
- * interested in acquiring bucket sharelocks themselves. Exclusive
- * bucket locks are only taken here and in hashbulkdelete, and neither
- * of these operations needs any additional locks to complete. (If,
- * due to some flaw in this reasoning, we manage to deadlock anyway,
- * it's okay to error out; the index will be left in a consistent
- * state.)
+ * Note: deadlock should be impossible here. Our own backend could only be
+ * holding bucket sharelocks due to stopped indexscans; those will not
+ * block other holders of the page-zero lock, who are only interested in
+ * acquiring bucket sharelocks themselves. Exclusive bucket locks are
+ * only taken here and in hashbulkdelete, and neither of these operations
+ * needs any additional locks to complete. (If, due to some flaw in this
+ * reasoning, we manage to deadlock anyway, it's okay to error out; the
+ * index will be left in a consistent state.)
*/
_hash_getlock(rel, 0, HASH_EXCLUSIVE);
/* Write-lock the meta page */
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ _hash_checkpage(rel, metabuf, LH_META_PAGE);
metap = (HashMetaPage) BufferGetPage(metabuf);
- _hash_checkpage(rel, (Page) metap, LH_META_PAGE);
/*
- * Check to see if split is still needed; someone else might have
- * already done one while we waited for the lock.
+ * Check to see if split is still needed; someone else might have already
+ * done one while we waited for the lock.
*
* Make sure this stays in sync with _hash_doinsert()
*/
* backend. Must check for active scans separately.
*
* Ideally we would lock the new bucket too before proceeding, but if we
- * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
- * isn't correct yet. For simplicity we update the metapage first and
- * then lock. This should be okay because no one else should be
- * trying to lock the new bucket yet...
+ * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
+ * correct yet. For simplicity we update the metapage first and then
+ * lock. This should be okay because no one else should be trying to lock
+ * the new bucket yet...
*/
new_bucket = metap->hashm_maxbucket + 1;
old_bucket = (new_bucket & metap->hashm_lowmask);
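+
+	/*
+	 * For illustration: with maxbucket = 5 and lowmask = 3, creating
+	 * new_bucket = 6 gives old_bucket = 6 & 3 = 2, i.e. the tuples now in
+	 * bucket 2 get redistributed between buckets 2 and 6.
+	 */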
goto fail;
/*
- * Okay to proceed with split. Update the metapage bucket mapping
- * info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
- * problem: the metapage is effectively corrupt but could get written
- * back to disk. We don't really expect any failure, but just to be
- * sure, establish a critical section.
+ * problem: the metapage is effectively corrupt but could get written back
+ * to disk. We don't really expect any failure, but just to be sure,
+ * establish a critical section.
*/
START_CRIT_SECTION();
/*
* If the split point is increasing (hashm_maxbucket's log base 2
* increases), we need to adjust the hashm_spares[] array and
- * hashm_ovflpoint so that future overflow pages will be created
- * beyond this new batch of bucket pages.
+ * hashm_ovflpoint so that future overflow pages will be created beyond
+ * this new batch of bucket pages.
*
* XXX should initialize new bucket pages to prevent out-of-order page
* creation? Don't wanna do it right here though.
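+ *
+ * For illustration: buckets are allocated in power-of-2 batches, so once
+ * bucket 4 is created, buckets 4..7 form the next batch; advancing
+ * hashm_spares and hashm_ovflpoint here ensures that overflow pages
+ * allocated from now on get block numbers beyond that whole batch.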
/*
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
- * split lock, other splits could begin, so these values might be out
- * of date before _hash_splitbucket finishes. That's okay, since all
- * it needs is to tell which of these two buckets to map hashkeys
- * into.
+ * split lock, other splits could begin, so these values might be out of
+ * date before _hash_splitbucket finishes. That's okay, since all it
+ * needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
highmask = metap->hashm_highmask;
BlockNumber nblkno;
bool null;
Datum datum;
- HashItem hitem;
HashPageOpaque oopaque;
HashPageOpaque nopaque;
IndexTuple itup;
TupleDesc itupdesc = RelationGetDescr(rel);
/*
- * It should be okay to simultaneously write-lock pages from each
- * bucket, since no one else can be trying to acquire buffer lock on
- * pages of either bucket.
+ * It should be okay to simultaneously write-lock pages from each bucket,
+ * since no one else can be trying to acquire buffer lock on pages of
+ * either bucket.
*/
oblkno = start_oblkno;
- nblkno = start_nblkno;
obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
- nbuf = _hash_getbuf(rel, nblkno, HASH_WRITE);
+ _hash_checkpage(rel, obuf, LH_BUCKET_PAGE);
opage = BufferGetPage(obuf);
- npage = BufferGetPage(nbuf);
-
- _hash_checkpage(rel, opage, LH_BUCKET_PAGE);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
+ nblkno = start_nblkno;
+ nbuf = _hash_getbuf(rel, nblkno, HASH_WRITE);
+ npage = BufferGetPage(nbuf);
+
/* initialize the new bucket's primary page */
_hash_pageinit(npage, BufferGetPageSize(nbuf));
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
nopaque->hasho_filler = HASHO_FILL;
/*
- * Partition the tuples in the old bucket between the old bucket and
- * the new bucket, advancing along the old bucket's overflow bucket
- * chain and adding overflow pages to the new bucket as needed.
+ * Partition the tuples in the old bucket between the old bucket and the
+ * new bucket, advancing along the old bucket's overflow bucket chain and
+ * adding overflow pages to the new bucket as needed.
*/
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
for (;;)
{
/*
- * at each iteration through this loop, each of these variables
- * should be up-to-date: obuf opage oopaque ooffnum omaxoffnum
+ * at each iteration through this loop, each of these variables should
+ * be up-to-date: obuf opage oopaque ooffnum omaxoffnum
*/
/* check if we're at the end of the page */
break;
/*
- * we ran out of tuples on this particular page, but we have
- * more overflow pages; advance to next page.
+ * we ran out of tuples on this particular page, but we have more
+ * overflow pages; advance to next page.
*/
_hash_wrtbuf(rel, obuf);
obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
+ _hash_checkpage(rel, obuf, LH_OVERFLOW_PAGE);
opage = BufferGetPage(obuf);
- _hash_checkpage(rel, opage, LH_OVERFLOW_PAGE);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
* Re-hash the tuple to determine which bucket it now belongs in.
*
* It is annoying to call the hash function while holding locks, but
- * releasing and relocking the page for each tuple is unappealing
- * too.
+ * releasing and relocking the page for each tuple is unappealing too.
*/
- hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
- itup = &(hitem->hash_itup);
+ itup = (IndexTuple) PageGetItem(opage, PageGetItemId(opage, ooffnum));
datum = index_getattr(itup, 1, itupdesc, &null);
Assert(!null);
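+		/*
+		 * (For illustration: the target bucket is computed from the
+		 * re-hashed datum and the masks captured earlier, along the lines
+		 * of _hash_hashkey2bucket(_hash_datum2hashkey(rel, datum),
+		 * maxbucket, highmask, lowmask).)
+		 */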
if (bucket == nbucket)
{
/*
- * insert the tuple into the new bucket. if it doesn't fit on
- * the current page in the new bucket, we must allocate a new
- * overflow page and place the tuple on that page instead.
+ * insert the tuple into the new bucket. if it doesn't fit on the
+ * current page in the new bucket, we must allocate a new overflow
+ * page and place the tuple on that page instead.
*/
- itemsz = IndexTupleDSize(hitem->hash_itup)
- + (sizeof(HashItemData) - sizeof(IndexTupleData));
-
+ itemsz = IndexTupleDSize(*itup);
itemsz = MAXALIGN(itemsz);
if (PageGetFreeSpace(npage) < itemsz)
_hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
/* chain to a new overflow page */
nbuf = _hash_addovflpage(rel, metabuf, nbuf);
+ _hash_checkpage(rel, nbuf, LH_OVERFLOW_PAGE);
npage = BufferGetPage(nbuf);
- _hash_checkpage(rel, npage, LH_OVERFLOW_PAGE);
/* we don't need nopaque within the loop */
}
noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
- if (PageAddItem(npage, (Item) hitem, itemsz, noffnum, LP_USED)
+ if (PageAddItem(npage, (Item) itup, itemsz, noffnum, LP_USED)
== InvalidOffsetNumber)
elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(rel));
/*
- * now delete the tuple from the old bucket. after this
- * section of code, 'ooffnum' will actually point to the
- * ItemId to which we would point if we had advanced it before
- * the deletion (PageIndexTupleDelete repacks the ItemId
- * array). this also means that 'omaxoffnum' is exactly one
- * less than it used to be, so we really can just decrement it
- * instead of calling PageGetMaxOffsetNumber.
+ * now delete the tuple from the old bucket. after this section
+ * of code, 'ooffnum' will actually point to the ItemId to which
+ * we would point if we had advanced it before the deletion
+ * (PageIndexTupleDelete repacks the ItemId array). this also
+ * means that 'omaxoffnum' is exactly one less than it used to be,
+ * so we really can just decrement it instead of calling
+ * PageGetMaxOffsetNumber.
*/
PageIndexTupleDelete(opage, ooffnum);
omaxoffnum = OffsetNumberPrev(omaxoffnum);
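+
+			/*
+			 * For illustration: deleting the item at offset 3 of 5
+			 * shifts the old items at offsets 4 and 5 down to 3 and 4,
+			 * so the next tuple to examine is again at offset 3 and the
+			 * max offset becomes 4.
+			 */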
else
{
/*
- * the tuple stays on this page. we didn't move anything, so
- * we didn't delete anything and therefore we don't have to
- * change 'omaxoffnum'.
+ * the tuple stays on this page. we didn't move anything, so we
+ * didn't delete anything and therefore we don't have to change
+ * 'omaxoffnum'.
*/
Assert(bucket == obucket);
ooffnum = OffsetNumberNext(ooffnum);
}
/*
- * We're at the end of the old bucket chain, so we're done
- * partitioning the tuples. Before quitting, call _hash_squeezebucket
- * to ensure the tuples remaining in the old bucket (including the
- * overflow pages) are packed as tightly as possible. The new bucket
- * is already tight.
+ * We're at the end of the old bucket chain, so we're done partitioning
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * tuples remaining in the old bucket (including the overflow pages) are
+ * packed as tightly as possible. The new bucket is already tight.
*/
_hash_wrtbuf(rel, obuf);
_hash_wrtbuf(rel, nbuf);