* Reacquire the read lock here.
*/
if (BufferIsValid(so->hashso_curbuf))
- _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
+ LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE);
/*
* If we've already initialized this scan, we can just advance it in the
/* Release read lock on current buffer, but keep it pinned */
if (BufferIsValid(so->hashso_curbuf))
- _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK);
/* Return current heap TID on success */
scan->xs_ctup.t_self = so->hashso_heappos;
orig_ntuples = metap->hashm_ntuples;
memcpy(&local_metapage, metap, sizeof(local_metapage));
/* release the lock, but keep pin */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/* Scan the buckets that we know exist */
cur_bucket = 0;
* (and thus can't be further split), update our cached metapage
* data.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+ LockBuffer(metabuf, BUFFER_LOCK_SHARE);
memcpy(&local_metapage, metap, sizeof(local_metapage));
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}
bucket_buf = buf;
}
/* Write-lock metapage and check for split since we started */
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
if (cur_maxbucket != metap->hashm_maxbucket)
/* There's been a split, so process the additional bucket(s) */
cur_maxbucket = metap->hashm_maxbucket;
memcpy(&local_metapage, metap, sizeof(local_metapage));
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
goto loop_top;
}
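For orientation, the goto above is hashbulkdelete's catch-up path: after one pass over the buckets it re-reads the metapage, and if a concurrent split has raised hashm_maxbucket it makes another pass over the newly created buckets. A condensed sketch of the loop as it reads after this patch (stats bookkeeping and the per-bucket cleanup itself elided):

    loop_top:
        while (cur_bucket <= cur_maxbucket)
        {
            /* ... take a cleanup lock on the bucket and clean it ... */
            cur_bucket++;
        }

        /* Write-lock metapage and check for split since we started */
        LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
        metap = HashPageGetMeta(BufferGetPage(metabuf));
        if (cur_maxbucket != metap->hashm_maxbucket)
        {
            /* There's been a split, so process the additional bucket(s) */
            cur_maxbucket = metap->hashm_maxbucket;
            memcpy(&local_metapage, metap, sizeof(local_metapage));
            LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
            goto loop_top;
        }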
* page
*/
if (retain_pin)
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, buf);
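A distinction that recurs in every retain_pin branch of this patch: LockBuffer with BUFFER_LOCK_UNLOCK drops only the buffer content lock, whereas _hash_relbuf (a thin wrapper around UnlockReleaseBuffer) drops both the lock and the pin:

    LockBuffer(buf, BUFFER_LOCK_UNLOCK); /* lock released; pin kept, buf stays valid */
    _hash_relbuf(rel, buf);              /* lock and pin released; buf must not be used again */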
if (buf != bucket_buf)
{
_hash_relbuf(rel, buf);
- _hash_chgbufaccess(rel, bucket_buf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(bucket_buf, BUFFER_LOCK_EXCLUSIVE);
}
/*
_hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
bstrategy);
else
- _hash_chgbufaccess(rel, bucket_buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK);
}
void
lowmask = metap->hashm_lowmask;
/* Release metapage lock, but keep pin. */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/*
* If the previous iteration of this loop locked the primary page of
* Reacquire metapage lock and check that no bucket split has taken
* place while we were awaiting the bucket lock.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+ LockBuffer(metabuf, BUFFER_LOCK_SHARE);
oldblkno = blkno;
retry = true;
}
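For context, the fragments above belong to the bucket-lookup retry loop in _hash_doinsert; condensed, it reads roughly like this after the patch (the recomputation of bucket from the metapage at the top of each iteration is elided):

    for (;;)
    {
        blkno = BUCKET_TO_BLKNO(metap, bucket);

        /* Release metapage lock, but keep pin. */
        LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

        /* Done if the previous iteration locked the still-correct bucket. */
        if (retry)
        {
            if (oldblkno == blkno)
                break;
            _hash_relbuf(rel, buf);
        }

        buf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE);

        /* Reacquire metapage lock; loop again in case a split intervened. */
        LockBuffer(metabuf, BUFFER_LOCK_SHARE);
        oldblkno = blkno;
        retry = true;
    }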
if (H_BUCKET_BEING_SPLIT(pageopaque) && IsBufferCleanupOK(buf))
{
/* release the lock on the bucket buffer before completing the split. */
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
_hash_finish_split(rel, metabuf, buf, pageopaque->hasho_bucket,
maxbucket, highmask, lowmask);
if (buf != bucket_buf)
_hash_relbuf(rel, buf);
else
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
page = BufferGetPage(buf);
}
*/
/* release our write lock without modifying the buffer */
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
/* chain to a new overflow page */
buf = _hash_addovflpage(rel, metabuf, buf, (buf == bucket_buf) ? true : false);
* Write-lock the metapage so we can increment the tuple count. After
* incrementing it, check to see if it's time for a split.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
metap->hashm_ntuples += 1;
(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1);
/* Write out the metapage and drop lock, but keep pin */
- _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+ MarkBufferDirty(metabuf);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/* Attempt to split if a split is needed */
if (do_expand)
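The do_expand test here is plain fill-factor arithmetic: a split is attempted once the tuple count exceeds hashm_ffactor tuples per bucket. With illustrative numbers:

    /* e.g. hashm_ffactor = 75 and hashm_maxbucket = 3 (four buckets):
     * the threshold is 75 * 4 = 300, so the 301st tuple triggers a
     * split attempt */
    do_expand = metap->hashm_ntuples >
        (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1);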
* Write-lock the tail page. It is okay to hold two buffer locks here
* since there cannot be anyone else contending for access to ovflbuf.
*/
- _hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/* probably redundant... */
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
/* we assume we do not need to write the unmodified page */
if ((pageopaque->hasho_flag & LH_BUCKET_PAGE) && retain_pin)
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, buf);
pageopaque->hasho_nextblkno = BufferGetBlockNumber(ovflbuf);
MarkBufferDirty(buf);
if ((pageopaque->hasho_flag & LH_BUCKET_PAGE) && retain_pin)
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, buf);
j;
/* Get exclusive lock on the meta page */
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
_hash_checkpage(rel, metabuf, LH_META_PAGE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
last_inpage = BMPGSZ_BIT(metap) - 1;
/* Release exclusive lock on metapage while reading bitmap page */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
mappage = BufferGetPage(mapbuf);
bit = 0;
/* Reacquire exclusive lock on the meta page */
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
}
/*
metap->hashm_firstfree = bit + 1;
/* Write updated metapage and release lock, but not pin */
- _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+ MarkBufferDirty(metabuf);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
return newbuf;
_hash_relbuf(rel, mapbuf);
/* Reacquire exclusive lock on the meta page */
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
/* convert bit to absolute bit number */
bit += (i << BMPG_SHIFT(metap));
metap->hashm_firstfree = bit + 1;
/* Write updated metapage and release lock, but not pin */
- _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+ MarkBufferDirty(metabuf);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}
else
{
/* We didn't change the metapage, so no need to write */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}
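The bit arithmetic in these hunks: overflow-page usage bits are numbered consecutively across all bitmap pages, BMPGSZ_BIT(metap) bits per page, so a bit found within bitmap page i is promoted to an absolute bit number by adding i shifted by BMPG_SHIFT(metap), the log2 of bits-per-bitmap-page. Advancing hashm_firstfree to bit + 1 is safe because the search began at the old firstfree and bit was the first free bit found, and it is now in use. Illustratively, using the loop variables from the fragments above:

    /* local bit j on bitmap page i -> absolute overflow bit number */
    bit = j + (i << BMPG_SHIFT(metap));
    metap->hashm_firstfree = bit + 1;   /* every bit below this is known used */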
/* Fetch, init, and return the recycled page */
blkno = metap->hashm_mapp[bitmappage];
/* Release metapage lock while we access the bitmap page */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/* Clear the bitmap bit to indicate that this overflow page is free */
mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BITMAP_PAGE);
_hash_relbuf(rel, mapbuf);
/* Get write-lock on metapage to update firstfree */
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
/* if this is now the first free page, update hashm_firstfree */
if (ovflbitno < metap->hashm_firstfree)
*/
if (!BlockNumberIsValid(wopaque->hasho_nextblkno))
{
- _hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
return;
}
if (wbuf_dirty)
MarkBufferDirty(wbuf);
if (retain_pin)
- _hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, wbuf);
{
/* retain the pin on the primary bucket page until the end of the bucket scan */
if (wblkno == bucket_blkno)
- _hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, wbuf);
return;
so->hashso_buc_split = false;
}
-/*
- * _hash_chgbufaccess() -- Change the lock type on a buffer, without
- * dropping our pin on it.
- *
- * from_access and to_access may be HASH_READ, HASH_WRITE, or HASH_NOLOCK,
- * the last indicating that no buffer-level lock is held or wanted.
- *
- * When from_access == HASH_WRITE, we assume the buffer is dirty and tell
- * bufmgr it must be written out. If the caller wants to release a write
- * lock on a page that's not been modified, it's okay to pass from_access
- * as HASH_READ (a bit ugly, but handy in some places).
- */
-void
-_hash_chgbufaccess(Relation rel,
- Buffer buf,
- int from_access,
- int to_access)
-{
- if (from_access == HASH_WRITE)
- MarkBufferDirty(buf);
- if (from_access != HASH_NOLOCK)
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- if (to_access != HASH_NOLOCK)
- LockBuffer(buf, to_access);
-}
-
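With the helper gone, every call site now spells out the equivalent bufmgr calls. Since hash.h defines HASH_READ and HASH_WRITE as aliases for BUFFER_LOCK_SHARE and BUFFER_LOCK_EXCLUSIVE, each rewrite in this patch is one of four mechanical mappings:

    /*
     * _hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_READ)
     *     -> LockBuffer(buf, BUFFER_LOCK_SHARE);
     * _hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE)
     *     -> LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
     * _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK)
     *     -> LockBuffer(buf, BUFFER_LOCK_UNLOCK);
     * _hash_chgbufaccess(rel, buf, HASH_WRITE, HASH_NOLOCK)
     *     -> MarkBufferDirty(buf);
     *        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
     */

The third mapping also covers releasing an exclusive lock on a page that was never modified (the "bit ugly, but handy" case the removed comment describes), which is why several exclusive-lock sites above unlock without a preceding MarkBufferDirty.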
/*
* _hash_metapinit() -- Initialize the metadata page of a hash index,
* won't accomplish anything. It's a bad idea to hold buffer locks for
* long intervals in any case, since that can block the bgwriter.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+ MarkBufferDirty(metabuf);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/*
* Initialize the first N buckets
}
/* Now reacquire buffer lock on metapage */
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
/*
* Initialize first bitmap page
* Write-lock the meta page. It used to be necessary to acquire a
* heavyweight lock to begin a split, but that is no longer required.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
_hash_checkpage(rel, metabuf, LH_META_PAGE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
* Release the lock on the metapage and old_bucket before completing the
* split.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
- _hash_chgbufaccess(rel, buf_oblkno, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
+ LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
_hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
highmask, lowmask);
lowmask = metap->hashm_lowmask;
/* Release the metapage lock. */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
maxbucket, highmask, lowmask, NULL, NULL, true,
lowmask = metap->hashm_lowmask;
/* Write out the metapage and drop lock, but keep pin */
- _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+ MarkBufferDirty(metabuf);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/* Relocate records to the new bucket */
_hash_splitbucket(rel, metabuf,
fail:
/* We didn't write the metapage, so just drop lock */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}
if (PageGetFreeSpace(npage) < itemsz)
{
/* write out nbuf and drop lock, but keep pin */
- _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
+ MarkBufferDirty(nbuf);
+ LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
/* chain to a new overflow page */
nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
npage = BufferGetPage(nbuf);
/* retain the pin on the old primary bucket */
if (obuf == bucket_obuf)
- _hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, obuf);
* bucket and then the new bucket.
*/
if (nbuf == bucket_nbuf)
- _hash_chgbufaccess(rel, bucket_nbuf, HASH_WRITE, HASH_NOLOCK);
+ {
+ MarkBufferDirty(bucket_nbuf);
+ LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
+ }
else
{
MarkBufferDirty(nbuf);
_hash_relbuf(rel, nbuf);
}
- _hash_chgbufaccess(rel, bucket_obuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
opage = BufferGetPage(bucket_obuf);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
- _hash_chgbufaccess(rel, bucket_nbuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
npage = BufferGetPage(bucket_nbuf);
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
* retain the pin on the primary bucket.
*/
if (nbuf == bucket_nbuf)
- _hash_chgbufaccess(rel, nbuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, nbuf);
}
if (!ConditionalLockBufferForCleanup(bucket_nbuf))
{
- _hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
hash_destroy(tidhtab);
return;
}
maxbucket, highmask, lowmask);
_hash_relbuf(rel, bucket_nbuf);
- _hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
hash_destroy(tidhtab);
}
* comments in _hash_first for the reason the pin is retained.
*/
if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
- _hash_chgbufaccess(rel, *bufp, HASH_READ, HASH_NOLOCK);
+ LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, *bufp);
*/
Assert(BufferIsValid(*bufp));
- _hash_chgbufaccess(rel, *bufp, HASH_NOLOCK, HASH_READ);
+ LockBuffer(*bufp, BUFFER_LOCK_SHARE);
/*
* setting hashso_buc_split to true indicates that we are scanning
* comments in _hash_first for the reason the pin is retained.
*/
if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
- _hash_chgbufaccess(rel, *bufp, HASH_READ, HASH_NOLOCK);
+ LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, *bufp);
*/
Assert(BufferIsValid(*bufp));
- _hash_chgbufaccess(rel, *bufp, HASH_NOLOCK, HASH_READ);
+ LockBuffer(*bufp, BUFFER_LOCK_SHARE);
*pagep = BufferGetPage(*bufp);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
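These two hunks are the page-advance steps of _hash_readnext and _hash_readprev. The reason the primary bucket pins survive the scan (summarizing the _hash_first comments they point to): vacuum must obtain a cleanup lock on the primary bucket page before it can remove tuples, and a cleanup lock is not granted while any other backend holds a pin. A condensed sketch of the two sides of that interlock (the vacuum side paraphrases hashbulkdelete):

    /* scan side: give up the lock but keep the pin on the primary bucket page */
    LockBuffer(so->hashso_bucket_buf, BUFFER_LOCK_UNLOCK);

    /* vacuum side: the cleanup lock waits until that pin is gone */
    buf = ReadBufferExtended(rel, MAIN_FORKNUM, bucket_blkno, RBM_NORMAL,
                             info->strategy);
    LockBufferForCleanup(buf);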
blkno = BUCKET_TO_BLKNO(metap, bucket);
/* Release metapage lock, but keep pin. */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/*
* If the previous iteration of this loop locked what is still the
* Reacquire metapage lock and check that no bucket split has taken
* place while we were awaiting the bucket lock.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+ LockBuffer(metabuf, BUFFER_LOCK_SHARE);
oldblkno = blkno;
retry = true;
}
* release the lock on the new bucket and re-acquire it after acquiring
* the lock on the old bucket.
*/
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
old_buf = _hash_getbuf(rel, old_blkno, HASH_READ, LH_BUCKET_PAGE);
* scanning.
*/
so->hashso_split_bucket_buf = old_buf;
- _hash_chgbufaccess(rel, old_buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);
- _hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_READ);
+ LockBuffer(buf, BUFFER_LOCK_SHARE);
page = BufferGetPage(buf);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_bucket == bucket);
extern void _hash_relbuf(Relation rel, Buffer buf);
extern void _hash_dropbuf(Relation rel, Buffer buf);
extern void _hash_dropscanbuf(Relation rel, HashScanOpaque so);
-extern void _hash_chgbufaccess(Relation rel, Buffer buf, int from_access,
- int to_access);
extern uint32 _hash_metapinit(Relation rel, double num_tuples,
ForkNumber forkNum);
extern void _hash_pageinit(Page page, Size size);