num_index_tuples = metap->hashm_ntuples;
}
- _hash_wrtbuf(rel, metabuf);
+ MarkBufferDirty(metabuf);
+ _hash_relbuf(rel, metabuf);
/* return statistics */
if (stats == NULL)
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable = 0;
bool retain_pin = false;
- bool curr_page_dirty = false;
vacuum_delay_point();
{
PageIndexMultiDelete(page, deletable, ndeletable);
bucket_dirty = true;
- curr_page_dirty = true;
+ MarkBufferDirty(buf);
}
/* bail out if there are no more pages to scan. */
* release the lock on previous page after acquiring the lock on next
* page
*/
- if (curr_page_dirty)
- {
- if (retain_pin)
- _hash_chgbufaccess(rel, buf, HASH_WRITE, HASH_NOLOCK);
- else
- _hash_wrtbuf(rel, buf);
- curr_page_dirty = false;
- }
- else if (retain_pin)
+ if (retain_pin)
_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
else
_hash_relbuf(rel, buf);
bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
+ MarkBufferDirty(bucket_buf);
}
/*
_hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
bstrategy);
else
- _hash_chgbufaccess(rel, bucket_buf, HASH_WRITE, HASH_NOLOCK);
+ _hash_chgbufaccess(rel, bucket_buf, HASH_READ, HASH_NOLOCK);
}
void
(void) _hash_pgaddtup(rel, buf, itemsz, itup);
/*
- * write and release the modified page. if the page we modified was an
+ * dirty and release the modified page. if the page we modified was an
* overflow page, we also need to separately drop the pin we retained on
* the primary bucket page.
*/
- _hash_wrtbuf(rel, buf);
+ MarkBufferDirty(buf);
+ _hash_relbuf(rel, buf);
if (buf != bucket_buf)
_hash_dropbuf(rel, bucket_buf);
/* logically chain overflow page to previous page */
pageopaque->hasho_nextblkno = BufferGetBlockNumber(ovflbuf);
+ MarkBufferDirty(buf);
if ((pageopaque->hasho_flag & LH_BUCKET_PAGE) && retain_pin)
- _hash_chgbufaccess(rel, buf, HASH_WRITE, HASH_NOLOCK);
+ _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
else
- _hash_wrtbuf(rel, buf);
+ _hash_relbuf(rel, buf);
return ovflbuf;
}
/* mark page "in use" in the bitmap */
SETBIT(freep, bit);
- _hash_wrtbuf(rel, mapbuf);
+ MarkBufferDirty(mapbuf);
+ _hash_relbuf(rel, mapbuf);
/* Reacquire exclusive lock on the meta page */
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
* in _hash_pageinit() when the page is reused.)
*/
MemSet(ovflpage, 0, BufferGetPageSize(ovflbuf));
- _hash_wrtbuf(rel, ovflbuf);
+ MarkBufferDirty(ovflbuf);
+ _hash_relbuf(rel, ovflbuf);
/*
* Fix up the bucket chain. this is a doubly-linked list, so we must fix
prevopaque->hasho_nextblkno = nextblkno;
if (prevblkno != writeblkno)
- _hash_wrtbuf(rel, prevbuf);
+ {
+ MarkBufferDirty(prevbuf);
+ _hash_relbuf(rel, prevbuf);
+ }
}
- /* write and unlock the write buffer */
+ /* mark dirty and release the write buffer */
Assert(nextopaque->hasho_bucket == bucket);
nextopaque->hasho_prevblkno = prevblkno;
- _hash_wrtbuf(rel, nextbuf);
+ MarkBufferDirty(nextbuf);
+ _hash_relbuf(rel, nextbuf);
}
/* Note: bstrategy is intentionally not used for metapage and bitmap */
freep = HashPageGetBitmap(mappage);
Assert(ISSET(freep, bitmapbit));
CLRBIT(freep, bitmapbit);
- _hash_wrtbuf(rel, mapbuf);
+ MarkBufferDirty(mapbuf);
+ _hash_relbuf(rel, mapbuf);
/* Get write-lock on metapage to update firstfree */
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
if (ovflbitno < metap->hashm_firstfree)
{
metap->hashm_firstfree = ovflbitno;
- _hash_wrtbuf(rel, metabuf);
- }
- else
- {
- /* no need to change metapage */
- _hash_relbuf(rel, metabuf);
+ MarkBufferDirty(metabuf);
}
+ _hash_relbuf(rel, metabuf);
return nextblkno;
}
freep = HashPageGetBitmap(pg);
MemSet(freep, 0xFF, BMPGSZ_BYTE(metap));
- /* write out the new bitmap page (releasing write lock and pin) */
- _hash_wrtbuf(rel, buf);
+ /* dirty the new bitmap page, and release write lock and pin */
+ MarkBufferDirty(buf);
+ _hash_relbuf(rel, buf);
/* add the new bitmap page to the metapage's list of bitmaps */
/* metapage already has a write lock */
* on next page
*/
if (wbuf_dirty)
- {
- if (retain_pin)
- _hash_chgbufaccess(rel, wbuf, HASH_WRITE, HASH_NOLOCK);
- else
- _hash_wrtbuf(rel, wbuf);
- }
- else if (retain_pin)
+ MarkBufferDirty(wbuf);
+ if (retain_pin)
_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
else
_hash_relbuf(rel, wbuf);
{
/* Delete tuples we already moved off read page */
PageIndexMultiDelete(rpage, deletable, ndeletable);
- _hash_wrtbuf(rel, rbuf);
+ MarkBufferDirty(rbuf);
}
- else
- _hash_relbuf(rel, rbuf);
+ _hash_relbuf(rel, rbuf);
return;
}
so->hashso_buc_split = false;
}
-/*
- * _hash_wrtbuf() -- write a hash page to disk.
- *
- * This routine releases the lock held on the buffer and our refcount
- * for it. It is an error to call _hash_wrtbuf() without a write lock
- * and a pin on the buffer.
- *
- * NOTE: this routine should go away when/if hash indexes are WAL-ified.
- * The correct sequence of operations is to mark the buffer dirty, then
- * write the WAL record, then release the lock and pin; so marking dirty
- * can't be combined with releasing.
- */
-void
-_hash_wrtbuf(Relation rel, Buffer buf)
-{
- MarkBufferDirty(buf);
- UnlockReleaseBuffer(buf);
-}
-
/*
* _hash_chgbufaccess() -- Change the lock type on a buffer, without
* dropping our pin on it.
pageopaque->hasho_bucket = i;
pageopaque->hasho_flag = LH_BUCKET_PAGE;
pageopaque->hasho_page_id = HASHO_PAGE_ID;
- _hash_wrtbuf(rel, buf);
+ MarkBufferDirty(buf);
+ _hash_relbuf(rel, buf);
}
/* Now reacquire buffer lock on metapage */
_hash_initbitmap(rel, metap, num_buckets + 1, forkNum);
/* all done */
- _hash_wrtbuf(rel, metabuf);
+ MarkBufferDirty(metabuf);
+ _hash_relbuf(rel, metabuf);
return num_buckets;
}
if (nbuf == bucket_nbuf)
- _hash_chgbufaccess(rel, bucket_nbuf, HASH_WRITE, HASH_NOLOCK);
+ {
+ MarkBufferDirty(bucket_nbuf);
+ _hash_chgbufaccess(rel, bucket_nbuf, HASH_READ, HASH_NOLOCK);
+ }
else
- _hash_wrtbuf(rel, nbuf);
+ {
+ MarkBufferDirty(nbuf);
+ _hash_relbuf(rel, nbuf);
+ }
_hash_chgbufaccess(rel, bucket_obuf, HASH_NOLOCK, HASH_WRITE);
opage = BufferGetPage(bucket_obuf);
extern void _hash_relbuf(Relation rel, Buffer buf);
extern void _hash_dropbuf(Relation rel, Buffer buf);
extern void _hash_dropscanbuf(Relation rel, HashScanOpaque so);
-extern void _hash_wrtbuf(Relation rel, Buffer buf);
extern void _hash_chgbufaccess(Relation rel, Buffer buf, int from_access,
int to_access);
extern uint32 _hash_metapinit(Relation rel, double num_tuples,