/*
* Since this can be redone later if needed, mark as a hint.
*/
- MarkBufferDirtyHint(buf);
+ MarkBufferDirtyHint(buf, true);
}
/*
{
((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
PageClearFull(page);
- MarkBufferDirtyHint(buffer);
+ MarkBufferDirtyHint(buffer, true);
}
}
* crucial. Be sure to mark the proper buffer dirty.
*/
if (nbuf != InvalidBuffer)
- MarkBufferDirtyHint(nbuf);
+ MarkBufferDirtyHint(nbuf, true);
else
- MarkBufferDirtyHint(buf);
+ MarkBufferDirtyHint(buf, true);
}
}
}
opaque->btpo_cycleid == vstate->cycleid)
{
opaque->btpo_cycleid = 0;
- MarkBufferDirtyHint(buf);
+ MarkBufferDirtyHint(buf, true);
}
}
if (killedsomething)
{
opaque->btpo_flags |= BTP_HAS_GARBAGE;
- MarkBufferDirtyHint(so->currPos.buf);
+ MarkBufferDirtyHint(so->currPos.buf, true);
}
if (!haveLock)
appendStringInfo(buf, "restore point: %s", xlrec->rp_name);
}
- else if (info == XLOG_HINT)
+ else if (info == XLOG_FPI)
{
BkpBlock *bkp = (BkpBlock *) rec;
- appendStringInfo(buf, "page hint: %s block %u",
+ appendStringInfo(buf, "full-page image: %s block %u",
relpathperm(bkp->node, bkp->fork),
bkp->block);
}
* records. In that case, multiple copies of the same block would be recorded
* in separate WAL records by different backends, though that is still OK from
* a correctness perspective.
- *
- * Note that this only works for buffers that fit the standard page model,
- * i.e. those for which buffer_std == true
*/
XLogRecPtr
-XLogSaveBufferForHint(Buffer buffer)
+XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
{
XLogRecPtr recptr = InvalidXLogRecPtr;
XLogRecPtr lsn;
* and reset rdata for any actual WAL record insert.
*/
rdata[0].buffer = buffer;
- rdata[0].buffer_std = true;
+ rdata[0].buffer_std = buffer_std;
/*
* Check buffer while not holding an exclusive lock.
* Copy buffer so we don't have to worry about concurrent hint bit or
* lsn updates. We assume pd_lower/upper cannot be changed without an
* exclusive lock, so the contents bkp are not racy.
+ *
+ * If buffer_std is false, XLogCheckBuffer() sets hole_length and
+ * hole_offset to 0, so the following code is safe for either case.
*/
memcpy(copied_buffer, origdata, bkpb.hole_offset);
memcpy(copied_buffer + bkpb.hole_offset,
rdata[1].buffer = InvalidBuffer;
rdata[1].next = NULL;
- recptr = XLogInsert(RM_XLOG_ID, XLOG_HINT, rdata);
+ recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI, rdata);
}
return recptr;
{
/* nothing to do here */
}
- else if (info == XLOG_HINT)
+ else if (info == XLOG_FPI)
{
char *data;
BkpBlock bkpb;
/*
- * Hint bit records contain a backup block stored "inline" in the
- * normal data since the locking when writing hint records isn't
+ * Full-page image (FPI) records contain a backup block stored "inline"
+ * in the normal data since the locking when writing hint records isn't
* sufficient to use the normal backup block mechanism, which assumes
* exclusive lock on the buffer supplied.
*
HeapTupleHeaderSetXmax(seqtuple->t_data, InvalidTransactionId);
seqtuple->t_data->t_infomask &= ~HEAP_XMAX_COMMITTED;
seqtuple->t_data->t_infomask |= HEAP_XMAX_INVALID;
- MarkBufferDirtyHint(*buf);
+ MarkBufferDirtyHint(*buf, true);
}
seq = (Form_pg_sequence) GETSTRUCT(seqtuple);
* (due to a race condition), so it cannot be used for important changes.
*/
void
-MarkBufferDirtyHint(Buffer buffer)
+MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
{
volatile BufferDesc *bufHdr;
Page page = BufferGetPage(buffer);
* rather than full transactionids.
*/
MyPgXact->delayChkpt = delayChkpt = true;
- lsn = XLogSaveBufferForHint(buffer);
+ lsn = XLogSaveBufferForHint(buffer, buffer_std);
}
LockBufHdr(bufHdr);
PageInit(page, BLCKSZ, 0);
if (fsm_set_avail(page, slot, new_cat))
- MarkBufferDirtyHint(buf);
+ MarkBufferDirtyHint(buf, false);
UnlockReleaseBuffer(buf);
}
return; /* nothing to do; the FSM was already smaller */
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
fsm_truncate_avail(BufferGetPage(buf), first_removed_slot);
- MarkBufferDirtyHint(buf);
+ MarkBufferDirtyHint(buf, false);
UnlockReleaseBuffer(buf);
new_nfsmblocks = fsm_logical_to_physical(first_removed_address) + 1;
page = BufferGetPage(buf);
if (fsm_set_avail(page, slot, newValue))
- MarkBufferDirtyHint(buf);
+ MarkBufferDirtyHint(buf, false);
if (minValue != 0)
{
{
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
fsm_set_avail(BufferGetPage(buf), slot, child_avail);
- MarkBufferDirtyHint(buf);
+ MarkBufferDirtyHint(buf, false);
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
}
}
exclusive_lock_held = true;
}
fsm_rebuild_page(page);
- MarkBufferDirtyHint(buf);
+ MarkBufferDirtyHint(buf, false);
goto restart;
}
}
}
tuple->t_infomask |= infomask;
- MarkBufferDirtyHint(buffer);
+ MarkBufferDirtyHint(buffer, true);
}
/*
extern int XLogFileInit(XLogSegNo segno, bool *use_existent, bool use_lock);
extern int XLogFileOpen(XLogSegNo segno);
-extern XLogRecPtr XLogSaveBufferForHint(Buffer buffer);
+extern XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std);
extern void CheckXLogRemoved(XLogSegNo segno, TimeLineID tli);
extern void XLogSetAsyncXactLSN(XLogRecPtr record);
#define XLOG_RESTORE_POINT 0x70
#define XLOG_FPW_CHANGE 0x80
#define XLOG_END_OF_RECOVERY 0x90
-#define XLOG_HINT 0xA0
+#define XLOG_FPI 0xA0
/*
extern void BufferGetTag(Buffer buffer, RelFileNode *rnode,
ForkNumber *forknum, BlockNumber *blknum);
-extern void MarkBufferDirtyHint(Buffer buffer);
+extern void MarkBufferDirtyHint(Buffer buffer, bool buffer_std);
extern void UnlockBuffers(void);
extern void LockBuffer(Buffer buffer, int mode);