This gets rid of XLByteLT, XLByteLE, XLByteEQ and XLByteAdvance.
These were useful for brevity when XLogRecPtrs were split into
xlogid/xrecoff, but now that they are simple uint64s they are just
clutter. The only downside of this change is that it makes backporting
patches a bit harder, but that has been negated by other substantive
changes to the involved code anyway. The clarity of simpler expressions
makes the change worthwhile.
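
For reference, the removed macros (see the xlogdefs.h hunk at the end of
this patch) were trivial wrappers around the corresponding operators:

    #define XLByteLT(a, b)   ((a) < (b))
    #define XLByteLE(a, b)   ((a) <= (b))
    #define XLByteEQ(a, b)   ((a) == (b))
    #define XLByteAdvance(recptr, nbytes)   (recptr) += nbytes

so with XLogRecPtr a plain uint64, XLByteLE(lsn, PageGetLSN(page)) and
lsn <= PageGetLSN(page) are exactly equivalent.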
Most of the changes are mechanical, but in a couple of places, the patch
author chose to invert the operator sense, making the code flow more
logical (and more in line with preceding comments).
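
As an illustration (taken from the redo hunks below), a purely mechanical
translation of

    if (!XLByteLE(lsn, PageGetLSN(page)))

would be if (!(lsn <= PageGetLSN(page))); inverting the operator sense
instead yields

    if (lsn > PageGetLSN(page))

which states the intent of the surrounding comments directly: apply the
change only if it has not already been applied.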
Author: Andres Freund
Eyeballed by Dimitri Fontaine and Alvaro Herrera
return; /* page was deleted, nothing to do */
page = (Page) BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
if (data->isData)
{
return;
page = (Page) BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
if (GinPageIsData(page))
{
if (BufferIsValid(dbuffer))
{
page = BufferGetPage(dbuffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
GinPageGetOpaque(page)->flags = GIN_DELETED;
if (BufferIsValid(pbuffer))
{
page = BufferGetPage(pbuffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
Assert(!GinPageIsLeaf(page));
if (BufferIsValid(lbuffer))
{
page = BufferGetPage(lbuffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
GinPageGetOpaque(page)->rightlink = data->rightLink;
return; /* assume index was deleted, nothing to do */
metapage = BufferGetPage(metabuffer);
- if (!XLByteLE(lsn, PageGetLSN(metapage)))
+ if (lsn > PageGetLSN(metapage))
{
memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData));
PageSetLSN(metapage, lsn);
{
Page page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
OffsetNumber l,
off = (PageIsEmpty(page)) ? FirstOffsetNumber :
{
Page page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
GinPageGetOpaque(page)->rightlink = data->newRightlink;
return; /* assume index was deleted, nothing to do */
metapage = BufferGetPage(metabuffer);
- if (!XLByteLE(lsn, PageGetLSN(metapage)))
+ if (lsn > PageGetLSN(metapage))
{
memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData));
PageSetLSN(metapage, lsn);
{
Page page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
GinPageGetOpaque(page)->flags = GIN_DELETED;
}
if (stack->blkno != GIST_ROOT_BLKNO &&
- XLByteLT(stack->parent->lsn,
- GistPageGetOpaque(stack->page)->nsn))
+ stack->parent->lsn < GistPageGetOpaque(stack->page)->nsn)
{
/*
* Concurrent split detected. There's no guarantee that the
xlocked = true;
stack->page = (Page) BufferGetPage(stack->buffer);
- if (!XLByteEQ(PageGetLSN(stack->page), stack->lsn))
+ if (PageGetLSN(stack->page) != stack->lsn)
{
/* the page was changed while we unlocked it, retry */
continue;
*/
}
else if (GistFollowRight(stack->page) ||
- XLByteLT(stack->parent->lsn,
- GistPageGetOpaque(stack->page)->nsn))
+ stack->parent->lsn <
+ GistPageGetOpaque(stack->page)->nsn)
{
/*
* The page was split while we momentarily unlocked the
if (GistFollowRight(page))
elog(ERROR, "concurrent GiST page split was incomplete");
- if (top->parent && XLByteLT(top->parent->lsn, GistPageGetOpaque(page)->nsn) &&
+ if (top->parent && top->parent->lsn < GistPageGetOpaque(page)->nsn &&
GistPageGetOpaque(page)->rightlink != InvalidBlockNumber /* sanity check */ )
{
/*
parent->page = (Page) BufferGetPage(parent->buffer);
/* here we don't need to distinguish between split and page update */
- if (child->downlinkoffnum == InvalidOffsetNumber || !XLByteEQ(parent->lsn, PageGetLSN(parent->page)))
+ if (child->downlinkoffnum == InvalidOffsetNumber ||
+ parent->lsn != PageGetLSN(parent->page))
{
/* parent is changed, look child in right links until found */
OffsetNumber i,
*/
if (!XLogRecPtrIsInvalid(pageItem->data.parentlsn) &&
(GistFollowRight(page) ||
- XLByteLT(pageItem->data.parentlsn, opaque->nsn)) &&
+ pageItem->data.parentlsn < opaque->nsn) &&
opaque->rightlink != InvalidBlockNumber /* sanity check */ )
{
/* There was a page split, follow right link to add pages */
GISTPageOpaque opaque = GistPageGetOpaque(page);
if (stack->blkno != GIST_ROOT_BLKNO && !XLogRecPtrIsInvalid(stack->parentlsn) &&
- (GistFollowRight(page) || XLByteLT(stack->parentlsn, opaque->nsn)) &&
+ (GistFollowRight(page) || stack->parentlsn < opaque->nsn) &&
opaque->rightlink != InvalidBlockNumber /* sanity check */ )
{
/* split page detected, install right link to the stack */
* of this record, because the updated NSN is not included in the full
* page image.
*/
- if (!XLByteLT(lsn, PageGetLSN(page)))
+ if (lsn >= PageGetLSN(page))
{
GistPageGetOpaque(page)->nsn = lsn;
GistClearFollowRight(page);
page = (Page) BufferGetPage(buffer);
/* nothing more to do if change already applied */
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
LockBufferForCleanup(buffer);
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
* XLOG record's LSN, we mustn't mark the page all-visible, because
* the subsequent update won't be replayed to clear the flag.
*/
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
PageSetAllVisible(page);
MarkBufferDirty(buffer);
* we did for the heap page. If this results in a dropped bit, no
* real harm is done; and the next VACUUM will fix it.
*/
- if (!XLByteLE(lsn, PageGetLSN(BufferGetPage(vmbuffer))))
+ if (lsn > PageGetLSN(BufferGetPage(vmbuffer)))
visibilitymap_set(reln, xlrec->block, lsn, vmbuffer,
xlrec->cutoff_xid);
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
goto newt;
page = (Page) BufferGetPage(obuffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
if (samepage)
{
return;
page = (Page) BufferGetPage(nbuffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(nbuffer);
if (BufferIsValid(obuffer))
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
{
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
- if (!XLByteLE(lsn, PageGetLSN(lpage)))
+ if (lsn > PageGetLSN(lpage))
{
OffsetNumber off;
OffsetNumber maxoff = PageGetMaxOffsetNumber(lpage);
{
Page page = (Page) BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
LockBufferForCleanup(buffer);
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
{
page = (Page) BufferGetPage(buffer);
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
/* insert new tuple */
if (xldata->offnumLeaf != xldata->offnumHeadLeaf)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
SpGistInnerTuple tuple;
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
int i;
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
SpGistInnerTuple tuple;
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
PageIndexTupleDelete(page, xldata->offnum);
if (PageAddItem(page, (Item) innerTuple, innerTuple->size,
if (xldata->newPage)
SpGistInitBuffer(buffer, 0);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
addOrReplaceTuple(page, (Item) innerTuple,
innerTuple->size, xldata->offnumNew);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
SpGistDeadTuple dt;
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
SpGistInnerTuple innerTuple;
if (xldata->newPage)
SpGistInitBuffer(buffer, 0);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
addOrReplaceTuple(page, (Item) postfixTuple,
postfixTuple->size, xldata->offnumPostfix);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
PageIndexTupleDelete(page, xldata->offnumPrefix);
if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size,
if (BufferIsValid(srcBuffer))
{
srcPage = BufferGetPage(srcBuffer);
- if (!XLByteLE(lsn, PageGetLSN(srcPage)))
+ if (lsn > PageGetLSN(srcPage))
{
/*
* We have it a bit easier here than in doPickSplit(),
if (BufferIsValid(destBuffer))
{
destPage = (Page) BufferGetPage(destBuffer);
- if (XLByteLE(lsn, PageGetLSN(destPage)))
+ if (lsn <= PageGetLSN(destPage))
destPage = NULL; /* don't do any page updates */
}
else
SpGistInitBuffer(buffer,
(xldata->storesNulls ? SPGIST_NULLS : 0));
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
addOrReplaceTuple(page, (Item) innerTuple, innerTuple->size,
xldata->offnumInner);
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
SpGistInnerTuple parent;
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
spgPageIndexMultiDelete(&state, page,
toDead, xldata->nDead,
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
/* The tuple numbers are in order */
PageIndexMultiDelete(page, toDelete, xldata->nDelete);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
- if (!XLByteLE(lsn, PageGetLSN(page)))
+ if (lsn > PageGetLSN(page))
{
SpGistPageOpaque opaque = SpGistPageGetOpaque(page);
int i;
{
int lsnindex = GetLSNIndex(slotno, xid);
- if (XLByteLT(ClogCtl->shared->group_lsn[lsnindex], lsn))
+ if (ClogCtl->shared->group_lsn[lsnindex] < lsn)
ClogCtl->shared->group_lsn[lsnindex] = lsn;
}
}
{
XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
- if (XLByteLT(max_lsn, this_lsn))
+ if (max_lsn < this_lsn)
max_lsn = this_lsn;
}
foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
- if ((XLogRecPtrIsInvalid(tle->begin) || XLByteLE(tle->begin, ptr)) &&
- (XLogRecPtrIsInvalid(tle->end) || XLByteLT(ptr, tle->end)))
+ if ((XLogRecPtrIsInvalid(tle->begin) || tle->begin <= ptr) &&
+ (XLogRecPtrIsInvalid(tle->end) || ptr < tle->end))
{
/* found it */
return tle->tli;
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (gxact->valid &&
- XLByteLE(gxact->prepare_lsn, redo_horizon))
+ gxact->prepare_lsn <= redo_horizon)
xids[nxids++] = pgxact->xid;
}
* affect the contents of the XLOG record, so we'll update our local copy
* but not force a recomputation.
*/
- if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
+ if (RedoRecPtr != Insert->RedoRecPtr)
{
- Assert(XLByteLT(RedoRecPtr, Insert->RedoRecPtr));
+ Assert(RedoRecPtr < Insert->RedoRecPtr);
RedoRecPtr = Insert->RedoRecPtr;
if (doPageWrites)
if (dtbuf[i] == InvalidBuffer)
continue;
if (dtbuf_bkp[i] == false &&
- XLByteLE(dtbuf_lsn[i], RedoRecPtr))
+ dtbuf_lsn[i] <= RedoRecPtr)
{
/*
* Oops, this buffer now needs to be backed up, but we
LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
LogwrtResult = XLogCtl->LogwrtResult;
- if (!XLByteLE(RecPtr, LogwrtResult.Flush))
+ if (LogwrtResult.Flush < RecPtr)
{
XLogwrtRqst FlushRqst;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->LogwrtResult = LogwrtResult;
- if (XLByteLT(xlogctl->LogwrtRqst.Write, LogwrtResult.Write))
+ if (xlogctl->LogwrtRqst.Write < LogwrtResult.Write)
xlogctl->LogwrtRqst.Write = LogwrtResult.Write;
- if (XLByteLT(xlogctl->LogwrtRqst.Flush, LogwrtResult.Flush))
+ if (xlogctl->LogwrtRqst.Flush < LogwrtResult.Flush)
xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&xlogctl->info_lck);
/* advance global request to include new block(s) */
- if (XLByteLT(xlogctl->LogwrtRqst.Write, WriteRqst))
+ if (xlogctl->LogwrtRqst.Write < WriteRqst)
xlogctl->LogwrtRqst.Write = WriteRqst;
/* update local result copy while I have the chance */
LogwrtResult = xlogctl->LogwrtResult;
*lsn = PageGetLSN(page);
if (doPageWrites &&
- XLByteLE(PageGetLSN(page), RedoRecPtr))
+ PageGetLSN(page) <= RedoRecPtr)
{
/*
* The page needs to be backed up, so set up *bkpb
* written out.
*/
OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
- if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
+ if (LogwrtResult.Write < OldPageRqstPtr)
{
/* nope, got work to do... */
XLogRecPtr FinishedPageRqstPtr;
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
- if (XLByteLT(xlogctl->LogwrtRqst.Write, FinishedPageRqstPtr))
+ if (xlogctl->LogwrtRqst.Write < FinishedPageRqstPtr)
xlogctl->LogwrtRqst.Write = FinishedPageRqstPtr;
LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease(&xlogctl->info_lck);
* Now that we have an up-to-date LogwrtResult value, see if we still
* need to write it or if someone else already did.
*/
- if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
+ if (LogwrtResult.Write < OldPageRqstPtr)
{
/* Must acquire write lock */
LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
LogwrtResult = XLogCtl->LogwrtResult;
- if (XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
+ if (LogwrtResult.Write >= OldPageRqstPtr)
{
/* OK, someone wrote it already */
LWLockRelease(WALWriteLock);
{
/* force it to a segment start point */
if (NewPageBeginPtr % XLogSegSize != 0)
- XLByteAdvance(NewPageBeginPtr,
- XLogSegSize - NewPageBeginPtr % XLogSegSize);
+ NewPageBeginPtr += XLogSegSize - NewPageBeginPtr % XLogSegSize;
}
NewPageEndPtr = NewPageBeginPtr;
- XLByteAdvance(NewPageEndPtr, XLOG_BLCKSZ);
+ NewPageEndPtr += XLOG_BLCKSZ;
XLogCtl->xlblocks[nextidx] = NewPageEndPtr;
NewPage = (XLogPageHeader) (XLogCtl->pages + nextidx * (Size) XLOG_BLCKSZ);
*/
curridx = Write->curridx;
- while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
+ while (LogwrtResult.Write < WriteRqst.Write)
{
/*
* Make sure we're not ahead of the insert process. This could happen
* if we're passed a bogus WriteRqst.Write that is past the end of the
* last page that's been initialized by AdvanceXLInsertBuffer.
*/
- if (!XLByteLT(LogwrtResult.Write, XLogCtl->xlblocks[curridx]))
+ if (LogwrtResult.Write >= XLogCtl->xlblocks[curridx])
elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
(uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
(uint32) (XLogCtl->xlblocks[curridx] >> 32),
/* Advance LogwrtResult.Write to end of current buffer page */
LogwrtResult.Write = XLogCtl->xlblocks[curridx];
- ispartialpage = XLByteLT(WriteRqst.Write, LogwrtResult.Write);
+ ispartialpage = WriteRqst.Write < LogwrtResult.Write;
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo))
{
* contiguous in memory), or if we are at the end of the logfile
* segment.
*/
- last_iteration = !XLByteLT(LogwrtResult.Write, WriteRqst.Write);
+ last_iteration = WriteRqst.Write <= LogwrtResult.Write;
finishing_seg = !ispartialpage &&
(startoffset + npages * XLOG_BLCKSZ) >= XLogSegSize;
/*
* If asked to flush, do so
*/
- if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) &&
- XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
+ if (LogwrtResult.Flush < WriteRqst.Flush &&
+ LogwrtResult.Flush < LogwrtResult.Write)
{
/*
* Could get here without iterating above loop, in which case we might
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->LogwrtResult = LogwrtResult;
- if (XLByteLT(xlogctl->LogwrtRqst.Write, LogwrtResult.Write))
+ if (xlogctl->LogwrtRqst.Write < LogwrtResult.Write)
xlogctl->LogwrtRqst.Write = LogwrtResult.Write;
- if (XLByteLT(xlogctl->LogwrtRqst.Flush, LogwrtResult.Flush))
+ if (xlogctl->LogwrtRqst.Flush < LogwrtResult.Flush)
xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush;
SpinLockRelease(&xlogctl->info_lck);
}
SpinLockAcquire(&xlogctl->info_lck);
LogwrtResult = xlogctl->LogwrtResult;
sleeping = xlogctl->WalWriterSleeping;
- if (XLByteLT(xlogctl->asyncXactLSN, asyncXactLSN))
+ if (xlogctl->asyncXactLSN < asyncXactLSN)
xlogctl->asyncXactLSN = asyncXactLSN;
SpinLockRelease(&xlogctl->info_lck);
WriteRqstPtr -= WriteRqstPtr % XLOG_BLCKSZ;
/* if we have already flushed that far, we're done */
- if (XLByteLE(WriteRqstPtr, LogwrtResult.Flush))
+ if (WriteRqstPtr <= LogwrtResult.Flush)
return;
}
UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
{
/* Quick check using our local copy of the variable */
- if (!updateMinRecoveryPoint || (!force && XLByteLE(lsn, minRecoveryPoint)))
+ if (!updateMinRecoveryPoint || (!force && lsn <= minRecoveryPoint))
return;
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
*/
if (minRecoveryPoint == 0)
updateMinRecoveryPoint = false;
- else if (force || XLByteLT(minRecoveryPoint, lsn))
+ else if (force || minRecoveryPoint < lsn)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
newMinRecoveryPointTLI = xlogctl->replayEndTLI;
SpinLockRelease(&xlogctl->info_lck);
- if (!force && XLByteLT(newMinRecoveryPoint, lsn))
+ if (!force && newMinRecoveryPoint < lsn)
elog(WARNING,
"xlog min recovery request %X/%X is past current point %X/%X",
(uint32) (lsn >> 32), (uint32) lsn,
(uint32) (newMinRecoveryPoint >> 32),
(uint32) newMinRecoveryPoint);
/* update control file */
- if (XLByteLT(ControlFile->minRecoveryPoint, newMinRecoveryPoint))
+ if (ControlFile->minRecoveryPoint < newMinRecoveryPoint)
{
ControlFile->minRecoveryPoint = newMinRecoveryPoint;
ControlFile->minRecoveryPointTLI = newMinRecoveryPointTLI;
}
/* Quick exit if already known flushed */
- if (XLByteLE(record, LogwrtResult.Flush))
+ if (record <= LogwrtResult.Flush)
return;
#ifdef WAL_DEBUG
/* read LogwrtResult and update local state */
SpinLockAcquire(&xlogctl->info_lck);
- if (XLByteLT(WriteRqstPtr, xlogctl->LogwrtRqst.Write))
+ if (WriteRqstPtr < xlogctl->LogwrtRqst.Write)
WriteRqstPtr = xlogctl->LogwrtRqst.Write;
LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease(&xlogctl->info_lck);
/* done already? */
- if (XLByteLE(record, LogwrtResult.Flush))
+ if (record <= LogwrtResult.Flush)
break;
/*
/* Got the lock; recheck whether request is satisfied */
LogwrtResult = XLogCtl->LogwrtResult;
- if (XLByteLE(record, LogwrtResult.Flush))
+ if (record <= LogwrtResult.Flush)
{
LWLockRelease(WALWriteLock);
break;
* calls from bufmgr.c are not within critical sections and so we will not
* force a restart for a bad LSN on a data page.
*/
- if (XLByteLT(LogwrtResult.Flush, record))
+ if (LogwrtResult.Flush < record)
elog(ERROR,
"xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
(uint32) (record >> 32), (uint32) record,
WriteRqstPtr -= WriteRqstPtr % XLOG_BLCKSZ;
/* if we have already flushed that far, consider async commit records */
- if (XLByteLE(WriteRqstPtr, LogwrtResult.Flush))
+ if (WriteRqstPtr <= LogwrtResult.Flush)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
* holding an open file handle to a logfile that's no longer in use,
* preventing the file from being deleted.
*/
- if (XLByteLE(WriteRqstPtr, LogwrtResult.Flush))
+ if (WriteRqstPtr <= LogwrtResult.Flush)
{
if (openLogFile >= 0)
{
/* now wait for the write lock */
LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
LogwrtResult = XLogCtl->LogwrtResult;
- if (!XLByteLE(WriteRqstPtr, LogwrtResult.Flush))
+ if (WriteRqstPtr > LogwrtResult.Flush)
{
XLogwrtRqst WriteRqst;
if (RecoveryInProgress())
{
/* Quick exit if already known updated */
- if (XLByteLE(record, minRecoveryPoint) || !updateMinRecoveryPoint)
+ if (record <= minRecoveryPoint || !updateMinRecoveryPoint)
return false;
/*
updateMinRecoveryPoint = false;
/* check again */
- if (XLByteLE(record, minRecoveryPoint) || !updateMinRecoveryPoint)
+ if (record <= minRecoveryPoint || !updateMinRecoveryPoint)
return false;
else
return true;
}
/* Quick exit if already known flushed */
- if (XLByteLE(record, LogwrtResult.Flush))
+ if (record <= LogwrtResult.Flush)
return false;
/* read LogwrtResult and update local state */
}
/* check again */
- if (XLByteLE(record, LogwrtResult.Flush))
+ if (record <= LogwrtResult.Flush)
return false;
return true;
do
{
/* Calculate pointer to beginning of next page */
- XLByteAdvance(pagelsn, XLOG_BLCKSZ);
+ pagelsn += XLOG_BLCKSZ;
/* Wait for the next page to become available */
if (!XLogPageRead(&pagelsn, emode, false, false))
return NULL;
return false;
}
- if (!XLByteEQ(hdr->xlp_pageaddr, recaddr))
+ if (hdr->xlp_pageaddr != recaddr)
{
ereport(emode_for_corrupt_record(emode, recaddr),
(errmsg("unexpected pageaddr %X/%X in log segment %s, offset %u",
* We can't exactly verify the prev-link, but surely it should be less
* than the record's own address.
*/
- if (!XLByteLT(record->xl_prev, *RecPtr))
+ if (!(record->xl_prev < *RecPtr))
{
ereport(emode_for_corrupt_record(emode, *RecPtr),
(errmsg("record with incorrect prev-link %X/%X at %X/%X",
* check guards against torn WAL pages where a stale but valid-looking
* WAL record starts on a sector boundary.
*/
- if (!XLByteEQ(record->xl_prev, ReadRecPtr))
+ if (record->xl_prev != ReadRecPtr)
{
ereport(emode_for_corrupt_record(emode, *RecPtr),
(errmsg("record with incorrect prev-link %X/%X at %X/%X",
* next timeline was forked off from it *after* the current recovery
* location.
*/
- if (XLByteLT(currentTle->end, EndRecPtr))
+ if (currentTle->end < EndRecPtr)
{
ereport(LOG,
(errmsg("new timeline %u forked off current database system timeline %u before current recovery point %X/%X",
* backup_label around that references a WAL segment that's
* already been archived.
*/
- if (XLByteLT(checkPoint.redo, checkPointLoc))
+ if (checkPoint.redo < checkPointLoc)
{
if (!ReadRecord(&(checkPoint.redo), LOG, false))
ereport(FATAL,
RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
- if (XLByteLT(RecPtr, checkPoint.redo))
+ if (RecPtr < checkPoint.redo)
ereport(PANIC,
(errmsg("invalid redo in checkpoint record")));
* have been a clean shutdown and we did not have a recovery.conf file,
* then assume no recovery needed.
*/
- if (XLByteLT(checkPoint.redo, RecPtr))
+ if (checkPoint.redo < RecPtr)
{
if (wasShutdown)
ereport(PANIC,
if (InArchiveRecovery)
{
/* initialize minRecoveryPoint if not set yet */
- if (XLByteLT(ControlFile->minRecoveryPoint, checkPoint.redo))
+ if (ControlFile->minRecoveryPoint < checkPoint.redo)
{
ControlFile->minRecoveryPoint = checkPoint.redo;
ControlFile->minRecoveryPointTLI = checkPoint.ThisTimeLineID;
* Find the first record that logically follows the checkpoint --- it
* might physically precede it, though.
*/
- if (XLByteLT(checkPoint.redo, RecPtr))
+ if (checkPoint.redo < RecPtr)
{
/* back up to find the record */
record = ReadRecord(&(checkPoint.redo), PANIC, false);
* advanced beyond the WAL we processed.
*/
if (InRecovery &&
- (XLByteLT(EndOfLog, minRecoveryPoint) ||
+ (EndOfLog < minRecoveryPoint ||
!XLogRecPtrIsInvalid(ControlFile->backupStartPoint)))
{
if (reachedStopPoint)
* Have we reached the point where our base backup was completed?
*/
if (!XLogRecPtrIsInvalid(ControlFile->backupEndPoint) &&
- XLByteLE(ControlFile->backupEndPoint, EndRecPtr))
+ ControlFile->backupEndPoint <= EndRecPtr)
{
/*
* We have reached the end of base backup, as indicated by pg_control.
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
- if (XLByteLT(ControlFile->minRecoveryPoint, EndRecPtr))
+ if (ControlFile->minRecoveryPoint < EndRecPtr)
ControlFile->minRecoveryPoint = EndRecPtr;
ControlFile->backupStartPoint = InvalidXLogRecPtr;
* consistent yet.
*/
if (!reachedConsistency && !ControlFile->backupEndRequired &&
- XLByteLE(minRecoveryPoint, XLogCtl->lastReplayedEndRecPtr) &&
+ minRecoveryPoint <= XLogCtl->lastReplayedEndRecPtr &&
XLogRecPtrIsInvalid(ControlFile->backupStartPoint))
{
/*
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
- Assert(XLByteLE(RedoRecPtr, xlogctl->Insert.RedoRecPtr));
+ Assert(RedoRecPtr <= xlogctl->Insert.RedoRecPtr);
RedoRecPtr = xlogctl->Insert.RedoRecPtr;
SpinLockRelease(&xlogctl->info_lck);
* We now have ProcLastRecPtr = start of actual checkpoint record, recptr
* = end of actual checkpoint record.
*/
- if (shutdown && !XLByteEQ(checkPoint.redo, ProcLastRecPtr))
+ if (shutdown && checkPoint.redo != ProcLastRecPtr)
ereport(PANIC,
(errmsg("concurrent transaction log activity while database system is shutting down")));
* side-effect.
*/
if (XLogRecPtrIsInvalid(lastCheckPointRecPtr) ||
- XLByteLE(lastCheckPoint.redo, ControlFile->checkPointCopy.redo))
+ lastCheckPoint.redo <= ControlFile->checkPointCopy.redo)
{
ereport(DEBUG2,
(errmsg("skipping restartpoint, already performed at %X/%X",
*/
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY &&
- XLByteLT(ControlFile->checkPointCopy.redo, lastCheckPoint.redo))
+ ControlFile->checkPointCopy.redo < lastCheckPoint.redo)
{
ControlFile->prevCheckPoint = ControlFile->checkPoint;
ControlFile->checkPoint = lastCheckPointRecPtr;
* new timeline.
*/
if (!XLogRecPtrIsInvalid(minRecoveryPoint) &&
- XLByteLT(lsn, minRecoveryPoint) &&
+ lsn < minRecoveryPoint &&
newTLI > minRecoveryPointTLI)
ereport(PANIC,
(errmsg("unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%X on timeline %u",
memcpy(&startpoint, XLogRecGetData(record), sizeof(startpoint));
- if (XLByteEQ(ControlFile->backupStartPoint, startpoint))
+ if (ControlFile->backupStartPoint == startpoint)
{
/*
* We have reached the end of base backup, the point where
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
- if (XLByteLT(ControlFile->minRecoveryPoint, lsn))
+ if (ControlFile->minRecoveryPoint < lsn)
{
ControlFile->minRecoveryPoint = lsn;
ControlFile->minRecoveryPointTLI = ThisTimeLineID;
*/
minRecoveryPoint = ControlFile->minRecoveryPoint;
minRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
- if (minRecoveryPoint != 0 && XLByteLT(minRecoveryPoint, lsn))
+ if (minRecoveryPoint != 0 && minRecoveryPoint < lsn)
{
ControlFile->minRecoveryPoint = lsn;
ControlFile->minRecoveryPointTLI = ThisTimeLineID;
if (!fpw)
{
SpinLockAcquire(&xlogctl->info_lck);
- if (XLByteLT(xlogctl->lastFpwDisableRecPtr, ReadRecPtr))
+ if (xlogctl->lastFpwDisableRecPtr < ReadRecPtr)
xlogctl->lastFpwDisableRecPtr = ReadRecPtr;
SpinLockRelease(&xlogctl->info_lck);
}
recptr = xlogctl->lastFpwDisableRecPtr;
SpinLockRelease(&xlogctl->info_lck);
- if (!checkpointfpw || XLByteLE(startpoint, recptr))
+ if (!checkpointfpw || startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
* either because only few buffers have been dirtied yet.
*/
LWLockAcquire(WALInsertLock, LW_SHARED);
- if (XLByteLT(XLogCtl->Insert.lastBackupStart, startpoint))
+ if (XLogCtl->Insert.lastBackupStart < startpoint)
{
XLogCtl->Insert.lastBackupStart = startpoint;
gotUniqueStartpoint = true;
recptr = xlogctl->lastFpwDisableRecPtr;
SpinLockRelease(&xlogctl->info_lck);
- if (XLByteLE(startpoint, recptr))
+ if (startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
retry:
/* See if we need to retrieve more data */
if (readFile < 0 ||
- (readSource == XLOG_FROM_STREAM && !XLByteLT(*RecPtr, receivedUpto)))
+ (readSource == XLOG_FROM_STREAM && receivedUpto <= *RecPtr))
{
if (StandbyMode)
{
* When we are behind, XLogReceiptTime will not advance, so the
* grace time allotted to conflicting queries will decrease.
*/
- if (XLByteLT(RecPtr, receivedUpto))
+ if (RecPtr < receivedUpto)
havedata = true;
else
{
XLogRecPtr latestChunkStart;
receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
- if (XLByteLT(RecPtr, receivedUpto) && receiveTLI == curFileTLI)
+ if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
{
havedata = true;
- if (!XLByteLT(RecPtr, latestChunkStart))
+ if (latestChunkStart <= RecPtr)
{
XLogReceiptTime = GetCurrentTimestamp();
SetCurrentChunkStartTime(XLogReceiptTime);
if (readSource == XLOG_FROM_PG_XLOG && emode == LOG)
{
- if (XLByteEQ(RecPtr, lastComplaint))
+ if (RecPtr == lastComplaint)
emode = DEBUG1;
else
lastComplaint = RecPtr;
{
XLogRecPtr redoptr = GetRedoRecPtr();
- if (XLByteLE(PageGetLSN(page), redoptr))
+ if (PageGetLSN(page) <= redoptr)
{
/* last update of seq was before checkpoint */
fetch = log = fetch + SEQ_LOG_VALS;
* be a low cost check.
*/
if (!WalSndCtl->sync_standbys_defined ||
- XLByteLE(XactCommitLSN, WalSndCtl->lsn[mode]))
+ XactCommitLSN <= WalSndCtl->lsn[mode])
{
LWLockRelease(SyncRepLock);
return;
* Stop at the queue element that we should insert after to ensure the queue
* is ordered by LSN.
*/
- if (XLByteLT(proc->waitLSN, MyProc->waitLSN))
+ if (proc->waitLSN < MyProc->waitLSN)
break;
proc = (PGPROC *) SHMQueuePrev(&(WalSndCtl->SyncRepQueue[mode]),
* Set the lsn first so that when we wake backends they will release up to
* this location.
*/
- if (XLByteLT(walsndctl->lsn[SYNC_REP_WAIT_WRITE], MyWalSnd->write))
+ if (walsndctl->lsn[SYNC_REP_WAIT_WRITE] < MyWalSnd->write)
{
walsndctl->lsn[SYNC_REP_WAIT_WRITE] = MyWalSnd->write;
numwrite = SyncRepWakeQueue(false, SYNC_REP_WAIT_WRITE);
}
- if (XLByteLT(walsndctl->lsn[SYNC_REP_WAIT_FLUSH], MyWalSnd->flush))
+ if (walsndctl->lsn[SYNC_REP_WAIT_FLUSH] < MyWalSnd->flush)
{
walsndctl->lsn[SYNC_REP_WAIT_FLUSH] = MyWalSnd->flush;
numflush = SyncRepWakeQueue(false, SYNC_REP_WAIT_FLUSH);
/*
* Assume the queue is ordered by LSN
*/
- if (!all && XLByteLT(walsndctl->lsn[mode], proc->waitLSN))
+ if (!all && walsndctl->lsn[mode] < proc->waitLSN)
return numprocs;
/*
* Check the queue is ordered by LSN and that multiple procs don't
* have matching LSNs
*/
- if (XLByteLE(proc->waitLSN, lastLSN))
+ if (proc->waitLSN <= lastLSN)
return false;
lastLSN = proc->waitLSN;
}
/* Update state for write */
- XLByteAdvance(recptr, byteswritten);
+ recptr += byteswritten;
recvOff += byteswritten;
nbytes -= byteswritten;
static void
XLogWalRcvFlush(bool dying)
{
- if (XLByteLT(LogstreamResult.Flush, LogstreamResult.Write))
+ if (LogstreamResult.Flush < LogstreamResult.Write)
{
/* use volatile pointer to prevent code rearrangement */
volatile WalRcvData *walrcv = WalRcv;
/* Update shared-memory status */
SpinLockAcquire(&walrcv->mutex);
- if (XLByteLT(walrcv->receivedUpto, LogstreamResult.Flush))
+ if (walrcv->receivedUpto < LogstreamResult.Flush)
{
walrcv->latestChunkStart = walrcv->receivedUpto;
walrcv->receivedUpto = LogstreamResult.Flush;
* probably OK.
*/
if (!force
- && XLByteEQ(writePtr, LogstreamResult.Write)
- && XLByteEQ(flushPtr, LogstreamResult.Flush)
+ && writePtr == LogstreamResult.Write
+ && flushPtr == LogstreamResult.Flush
&& !TimestampDifferenceExceeds(sendTime, now,
wal_receiver_status_interval * 1000))
return;
/* Update shared-memory status */
SpinLockAcquire(&walrcv->mutex);
- if (XLByteLT(walrcv->latestWalEnd, walEnd))
+ if (walrcv->latestWalEnd < walEnd)
walrcv->latestWalEndTime = sendTime;
walrcv->latestWalEnd = walEnd;
walrcv->lastMsgSendTime = sendTime;
replayPtr = GetXLogReplayRecPtr(NULL);
- if (XLByteEQ(receivePtr, replayPtr))
+ if (receivePtr == replayPtr)
return 0;
TimestampDifference(GetCurrentChunkReplayStartTime(),
* WAL segment.
*/
if (!XLogRecPtrIsInvalid(switchpoint) &&
- XLByteLT(switchpoint, cmd->startpoint))
+ switchpoint < cmd->startpoint)
{
ereport(ERROR,
(errmsg("requested starting point %X/%X on timeline %u is not in this server's history",
/* If there is nothing to stream, don't even enter COPY mode */
if (!sendTimeLineIsHistoric ||
- XLByteLT(cmd->startpoint, sendTimeLineValidUpto))
+ cmd->startpoint < sendTimeLineValidUpto)
{
/*
* When we first start replication the standby will be behind the primary.
* Don't allow a request to stream from a future point in WAL that
* hasn't been flushed to disk in this server yet.
*/
- if (XLByteLT(FlushPtr, cmd->startpoint))
+ if (FlushPtr < cmd->startpoint)
{
ereport(ERROR,
(errmsg("requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X",
}
/* Update state for read */
- XLByteAdvance(recptr, readbytes);
+ recptr += readbytes;
sendOff += readbytes;
nbytes -= readbytes;
history = readTimeLineHistory(ThisTimeLineID);
sendTimeLineValidUpto = tliSwitchPoint(sendTimeLine, history);
- Assert(XLByteLE(sentPtr, sendTimeLineValidUpto));
+ Assert(sentPtr <= sendTimeLineValidUpto);
list_free_deep(history);
- /* the switchpoint should be >= current send pointer */
- if (!XLByteLE(sentPtr, sendTimeLineValidUpto))
+ /* the current send pointer should be <= the switchpoint */
+ if (!(sentPtr <= sendTimeLineValidUpto))
elog(ERROR, "server switched off timeline %u at %X/%X, but walsender already streamed up to %X/%X",
sendTimeLine,
(uint32) (sendTimeLineValidUpto >> 32),
* If this is a historic timeline and we've reached the point where we
* forked to the next timeline, stop streaming.
*/
- if (sendTimeLineIsHistoric && XLByteLE(sendTimeLineValidUpto, sentPtr))
+ if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr)
{
/* close the current file. */
if (sendFile >= 0)
}
/* Do we have any work to do? */
- Assert(XLByteLE(sentPtr, SendRqstPtr));
- if (XLByteLE(SendRqstPtr, sentPtr))
+ Assert(sentPtr <= SendRqstPtr);
+ if (SendRqstPtr <= sentPtr)
{
*caughtup = true;
return;
*/
startptr = sentPtr;
endptr = startptr;
- XLByteAdvance(endptr, MAX_SEND_SIZE);
+ endptr += MAX_SEND_SIZE;
/* if we went beyond SendRqstPtr, back off */
- if (XLByteLE(SendRqstPtr, endptr))
+ if (SendRqstPtr <= endptr)
{
endptr = SendRqstPtr;
if (sendTimeLineIsHistoric)
if (XLogRecPtrIsInvalid(recptr))
continue;
- if (!found || XLByteLT(recptr, oldest))
+ if (!found || recptr < oldest)
oldest = recptr;
found = true;
}
/* Write was successful, advance our position */
bytes_written += bytes_to_write;
bytes_left -= bytes_to_write;
- XLByteAdvance(blockpos, bytes_to_write);
+ blockpos += bytes_to_write;
xlogoff += bytes_to_write;
/* Did we reach the end of a WAL segment? */
#define InvalidXLogRecPtr 0
#define XLogRecPtrIsInvalid(r) ((r) == InvalidXLogRecPtr)
-/*
- * Macros for comparing XLogRecPtrs
- */
-#define XLByteLT(a, b) ((a) < (b))
-#define XLByteLE(a, b) ((a) <= (b))
-#define XLByteEQ(a, b) ((a) == (b))
-
-
-/*
- * Macro for advancing a record pointer by the specified number of bytes.
- */
-#define XLByteAdvance(recptr, nbytes) \
- (recptr) += nbytes \
-
/*
* XLogSegNo - physical log file sequence number.
*/