/* It will fit, perform the insertion */
START_CRIT_SECTION();
- if (RelationNeedsWAL(btree->index))
+ if (RelationNeedsWAL(btree->index) && !btree->isBuild)
{
XLogBeginInsert();
XLogRegisterBuffer(0, stack->buffer, REGBUF_STANDARD);
MarkBufferDirty(childbuf);
}
- if (RelationNeedsWAL(btree->index))
+ if (RelationNeedsWAL(btree->index) && !btree->isBuild)
{
XLogRecPtr recptr;
ginxlogInsert xlrec;
}
/* write WAL record */
- if (RelationNeedsWAL(btree->index))
+ if (RelationNeedsWAL(btree->index) && !btree->isBuild)
{
XLogRecPtr recptr;
* Great, all the items fit on a single page. If needed, prepare data
* for a WAL record describing the changes we'll make.
*/
- if (RelationNeedsWAL(btree->index))
+ if (RelationNeedsWAL(btree->index) && !btree->isBuild)
computeLeafRecompressWALData(leaf);
/*
dataPlaceToPageLeafRecompress(buf, leaf);
/* If needed, register WAL data built by computeLeafRecompressWALData */
- if (RelationNeedsWAL(btree->index))
+ if (RelationNeedsWAL(btree->index) && !btree->isBuild)
{
XLogRegisterBufData(0, leaf->walinfo, leaf->walinfolen);
}
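
Taken together, the hunks above apply one recurring change: every site that emits WAL gains an `&& !btree->isBuild` guard, so no per-change records are written while the index is being built. A condensed sketch of the resulting shape, with a hypothetical routine name and info code (only the RelationNeedsWAL() && !isBuild pattern comes from the patch):

    /*
     * Sketch of the build-time WAL skip.  'my_place_to_page' and the zero
     * info code are hypothetical; the guard is the pattern this patch
     * installs at each GIN call site.
     */
    static void
    my_place_to_page(Relation index, Buffer buf, bool isBuild)
    {
        START_CRIT_SECTION();

        /* ... modify the page ... */
        MarkBufferDirty(buf);

        /* During a build, dirty the page but write no WAL record. */
        if (RelationNeedsWAL(index) && !isBuild)
        {
            XLogRecPtr  recptr;

            XLogBeginInsert();
            XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
            recptr = XLogInsert(RM_GIN_ID, 0);
            PageSetLSN(BufferGetPage(buf), recptr);
        }

        END_CRIT_SECTION();
    }

Pages skipped this way are logged wholesale by log_newpage_range() once the build finishes (see below).
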
pitem = (PostingItem *) insertdata;
GinDataPageAddPostingItem(page, pitem, off);
- if (RelationNeedsWAL(btree->index))
+ if (RelationNeedsWAL(btree->index) && !btree->isBuild)
{
/*
* This must be static, because it has to survive until XLogInsert,
Pointer ptr;
int nrootitems;
int rootsize;
+ bool is_build = (buildStats != NULL);
/* Construct the new root page in memory first. */
tmppage = (Page) palloc(BLCKSZ);
PageRestoreTempPage(tmppage, page);
MarkBufferDirty(buffer);
- if (RelationNeedsWAL(index))
+ if (RelationNeedsWAL(index) && !is_build)
{
XLogRecPtr recptr;
ginxlogCreatePostingTree data;
elog(ERROR, "failed to add item to index page in \"%s\"",
RelationGetRelationName(btree->index));
- if (RelationNeedsWAL(btree->index))
+ if (RelationNeedsWAL(btree->index) && !btree->isBuild)
{
/*
* This must be static, because it has to survive until XLogInsert,
buildStats->nEntries++;
ginPrepareEntryScan(&btree, attnum, key, category, ginstate);
+ btree.isBuild = (buildStats != NULL);
stack = ginFindLeafPage(&btree, false, false, NULL);
page = BufferGetPage(stack->buffer);
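
Note how the flag is derived rather than threaded through every caller: buildStats is only non-NULL when the insert is driven by CREATE INDEX, so its presence doubles as the isBuild signal. A minimal illustration (hypothetical helper, not in the patch):

    /* Hypothetical helper: build mode is inferred from buildStats. */
    static inline bool
    gin_in_build(const GinStatsData *buildStats)
    {
        return buildStats != NULL;
    }
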
GinInitBuffer(RootBuffer, GIN_LEAF);
MarkBufferDirty(RootBuffer);
- if (RelationNeedsWAL(index))
- {
- XLogRecPtr recptr;
- Page page;
-
- XLogBeginInsert();
- XLogRegisterBuffer(0, MetaBuffer, REGBUF_WILL_INIT | REGBUF_STANDARD);
- XLogRegisterBuffer(1, RootBuffer, REGBUF_WILL_INIT);
-
- recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_INDEX);
-
- page = BufferGetPage(RootBuffer);
- PageSetLSN(page, recptr);
-
- page = BufferGetPage(MetaBuffer);
- PageSetLSN(page, recptr);
- }
UnlockReleaseBuffer(MetaBuffer);
UnlockReleaseBuffer(RootBuffer);
* Update metapage stats
*/
buildstate.buildStats.nTotalPages = RelationGetNumberOfBlocks(index);
- ginUpdateStats(index, &buildstate.buildStats);
+ ginUpdateStats(index, &buildstate.buildStats, true);
+
+ /*
+ * We didn't write WAL records as we built the index, so if WAL-logging is
+ * required, write all pages to the WAL now.
+ */
+ if (RelationNeedsWAL(index))
+ {
+ log_newpage_range(index, MAIN_FORKNUM,
+ 0, RelationGetNumberOfBlocks(index),
+ true);
+ }
/*
* Return statistics
* Note: nPendingPages and ginVersion are *not* copied over
*/
void
-ginUpdateStats(Relation index, const GinStatsData *stats)
+ginUpdateStats(Relation index, const GinStatsData *stats, bool is_build)
{
Buffer metabuffer;
Page metapage;
MarkBufferDirty(metabuffer);
- if (RelationNeedsWAL(index))
+ if (RelationNeedsWAL(index) && !is_build)
{
XLogRecPtr recptr;
ginxlogUpdateMeta data;
/* Update the metapage with accurate page and entry counts */
idxStat.nTotalPages = npages;
- ginUpdateStats(info->index, &idxStat);
+ ginUpdateStats(info->index, &idxStat, false);
/* Finally, vacuum the FSM */
IndexFreeSpaceMapVacuum(info->index);
UnlockReleaseBuffer(buffer);
}
-static void
-ginRedoCreateIndex(XLogReaderState *record)
-{
- XLogRecPtr lsn = record->EndRecPtr;
- Buffer RootBuffer,
- MetaBuffer;
- Page page;
-
- MetaBuffer = XLogInitBufferForRedo(record, 0);
- Assert(BufferGetBlockNumber(MetaBuffer) == GIN_METAPAGE_BLKNO);
- page = (Page) BufferGetPage(MetaBuffer);
-
- GinInitMetabuffer(MetaBuffer);
-
- PageSetLSN(page, lsn);
- MarkBufferDirty(MetaBuffer);
-
- RootBuffer = XLogInitBufferForRedo(record, 1);
- Assert(BufferGetBlockNumber(RootBuffer) == GIN_ROOT_BLKNO);
- page = (Page) BufferGetPage(RootBuffer);
-
- GinInitBuffer(RootBuffer, GIN_LEAF);
-
- PageSetLSN(page, lsn);
- MarkBufferDirty(RootBuffer);
-
- UnlockReleaseBuffer(RootBuffer);
- UnlockReleaseBuffer(MetaBuffer);
-}
-
static void
ginRedoCreatePTree(XLogReaderState *record)
{
oldCtx = MemoryContextSwitchTo(opCtx);
switch (info)
{
- case XLOG_GIN_CREATE_INDEX:
- ginRedoCreateIndex(record);
- break;
case XLOG_GIN_CREATE_PTREE:
ginRedoCreatePTree(record);
break;
values, isnull, true /* size is currently bogus */ );
itup->t_tid = *ht_ctid;
- gistdoinsert(r, itup, 0, giststate, heapRel);
+ gistdoinsert(r, itup, 0, giststate, heapRel, false);
/* cleanup */
MemoryContextSwitchTo(oldCxt);
Buffer leftchildbuf,
List **splitinfo,
bool markfollowright,
- Relation heapRel)
+ Relation heapRel,
+ bool is_build)
{
BlockNumber blkno = BufferGetBlockNumber(buffer);
Page page = BufferGetPage(buffer);
* insertion for that. NB: The number of pages and data segments
* specified here must match the calculations in gistXLogSplit()!
*/
- if (RelationNeedsWAL(rel))
+ if (!is_build && RelationNeedsWAL(rel))
XLogEnsureRecordSpace(npage, 1 + npage * 2);
START_CRIT_SECTION();
PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
dist->page = BufferGetPage(dist->buffer);
- /* Write the WAL record */
- if (RelationNeedsWAL(rel))
- recptr = gistXLogSplit(is_leaf,
- dist, oldrlink, oldnsn, leftchildbuf,
- markfollowright);
+ /*
+ * Write the WAL record.
+ *
+ * If we're building a new index, however, we don't WAL-log changes
+ * yet. The LSN-NSN interlock between parent and child requires that
+ * LSNs never move backwards, so set the LSNs to a value that's
+ * smaller than any real or fake unlogged LSN that might be generated
+ * later. (There can't be any concurrent scans during index build, so
+ * we don't need to be able to detect concurrent splits yet.)
+ */
+ if (is_build)
+ recptr = GistBuildLSN;
else
- recptr = gistGetFakeLSN(rel);
+ {
+ if (RelationNeedsWAL(rel))
+ recptr = gistXLogSplit(is_leaf,
+ dist, oldrlink, oldnsn, leftchildbuf,
+ markfollowright);
+ else
+ recptr = gistGetFakeLSN(rel);
+ }
for (ptr = dist; ptr; ptr = ptr->next)
- {
PageSetLSN(ptr->page, recptr);
- }
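
The interlock the new comment refers to works like this: a descent from a parent page remembers the LSN it saw on the parent, and a child whose NSN is newer must have been split after the parent was read. A simplified paraphrase of that check (from gistdoinsert()'s descent logic, not part of this patch):

    /*
     * Paraphrased split check: the child was concurrently split iff it is
     * flagged follow-right or its NSN is newer than the LSN we saw on the
     * parent.  Stamping build-time pages with GistBuildLSN (= 1), below
     * every real or fake LSN, ensures this never fires for splits that
     * happened during the build.
     */
    if (GistFollowRight(page) || parentlsn < GistPageGetNSN(page))
    {
        /* finish the split: insert the downlink into the parent */
    }
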
/*
* Return the new child buffers to the caller.
if (BufferIsValid(leftchildbuf))
MarkBufferDirty(leftchildbuf);
- if (RelationNeedsWAL(rel))
+ if (is_build)
+ recptr = GistBuildLSN;
+ else
{
- OffsetNumber ndeloffs = 0,
- deloffs[1];
-
- if (OffsetNumberIsValid(oldoffnum))
+ if (RelationNeedsWAL(rel))
{
- deloffs[0] = oldoffnum;
- ndeloffs = 1;
- }
+ OffsetNumber ndeloffs = 0,
+ deloffs[1];
- recptr = gistXLogUpdate(buffer,
- deloffs, ndeloffs, itup, ntup,
- leftchildbuf);
+ if (OffsetNumberIsValid(oldoffnum))
+ {
+ deloffs[0] = oldoffnum;
+ ndeloffs = 1;
+ }
- PageSetLSN(page, recptr);
- }
- else
- {
- recptr = gistGetFakeLSN(rel);
- PageSetLSN(page, recptr);
+ recptr = gistXLogUpdate(buffer,
+ deloffs, ndeloffs, itup, ntup,
+ leftchildbuf);
+ }
+ else
+ recptr = gistGetFakeLSN(rel);
}
+ PageSetLSN(page, recptr);
if (newblkno)
*newblkno = blkno;
*/
void
gistdoinsert(Relation r, IndexTuple itup, Size freespace,
- GISTSTATE *giststate, Relation heapRel)
+ GISTSTATE *giststate, Relation heapRel, bool is_build)
{
ItemId iid;
IndexTuple idxtuple;
state.freespace = freespace;
state.r = r;
state.heapRel = heapRel;
+ state.is_build = is_build;
/* Start from the root */
firststack.blkno = GIST_ROOT_BLKNO;
leftchild,
&splitinfo,
true,
- state->heapRel);
+ state->heapRel,
+ state->is_build);
/*
* Before recursing up in case the page was split, release locks on the
GISTInitBuffer(buffer, F_LEAF);
MarkBufferDirty(buffer);
-
- if (RelationNeedsWAL(index))
- {
- XLogRecPtr recptr;
-
- XLogBeginInsert();
- XLogRegisterBuffer(0, buffer, REGBUF_WILL_INIT);
-
- recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX);
- PageSetLSN(page, recptr);
- }
- else
- PageSetLSN(page, gistGetFakeLSN(heap));
+ PageSetLSN(page, GistBuildLSN);
UnlockReleaseBuffer(buffer);
freeGISTstate(buildstate.giststate);
+ /*
+ * We didn't write WAL records as we built the index, so if WAL-logging is
+ * required, write all pages to the WAL now.
+ */
+ if (RelationNeedsWAL(index))
+ {
+ log_newpage_range(index, MAIN_FORKNUM,
+ 0, RelationGetNumberOfBlocks(index),
+ true);
+ }
+
/*
* Return statistics
*/
* locked, we call gistdoinsert directly.
*/
gistdoinsert(index, itup, buildstate->freespace,
- buildstate->giststate, buildstate->heaprel);
+ buildstate->giststate, buildstate->heaprel, true);
}
/* Update tuple count and total size. */
InvalidBuffer,
&splitinfo,
false,
- buildstate->heaprel);
+ buildstate->heaprel, true);
/*
* If this is a root split, update the root path item kept in memory. This
XLogRecPtr
gistGetFakeLSN(Relation rel)
{
- static XLogRecPtr counter = 1;
+ static XLogRecPtr counter = FirstNormalUnloggedLSN;
if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
{
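
For context, a condensed paraphrase of the rest of gistGetFakeLSN() (gistutil.c): temporary relations draw from the backend-local counter above, while unlogged relations use a shared counter. Raising the counter's start value keeps every fake LSN above GistBuildLSN:

    /* Condensed paraphrase, not the verbatim function body. */
    XLogRecPtr
    gistGetFakeLSN(Relation rel)
    {
        static XLogRecPtr counter = FirstNormalUnloggedLSN;

        if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
            return counter++;       /* session-local is enough for temp */
        else
            return GetFakeLSNForUnloggedRel();  /* shared counter, xlog.c */
    }
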
UnlockReleaseBuffer(firstbuffer);
}
-static void
-gistRedoCreateIndex(XLogReaderState *record)
-{
- XLogRecPtr lsn = record->EndRecPtr;
- Buffer buffer;
- Page page;
-
- buffer = XLogInitBufferForRedo(record, 0);
- Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
- page = (Page) BufferGetPage(buffer);
-
- GISTInitBuffer(buffer, F_LEAF);
-
- PageSetLSN(page, lsn);
-
- MarkBufferDirty(buffer);
- UnlockReleaseBuffer(buffer);
-}
-
/* redo page deletion */
static void
gistRedoPageDelete(XLogReaderState *record)
case XLOG_GIST_PAGE_SPLIT:
gistRedoPageSplitRecord(record);
break;
- case XLOG_GIST_CREATE_INDEX:
- gistRedoCreateIndex(record);
- break;
case XLOG_GIST_PAGE_DELETE:
gistRedoPageDelete(record);
break;
switch (info)
{
- case XLOG_GIN_CREATE_INDEX:
- /* no further information */
- break;
case XLOG_GIN_CREATE_PTREE:
/* no further information */
break;
switch (info & ~XLR_INFO_MASK)
{
- case XLOG_GIN_CREATE_INDEX:
- id = "CREATE_INDEX";
- break;
case XLOG_GIN_CREATE_PTREE:
id = "CREATE_PTREE";
break;
case XLOG_GIST_PAGE_SPLIT:
out_gistxlogPageSplit(buf, (gistxlogPageSplit *) rec);
break;
- case XLOG_GIST_CREATE_INDEX:
- break;
case XLOG_GIST_PAGE_DELETE:
out_gistxlogPageDelete(buf, (gistxlogPageDelete *) rec);
break;
case XLOG_GIST_PAGE_SPLIT:
id = "PAGE_SPLIT";
break;
- case XLOG_GIST_CREATE_INDEX:
- id = "CREATE_INDEX";
- break;
case XLOG_GIST_PAGE_DELETE:
id = "PAGE_DELETE";
break;
switch (info)
{
- case XLOG_SPGIST_CREATE_INDEX:
- break;
case XLOG_SPGIST_ADD_LEAF:
{
spgxlogAddLeaf *xlrec = (spgxlogAddLeaf *) rec;
switch (info & ~XLR_INFO_MASK)
{
- case XLOG_SPGIST_CREATE_INDEX:
- id = "CREATE_INDEX";
- break;
case XLOG_SPGIST_ADD_LEAF:
id = "ADD_LEAF";
break;
MarkBufferDirty(current->buffer);
- if (RelationNeedsWAL(index))
+ if (RelationNeedsWAL(index) && !state->isBuild)
{
XLogRecPtr recptr;
int flags;
MarkBufferDirty(current->buffer);
MarkBufferDirty(nbuf);
- if (RelationNeedsWAL(index))
+ if (RelationNeedsWAL(index) && !state->isBuild)
{
XLogRecPtr recptr;
saveCurrent.buffer = InvalidBuffer;
}
- if (RelationNeedsWAL(index))
+ if (RelationNeedsWAL(index) && !state->isBuild)
{
XLogRecPtr recptr;
int flags;
MarkBufferDirty(current->buffer);
- if (RelationNeedsWAL(index))
+ if (RelationNeedsWAL(index) && !state->isBuild)
{
XLogRecPtr recptr;
MarkBufferDirty(saveCurrent.buffer);
- if (RelationNeedsWAL(index))
+ if (RelationNeedsWAL(index) && !state->isBuild)
{
XLogRecPtr recptr;
int flags;
MarkBufferDirty(current->buffer);
- if (RelationNeedsWAL(index))
+ if (RelationNeedsWAL(index) && !state->isBuild)
{
XLogRecPtr recptr;
SpGistInitBuffer(nullbuffer, SPGIST_LEAF | SPGIST_NULLS);
MarkBufferDirty(nullbuffer);
- if (RelationNeedsWAL(index))
- {
- XLogRecPtr recptr;
-
- XLogBeginInsert();
-
- /*
- * Replay will re-initialize the pages, so don't take full pages
- * images. No other data to log.
- */
- XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT | REGBUF_STANDARD);
- XLogRegisterBuffer(1, rootbuffer, REGBUF_WILL_INIT | REGBUF_STANDARD);
- XLogRegisterBuffer(2, nullbuffer, REGBUF_WILL_INIT | REGBUF_STANDARD);
-
- recptr = XLogInsert(RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX);
-
- PageSetLSN(BufferGetPage(metabuffer), recptr);
- PageSetLSN(BufferGetPage(rootbuffer), recptr);
- PageSetLSN(BufferGetPage(nullbuffer), recptr);
- }
END_CRIT_SECTION();
SpGistUpdateMetaPage(index);
+ /*
+ * We didn't write WAL records as we built the index, so if WAL-logging is
+ * required, write all pages to the WAL now.
+ */
+ if (RelationNeedsWAL(index))
+ {
+ log_newpage_range(index, MAIN_FORKNUM,
+ 0, RelationGetNumberOfBlocks(index),
+ true);
+ }
+
result = (IndexBuildResult *) palloc0(sizeof(IndexBuildResult));
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;
size);
}
-static void
-spgRedoCreateIndex(XLogReaderState *record)
-{
- XLogRecPtr lsn = record->EndRecPtr;
- Buffer buffer;
- Page page;
-
- buffer = XLogInitBufferForRedo(record, 0);
- Assert(BufferGetBlockNumber(buffer) == SPGIST_METAPAGE_BLKNO);
- page = (Page) BufferGetPage(buffer);
- SpGistInitMetapage(page);
- PageSetLSN(page, lsn);
- MarkBufferDirty(buffer);
- UnlockReleaseBuffer(buffer);
-
- buffer = XLogInitBufferForRedo(record, 1);
- Assert(BufferGetBlockNumber(buffer) == SPGIST_ROOT_BLKNO);
- SpGistInitBuffer(buffer, SPGIST_LEAF);
- page = (Page) BufferGetPage(buffer);
- PageSetLSN(page, lsn);
- MarkBufferDirty(buffer);
- UnlockReleaseBuffer(buffer);
-
- buffer = XLogInitBufferForRedo(record, 2);
- Assert(BufferGetBlockNumber(buffer) == SPGIST_NULL_BLKNO);
- SpGistInitBuffer(buffer, SPGIST_LEAF | SPGIST_NULLS);
- page = (Page) BufferGetPage(buffer);
- PageSetLSN(page, lsn);
- MarkBufferDirty(buffer);
- UnlockReleaseBuffer(buffer);
-}
-
static void
spgRedoAddLeaf(XLogReaderState *record)
{
oldCxt = MemoryContextSwitchTo(opCtx);
switch (info)
{
- case XLOG_SPGIST_CREATE_INDEX:
- spgRedoCreateIndex(record);
- break;
case XLOG_SPGIST_ADD_LEAF:
spgRedoAddLeaf(record);
break;
ControlFile->time = checkPoint.time;
ControlFile->checkPoint = checkPoint.redo;
ControlFile->checkPointCopy = checkPoint;
- ControlFile->unloggedLSN = 1;
+ ControlFile->unloggedLSN = FirstNormalUnloggedLSN;
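
The same reasoning applies here: pg_control's unloggedLSN seeds the counter behind GetFakeLSNForUnloggedRel(), so bootstrapping it at FirstNormalUnloggedLSN rather than 1 reserves the range below 1000 for per-AM values such as GistBuildLSN. A simplified sketch of the counter's behavior (the real counter lives in shared memory under a spinlock and is seeded from pg_control at startup):

    static XLogRecPtr unloggedLSN = FirstNormalUnloggedLSN;  /* sketch only */

    XLogRecPtr
    GetFakeLSNForUnloggedRel(void)
    {
        return unloggedLSN++;
    }
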
/* Set important parameter values for use when replaying WAL */
ControlFile->MaxConnections = MaxConnections;
}
else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT)
{
- Buffer buffer;
-
/*
* Full-page image (FPI) records contain nothing else but a backup
- * block. The block reference must include a full-page image -
- * otherwise there would be no point in this record.
+ * block (or multiple backup blocks). Every block reference must
+ * include a full-page image - otherwise there would be no point in
+ * this record.
*
* No recovery conflicts are generated by these generic records - if a
* resource manager needs to generate conflicts, it has to define a
* XLOG_FPI and XLOG_FPI_FOR_HINT records, they use a different info
* code just to distinguish them for statistics purposes.
*/
- if (XLogReadBufferForRedo(record, 0, &buffer) != BLK_RESTORED)
- elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
- UnlockReleaseBuffer(buffer);
+ for (uint8 block_id = 0; block_id <= record->max_block_id; block_id++)
+ {
+ Buffer buffer;
+
+ if (XLogReadBufferForRedo(record, block_id, &buffer) != BLK_RESTORED)
+ elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
+ UnlockReleaseBuffer(buffer);
+ }
}
else if (info == XLOG_BACKUP_END)
{
return log_newpage(&rnode, forkNum, blkno, page, page_std);
}
+/*
+ * WAL-log a range of blocks in a relation.
+ *
+ * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
+ * written to the WAL. If the range is large, this is done in multiple WAL
+ * records.
+ *
+ * If all pages follow the standard page layout, with a PageHeader and unused
+ * space between pd_lower and pd_upper, set 'page_std' to true. That allows
+ * the unused space to be left out of the WAL records, making them smaller.
+ *
+ * NOTE: This function acquires exclusive locks on the pages. Typically, this
+ * is used on a newly-built relation, and the caller is holding an
+ * AccessExclusiveLock on it, so no other backend can be accessing it at the
+ * same time. If that's not the case, you must ensure that this does not
+ * cause a deadlock through some other means.
+ */
+void
+log_newpage_range(Relation rel, ForkNumber forkNum,
+ BlockNumber startblk, BlockNumber endblk,
+ bool page_std)
+{
+ BlockNumber blkno;
+
+ /*
+ * Iterate over all the pages in the range. They are collected into
+ * batches of XLR_MAX_BLOCK_ID pages, and a single WAL record is written
+ * for each batch.
+ */
+ XLogEnsureRecordSpace(XLR_MAX_BLOCK_ID - 1, 0);
+
+ blkno = startblk;
+ while (blkno < endblk)
+ {
+ Buffer bufpack[XLR_MAX_BLOCK_ID];
+ XLogRecPtr recptr;
+ int nbufs;
+ int i;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Collect a batch of blocks. */
+ nbufs = 0;
+ while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
+ {
+ Buffer buf = ReadBufferExtended(rel, forkNum, blkno,
+ RBM_NORMAL, NULL);
+
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * Completely empty pages are not WAL-logged. Writing a WAL record
+ * would change the LSN, and we don't want that. We want the page
+ * to stay empty.
+ */
+ if (!PageIsNew(BufferGetPage(buf)))
+ bufpack[nbufs++] = buf;
+ else
+ UnlockReleaseBuffer(buf);
+ blkno++;
+ }
+
+ /* Write WAL record for this batch. */
+ XLogBeginInsert();
+
+ START_CRIT_SECTION();
+ for (i = 0; i < nbufs; i++)
+ {
+ XLogRegisterBuffer(i, bufpack[i],
+ REGBUF_FORCE_IMAGE | (page_std ? REGBUF_STANDARD : 0));
+ MarkBufferDirty(bufpack[i]);
+ }
+
+ recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
+
+ for (i = 0; i < nbufs; i++)
+ {
+ PageSetLSN(BufferGetPage(bufpack[i]), recptr);
+ UnlockReleaseBuffer(bufpack[i]);
+ }
+ END_CRIT_SECTION();
+ }
+}
+
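
All three index AMs touched by this patch call the new function the same way. For scale: with XLR_MAX_BLOCK_ID (32) block references per record, an N-page index is logged in roughly N/32 XLOG_FPI records, fewer when empty pages are skipped:

    /* Usage, mirroring the ginbuild()/gistbuild()/spgbuild() hunks above. */
    if (RelationNeedsWAL(index))
        log_newpage_range(index, MAIN_FORKNUM,
                          0, RelationGetNumberOfBlocks(index),
                          true);
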
/*
* Allocate working buffers needed for WAL record construction.
*/
/* ginutil.c */
extern void ginGetStats(Relation index, GinStatsData *stats);
-extern void ginUpdateStats(Relation index, const GinStatsData *stats);
+extern void ginUpdateStats(Relation index, const GinStatsData *stats,
+ bool is_build);
#endif /* GIN_H */
#include "lib/stringinfo.h"
#include "storage/off.h"
-#define XLOG_GIN_CREATE_INDEX 0x00
-
#define XLOG_GIN_CREATE_PTREE 0x10
typedef struct ginxlogCreatePostingTree
typedef XLogRecPtr GistNSN;
+/*
+ * A bogus LSN / NSN value used during index build. Must be smaller than any
+ * real or fake unlogged LSN, so that after an index build finishes, all the
+ * splits are considered completed.
+ */
+#define GistBuildLSN ((XLogRecPtr) 1)
+
/*
* For on-disk compatibility with pre-9.3 servers, NSN is stored as two
* 32-bit fields on disk, same as LSNs.
Relation r;
Relation heapRel;
Size freespace; /* free space to be left */
+ bool is_build;
GISTInsertStack *stack;
} GISTInsertState;
IndexTuple itup,
Size freespace,
GISTSTATE *GISTstate,
- Relation heapRel);
+ Relation heapRel,
+ bool is_build);
/* A List of these is returned from gistplacetopage() in *splitinfo */
typedef struct
Buffer leftchildbuf,
List **splitinfo,
bool markleftchild,
- Relation heapRel);
+ Relation heapRel,
+ bool is_build);
extern SplitedPageLayout *gistSplit(Relation r, Page page, IndexTuple *itup,
int len, GISTSTATE *giststate);
* FSM */
#define XLOG_GIST_PAGE_SPLIT 0x30
/* #define XLOG_GIST_INSERT_COMPLETE 0x40 */ /* not used anymore */
-#define XLOG_GIST_CREATE_INDEX 0x50
+ /* #define XLOG_GIST_CREATE_INDEX 0x50 */ /* not used anymore */
#define XLOG_GIST_PAGE_DELETE 0x60
/*
#include "storage/off.h"
/* XLOG record types for SPGiST */
-#define XLOG_SPGIST_CREATE_INDEX 0x00
+/* #define XLOG_SPGIST_CREATE_INDEX 0x00 */ /* not used anymore */
#define XLOG_SPGIST_ADD_LEAF 0x10
#define XLOG_SPGIST_MOVE_LEAFS 0x20
#define XLOG_SPGIST_ADD_NODE 0x30
#define InvalidXLogRecPtr 0
#define XLogRecPtrIsInvalid(r) ((r) == InvalidXLogRecPtr)
+/*
+ * First LSN to use for "fake" LSNs.
+ *
+ * Values smaller than this can be used for special per-AM purposes.
+ */
+#define FirstNormalUnloggedLSN ((XLogRecPtr) 1000)
+
/*
* XLogSegNo - physical log file sequence number.
*/
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/relfilenode.h"
+#include "utils/relcache.h"
/*
* The minimum size of the WAL construction working area. If you need to
extern XLogRecPtr log_newpage(RelFileNode *rnode, ForkNumber forkNum,
BlockNumber blk, char *page, bool page_std);
extern XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std);
+extern void log_newpage_range(Relation rel, ForkNumber forkNum,
+ BlockNumber startblk, BlockNumber endblk, bool page_std);
extern XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std);
extern void InitXLogInsert(void);