XLogRecPtr lsn = record->EndRecPtr;
Page page;
Buffer metabuf;
+ ForkNumber forknum;
xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *) XLogRecGetData(record);
page = (Page) BufferGetPage(metabuf);
PageSetLSN(page, lsn);
MarkBufferDirty(metabuf);
+
+ /*
+ * Force the on-disk state of init forks to always be in sync with the
+ * state in shared buffers. See XLogReadBufferForRedoExtended. We need
+ * special handling for init forks as create index operations don't log a
+ * full page image of the metapage.
+ */
+ XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
+ if (forknum == INIT_FORKNUM)
+     FlushOneBuffer(metabuf);
+
/* all done */
UnlockReleaseBuffer(metabuf);
}
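/*
 * Context for the flush above: at the end of crash recovery the init fork of
 * an unlogged relation is copied file-to-file over the main fork, without
 * going through shared buffers, so a redo change that is not covered by a
 * full-page image has to be written out explicitly or it can be lost.  A
 * minimal sketch of that pattern as a helper; flush_if_init_fork is a
 * hypothetical name used only for illustration, not a function in the tree:
 */
#include "postgres.h"
#include "access/xlogreader.h"
#include "common/relpath.h"
#include "storage/bufmgr.h"

static void
flush_if_init_fork(XLogReaderState *record, uint8 block_id, Buffer buf)
{
    ForkNumber  forknum;

    /* find out which fork the registered block belongs to */
    XLogRecGetBlockTag(record, block_id, NULL, &forknum, NULL);
    if (forknum == INIT_FORKNUM)
        FlushOneBuffer(buf);
}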
Page page;
HashMetaPage metap;
uint32 num_buckets;
+ ForkNumber forknum;
xl_hash_init_bitmap_page *xlrec = (xl_hash_init_bitmap_page *) XLogRecGetData(record);
_hash_initbitmapbuffer(bitmapbuf, xlrec->bmsize, true);
PageSetLSN(BufferGetPage(bitmapbuf), lsn);
MarkBufferDirty(bitmapbuf);
+
+ /*
+ * Force the on-disk state of init forks to always be in sync with the
+ * state in shared buffers. See XLogReadBufferForRedoExtended. We need
+ * special handling for init forks as create index operations don't log a
+ * full page image of either the bitmap page or the metapage.
+ */
+ XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
+ if (forknum == INIT_FORKNUM)
+     FlushOneBuffer(bitmapbuf);
UnlockReleaseBuffer(bitmapbuf);
/* add the new bitmap page to the metapage's list of bitmaps */
PageSetLSN(page, lsn);
MarkBufferDirty(metabuf);
+
+ XLogRecGetBlockTag(record, 1, NULL, &forknum, NULL);
+ if (forknum == INIT_FORKNUM)
+     FlushOneBuffer(metabuf);
}
if (BufferIsValid(metabuf))
UnlockReleaseBuffer(metabuf);
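/*
 * Note that an init-bitmap-page record registers two blocks -- block 0 is
 * the new bitmap page and block 1 is the metapage whose bitmap list it is
 * added to -- so the init-fork check is repeated once per buffer.  With the
 * hypothetical flush_if_init_fork() sketched above, the two call sites would
 * read roughly:
 *
 *     flush_if_init_fork(record, 0, bitmapbuf);
 *     flush_if_init_fork(record, 1, metabuf);
 */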
int32 ffactor;
uint32 num_buckets;
uint32 i;
+ bool use_wal;
/* safety check */
if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
    elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
         RelationGetRelationName(rel));
+ /*
+ * WAL log creation of pages if the relation is persistent, or this is the
+ * init fork. Init forks for unlogged relations always need to be WAL
+ * logged.
+ */
+ use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM;
+
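/*
 * Why forkNum can be INIT_FORKNUM here even though RelationNeedsWAL() is
 * false for an unlogged index: the empty index is built in the init fork by
 * the index AM's ambuildempty callback.  A sketch of that caller, assuming
 * the hash AM's ambuildempty looks roughly like this (see hashbuildempty in
 * hash.c):
 */
void
hashbuildempty(Relation index)
{
    _hash_init(index, 0, INIT_FORKNUM);
}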
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
metap = HashPageGetMeta(pg);
/* XLOG stuff */
- if (RelationNeedsWAL(rel))
+ if (use_wal)
{
xl_hash_init_meta_page xlrec;
XLogRecPtr recptr;
_hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
MarkBufferDirty(buf);
- log_newpage(&rel->rd_node,
-             forkNum,
-             blkno,
-             BufferGetPage(buf),
-             true);
+ if (use_wal)
+     log_newpage(&rel->rd_node,
+                 forkNum,
+                 blkno,
+                 BufferGetPage(buf),
+                 true);
_hash_relbuf(rel, buf);
}
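/*
 * Unlike the metapage and the bitmap page, each empty bucket page above is
 * WAL-logged as a full-page image via log_newpage().  When such an image is
 * replayed, XLogReadBufferForRedoExtended() already finishes the restore
 * with an init-fork check of the same shape (roughly, and subject to
 * version differences):
 *
 *     if (forknum == INIT_FORKNUM)
 *         FlushOneBuffer(*buf);
 *
 * which is why the bucket-page loop needs no hash-specific flush, while the
 * metapage and bitmap page, whose records carry no full-page image, do.
 */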
MarkBufferDirty(metabuf);
/* XLOG stuff */
- if (RelationNeedsWAL(rel))
+ if (use_wal)
{
xl_hash_init_bitmap_page xlrec;
XLogRecPtr recptr;