From 684200543b4cbfe1ac002c9962e90683d4ea4691 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Mon, 28 Jan 2019 17:16:56 -0800 Subject: [PATCH] Revert "Move page initialization from RelationAddExtraBlocks() to use." This reverts commit fc02e6724f3ce069b33284bce092052ab55bd751 and e6799d5a53011985d916fdb48fe014a4ae70422e. Parts of the buildfarm error out with ERROR: page %u of relation "%s" should be empty but is not errors, and so far I/we do not know why. fc02e672 didn't fix the issue. As I cannot reproduce the issue locally, it seems best to get the buildfarm green again, and reproduce the issue without time pressure. --- src/backend/access/heap/hio.c | 31 ++++------ src/backend/access/heap/vacuumlazy.c | 89 ++++++++++------------------ 2 files changed, 42 insertions(+), 78 deletions(-) diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 985c4c4777..3da0b49ccc 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -204,8 +204,7 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) /* * Extend by one page. This should generally match the main-line * extension code in RelationGetBufferForTuple, except that we hold - * the relation extension lock throughout, and we don't immediately - * initialize the page (see below). + * the relation extension lock throughout. */ buffer = ReadBufferBI(relation, P_NEW, bistate); @@ -217,16 +216,18 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) BufferGetBlockNumber(buffer), RelationGetRelationName(relation)); + PageInit(page, BufferGetPageSize(buffer), 0); + /* - * Add the page to the FSM without initializing. If we were to - * initialize here the page would potentially get flushed out to disk - * before we add any useful content. There's no guarantee that that'd - * happen before a potential crash, so we need to deal with - * uninitialized pages anyway, thus avoid the potential for - * unnecessary writes.
+ * We mark all the new buffers dirty, but do nothing to write them + * out; they'll probably get used soon, and even if they are not, a + * crash will leave an okay all-zeroes page on disk. */ + MarkBufferDirty(buffer); + + /* we'll need this info below */ blockNum = BufferGetBlockNumber(buffer); - freespace = BufferGetPageSize(buffer) - SizeOfPageHeaderData; + freespace = PageGetHeapFreeSpace(page); UnlockReleaseBuffer(buffer); @@ -478,18 +479,6 @@ loop: * we're done. */ page = BufferGetPage(buffer); - - /* - * Initialize page, it'll be used soon. We could avoid dirtying the - * buffer here, and rely on the caller to do so whenever it puts a - * tuple onto the page, but there seems not much benefit in doing so. - */ - if (PageIsNew(page)) - { - PageInit(page, BufferGetPageSize(buffer), 0); - MarkBufferDirty(buffer); - } - pageFreeSpace = PageGetHeapFreeSpace(page); if (len + saveFreeSpace <= pageFreeSpace) { diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 53c72c14c3..37aa484ec3 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -860,65 +860,43 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, if (PageIsNew(page)) { - bool still_new; - /* - * All-zeroes pages can be left over if either a backend extends - * the relation by a single page, but crashes before the newly - * initialized page has been written out, or when bulk-extending - * the relation (which creates a number of empty pages at the tail - * end of the relation, but enters them into the FSM). - * - * Make sure these pages are in the FSM, to ensure they can be - * reused. Do that by testing if there's any space recorded for - * the page. If not, enter it. - * - * Note we do not enter the page into the visibilitymap. That has - * the downside that we repeatedly visit this page in subsequent - * vacuums, but otherwise we'll never not discover the space on a - * promoted standby.
The harm of repeated checking ought to - * normally not be too bad - the space usually should be used at - * some point, otherwise there wouldn't be any regular vacuums. + * An all-zeroes page could be left over if a backend extends the + * relation but crashes before initializing the page. Reclaim such + * pages for use. * * We have to be careful here because we could be looking at a - * page that someone has just added to the relation and the - * extending backend might not yet have been able to lock the page - * (see RelationGetBufferForTuple), which is problematic because - * of cross-checks that new pages are actually new. If we add this - * page to the FSM, this page could be reused, and such - * crosschecks could fail. To protect against that, release the - * buffer lock, grab the relation extension lock momentarily, and - * re-lock the buffer. If the page is still empty and not in the - * FSM by then, it must be left over from a from a crashed - * backend, and we can record the free space. + * page that someone has just added to the relation and not yet + * been able to initialize (see RelationGetBufferForTuple). To + * protect against that, release the buffer lock, grab the + * relation extension lock momentarily, and re-lock the buffer. If + * the page is still uninitialized by then, it must be left over + * from a crashed backend, and we can initialize it. + * + * We don't really need the relation lock when this is a new or + * temp relation, but it's probably not worth the code space to + * check that, since this surely isn't a critical path. * * Note: the comparable code in vacuum.c need not worry because - * it's got an exclusive lock on the whole relation. + * it's got exclusive lock on the whole relation.
*/ LockBuffer(buf, BUFFER_LOCK_UNLOCK); LockRelationForExtension(onerel, ExclusiveLock); UnlockRelationForExtension(onerel, ExclusiveLock); LockBufferForCleanup(buf); - - /* - * Perform checking of FSM after releasing lock, the fsm is - * approximate, after all. - */ - still_new = PageIsNew(page); - UnlockReleaseBuffer(buf); - - if (still_new) + if (PageIsNew(page)) { + ereport(WARNING, + (errmsg("relation \"%s\" page %u is uninitialized --- fixing", + relname, blkno))); + PageInit(page, BufferGetPageSize(buf), 0); empty_pages++; - - if (GetRecordedFreeSpace(onerel, blkno) == 0) - { - Size freespace; - - freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData; - RecordPageWithFreeSpace(onerel, blkno, freespace); - } } + freespace = PageGetHeapFreeSpace(page); + MarkBufferDirty(buf); + UnlockReleaseBuffer(buf); + + RecordPageWithFreeSpace(onerel, blkno, freespace); continue; } @@ -927,10 +905,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, empty_pages++; freespace = PageGetHeapFreeSpace(page); - /* - * Empty pages are always all-visible and all-frozen (note that - * the same is currently not true for new pages, see above). - */ + /* empty pages are always all-visible and all-frozen */ if (!PageIsAllVisible(page)) { START_CRIT_SECTION(); @@ -1664,13 +1639,12 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup) *hastup = false; - /* - * New and empty pages, obviously, don't contain tuples. We could make - * sure that the page is registered in the FSM, but it doesn't seem worth - * waiting for a cleanup lock just for that, especially because it's - * likely that the pin holder will do so. - */ - if (PageIsNew(page) || PageIsEmpty(page)) + /* If we hit an uninitialized page, we want to force vacuuming it. */ + if (PageIsNew(page)) + return true; + + /* Quick out for ordinary empty page.
*/ + if (PageIsEmpty(page)) return false; maxoff = PageGetMaxOffsetNumber(page); @@ -2055,6 +2029,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats) if (PageIsNew(page) || PageIsEmpty(page)) { + /* PageIsNew probably shouldn't happen... */ UnlockReleaseBuffer(buf); continue; } -- 2.40.0