1 /*-------------------------------------------------------------------------
4 * Concurrent ("lazy") vacuuming.
7 * The major space usage for LAZY VACUUM is storage for the array of dead
8 * tuple TIDs, with the next biggest need being storage for per-disk-page
9 * free space info. We want to ensure we can vacuum even the very largest
10 * relations with finite memory space usage. To do that, we set upper bounds
11 * on the number of tuples and pages we will keep track of at once.
13 * We are willing to use at most maintenance_work_mem (or perhaps
14 * autovacuum_work_mem) memory space to keep track of dead tuples. We
15 * initially allocate an array of TIDs of that size, with an upper limit that
16 * depends on table size (this limit ensures we don't allocate a huge area
17 * uselessly for vacuuming small tables). If the array threatens to overflow,
18 * we suspend the heap scan phase and perform a pass of index cleanup and page
19 * compaction, then resume the heap scan with an empty TID array.
21 * If we're processing a table with no indexes, we can just vacuum each page
22 * as we go; there's no need to save up multiple tuples to minimize the number
23 * of index scans performed. So we don't use maintenance_work_mem memory for
24 * the TID array, just enough to hold as many heap tuples as fit on one page.
27 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
28 * Portions Copyright (c) 1994, Regents of the University of California
32 * src/backend/commands/vacuumlazy.c
34 *-------------------------------------------------------------------------
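/*
 * Illustrative outline (not additional code) of the overflow cycle the
 * header comment describes, i.e. roughly what lazy_scan_heap() does:
 *
 *		for each heap page:
 *			prune the page and remember dead-tuple TIDs;
 *			if the TID array is nearly full:
 *				vacuum_log_cleanup_info();            -- WAL for hot standby
 *				lazy_vacuum_index() on each index;    -- drop index entries
 *				lazy_vacuum_heap();                   -- reclaim line pointers
 *				reset the TID array and resume the heap scan;
 */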
40 #include "access/genam.h"
41 #include "access/heapam.h"
42 #include "access/heapam_xlog.h"
43 #include "access/htup_details.h"
44 #include "access/multixact.h"
45 #include "access/transam.h"
46 #include "access/visibilitymap.h"
47 #include "access/xlog.h"
48 #include "catalog/catalog.h"
49 #include "catalog/storage.h"
50 #include "commands/dbcommands.h"
51 #include "commands/progress.h"
52 #include "commands/vacuum.h"
53 #include "miscadmin.h"
55 #include "portability/instr_time.h"
56 #include "postmaster/autovacuum.h"
57 #include "storage/bufmgr.h"
58 #include "storage/freespace.h"
59 #include "storage/lmgr.h"
60 #include "utils/lsyscache.h"
61 #include "utils/memutils.h"
62 #include "utils/pg_rusage.h"
63 #include "utils/timestamp.h"
64 #include "utils/tqual.h"
68 * Space/time tradeoff parameters: do these need to be user-tunable?
70 * To consider truncating the relation, we want there to be at least
71 * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
72 * is less) potentially-freeable pages.
74 #define REL_TRUNCATE_MINIMUM 1000
75 #define REL_TRUNCATE_FRACTION 16
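/*
 * Worked example (illustrative): a 64000-page relation is considered for
 * truncation once at least Min(REL_TRUNCATE_MINIMUM, 64000 / 16) =
 * Min(1000, 4000) = 1000 trailing pages are potentially freeable, while a
 * 4800-page relation needs only Min(1000, 300) = 300 such pages.
 */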
78 * Timing parameters for truncate locking heuristics.
80 * These were not exposed as user tunable GUC values because it didn't seem
81 * that the potential for improvement was great enough to merit the cost of supporting them.
84 #define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
85 #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
86 #define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
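/*
 * Illustrative arithmetic: lazy_truncate_heap() sleeps for
 * VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL (50 ms) between attempts to acquire
 * the exclusive lock, and gives up after VACUUM_TRUNCATE_LOCK_TIMEOUT /
 * VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL = 5000 / 50 = 100 failed attempts,
 * i.e. about five seconds of trying.
 */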
89 * Guesstimation of number of dead tuples per page. This is used to
90 * provide an upper limit to memory allocated when vacuuming small tables.
93 #define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage
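/*
 * Illustrative arithmetic: with the default 8 kB block size,
 * MaxHeapTuplesPerPage works out to 291, so a 1000-page table is assumed
 * to hold at most 291000 dead tuples for allocation purposes (see
 * lazy_space_alloc).
 */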
96 * Before we consider skipping a page that's marked as clean in the
97 * visibility map, we must've seen at least this many clean pages.
99 #define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)
102 * Size of the prefetch window for lazy vacuum backwards truncation scan.
103 * Needs to be a power of 2.
105 #define PREFETCH_SIZE ((BlockNumber) 32)
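/*
 * Illustrative arithmetic: because PREFETCH_SIZE is a power of 2, the
 * start of a prefetch window can be computed by masking, as
 * count_nondeletable_pages() does: for blkno = 75, prefetchStart =
 * 75 & ~(32 - 1) = 64, so blocks 64..75 are prefetched together.
 */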
107 typedef struct LVRelStats
109 /* hasindex = true means two-pass strategy; false means one-pass */
111 /* Overall statistics about rel */
112 BlockNumber old_rel_pages; /* previous value of pg_class.relpages */
113 BlockNumber rel_pages; /* total number of pages */
114 BlockNumber scanned_pages; /* number of pages we examined */
115 BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */
116 BlockNumber frozenskipped_pages; /* # of frozen pages we skipped */
117 BlockNumber tupcount_pages; /* pages whose tuples we counted */
118 double scanned_tuples; /* counts only tuples on tupcount_pages */
119 double old_rel_tuples; /* previous value of pg_class.reltuples */
120 double new_rel_tuples; /* new estimated total # of tuples */
121 double new_dead_tuples; /* new estimated total # of dead tuples */
122 BlockNumber pages_removed;
123 double tuples_deleted;
124 BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
125 /* List of TIDs of tuples we intend to delete */
126 /* NB: this list is ordered by TID address */
127 int num_dead_tuples; /* current # of entries */
128 int max_dead_tuples; /* # slots allocated in array */
129 ItemPointer dead_tuples; /* array of ItemPointerData */
131 TransactionId latestRemovedXid;
132 bool lock_waiter_detected;
136 /* A few variables that don't seem worth passing around as parameters */
137 static int elevel = -1;
139 static TransactionId OldestXmin;
140 static TransactionId FreezeLimit;
141 static MultiXactId MultiXactCutoff;
143 static BufferAccessStrategy vac_strategy;
146 /* non-export function prototypes */
147 static void lazy_scan_heap(Relation onerel, int options,
148 LVRelStats *vacrelstats, Relation *Irel, int nindexes,
150 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
151 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
152 static void lazy_vacuum_index(Relation indrel,
153 IndexBulkDeleteResult **stats,
154 LVRelStats *vacrelstats);
155 static void lazy_cleanup_index(Relation indrel,
156 IndexBulkDeleteResult *stats,
157 LVRelStats *vacrelstats);
158 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
159 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
160 static bool should_attempt_truncation(LVRelStats *vacrelstats);
161 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
162 static BlockNumber count_nondeletable_pages(Relation onerel,
163 LVRelStats *vacrelstats);
164 static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
165 static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
166 ItemPointer itemptr);
167 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
168 static int vac_cmp_itemptr(const void *left, const void *right);
169 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
170 TransactionId *visibility_cutoff_xid, bool *all_frozen);
174 * lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
176 * This routine vacuums a single heap, cleans out its indexes, and
177 * updates its relpages and reltuples statistics.
179 * At entry, we have already established a transaction and opened
180 * and locked the relation.
183 lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
184 BufferAccessStrategy bstrategy)
186 LVRelStats *vacrelstats;
190 TimestampTz starttime = 0;
195 bool aggressive; /* should we scan all unfrozen pages? */
196 bool scanned_all_unfrozen; /* actually scanned all such pages? */
197 TransactionId xidFullScanLimit;
198 MultiXactId mxactFullScanLimit;
199 BlockNumber new_rel_pages;
200 double new_rel_tuples;
201 BlockNumber new_rel_allvisible;
202 double new_live_tuples;
203 TransactionId new_frozen_xid;
204 MultiXactId new_min_multi;
206 Assert(params != NULL);
208 /* measure elapsed time iff autovacuum logging requires it */
209 if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
211 pg_rusage_init(&ru0);
212 starttime = GetCurrentTimestamp();
215 if (options & VACOPT_VERBOSE)
220 pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
221 RelationGetRelid(onerel));
223 vac_strategy = bstrategy;
225 vacuum_set_xid_limits(onerel,
226 params->freeze_min_age,
227 params->freeze_table_age,
228 params->multixact_freeze_min_age,
229 params->multixact_freeze_table_age,
230 &OldestXmin, &FreezeLimit, &xidFullScanLimit,
231 &MultiXactCutoff, &mxactFullScanLimit);
234 * We request an aggressive scan if the table's frozen Xid is now older
235 * than or equal to the requested Xid full-table scan limit; or if the
236 * table's minimum MultiXactId is older than or equal to the requested
237 * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
239 aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
241 aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
243 if (options & VACOPT_DISABLE_PAGE_SKIPPING)
246 vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
248 vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
249 vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
250 vacrelstats->num_index_scans = 0;
251 vacrelstats->pages_removed = 0;
252 vacrelstats->lock_waiter_detected = false;
254 /* Open all indexes of the relation */
255 vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
256 vacrelstats->hasindex = (nindexes > 0);
258 /* Do the vacuuming */
259 lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);
261 /* Done with indexes */
262 vac_close_indexes(nindexes, Irel, NoLock);
265 * Compute whether we actually scanned all the unfrozen pages. If we did,
266 * we can adjust relfrozenxid and relminmxid.
268 * NB: We need to check this before truncating the relation, because that
269 * will change ->rel_pages.
271 if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
272 < vacrelstats->rel_pages)
275 scanned_all_unfrozen = false;
278 scanned_all_unfrozen = true;
281 * Optionally truncate the relation.
283 if (should_attempt_truncation(vacrelstats))
284 lazy_truncate_heap(onerel, vacrelstats);
286 /* Report that we are now doing final cleanup */
287 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
288 PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
290 /* Vacuum the Free Space Map */
291 FreeSpaceMapVacuum(onerel);
294 * Update statistics in pg_class.
296 * A corner case here is that if we scanned no pages at all because every
297 * page is all-visible, we should not update relpages/reltuples, because
298 * we have no new information to contribute. In particular this keeps us
299 * from replacing relpages=reltuples=0 (which means "unknown tuple
300 * density") with nonzero relpages and reltuples=0 (which means "zero
301 * tuple density") unless there's some actual evidence for the latter.
303 * It's important that we use tupcount_pages and not scanned_pages for the
304 * check described above; scanned_pages counts pages where we could not
305 * get cleanup lock, and which were processed only for frozenxid purposes.
307 * We do update relallvisible even in the corner case, since if the table
308 * is all-visible we'd definitely like to know that. But clamp the value
309 * to be not more than what we're setting relpages to.
311 * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
312 * since then we don't know for certain that all tuples have a newer xmin.
314 new_rel_pages = vacrelstats->rel_pages;
315 new_rel_tuples = vacrelstats->new_rel_tuples;
316 if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
318 new_rel_pages = vacrelstats->old_rel_pages;
319 new_rel_tuples = vacrelstats->old_rel_tuples;
322 visibilitymap_count(onerel, &new_rel_allvisible, NULL);
323 if (new_rel_allvisible > new_rel_pages)
324 new_rel_allvisible = new_rel_pages;
326 new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
327 new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
329 vac_update_relstats(onerel,
333 vacrelstats->hasindex,
338 /* report results to the stats collector, too */
339 new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
340 if (new_live_tuples < 0)
341 new_live_tuples = 0; /* just in case */
343 pgstat_report_vacuum(RelationGetRelid(onerel),
344 onerel->rd_rel->relisshared,
346 vacrelstats->new_dead_tuples);
347 pgstat_progress_end_command();
349 /* and log the action if appropriate */
350 if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
352 TimestampTz endtime = GetCurrentTimestamp();
354 if (params->log_min_duration == 0 ||
355 TimestampDifferenceExceeds(starttime, endtime,
356 params->log_min_duration))
360 TimestampDifference(starttime, endtime, &secs, &usecs);
364 if ((secs > 0) || (usecs > 0))
366 read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
367 (secs + usecs / 1000000.0);
368 write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
369 (secs + usecs / 1000000.0);
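/*
 * Illustrative arithmetic: with BLCKSZ = 8192, 2560 page misses over 4 s
 * give read_rate = 8192 * 2560 / (1024 * 1024) / 4 = 5.0 MB/s;
 * write_rate is the analogous figure computed from VacuumPageDirty.
 */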
373 * This is pretty messy, but we split it up so that we can skip
374 * emitting individual parts of the message when not applicable.
376 initStringInfo(&buf);
377 appendStringInfo(&buf, _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"),
378 get_database_name(MyDatabaseId),
379 get_namespace_name(RelationGetNamespace(onerel)),
380 RelationGetRelationName(onerel),
381 vacrelstats->num_index_scans);
382 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
383 vacrelstats->pages_removed,
384 vacrelstats->rel_pages,
385 vacrelstats->pinskipped_pages,
386 vacrelstats->frozenskipped_pages);
387 appendStringInfo(&buf,
388 _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
389 vacrelstats->tuples_deleted,
390 vacrelstats->new_rel_tuples,
391 vacrelstats->new_dead_tuples,
393 appendStringInfo(&buf,
394 _("buffer usage: %d hits, %d misses, %d dirtied\n"),
398 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
399 read_rate, write_rate);
400 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
403 (errmsg_internal("%s", buf.data)));
410 * For Hot Standby we need to know the highest transaction id that will
411 * be removed by any change. VACUUM proceeds in a number of passes so
412 * we need to consider how each pass operates. The first phase runs
413 * heap_page_prune(), which can issue XLOG_HEAP2_CLEAN records as it
414 * progresses - these will have a latestRemovedXid on each record.
415 * In some cases this removes all of the tuples to be removed, though
416 * often we have dead tuples with index pointers so we must remember them
417 * for removal in phase 3. Index records for those rows are removed
418 * in phase 2 and index blocks do not have MVCC information attached.
419 * So before we can allow removal of any index tuples we need to issue
420 * a WAL record containing the latestRemovedXid of rows that will be
421 * removed in phase three. This allows recovery queries to block at the
422 * correct place, i.e. before phase two, rather than during phase three
423 * which would be after the rows have become inaccessible.
426 vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
429 * Skip this for relations for which no WAL is to be written, or if we're
430 * not trying to support archive recovery.
432 if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
436 * No need to write the record at all unless it contains a valid value
438 if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
439 (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
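/*
 * Illustrative call ordering (a sketch of what lazy_scan_heap() already
 * does, not additional code), showing why this record must come first:
 *
 *		vacuum_log_cleanup_info(onerel, vacrelstats);  -- cleanup-info WAL
 *		lazy_vacuum_index(...) for each index;         -- phase 2
 *		lazy_vacuum_heap(onerel, vacrelstats);         -- phase 3
 *
 * A recovery query on a standby therefore blocks at the cleanup-info
 * record, i.e. before phase 2, rather than during phase 3.
 */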
443 * lazy_scan_heap() -- scan an open heap relation
445 * This routine prunes each page in the heap, which will among other
446 * things truncate dead tuples to dead line pointers, defragment the
447 * page, and set commit status bits (see heap_page_prune). It also builds
448 * lists of dead tuples and pages with free space, calculates statistics
449 * on the number of live tuples in the heap, and marks pages as
450 * all-visible if appropriate. When done, or when we run low on space for
451 * dead-tuple TIDs, invoke vacuuming of indexes and call lazy_vacuum_heap
452 * to reclaim dead line pointers.
454 * If there are no indexes then we can reclaim line pointers on the fly;
455 * dead line pointers need only be retained until all index pointers that
456 * reference them have been killed.
459 lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
460 Relation *Irel, int nindexes, bool aggressive)
466 BlockNumber empty_pages,
472 IndexBulkDeleteResult **indstats;
475 Buffer vmbuffer = InvalidBuffer;
476 BlockNumber next_unskippable_block;
477 bool skipping_blocks;
478 xl_heap_freeze_tuple *frozen;
480 const int initprog_index[] = {
481 PROGRESS_VACUUM_PHASE,
482 PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
483 PROGRESS_VACUUM_MAX_DEAD_TUPLES
485 int64 initprog_val[3];
487 pg_rusage_init(&ru0);
489 relname = RelationGetRelationName(onerel);
491 (errmsg("vacuuming \"%s.%s\"",
492 get_namespace_name(RelationGetNamespace(onerel)),
495 empty_pages = vacuumed_pages = 0;
496 num_tuples = tups_vacuumed = nkeep = nunused = 0;
498 indstats = (IndexBulkDeleteResult **)
499 palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
501 nblocks = RelationGetNumberOfBlocks(onerel);
502 vacrelstats->rel_pages = nblocks;
503 vacrelstats->scanned_pages = 0;
504 vacrelstats->tupcount_pages = 0;
505 vacrelstats->nonempty_pages = 0;
506 vacrelstats->latestRemovedXid = InvalidTransactionId;
508 lazy_space_alloc(vacrelstats, nblocks);
509 frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
511 /* Report that we're scanning the heap, advertising total # of blocks */
512 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
513 initprog_val[1] = nblocks;
514 initprog_val[2] = vacrelstats->max_dead_tuples;
515 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
518 * Except when aggressive is set, we want to skip pages that are
519 * all-visible according to the visibility map, but only when we can skip
520 * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading
521 * sequentially, the OS should be doing readahead for us, so there's no
522 * gain in skipping a page now and then; that's likely to disable
523 * readahead and so be counterproductive. Also, skipping even a single
524 * page means that we can't update relfrozenxid, so we only want to do it
525 * if we can skip a goodly number of pages.
527 * When aggressive is set, we can't skip pages just because they are
528 * all-visible, but we can still skip pages that are all-frozen, since
529 * such pages do not need freezing and do not affect the value that we can
530 * safely set for relfrozenxid or relminmxid.
532 * Before entering the main loop, establish the invariant that
533 * next_unskippable_block is the next block number >= blkno that we
534 * can't skip based on the visibility map, either all-visible for a
535 * regular scan or all-frozen for an aggressive scan. We set it to
536 * nblocks if there's no such block. We also set up the skipping_blocks
537 * flag correctly at this stage.
539 * Note: The value returned by visibilitymap_get_status could be slightly
540 * out-of-date, since we make this test before reading the corresponding
541 * heap page or locking the buffer. This is OK. If we mistakenly think
542 * that the page is all-visible or all-frozen when in fact the flag's just
543 * been cleared, we might fail to vacuum the page. It's easy to see that
544 * skipping a page when aggressive is not set is not a very big deal; we
545 * might leave some dead tuples lying around, but the next vacuum will
546 * find them. But even when aggressive *is* set, it's still OK if we miss
547 * a page whose all-frozen marking has just been cleared. Any new XIDs
548 * just added to that page are necessarily newer than the GlobalXmin we
549 * computed, so they'll have no effect on the value to which we can safely
550 * set relfrozenxid. A similar argument applies for MXIDs and relminmxid.
552 * We will scan the table's last page, at least to the extent of
553 * determining whether it has tuples or not, even if it should be skipped
554 * according to the above rules; except when we've already determined that
555 * it's not worth trying to truncate the table. This avoids having
556 * lazy_truncate_heap() take access-exclusive lock on the table to attempt
557 * a truncation that just fails immediately because there are tuples in
558 * the last page. This is worth avoiding mainly because such a lock must
559 * be replayed on any hot standby, where it can be disruptive.
561 next_unskippable_block = 0;
562 if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
564 while (next_unskippable_block < nblocks)
568 vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
572 if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
577 if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
580 vacuum_delay_point();
581 next_unskippable_block++;
585 if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
586 skipping_blocks = true;
588 skipping_blocks = false;
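/*
 * Example (illustrative): in a non-aggressive vacuum of a relation whose
 * first 40 pages are all-visible, the loop above stops with
 * next_unskippable_block = 40 >= SKIP_PAGES_THRESHOLD, so skipping_blocks
 * starts out true; a run of only 10 such pages would leave it false and
 * those pages would be read anyway.
 */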
590 for (blkno = 0; blkno < nblocks; blkno++)
601 bool all_visible_according_to_vm = false;
603 bool all_frozen = true; /* provided all_visible is also true */
604 bool has_dead_tuples;
605 TransactionId visibility_cutoff_xid = InvalidTransactionId;
607 /* see note above about forcing scanning of last page */
608 #define FORCE_CHECK_PAGE() \
609 (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))
611 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
613 if (blkno == next_unskippable_block)
615 /* Time to advance next_unskippable_block */
616 next_unskippable_block++;
617 if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
619 while (next_unskippable_block < nblocks)
623 vmskipflags = visibilitymap_get_status(onerel,
624 next_unskippable_block,
628 if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
633 if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
636 vacuum_delay_point();
637 next_unskippable_block++;
642 * We know we can't skip the current block. But set up
643 * skipping_blocks to do the right thing at the following blocks.
646 if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
647 skipping_blocks = true;
649 skipping_blocks = false;
652 * Normally, the fact that we can't skip this block must mean that
653 * it's not all-visible. But in an aggressive vacuum we know only
654 * that it's not all-frozen, so it might still be all-visible.
656 if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
657 all_visible_according_to_vm = true;
662 * The current block is potentially skippable; if we've seen a
663 * long enough run of skippable blocks to justify skipping it, and
664 * we're not forced to check it, then go ahead and skip.
665 * Otherwise, the page must be at least all-visible if not
666 * all-frozen, so we can set all_visible_according_to_vm = true.
668 if (skipping_blocks && !FORCE_CHECK_PAGE())
671 * Tricky, tricky. If this is in aggressive vacuum, the page
672 * must have been all-frozen at the time we checked whether it
673 * was skippable, but it might not be any more. We must be
674 * careful to count it as a skipped all-frozen page in that
675 * case, or else we'll think we can't update relfrozenxid and
676 * relminmxid. If it's not an aggressive vacuum, we don't
677 * know whether it was all-frozen, so we have to recheck; but
678 * in this case an approximate answer is OK.
680 if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
681 vacrelstats->frozenskipped_pages++;
684 all_visible_according_to_vm = true;
687 vacuum_delay_point();
690 * If we are close to overrunning the available space for dead-tuple
691 * TIDs, pause and do a cycle of vacuuming before we tackle this page.
693 if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
694 vacrelstats->num_dead_tuples > 0)
696 const int hvp_index[] = {
697 PROGRESS_VACUUM_PHASE,
698 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
703 * Before beginning index vacuuming, we release any pin we may
704 * hold on the visibility map page. This isn't necessary for
705 * correctness, but we do it anyway to avoid holding the pin
706 * across a lengthy, unrelated operation.
708 if (BufferIsValid(vmbuffer))
710 ReleaseBuffer(vmbuffer);
711 vmbuffer = InvalidBuffer;
714 /* Log cleanup info before we touch indexes */
715 vacuum_log_cleanup_info(onerel, vacrelstats);
717 /* Report that we are now vacuuming indexes */
718 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
719 PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
721 /* Remove index entries */
722 for (i = 0; i < nindexes; i++)
723 lazy_vacuum_index(Irel[i],
728 * Report that we are now vacuuming the heap. We also increase
729 * the number of index scans here; note that by using
730 * pgstat_progress_update_multi_param we can update both
731 * parameters atomically.
733 hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
734 hvp_val[1] = vacrelstats->num_index_scans + 1;
735 pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
737 /* Remove tuples from heap */
738 lazy_vacuum_heap(onerel, vacrelstats);
741 * Forget the now-vacuumed tuples, and press on, but be careful
742 * not to reset latestRemovedXid since we want that value to be valid.
745 vacrelstats->num_dead_tuples = 0;
746 vacrelstats->num_index_scans++;
748 /* Report that we are once again scanning the heap */
749 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
750 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
754 * Pin the visibility map page in case we need to mark the page
755 * all-visible. In most cases this will be very cheap, because we'll
756 * already have the correct page pinned anyway. However, it's
757 * possible that (a) next_unskippable_block is covered by a different
758 * VM page than the current block or (b) we released our pin and did a
759 * cycle of index vacuuming.
762 visibilitymap_pin(onerel, blkno, &vmbuffer);
764 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
765 RBM_NORMAL, vac_strategy);
767 /* We need buffer cleanup lock so that we can prune HOT chains. */
768 if (!ConditionalLockBufferForCleanup(buf))
771 * If we're not performing an aggressive scan to guard against XID
772 * wraparound, and we don't want to forcibly check the page, then
773 * it's OK to skip vacuuming pages we get a lock conflict on. They
774 * will be dealt with in some future vacuum.
776 if (!aggressive && !FORCE_CHECK_PAGE())
779 vacrelstats->pinskipped_pages++;
784 * Read the page with share lock to see if any xids on it need to
785 * be frozen. If not we just skip the page, after updating our
786 * scan statistics. If there are some, we wait for cleanup lock.
788 * We could defer the lock request further by remembering the page
789 * and coming back to it later, or we could even register
790 * ourselves for multiple buffers and then service whichever one
791 * is received first. For now, this seems good enough.
793 * If we get here with aggressive false, then we're just forcibly
794 * checking the page, and so we don't want to insist on getting
795 * the lock; we only need to know if the page contains tuples, so
796 * that we can update nonempty_pages correctly. It's convenient
797 * to use lazy_check_needs_freeze() for both situations, though.
799 LockBuffer(buf, BUFFER_LOCK_SHARE);
800 if (!lazy_check_needs_freeze(buf, &hastup))
802 UnlockReleaseBuffer(buf);
803 vacrelstats->scanned_pages++;
804 vacrelstats->pinskipped_pages++;
806 vacrelstats->nonempty_pages = blkno + 1;
812 * Here, we must not advance scanned_pages; that would amount
813 * to claiming that the page contains no freezable tuples.
815 UnlockReleaseBuffer(buf);
816 vacrelstats->pinskipped_pages++;
818 vacrelstats->nonempty_pages = blkno + 1;
821 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
822 LockBufferForCleanup(buf);
823 /* drop through to normal processing */
826 vacrelstats->scanned_pages++;
827 vacrelstats->tupcount_pages++;
829 page = BufferGetPage(buf);
834 * An all-zeroes page could be left over if a backend extends the
835 * relation but crashes before initializing the page. Reclaim such pages for use.
838 * We have to be careful here because we could be looking at a
839 * page that someone has just added to the relation and not yet
840 * been able to initialize (see RelationGetBufferForTuple). To
841 * protect against that, release the buffer lock, grab the
842 * relation extension lock momentarily, and re-lock the buffer. If
843 * the page is still uninitialized by then, it must be left over
844 * from a crashed backend, and we can initialize it.
846 * We don't really need the relation lock when this is a new or
847 * temp relation, but it's probably not worth the code space to
848 * check that, since this surely isn't a critical path.
850 * Note: the comparable code in vacuum.c need not worry because
851 * it's got exclusive lock on the whole relation.
853 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
854 LockRelationForExtension(onerel, ExclusiveLock);
855 UnlockRelationForExtension(onerel, ExclusiveLock);
856 LockBufferForCleanup(buf);
860 (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
862 PageInit(page, BufferGetPageSize(buf), 0);
865 freespace = PageGetHeapFreeSpace(page);
866 MarkBufferDirty(buf);
867 UnlockReleaseBuffer(buf);
869 RecordPageWithFreeSpace(onerel, blkno, freespace);
873 if (PageIsEmpty(page))
876 freespace = PageGetHeapFreeSpace(page);
878 /* empty pages are always all-visible and all-frozen */
879 if (!PageIsAllVisible(page))
881 START_CRIT_SECTION();
883 /* mark buffer dirty before writing a WAL record */
884 MarkBufferDirty(buf);
887 * It's possible that another backend has extended the heap,
888 * initialized the page, and then failed to WAL-log the page
889 * due to an ERROR. Since heap extension is not WAL-logged,
890 * recovery might try to replay our record setting the page
891 * all-visible and find that the page isn't initialized, which
892 * will cause a PANIC. To prevent that, check whether the
893 * page has been previously WAL-logged, and if not, do that now.
896 if (RelationNeedsWAL(onerel) &&
897 PageGetLSN(page) == InvalidXLogRecPtr)
898 log_newpage_buffer(buf, true);
900 PageSetAllVisible(page);
901 visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
902 vmbuffer, InvalidTransactionId,
903 VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
907 UnlockReleaseBuffer(buf);
908 RecordPageWithFreeSpace(onerel, blkno, freespace);
913 * Prune all HOT-update chains in this page.
915 * We count tuples removed by the pruning step as removed by VACUUM.
917 tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
918 &vacrelstats->latestRemovedXid);
921 * Now scan the page to collect vacuumable items and check for tuples
922 * requiring freezing.
925 has_dead_tuples = false;
928 prev_dead_count = vacrelstats->num_dead_tuples;
929 maxoff = PageGetMaxOffsetNumber(page);
932 * Note: If you change anything in the loop below, also look at
933 * heap_page_is_all_visible to see if that needs to be changed.
935 for (offnum = FirstOffsetNumber;
937 offnum = OffsetNumberNext(offnum))
941 itemid = PageGetItemId(page, offnum);
943 /* Unused items require no processing, but we count 'em */
944 if (!ItemIdIsUsed(itemid))
950 /* Redirect items mustn't be touched */
951 if (ItemIdIsRedirected(itemid))
953 hastup = true; /* this page won't be truncatable */
957 ItemPointerSet(&(tuple.t_self), blkno, offnum);
960 * DEAD item pointers are to be vacuumed normally; but we don't
961 * count them in tups_vacuumed, else we'd be double-counting (at
962 * least in the common case where heap_page_prune() just freed up a non-HOT tuple).
965 if (ItemIdIsDead(itemid))
967 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
972 Assert(ItemIdIsNormal(itemid));
974 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
975 tuple.t_len = ItemIdGetLength(itemid);
976 tuple.t_tableOid = RelationGetRelid(onerel);
980 switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
985 * Ordinarily, DEAD tuples would have been removed by
986 * heap_page_prune(), but it's possible that the tuple
987 * state changed since heap_page_prune() looked. In
988 * particular an INSERT_IN_PROGRESS tuple could have
989 * changed to DEAD if the inserter aborted. So this
990 * cannot be considered an error condition.
992 * If the tuple is HOT-updated then it must only be
993 * removed by a prune operation; so we keep it just as if
994 * it were RECENTLY_DEAD. Also, if it's a heap-only
995 * tuple, we choose to keep it, because it'll be a lot
996 * cheaper to get rid of it in the next pruning pass than
997 * to treat it like an indexed tuple.
999 if (HeapTupleIsHotUpdated(&tuple) ||
1000 HeapTupleIsHeapOnly(&tuple))
1003 tupgone = true; /* we can delete the tuple */
1004 all_visible = false;
1006 case HEAPTUPLE_LIVE:
1007 /* Tuple is good --- but let's do some validity checks */
1008 if (onerel->rd_rel->relhasoids &&
1009 !OidIsValid(HeapTupleGetOid(&tuple)))
1010 elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
1011 relname, blkno, offnum);
1014 * Is the tuple definitely visible to all transactions?
1016 * NB: Like with per-tuple hint bits, we can't set the
1017 * PD_ALL_VISIBLE flag if the inserter committed
1018 * asynchronously. See SetHintBits for more info. Check
1019 * that the tuple is hinted xmin-committed because of that.
1026 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1028 all_visible = false;
1033 * The inserter definitely committed. But is it old
1034 * enough that everyone sees it as committed?
1036 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1037 if (!TransactionIdPrecedes(xmin, OldestXmin))
1039 all_visible = false;
1043 /* Track newest xmin on page. */
1044 if (TransactionIdFollows(xmin, visibility_cutoff_xid))
1045 visibility_cutoff_xid = xmin;
1048 case HEAPTUPLE_RECENTLY_DEAD:
1051 * If tuple is recently deleted then we must not remove it from the relation.
1055 all_visible = false;
1057 case HEAPTUPLE_INSERT_IN_PROGRESS:
1058 /* This is an expected case during concurrent vacuum */
1059 all_visible = false;
1061 case HEAPTUPLE_DELETE_IN_PROGRESS:
1062 /* This is an expected case during concurrent vacuum */
1063 all_visible = false;
1066 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1072 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
1073 HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
1074 &vacrelstats->latestRemovedXid);
1076 has_dead_tuples = true;
1080 bool tuple_totally_frozen;
1086 * Each non-removable tuple must be checked to see if it needs
1087 * freezing. Note we already have exclusive buffer lock.
1089 if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
1090 MultiXactCutoff, &frozen[nfrozen],
1091 &tuple_totally_frozen))
1092 frozen[nfrozen++].offset = offnum;
1094 if (!tuple_totally_frozen)
1097 } /* scan along page */
1100 * If we froze any tuples, mark the buffer dirty, and write a WAL
1101 * record recording the changes. We must log the changes to be
1102 * crash-safe against future truncation of CLOG.
1106 START_CRIT_SECTION();
1108 MarkBufferDirty(buf);
1110 /* execute collected freezes */
1111 for (i = 0; i < nfrozen; i++)
1114 HeapTupleHeader htup;
1116 itemid = PageGetItemId(page, frozen[i].offset);
1117 htup = (HeapTupleHeader) PageGetItem(page, itemid);
1119 heap_execute_freeze_tuple(htup, &frozen[i]);
1122 /* Now WAL-log freezing if necessary */
1123 if (RelationNeedsWAL(onerel))
1127 recptr = log_heap_freeze(onerel, buf, FreezeLimit,
1129 PageSetLSN(page, recptr);
1136 * If there are no indexes then we can vacuum the page right now
1137 * instead of doing a second scan.
1139 if (nindexes == 0 &&
1140 vacrelstats->num_dead_tuples > 0)
1142 /* Remove tuples from heap */
1143 lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
1144 has_dead_tuples = false;
1147 * Forget the now-vacuumed tuples, and press on, but be careful
1148 * not to reset latestRemovedXid since we want that value to be valid.
1151 vacrelstats->num_dead_tuples = 0;
1155 freespace = PageGetHeapFreeSpace(page);
1157 /* mark page all-visible, if appropriate */
1158 if (all_visible && !all_visible_according_to_vm)
1160 uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
1163 flags |= VISIBILITYMAP_ALL_FROZEN;
1166 * It should never be the case that the visibility map page is set
1167 * while the page-level bit is clear, but the reverse is allowed
1168 * (if checksums are not enabled). Regardless, set both bits
1169 * so that we get back in sync.
1171 * NB: If the heap page is all-visible but the VM bit is not set,
1172 * we don't need to dirty the heap page. However, if checksums
1173 * are enabled, we do need to make sure that the heap page is
1174 * dirtied before passing it to visibilitymap_set(), because it
1175 * may be logged. Given that this situation should only happen in
1176 * rare cases after a crash, it is not worth optimizing.
1178 PageSetAllVisible(page);
1179 MarkBufferDirty(buf);
1180 visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1181 vmbuffer, visibility_cutoff_xid, flags);
1185 * As of PostgreSQL 9.2, the visibility map bit should never be set if
1186 * the page-level bit is clear. However, it's possible that the bit
1187 * got cleared after we checked it and before we took the buffer
1188 * content lock, so we must recheck before jumping to the conclusion
1189 * that something bad has happened.
1191 else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1192 && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
1194 elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1196 visibilitymap_clear(onerel, blkno, vmbuffer,
1197 VISIBILITYMAP_VALID_BITS);
1201 * It's possible for the value returned by GetOldestXmin() to move
1202 * backwards, so it's not wrong for us to see tuples that appear to
1203 * not be visible to everyone yet, while PD_ALL_VISIBLE is already
1204 * set. The real safe xmin value never moves backwards, but
1205 * GetOldestXmin() is conservative and sometimes returns a value
1206 * that's unnecessarily small, so if we see that contradiction it just
1207 * means that the tuples that we think are not visible to everyone yet
1208 * actually are, and the PD_ALL_VISIBLE flag is correct.
1210 * There should never be dead tuples on a page with PD_ALL_VISIBLE set, however.
1213 else if (PageIsAllVisible(page) && has_dead_tuples)
1215 elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
1217 PageClearAllVisible(page);
1218 MarkBufferDirty(buf);
1219 visibilitymap_clear(onerel, blkno, vmbuffer,
1220 VISIBILITYMAP_VALID_BITS);
1224 * If the all-visible page turns out to be all-frozen but not
1225 * marked, we should so mark it. Note that all_frozen is only valid
1226 * if all_visible is true, so we must check both.
1228 else if (all_visible_according_to_vm && all_visible && all_frozen &&
1229 !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
1232 * We can pass InvalidTransactionId as the cutoff XID here,
1233 * because setting the all-frozen bit doesn't cause recovery conflicts.
1236 visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1237 vmbuffer, InvalidTransactionId,
1238 VISIBILITYMAP_ALL_FROZEN);
1241 UnlockReleaseBuffer(buf);
1243 /* Remember the location of the last page with nonremovable tuples */
1245 vacrelstats->nonempty_pages = blkno + 1;
1248 * If we remembered any tuples for deletion, then the page will be
1249 * visited again by lazy_vacuum_heap, which will compute and record
1250 * its post-compaction free space. If not, then we're done with this
1251 * page, so remember its free space as-is. (This path will always be
1252 * taken if there are no indexes.)
1254 if (vacrelstats->num_dead_tuples == prev_dead_count)
1255 RecordPageWithFreeSpace(onerel, blkno, freespace);
1258 /* report that everything is scanned and vacuumed */
1259 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1263 /* save stats for use later */
1264 vacrelstats->scanned_tuples = num_tuples;
1265 vacrelstats->tuples_deleted = tups_vacuumed;
1266 vacrelstats->new_dead_tuples = nkeep;
1268 /* now we can compute the new value for pg_class.reltuples */
1269 vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
1271 vacrelstats->tupcount_pages,
1275 * Release any remaining pin on visibility map page.
1277 if (BufferIsValid(vmbuffer))
1279 ReleaseBuffer(vmbuffer);
1280 vmbuffer = InvalidBuffer;
1283 /* If any tuples need to be deleted, perform final vacuum cycle */
1284 /* XXX put a threshold on min number of tuples here? */
1285 if (vacrelstats->num_dead_tuples > 0)
1287 const int hvp_index[] = {
1288 PROGRESS_VACUUM_PHASE,
1289 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
1293 /* Log cleanup info before we touch indexes */
1294 vacuum_log_cleanup_info(onerel, vacrelstats);
1296 /* Report that we are now vacuuming indexes */
1297 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1298 PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
1300 /* Remove index entries */
1301 for (i = 0; i < nindexes; i++)
1302 lazy_vacuum_index(Irel[i],
1306 /* Report that we are now vacuuming the heap */
1307 hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
1308 hvp_val[1] = vacrelstats->num_index_scans + 1;
1309 pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
1311 /* Remove tuples from heap */
1312 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1313 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
1314 lazy_vacuum_heap(onerel, vacrelstats);
1315 vacrelstats->num_index_scans++;
1318 /* report all blocks vacuumed; and that we're cleaning up */
1319 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1320 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1321 PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
1323 /* Do post-vacuum cleanup and statistics update for each index */
1324 for (i = 0; i < nindexes; i++)
1325 lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
1327 /* If no indexes, make log report that lazy_vacuum_heap would've made */
1330 (errmsg("\"%s\": removed %.0f row versions in %u pages",
1331 RelationGetRelationName(onerel),
1332 tups_vacuumed, vacuumed_pages)));
1335 * This is pretty messy, but we split it up so that we can skip emitting
1336 * individual parts of the message when not applicable.
1338 initStringInfo(&buf);
1339 appendStringInfo(&buf,
1340 _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
1342 appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
1344 appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
1345 "Skipped %u pages due to buffer pins, ",
1346 vacrelstats->pinskipped_pages),
1347 vacrelstats->pinskipped_pages);
1348 appendStringInfo(&buf, ngettext("%u frozen page.\n",
1349 "%u frozen pages.\n",
1350 vacrelstats->frozenskipped_pages),
1351 vacrelstats->frozenskipped_pages);
1352 appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
1353 "%u pages are entirely empty.\n",
1356 appendStringInfo(&buf, "%s.", pg_rusage_show(&ru0));
1359 (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1360 RelationGetRelationName(onerel),
1361 tups_vacuumed, num_tuples,
1362 vacrelstats->scanned_pages, nblocks),
1363 errdetail_internal("%s", buf.data)));
1369 * lazy_vacuum_heap() -- second pass over the heap
1371 * This routine marks dead tuples as unused and compacts out free
1372 * space on their pages. Pages not having dead tuples recorded from
1373 * lazy_scan_heap are not visited at all.
1375 * Note: the reason for doing this as a second pass is we cannot remove
1376 * the tuples until we've removed their index entries, and we want to
1377 * process index entry removal in batches as large as possible.
1380 lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
1385 Buffer vmbuffer = InvalidBuffer;
1387 pg_rusage_init(&ru0);
1391 while (tupindex < vacrelstats->num_dead_tuples)
1398 vacuum_delay_point();
1400 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1401 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
1403 if (!ConditionalLockBufferForCleanup(buf))
1409 tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1412 /* Now that we've compacted the page, record its available space */
1413 page = BufferGetPage(buf);
1414 freespace = PageGetHeapFreeSpace(page);
1416 UnlockReleaseBuffer(buf);
1417 RecordPageWithFreeSpace(onerel, tblk, freespace);
1421 if (BufferIsValid(vmbuffer))
1423 ReleaseBuffer(vmbuffer);
1424 vmbuffer = InvalidBuffer;
1428 (errmsg("\"%s\": removed %d row versions in %d pages",
1429 RelationGetRelationName(onerel),
1431 errdetail_internal("%s", pg_rusage_show(&ru0))));
1435 * lazy_vacuum_page() -- free dead tuples on a page
1436 * and repair its fragmentation.
1438 * Caller must hold pin and buffer cleanup lock on the buffer.
1440 * tupindex is the index in vacrelstats->dead_tuples of the first dead
1441 * tuple for this page. We assume the rest follow sequentially.
1442 * The return value is the first tupindex after the tuples of this page.
1445 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
1446 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
1448 Page page = BufferGetPage(buffer);
1449 OffsetNumber unused[MaxOffsetNumber];
1451 TransactionId visibility_cutoff_xid;
1454 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1456 START_CRIT_SECTION();
1458 for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
1464 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1466 break; /* past end of tuples for this block */
1467 toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
1468 itemid = PageGetItemId(page, toff);
1469 ItemIdSetUnused(itemid);
1470 unused[uncnt++] = toff;
1473 PageRepairFragmentation(page);
1476 * Mark buffer dirty before we write WAL.
1478 MarkBufferDirty(buffer);
1481 if (RelationNeedsWAL(onerel))
1485 recptr = log_heap_clean(onerel, buffer,
1488 vacrelstats->latestRemovedXid);
1489 PageSetLSN(page, recptr);
1493 * End critical section, so we can safely do visibility tests (which
1494 * possibly need to perform IO and allocate memory!). If we crash now the
1495 * page (including the corresponding vm bit) might not be marked all
1496 * visible, but that's fine. A later vacuum will fix that.
1501 * Now that we have removed the dead tuples from the page, once again
1502 * check if the page has become all-visible. The page is already marked
1503 * dirty, exclusively locked, and, if needed, a full page image has been
1504 * emitted in the log_heap_clean() above.
1506 if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
1508 PageSetAllVisible(page);
1511 * All the changes to the heap page have been done. If the all-visible
1512 * flag is now set, also set the VM all-visible bit (and, if possible, the
1513 * all-frozen bit) unless this has already been done previously.
1515 if (PageIsAllVisible(page))
1517 uint8 vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
1520 /* Figure out which VM bits still need to be set, collecting them in 'flags' */
1521 if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
1522 flags |= VISIBILITYMAP_ALL_VISIBLE;
1523 if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
1524 flags |= VISIBILITYMAP_ALL_FROZEN;
1526 Assert(BufferIsValid(*vmbuffer));
1528 visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
1529 *vmbuffer, visibility_cutoff_xid, flags);
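/*
 * Example (illustrative): if the VM previously had neither bit set and
 * heap_page_is_all_visible() reported all_frozen, the code above passes
 * flags = VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN, bringing
 * the visibility map fully up to date in one visibilitymap_set() call.
 */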
1536 * lazy_check_needs_freeze() -- scan page to see if any tuples
1537 * need to be cleaned to avoid wraparound
1539 * Returns true if the page needs to be vacuumed using cleanup lock.
1540 * Also returns a flag indicating whether page contains any tuples at all.
1543 lazy_check_needs_freeze(Buffer buf, bool *hastup)
1545 Page page = BufferGetPage(buf);
1546 OffsetNumber offnum,
1548 HeapTupleHeader tupleheader;
1552 /* If we hit an uninitialized page, we want to force vacuuming it. */
1553 if (PageIsNew(page))
1556 /* Quick out for ordinary empty page. */
1557 if (PageIsEmpty(page))
1560 maxoff = PageGetMaxOffsetNumber(page);
1561 for (offnum = FirstOffsetNumber;
1563 offnum = OffsetNumberNext(offnum))
1567 itemid = PageGetItemId(page, offnum);
1569 /* this should match hastup test in count_nondeletable_pages() */
1570 if (ItemIdIsUsed(itemid))
1573 /* dead and redirect items never need freezing */
1574 if (!ItemIdIsNormal(itemid))
1577 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1579 if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1580 MultiXactCutoff, buf))
1582 } /* scan along page */
1589 * lazy_vacuum_index() -- vacuum one index relation.
1591 * Delete all the index entries pointing to tuples listed in
1592 * vacrelstats->dead_tuples, and update running statistics.
1595 lazy_vacuum_index(Relation indrel,
1596 IndexBulkDeleteResult **stats,
1597 LVRelStats *vacrelstats)
1599 IndexVacuumInfo ivinfo;
1602 pg_rusage_init(&ru0);
1604 ivinfo.index = indrel;
1605 ivinfo.analyze_only = false;
1606 ivinfo.estimated_count = true;
1607 ivinfo.message_level = elevel;
1608 ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
1609 ivinfo.strategy = vac_strategy;
1611 /* Do bulk deletion */
1612 *stats = index_bulk_delete(&ivinfo, *stats,
1613 lazy_tid_reaped, (void *) vacrelstats);
1616 (errmsg("scanned index \"%s\" to remove %d row versions",
1617 RelationGetRelationName(indrel),
1618 vacrelstats->num_dead_tuples),
1619 errdetail_internal("%s", pg_rusage_show(&ru0))));
1623 * lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
1626 lazy_cleanup_index(Relation indrel,
1627 IndexBulkDeleteResult *stats,
1628 LVRelStats *vacrelstats)
1630 IndexVacuumInfo ivinfo;
1633 pg_rusage_init(&ru0);
1635 ivinfo.index = indrel;
1636 ivinfo.analyze_only = false;
1637 ivinfo.estimated_count = (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
1638 ivinfo.message_level = elevel;
1639 ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
1640 ivinfo.strategy = vac_strategy;
1642 stats = index_vacuum_cleanup(&ivinfo, stats);
1648 * Now update statistics in pg_class, but only if the index says the count is accurate.
1651 if (!stats->estimated_count)
1652 vac_update_relstats(indrel,
1654 stats->num_index_tuples,
1657 InvalidTransactionId,
1662 (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
1663 RelationGetRelationName(indrel),
1664 stats->num_index_tuples,
1666 errdetail("%.0f index row versions were removed.\n"
1667 "%u index pages have been deleted, %u are currently reusable.\n"
1669 stats->tuples_removed,
1670 stats->pages_deleted, stats->pages_free,
1671 pg_rusage_show(&ru0))));
1677 * should_attempt_truncation - should we attempt to truncate the heap?
1679 * Don't even think about it unless we have a shot at releasing a goodly
1680 * number of pages. Otherwise, the time taken isn't worth it.
1682 * Also don't attempt it if we are doing early pruning/vacuuming, because a
1683 * scan which cannot find a truncated heap page cannot determine that the
1684 * snapshot is too old to read that page. We might be able to get away with
1685 * truncating all except one of the pages, setting its LSN to (at least) the
1686 * maximum of the truncated range if we also treated an index leaf tuple
1687 * pointing to a missing heap page as something to trigger the "snapshot too
1688 * old" error, but that seems fragile and seems like it deserves its own patch
1689 * if we consider it.
1691 * This is split out so that we can test whether truncation is going to be
1692 * called for before we actually do it. If you change the logic here, be
1693 * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
1696 should_attempt_truncation(LVRelStats *vacrelstats)
1698 BlockNumber possibly_freeable;
1700 possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
1701 if (possibly_freeable > 0 &&
1702 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
1703 possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
1704 old_snapshot_threshold < 0)
1711 * lazy_truncate_heap - try to truncate off any empty pages at the end
1714 lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
1716 BlockNumber old_rel_pages = vacrelstats->rel_pages;
1717 BlockNumber new_rel_pages;
1721 pg_rusage_init(&ru0);
1723 /* Report that we are now truncating */
1724 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1725 PROGRESS_VACUUM_PHASE_TRUNCATE);
1728 * Loop until no more truncating can be done.
1733 * We need full exclusive lock on the relation in order to do
1734 * truncation. If we can't get it, give up rather than waiting --- we
1735 * don't want to block other backends, and we don't want to deadlock
1736 * (which is quite possible considering we already hold a lower-grade lock).
1739 vacrelstats->lock_waiter_detected = false;
1743 if (ConditionalLockRelation(onerel, AccessExclusiveLock))
1747 * Check for interrupts while trying to (re-)acquire the exclusive
1750 CHECK_FOR_INTERRUPTS();
1752 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
1753 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
1756 * We failed to establish the lock in the specified number of
1757 * retries. This means we give up truncating.
1759 vacrelstats->lock_waiter_detected = true;
1761 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
1762 RelationGetRelationName(onerel))));
1766 pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
1770 * Now that we have exclusive lock, look to see if the rel has grown
1771 * whilst we were vacuuming with non-exclusive lock. If so, give up;
1772 * the newly added pages presumably contain non-deletable tuples.
1774 new_rel_pages = RelationGetNumberOfBlocks(onerel);
1775 if (new_rel_pages != old_rel_pages)
1778 * Note: we intentionally don't update vacrelstats->rel_pages with
1779 * the new rel size here. If we did, it would amount to assuming
1780 * that the new pages are empty, which is unlikely. Leaving the
1781 * numbers alone amounts to assuming that the new pages have the
1782 * same tuple density as existing ones, which is less unlikely.
1784 UnlockRelation(onerel, AccessExclusiveLock);
1789 * Scan backwards from the end to verify that the end pages actually
1790 * contain no tuples. This is *necessary*, not optional, because
1791 * other backends could have added tuples to these pages whilst we were vacuuming.
1794 new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1796 if (new_rel_pages >= old_rel_pages)
1798 /* can't do anything after all */
1799 UnlockRelation(onerel, AccessExclusiveLock);
1806 RelationTruncate(onerel, new_rel_pages);
1809 * We can release the exclusive lock as soon as we have truncated.
1810 * Other backends can't safely access the relation until they have
1811 * processed the smgr invalidation that smgrtruncate sent out ... but
1812 * that should happen as part of standard invalidation processing once
1813 * they acquire lock on the relation.
1815 UnlockRelation(onerel, AccessExclusiveLock);
1818 * Update statistics. Here, it *is* correct to adjust rel_pages
1819 * without also touching reltuples, since the tuple count wasn't
1820 * changed by the truncation.
1822 vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
1823 vacrelstats->rel_pages = new_rel_pages;
1826 (errmsg("\"%s\": truncated %u to %u pages",
1827 RelationGetRelationName(onerel),
1828 old_rel_pages, new_rel_pages),
1829 errdetail_internal("%s",
1830 pg_rusage_show(&ru0))));
1831 old_rel_pages = new_rel_pages;
1832 } while (new_rel_pages > vacrelstats->nonempty_pages &&
1833 vacrelstats->lock_waiter_detected);
1837 * Rescan end pages to verify that they are (still) empty of tuples.
1839 * Returns number of nondeletable pages (last nonempty page + 1).
1842 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
1845 BlockNumber prefetchedUntil;
1846 instr_time starttime;
1848 /* Initialize the starttime if we check for conflicting lock requests */
1849 INSTR_TIME_SET_CURRENT(starttime);
1852 * Start checking blocks at what we believe relation end to be and move
1853 * backwards. (Strange coding of loop control is needed because blkno is
1854 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
1855 * in forward direction, so that OS-level readahead can kick in.
1857 blkno = vacrelstats->rel_pages;
1858 StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
1859 "prefetch size must be power of 2");
1860 prefetchedUntil = InvalidBlockNumber;
1861 while (blkno > vacrelstats->nonempty_pages)
1865 OffsetNumber offnum,
1870 * Check if another process requests a lock on our relation. We are
1871 * holding an AccessExclusiveLock here, so they will be waiting. We
1872 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
1873 * only check if that interval has elapsed once every 32 blocks to
1874 * keep the number of system calls and actual shared lock table
1875 * lookups to a minimum.
1877 if ((blkno % 32) == 0)
1879 instr_time currenttime;
1882 INSTR_TIME_SET_CURRENT(currenttime);
1883 elapsed = currenttime;
1884 INSTR_TIME_SUBTRACT(elapsed, starttime);
1885 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
1886 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
1888 if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
1891 (errmsg("\"%s\": suspending truncate due to conflicting lock request",
1892 RelationGetRelationName(onerel))));
1894 vacrelstats->lock_waiter_detected = true;
1897 starttime = currenttime;
1902 * We don't insert a vacuum delay point here, because we have an
1903 * exclusive lock on the table which we want to hold for as short a
1904 * time as possible. We still need to check for interrupts however.
1906 CHECK_FOR_INTERRUPTS();
1910 /* If we haven't prefetched this lot yet, do so now. */
1911 if (prefetchedUntil > blkno)
1913 BlockNumber prefetchStart;
1916 prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
1917 for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
1919 PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
1920 CHECK_FOR_INTERRUPTS();
1922 prefetchedUntil = prefetchStart;
1925 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
1926 RBM_NORMAL, vac_strategy);
1928 /* In this phase we only need shared access to the buffer */
1929 LockBuffer(buf, BUFFER_LOCK_SHARE);
1931 page = BufferGetPage(buf);
1933 if (PageIsNew(page) || PageIsEmpty(page))
1935 /* PageIsNew probably shouldn't happen... */
1936 UnlockReleaseBuffer(buf);
1941 maxoff = PageGetMaxOffsetNumber(page);
1942 for (offnum = FirstOffsetNumber;
1944 offnum = OffsetNumberNext(offnum))
1948 itemid = PageGetItemId(page, offnum);
1951 * Note: any non-unused item should be taken as a reason to keep
1952 * this page. We formerly thought that DEAD tuples could be
1953 * thrown away, but that's not so, because we'd not have cleaned
1954 * out their index entries.
1956 if (ItemIdIsUsed(itemid))
1959 break; /* can stop scanning */
1961 } /* scan along page */
1963 UnlockReleaseBuffer(buf);
1965 /* Done scanning if we found a tuple here */
1971 * If we fall out of the loop, all the previously-thought-to-be-empty
1972 * pages still are; we need not bother to look at the last known-nonempty page.
1975 return vacrelstats->nonempty_pages;
1979 * lazy_space_alloc - space allocation decisions for lazy vacuum
1981 * See the comments at the head of this file for rationale.
1984 lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
1987 int vac_work_mem = IsAutoVacuumWorkerProcess() &&
1988 autovacuum_work_mem != -1 ?
1989 autovacuum_work_mem : maintenance_work_mem;
1991 if (vacrelstats->hasindex)
1993 maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
1994 maxtuples = Min(maxtuples, INT_MAX);
1995 maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
1997 /* curious coding here to ensure the multiplication can't overflow */
1998 if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
1999 maxtuples = relblocks * LAZY_ALLOC_TUPLES;
2001 /* stay sane if small maintenance_work_mem */
2002 maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
2006 maxtuples = MaxHeapTuplesPerPage;
2009 vacrelstats->num_dead_tuples = 0;
2010 vacrelstats->max_dead_tuples = (int) maxtuples;
2011 vacrelstats->dead_tuples = (ItemPointer)
2012 palloc(maxtuples * sizeof(ItemPointerData));
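/*
 * Illustrative arithmetic: with maintenance_work_mem = 64 MB and 6-byte
 * ItemPointerData, the memory cap alone allows 64 * 1024 * 1024 / 6 =
 * 11184810 TIDs, but a 1000-block table clamps that to
 * 1000 * LAZY_ALLOC_TUPLES = 291000 TIDs (8 kB pages), i.e. roughly
 * 1.7 MB actually palloc'd.
 */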
2016 * lazy_record_dead_tuple - remember one deletable tuple
2019 lazy_record_dead_tuple(LVRelStats *vacrelstats,
2020 ItemPointer itemptr)
2023 * The array shouldn't overflow under normal behavior, but perhaps it
2024 * could if we are given a really small maintenance_work_mem. In that
2025 * case, just forget the last few tuples (we'll get 'em next time).
2027 if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
2029 vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
2030 vacrelstats->num_dead_tuples++;
2031 pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2032 vacrelstats->num_dead_tuples);
2037 * lazy_tid_reaped() -- is a particular tid deletable?
2039 * This has the right signature to be an IndexBulkDeleteCallback.
2041 * Assumes dead_tuples array is in sorted order.
2044 lazy_tid_reaped(ItemPointer itemptr, void *state)
2046 LVRelStats *vacrelstats = (LVRelStats *) state;
2049 res = (ItemPointer) bsearch((void *) itemptr,
2050 (void *) vacrelstats->dead_tuples,
2051 vacrelstats->num_dead_tuples,
2052 sizeof(ItemPointerData),
2055 return (res != NULL);
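/*
 * Illustrative use (a sketch, not code from this file): index AMs reach
 * this callback through the index_bulk_delete() call above; conceptually,
 *
 *		ItemPointerData tid;
 *		ItemPointerSet(&tid, 42, 7);
 *		if (lazy_tid_reaped(&tid, (void *) vacrelstats))
 *			-- the index entry pointing at heap TID (42,7) is deleted
 *
 * The bsearch is valid because lazy_scan_heap() records dead tuples in
 * physical block-then-offset order, so dead_tuples is already sorted
 * per vac_cmp_itemptr.
 */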
2059 * Comparator routines for use with qsort() and bsearch().
2062 vac_cmp_itemptr(const void *left, const void *right)
2069 lblk = ItemPointerGetBlockNumber((ItemPointer) left);
2070 rblk = ItemPointerGetBlockNumber((ItemPointer) right);
2077 loff = ItemPointerGetOffsetNumber((ItemPointer) left);
2078 roff = ItemPointerGetOffsetNumber((ItemPointer) right);
2089 * Check if every tuple in the given page is visible to all current and future
2090 * transactions. Also return the visibility_cutoff_xid which is the highest
2091 * xmin amongst the visible tuples. Set *all_frozen to true if every tuple
2092 * on this page is frozen.
2095 heap_page_is_all_visible(Relation rel, Buffer buf,
2096 TransactionId *visibility_cutoff_xid,
2099 Page page = BufferGetPage(buf);
2100 BlockNumber blockno = BufferGetBlockNumber(buf);
2101 OffsetNumber offnum,
2103 bool all_visible = true;
2105 *visibility_cutoff_xid = InvalidTransactionId;
2109 * This is a stripped down version of the line pointer scan in
2110 * lazy_scan_heap(). So if you change anything here, also check that code.
2112 maxoff = PageGetMaxOffsetNumber(page);
2113 for (offnum = FirstOffsetNumber;
2114 offnum <= maxoff && all_visible;
2115 offnum = OffsetNumberNext(offnum))
2118 HeapTupleData tuple;
2120 itemid = PageGetItemId(page, offnum);
2122 /* Unused or redirect line pointers are of no interest */
2123 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2126 ItemPointerSet(&(tuple.t_self), blockno, offnum);
2129 * Dead line pointers can have index pointers pointing to them. So
2130 * they can't be treated as visible.
2132 if (ItemIdIsDead(itemid))
2134 all_visible = false;
2135 *all_frozen = false;
2139 Assert(ItemIdIsNormal(itemid));
2141 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2142 tuple.t_len = ItemIdGetLength(itemid);
2143 tuple.t_tableOid = RelationGetRelid(rel);
2145 switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
2147 case HEAPTUPLE_LIVE:
2151 /* Check comments in lazy_scan_heap. */
2152 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
2154 all_visible = false;
2155 *all_frozen = false;
2160 * The inserter definitely committed. But is it old enough
2161 * that everyone sees it as committed?
2163 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
2164 if (!TransactionIdPrecedes(xmin, OldestXmin))
2166 all_visible = false;
2167 *all_frozen = false;
2171 /* Track newest xmin on page. */
2172 if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
2173 *visibility_cutoff_xid = xmin;
2175 /* Check whether this tuple is already frozen or not */
2176 if (all_visible && *all_frozen &&
2177 heap_tuple_needs_eventual_freeze(tuple.t_data))
2178 *all_frozen = false;
2182 case HEAPTUPLE_DEAD:
2183 case HEAPTUPLE_RECENTLY_DEAD:
2184 case HEAPTUPLE_INSERT_IN_PROGRESS:
2185 case HEAPTUPLE_DELETE_IN_PROGRESS:
2187 all_visible = false;
2188 *all_frozen = false;
2192 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2195 } /* scan along page */