1 /*-------------------------------------------------------------------------
 * vacuumlazy.c
 *	  Concurrent ("lazy") vacuuming.
7 * The major space usage for LAZY VACUUM is storage for the array of dead
8 * tuple TIDs, with the next biggest need being storage for per-disk-page
9 * free space info. We want to ensure we can vacuum even the very largest
10 * relations with finite memory space usage. To do that, we set upper bounds
11 * on the number of tuples and pages we will keep track of at once.
13 * We are willing to use at most maintenance_work_mem (or perhaps
14 * autovacuum_work_mem) memory space to keep track of dead tuples. We
15 * initially allocate an array of TIDs of that size, with an upper limit that
16 * depends on table size (this limit ensures we don't allocate a huge area
17 * uselessly for vacuuming small tables). If the array threatens to overflow,
18 * we suspend the heap scan phase and perform a pass of index cleanup and page
19 * compaction, then resume the heap scan with an empty TID array.
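 *
 * (Illustrative arithmetic, not part of the original comment: assuming a
 * maintenance_work_mem of 64MB and sizeof(ItemPointerData) == 6 bytes, the
 * array can hold about 64 * 1024 * 1024 / 6 ~= 11.1 million dead-tuple TIDs.
 * Because we never allocate more than LAZY_ALLOC_TUPLES slots per heap page
 * (roughly 291 with 8kB blocks), tables smaller than about 38,000 pages,
 * i.e. ~300MB, get a proportionally smaller allocation.)
 *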
21 * If we're processing a table with no indexes, we can just vacuum each page
22 * as we go; there's no need to save up multiple tuples to minimize the number
23 * of index scans performed. So we don't use maintenance_work_mem memory for
24 * the TID array, just enough to hold as many heap tuples as fit on one page.
27 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
28 * Portions Copyright (c) 1994, Regents of the University of California
32 * src/backend/commands/vacuumlazy.c
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

40 #include "access/genam.h"
41 #include "access/heapam.h"
42 #include "access/heapam_xlog.h"
43 #include "access/htup_details.h"
44 #include "access/multixact.h"
45 #include "access/transam.h"
46 #include "access/visibilitymap.h"
47 #include "access/xlog.h"
48 #include "catalog/catalog.h"
49 #include "catalog/storage.h"
50 #include "commands/dbcommands.h"
51 #include "commands/vacuum.h"
52 #include "miscadmin.h"
54 #include "portability/instr_time.h"
55 #include "postmaster/autovacuum.h"
56 #include "storage/bufmgr.h"
57 #include "storage/freespace.h"
58 #include "storage/lmgr.h"
59 #include "utils/lsyscache.h"
60 #include "utils/memutils.h"
61 #include "utils/pg_rusage.h"
62 #include "utils/timestamp.h"
63 #include "utils/tqual.h"
/*
 * Space/time tradeoff parameters: do these need to be user-tunable?
 *
69 * To consider truncating the relation, we want there to be at least
70 * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
 * is less) potentially-freeable pages.
 */
73 #define REL_TRUNCATE_MINIMUM 1000
74 #define REL_TRUNCATE_FRACTION 16
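/*
 * Worked example (illustrative, not part of the original comment): for an
 * 8,000-page table (about 62MB with 8kB blocks), relsize / REL_TRUNCATE_FRACTION
 * = 500, which is below REL_TRUNCATE_MINIMUM, so 500 potentially-freeable
 * trailing pages are enough to attempt truncation; for a 1,000,000-page table
 * the 1000-page minimum governs instead.
 */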
/*
 * Timing parameters for truncate locking heuristics.
 *
79 * These were not exposed as user tunable GUC values because it didn't seem
 * that the potential for improvement was great enough to merit the cost of
 * supporting them.
 */
83 #define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
84 #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
85 #define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
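/*
 * Illustrative note (numbers follow from the settings above; the note itself
 * is not part of the original file): the truncate code retries the exclusive
 * lock once per VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL for at most
 * 5000 / 50 = 100 attempts before giving up, and while scanning backwards it
 * checks for lock waiters at most once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL.
 */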
/*
 * Guesstimation of number of dead tuples per page.  This is used to
 * provide an upper limit to memory allocated when vacuuming small
 * tables.
 */
92 #define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage
/*
 * Before we consider skipping a page that's marked as clean in
 * visibility map, we must've seen at least this many clean pages.
 */
98 #define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)
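/*
 * Illustrative note (not from the original sources): with the default 8kB
 * block size, 32 consecutive all-visible pages is 256kB of heap, so skipping
 * only engages when it avoids a reasonably large sequential read and won't
 * defeat OS readahead for trivially short runs.
 */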
typedef struct LVRelStats
{
	/* hasindex = true means two-pass strategy; false means one-pass */
	bool		hasindex;

104 /* Overall statistics about rel */
105 BlockNumber old_rel_pages; /* previous value of pg_class.relpages */
106 BlockNumber rel_pages; /* total number of pages */
107 BlockNumber scanned_pages; /* number of pages we examined */
108 BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */
109 double scanned_tuples; /* counts only tuples on scanned pages */
110 double old_rel_tuples; /* previous value of pg_class.reltuples */
111 double new_rel_tuples; /* new estimated total # of tuples */
112 double new_dead_tuples; /* new estimated total # of dead tuples */
113 BlockNumber pages_removed;
114 double tuples_deleted;
115 BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
116 /* List of TIDs of tuples we intend to delete */
117 /* NB: this list is ordered by TID address */
118 int num_dead_tuples; /* current # of entries */
119 int max_dead_tuples; /* # slots allocated in array */
	ItemPointer dead_tuples;	/* array of ItemPointerData */
	int			num_index_scans;
	TransactionId latestRemovedXid;
	bool		lock_waiter_detected;
} LVRelStats;
127 /* A few variables that don't seem worth passing around as parameters */
128 static int elevel = -1;
130 static TransactionId OldestXmin;
131 static TransactionId FreezeLimit;
132 static MultiXactId MultiXactCutoff;
134 static BufferAccessStrategy vac_strategy;
137 /* non-export function prototypes */
138 static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
139 Relation *Irel, int nindexes, bool scan_all);
140 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
141 static bool lazy_check_needs_freeze(Buffer buf);
142 static void lazy_vacuum_index(Relation indrel,
143 IndexBulkDeleteResult **stats,
144 LVRelStats *vacrelstats);
145 static void lazy_cleanup_index(Relation indrel,
146 IndexBulkDeleteResult *stats,
147 LVRelStats *vacrelstats);
148 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
149 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
150 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
151 static BlockNumber count_nondeletable_pages(Relation onerel,
152 LVRelStats *vacrelstats);
153 static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
154 static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
155 ItemPointer itemptr);
156 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
157 static int vac_cmp_itemptr(const void *left, const void *right);
158 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
159 TransactionId *visibility_cutoff_xid);
163 * lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
165 * This routine vacuums a single heap, cleans out its indexes, and
166 * updates its relpages and reltuples statistics.
168 * At entry, we have already established a transaction and opened
169 * and locked the relation.
172 lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
173 BufferAccessStrategy bstrategy)
175 LVRelStats *vacrelstats;
178 BlockNumber possibly_freeable;
180 TimestampTz starttime = 0;
185 bool scan_all; /* should we scan all pages? */
186 bool scanned_all; /* did we actually scan all pages? */
187 TransactionId xidFullScanLimit;
188 MultiXactId mxactFullScanLimit;
189 BlockNumber new_rel_pages;
190 double new_rel_tuples;
191 BlockNumber new_rel_allvisible;
192 double new_live_tuples;
193 TransactionId new_frozen_xid;
194 MultiXactId new_min_multi;
196 Assert(params != NULL);
198 /* measure elapsed time iff autovacuum logging requires it */
199 if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
201 pg_rusage_init(&ru0);
202 starttime = GetCurrentTimestamp();
205 if (options & VACOPT_VERBOSE)
210 vac_strategy = bstrategy;
212 vacuum_set_xid_limits(onerel,
213 params->freeze_min_age,
214 params->freeze_table_age,
215 params->multixact_freeze_min_age,
216 params->multixact_freeze_table_age,
217 &OldestXmin, &FreezeLimit, &xidFullScanLimit,
218 &MultiXactCutoff, &mxactFullScanLimit);
221 * We request a full scan if either the table's frozen Xid is now older
222 * than or equal to the requested Xid full-table scan limit; or if the
223 * table's minimum MultiXactId is older than or equal to the requested
224 * mxid full-table scan limit.
226 scan_all = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
228 scan_all |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
231 vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
233 vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
234 vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
235 vacrelstats->num_index_scans = 0;
236 vacrelstats->pages_removed = 0;
237 vacrelstats->lock_waiter_detected = false;
239 /* Open all indexes of the relation */
240 vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
241 vacrelstats->hasindex = (nindexes > 0);
243 /* Do the vacuuming */
244 lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, scan_all);
246 /* Done with indexes */
247 vac_close_indexes(nindexes, Irel, NoLock);
250 * Compute whether we actually scanned the whole relation. If we did, we
251 * can adjust relfrozenxid and relminmxid.
253 * NB: We need to check this before truncating the relation, because that
254 * will change ->rel_pages.
	 */
	if (vacrelstats->scanned_pages < vacrelstats->rel_pages)
		scanned_all = false;
	else
		scanned_all = true;

265 * Optionally truncate the relation.
267 * Don't even think about it unless we have a shot at releasing a goodly
268 * number of pages. Otherwise, the time taken isn't worth it.
270 possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
271 if (possibly_freeable > 0 &&
272 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
273 possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION))
274 lazy_truncate_heap(onerel, vacrelstats);
276 /* Vacuum the Free Space Map */
277 FreeSpaceMapVacuum(onerel);
280 * Update statistics in pg_class.
282 * A corner case here is that if we scanned no pages at all because every
283 * page is all-visible, we should not update relpages/reltuples, because
284 * we have no new information to contribute. In particular this keeps us
285 * from replacing relpages=reltuples=0 (which means "unknown tuple
286 * density") with nonzero relpages and reltuples=0 (which means "zero
287 * tuple density") unless there's some actual evidence for the latter.
289 * We do update relallvisible even in the corner case, since if the table
290 * is all-visible we'd definitely like to know that. But clamp the value
291 * to be not more than what we're setting relpages to.
293 * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
294 * since then we don't know for certain that all tuples have a newer xmin.
296 new_rel_pages = vacrelstats->rel_pages;
297 new_rel_tuples = vacrelstats->new_rel_tuples;
	if (vacrelstats->scanned_pages == 0 && new_rel_pages > 0)
	{
		new_rel_pages = vacrelstats->old_rel_pages;
		new_rel_tuples = vacrelstats->old_rel_tuples;
	}

304 new_rel_allvisible = visibilitymap_count(onerel);
305 if (new_rel_allvisible > new_rel_pages)
306 new_rel_allvisible = new_rel_pages;
308 new_frozen_xid = scanned_all ? FreezeLimit : InvalidTransactionId;
309 new_min_multi = scanned_all ? MultiXactCutoff : InvalidMultiXactId;
	vac_update_relstats(onerel,
						new_rel_pages,
						new_rel_tuples,
						new_rel_allvisible,
						vacrelstats->hasindex,
						new_frozen_xid,
						new_min_multi,
						false);
320 /* report results to the stats collector, too */
321 new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
322 if (new_live_tuples < 0)
323 new_live_tuples = 0; /* just in case */
325 pgstat_report_vacuum(RelationGetRelid(onerel),
326 onerel->rd_rel->relisshared,
						 new_live_tuples,
						 vacrelstats->new_dead_tuples);
330 /* and log the action if appropriate */
331 if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
333 TimestampTz endtime = GetCurrentTimestamp();
335 if (params->log_min_duration == 0 ||
336 TimestampDifferenceExceeds(starttime, endtime,
337 params->log_min_duration))
341 TimestampDifference(starttime, endtime, &secs, &usecs);
345 if ((secs > 0) || (usecs > 0))
347 read_rate = (double) BLCKSZ *VacuumPageMiss / (1024 * 1024) /
348 (secs + usecs / 1000000.0);
349 write_rate = (double) BLCKSZ *VacuumPageDirty / (1024 * 1024) /
350 (secs + usecs / 1000000.0);
354 * This is pretty messy, but we split it up so that we can skip
355 * emitting individual parts of the message when not applicable.
357 initStringInfo(&buf);
358 appendStringInfo(&buf, _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"),
359 get_database_name(MyDatabaseId),
360 get_namespace_name(RelationGetNamespace(onerel)),
361 RelationGetRelationName(onerel),
362 vacrelstats->num_index_scans);
363 appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins\n"),
364 vacrelstats->pages_removed,
365 vacrelstats->rel_pages,
366 vacrelstats->pinskipped_pages);
367 appendStringInfo(&buf,
368 _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable\n"),
369 vacrelstats->tuples_deleted,
370 vacrelstats->new_rel_tuples,
371 vacrelstats->new_dead_tuples);
372 appendStringInfo(&buf,
							 _("buffer usage: %d hits, %d misses, %d dirtied\n"),
							 VacuumPageHit,
							 VacuumPageMiss,
							 VacuumPageDirty);
377 appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
378 read_rate, write_rate);
379 appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
			ereport(LOG,
					(errmsg_internal("%s", buf.data)));
			pfree(buf.data);
389 * For Hot Standby we need to know the highest transaction id that will
390 * be removed by any change. VACUUM proceeds in a number of passes so
391 * we need to consider how each pass operates. The first phase runs
392 * heap_page_prune(), which can issue XLOG_HEAP2_CLEAN records as it
393 * progresses - these will have a latestRemovedXid on each record.
394 * In some cases this removes all of the tuples to be removed, though
395 * often we have dead tuples with index pointers so we must remember them
396 * for removal in phase 3. Index records for those rows are removed
397 * in phase 2 and index blocks do not have MVCC information attached.
398 * So before we can allow removal of any index tuples we need to issue
399 * a WAL record containing the latestRemovedXid of rows that will be
400 * removed in phase three. This allows recovery queries to block at the
401 * correct place, i.e. before phase two, rather than during phase three
402 * which would be after the rows have become inaccessible.
405 vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
408 * Skip this for relations for which no WAL is to be written, or if we're
409 * not trying to support archive recovery.
411 if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
415 * No need to write the record at all unless it contains a valid value
417 if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
418 (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
422 * lazy_scan_heap() -- scan an open heap relation
424 * This routine prunes each page in the heap, which will among other
425 * things truncate dead tuples to dead line pointers, defragment the
426 * page, and set commit status bits (see heap_page_prune). It also builds
427 * lists of dead tuples and pages with free space, calculates statistics
428 * on the number of live tuples in the heap, and marks pages as
429 * all-visible if appropriate. When done, or when we run low on space for
430 * dead-tuple TIDs, invoke vacuuming of indexes and call lazy_vacuum_heap
431 * to reclaim dead line pointers.
433 * If there are no indexes then we can reclaim line pointers on the fly;
434 * dead line pointers need only be retained until all index pointers that
435 * reference them have been killed.
438 lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
439 Relation *Irel, int nindexes, bool scan_all)
445 BlockNumber empty_pages,
451 IndexBulkDeleteResult **indstats;
454 Buffer vmbuffer = InvalidBuffer;
455 BlockNumber next_not_all_visible_block;
456 bool skipping_all_visible_blocks;
457 xl_heap_freeze_tuple *frozen;
460 pg_rusage_init(&ru0);
462 relname = RelationGetRelationName(onerel);
464 (errmsg("vacuuming \"%s.%s\"",
465 get_namespace_name(RelationGetNamespace(onerel)),
468 empty_pages = vacuumed_pages = 0;
469 num_tuples = tups_vacuumed = nkeep = nunused = 0;
471 indstats = (IndexBulkDeleteResult **)
472 palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
474 nblocks = RelationGetNumberOfBlocks(onerel);
475 vacrelstats->rel_pages = nblocks;
476 vacrelstats->scanned_pages = 0;
477 vacrelstats->nonempty_pages = 0;
478 vacrelstats->latestRemovedXid = InvalidTransactionId;
480 lazy_space_alloc(vacrelstats, nblocks);
481 frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
484 * We want to skip pages that don't require vacuuming according to the
485 * visibility map, but only when we can skip at least SKIP_PAGES_THRESHOLD
486 * consecutive pages. Since we're reading sequentially, the OS should be
487 * doing readahead for us, so there's no gain in skipping a page now and
488 * then; that's likely to disable readahead and so be counterproductive.
489 * Also, skipping even a single page means that we can't update
	 * relfrozenxid, so we only want to do it if we can skip a goodly number
	 * of pages.
	 *
493 * Before entering the main loop, establish the invariant that
494 * next_not_all_visible_block is the next block number >= blkno that's not
495 * all-visible according to the visibility map, or nblocks if there's no
496 * such block. Also, we set up the skipping_all_visible_blocks flag,
497 * which is needed because we need hysteresis in the decision: once we've
498 * started skipping blocks, we may as well skip everything up to the next
499 * not-all-visible block.
501 * Note: if scan_all is true, we won't actually skip any pages; but we
502 * maintain next_not_all_visible_block anyway, so as to set up the
503 * all_visible_according_to_vm flag correctly for each page.
505 * Note: The value returned by visibilitymap_test could be slightly
506 * out-of-date, since we make this test before reading the corresponding
507 * heap page or locking the buffer. This is OK. If we mistakenly think
508 * that the page is all-visible when in fact the flag's just been cleared,
509 * we might fail to vacuum the page. But it's OK to skip pages when
510 * scan_all is not set, so no great harm done; the next vacuum will find
511 * them. If we make the reverse mistake and vacuum a page unnecessarily,
512 * it'll just be a no-op.
514 for (next_not_all_visible_block = 0;
515 next_not_all_visible_block < nblocks;
516 next_not_all_visible_block++)
518 if (!visibilitymap_test(onerel, next_not_all_visible_block, &vmbuffer))
520 vacuum_delay_point();
522 if (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD)
523 skipping_all_visible_blocks = true;
525 skipping_all_visible_blocks = false;
527 for (blkno = 0; blkno < nblocks; blkno++)
538 bool all_visible_according_to_vm;
540 bool has_dead_tuples;
541 TransactionId visibility_cutoff_xid = InvalidTransactionId;
543 if (blkno == next_not_all_visible_block)
545 /* Time to advance next_not_all_visible_block */
546 for (next_not_all_visible_block++;
547 next_not_all_visible_block < nblocks;
548 next_not_all_visible_block++)
550 if (!visibilitymap_test(onerel, next_not_all_visible_block,
553 vacuum_delay_point();
			/*
			 * We know we can't skip the current block.  But set up
			 * skipping_all_visible_blocks to do the right thing at the
			 * following blocks.
			 */
561 if (next_not_all_visible_block - blkno > SKIP_PAGES_THRESHOLD)
562 skipping_all_visible_blocks = true;
564 skipping_all_visible_blocks = false;
565 all_visible_according_to_vm = false;
569 /* Current block is all-visible */
570 if (skipping_all_visible_blocks && !scan_all)
572 all_visible_according_to_vm = true;
575 vacuum_delay_point();
578 * If we are close to overrunning the available space for dead-tuple
579 * TIDs, pause and do a cycle of vacuuming before we tackle this page.
581 if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
582 vacrelstats->num_dead_tuples > 0)
585 * Before beginning index vacuuming, we release any pin we may
586 * hold on the visibility map page. This isn't necessary for
587 * correctness, but we do it anyway to avoid holding the pin
588 * across a lengthy, unrelated operation.
590 if (BufferIsValid(vmbuffer))
592 ReleaseBuffer(vmbuffer);
593 vmbuffer = InvalidBuffer;
596 /* Log cleanup info before we touch indexes */
597 vacuum_log_cleanup_info(onerel, vacrelstats);
599 /* Remove index entries */
600 for (i = 0; i < nindexes; i++)
601 lazy_vacuum_index(Irel[i],
604 /* Remove tuples from heap */
605 lazy_vacuum_heap(onerel, vacrelstats);
			/*
			 * Forget the now-vacuumed tuples, and press on, but be careful
			 * not to reset latestRemovedXid since we want that value to be
			 * valid.
			 */
612 vacrelstats->num_dead_tuples = 0;
613 vacrelstats->num_index_scans++;
617 * Pin the visibility map page in case we need to mark the page
618 * all-visible. In most cases this will be very cheap, because we'll
619 * already have the correct page pinned anyway. However, it's
620 * possible that (a) next_not_all_visible_block is covered by a
621 * different VM page than the current block or (b) we released our pin
622 * and did a cycle of index vacuuming.
624 visibilitymap_pin(onerel, blkno, &vmbuffer);
626 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
627 RBM_NORMAL, vac_strategy);
629 /* We need buffer cleanup lock so that we can prune HOT chains. */
		if (!ConditionalLockBufferForCleanup(buf))
		{
			/*
			 * If we're not scanning the whole relation to guard against XID
			 * wraparound, it's OK to skip vacuuming a page.  The next vacuum
			 * will clean it up.
			 */
			if (!scan_all)
			{
				ReleaseBuffer(buf);
				vacrelstats->pinskipped_pages++;
				continue;
			}

645 * If this is a wraparound checking vacuum, then we read the page
646 * with share lock to see if any xids need to be frozen. If the
647 * page doesn't need attention we just skip and continue. If it
648 * does, we wait for cleanup lock.
650 * We could defer the lock request further by remembering the page
651 * and coming back to it later, or we could even register
652 * ourselves for multiple buffers and then service whichever one
653 * is received first. For now, this seems good enough.
655 LockBuffer(buf, BUFFER_LOCK_SHARE);
656 if (!lazy_check_needs_freeze(buf))
658 UnlockReleaseBuffer(buf);
659 vacrelstats->scanned_pages++;
660 vacrelstats->pinskipped_pages++;
663 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
664 LockBufferForCleanup(buf);
			/* drop through to normal processing */
		}

668 vacrelstats->scanned_pages++;
670 page = BufferGetPage(buf);
675 * An all-zeroes page could be left over if a backend extends the
			 * relation but crashes before initializing the page.  Reclaim such
			 * pages for use.
			 *
679 * We have to be careful here because we could be looking at a
680 * page that someone has just added to the relation and not yet
681 * been able to initialize (see RelationGetBufferForTuple). To
682 * protect against that, release the buffer lock, grab the
683 * relation extension lock momentarily, and re-lock the buffer. If
684 * the page is still uninitialized by then, it must be left over
685 * from a crashed backend, and we can initialize it.
687 * We don't really need the relation lock when this is a new or
688 * temp relation, but it's probably not worth the code space to
689 * check that, since this surely isn't a critical path.
691 * Note: the comparable code in vacuum.c need not worry because
692 * it's got exclusive lock on the whole relation.
694 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
695 LockRelationForExtension(onerel, ExclusiveLock);
696 UnlockRelationForExtension(onerel, ExclusiveLock);
697 LockBufferForCleanup(buf);
			if (PageIsNew(page))
			{
				ereport(WARNING,
						(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
								relname, blkno)));
				PageInit(page, BufferGetPageSize(buf), 0);
				empty_pages++;
			}

706 freespace = PageGetHeapFreeSpace(page);
707 MarkBufferDirty(buf);
708 UnlockReleaseBuffer(buf);
710 RecordPageWithFreeSpace(onerel, blkno, freespace);
714 if (PageIsEmpty(page))
717 freespace = PageGetHeapFreeSpace(page);
719 /* empty pages are always all-visible */
720 if (!PageIsAllVisible(page))
722 START_CRIT_SECTION();
724 /* mark buffer dirty before writing a WAL record */
725 MarkBufferDirty(buf);
728 * It's possible that another backend has extended the heap,
729 * initialized the page, and then failed to WAL-log the page
730 * due to an ERROR. Since heap extension is not WAL-logged,
731 * recovery might try to replay our record setting the page
732 * all-visible and find that the page isn't initialized, which
733 * will cause a PANIC. To prevent that, check whether the
734 * page has been previously WAL-logged, and if not, do that
737 if (RelationNeedsWAL(onerel) &&
738 PageGetLSN(page) == InvalidXLogRecPtr)
739 log_newpage_buffer(buf, true);
741 PageSetAllVisible(page);
742 visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
743 vmbuffer, InvalidTransactionId);
747 UnlockReleaseBuffer(buf);
748 RecordPageWithFreeSpace(onerel, blkno, freespace);
753 * Prune all HOT-update chains in this page.
755 * We count tuples removed by the pruning step as removed by VACUUM.
757 tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
758 &vacrelstats->latestRemovedXid);
761 * Now scan the page to collect vacuumable items and check for tuples
762 * requiring freezing.
765 has_dead_tuples = false;
768 prev_dead_count = vacrelstats->num_dead_tuples;
769 maxoff = PageGetMaxOffsetNumber(page);
772 * Note: If you change anything in the loop below, also look at
773 * heap_page_is_all_visible to see if that needs to be changed.
775 for (offnum = FirstOffsetNumber;
777 offnum = OffsetNumberNext(offnum))
781 itemid = PageGetItemId(page, offnum);
783 /* Unused items require no processing, but we count 'em */
784 if (!ItemIdIsUsed(itemid))
790 /* Redirect items mustn't be touched */
791 if (ItemIdIsRedirected(itemid))
793 hastup = true; /* this page won't be truncatable */
797 ItemPointerSet(&(tuple.t_self), blkno, offnum);
			/*
			 * DEAD item pointers are to be vacuumed normally; but we don't
			 * count them in tups_vacuumed, else we'd be double-counting (at
			 * least in the common case where heap_page_prune() just freed up
			 * a non-HOT tuple).
			 */
805 if (ItemIdIsDead(itemid))
807 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
812 Assert(ItemIdIsNormal(itemid));
814 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
815 tuple.t_len = ItemIdGetLength(itemid);
816 tuple.t_tableOid = RelationGetRelid(onerel);
820 switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
825 * Ordinarily, DEAD tuples would have been removed by
826 * heap_page_prune(), but it's possible that the tuple
827 * state changed since heap_page_prune() looked. In
828 * particular an INSERT_IN_PROGRESS tuple could have
829 * changed to DEAD if the inserter aborted. So this
830 * cannot be considered an error condition.
832 * If the tuple is HOT-updated then it must only be
833 * removed by a prune operation; so we keep it just as if
834 * it were RECENTLY_DEAD. Also, if it's a heap-only
835 * tuple, we choose to keep it, because it'll be a lot
836 * cheaper to get rid of it in the next pruning pass than
837 * to treat it like an indexed tuple.
839 if (HeapTupleIsHotUpdated(&tuple) ||
840 HeapTupleIsHeapOnly(&tuple))
843 tupgone = true; /* we can delete the tuple */
847 /* Tuple is good --- but let's do some validity checks */
848 if (onerel->rd_rel->relhasoids &&
849 !OidIsValid(HeapTupleGetOid(&tuple)))
850 elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
851 relname, blkno, offnum);
					/*
					 * Is the tuple definitely visible to all transactions?
					 *
					 * NB: Like with per-tuple hint bits, we can't set the
					 * PD_ALL_VISIBLE flag if the inserter committed
					 * asynchronously.  See SetHintBits for more info.  Check
					 * that the tuple is hinted xmin-committed because of
					 * that.
					 */
866 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
873 * The inserter definitely committed. But is it old
874 * enough that everyone sees it as committed?
876 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
877 if (!TransactionIdPrecedes(xmin, OldestXmin))
883 /* Track newest xmin on page. */
884 if (TransactionIdFollows(xmin, visibility_cutoff_xid))
885 visibility_cutoff_xid = xmin;
				case HEAPTUPLE_RECENTLY_DEAD:

					/*
					 * If tuple is recently deleted then we must not remove it
					 * from relation.
					 */
					nkeep += 1;
					break;
897 case HEAPTUPLE_INSERT_IN_PROGRESS:
898 /* This is an expected case during concurrent vacuum */
901 case HEAPTUPLE_DELETE_IN_PROGRESS:
902 /* This is an expected case during concurrent vacuum */
906 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
912 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
913 HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
914 &vacrelstats->latestRemovedXid);
916 has_dead_tuples = true;
924 * Each non-removable tuple must be checked to see if it needs
925 * freezing. Note we already have exclusive buffer lock.
927 if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
928 MultiXactCutoff, &frozen[nfrozen]))
929 frozen[nfrozen++].offset = offnum;
931 } /* scan along page */
934 * If we froze any tuples, mark the buffer dirty, and write a WAL
935 * record recording the changes. We must log the changes to be
936 * crash-safe against future truncation of CLOG.
940 START_CRIT_SECTION();
942 MarkBufferDirty(buf);
944 /* execute collected freezes */
945 for (i = 0; i < nfrozen; i++)
948 HeapTupleHeader htup;
950 itemid = PageGetItemId(page, frozen[i].offset);
951 htup = (HeapTupleHeader) PageGetItem(page, itemid);
953 heap_execute_freeze_tuple(htup, &frozen[i]);
956 /* Now WAL-log freezing if necessary */
957 if (RelationNeedsWAL(onerel))
961 recptr = log_heap_freeze(onerel, buf, FreezeLimit,
963 PageSetLSN(page, recptr);
		/*
		 * If there are no indexes then we can vacuum the page right now
		 * instead of doing a second scan.
		 */
		if (nindexes == 0 &&
			vacrelstats->num_dead_tuples > 0)
		{
976 /* Remove tuples from heap */
977 lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
978 has_dead_tuples = false;
			/*
			 * Forget the now-vacuumed tuples, and press on, but be careful
			 * not to reset latestRemovedXid since we want that value to be
			 * valid.
			 */
			vacrelstats->num_dead_tuples = 0;
			vacuumed_pages++;
		}

989 freespace = PageGetHeapFreeSpace(page);
991 /* mark page all-visible, if appropriate */
992 if (all_visible && !all_visible_according_to_vm)
995 * It should never be the case that the visibility map page is set
996 * while the page-level bit is clear, but the reverse is allowed
			 * (if checksums are not enabled).  Regardless, set both bits
998 * so that we get back in sync.
1000 * NB: If the heap page is all-visible but the VM bit is not set,
1001 * we don't need to dirty the heap page. However, if checksums
1002 * are enabled, we do need to make sure that the heap page is
1003 * dirtied before passing it to visibilitymap_set(), because it
1004 * may be logged. Given that this situation should only happen in
1005 * rare cases after a crash, it is not worth optimizing.
1007 PageSetAllVisible(page);
1008 MarkBufferDirty(buf);
1009 visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1010 vmbuffer, visibility_cutoff_xid);
1014 * As of PostgreSQL 9.2, the visibility map bit should never be set if
1015 * the page-level bit is clear. However, it's possible that the bit
1016 * got cleared after we checked it and before we took the buffer
1017 * content lock, so we must recheck before jumping to the conclusion
1018 * that something bad has happened.
1020 else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1021 && visibilitymap_test(onerel, blkno, &vmbuffer))
1023 elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1025 visibilitymap_clear(onerel, blkno, vmbuffer);
1029 * It's possible for the value returned by GetOldestXmin() to move
1030 * backwards, so it's not wrong for us to see tuples that appear to
1031 * not be visible to everyone yet, while PD_ALL_VISIBLE is already
1032 * set. The real safe xmin value never moves backwards, but
1033 * GetOldestXmin() is conservative and sometimes returns a value
1034 * that's unnecessarily small, so if we see that contradiction it just
1035 * means that the tuples that we think are not visible to everyone yet
1036 * actually are, and the PD_ALL_VISIBLE flag is correct.
		 * There should never be dead tuples on a page with PD_ALL_VISIBLE
		 * set, however.
		 */
1041 else if (PageIsAllVisible(page) && has_dead_tuples)
1043 elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
1045 PageClearAllVisible(page);
1046 MarkBufferDirty(buf);
1047 visibilitymap_clear(onerel, blkno, vmbuffer);
1050 UnlockReleaseBuffer(buf);
1052 /* Remember the location of the last page with nonremovable tuples */
		if (hastup)
			vacrelstats->nonempty_pages = blkno + 1;
1057 * If we remembered any tuples for deletion, then the page will be
1058 * visited again by lazy_vacuum_heap, which will compute and record
1059 * its post-compaction free space. If not, then we're done with this
1060 * page, so remember its free space as-is. (This path will always be
1061 * taken if there are no indexes.)
1063 if (vacrelstats->num_dead_tuples == prev_dead_count)
1064 RecordPageWithFreeSpace(onerel, blkno, freespace);
1069 /* save stats for use later */
1070 vacrelstats->scanned_tuples = num_tuples;
1071 vacrelstats->tuples_deleted = tups_vacuumed;
1072 vacrelstats->new_dead_tuples = nkeep;
1074 /* now we can compute the new value for pg_class.reltuples */
1075 vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
1077 vacrelstats->scanned_pages,
1081 * Release any remaining pin on visibility map page.
1083 if (BufferIsValid(vmbuffer))
1085 ReleaseBuffer(vmbuffer);
1086 vmbuffer = InvalidBuffer;
1089 /* If any tuples need to be deleted, perform final vacuum cycle */
1090 /* XXX put a threshold on min number of tuples here? */
1091 if (vacrelstats->num_dead_tuples > 0)
1093 /* Log cleanup info before we touch indexes */
1094 vacuum_log_cleanup_info(onerel, vacrelstats);
1096 /* Remove index entries */
1097 for (i = 0; i < nindexes; i++)
1098 lazy_vacuum_index(Irel[i],
1101 /* Remove tuples from heap */
1102 lazy_vacuum_heap(onerel, vacrelstats);
1103 vacrelstats->num_index_scans++;
1106 /* Do post-vacuum cleanup and statistics update for each index */
1107 for (i = 0; i < nindexes; i++)
1108 lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
1110 /* If no indexes, make log report that lazy_vacuum_heap would've made */
	if (vacuumed_pages)
		ereport(elevel,
				(errmsg("\"%s\": removed %.0f row versions in %u pages",
1114 RelationGetRelationName(onerel),
1115 tups_vacuumed, vacuumed_pages)));
1118 * This is pretty messy, but we split it up so that we can skip emitting
1119 * individual parts of the message when not applicable.
1121 initStringInfo(&buf);
1122 appendStringInfo(&buf,
1123 _("%.0f dead row versions cannot be removed yet.\n"),
1125 appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
1127 appendStringInfo(&buf, _("Skipped %u pages due to buffer pins.\n"),
1128 vacrelstats->pinskipped_pages);
1129 appendStringInfo(&buf, _("%u pages are entirely empty.\n"),
1131 appendStringInfo(&buf, _("%s."),
1132 pg_rusage_show(&ru0));
1135 (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1136 RelationGetRelationName(onerel),
1137 tups_vacuumed, num_tuples,
1138 vacrelstats->scanned_pages, nblocks),
1139 errdetail_internal("%s", buf.data)));
1145 * lazy_vacuum_heap() -- second pass over the heap
1147 * This routine marks dead tuples as unused and compacts out free
1148 * space on their pages. Pages not having dead tuples recorded from
1149 * lazy_scan_heap are not visited at all.
1151 * Note: the reason for doing this as a second pass is we cannot remove
1152 * the tuples until we've removed their index entries, and we want to
1153 * process index entry removal in batches as large as possible.
1156 lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
1161 Buffer vmbuffer = InvalidBuffer;
1163 pg_rusage_init(&ru0);
1167 while (tupindex < vacrelstats->num_dead_tuples)
1174 vacuum_delay_point();
1176 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1177 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
		if (!ConditionalLockBufferForCleanup(buf))
		{
			ReleaseBuffer(buf);
			++tupindex;
			continue;
		}
1185 tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1188 /* Now that we've compacted the page, record its available space */
1189 page = BufferGetPage(buf);
1190 freespace = PageGetHeapFreeSpace(page);
1192 UnlockReleaseBuffer(buf);
1193 RecordPageWithFreeSpace(onerel, tblk, freespace);
1197 if (BufferIsValid(vmbuffer))
1199 ReleaseBuffer(vmbuffer);
1200 vmbuffer = InvalidBuffer;
1204 (errmsg("\"%s\": removed %d row versions in %d pages",
1205 RelationGetRelationName(onerel),
1208 pg_rusage_show(&ru0))));
1212 * lazy_vacuum_page() -- free dead tuples on a page
1213 * and repair its fragmentation.
1215 * Caller must hold pin and buffer cleanup lock on the buffer.
1217 * tupindex is the index in vacrelstats->dead_tuples of the first dead
1218 * tuple for this page. We assume the rest follow sequentially.
1219 * The return value is the first tupindex after the tuples of this page.
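 *
 * (Worked example, not from the original comment: if dead_tuples holds the
 * TIDs (5,1) (5,3) (7,2) and we are called with blkno = 5 and tupindex = 0,
 * offsets 1 and 3 on block 5 are marked unused and the function returns 2,
 * the index of the first TID belonging to a later block.)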
1222 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
1223 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
1225 Page page = BufferGetPage(buffer);
1226 OffsetNumber unused[MaxOffsetNumber];
1228 TransactionId visibility_cutoff_xid;
1230 START_CRIT_SECTION();
1232 for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
1238 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
		if (tblk != blkno)
			break;				/* past end of tuples for this block */
1241 toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
1242 itemid = PageGetItemId(page, toff);
1243 ItemIdSetUnused(itemid);
1244 unused[uncnt++] = toff;
1247 PageRepairFragmentation(page);
1250 * Mark buffer dirty before we write WAL.
1252 MarkBufferDirty(buffer);
1255 if (RelationNeedsWAL(onerel))
1259 recptr = log_heap_clean(onerel, buffer,
1262 vacrelstats->latestRemovedXid);
1263 PageSetLSN(page, recptr);
1267 * End critical section, so we safely can do visibility tests (which
1268 * possibly need to perform IO and allocate memory!). If we crash now the
1269 * page (including the corresponding vm bit) might not be marked all
	 * visible, but that's fine.  A later vacuum will fix that.
	 */
	END_CRIT_SECTION();

	/*
1275 * Now that we have removed the dead tuples from the page, once again
1276 * check if the page has become all-visible. The page is already marked
1277 * dirty, exclusively locked, and, if needed, a full page image has been
1278 * emitted in the log_heap_clean() above.
1280 if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid))
1281 PageSetAllVisible(page);
1284 * All the changes to the heap page have been done. If the all-visible
1285 * flag is now set, also set the VM bit.
1287 if (PageIsAllVisible(page) &&
1288 !visibilitymap_test(onerel, blkno, vmbuffer))
1290 Assert(BufferIsValid(*vmbuffer));
1291 visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr, *vmbuffer,
1292 visibility_cutoff_xid);
1299 * lazy_check_needs_freeze() -- scan page to see if any tuples
1300 * need to be cleaned to avoid wraparound
1302 * Returns true if the page needs to be vacuumed using cleanup lock.
1305 lazy_check_needs_freeze(Buffer buf)
1308 OffsetNumber offnum,
1310 HeapTupleHeader tupleheader;
1312 page = BufferGetPage(buf);
	if (PageIsNew(page) || PageIsEmpty(page))
	{
		/* PageIsNew probably shouldn't happen... */
		return false;
	}

1320 maxoff = PageGetMaxOffsetNumber(page);
1321 for (offnum = FirstOffsetNumber;
1323 offnum = OffsetNumberNext(offnum))
1327 itemid = PageGetItemId(page, offnum);
1329 if (!ItemIdIsNormal(itemid))
1332 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1334 if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
									 MultiXactCutoff, buf))
			return true;
	}							/* scan along page */

	return false;
}
1344 * lazy_vacuum_index() -- vacuum one index relation.
1346 * Delete all the index entries pointing to tuples listed in
1347 * vacrelstats->dead_tuples, and update running statistics.
1350 lazy_vacuum_index(Relation indrel,
1351 IndexBulkDeleteResult **stats,
1352 LVRelStats *vacrelstats)
1354 IndexVacuumInfo ivinfo;
1357 pg_rusage_init(&ru0);
1359 ivinfo.index = indrel;
1360 ivinfo.analyze_only = false;
1361 ivinfo.estimated_count = true;
1362 ivinfo.message_level = elevel;
1363 ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
1364 ivinfo.strategy = vac_strategy;
1366 /* Do bulk deletion */
1367 *stats = index_bulk_delete(&ivinfo, *stats,
1368 lazy_tid_reaped, (void *) vacrelstats);
1371 (errmsg("scanned index \"%s\" to remove %d row versions",
1372 RelationGetRelationName(indrel),
1373 vacrelstats->num_dead_tuples),
1374 errdetail("%s.", pg_rusage_show(&ru0))));
1378 * lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
1381 lazy_cleanup_index(Relation indrel,
1382 IndexBulkDeleteResult *stats,
1383 LVRelStats *vacrelstats)
1385 IndexVacuumInfo ivinfo;
1388 pg_rusage_init(&ru0);
1390 ivinfo.index = indrel;
1391 ivinfo.analyze_only = false;
1392 ivinfo.estimated_count = (vacrelstats->scanned_pages < vacrelstats->rel_pages);
1393 ivinfo.message_level = elevel;
1394 ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
1395 ivinfo.strategy = vac_strategy;
1397 stats = index_vacuum_cleanup(&ivinfo, stats);
	/*
	 * Now update statistics in pg_class, but only if the index says the
	 * count is accurate.
	 */
1406 if (!stats->estimated_count)
1407 vac_update_relstats(indrel,
1409 stats->num_index_tuples,
1412 InvalidTransactionId,
1417 (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
1418 RelationGetRelationName(indrel),
1419 stats->num_index_tuples,
1421 errdetail("%.0f index row versions were removed.\n"
1422 "%u index pages have been deleted, %u are currently reusable.\n"
1424 stats->tuples_removed,
1425 stats->pages_deleted, stats->pages_free,
1426 pg_rusage_show(&ru0))));
1432 * lazy_truncate_heap - try to truncate off any empty pages at the end
1435 lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
1437 BlockNumber old_rel_pages = vacrelstats->rel_pages;
1438 BlockNumber new_rel_pages;
1442 pg_rusage_init(&ru0);
1445 * Loop until no more truncating can be done.
1450 * We need full exclusive lock on the relation in order to do
1451 * truncation. If we can't get it, give up rather than waiting --- we
1452 * don't want to block other backends, and we don't want to deadlock
		 * (which is quite possible considering we already hold a lower-grade
		 * lock).
		 */
1456 vacrelstats->lock_waiter_detected = false;
1460 if (ConditionalLockRelation(onerel, AccessExclusiveLock))
1464 * Check for interrupts while trying to (re-)acquire the exclusive
1467 CHECK_FOR_INTERRUPTS();
1469 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
1470 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
1473 * We failed to establish the lock in the specified number of
1474 * retries. This means we give up truncating.
1476 vacrelstats->lock_waiter_detected = true;
1478 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
1479 RelationGetRelationName(onerel))));
1483 pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
1487 * Now that we have exclusive lock, look to see if the rel has grown
1488 * whilst we were vacuuming with non-exclusive lock. If so, give up;
1489 * the newly added pages presumably contain non-deletable tuples.
1491 new_rel_pages = RelationGetNumberOfBlocks(onerel);
1492 if (new_rel_pages != old_rel_pages)
1495 * Note: we intentionally don't update vacrelstats->rel_pages with
1496 * the new rel size here. If we did, it would amount to assuming
1497 * that the new pages are empty, which is unlikely. Leaving the
1498 * numbers alone amounts to assuming that the new pages have the
1499 * same tuple density as existing ones, which is less unlikely.
1501 UnlockRelation(onerel, AccessExclusiveLock);
1506 * Scan backwards from the end to verify that the end pages actually
1507 * contain no tuples. This is *necessary*, not optional, because
		 * other backends could have added tuples to these pages whilst we
		 * were vacuuming.
		 */
1511 new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1513 if (new_rel_pages >= old_rel_pages)
1515 /* can't do anything after all */
1516 UnlockRelation(onerel, AccessExclusiveLock);
1523 RelationTruncate(onerel, new_rel_pages);
1526 * We can release the exclusive lock as soon as we have truncated.
1527 * Other backends can't safely access the relation until they have
1528 * processed the smgr invalidation that smgrtruncate sent out ... but
1529 * that should happen as part of standard invalidation processing once
1530 * they acquire lock on the relation.
1532 UnlockRelation(onerel, AccessExclusiveLock);
1535 * Update statistics. Here, it *is* correct to adjust rel_pages
1536 * without also touching reltuples, since the tuple count wasn't
1537 * changed by the truncation.
1539 vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
1540 vacrelstats->rel_pages = new_rel_pages;
1543 (errmsg("\"%s\": truncated %u to %u pages",
1544 RelationGetRelationName(onerel),
1545 old_rel_pages, new_rel_pages),
1547 pg_rusage_show(&ru0))));
1548 old_rel_pages = new_rel_pages;
1549 } while (new_rel_pages > vacrelstats->nonempty_pages &&
1550 vacrelstats->lock_waiter_detected);
1554 * Rescan end pages to verify that they are (still) empty of tuples.
1556 * Returns number of nondeletable pages (last nonempty page + 1).
1559 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
1562 instr_time starttime;
1564 /* Initialize the starttime if we check for conflicting lock requests */
1565 INSTR_TIME_SET_CURRENT(starttime);
1567 /* Strange coding of loop control is needed because blkno is unsigned */
1568 blkno = vacrelstats->rel_pages;
1569 while (blkno > vacrelstats->nonempty_pages)
1573 OffsetNumber offnum,
1578 * Check if another process requests a lock on our relation. We are
1579 * holding an AccessExclusiveLock here, so they will be waiting. We
1580 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
1581 * only check if that interval has elapsed once every 32 blocks to
1582 * keep the number of system calls and actual shared lock table
1583 * lookups to a minimum.
1585 if ((blkno % 32) == 0)
1587 instr_time currenttime;
1590 INSTR_TIME_SET_CURRENT(currenttime);
1591 elapsed = currenttime;
1592 INSTR_TIME_SUBTRACT(elapsed, starttime);
1593 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
1594 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
				if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
				{
					ereport(elevel,
							(errmsg("\"%s\": suspending truncate due to conflicting lock request",
									RelationGetRelationName(onerel))));

					vacrelstats->lock_waiter_detected = true;
					return blkno;
				}
1605 starttime = currenttime;
1610 * We don't insert a vacuum delay point here, because we have an
1611 * exclusive lock on the table which we want to hold for as short a
1612 * time as possible. We still need to check for interrupts however.
1614 CHECK_FOR_INTERRUPTS();
1618 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
1619 RBM_NORMAL, vac_strategy);
1621 /* In this phase we only need shared access to the buffer */
1622 LockBuffer(buf, BUFFER_LOCK_SHARE);
1624 page = BufferGetPage(buf);
		if (PageIsNew(page) || PageIsEmpty(page))
		{
			/* PageIsNew probably shouldn't happen... */
			UnlockReleaseBuffer(buf);
			continue;
		}

1634 maxoff = PageGetMaxOffsetNumber(page);
1635 for (offnum = FirstOffsetNumber;
1637 offnum = OffsetNumberNext(offnum))
1641 itemid = PageGetItemId(page, offnum);
1644 * Note: any non-unused item should be taken as a reason to keep
1645 * this page. We formerly thought that DEAD tuples could be
1646 * thrown away, but that's not so, because we'd not have cleaned
1647 * out their index entries.
			if (ItemIdIsUsed(itemid))
			{
				hastup = true;	/* can stop scanning */
				break;
			}
1654 } /* scan along page */
1656 UnlockReleaseBuffer(buf);
		/* Done scanning if we found a tuple here */
		if (hastup)
			return blkno + 1;
	}

1664 * If we fall out of the loop, all the previously-thought-to-be-empty
	 * pages still are; we need not bother to look at the last known-nonempty
	 * page.
	 */
1668 return vacrelstats->nonempty_pages;
1672 * lazy_space_alloc - space allocation decisions for lazy vacuum
1674 * See the comments at the head of this file for rationale.
1677 lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
1680 int vac_work_mem = IsAutoVacuumWorkerProcess() &&
1681 autovacuum_work_mem != -1 ?
1682 autovacuum_work_mem : maintenance_work_mem;
1684 if (vacrelstats->hasindex)
1686 maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
1687 maxtuples = Min(maxtuples, INT_MAX);
1688 maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
1690 /* curious coding here to ensure the multiplication can't overflow */
1691 if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
1692 maxtuples = relblocks * LAZY_ALLOC_TUPLES;
1694 /* stay sane if small maintenance_work_mem */
1695 maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
1699 maxtuples = MaxHeapTuplesPerPage;
1702 vacrelstats->num_dead_tuples = 0;
1703 vacrelstats->max_dead_tuples = (int) maxtuples;
1704 vacrelstats->dead_tuples = (ItemPointer)
1705 palloc(maxtuples * sizeof(ItemPointerData));
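
/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical and the function is unused): the same sizing rules as
 * lazy_space_alloc above, expressed as a pure function that returns the
 * number of dead-tuple slots that would be allocated for a table of
 * "relblocks" pages given a memory budget in kB.
 */
static long
lazy_dead_tuple_capacity_sketch(bool hasindex, BlockNumber relblocks,
								int work_mem_kb)
{
	long		maxtuples;

	if (!hasindex)
		return MaxHeapTuplesPerPage;	/* one heap page's worth suffices */

	maxtuples = (work_mem_kb * 1024L) / sizeof(ItemPointerData);
	maxtuples = Min(maxtuples, INT_MAX);
	maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));

	/* never allocate more than LAZY_ALLOC_TUPLES slots per heap page */
	if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
		maxtuples = relblocks * LAZY_ALLOC_TUPLES;

	/* but stay sane if the memory budget is very small */
	return Max(maxtuples, MaxHeapTuplesPerPage);
}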
1709 * lazy_record_dead_tuple - remember one deletable tuple
1712 lazy_record_dead_tuple(LVRelStats *vacrelstats,
1713 ItemPointer itemptr)
1716 * The array shouldn't overflow under normal behavior, but perhaps it
1717 * could if we are given a really small maintenance_work_mem. In that
1718 * case, just forget the last few tuples (we'll get 'em next time).
1720 if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
1722 vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
1723 vacrelstats->num_dead_tuples++;
1728 * lazy_tid_reaped() -- is a particular tid deletable?
1730 * This has the right signature to be an IndexBulkDeleteCallback.
1732 * Assumes dead_tuples array is in sorted order.
1735 lazy_tid_reaped(ItemPointer itemptr, void *state)
1737 LVRelStats *vacrelstats = (LVRelStats *) state;
1740 res = (ItemPointer) bsearch((void *) itemptr,
1741 (void *) vacrelstats->dead_tuples,
1742 vacrelstats->num_dead_tuples,
								sizeof(ItemPointerData),
								vac_cmp_itemptr);
1746 return (res != NULL);
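}

/*
 * Illustrative sketch (hypothetical, unused helper; not in the original
 * file): the dead_tuples array is filled in physical heap order (increasing
 * block number, then offset), which is exactly the order vac_cmp_itemptr
 * defines, so lazy_tid_reaped can bsearch it without an explicit sort.
 * Assuming no duplicate TIDs are ever recorded, a debugging check of that
 * invariant could look like this:
 */
#ifdef USE_ASSERT_CHECKING
static void
lazy_assert_dead_tuples_sorted(LVRelStats *vacrelstats)
{
	int			i;

	for (i = 1; i < vacrelstats->num_dead_tuples; i++)
		Assert(vac_cmp_itemptr(&vacrelstats->dead_tuples[i - 1],
							   &vacrelstats->dead_tuples[i]) < 0);
}
#endif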
/*
 * Comparator routines for use with qsort() and bsearch().
 */
static int
vac_cmp_itemptr(const void *left, const void *right)
{
	BlockNumber lblk = ItemPointerGetBlockNumber((ItemPointer) left);
	BlockNumber rblk = ItemPointerGetBlockNumber((ItemPointer) right);
	OffsetNumber loff = ItemPointerGetOffsetNumber((ItemPointer) left);
	OffsetNumber roff = ItemPointerGetOffsetNumber((ItemPointer) right);

	if (lblk != rblk)
		return (lblk < rblk) ? -1 : 1;
	if (loff != roff)
		return (loff < roff) ? -1 : 1;
	return 0;
}
1780 * Check if every tuple in the given page is visible to all current and future
1781 * transactions. Also return the visibility_cutoff_xid which is the highest
1782 * xmin amongst the visible tuples.
1785 heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid)
1787 Page page = BufferGetPage(buf);
1788 BlockNumber blockno = BufferGetBlockNumber(buf);
1789 OffsetNumber offnum,
1791 bool all_visible = true;
1793 *visibility_cutoff_xid = InvalidTransactionId;
1796 * This is a stripped down version of the line pointer scan in
1797 * lazy_scan_heap(). So if you change anything here, also check that code.
1799 maxoff = PageGetMaxOffsetNumber(page);
1800 for (offnum = FirstOffsetNumber;
1801 offnum <= maxoff && all_visible;
1802 offnum = OffsetNumberNext(offnum))
1805 HeapTupleData tuple;
1807 itemid = PageGetItemId(page, offnum);
1809 /* Unused or redirect line pointers are of no interest */
1810 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
1813 ItemPointerSet(&(tuple.t_self), blockno, offnum);
1816 * Dead line pointers can have index pointers pointing to them. So
1817 * they can't be treated as visible
1819 if (ItemIdIsDead(itemid))
1821 all_visible = false;
1825 Assert(ItemIdIsNormal(itemid));
1827 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1828 tuple.t_len = ItemIdGetLength(itemid);
1829 tuple.t_tableOid = RelationGetRelid(rel);
1831 switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
1833 case HEAPTUPLE_LIVE:
1837 /* Check comments in lazy_scan_heap. */
1838 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1840 all_visible = false;
1845 * The inserter definitely committed. But is it old enough
1846 * that everyone sees it as committed?
1848 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1849 if (!TransactionIdPrecedes(xmin, OldestXmin))
1851 all_visible = false;
1855 /* Track newest xmin on page. */
1856 if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
1857 *visibility_cutoff_xid = xmin;
1861 case HEAPTUPLE_DEAD:
1862 case HEAPTUPLE_RECENTLY_DEAD:
1863 case HEAPTUPLE_INSERT_IN_PROGRESS:
1864 case HEAPTUPLE_DELETE_IN_PROGRESS:
1865 all_visible = false;
1869 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
	}							/* scan along page */

	return all_visible;
}