/*-------------------------------------------------------------------------
 *
 * vacuumlazy.c
 *	  Concurrent ("lazy") vacuuming.
 *
 * The major space usage for LAZY VACUUM is storage for the array of dead
 * tuple TIDs, with the next biggest need being storage for per-disk-page
 * free space info.  We want to ensure we can vacuum even the very largest
 * relations with finite memory space usage.  To do that, we set upper bounds
 * on the number of tuples and pages we will keep track of at once.
 *
 * We are willing to use at most maintenance_work_mem (or perhaps
 * autovacuum_work_mem) memory space to keep track of dead tuples.  We
 * initially allocate an array of TIDs of that size, with an upper limit that
 * depends on table size (this limit ensures we don't allocate a huge area
 * uselessly for vacuuming small tables).  If the array threatens to overflow,
 * we suspend the heap scan phase and perform a pass of index cleanup and page
 * compaction, then resume the heap scan with an empty TID array.
 *
 * If we're processing a table with no indexes, we can just vacuum each page
 * as we go; there's no need to save up multiple tuples to minimize the number
 * of index scans performed.  So we don't use maintenance_work_mem memory for
 * the TID array, just enough to hold as many heap tuples as fit on one page.
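 *
 * Illustrative arithmetic (editor's example, not a limit imposed here): an
 * ItemPointerData is 6 bytes, so a 64MB maintenance_work_mem can remember
 * roughly 11 million dead tuple TIDs before an index-cleanup pass is forced.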
 *
 *
 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/commands/vacuumlazy.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
#include "utils/tqual.h"

/*
 * Space/time tradeoff parameters: do these need to be user-tunable?
 *
 * To consider truncating the relation, we want there to be at least
 * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
 * is less) potentially-freeable pages.
 */
#define REL_TRUNCATE_MINIMUM	1000
#define REL_TRUNCATE_FRACTION	16
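
/*
 * Worked example (editor's illustration): for a 4,000-page relation the
 * threshold is Min(1000, 4000/16) = 250 potentially-freeable pages, while
 * for a 100,000-page relation the 1000-page minimum is the smaller term.
 */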

/*
 * Timing parameters for truncate locking heuristics.
 *
 * These were not exposed as user tunable GUC values because it didn't seem
 * that the potential for improvement was great enough to merit the cost of
 * supporting them.
 */
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL		20		/* ms */
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL		50		/* ms */
#define VACUUM_TRUNCATE_LOCK_TIMEOUT			5000	/* ms */

/*
 * Guesstimation of number of dead tuples per page.  This is used to
 * provide an upper limit to memory allocated when vacuuming small
 * tables.
 */
#define LAZY_ALLOC_TUPLES		MaxHeapTuplesPerPage

/*
 * Before we consider skipping a page that's marked as clean in
 * visibility map, we must've seen at least this many clean pages.
 */
#define SKIP_PAGES_THRESHOLD	((BlockNumber) 32)

typedef struct LVRelStats
{
	/* hasindex = true means two-pass strategy; false means one-pass */
	bool		hasindex;
	/* Overall statistics about rel */
	BlockNumber old_rel_pages;	/* previous value of pg_class.relpages */
	BlockNumber rel_pages;		/* total number of pages */
	BlockNumber scanned_pages;	/* number of pages we examined */
	double		scanned_tuples; /* counts only tuples on scanned pages */
	double		old_rel_tuples; /* previous value of pg_class.reltuples */
	double		new_rel_tuples; /* new estimated total # of tuples */
	double		new_dead_tuples;	/* new estimated total # of dead tuples */
	BlockNumber pages_removed;
	double		tuples_deleted;
	BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
	/* List of TIDs of tuples we intend to delete */
	/* NB: this list is ordered by TID address */
	int			num_dead_tuples;	/* current # of entries */
	int			max_dead_tuples;	/* # slots allocated in array */
	ItemPointer dead_tuples;	/* array of ItemPointerData */
	int			num_index_scans;
	TransactionId latestRemovedXid;
	bool		lock_waiter_detected;
} LVRelStats;
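
/*
 * Note on TID ordering (editor's illustration): ItemPointers compare first
 * by block number, then by offset, e.g. (0,3) < (1,1) < (1,2).  The heap
 * scan records dead TIDs in physical order, which is what keeps dead_tuples
 * sorted for the bsearch in lazy_tid_reaped().
 */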

/* A few variables that don't seem worth passing around as parameters */
static int	elevel = -1;

static TransactionId OldestXmin;
static TransactionId FreezeLimit;
static MultiXactId MultiXactCutoff;

static BufferAccessStrategy vac_strategy;


/* non-export function prototypes */
static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
			   Relation *Irel, int nindexes, bool scan_all);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static bool lazy_check_needs_freeze(Buffer buf);
static void lazy_vacuum_index(Relation indrel,
				  IndexBulkDeleteResult **stats,
				  LVRelStats *vacrelstats);
static void lazy_cleanup_index(Relation indrel,
				   IndexBulkDeleteResult *stats,
				   LVRelStats *vacrelstats);
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
				 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
static BlockNumber count_nondeletable_pages(Relation onerel,
						 LVRelStats *vacrelstats);
static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
					   ItemPointer itemptr);
static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
static int	vac_cmp_itemptr(const void *left, const void *right);
static bool heap_page_is_all_visible(Relation rel, Buffer buf,
						 TransactionId *visibility_cutoff_xid);


/*
 *	lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
 *
 *		This routine vacuums a single heap, cleans out its indexes, and
 *		updates its relpages and reltuples statistics.
 *
 *		At entry, we have already established a transaction and opened
 *		and locked the relation.
 */
void
lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
				BufferAccessStrategy bstrategy)
{
	LVRelStats *vacrelstats;
	Relation   *Irel;
	int			nindexes;
	BlockNumber possibly_freeable;
	PGRUsage	ru0;
	TimestampTz starttime = 0;
	long		secs;
	int			usecs;
	double		read_rate,
				write_rate;
	bool		scan_all;		/* should we scan all pages? */
	bool		scanned_all;	/* did we actually scan all pages? */
	TransactionId xidFullScanLimit;
	MultiXactId mxactFullScanLimit;
	BlockNumber new_rel_pages;
	double		new_rel_tuples;
	BlockNumber new_rel_allvisible;
	double		new_live_tuples;
	TransactionId new_frozen_xid;
	MultiXactId new_min_multi;

	/* measure elapsed time iff autovacuum logging requires it */
	if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
	{
		pg_rusage_init(&ru0);
		starttime = GetCurrentTimestamp();
	}

	if (vacstmt->options & VACOPT_VERBOSE)
		elevel = INFO;
	else
		elevel = DEBUG2;

	vac_strategy = bstrategy;

	vacuum_set_xid_limits(vacstmt->freeze_min_age, vacstmt->freeze_table_age,
						  onerel->rd_rel->relisshared,
						  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
						  &MultiXactCutoff, &mxactFullScanLimit);

	/*
	 * We request a full scan if either the table's frozen Xid is now older
	 * than or equal to the requested Xid full-table scan limit, or if the
	 * table's minimum MultiXactId is older than or equal to the requested
	 * mxid full-table scan limit.
	 */
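
	/*
	 * For instance (editor's illustration, hypothetical numbers): if
	 * vacuum_freeze_table_age puts xidFullScanLimit 150 million XIDs behind
	 * the current XID, any table whose relfrozenxid has fallen at least that
	 * far behind is scanned in full rather than via the visibility map.
	 */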
	scan_all = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
											 xidFullScanLimit);
	scan_all |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
											mxactFullScanLimit);

	vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));

	vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
	vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
	vacrelstats->num_index_scans = 0;
	vacrelstats->pages_removed = 0;
	vacrelstats->lock_waiter_detected = false;

	/* Open all indexes of the relation */
	vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
	vacrelstats->hasindex = (nindexes > 0);

	/* Do the vacuuming */
	lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, scan_all);

	/* Done with indexes */
	vac_close_indexes(nindexes, Irel, NoLock);

	/*
	 * Compute whether we actually scanned the whole relation.  If we did, we
	 * can adjust relfrozenxid and relminmxid.
	 *
	 * NB: We need to check this before truncating the relation, because that
	 * will change ->rel_pages.
	 */
	if (vacrelstats->scanned_pages < vacrelstats->rel_pages)
		scanned_all = false;
	else
		scanned_all = true;

	/*
	 * Optionally truncate the relation.
	 *
	 * Don't even think about it unless we have a shot at releasing a goodly
	 * number of pages.  Otherwise, the time taken isn't worth it.
	 */
	possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
	if (possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
		 possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION))
		lazy_truncate_heap(onerel, vacrelstats);

	/* Vacuum the Free Space Map */
	FreeSpaceMapVacuum(onerel);

	/*
	 * Update statistics in pg_class.
	 *
	 * A corner case here is that if we scanned no pages at all because every
	 * page is all-visible, we should not update relpages/reltuples, because
	 * we have no new information to contribute.  In particular this keeps us
	 * from replacing relpages=reltuples=0 (which means "unknown tuple
	 * density") with nonzero relpages and reltuples=0 (which means "zero
	 * tuple density") unless there's some actual evidence for the latter.
	 *
	 * We do update relallvisible even in the corner case, since if the table
	 * is all-visible we'd definitely like to know that.  But clamp the value
	 * to be not more than what we're setting relpages to.
	 *
	 * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
	 * since then we don't know for certain that all tuples have a newer xmin.
	 */
	new_rel_pages = vacrelstats->rel_pages;
	new_rel_tuples = vacrelstats->new_rel_tuples;
	if (vacrelstats->scanned_pages == 0 && new_rel_pages > 0)
	{
		new_rel_pages = vacrelstats->old_rel_pages;
		new_rel_tuples = vacrelstats->old_rel_tuples;
	}

	new_rel_allvisible = visibilitymap_count(onerel);
	if (new_rel_allvisible > new_rel_pages)
		new_rel_allvisible = new_rel_pages;

	new_frozen_xid = scanned_all ? FreezeLimit : InvalidTransactionId;
	new_min_multi = scanned_all ? MultiXactCutoff : InvalidMultiXactId;

	vac_update_relstats(onerel,
						new_rel_pages,
						new_rel_tuples,
						new_rel_allvisible,
						vacrelstats->hasindex,
						new_frozen_xid,
						new_min_multi);

	/* report results to the stats collector, too */
	new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
	if (new_live_tuples < 0)
		new_live_tuples = 0;	/* just in case */

	pgstat_report_vacuum(RelationGetRelid(onerel),
						 onerel->rd_rel->relisshared,
						 new_live_tuples,
						 vacrelstats->new_dead_tuples);

	/* and log the action if appropriate */
	if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
	{
		TimestampTz endtime = GetCurrentTimestamp();

		if (Log_autovacuum_min_duration == 0 ||
			TimestampDifferenceExceeds(starttime, endtime,
									   Log_autovacuum_min_duration))
		{
			TimestampDifference(starttime, endtime, &secs, &usecs);

			read_rate = 0;
			write_rate = 0;
			if ((secs > 0) || (usecs > 0))
			{
				read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
					(secs + usecs / 1000000.0);
				write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
					(secs + usecs / 1000000.0);
			}
			ereport(LOG,
					(errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"
							"pages: %d removed, %d remain\n"
							"tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable\n"
							"buffer usage: %d hits, %d misses, %d dirtied\n"
							"avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
							"system usage: %s",
							get_database_name(MyDatabaseId),
							get_namespace_name(RelationGetNamespace(onerel)),
							RelationGetRelationName(onerel),
							vacrelstats->num_index_scans,
							vacrelstats->pages_removed,
							vacrelstats->rel_pages,
							vacrelstats->tuples_deleted,
							vacrelstats->new_rel_tuples,
							vacrelstats->new_dead_tuples,
							VacuumPageHit,
							VacuumPageMiss,
							VacuumPageDirty,
							read_rate, write_rate,
							pg_rusage_show(&ru0))));
		}
	}
}

/*
 * For Hot Standby we need to know the highest transaction id that will
 * be removed by any change.  VACUUM proceeds in a number of passes so
 * we need to consider how each pass operates.  The first phase runs
 * heap_page_prune(), which can issue XLOG_HEAP2_CLEAN records as it
 * progresses - these will have a latestRemovedXid on each record.
 * In some cases this removes all of the tuples to be removed, though
 * often we have dead tuples with index pointers so we must remember them
 * for removal in phase 3.  Index records for those rows are removed
 * in phase 2 and index blocks do not have MVCC information attached.
 * So before we can allow removal of any index tuples we need to issue
 * a WAL record containing the latestRemovedXid of rows that will be
 * removed in phase three.  This allows recovery queries to block at the
 * correct place, i.e. before phase two, rather than during phase three
 * which would be after the rows have become inaccessible.
 */
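/*
 * Rough map from the phases above to the code below (editor's summary, not
 * part of the original comment): phase one is heap_page_prune() inside
 * lazy_scan_heap(), phase two is lazy_vacuum_index() for each index, and
 * phase three is lazy_vacuum_heap().  This function emits the WAL record
 * that must precede phase two.
 */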
static void
vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
{
	/*
	 * Skip this for relations for which no WAL is to be written, or if we're
	 * not trying to support archive recovery.
	 */
	if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
		return;

	/*
	 * No need to write the record at all unless it contains a valid value
	 */
	if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
		(void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
}

/*
 *	lazy_scan_heap() -- scan an open heap relation
 *
 *		This routine prunes each page in the heap, which will among other
 *		things truncate dead tuples to dead line pointers, defragment the
 *		page, and set commit status bits (see heap_page_prune).  It also
 *		builds lists of dead tuples and pages with free space, calculates
 *		statistics on the number of live tuples in the heap, and marks pages
 *		as all-visible if appropriate.  When done, or when we run low on space
 *		for dead-tuple TIDs, invoke vacuuming of indexes and call
 *		lazy_vacuum_heap to reclaim dead line pointers.
 *
 *		If there are no indexes then we can reclaim line pointers on the fly;
 *		dead line pointers need only be retained until all index pointers that
 *		reference them have been killed.
 */
static void
lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
			   Relation *Irel, int nindexes, bool scan_all)
{
	BlockNumber nblocks,
				blkno;
	HeapTupleData tuple;
	char	   *relname;
	BlockNumber empty_pages,
				vacuumed_pages;
	double		num_tuples,
				tups_vacuumed,
				nkeep,
				nunused;
	IndexBulkDeleteResult **indstats;
	int			i;
	PGRUsage	ru0;
	Buffer		vmbuffer = InvalidBuffer;
	BlockNumber next_not_all_visible_block;
	bool		skipping_all_visible_blocks;
	xl_heap_freeze_tuple *frozen;

	pg_rusage_init(&ru0);

	relname = RelationGetRelationName(onerel);
	ereport(elevel,
			(errmsg("vacuuming \"%s.%s\"",
					get_namespace_name(RelationGetNamespace(onerel)),
					relname)));

	empty_pages = vacuumed_pages = 0;
	num_tuples = tups_vacuumed = nkeep = nunused = 0;

	indstats = (IndexBulkDeleteResult **)
		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));

	nblocks = RelationGetNumberOfBlocks(onerel);
	vacrelstats->rel_pages = nblocks;
	vacrelstats->scanned_pages = 0;
	vacrelstats->nonempty_pages = 0;
	vacrelstats->latestRemovedXid = InvalidTransactionId;

	lazy_space_alloc(vacrelstats, nblocks);
	frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);

	/*
	 * We want to skip pages that don't require vacuuming according to the
	 * visibility map, but only when we can skip at least SKIP_PAGES_THRESHOLD
	 * consecutive pages.  Since we're reading sequentially, the OS should be
	 * doing readahead for us, so there's no gain in skipping a page now and
	 * then; that's likely to disable readahead and so be counterproductive.
	 * Also, skipping even a single page means that we can't update
	 * relfrozenxid, so we only want to do it if we can skip a goodly number
	 * of pages.
	 *
	 * Before entering the main loop, establish the invariant that
	 * next_not_all_visible_block is the next block number >= blkno that's not
	 * all-visible according to the visibility map, or nblocks if there's no
	 * such block.  Also, we set up the skipping_all_visible_blocks flag,
	 * which is needed because we need hysteresis in the decision: once we've
	 * started skipping blocks, we may as well skip everything up to the next
	 * not-all-visible block.
	 *
	 * Note: if scan_all is true, we won't actually skip any pages; but we
	 * maintain next_not_all_visible_block anyway, so as to set up the
	 * all_visible_according_to_vm flag correctly for each page.
	 *
	 * Note: The value returned by visibilitymap_test could be slightly
	 * out-of-date, since we make this test before reading the corresponding
	 * heap page or locking the buffer.  This is OK.  If we mistakenly think
	 * that the page is all-visible when in fact the flag's just been cleared,
	 * we might fail to vacuum the page.  But it's OK to skip pages when
	 * scan_all is not set, so no great harm done; the next vacuum will find
	 * them.  If we make the reverse mistake and vacuum a page unnecessarily,
	 * it'll just be a no-op.
	 */
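	/*
	 * Illustration (editor's note): with SKIP_PAGES_THRESHOLD = 32, a run of
	 * only 31 all-visible blocks is still read in full, preserving both OS
	 * readahead and the chance to advance relfrozenxid; only longer runs of
	 * all-visible blocks are actually skipped.
	 */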
	for (next_not_all_visible_block = 0;
		 next_not_all_visible_block < nblocks;
		 next_not_all_visible_block++)
	{
		if (!visibilitymap_test(onerel, next_not_all_visible_block, &vmbuffer))
			break;
		vacuum_delay_point();
	}
	if (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD)
		skipping_all_visible_blocks = true;
	else
		skipping_all_visible_blocks = false;

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		Buffer		buf;
		Page		page;
		OffsetNumber offnum,
					maxoff;
		bool		tupgone,
					hastup;
		int			prev_dead_count;
		int			nfrozen;
		Size		freespace;
		bool		all_visible_according_to_vm;
		bool		all_visible;
		bool		has_dead_tuples;
		TransactionId visibility_cutoff_xid = InvalidTransactionId;

		if (blkno == next_not_all_visible_block)
		{
			/* Time to advance next_not_all_visible_block */
			for (next_not_all_visible_block++;
				 next_not_all_visible_block < nblocks;
				 next_not_all_visible_block++)
			{
				if (!visibilitymap_test(onerel, next_not_all_visible_block,
										&vmbuffer))
					break;
				vacuum_delay_point();
			}

			/*
			 * We know we can't skip the current block.  But set up
			 * skipping_all_visible_blocks to do the right thing at the
			 * following blocks.
			 */
			if (next_not_all_visible_block - blkno > SKIP_PAGES_THRESHOLD)
				skipping_all_visible_blocks = true;
			else
				skipping_all_visible_blocks = false;
			all_visible_according_to_vm = false;
		}
		else
		{
			/* Current block is all-visible */
			if (skipping_all_visible_blocks && !scan_all)
				continue;
			all_visible_according_to_vm = true;
		}

		vacuum_delay_point();

		/*
		 * If we are close to overrunning the available space for dead-tuple
		 * TIDs, pause and do a cycle of vacuuming before we tackle this page.
		 */
		if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
			vacrelstats->num_dead_tuples > 0)
		{
			/*
			 * Before beginning index vacuuming, we release any pin we may
			 * hold on the visibility map page.  This isn't necessary for
			 * correctness, but we do it anyway to avoid holding the pin
			 * across a lengthy, unrelated operation.
			 */
			if (BufferIsValid(vmbuffer))
			{
				ReleaseBuffer(vmbuffer);
				vmbuffer = InvalidBuffer;
			}

			/* Log cleanup info before we touch indexes */
			vacuum_log_cleanup_info(onerel, vacrelstats);

			/* Remove index entries */
			for (i = 0; i < nindexes; i++)
				lazy_vacuum_index(Irel[i],
								  &indstats[i],
								  vacrelstats);

			/* Remove tuples from heap */
			lazy_vacuum_heap(onerel, vacrelstats);

			/*
			 * Forget the now-vacuumed tuples, and press on, but be careful
			 * not to reset latestRemovedXid since we want that value to be
			 * valid.
			 */
			vacrelstats->num_dead_tuples = 0;
			vacrelstats->num_index_scans++;
		}

		/*
		 * Pin the visibility map page in case we need to mark the page
		 * all-visible.  In most cases this will be very cheap, because we'll
		 * already have the correct page pinned anyway.  However, it's
		 * possible that (a) next_not_all_visible_block is covered by a
		 * different VM page than the current block or (b) we released our pin
		 * and did a cycle of index vacuuming.
		 */
		visibilitymap_pin(onerel, blkno, &vmbuffer);

		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
								 RBM_NORMAL, vac_strategy);

		/* We need buffer cleanup lock so that we can prune HOT chains. */
		if (!ConditionalLockBufferForCleanup(buf))
		{
			/*
			 * If we're not scanning the whole relation to guard against XID
			 * wraparound, it's OK to skip vacuuming a page.  The next vacuum
			 * will clean it up.
			 */
			if (!scan_all)
			{
				ReleaseBuffer(buf);
				continue;
			}

			/*
			 * If this is a wraparound checking vacuum, then we read the page
			 * with share lock to see if any xids need to be frozen.  If the
			 * page doesn't need attention we just skip and continue.  If it
			 * does, we wait for cleanup lock.
			 *
			 * We could defer the lock request further by remembering the page
			 * and coming back to it later, or we could even register
			 * ourselves for multiple buffers and then service whichever one
			 * is received first.  For now, this seems good enough.
			 */
			LockBuffer(buf, BUFFER_LOCK_SHARE);
			if (!lazy_check_needs_freeze(buf))
			{
				UnlockReleaseBuffer(buf);
				vacrelstats->scanned_pages++;
				continue;
			}
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
			LockBufferForCleanup(buf);
			/* drop through to normal processing */
		}

		vacrelstats->scanned_pages++;

		page = BufferGetPage(buf);

		if (PageIsNew(page))
		{
			/*
			 * An all-zeroes page could be left over if a backend extends the
			 * relation but crashes before initializing the page.  Reclaim
			 * such pages for use.
			 *
			 * We have to be careful here because we could be looking at a
			 * page that someone has just added to the relation and not yet
			 * been able to initialize (see RelationGetBufferForTuple).  To
			 * protect against that, release the buffer lock, grab the
			 * relation extension lock momentarily, and re-lock the buffer. If
			 * the page is still uninitialized by then, it must be left over
			 * from a crashed backend, and we can initialize it.
			 *
			 * We don't really need the relation lock when this is a new or
			 * temp relation, but it's probably not worth the code space to
			 * check that, since this surely isn't a critical path.
			 *
			 * Note: the comparable code in vacuum.c need not worry because
			 * it's got exclusive lock on the whole relation.
			 */
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
			LockRelationForExtension(onerel, ExclusiveLock);
			UnlockRelationForExtension(onerel, ExclusiveLock);
			LockBufferForCleanup(buf);
			if (PageIsNew(page))
			{
				ereport(WARNING,
				(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
						relname, blkno)));
				PageInit(page, BufferGetPageSize(buf), 0);
				empty_pages++;
			}
			freespace = PageGetHeapFreeSpace(page);
			MarkBufferDirty(buf);
			UnlockReleaseBuffer(buf);

			RecordPageWithFreeSpace(onerel, blkno, freespace);
			continue;
		}

		if (PageIsEmpty(page))
		{
			empty_pages++;
			freespace = PageGetHeapFreeSpace(page);

			/* empty pages are always all-visible */
			if (!PageIsAllVisible(page))
			{
				START_CRIT_SECTION();

				/* mark buffer dirty before writing a WAL record */
				MarkBufferDirty(buf);

				/*
				 * It's possible that another backend has extended the heap,
				 * initialized the page, and then failed to WAL-log the page
				 * due to an ERROR.  Since heap extension is not WAL-logged,
				 * recovery might try to replay our record setting the
				 * page all-visible and find that the page isn't initialized,
				 * which will cause a PANIC.  To prevent that, check whether
				 * the page has been previously WAL-logged, and if not, do
				 * that now.
				 */
				if (RelationNeedsWAL(onerel) &&
					PageGetLSN(page) == InvalidXLogRecPtr)
					log_newpage_buffer(buf, true);

				PageSetAllVisible(page);
				visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
								  vmbuffer, InvalidTransactionId);
				END_CRIT_SECTION();
			}

			UnlockReleaseBuffer(buf);
			RecordPageWithFreeSpace(onerel, blkno, freespace);
			continue;
		}

		/*
		 * Prune all HOT-update chains in this page.
		 *
		 * We count tuples removed by the pruning step as removed by VACUUM.
		 */
		tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
										 &vacrelstats->latestRemovedXid);

		/*
		 * Now scan the page to collect vacuumable items and check for tuples
		 * requiring freezing.
		 */
		all_visible = true;
		has_dead_tuples = false;
		nfrozen = 0;
		hastup = false;
		prev_dead_count = vacrelstats->num_dead_tuples;
		maxoff = PageGetMaxOffsetNumber(page);

		/*
		 * Note: If you change anything in the loop below, also look at
		 * heap_page_is_all_visible to see if that needs to be changed.
		 */
		for (offnum = FirstOffsetNumber;
			 offnum <= maxoff;
			 offnum = OffsetNumberNext(offnum))
		{
			ItemId		itemid;

			itemid = PageGetItemId(page, offnum);

			/* Unused items require no processing, but we count 'em */
			if (!ItemIdIsUsed(itemid))
			{
				nunused += 1;
				continue;
			}

			/* Redirect items mustn't be touched */
			if (ItemIdIsRedirected(itemid))
			{
				hastup = true;	/* this page won't be truncatable */
				continue;
			}

			ItemPointerSet(&(tuple.t_self), blkno, offnum);

			/*
			 * DEAD item pointers are to be vacuumed normally; but we don't
			 * count them in tups_vacuumed, else we'd be double-counting (at
			 * least in the common case where heap_page_prune() just freed up
			 * a non-HOT tuple).
			 */
			if (ItemIdIsDead(itemid))
			{
				lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
				all_visible = false;
				continue;
			}

			Assert(ItemIdIsNormal(itemid));

			tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
			tuple.t_len = ItemIdGetLength(itemid);
			tuple.t_tableOid = RelationGetRelid(onerel);

			tupgone = false;

			switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
			{
				case HEAPTUPLE_DEAD:

					/*
					 * Ordinarily, DEAD tuples would have been removed by
					 * heap_page_prune(), but it's possible that the tuple
					 * state changed since heap_page_prune() looked.  In
					 * particular an INSERT_IN_PROGRESS tuple could have
					 * changed to DEAD if the inserter aborted.  So this
					 * cannot be considered an error condition.
					 *
					 * If the tuple is HOT-updated then it must only be
					 * removed by a prune operation; so we keep it just as if
					 * it were RECENTLY_DEAD.  Also, if it's a heap-only
					 * tuple, we choose to keep it, because it'll be a lot
					 * cheaper to get rid of it in the next pruning pass than
					 * to treat it like an indexed tuple.
					 */
					if (HeapTupleIsHotUpdated(&tuple) ||
						HeapTupleIsHeapOnly(&tuple))
						nkeep += 1;
					else
						tupgone = true; /* we can delete the tuple */
					all_visible = false;
					break;
				case HEAPTUPLE_LIVE:
					/* Tuple is good --- but let's do some validity checks */
					if (onerel->rd_rel->relhasoids &&
						!OidIsValid(HeapTupleGetOid(&tuple)))
						elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
							 relname, blkno, offnum);

					/*
					 * Is the tuple definitely visible to all transactions?
					 *
					 * NB: Like with per-tuple hint bits, we can't set the
					 * PD_ALL_VISIBLE flag if the inserter committed
					 * asynchronously.  See SetHintBits for more info.  Check
					 * that the tuple is hinted xmin-committed because of
					 * that.
					 */
					if (all_visible)
					{
						TransactionId xmin;

						if (!HeapTupleHeaderXminCommitted(tuple.t_data))
						{
							all_visible = false;
							break;
						}

						/*
						 * The inserter definitely committed.  But is it old
						 * enough that everyone sees it as committed?
						 */
						xmin = HeapTupleHeaderGetXmin(tuple.t_data);
						if (!TransactionIdPrecedes(xmin, OldestXmin))
						{
							all_visible = false;
							break;
						}

						/* Track newest xmin on page. */
						if (TransactionIdFollows(xmin, visibility_cutoff_xid))
							visibility_cutoff_xid = xmin;
					}
					break;
				case HEAPTUPLE_RECENTLY_DEAD:

					/*
					 * If tuple is recently deleted then we must not remove it
					 * from relation.
					 */
					nkeep += 1;
					all_visible = false;
					break;
				case HEAPTUPLE_INSERT_IN_PROGRESS:
					/* This is an expected case during concurrent vacuum */
					all_visible = false;
					break;
				case HEAPTUPLE_DELETE_IN_PROGRESS:
					/* This is an expected case during concurrent vacuum */
					all_visible = false;
					break;
				default:
					elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
					break;
			}

			if (tupgone)
			{
				lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
				HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
											 &vacrelstats->latestRemovedXid);
				tups_vacuumed += 1;
				has_dead_tuples = true;
			}
			else
			{
				num_tuples += 1;
				hastup = true;

				/*
				 * Each non-removable tuple must be checked to see if it needs
				 * freezing.  Note we already have exclusive buffer lock.
				 */
				if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
										  MultiXactCutoff, &frozen[nfrozen]))
					frozen[nfrozen++].offset = offnum;
			}
		}						/* scan along page */

		/*
		 * If we froze any tuples, mark the buffer dirty, and write a WAL
		 * record recording the changes.  We must log the changes to be
		 * crash-safe against future truncation of CLOG.
		 */
		if (nfrozen > 0)
		{
			START_CRIT_SECTION();

			MarkBufferDirty(buf);

			/* execute collected freezes */
			for (i = 0; i < nfrozen; i++)
			{
				ItemId		itemid;
				HeapTupleHeader htup;

				itemid = PageGetItemId(page, frozen[i].offset);
				htup = (HeapTupleHeader) PageGetItem(page, itemid);

				heap_execute_freeze_tuple(htup, &frozen[i]);
			}

			/* Now WAL-log freezing if necessary */
			if (RelationNeedsWAL(onerel))
			{
				XLogRecPtr	recptr;

				recptr = log_heap_freeze(onerel, buf, FreezeLimit,
										 frozen, nfrozen);
				PageSetLSN(page, recptr);
			}

			END_CRIT_SECTION();
		}

		/*
		 * If there are no indexes then we can vacuum the page right now
		 * instead of doing a second scan.
		 */
		if (nindexes == 0 &&
			vacrelstats->num_dead_tuples > 0)
		{
			/* Remove tuples from heap */
			lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
			has_dead_tuples = false;

			/*
			 * Forget the now-vacuumed tuples, and press on, but be careful
			 * not to reset latestRemovedXid since we want that value to be
			 * valid.
			 */
			vacrelstats->num_dead_tuples = 0;
			vacuumed_pages++;
		}

		freespace = PageGetHeapFreeSpace(page);

		/* mark page all-visible, if appropriate */
		if (all_visible && !all_visible_according_to_vm)
		{
			/*
			 * It should never be the case that the visibility map page is set
			 * while the page-level bit is clear, but the reverse is allowed
			 * (if checksums are not enabled).  Regardless, set both bits so
			 * that we get back in sync.
			 *
			 * NB: If the heap page is all-visible but the VM bit is not set,
			 * we don't need to dirty the heap page.  However, if checksums
			 * are enabled, we do need to make sure that the heap page is
			 * dirtied before passing it to visibilitymap_set(), because it
			 * may be logged.  Given that this situation should only happen in
			 * rare cases after a crash, it is not worth optimizing.
			 */
			PageSetAllVisible(page);
			MarkBufferDirty(buf);
			visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
							  vmbuffer, visibility_cutoff_xid);
		}

		/*
		 * As of PostgreSQL 9.2, the visibility map bit should never be set if
		 * the page-level bit is clear.  However, it's possible that the bit
		 * got cleared after we checked it and before we took the buffer
		 * content lock, so we must recheck before jumping to the conclusion
		 * that something bad has happened.
		 */
		else if (all_visible_according_to_vm && !PageIsAllVisible(page)
				 && visibilitymap_test(onerel, blkno, &vmbuffer))
		{
			elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
				 relname, blkno);
			visibilitymap_clear(onerel, blkno, vmbuffer);
		}

		/*
		 * It's possible for the value returned by GetOldestXmin() to move
		 * backwards, so it's not wrong for us to see tuples that appear to
		 * not be visible to everyone yet, while PD_ALL_VISIBLE is already
		 * set.  The real safe xmin value never moves backwards, but
		 * GetOldestXmin() is conservative and sometimes returns a value
		 * that's unnecessarily small, so if we see that contradiction it just
		 * means that the tuples that we think are not visible to everyone yet
		 * actually are, and the PD_ALL_VISIBLE flag is correct.
		 *
		 * There should never be dead tuples on a page with PD_ALL_VISIBLE
		 * set, however.
		 */
		else if (PageIsAllVisible(page) && has_dead_tuples)
		{
			elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
				 relname, blkno);
			PageClearAllVisible(page);
			MarkBufferDirty(buf);
			visibilitymap_clear(onerel, blkno, vmbuffer);
		}

		UnlockReleaseBuffer(buf);

		/* Remember the location of the last page with nonremovable tuples */
		if (hastup)
			vacrelstats->nonempty_pages = blkno + 1;

		/*
		 * If we remembered any tuples for deletion, then the page will be
		 * visited again by lazy_vacuum_heap, which will compute and record
		 * its post-compaction free space.  If not, then we're done with this
		 * page, so remember its free space as-is.  (This path will always be
		 * taken if there are no indexes.)
		 */
		if (vacrelstats->num_dead_tuples == prev_dead_count)
			RecordPageWithFreeSpace(onerel, blkno, freespace);
	}

	pfree(frozen);

	/* save stats for use later */
	vacrelstats->scanned_tuples = num_tuples;
	vacrelstats->tuples_deleted = tups_vacuumed;
	vacrelstats->new_dead_tuples = nkeep;

	/* now we can compute the new value for pg_class.reltuples */
	vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
														 nblocks,
												  vacrelstats->scanned_pages,
														 num_tuples);

	/*
	 * Release any remaining pin on visibility map page.
	 */
	if (BufferIsValid(vmbuffer))
	{
		ReleaseBuffer(vmbuffer);
		vmbuffer = InvalidBuffer;
	}

	/* If any tuples need to be deleted, perform final vacuum cycle */
	/* XXX put a threshold on min number of tuples here? */
	if (vacrelstats->num_dead_tuples > 0)
	{
		/* Log cleanup info before we touch indexes */
		vacuum_log_cleanup_info(onerel, vacrelstats);

		/* Remove index entries */
		for (i = 0; i < nindexes; i++)
			lazy_vacuum_index(Irel[i],
							  &indstats[i],
							  vacrelstats);

		/* Remove tuples from heap */
		lazy_vacuum_heap(onerel, vacrelstats);
		vacrelstats->num_index_scans++;
	}

	/* Do post-vacuum cleanup and statistics update for each index */
	for (i = 0; i < nindexes; i++)
		lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);

	/* If no indexes, make log report that lazy_vacuum_heap would've made */
	if (vacuumed_pages)
		ereport(elevel,
				(errmsg("\"%s\": removed %.0f row versions in %u pages",
						RelationGetRelationName(onerel),
						tups_vacuumed, vacuumed_pages)));

	ereport(elevel,
			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
					RelationGetRelationName(onerel),
					tups_vacuumed, num_tuples,
					vacrelstats->scanned_pages, nblocks),
			 errdetail("%.0f dead row versions cannot be removed yet.\n"
					   "There were %.0f unused item pointers.\n"
					   "%u pages are entirely empty.\n"
					   "%s.",
					   nkeep,
					   nunused,
					   empty_pages,
					   pg_rusage_show(&ru0))));
}


/*
 *	lazy_vacuum_heap() -- second pass over the heap
 *
 *		This routine marks dead tuples as unused and compacts out free
 *		space on their pages.  Pages not having dead tuples recorded from
 *		lazy_scan_heap are not visited at all.
 *
 *		Note: the reason for doing this as a second pass is we cannot remove
 *		the tuples until we've removed their index entries, and we want to
 *		process index entry removal in batches as large as possible.
 */
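/*
 * Cost intuition (editor's note): each time the dead-TID array fills during
 * the heap scan, every index gets a full bulk-delete pass before this second
 * heap pass can run.  A table with three indexes whose scan overflows twice
 * therefore pays for three passes over each index (two mid-scan plus the
 * final one), i.e. nine index scans, which is why the array is sized as
 * generously as maintenance_work_mem allows.
 */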
static void
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
{
	int			tupindex;
	int			npages;
	PGRUsage	ru0;
	Buffer		vmbuffer = InvalidBuffer;

	pg_rusage_init(&ru0);
	npages = 0;

	tupindex = 0;
	while (tupindex < vacrelstats->num_dead_tuples)
	{
		BlockNumber tblk;
		Buffer		buf;
		Page		page;
		Size		freespace;

		vacuum_delay_point();

		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
								 vac_strategy);
		if (!ConditionalLockBufferForCleanup(buf))
		{
			ReleaseBuffer(buf);
			++tupindex;
			continue;
		}
		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
									&vmbuffer);

		/* Now that we've compacted the page, record its available space */
		page = BufferGetPage(buf);
		freespace = PageGetHeapFreeSpace(page);

		UnlockReleaseBuffer(buf);
		RecordPageWithFreeSpace(onerel, tblk, freespace);
		npages++;
	}

	if (BufferIsValid(vmbuffer))
	{
		ReleaseBuffer(vmbuffer);
		vmbuffer = InvalidBuffer;
	}

	ereport(elevel,
			(errmsg("\"%s\": removed %d row versions in %d pages",
					RelationGetRelationName(onerel),
					tupindex, npages),
			 errdetail("%s.",
					   pg_rusage_show(&ru0))));
}

/*
 *	lazy_vacuum_page() -- free dead tuples on a page
 *					 and repair its fragmentation.
 *
 * Caller must hold pin and buffer cleanup lock on the buffer.
 *
 * tupindex is the index in vacrelstats->dead_tuples of the first dead
 * tuple for this page.  We assume the rest follow sequentially.
 * The return value is the first tupindex after the tuples of this page.
 */
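/*
 * Worked example (editor's illustration): if dead_tuples holds (5,1), (5,4),
 * (7,2) and we are called for block 5 with tupindex = 0, entries 0 and 1 are
 * marked unused and the return value is 2, the index of the first TID past
 * block 5.
 */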
static int
lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
				 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
{
	Page		page = BufferGetPage(buffer);
	OffsetNumber unused[MaxOffsetNumber];
	int			uncnt = 0;
	TransactionId visibility_cutoff_xid;

	START_CRIT_SECTION();

	for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
	{
		BlockNumber tblk;
		OffsetNumber toff;
		ItemId		itemid;

		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
		if (tblk != blkno)
			break;				/* past end of tuples for this block */
		toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
		itemid = PageGetItemId(page, toff);
		ItemIdSetUnused(itemid);
		unused[uncnt++] = toff;
	}

	PageRepairFragmentation(page);

	/*
	 * Mark buffer dirty before we write WAL.
	 */
	MarkBufferDirty(buffer);

	/* XLOG stuff */
	if (RelationNeedsWAL(onerel))
	{
		XLogRecPtr	recptr;

		recptr = log_heap_clean(onerel, buffer,
								NULL, 0, NULL, 0,
								unused, uncnt,
								vacrelstats->latestRemovedXid);
		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();

	/*
	 * Now that we have removed the dead tuples from the page, once again
	 * check if the page has become all-visible.
	 */
	if (!visibilitymap_test(onerel, blkno, vmbuffer) &&
		heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid))
	{
		Assert(BufferIsValid(*vmbuffer));
		PageSetAllVisible(page);
		visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr, *vmbuffer,
						  visibility_cutoff_xid);
	}

	return tupindex;
}

/*
 *	lazy_check_needs_freeze() -- scan page to see if any tuples
 *					 need to be cleaned to avoid wraparound
 *
 * Returns true if the page needs to be vacuumed using cleanup lock.
 */
static bool
lazy_check_needs_freeze(Buffer buf)
{
	Page		page;
	OffsetNumber offnum,
				maxoff;
	HeapTupleHeader tupleheader;

	page = BufferGetPage(buf);

	if (PageIsNew(page) || PageIsEmpty(page))
	{
		/* PageIsNew probably shouldn't happen... */
		return false;
	}

	maxoff = PageGetMaxOffsetNumber(page);
	for (offnum = FirstOffsetNumber;
		 offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		itemid;

		itemid = PageGetItemId(page, offnum);

		if (!ItemIdIsNormal(itemid))
			continue;

		tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);

		if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
									MultiXactCutoff, buf))
			return true;
	}							/* scan along page */

	return false;
}


/*
 *	lazy_vacuum_index() -- vacuum one index relation.
 *
 *		Delete all the index entries pointing to tuples listed in
 *		vacrelstats->dead_tuples, and update running statistics.
 */
static void
lazy_vacuum_index(Relation indrel,
				  IndexBulkDeleteResult **stats,
				  LVRelStats *vacrelstats)
{
	IndexVacuumInfo ivinfo;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);

	ivinfo.index = indrel;
	ivinfo.analyze_only = false;
	ivinfo.estimated_count = true;
	ivinfo.message_level = elevel;
	ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
	ivinfo.strategy = vac_strategy;

	/* Do bulk deletion */
	*stats = index_bulk_delete(&ivinfo, *stats,
							   lazy_tid_reaped, (void *) vacrelstats);

	ereport(elevel,
			(errmsg("scanned index \"%s\" to remove %d row versions",
					RelationGetRelationName(indrel),
					vacrelstats->num_dead_tuples),
			 errdetail("%s.", pg_rusage_show(&ru0))));
}

/*
 *	lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
 */
static void
lazy_cleanup_index(Relation indrel,
				   IndexBulkDeleteResult *stats,
				   LVRelStats *vacrelstats)
{
	IndexVacuumInfo ivinfo;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);

	ivinfo.index = indrel;
	ivinfo.analyze_only = false;
	ivinfo.estimated_count = (vacrelstats->scanned_pages < vacrelstats->rel_pages);
	ivinfo.message_level = elevel;
	ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
	ivinfo.strategy = vac_strategy;

	stats = index_vacuum_cleanup(&ivinfo, stats);

	if (!stats)
		return;

	/*
	 * Now update statistics in pg_class, but only if the index says the count
	 * is accurate.
	 */
	if (!stats->estimated_count)
		vac_update_relstats(indrel,
							stats->num_pages,
							stats->num_index_tuples,
							0,
							false,
							InvalidTransactionId,
							InvalidMultiXactId);

	ereport(elevel,
			(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
					RelationGetRelationName(indrel),
					stats->num_index_tuples,
					stats->num_pages),
			 errdetail("%.0f index row versions were removed.\n"
					   "%u index pages have been deleted, %u are currently reusable.\n"
					   "%s.",
					   stats->tuples_removed,
					   stats->pages_deleted, stats->pages_free,
					   pg_rusage_show(&ru0))));

	pfree(stats);
}

/*
 * lazy_truncate_heap - try to truncate off any empty pages at the end
 */
static void
lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
{
	BlockNumber old_rel_pages = vacrelstats->rel_pages;
	BlockNumber new_rel_pages;
	PGRUsage	ru0;
	int			lock_retry;

	pg_rusage_init(&ru0);

	/*
	 * Loop until no more truncating can be done.
	 */
	do
	{
		/*
		 * We need full exclusive lock on the relation in order to do
		 * truncation.  If we can't get it, give up rather than waiting --- we
		 * don't want to block other backends, and we don't want to deadlock
		 * (which is quite possible considering we already hold a lower-grade
		 * lock).
		 */
		vacrelstats->lock_waiter_detected = false;
		lock_retry = 0;
		while (true)
		{
			if (ConditionalLockRelation(onerel, AccessExclusiveLock))
				break;

			/*
			 * Check for interrupts while trying to (re-)acquire the exclusive
			 * lock.
			 */
			CHECK_FOR_INTERRUPTS();

			if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
								VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
			{
				/*
				 * We failed to establish the lock in the specified number of
				 * retries.  This means we give up truncating.
				 */
				vacrelstats->lock_waiter_detected = true;
				ereport(elevel,
						(errmsg("\"%s\": stopping truncate due to conflicting lock request",
								RelationGetRelationName(onerel))));
				return;
			}

			pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
		}

		/*
		 * Now that we have exclusive lock, look to see if the rel has grown
		 * whilst we were vacuuming with non-exclusive lock.  If so, give up;
		 * the newly added pages presumably contain non-deletable tuples.
		 */
		new_rel_pages = RelationGetNumberOfBlocks(onerel);
		if (new_rel_pages != old_rel_pages)
		{
			/*
			 * Note: we intentionally don't update vacrelstats->rel_pages with
			 * the new rel size here.  If we did, it would amount to assuming
			 * that the new pages are empty, which is unlikely.  Leaving the
			 * numbers alone amounts to assuming that the new pages have the
			 * same tuple density as existing ones, which is less unlikely.
			 */
			UnlockRelation(onerel, AccessExclusiveLock);
			return;
		}

		/*
		 * Scan backwards from the end to verify that the end pages actually
		 * contain no tuples.  This is *necessary*, not optional, because
		 * other backends could have added tuples to these pages whilst we
		 * were vacuuming.
		 */
		new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);

		if (new_rel_pages >= old_rel_pages)
		{
			/* can't do anything after all */
			UnlockRelation(onerel, AccessExclusiveLock);
			return;
		}

		/*
		 * Okay to truncate.
		 */
		RelationTruncate(onerel, new_rel_pages);

		/*
		 * We can release the exclusive lock as soon as we have truncated.
		 * Other backends can't safely access the relation until they have
		 * processed the smgr invalidation that smgrtruncate sent out ... but
		 * that should happen as part of standard invalidation processing once
		 * they acquire lock on the relation.
		 */
		UnlockRelation(onerel, AccessExclusiveLock);

		/*
		 * Update statistics.  Here, it *is* correct to adjust rel_pages
		 * without also touching reltuples, since the tuple count wasn't
		 * changed by the truncation.
		 */
		vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
		vacrelstats->rel_pages = new_rel_pages;

		ereport(elevel,
				(errmsg("\"%s\": truncated %u to %u pages",
						RelationGetRelationName(onerel),
						old_rel_pages, new_rel_pages),
				 errdetail("%s.",
						   pg_rusage_show(&ru0))));
		old_rel_pages = new_rel_pages;
	} while (new_rel_pages > vacrelstats->nonempty_pages &&
			 vacrelstats->lock_waiter_detected);
}

/*
 * Rescan end pages to verify that they are (still) empty of tuples.
 *
 * Returns number of nondeletable pages (last nonempty page + 1).
 */
static BlockNumber
count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
{
	BlockNumber blkno;
	instr_time	starttime;

	/* Initialize the starttime if we check for conflicting lock requests */
	INSTR_TIME_SET_CURRENT(starttime);

	/* Strange coding of loop control is needed because blkno is unsigned */
	blkno = vacrelstats->rel_pages;
	while (blkno > vacrelstats->nonempty_pages)
	{
		Buffer		buf;
		Page		page;
		OffsetNumber offnum,
					maxoff;
		bool		hastup;

		/*
		 * Check if another process requests a lock on our relation.  We are
		 * holding an AccessExclusiveLock here, so they will be waiting.  We
		 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
		 * only check if that interval has elapsed once every 32 blocks to
		 * keep the number of system calls and actual shared lock table
		 * lookups to a minimum.
		 */
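		/*
		 * Illustration (editor's note): the clock is read at most once per
		 * 32 blocks, and the shared lock table is probed only when at least
		 * VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL (20ms) has elapsed since the
		 * last probe, so the common per-block cost is a cheap modulo test.
		 */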
		if ((blkno % 32) == 0)
		{
			instr_time	currenttime;
			instr_time	elapsed;

			INSTR_TIME_SET_CURRENT(currenttime);
			elapsed = currenttime;
			INSTR_TIME_SUBTRACT(elapsed, starttime);
			if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
				>= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
			{
				if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
				{
					ereport(elevel,
							(errmsg("\"%s\": suspending truncate due to conflicting lock request",
									RelationGetRelationName(onerel))));

					vacrelstats->lock_waiter_detected = true;
					return blkno;
				}
				starttime = currenttime;
			}
		}

		/*
		 * We don't insert a vacuum delay point here, because we have an
		 * exclusive lock on the table which we want to hold for as short a
		 * time as possible.  We still need to check for interrupts however.
		 */
		CHECK_FOR_INTERRUPTS();

		blkno--;

		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
								 RBM_NORMAL, vac_strategy);

		/* In this phase we only need shared access to the buffer */
		LockBuffer(buf, BUFFER_LOCK_SHARE);

		page = BufferGetPage(buf);

		if (PageIsNew(page) || PageIsEmpty(page))
		{
			/* PageIsNew probably shouldn't happen... */
			UnlockReleaseBuffer(buf);
			continue;
		}

		hastup = false;
		maxoff = PageGetMaxOffsetNumber(page);
		for (offnum = FirstOffsetNumber;
			 offnum <= maxoff;
			 offnum = OffsetNumberNext(offnum))
		{
			ItemId		itemid;

			itemid = PageGetItemId(page, offnum);

			/*
			 * Note: any non-unused item should be taken as a reason to keep
			 * this page.  We formerly thought that DEAD tuples could be
			 * thrown away, but that's not so, because we'd not have cleaned
			 * out their index entries.
			 */
			if (ItemIdIsUsed(itemid))
			{
				hastup = true;
				break;			/* can stop scanning */
			}
		}						/* scan along page */

		UnlockReleaseBuffer(buf);

		/* Done scanning if we found a tuple here */
		if (hastup)
			return blkno + 1;
	}

	/*
	 * If we fall out of the loop, all the previously-thought-to-be-empty
	 * pages still are; we need not bother to look at the last known-nonempty
	 * page.
	 */
	return vacrelstats->nonempty_pages;
}

/*
 * lazy_space_alloc - space allocation decisions for lazy vacuum
 *
 * See the comments at the head of this file for rationale.
 */
static void
lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
{
	long		maxtuples;
	int			vac_work_mem = (IsAutoVacuumWorkerProcess() &&
								autovacuum_work_mem != -1) ?
						autovacuum_work_mem : maintenance_work_mem;

	if (vacrelstats->hasindex)
	{
		maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
		maxtuples = Min(maxtuples, INT_MAX);
		maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));

		/* curious coding here to ensure the multiplication can't overflow */
		if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
			maxtuples = relblocks * LAZY_ALLOC_TUPLES;

		/* stay sane if small maintenance_work_mem */
		maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
	}
	else
		maxtuples = MaxHeapTuplesPerPage;

	vacrelstats->num_dead_tuples = 0;
	vacrelstats->max_dead_tuples = (int) maxtuples;
	vacrelstats->dead_tuples = (ItemPointer)
		palloc(maxtuples * sizeof(ItemPointerData));
}
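
/*
 * Worked example (editor's illustration): with the default 8KB block size,
 * MaxHeapTuplesPerPage is 291, so a 1000-block table with indexes is capped
 * at 291,000 TIDs (about 1.7MB) no matter how large maintenance_work_mem is,
 * while a 64MB setting would otherwise allow roughly 11 million TIDs.
 */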

/*
 * lazy_record_dead_tuple - remember one deletable tuple
 */
static void
lazy_record_dead_tuple(LVRelStats *vacrelstats,
					   ItemPointer itemptr)
{
	/*
	 * The array shouldn't overflow under normal behavior, but perhaps it
	 * could if we are given a really small maintenance_work_mem.  In that
	 * case, just forget the last few tuples (we'll get 'em next time).
	 */
	if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
	{
		vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
		vacrelstats->num_dead_tuples++;
	}
}

/*
 *	lazy_tid_reaped() -- is a particular tid deletable?
 *
 *		This has the right signature to be an IndexBulkDeleteCallback.
 *
 *		Assumes dead_tuples array is in sorted order.
 */
static bool
lazy_tid_reaped(ItemPointer itemptr, void *state)
{
	LVRelStats *vacrelstats = (LVRelStats *) state;
	ItemPointer res;

	res = (ItemPointer) bsearch((void *) itemptr,
								(void *) vacrelstats->dead_tuples,
								vacrelstats->num_dead_tuples,
								sizeof(ItemPointerData),
								vac_cmp_itemptr);

	return (res != NULL);
}
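
/*
 * Usage sketch (editor's note): index_bulk_delete() invokes this callback
 * once per index entry it visits; returning true tells the index AM to
 * delete that entry.  With N dead TIDs, the bsearch costs O(log N) per
 * index tuple visited.
 */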

/*
 * Comparator routines for use with qsort() and bsearch().
 */
static int
vac_cmp_itemptr(const void *left, const void *right)
{
	BlockNumber lblk,
				rblk;
	OffsetNumber loff,
				roff;

	lblk = ItemPointerGetBlockNumber((ItemPointer) left);
	rblk = ItemPointerGetBlockNumber((ItemPointer) right);

	if (lblk < rblk)
		return -1;
	if (lblk > rblk)
		return 1;

	loff = ItemPointerGetOffsetNumber((ItemPointer) left);
	roff = ItemPointerGetOffsetNumber((ItemPointer) right);

	if (loff < roff)
		return -1;
	if (loff > roff)
		return 1;

	return 0;
}

/*
 * Check if every tuple in the given page is visible to all current and future
 * transactions.  Also return the visibility_cutoff_xid which is the highest
 * xmin amongst the visible tuples.
 */
static bool
heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid)
{
	Page		page = BufferGetPage(buf);
	OffsetNumber offnum,
				maxoff;
	bool		all_visible = true;

	*visibility_cutoff_xid = InvalidTransactionId;

	/*
	 * This is a stripped down version of the line pointer scan in
	 * lazy_scan_heap().  So if you change anything here, also check that
	 * code.
	 */
	maxoff = PageGetMaxOffsetNumber(page);
	for (offnum = FirstOffsetNumber;
		 offnum <= maxoff && all_visible;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		itemid;
		HeapTupleData tuple;

		itemid = PageGetItemId(page, offnum);

		/* Unused or redirect line pointers are of no interest */
		if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
			continue;

		ItemPointerSet(&(tuple.t_self), BufferGetBlockNumber(buf), offnum);

		/*
		 * Dead line pointers can have index pointers pointing to them.  So
		 * they can't be treated as visible.
		 */
		if (ItemIdIsDead(itemid))
		{
			all_visible = false;
			break;
		}

		Assert(ItemIdIsNormal(itemid));

		tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
		tuple.t_len = ItemIdGetLength(itemid);
		tuple.t_tableOid = RelationGetRelid(rel);

		switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
		{
			case HEAPTUPLE_LIVE:
				{
					TransactionId xmin;

					/* Check comments in lazy_scan_heap. */
					if (!HeapTupleHeaderXminCommitted(tuple.t_data))
					{
						all_visible = false;
						break;
					}

					/*
					 * The inserter definitely committed.  But is it old
					 * enough that everyone sees it as committed?
					 */
					xmin = HeapTupleHeaderGetXmin(tuple.t_data);
					if (!TransactionIdPrecedes(xmin, OldestXmin))
					{
						all_visible = false;
						break;
					}

					/* Track newest xmin on page. */
					if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
						*visibility_cutoff_xid = xmin;
				}
				break;

			case HEAPTUPLE_DEAD:
			case HEAPTUPLE_RECENTLY_DEAD:
			case HEAPTUPLE_INSERT_IN_PROGRESS:
			case HEAPTUPLE_DELETE_IN_PROGRESS:
				all_visible = false;
				break;

			default:
				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
				break;
		}
	}							/* scan along page */

	return all_visible;
}