1 /*-------------------------------------------------------------------------
4 * Concurrent ("lazy") vacuuming.
7 * The major space usage for LAZY VACUUM is storage for the array of dead
8 * tuple TIDs, with the next biggest need being storage for per-disk-page
9 * free space info. We want to ensure we can vacuum even the very largest
10 * relations with finite memory space usage. To do that, we set upper bounds
11 * on the number of tuples and pages we will keep track of at once.
13 * We are willing to use at most maintenance_work_mem memory space to keep
14 * track of dead tuples. We initially allocate an array of TIDs of that size,
15 * with an upper limit that depends on table size (this limit ensures we don't
16 * allocate a huge area uselessly for vacuuming small tables). If the array
17 * threatens to overflow, we suspend the heap scan phase and perform a pass of
18 * index cleanup and page compaction, then resume the heap scan with an empty
19 * TID array.
21 * If we're processing a table with no indexes, we can just vacuum each page
22 * as we go; there's no need to save up multiple tuples to minimize the number
23 * of index scans performed. So we don't use maintenance_work_mem memory for
24 * the TID array, just enough to hold as many heap tuples as fit on one page.
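*
* (Illustrative arithmetic, not part of the original comment: with 8K pages,
* MaxHeapTuplesPerPage is about 291 and sizeof(ItemPointerData) is 6 bytes,
* so a 64MB maintenance_work_mem holds roughly 11.2 million dead-tuple TIDs,
* enough for one index-cleanup pass to cover some 38,000 heap pages even if
* every tuple on them is dead.)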
27 * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
28 * Portions Copyright (c) 1994, Regents of the University of California
32 * src/backend/commands/vacuumlazy.c
34 *-------------------------------------------------------------------------
40 #include "access/genam.h"
41 #include "access/heapam.h"
42 #include "access/heapam_xlog.h"
43 #include "access/htup_details.h"
44 #include "access/multixact.h"
45 #include "access/transam.h"
46 #include "access/visibilitymap.h"
47 #include "catalog/storage.h"
48 #include "commands/dbcommands.h"
49 #include "commands/vacuum.h"
50 #include "miscadmin.h"
52 #include "portability/instr_time.h"
53 #include "postmaster/autovacuum.h"
54 #include "storage/bufmgr.h"
55 #include "storage/freespace.h"
56 #include "storage/lmgr.h"
57 #include "utils/lsyscache.h"
58 #include "utils/memutils.h"
59 #include "utils/pg_rusage.h"
60 #include "utils/timestamp.h"
61 #include "utils/tqual.h"
65 * Space/time tradeoff parameters: do these need to be user-tunable?
67 * To consider truncating the relation, we want there to be at least
68 * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
69 * is less) potentially-freeable pages.
71 #define REL_TRUNCATE_MINIMUM 1000
72 #define REL_TRUNCATE_FRACTION 16
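/*
 * (Worked example, not from the original source: a 64,000-page table is
 * considered for truncation once Min(1000, 64000/16 = 4000) = 1000 pages at
 * its end look freeable, while a 4,800-page table needs only
 * Min(1000, 4800/16) = 300 such pages; see the test in lazy_vacuum_rel.)
 */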
75 * Timing parameters for truncate locking heuristics.
77 * These were not exposed as user tunable GUC values because it didn't seem
78 * that the potential for improvement was great enough to merit the cost of
79 * supporting them.
81 #define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
82 #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
83 #define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
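/*
 * (Arithmetic implied by the constants above: the truncation code retries
 * ConditionalLockRelation every 50 ms for up to 5000 ms, i.e. at most
 * 5000/50 = 100 attempts, and the backward scan checks for lock waiters at
 * most once every 20 ms.)
 */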
86 * Guesstimation of number of dead tuples per page. This is used to
87 * provide an upper limit to memory allocated when vacuuming small
88 * tables.
90 #define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage
93 * Before we consider skipping a page that's marked as clean in
94 * visibility map, we must've seen at least this many clean pages.
96 #define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)
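/*
 * (Rationale, restated from the comments above the main loop in
 * lazy_scan_heap: skipping fewer than this many consecutive pages tends to
 * defeat OS readahead, and skipping even one page forfeits the relfrozenxid
 * update, so short runs of all-visible pages are read anyway.)
 */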
98 typedef struct LVRelStats
99 {
100 /* hasindex = true means two-pass strategy; false means one-pass */
101 bool hasindex;
102 /* Overall statistics about rel */
103 BlockNumber old_rel_pages; /* previous value of pg_class.relpages */
104 BlockNumber rel_pages; /* total number of pages */
105 BlockNumber scanned_pages; /* number of pages we examined */
106 double scanned_tuples; /* counts only tuples on scanned pages */
107 double old_rel_tuples; /* previous value of pg_class.reltuples */
108 double new_rel_tuples; /* new estimated total # of tuples */
109 BlockNumber pages_removed;
110 double tuples_deleted;
111 BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
112 /* List of TIDs of tuples we intend to delete */
113 /* NB: this list is ordered by TID address */
114 int num_dead_tuples; /* current # of entries */
115 int max_dead_tuples; /* # slots allocated in array */
116 ItemPointer dead_tuples; /* array of ItemPointerData */
117 int num_index_scans;
118 TransactionId latestRemovedXid;
119 bool lock_waiter_detected;
120 } LVRelStats;
123 /* A few variables that don't seem worth passing around as parameters */
124 static int elevel = -1;
126 static TransactionId OldestXmin;
127 static TransactionId FreezeLimit;
128 static MultiXactId MultiXactFrzLimit;
130 static BufferAccessStrategy vac_strategy;
133 /* non-export function prototypes */
134 static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
135 Relation *Irel, int nindexes, bool scan_all);
136 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
137 static bool lazy_check_needs_freeze(Buffer buf);
138 static void lazy_vacuum_index(Relation indrel,
139 IndexBulkDeleteResult **stats,
140 LVRelStats *vacrelstats);
141 static void lazy_cleanup_index(Relation indrel,
142 IndexBulkDeleteResult *stats,
143 LVRelStats *vacrelstats);
144 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
145 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
146 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
147 static BlockNumber count_nondeletable_pages(Relation onerel,
148 LVRelStats *vacrelstats);
149 static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
150 static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
151 ItemPointer itemptr);
152 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
153 static int vac_cmp_itemptr(const void *left, const void *right);
154 static bool heap_page_is_all_visible(Buffer buf,
155 TransactionId *visibility_cutoff_xid);
159 * lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
161 * This routine vacuums a single heap, cleans out its indexes, and
162 * updates its relpages and reltuples statistics.
164 * At entry, we have already established a transaction and opened
165 * and locked the relation.
168 lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
169 BufferAccessStrategy bstrategy)
171 LVRelStats *vacrelstats;
172 Relation *Irel;
173 int nindexes;
174 BlockNumber possibly_freeable;
175 PGRUsage ru0;
176 TimestampTz starttime = 0;
177 long secs;
178 int usecs;
179 double read_rate,
180 write_rate;
181 bool scan_all;
182 TransactionId freezeTableLimit;
183 BlockNumber new_rel_pages;
184 double new_rel_tuples;
185 BlockNumber new_rel_allvisible;
186 TransactionId new_frozen_xid;
187 MultiXactId new_min_multi;
189 /* measure elapsed time iff autovacuum logging requires it */
190 if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
192 pg_rusage_init(&ru0);
193 starttime = GetCurrentTimestamp();
196 if (vacstmt->options & VACOPT_VERBOSE)
197 elevel = INFO;
198 else
199 elevel = DEBUG2;
201 vac_strategy = bstrategy;
203 vacuum_set_xid_limits(vacstmt->freeze_min_age, vacstmt->freeze_table_age,
204 onerel->rd_rel->relisshared,
205 &OldestXmin, &FreezeLimit, &freezeTableLimit,
206 &MultiXactFrzLimit);
207 scan_all = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
208 freezeTableLimit);
210 vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
212 vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
213 vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
214 vacrelstats->num_index_scans = 0;
215 vacrelstats->pages_removed = 0;
216 vacrelstats->lock_waiter_detected = false;
218 /* Open all indexes of the relation */
219 vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
220 vacrelstats->hasindex = (nindexes > 0);
222 /* Do the vacuuming */
223 lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, scan_all);
225 /* Done with indexes */
226 vac_close_indexes(nindexes, Irel, NoLock);
229 * Optionally truncate the relation.
231 * Don't even think about it unless we have a shot at releasing a goodly
232 * number of pages. Otherwise, the time taken isn't worth it.
234 possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
235 if (possibly_freeable > 0 &&
236 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
237 possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION))
238 lazy_truncate_heap(onerel, vacrelstats);
240 /* Vacuum the Free Space Map */
241 FreeSpaceMapVacuum(onerel);
244 * Update statistics in pg_class.
246 * A corner case here is that if we scanned no pages at all because every
247 * page is all-visible, we should not update relpages/reltuples, because
248 * we have no new information to contribute. In particular this keeps us
249 * from replacing relpages=reltuples=0 (which means "unknown tuple
250 * density") with nonzero relpages and reltuples=0 (which means "zero
251 * tuple density") unless there's some actual evidence for the latter.
253 * We do update relallvisible even in the corner case, since if the table
254 * is all-visible we'd definitely like to know that. But clamp the value
255 * to be not more than what we're setting relpages to.
257 * Also, don't change relfrozenxid if we skipped any pages, since then we
258 * don't know for certain that all tuples have a newer xmin.
260 new_rel_pages = vacrelstats->rel_pages;
261 new_rel_tuples = vacrelstats->new_rel_tuples;
262 if (vacrelstats->scanned_pages == 0 && new_rel_pages > 0)
263 {
264 new_rel_pages = vacrelstats->old_rel_pages;
265 new_rel_tuples = vacrelstats->old_rel_tuples;
266 }
268 new_rel_allvisible = visibilitymap_count(onerel);
269 if (new_rel_allvisible > new_rel_pages)
270 new_rel_allvisible = new_rel_pages;
272 new_frozen_xid = FreezeLimit;
273 if (vacrelstats->scanned_pages < vacrelstats->rel_pages)
274 new_frozen_xid = InvalidTransactionId;
276 new_min_multi = MultiXactFrzLimit;
277 if (vacrelstats->scanned_pages < vacrelstats->rel_pages)
278 new_min_multi = InvalidMultiXactId;
280 vac_update_relstats(onerel,
281 new_rel_pages,
282 new_rel_tuples,
283 new_rel_allvisible,
284 vacrelstats->hasindex,
285 new_frozen_xid,
286 new_min_multi);
288 /* report results to the stats collector, too */
289 pgstat_report_vacuum(RelationGetRelid(onerel),
290 onerel->rd_rel->relisshared,
291 new_rel_tuples);
293 /* and log the action if appropriate */
294 if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
296 TimestampTz endtime = GetCurrentTimestamp();
298 if (Log_autovacuum_min_duration == 0 ||
299 TimestampDifferenceExceeds(starttime, endtime,
300 Log_autovacuum_min_duration))
302 TimestampDifference(starttime, endtime, &secs, &usecs);
304 read_rate = 0;
305 write_rate = 0;
306 if ((secs > 0) || (usecs > 0))
308 read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
309 (secs + usecs / 1000000.0);
310 write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
311 (secs + usecs / 1000000.0);
314 (errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"
315 "pages: %d removed, %d remain\n"
316 "tuples: %.0f removed, %.0f remain\n"
317 "buffer usage: %d hits, %d misses, %d dirtied\n"
318 "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
320 get_database_name(MyDatabaseId),
321 get_namespace_name(RelationGetNamespace(onerel)),
322 RelationGetRelationName(onerel),
323 vacrelstats->num_index_scans,
324 vacrelstats->pages_removed,
325 vacrelstats->rel_pages,
326 vacrelstats->tuples_deleted,
327 vacrelstats->new_rel_tuples,
328 VacuumPageHit,
329 VacuumPageMiss,
330 VacuumPageDirty,
331 read_rate, write_rate,
332 pg_rusage_show(&ru0))));
338 * For Hot Standby we need to know the highest transaction id that will
339 * be removed by any change. VACUUM proceeds in a number of passes so
340 * we need to consider how each pass operates. The first phase runs
341 * heap_page_prune(), which can issue XLOG_HEAP2_CLEAN records as it
342 * progresses - these will have a latestRemovedXid on each record.
343 * In some cases this removes all of the tuples to be removed, though
344 * often we have dead tuples with index pointers so we must remember them
345 * for removal in phase 3. Index records for those rows are removed
346 * in phase 2 and index blocks do not have MVCC information attached.
347 * So before we can allow removal of any index tuples we need to issue
348 * a WAL record containing the latestRemovedXid of rows that will be
349 * removed in phase three. This allows recovery queries to block at the
350 * correct place, i.e. before phase two, rather than during phase three
351 * which would be after the rows have become inaccessible.
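*
* (Summary sketch of the ordering described above: phase one's pruning emits
* XLOG_HEAP2_CLEAN records carrying latestRemovedXid; vacuum_log_cleanup_info
* below then emits a single cleanup-info record; only after that does phase
* two remove index entries and phase three reclaim heap line pointers via
* lazy_vacuum_heap.)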
354 vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
355 {
357 * Skip this for relations for which no WAL is to be written, or if we're
358 * not trying to support archive recovery.
360 if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
361 return;
364 * No need to write the record at all unless it contains a valid value
366 if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
367 (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
368 }
371 * lazy_scan_heap() -- scan an open heap relation
373 * This routine prunes each page in the heap, which will among other
374 * things truncate dead tuples to dead line pointers, defragment the
375 * page, and set commit status bits (see heap_page_prune). It also builds
376 * lists of dead tuples and pages with free space, calculates statistics
377 * on the number of live tuples in the heap, and marks pages as
378 * all-visible if appropriate. When done, or when we run low on space for
379 * dead-tuple TIDs, invoke vacuuming of indexes and call lazy_vacuum_heap
380 * to reclaim dead line pointers.
382 * If there are no indexes then we can reclaim line pointers on the fly;
383 * dead line pointers need only be retained until all index pointers that
384 * reference them have been killed.
387 lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
388 Relation *Irel, int nindexes, bool scan_all)
389 {
390 BlockNumber nblocks,
391 blkno;
392 HeapTupleData tuple;
393 char *relname;
394 BlockNumber empty_pages,
395 vacuumed_pages;
396 double num_tuples,
397 tups_vacuumed,
398 nkeep,
399 nunused;
400 IndexBulkDeleteResult **indstats;
401 int i;
402 PGRUsage ru0;
403 Buffer vmbuffer = InvalidBuffer;
404 BlockNumber next_not_all_visible_block;
405 bool skipping_all_visible_blocks;
407 pg_rusage_init(&ru0);
409 relname = RelationGetRelationName(onerel);
410 ereport(elevel,
411 (errmsg("vacuuming \"%s.%s\"",
412 get_namespace_name(RelationGetNamespace(onerel)),
413 relname)));
415 empty_pages = vacuumed_pages = 0;
416 num_tuples = tups_vacuumed = nkeep = nunused = 0;
418 indstats = (IndexBulkDeleteResult **)
419 palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
421 nblocks = RelationGetNumberOfBlocks(onerel);
422 vacrelstats->rel_pages = nblocks;
423 vacrelstats->scanned_pages = 0;
424 vacrelstats->nonempty_pages = 0;
425 vacrelstats->latestRemovedXid = InvalidTransactionId;
427 lazy_space_alloc(vacrelstats, nblocks);
430 * We want to skip pages that don't require vacuuming according to the
431 * visibility map, but only when we can skip at least SKIP_PAGES_THRESHOLD
432 * consecutive pages. Since we're reading sequentially, the OS should be
433 * doing readahead for us, so there's no gain in skipping a page now and
434 * then; that's likely to disable readahead and so be counterproductive.
435 * Also, skipping even a single page means that we can't update
436 * relfrozenxid, so we only want to do it if we can skip a goodly number
437 * of pages.
439 * Before entering the main loop, establish the invariant that
440 * next_not_all_visible_block is the next block number >= blkno that's not
441 * all-visible according to the visibility map, or nblocks if there's no
442 * such block. Also, we set up the skipping_all_visible_blocks flag,
443 * which is needed because we need hysteresis in the decision: once we've
444 * started skipping blocks, we may as well skip everything up to the next
445 * not-all-visible block.
447 * Note: if scan_all is true, we won't actually skip any pages; but we
448 * maintain next_not_all_visible_block anyway, so as to set up the
449 * all_visible_according_to_vm flag correctly for each page.
451 * Note: The value returned by visibilitymap_test could be slightly
452 * out-of-date, since we make this test before reading the corresponding
453 * heap page or locking the buffer. This is OK. If we mistakenly think
454 * that the page is all-visible when in fact the flag's just been cleared,
455 * we might fail to vacuum the page. But it's OK to skip pages when
456 * scan_all is not set, so no great harm done; the next vacuum will find
457 * them. If we make the reverse mistake and vacuum a page unnecessarily,
458 * it'll just be a no-op.
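*
* (Example of the hysteresis, for illustration: with SKIP_PAGES_THRESHOLD of
* 32, a short run of all-visible pages is still read; only once a
* long-enough run begins do we skip everything up to the next
* not-all-visible block.)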
460 for (next_not_all_visible_block = 0;
461 next_not_all_visible_block < nblocks;
462 next_not_all_visible_block++)
464 if (!visibilitymap_test(onerel, next_not_all_visible_block, &vmbuffer))
465 break;
466 vacuum_delay_point();
468 if (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD)
469 skipping_all_visible_blocks = true;
471 skipping_all_visible_blocks = false;
473 for (blkno = 0; blkno < nblocks; blkno++)
474 {
475 Buffer buf;
476 Page page;
477 OffsetNumber offnum,
478 maxoff;
479 bool tupgone,
480 hastup;
481 int prev_dead_count;
482 OffsetNumber frozen[MaxOffsetNumber];
483 int nfrozen;
484 Size freespace;
485 bool all_visible_according_to_vm;
486 bool all_visible;
487 bool has_dead_tuples;
488 TransactionId visibility_cutoff_xid = InvalidTransactionId;
490 if (blkno == next_not_all_visible_block)
491 {
492 /* Time to advance next_not_all_visible_block */
493 for (next_not_all_visible_block++;
494 next_not_all_visible_block < nblocks;
495 next_not_all_visible_block++)
497 if (!visibilitymap_test(onerel, next_not_all_visible_block,
498 &vmbuffer))
499 break;
500 vacuum_delay_point();
504 * We know we can't skip the current block. But set up
505 * skipping_all_visible_blocks to do the right thing at the
506 * following blocks.
508 if (next_not_all_visible_block - blkno > SKIP_PAGES_THRESHOLD)
509 skipping_all_visible_blocks = true;
511 skipping_all_visible_blocks = false;
512 all_visible_according_to_vm = false;
513 }
514 else
515 {
516 /* Current block is all-visible */
517 if (skipping_all_visible_blocks && !scan_all)
518 continue;
519 all_visible_according_to_vm = true;
520 }
522 vacuum_delay_point();
525 * If we are close to overrunning the available space for dead-tuple
526 * TIDs, pause and do a cycle of vacuuming before we tackle this page.
528 if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
529 vacrelstats->num_dead_tuples > 0)
530 {
532 * Before beginning index vacuuming, we release any pin we may
533 * hold on the visibility map page. This isn't necessary for
534 * correctness, but we do it anyway to avoid holding the pin
535 * across a lengthy, unrelated operation.
537 if (BufferIsValid(vmbuffer))
538 {
539 ReleaseBuffer(vmbuffer);
540 vmbuffer = InvalidBuffer;
541 }
543 /* Log cleanup info before we touch indexes */
544 vacuum_log_cleanup_info(onerel, vacrelstats);
546 /* Remove index entries */
547 for (i = 0; i < nindexes; i++)
548 lazy_vacuum_index(Irel[i],
549 &indstats[i],
550 vacrelstats);
551 /* Remove tuples from heap */
552 lazy_vacuum_heap(onerel, vacrelstats);
555 * Forget the now-vacuumed tuples, and press on, but be careful
556 * not to reset latestRemovedXid since we want that value to be
557 * valid.
558 */
559 vacrelstats->num_dead_tuples = 0;
560 vacrelstats->num_index_scans++;
561 }
564 * Pin the visibility map page in case we need to mark the page
565 * all-visible. In most cases this will be very cheap, because we'll
566 * already have the correct page pinned anyway. However, it's
567 * possible that (a) next_not_all_visible_block is covered by a
568 * different VM page than the current block or (b) we released our pin
569 * and did a cycle of index vacuuming.
571 visibilitymap_pin(onerel, blkno, &vmbuffer);
573 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
574 RBM_NORMAL, vac_strategy);
576 /* We need buffer cleanup lock so that we can prune HOT chains. */
577 if (!ConditionalLockBufferForCleanup(buf))
578 {
580 * If we're not scanning the whole relation to guard against XID
581 * wraparound, it's OK to skip vacuuming a page. The next vacuum
582 * will clean it up.
583 */
584 if (!scan_all)
585 {
586 ReleaseBuffer(buf);
587 continue;
588 }
591 * If this is a wraparound checking vacuum, then we read the page
592 * with share lock to see if any xids need to be frozen. If the
593 * page doesn't need attention we just skip and continue. If it
594 * does, we wait for cleanup lock.
596 * We could defer the lock request further by remembering the page
597 * and coming back to it later, or we could even register
598 * ourselves for multiple buffers and then service whichever one
599 * is received first. For now, this seems good enough.
601 LockBuffer(buf, BUFFER_LOCK_SHARE);
602 if (!lazy_check_needs_freeze(buf))
603 {
604 UnlockReleaseBuffer(buf);
605 continue;
606 }
607 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
608 LockBufferForCleanup(buf);
609 /* drop through to normal processing */
610 }
612 vacrelstats->scanned_pages++;
614 page = BufferGetPage(buf);
616 if (PageIsNew(page))
617 {
619 * An all-zeroes page could be left over if a backend extends the
620 * relation but crashes before initializing the page. Reclaim such
621 * pages for use.
623 * We have to be careful here because we could be looking at a
624 * page that someone has just added to the relation and not yet
625 * been able to initialize (see RelationGetBufferForTuple). To
626 * protect against that, release the buffer lock, grab the
627 * relation extension lock momentarily, and re-lock the buffer. If
628 * the page is still uninitialized by then, it must be left over
629 * from a crashed backend, and we can initialize it.
631 * We don't really need the relation lock when this is a new or
632 * temp relation, but it's probably not worth the code space to
633 * check that, since this surely isn't a critical path.
635 * Note: the comparable code in vacuum.c need not worry because
636 * it's got exclusive lock on the whole relation.
638 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
639 LockRelationForExtension(onerel, ExclusiveLock);
640 UnlockRelationForExtension(onerel, ExclusiveLock);
641 LockBufferForCleanup(buf);
642 if (PageIsNew(page))
643 {
644 ereport(WARNING,
645 (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
647 PageInit(page, BufferGetPageSize(buf), 0);
648 empty_pages++;
649 }
650 freespace = PageGetHeapFreeSpace(page);
651 MarkBufferDirty(buf);
652 UnlockReleaseBuffer(buf);
654 RecordPageWithFreeSpace(onerel, blkno, freespace);
655 continue;
656 }
658 if (PageIsEmpty(page))
659 {
660 empty_pages++;
661 freespace = PageGetHeapFreeSpace(page);
663 /* empty pages are always all-visible */
664 if (!PageIsAllVisible(page))
666 PageSetAllVisible(page);
667 MarkBufferDirty(buf);
668 visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
669 vmbuffer, InvalidTransactionId);
672 UnlockReleaseBuffer(buf);
673 RecordPageWithFreeSpace(onerel, blkno, freespace);
674 continue;
675 }
678 * Prune all HOT-update chains in this page.
680 * We count tuples removed by the pruning step as removed by VACUUM.
682 tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
683 &vacrelstats->latestRemovedXid);
686 * Now scan the page to collect vacuumable items and check for tuples
687 * requiring freezing.
688 */
689 all_visible = true;
690 has_dead_tuples = false;
691 nfrozen = 0;
692 hastup = false;
693 prev_dead_count = vacrelstats->num_dead_tuples;
694 maxoff = PageGetMaxOffsetNumber(page);
697 * Note: If you change anything in the loop below, also look at
698 * heap_page_is_all_visible to see if that needs to be changed.
700 for (offnum = FirstOffsetNumber;
701 offnum <= maxoff;
702 offnum = OffsetNumberNext(offnum))
703 {
704 ItemId itemid;
706 itemid = PageGetItemId(page, offnum);
708 /* Unused items require no processing, but we count 'em */
709 if (!ItemIdIsUsed(itemid))
710 {
711 nunused += 1;
712 continue;
713 }
715 /* Redirect items mustn't be touched */
716 if (ItemIdIsRedirected(itemid))
717 {
718 hastup = true; /* this page won't be truncatable */
719 continue;
720 }
722 ItemPointerSet(&(tuple.t_self), blkno, offnum);
725 * DEAD item pointers are to be vacuumed normally; but we don't
726 * count them in tups_vacuumed, else we'd be double-counting (at
727 * least in the common case where heap_page_prune() just freed up
728 * a non-HOT tuple).
730 if (ItemIdIsDead(itemid))
731 {
732 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
733 all_visible = false;
734 continue;
735 }
737 Assert(ItemIdIsNormal(itemid));
739 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
740 tuple.t_len = ItemIdGetLength(itemid);
742 tupgone = false;
744 switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf))
745 {
746 case HEAPTUPLE_DEAD:
749 * Ordinarily, DEAD tuples would have been removed by
750 * heap_page_prune(), but it's possible that the tuple
751 * state changed since heap_page_prune() looked. In
752 * particular an INSERT_IN_PROGRESS tuple could have
753 * changed to DEAD if the inserter aborted. So this
754 * cannot be considered an error condition.
756 * If the tuple is HOT-updated then it must only be
757 * removed by a prune operation; so we keep it just as if
758 * it were RECENTLY_DEAD. Also, if it's a heap-only
759 * tuple, we choose to keep it, because it'll be a lot
760 * cheaper to get rid of it in the next pruning pass than
761 * to treat it like an indexed tuple.
763 if (HeapTupleIsHotUpdated(&tuple) ||
764 HeapTupleIsHeapOnly(&tuple))
765 nkeep += 1;
766 else
767 tupgone = true; /* we can delete the tuple */
768 all_visible = false;
769 break;
770 case HEAPTUPLE_LIVE:
771 /* Tuple is good --- but let's do some validity checks */
772 if (onerel->rd_rel->relhasoids &&
773 !OidIsValid(HeapTupleGetOid(&tuple)))
774 elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
775 relname, blkno, offnum);
778 * Is the tuple definitely visible to all transactions?
780 * NB: Like with per-tuple hint bits, we can't set the
781 * PD_ALL_VISIBLE flag if the inserter committed
782 * asynchronously. See SetHintBits for more info. Check
783 * that the HEAP_XMIN_COMMITTED hint bit is set because of
784 * that.
785 */
787 if (all_visible)
788 {
789 TransactionId xmin;
790 if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
791 {
792 all_visible = false;
793 break;
794 }
797 * The inserter definitely committed. But is it old
798 * enough that everyone sees it as committed?
800 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
801 if (!TransactionIdPrecedes(xmin, OldestXmin))
802 {
803 all_visible = false;
804 break;
805 }
807 /* Track newest xmin on page. */
808 if (TransactionIdFollows(xmin, visibility_cutoff_xid))
809 visibility_cutoff_xid = xmin;
810 }
811 break;
812 case HEAPTUPLE_RECENTLY_DEAD:
815 * If tuple is recently deleted then we must not remove it
816 * from relation.
817 */
818 nkeep += 1;
819 all_visible = false;
820 break;
821 case HEAPTUPLE_INSERT_IN_PROGRESS:
822 /* This is an expected case during concurrent vacuum */
823 all_visible = false;
824 break;
825 case HEAPTUPLE_DELETE_IN_PROGRESS:
826 /* This is an expected case during concurrent vacuum */
827 all_visible = false;
828 break;
829 default:
830 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
831 break;
832 }
834 if (tupgone)
835 {
836 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
837 HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
838 &vacrelstats->latestRemovedXid);
839 tups_vacuumed += 1;
840 has_dead_tuples = true;
841 }
842 else
843 {
844 num_tuples += 1;
845 hastup = true;
848 * Each non-removable tuple must be checked to see if it needs
849 * freezing. Note we already have exclusive buffer lock.
851 if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
852 MultiXactFrzLimit))
853 frozen[nfrozen++] = offnum;
854 }
855 } /* scan along page */
858 * If we froze any tuples, mark the buffer dirty, and write a WAL
859 * record recording the changes. We must log the changes to be
860 * crash-safe against future truncation of CLOG.
861 */
862 if (nfrozen > 0)
863 {
864 MarkBufferDirty(buf);
865 if (RelationNeedsWAL(onerel))
866 {
867 XLogRecPtr recptr;
869 recptr = log_heap_freeze(onerel, buf, FreezeLimit,
870 MultiXactFrzLimit, frozen, nfrozen);
871 PageSetLSN(page, recptr);
872 }
873 }
876 * If there are no indexes then we can vacuum the page right now
877 * instead of doing a second scan.
878 */
879 if (nindexes == 0 &&
880 vacrelstats->num_dead_tuples > 0)
881 {
882 /* Remove tuples from heap */
883 lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
886 * Forget the now-vacuumed tuples, and press on, but be careful
887 * not to reset latestRemovedXid since we want that value to be
888 * valid.
889 */
890 vacrelstats->num_dead_tuples = 0;
891 vacuumed_pages++;
892 }
894 freespace = PageGetHeapFreeSpace(page);
896 /* mark page all-visible, if appropriate */
897 if (all_visible && !all_visible_according_to_vm)
898 {
900 * It should never be the case that the visibility map page is set
901 * while the page-level bit is clear, but the reverse is allowed
902 * (if checksums are not enabled). Regardless, set both bits
903 * so that we get back in sync.
905 * NB: If the heap page is all-visible but the VM bit is not set,
906 * we don't need to dirty the heap page. However, if checksums are
907 * enabled, we do need to make sure that the heap page is dirtied
908 * before passing it to visibilitymap_set(), because it may be
909 * logged. Given that this situation should only happen in rare
910 * cases after a crash, it is not worth optimizing.
912 PageSetAllVisible(page);
913 MarkBufferDirty(buf);
914 visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
915 vmbuffer, visibility_cutoff_xid);
916 }
919 * As of PostgreSQL 9.2, the visibility map bit should never be set if
920 * the page-level bit is clear. However, it's possible that the bit
921 * got cleared after we checked it and before we took the buffer
922 * content lock, so we must recheck before jumping to the conclusion
923 * that something bad has happened.
925 else if (all_visible_according_to_vm && !PageIsAllVisible(page)
926 && visibilitymap_test(onerel, blkno, &vmbuffer))
927 {
928 elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
930 visibilitymap_clear(onerel, blkno, vmbuffer);
931 }
934 * It's possible for the value returned by GetOldestXmin() to move
935 * backwards, so it's not wrong for us to see tuples that appear to
936 * not be visible to everyone yet, while PD_ALL_VISIBLE is already
937 * set. The real safe xmin value never moves backwards, but
938 * GetOldestXmin() is conservative and sometimes returns a value
939 * that's unnecessarily small, so if we see that contradiction it just
940 * means that the tuples that we think are not visible to everyone yet
941 * actually are, and the PD_ALL_VISIBLE flag is correct.
943 * There should never be dead tuples on a page with PD_ALL_VISIBLE
944 * set, however.
946 else if (PageIsAllVisible(page) && has_dead_tuples)
947 {
948 elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
950 PageClearAllVisible(page);
951 MarkBufferDirty(buf);
952 visibilitymap_clear(onerel, blkno, vmbuffer);
953 }
955 UnlockReleaseBuffer(buf);
957 /* Remember the location of the last page with nonremovable tuples */
958 if (hastup)
959 vacrelstats->nonempty_pages = blkno + 1;
962 * If we remembered any tuples for deletion, then the page will be
963 * visited again by lazy_vacuum_heap, which will compute and record
964 * its post-compaction free space. If not, then we're done with this
965 * page, so remember its free space as-is. (This path will always be
966 * taken if there are no indexes.)
968 if (vacrelstats->num_dead_tuples == prev_dead_count)
969 RecordPageWithFreeSpace(onerel, blkno, freespace);
970 }
972 /* save stats for use later */
973 vacrelstats->scanned_tuples = num_tuples;
974 vacrelstats->tuples_deleted = tups_vacuumed;
976 /* now we can compute the new value for pg_class.reltuples */
977 vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
978 nblocks,
979 vacrelstats->scanned_pages,
980 num_tuples);
983 * Release any remaining pin on visibility map page.
985 if (BufferIsValid(vmbuffer))
986 {
987 ReleaseBuffer(vmbuffer);
988 vmbuffer = InvalidBuffer;
989 }
991 /* If any tuples need to be deleted, perform final vacuum cycle */
992 /* XXX put a threshold on min number of tuples here? */
993 if (vacrelstats->num_dead_tuples > 0)
994 {
995 /* Log cleanup info before we touch indexes */
996 vacuum_log_cleanup_info(onerel, vacrelstats);
998 /* Remove index entries */
999 for (i = 0; i < nindexes; i++)
1000 lazy_vacuum_index(Irel[i],
1001 &indstats[i],
1002 vacrelstats);
1003 /* Remove tuples from heap */
1004 lazy_vacuum_heap(onerel, vacrelstats);
1005 vacrelstats->num_index_scans++;
1006 }
1008 /* Do post-vacuum cleanup and statistics update for each index */
1009 for (i = 0; i < nindexes; i++)
1010 lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
1012 /* If no indexes, make log report that lazy_vacuum_heap would've made */
1013 if (vacuumed_pages)
1014 ereport(elevel,
1015 (errmsg("\"%s\": removed %.0f row versions in %u pages",
1016 RelationGetRelationName(onerel),
1017 tups_vacuumed, vacuumed_pages)));
1019 ereport(elevel,
1020 (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1021 RelationGetRelationName(onerel),
1022 tups_vacuumed, num_tuples,
1023 vacrelstats->scanned_pages, nblocks),
1024 errdetail("%.0f dead row versions cannot be removed yet.\n"
1025 "There were %.0f unused item pointers.\n"
1026 "%u pages are entirely empty.\n"
1031 pg_rusage_show(&ru0))));
1032 }
1036 * lazy_vacuum_heap() -- second pass over the heap
1038 * This routine marks dead tuples as unused and compacts out free
1039 * space on their pages. Pages not having dead tuples recorded from
1040 * lazy_scan_heap are not visited at all.
1042 * Note: the reason for doing this as a second pass is we cannot remove
1043 * the tuples until we've removed their index entries, and we want to
1044 * process index entry removal in batches as large as possible.
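*
* (Put differently: each fill of the dead-tuple array costs one complete
* scan of every index, so vacrelstats->num_index_scans, incremented once per
* cycle in lazy_scan_heap, counts these batches.)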
1047 lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
1048 {
1049 int tupindex;
1050 int npages;
1051 PGRUsage ru0;
1052 Buffer vmbuffer = InvalidBuffer;
1054 pg_rusage_init(&ru0);
1055 npages = 0;
1057 tupindex = 0;
1058 while (tupindex < vacrelstats->num_dead_tuples)
1059 {
1060 BlockNumber tblk;
1061 Buffer buf;
1062 Page page;
1063 Size freespace;
1065 vacuum_delay_point();
1067 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1068 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
1070 if (!ConditionalLockBufferForCleanup(buf))
1071 {
1072 ReleaseBuffer(buf);
1073 ++tupindex;
1074 continue;
1075 }
1076 tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1077 &vmbuffer);
1079 /* Now that we've compacted the page, record its available space */
1080 page = BufferGetPage(buf);
1081 freespace = PageGetHeapFreeSpace(page);
1083 UnlockReleaseBuffer(buf);
1084 RecordPageWithFreeSpace(onerel, tblk, freespace);
1085 npages++;
1086 }
1088 if (BufferIsValid(vmbuffer))
1089 {
1090 ReleaseBuffer(vmbuffer);
1091 vmbuffer = InvalidBuffer;
1092 }
1095 (errmsg("\"%s\": removed %d row versions in %d pages",
1096 RelationGetRelationName(onerel),
1097 tupindex, npages),
1098 errdetail("%s.",
1099 pg_rusage_show(&ru0))));
1100 }
1103 * lazy_vacuum_page() -- free dead tuples on a page
1104 * and repair its fragmentation.
1106 * Caller must hold pin and buffer cleanup lock on the buffer.
1108 * tupindex is the index in vacrelstats->dead_tuples of the first dead
1109 * tuple for this page. We assume the rest follow sequentially.
1110 * The return value is the first tupindex after the tuples of this page.
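*
* (Caller pattern, as in lazy_vacuum_heap above: walk dead_tuples with
* tupindex, read and conditionally cleanup-lock each block, let this routine
* consume that block's entries, and resume from the returned index.)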
1113 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
1114 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
1115 {
1116 Page page = BufferGetPage(buffer);
1117 OffsetNumber unused[MaxOffsetNumber];
1118 int uncnt = 0;
1119 TransactionId visibility_cutoff_xid;
1121 START_CRIT_SECTION();
1123 for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
1124 {
1125 BlockNumber tblk;
1126 OffsetNumber toff;
1127 ItemId itemid;
1129 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1130 if (tblk != blkno)
1131 break; /* past end of tuples for this block */
1132 toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
1133 itemid = PageGetItemId(page, toff);
1134 ItemIdSetUnused(itemid);
1135 unused[uncnt++] = toff;
1136 }
1138 PageRepairFragmentation(page);
1141 * Mark buffer dirty before we write WAL.
1143 * If checksums are enabled, visibilitymap_set() may log the heap page, so
1144 * we must mark heap buffer dirty before calling visibilitymap_set().
1146 MarkBufferDirty(buffer);
1149 * Now that we have removed the dead tuples from the page, once again check
1150 * if the page has become all-visible.
1152 if (!visibilitymap_test(onerel, blkno, vmbuffer) &&
1153 heap_page_is_all_visible(buffer, &visibility_cutoff_xid))
1154 {
1155 Assert(BufferIsValid(*vmbuffer));
1156 PageSetAllVisible(page);
1157 visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr, *vmbuffer,
1158 visibility_cutoff_xid);
1159 }
1162 if (RelationNeedsWAL(onerel))
1163 {
1164 XLogRecPtr recptr;
1166 recptr = log_heap_clean(onerel, buffer,
1167 NULL, 0, NULL, 0,
1168 unused, uncnt,
1169 vacrelstats->latestRemovedXid);
1170 PageSetLSN(page, recptr);
1171 }
1173 END_CRIT_SECTION();
1175 return tupindex;
1176 }
1179 * lazy_check_needs_freeze() -- scan page to see if any tuples
1180 * need to be cleaned to avoid wraparound
1182 * Returns true if the page needs to be vacuumed using cleanup lock.
1185 lazy_check_needs_freeze(Buffer buf)
1186 {
1187 Page page;
1188 OffsetNumber offnum,
1189 maxoff;
1190 HeapTupleHeader tupleheader;
1192 page = BufferGetPage(buf);
1194 if (PageIsNew(page) || PageIsEmpty(page))
1195 {
1196 /* PageIsNew probably shouldn't happen... */
1197 return false;
1198 }
1200 maxoff = PageGetMaxOffsetNumber(page);
1201 for (offnum = FirstOffsetNumber;
1202 offnum <= maxoff;
1203 offnum = OffsetNumberNext(offnum))
1204 {
1205 ItemId itemid;
1207 itemid = PageGetItemId(page, offnum);
1209 if (!ItemIdIsNormal(itemid))
1210 continue;
1212 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1214 if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1215 MultiXactFrzLimit, buf))
1216 return true;
1217 } /* scan along page */
1219 return false;
1220 }
1224 * lazy_vacuum_index() -- vacuum one index relation.
1226 * Delete all the index entries pointing to tuples listed in
1227 * vacrelstats->dead_tuples, and update running statistics.
1230 lazy_vacuum_index(Relation indrel,
1231 IndexBulkDeleteResult **stats,
1232 LVRelStats *vacrelstats)
1233 {
1234 IndexVacuumInfo ivinfo;
1235 PGRUsage ru0;
1237 pg_rusage_init(&ru0);
1239 ivinfo.index = indrel;
1240 ivinfo.analyze_only = false;
1241 ivinfo.estimated_count = true;
1242 ivinfo.message_level = elevel;
1243 ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
1244 ivinfo.strategy = vac_strategy;
1246 /* Do bulk deletion */
1247 *stats = index_bulk_delete(&ivinfo, *stats,
1248 lazy_tid_reaped, (void *) vacrelstats);
1250 ereport(elevel,
1251 (errmsg("scanned index \"%s\" to remove %d row versions",
1252 RelationGetRelationName(indrel),
1253 vacrelstats->num_dead_tuples),
1254 errdetail("%s.", pg_rusage_show(&ru0))));
1255 }
1258 * lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
1261 lazy_cleanup_index(Relation indrel,
1262 IndexBulkDeleteResult *stats,
1263 LVRelStats *vacrelstats)
1264 {
1265 IndexVacuumInfo ivinfo;
1266 PGRUsage ru0;
1268 pg_rusage_init(&ru0);
1270 ivinfo.index = indrel;
1271 ivinfo.analyze_only = false;
1272 ivinfo.estimated_count = (vacrelstats->scanned_pages < vacrelstats->rel_pages);
1273 ivinfo.message_level = elevel;
1274 ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
1275 ivinfo.strategy = vac_strategy;
1277 stats = index_vacuum_cleanup(&ivinfo, stats);
1279 if (!stats)
1280 return;
1283 * Now update statistics in pg_class, but only if the index says the count
1284 * is accurate.
1285 */
1286 if (!stats->estimated_count)
1287 vac_update_relstats(indrel,
1288 stats->num_pages,
1289 stats->num_index_tuples,
1290 0,
1291 false,
1292 InvalidTransactionId,
1293 InvalidMultiXactId);
1295 ereport(elevel,
1296 (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
1297 RelationGetRelationName(indrel),
1298 stats->num_index_tuples,
1299 stats->num_pages),
1300 errdetail("%.0f index row versions were removed.\n"
1301 "%u index pages have been deleted, %u are currently reusable.\n"
1303 stats->tuples_removed,
1304 stats->pages_deleted, stats->pages_free,
1305 pg_rusage_show(&ru0))));
1306 }
1311 * lazy_truncate_heap - try to truncate off any empty pages at the end
1314 lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
1315 {
1316 BlockNumber old_rel_pages = vacrelstats->rel_pages;
1317 BlockNumber new_rel_pages;
1318 PGRUsage ru0;
1319 int lock_retry;
1321 pg_rusage_init(&ru0);
1324 * Loop until no more truncating can be done.
1325 */
1326 do
1327 {
1329 * We need full exclusive lock on the relation in order to do
1330 * truncation. If we can't get it, give up rather than waiting --- we
1331 * don't want to block other backends, and we don't want to deadlock
1332 * (which is quite possible considering we already hold a lower-grade
1335 vacrelstats->lock_waiter_detected = false;
1336 lock_retry = 0;
1337 while (true)
1338 {
1339 if (ConditionalLockRelation(onerel, AccessExclusiveLock))
1340 break;
1343 * Check for interrupts while trying to (re-)acquire the exclusive
1344 * lock.
1345 */
1346 CHECK_FOR_INTERRUPTS();
1348 if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
1349 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
1350 {
1352 * We failed to establish the lock in the specified number of
1353 * retries. This means we give up truncating.
1355 vacrelstats->lock_waiter_detected = true;
1356 ereport(elevel,
1357 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
1358 RelationGetRelationName(onerel))));
1359 return;
1360 }
1362 pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L); /* interval is in ms, but pg_usleep takes microseconds */
1363 }
1366 * Now that we have exclusive lock, look to see if the rel has grown
1367 * whilst we were vacuuming with non-exclusive lock. If so, give up;
1368 * the newly added pages presumably contain non-deletable tuples.
1370 new_rel_pages = RelationGetNumberOfBlocks(onerel);
1371 if (new_rel_pages != old_rel_pages)
1372 {
1374 * Note: we intentionally don't update vacrelstats->rel_pages with
1375 * the new rel size here. If we did, it would amount to assuming
1376 * that the new pages are empty, which is unlikely. Leaving the
1377 * numbers alone amounts to assuming that the new pages have the
1378 * same tuple density as existing ones, which is less unlikely.
1380 UnlockRelation(onerel, AccessExclusiveLock);
1381 return;
1382 }
1385 * Scan backwards from the end to verify that the end pages actually
1386 * contain no tuples. This is *necessary*, not optional, because
1387 * other backends could have added tuples to these pages whilst we
1390 new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1392 if (new_rel_pages >= old_rel_pages)
1393 {
1394 /* can't do anything after all */
1395 UnlockRelation(onerel, AccessExclusiveLock);
1396 return;
1397 }
1399 /*
1400 * Okay to truncate.
1401 */
1402 RelationTruncate(onerel, new_rel_pages);
1405 * We can release the exclusive lock as soon as we have truncated.
1406 * Other backends can't safely access the relation until they have
1407 * processed the smgr invalidation that smgrtruncate sent out ... but
1408 * that should happen as part of standard invalidation processing once
1409 * they acquire lock on the relation.
1411 UnlockRelation(onerel, AccessExclusiveLock);
1414 * Update statistics. Here, it *is* correct to adjust rel_pages
1415 * without also touching reltuples, since the tuple count wasn't
1416 * changed by the truncation.
1418 vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
1419 vacrelstats->rel_pages = new_rel_pages;
1422 (errmsg("\"%s\": truncated %u to %u pages",
1423 RelationGetRelationName(onerel),
1424 old_rel_pages, new_rel_pages),
1425 errdetail("%s.",
1426 pg_rusage_show(&ru0))));
1427 old_rel_pages = new_rel_pages;
1428 } while (new_rel_pages > vacrelstats->nonempty_pages &&
1429 vacrelstats->lock_waiter_detected);
1430 }
1433 * Rescan end pages to verify that they are (still) empty of tuples.
1435 * Returns number of nondeletable pages (last nonempty page + 1).
1438 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
1439 {
1440 BlockNumber blkno;
1441 instr_time starttime;
1443 /* Initialize the starttime if we check for conflicting lock requests */
1444 INSTR_TIME_SET_CURRENT(starttime);
1446 /* Strange coding of loop control is needed because blkno is unsigned */
1447 blkno = vacrelstats->rel_pages;
1448 while (blkno > vacrelstats->nonempty_pages)
1449 {
1450 Buffer buf;
1451 Page page;
1452 OffsetNumber offnum,
1453 maxoff;
1454 bool hastup;
1457 * Check if another process requests a lock on our relation. We are
1458 * holding an AccessExclusiveLock here, so they will be waiting. We
1459 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
1460 * only check if that interval has elapsed once every 32 blocks to
1461 * keep the number of system calls and actual shared lock table
1462 * lookups to a minimum.
1464 if ((blkno % 32) == 0)
1466 instr_time currenttime;
1467 instr_time elapsed;
1469 INSTR_TIME_SET_CURRENT(currenttime);
1470 elapsed = currenttime;
1471 INSTR_TIME_SUBTRACT(elapsed, starttime);
1472 if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
1473 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
1475 if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
1476 {
1477 ereport(elevel,
1478 (errmsg("\"%s\": suspending truncate due to conflicting lock request",
1479 RelationGetRelationName(onerel))));
1481 vacrelstats->lock_waiter_detected = true;
1482 return blkno;
1483 }
1484 starttime = currenttime;
1489 * We don't insert a vacuum delay point here, because we have an
1490 * exclusive lock on the table which we want to hold for as short a
1491 * time as possible. We still need to check for interrupts however.
1493 CHECK_FOR_INTERRUPTS();
1495 blkno--;
1497 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
1498 RBM_NORMAL, vac_strategy);
1500 /* In this phase we only need shared access to the buffer */
1501 LockBuffer(buf, BUFFER_LOCK_SHARE);
1503 page = BufferGetPage(buf);
1505 if (PageIsNew(page) || PageIsEmpty(page))
1506 {
1507 /* PageIsNew probably shouldn't happen... */
1508 UnlockReleaseBuffer(buf);
1509 continue;
1510 }
1512 hastup = false;
1513 maxoff = PageGetMaxOffsetNumber(page);
1514 for (offnum = FirstOffsetNumber;
1515 offnum <= maxoff;
1516 offnum = OffsetNumberNext(offnum))
1517 {
1518 ItemId itemid;
1520 itemid = PageGetItemId(page, offnum);
1523 * Note: any non-unused item should be taken as a reason to keep
1524 * this page. We formerly thought that DEAD tuples could be
1525 * thrown away, but that's not so, because we'd not have cleaned
1526 * out their index entries.
1528 if (ItemIdIsUsed(itemid))
1529 {
1530 hastup = true;
1531 break; /* can stop scanning */
1532 }
1533 } /* scan along page */
1535 UnlockReleaseBuffer(buf);
1537 /* Done scanning if we found a tuple here */
1538 if (hastup)
1539 return blkno + 1;
1540 }
1543 * If we fall out of the loop, all the previously-thought-to-be-empty
1544 * pages still are; we need not bother to look at the last known-nonempty
1545 * page.
1547 return vacrelstats->nonempty_pages;
1548 }
1551 * lazy_space_alloc - space allocation decisions for lazy vacuum
1553 * See the comments at the head of this file for rationale.
1556 lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
1557 {
1558 long maxtuples;
1560 if (vacrelstats->hasindex)
1561 {
1562 maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
1563 maxtuples = Min(maxtuples, INT_MAX);
1564 maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
1566 /* curious coding here to ensure the multiplication can't overflow */
1567 if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
1568 maxtuples = relblocks * LAZY_ALLOC_TUPLES;
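/*
 * (Reasoning sketch, not in the original: maxtuples was clamped to INT_MAX
 * above, so once the division-based test passes, relblocks *
 * LAZY_ALLOC_TUPLES is known to be below INT_MAX and cannot wrap the
 * 32-bit BlockNumber arithmetic; testing the product directly could
 * overflow for very large relations.)
 */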
1570 /* stay sane if small maintenance_work_mem */
1571 maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
1572 }
1573 else
1574 {
1575 maxtuples = MaxHeapTuplesPerPage;
1576 }
1578 vacrelstats->num_dead_tuples = 0;
1579 vacrelstats->max_dead_tuples = (int) maxtuples;
1580 vacrelstats->dead_tuples = (ItemPointer)
1581 palloc(maxtuples * sizeof(ItemPointerData));
1582 }
1585 * lazy_record_dead_tuple - remember one deletable tuple
1588 lazy_record_dead_tuple(LVRelStats *vacrelstats,
1589 ItemPointer itemptr)
1590 {
1592 * The array shouldn't overflow under normal behavior, but perhaps it
1593 * could if we are given a really small maintenance_work_mem. In that
1594 * case, just forget the last few tuples (we'll get 'em next time).
1596 if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
1597 {
1598 vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
1599 vacrelstats->num_dead_tuples++;
1600 }
1601 }
1604 * lazy_tid_reaped() -- is a particular tid deletable?
1606 * This has the right signature to be an IndexBulkDeleteCallback.
1608 * Assumes dead_tuples array is in sorted order.
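*
* (No explicit qsort is needed: lazy_scan_heap records TIDs in physical
* block/offset order, which is exactly the order vac_cmp_itemptr below
* compares by; see the "ordered by TID address" note in LVRelStats.)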
1611 lazy_tid_reaped(ItemPointer itemptr, void *state)
1612 {
1613 LVRelStats *vacrelstats = (LVRelStats *) state;
1614 ItemPointer res;
1616 res = (ItemPointer) bsearch((void *) itemptr,
1617 (void *) vacrelstats->dead_tuples,
1618 vacrelstats->num_dead_tuples,
1619 sizeof(ItemPointerData),
1620 vac_cmp_itemptr);
1622 return (res != NULL);
1623 }
1626 * Comparator routines for use with qsort() and bsearch().
1629 vac_cmp_itemptr(const void *left, const void *right)
1630 {
1631 BlockNumber lblk,
1632 rblk;
1633 OffsetNumber loff,
1634 roff;
1636 lblk = ItemPointerGetBlockNumber((ItemPointer) left);
1637 rblk = ItemPointerGetBlockNumber((ItemPointer) right);
1639 if (lblk < rblk)
1640 return -1;
1641 if (lblk > rblk)
1642 return 1;
1644 loff = ItemPointerGetOffsetNumber((ItemPointer) left);
1645 roff = ItemPointerGetOffsetNumber((ItemPointer) right);
1647 if (loff < roff)
1648 return -1;
1649 if (loff > roff)
1650 return 1;
1652 return 0;
1653 }
1656 * Check if every tuple in the given page is visible to all current and future
1657 * transactions. Also return the visibility_cutoff_xid which is the highest
1658 * xmin amongst the visible tuples.
1661 heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
1662 {
1663 Page page = BufferGetPage(buf);
1664 OffsetNumber offnum,
1665 maxoff;
1666 bool all_visible = true;
1668 *visibility_cutoff_xid = InvalidTransactionId;
1671 * This is a stripped down version of the line pointer scan in
1672 * lazy_scan_heap(). So if you change anything here, also check that
1675 maxoff = PageGetMaxOffsetNumber(page);
1676 for (offnum = FirstOffsetNumber;
1677 offnum <= maxoff && all_visible;
1678 offnum = OffsetNumberNext(offnum))
1679 {
1680 ItemId itemid;
1681 HeapTupleData tuple;
1683 itemid = PageGetItemId(page, offnum);
1685 /* Unused or redirect line pointers are of no interest */
1686 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
1687 continue;
1689 ItemPointerSet(&(tuple.t_self), BufferGetBlockNumber(buf), offnum);
1692 * Dead line pointers can have index pointers pointing to them. So they
1693 * can't be treated as visible.
1695 if (ItemIdIsDead(itemid))
1696 {
1697 all_visible = false;
1698 break;
1699 }
1701 Assert(ItemIdIsNormal(itemid));
1703 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
1705 switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf))
1706 {
1707 case HEAPTUPLE_LIVE:
1708 {
1709 TransactionId xmin;
1711 /* Check comments in lazy_scan_heap. */
1712 if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
1713 {
1714 all_visible = false;
1715 break;
1716 }
1719 * The inserter definitely committed. But is it old
1720 * enough that everyone sees it as committed?
1722 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1723 if (!TransactionIdPrecedes(xmin, OldestXmin))
1724 {
1725 all_visible = false;
1726 break;
1727 }
1729 /* Track newest xmin on page. */
1730 if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
1731 *visibility_cutoff_xid = xmin;
1732 }
1733 break;
1735 case HEAPTUPLE_DEAD:
1736 case HEAPTUPLE_RECENTLY_DEAD:
1737 case HEAPTUPLE_INSERT_IN_PROGRESS:
1738 case HEAPTUPLE_DELETE_IN_PROGRESS:
1739 all_visible = false;
1740 break;
1742 default:
1743 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1744 break;
1745 }
1746 } /* scan along page */
1748 return all_visible;
1749 }