1 /*-------------------------------------------------------------------------
4 * Concurrent ("lazy") vacuuming.
7 * The major space usage for LAZY VACUUM is storage for the array of dead
8 * tuple TIDs, with the next biggest need being storage for per-disk-page
9 * free space info. We want to ensure we can vacuum even the very largest
10 * relations with finite memory space usage. To do that, we set upper bounds
11 * on the number of tuples and pages we will keep track of at once.
13 * We are willing to use at most maintenance_work_mem memory space to keep
14 * track of dead tuples. We initially allocate an array of TIDs of that size,
15 * with an upper limit that depends on table size (this limit ensures we don't
16 * allocate a huge area uselessly for vacuuming small tables). If the array
17 * threatens to overflow, we suspend the heap scan phase and perform a pass of
18 * index cleanup and page compaction, then resume the heap scan with an empty
21 * If we're processing a table with no indexes, we can just vacuum each page
22 * as we go; there's no need to save up multiple tuples to minimize the number
23 * of index scans performed. So we don't use maintenance_work_mem memory for
24 * the TID array, just enough to hold as many heap tuples as fit on one page.
27 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
28 * Portions Copyright (c) 1994, Regents of the University of California
32 * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.134 2010/04/21 19:53:24 sriggs Exp $
34 *-------------------------------------------------------------------------
40 #include "access/genam.h"
41 #include "access/heapam.h"
42 #include "access/transam.h"
43 #include "access/visibilitymap.h"
44 #include "catalog/storage.h"
45 #include "commands/dbcommands.h"
46 #include "commands/vacuum.h"
47 #include "miscadmin.h"
49 #include "postmaster/autovacuum.h"
50 #include "storage/bufmgr.h"
51 #include "storage/freespace.h"
52 #include "storage/lmgr.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55 #include "utils/pg_rusage.h"
56 #include "utils/tqual.h"
60 * Space/time tradeoff parameters: do these need to be user-tunable?
62 * To consider truncating the relation, we want there to be at least
63 * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
64 * is less) potentially-freeable pages.
66 #define REL_TRUNCATE_MINIMUM 1000
67 #define REL_TRUNCATE_FRACTION 16
70 * Guesstimation of number of dead tuples per page. This is used to
71 * provide an upper limit to memory allocated when vacuuming small
74 #define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage
77 * Before we consider skipping a page that's marked as clean in
78 * visibility map, we must've seen at least this many clean pages.
/* Skipping even one page forfeits the relfrozenxid/reltuples update (see
 * lazy_scan_heap), so we only skip when a long all-visible run makes it
 * worthwhile despite losing OS readahead benefits. */
80 #define SKIP_PAGES_THRESHOLD 32
/* Working state for one lazy VACUUM of a single heap relation; allocated
 * with palloc0 in lazy_vacuum_rel and threaded through every helper. */
82 typedef struct LVRelStats
84 /* hasindex = true means two-pass strategy; false means one-pass */
86 bool scanned_all; /* have we scanned all pages (this far)? */
87 /* Overall statistics about rel */
88 BlockNumber rel_pages;
89 double old_rel_tuples; /* previous value of pg_class.reltuples */
90 double rel_tuples; /* counts only tuples on scanned pages */
91 BlockNumber pages_removed;
92 double tuples_deleted;
93 BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
94 /* List of TIDs of tuples we intend to delete */
95 /* NB: this list is ordered by TID address */
96 int num_dead_tuples; /* current # of entries */
97 int max_dead_tuples; /* # slots allocated in array */
98 ItemPointer dead_tuples; /* array of ItemPointerData */
/* Newest xid among removed tuples; WAL-logged before index cleanup so Hot
 * Standby queries can be blocked at the right point (see
 * vacuum_log_cleanup_info). */
100 TransactionId latestRemovedXid;
104 /* A few variables that don't seem worth passing around as parameters */
/* Message level for progress reports (fed to ivinfo.message_level);
 * presumably set from VACOPT_VERBOSE in lazy_vacuum_rel — the assignment
 * line is not visible here, so confirm against the full source. */
105 static int elevel = -1;
/* Visibility cutoff for HeapTupleSatisfiesVacuum / heap_page_prune. */
107 static TransactionId OldestXmin;
/* Xid cutoff below which tuples are frozen (heap_freeze_tuple). */
108 static TransactionId FreezeLimit;
/* Buffer ring strategy passed to every ReadBufferExtended in this file. */
110 static BufferAccessStrategy vac_strategy;
113 /* non-export function prototypes */
/* All helpers below share the LVRelStats working state built by
 * lazy_vacuum_rel; lazy_scan_heap is pass 1, lazy_vacuum_heap pass 2. */
114 static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
115 Relation *Irel, int nindexes, bool scan_all);
116 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
117 static void lazy_vacuum_index(Relation indrel,
118 IndexBulkDeleteResult **stats,
119 LVRelStats *vacrelstats);
120 static void lazy_cleanup_index(Relation indrel,
121 IndexBulkDeleteResult *stats,
122 LVRelStats *vacrelstats);
123 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
124 int tupindex, LVRelStats *vacrelstats);
125 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
126 static BlockNumber count_nondeletable_pages(Relation onerel,
127 LVRelStats *vacrelstats);
128 static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
129 static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
130 ItemPointer itemptr);
131 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
132 static int vac_cmp_itemptr(const void *left, const void *right);
136 * lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
138 * This routine vacuums a single heap, cleans out its indexes, and
139 * updates its relpages and reltuples statistics.
141 * At entry, we have already established a transaction and opened
142 * and locked the relation.
145 lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
146 BufferAccessStrategy bstrategy, bool *scanned_all)
148 LVRelStats *vacrelstats;
151 BlockNumber possibly_freeable;
153 TimestampTz starttime = 0;
155 TransactionId freezeTableLimit;
157 pg_rusage_init(&ru0);
159 /* measure elapsed time iff autovacuum logging requires it */
160 if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration > 0)
161 starttime = GetCurrentTimestamp();
163 if (vacstmt->options & VACOPT_VERBOSE)
167 vac_strategy = bstrategy;
170 vacuum_set_xid_limits(vacstmt->freeze_min_age, vacstmt->freeze_table_age,
171 onerel->rd_rel->relisshared,
172 &OldestXmin, &FreezeLimit, &freezeTableLimit);
/* Force a whole-table scan (no VM-based skipping) when relfrozenxid is old
 * enough; the comparison's second argument is presumably freezeTableLimit
 * (line elided in this view) — confirm against the full source. */
173 scan_all = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
176 vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
178 vacrelstats->scanned_all = true; /* will be cleared if we skip a page */
179 vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
180 vacrelstats->num_index_scans = 0;
182 /* Open all indexes of the relation */
183 vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
184 vacrelstats->hasindex = (nindexes > 0);
186 /* Do the vacuuming */
187 lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, scan_all);
189 /* Done with indexes */
190 vac_close_indexes(nindexes, Irel, NoLock);
193 * Optionally truncate the relation.
195 * Don't even think about it unless we have a shot at releasing a goodly
196 * number of pages. Otherwise, the time taken isn't worth it.
198 possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
199 if (possibly_freeable > 0 &&
200 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
201 possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION))
202 lazy_truncate_heap(onerel, vacrelstats);
204 /* Vacuum the Free Space Map */
205 FreeSpaceMapVacuum(onerel);
208 * Update statistics in pg_class. But only if we didn't skip any pages;
209 * the tuple count only includes tuples from the pages we've visited, and
210 * we haven't frozen tuples in unvisited pages either. The page count is
211 * accurate in any case, but because we use the reltuples / relpages ratio
212 * in the planner, it's better to not update relpages either if we can't
215 if (vacrelstats->scanned_all)
216 vac_update_relstats(onerel,
217 vacrelstats->rel_pages, vacrelstats->rel_tuples,
218 vacrelstats->hasindex,
221 /* report results to the stats collector, too */
222 pgstat_report_vacuum(RelationGetRelid(onerel),
223 onerel->rd_rel->relisshared,
224 vacrelstats->scanned_all,
225 vacrelstats->rel_tuples);
227 /* and log the action if appropriate */
228 if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
230 if (Log_autovacuum_min_duration == 0 ||
231 TimestampDifferenceExceeds(starttime, GetCurrentTimestamp(),
232 Log_autovacuum_min_duration))
234 (errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"
235 "pages: %d removed, %d remain\n"
236 "tuples: %.0f removed, %.0f remain\n"
238 get_database_name(MyDatabaseId),
239 get_namespace_name(RelationGetNamespace(onerel)),
240 RelationGetRelationName(onerel),
241 vacrelstats->num_index_scans,
242 vacrelstats->pages_removed, vacrelstats->rel_pages,
243 vacrelstats->tuples_deleted, vacrelstats->rel_tuples,
244 pg_rusage_show(&ru0))));
/* Tell caller whether every page was visited (affects stats reliability). */
248 *scanned_all = vacrelstats->scanned_all;
252 * For Hot Standby we need to know the highest transaction id that will
253 * be removed by any change. VACUUM proceeds in a number of passes so
254 * we need to consider how each pass operates. The first phase runs
255 * heap_page_prune(), which can issue XLOG_HEAP2_CLEAN records as it
256 * progresses - these will have a latestRemovedXid on each record.
257 * In some cases this removes all of the tuples to be removed, though
258 * often we have dead tuples with index pointers so we must remember them
259 * for removal in phase 3. Index records for those rows are removed
260 * in phase 2 and index blocks do not have MVCC information attached.
261 * So before we can allow removal of any index tuples we need to issue
262 * a WAL record containing the latestRemovedXid of rows that will be
263 * removed in phase three. This allows recovery queries to block at the
264 * correct place, i.e. before phase two, rather than during phase three
265 * which would be after the rows have become inaccessible.
268 vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
271 * No need to log changes for temp tables, they do not contain data
272 * visible on the standby server.
/* Also skip when WAL is not needed at all (e.g. wal_level too low). */
274 if (rel->rd_istemp || !XLogIsNeeded())
/* Only emit the record if this run actually removed tuples; otherwise
 * latestRemovedXid may still be invalid. */
277 if (vacrelstats->tuples_deleted > 0)
279 Assert(TransactionIdIsValid(vacrelstats->latestRemovedXid));
281 (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
286 * lazy_scan_heap() -- scan an open heap relation
288 * This routine sets commit status bits, builds lists of dead tuples
289 * and pages with free space, and calculates statistics on the number
290 * of live tuples in the heap. When done, or when we run low on space
291 * for dead-tuple TIDs, invoke vacuuming of indexes and heap.
293 * If there are no indexes then we just vacuum each dirty page as we
294 * process it, since there's no point in gathering many tuples.
297 lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
298 Relation *Irel, int nindexes, bool scan_all)
304 BlockNumber empty_pages,
311 IndexBulkDeleteResult **indstats;
314 Buffer vmbuffer = InvalidBuffer;
315 BlockNumber all_visible_streak;
317 pg_rusage_init(&ru0);
319 relname = RelationGetRelationName(onerel);
321 (errmsg("vacuuming \"%s.%s\"",
322 get_namespace_name(RelationGetNamespace(onerel)),
325 empty_pages = vacuumed_pages = scanned_pages = 0;
326 num_tuples = tups_vacuumed = nkeep = nunused = 0;
/* One IndexBulkDeleteResult slot per index, carried across vacuum cycles. */
328 indstats = (IndexBulkDeleteResult **)
329 palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
331 nblocks = RelationGetNumberOfBlocks(onerel);
332 vacrelstats->rel_pages = nblocks;
333 vacrelstats->nonempty_pages = 0;
334 vacrelstats->latestRemovedXid = InvalidTransactionId;
/* Size the dead-tuple TID array (bounded by maintenance_work_mem). */
336 lazy_space_alloc(vacrelstats, nblocks);
338 all_visible_streak = 0;
339 for (blkno = 0; blkno < nblocks; blkno++)
348 OffsetNumber frozen[MaxOffsetNumber];
351 bool all_visible_according_to_vm = false;
355 * Skip pages that don't require vacuuming according to the visibility
356 * map. But only if we've seen a streak of at least
357 * SKIP_PAGES_THRESHOLD pages marked as clean. Since we're reading
358 * sequentially, the OS should be doing readahead for us and there's
359 * no gain in skipping a page now and then. You need a longer run of
360 * consecutive skipped pages before it's worthwhile. Also, skipping
361 * even a single page means that we can't update relfrozenxid or
362 * reltuples, so we only want to do it if there's a good chance to
363 * skip a goodly number of pages.
367 all_visible_according_to_vm =
368 visibilitymap_test(onerel, blkno, &vmbuffer);
369 if (all_visible_according_to_vm)
371 all_visible_streak++;
372 if (all_visible_streak >= SKIP_PAGES_THRESHOLD)
/* Once we skip, relstats become partial; remember that. */
374 vacrelstats->scanned_all = false;
379 all_visible_streak = 0;
/* Cost-based vacuum delay point, once per scanned page. */
382 vacuum_delay_point();
387 * If we are close to overrunning the available space for dead-tuple
388 * TIDs, pause and do a cycle of vacuuming before we tackle this page.
390 if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
391 vacrelstats->num_dead_tuples > 0)
393 /* Log cleanup info before we touch indexes */
394 vacuum_log_cleanup_info(onerel, vacrelstats);
396 /* Remove index entries */
397 for (i = 0; i < nindexes; i++)
398 lazy_vacuum_index(Irel[i],
401 /* Remove tuples from heap */
402 lazy_vacuum_heap(onerel, vacrelstats);
404 * Forget the now-vacuumed tuples, and press on, but be careful
405 * not to reset latestRemovedXid since we want that value to be valid.
407 vacrelstats->num_dead_tuples = 0;
408 vacrelstats->num_index_scans++;
411 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
412 RBM_NORMAL, vac_strategy);
414 /* We need buffer cleanup lock so that we can prune HOT chains. */
415 LockBufferForCleanup(buf);
417 page = BufferGetPage(buf);
422 * An all-zeroes page could be left over if a backend extends the
423 * relation but crashes before initializing the page. Reclaim such
426 * We have to be careful here because we could be looking at a
427 * page that someone has just added to the relation and not yet
428 * been able to initialize (see RelationGetBufferForTuple). To
429 * protect against that, release the buffer lock, grab the
430 * relation extension lock momentarily, and re-lock the buffer. If
431 * the page is still uninitialized by then, it must be left over
432 * from a crashed backend, and we can initialize it.
434 * We don't really need the relation lock when this is a new or
435 * temp relation, but it's probably not worth the code space to
436 * check that, since this surely isn't a critical path.
438 * Note: the comparable code in vacuum.c need not worry because
439 * it's got exclusive lock on the whole relation.
441 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
442 LockRelationForExtension(onerel, ExclusiveLock);
443 UnlockRelationForExtension(onerel, ExclusiveLock);
444 LockBufferForCleanup(buf);
448 (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
450 PageInit(page, BufferGetPageSize(buf), 0);
453 freespace = PageGetHeapFreeSpace(page);
454 MarkBufferDirty(buf);
455 UnlockReleaseBuffer(buf);
457 RecordPageWithFreeSpace(onerel, blkno, freespace);
461 if (PageIsEmpty(page))
464 freespace = PageGetHeapFreeSpace(page);
/* Empty page: safe to mark all-visible without inspecting tuples. */
466 if (!PageIsAllVisible(page))
468 PageSetAllVisible(page);
469 SetBufferCommitInfoNeedsSave(buf);
472 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
474 /* Update the visibility map */
475 if (!all_visible_according_to_vm)
477 visibilitymap_pin(onerel, blkno, &vmbuffer);
478 LockBuffer(buf, BUFFER_LOCK_SHARE);
479 if (PageIsAllVisible(page))
480 visibilitymap_set(onerel, blkno, PageGetLSN(page), &vmbuffer);
481 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
485 RecordPageWithFreeSpace(onerel, blkno, freespace);
490 * Prune all HOT-update chains in this page.
492 * We count tuples removed by the pruning step as removed by VACUUM.
494 tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
495 &vacrelstats->latestRemovedXid);
497 * Now scan the page to collect vacuumable items and check for tuples
498 * requiring freezing.
503 prev_dead_count = vacrelstats->num_dead_tuples;
504 maxoff = PageGetMaxOffsetNumber(page);
505 for (offnum = FirstOffsetNumber;
507 offnum = OffsetNumberNext(offnum))
511 itemid = PageGetItemId(page, offnum);
513 /* Unused items require no processing, but we count 'em */
514 if (!ItemIdIsUsed(itemid))
520 /* Redirect items mustn't be touched */
521 if (ItemIdIsRedirected(itemid))
523 hastup = true; /* this page won't be truncatable */
527 ItemPointerSet(&(tuple.t_self), blkno, offnum);
530 * DEAD item pointers are to be vacuumed normally; but we don't
531 * count them in tups_vacuumed, else we'd be double-counting (at
532 * least in the common case where heap_page_prune() just freed up
535 if (ItemIdIsDead(itemid))
537 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
542 Assert(ItemIdIsNormal(itemid));
544 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
545 tuple.t_len = ItemIdGetLength(itemid);
549 switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf))
554 * Ordinarily, DEAD tuples would have been removed by
555 * heap_page_prune(), but it's possible that the tuple
556 * state changed since heap_page_prune() looked. In
557 * particular an INSERT_IN_PROGRESS tuple could have
558 * changed to DEAD if the inserter aborted. So this
559 * cannot be considered an error condition.
561 * If the tuple is HOT-updated then it must only be
562 * removed by a prune operation; so we keep it just as if
563 * it were RECENTLY_DEAD. Also, if it's a heap-only
564 * tuple, we choose to keep it, because it'll be a lot
565 * cheaper to get rid of it in the next pruning pass than
566 * to treat it like an indexed tuple.
568 if (HeapTupleIsHotUpdated(&tuple) ||
569 HeapTupleIsHeapOnly(&tuple))
572 tupgone = true; /* we can delete the tuple */
576 /* Tuple is good --- but let's do some validity checks */
577 if (onerel->rd_rel->relhasoids &&
578 !OidIsValid(HeapTupleGetOid(&tuple)))
579 elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
580 relname, blkno, offnum);
583 * Is the tuple definitely visible to all transactions?
585 * NB: Like with per-tuple hint bits, we can't set the
586 * PD_ALL_VISIBLE flag if the inserter committed
587 * asynchronously. See SetHintBits for more info. Check
588 * that the HEAP_XMIN_COMMITTED hint bit is set because of
595 if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
602 * The inserter definitely committed. But is it old
603 * enough that everyone sees it as committed?
605 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
606 if (!TransactionIdPrecedes(xmin, OldestXmin))
613 case HEAPTUPLE_RECENTLY_DEAD:
616 * If tuple is recently deleted then we must not remove it
622 case HEAPTUPLE_INSERT_IN_PROGRESS:
623 /* This is an expected case during concurrent vacuum */
626 case HEAPTUPLE_DELETE_IN_PROGRESS:
627 /* This is an expected case during concurrent vacuum */
631 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
637 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
638 HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
639 &vacrelstats->latestRemovedXid);
648 * Each non-removable tuple must be checked to see if it needs
649 * freezing. Note we already have exclusive buffer lock.
651 if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
653 frozen[nfrozen++] = offnum;
655 } /* scan along page */
658 * If we froze any tuples, mark the buffer dirty, and write a WAL
659 * record recording the changes. We must log the changes to be
660 * crash-safe against future truncation of CLOG.
664 MarkBufferDirty(buf);
665 /* no XLOG for temp tables, though */
666 if (!onerel->rd_istemp)
670 recptr = log_heap_freeze(onerel, buf, FreezeLimit,
672 PageSetLSN(page, recptr);
673 PageSetTLI(page, ThisTimeLineID);
678 * If there are no indexes then we can vacuum the page right now
679 * instead of doing a second scan.
682 vacrelstats->num_dead_tuples > 0)
684 /* Remove tuples from heap */
685 lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
687 * Forget the now-vacuumed tuples, and press on, but be careful
688 * not to reset latestRemovedXid since we want that value to be valid.
690 Assert(TransactionIdIsValid(vacrelstats->latestRemovedXid));
691 vacrelstats->num_dead_tuples = 0;
695 freespace = PageGetHeapFreeSpace(page);
697 /* Update the all-visible flag on the page */
698 if (!PageIsAllVisible(page) && all_visible)
700 PageSetAllVisible(page);
701 SetBufferCommitInfoNeedsSave(buf);
703 else if (PageIsAllVisible(page) && !all_visible)
705 elog(WARNING, "PD_ALL_VISIBLE flag was incorrectly set in relation \"%s\" page %u",
707 PageClearAllVisible(page);
708 SetBufferCommitInfoNeedsSave(buf);
711 * Normally, we would drop the lock on the heap page before
712 * updating the visibility map, but since this case shouldn't
713 * happen anyway, don't worry about that.
715 visibilitymap_clear(onerel, blkno);
718 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
720 /* Update the visibility map */
721 if (!all_visible_according_to_vm && all_visible)
723 visibilitymap_pin(onerel, blkno, &vmbuffer);
724 LockBuffer(buf, BUFFER_LOCK_SHARE);
/* Re-check under lock: the flag may have been cleared meanwhile. */
725 if (PageIsAllVisible(page))
726 visibilitymap_set(onerel, blkno, PageGetLSN(page), &vmbuffer);
727 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
732 /* Remember the location of the last page with nonremovable tuples */
734 vacrelstats->nonempty_pages = blkno + 1;
737 * If we remembered any tuples for deletion, then the page will be
738 * visited again by lazy_vacuum_heap, which will compute and record
739 * its post-compaction free space. If not, then we're done with this
740 * page, so remember its free space as-is. (This path will always be
741 * taken if there are no indexes.)
743 if (vacrelstats->num_dead_tuples == prev_dead_count)
744 RecordPageWithFreeSpace(onerel, blkno, freespace);
747 /* save stats for use later */
748 vacrelstats->rel_tuples = num_tuples;
749 vacrelstats->tuples_deleted = tups_vacuumed;
751 /* If any tuples need to be deleted, perform final vacuum cycle */
752 /* XXX put a threshold on min number of tuples here? */
753 if (vacrelstats->num_dead_tuples > 0)
755 /* Log cleanup info before we touch indexes */
756 vacuum_log_cleanup_info(onerel, vacrelstats);
758 /* Remove index entries */
759 for (i = 0; i < nindexes; i++)
760 lazy_vacuum_index(Irel[i],
763 /* Remove tuples from heap */
764 lazy_vacuum_heap(onerel, vacrelstats);
765 vacrelstats->num_index_scans++;
768 /* Release the pin on the visibility map page */
769 if (BufferIsValid(vmbuffer))
771 ReleaseBuffer(vmbuffer);
772 vmbuffer = InvalidBuffer;
775 /* Do post-vacuum cleanup and statistics update for each index */
776 for (i = 0; i < nindexes; i++)
777 lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
779 /* If no indexes, make log report that lazy_vacuum_heap would've made */
782 (errmsg("\"%s\": removed %.0f row versions in %u pages",
783 RelationGetRelationName(onerel),
784 tups_vacuumed, vacuumed_pages)));
787 (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
788 RelationGetRelationName(onerel),
789 tups_vacuumed, num_tuples, scanned_pages, nblocks),
790 errdetail("%.0f dead row versions cannot be removed yet.\n"
791 "There were %.0f unused item pointers.\n"
792 "%u pages are entirely empty.\n"
797 pg_rusage_show(&ru0))));
802 * lazy_vacuum_heap() -- second pass over the heap
804 * This routine marks dead tuples as unused and compacts out free
805 * space on their pages. Pages not having dead tuples recorded from
806 * lazy_scan_heap are not visited at all.
808 * Note: the reason for doing this as a second pass is we cannot remove
809 * the tuples until we've removed their index entries, and we want to
810 * process index entry removal in batches as large as possible.
813 lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
819 pg_rusage_init(&ru0);
/* dead_tuples is TID-ordered, so each iteration consumes one block's worth
 * of entries; lazy_vacuum_page returns the index of the next block's TIDs. */
823 while (tupindex < vacrelstats->num_dead_tuples)
830 vacuum_delay_point();
832 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
833 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
835 LockBufferForCleanup(buf);
836 tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);
838 /* Now that we've compacted the page, record its available space */
839 page = BufferGetPage(buf);
840 freespace = PageGetHeapFreeSpace(page);
842 UnlockReleaseBuffer(buf);
843 RecordPageWithFreeSpace(onerel, tblk, freespace);
848 (errmsg("\"%s\": removed %d row versions in %d pages",
849 RelationGetRelationName(onerel),
852 pg_rusage_show(&ru0))));
856 * lazy_vacuum_page() -- free dead tuples on a page
857 * and repair its fragmentation.
859 * Caller must hold pin and buffer cleanup lock on the buffer.
861 * tupindex is the index in vacrelstats->dead_tuples of the first dead
862 * tuple for this page. We assume the rest follow sequentially.
863 * The return value is the first tupindex after the tuples of this page.
866 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
867 int tupindex, LVRelStats *vacrelstats)
869 Page page = BufferGetPage(buffer);
870 OffsetNumber unused[MaxOffsetNumber];
/* Page mutation plus WAL insert must be atomic w.r.t. errors. */
873 START_CRIT_SECTION();
875 for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
881 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
883 break; /* past end of tuples for this block */
884 toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
885 itemid = PageGetItemId(page, toff);
886 ItemIdSetUnused(itemid);
887 unused[uncnt++] = toff;
890 PageRepairFragmentation(page);
892 MarkBufferDirty(buffer);
/* XLOG the cleanup, except for temp tables which are never WAL-logged. */
895 if (!onerel->rd_istemp)
899 recptr = log_heap_clean(onerel, buffer,
902 vacrelstats->latestRemovedXid);
903 PageSetLSN(page, recptr);
904 PageSetTLI(page, ThisTimeLineID);
913 * lazy_vacuum_index() -- vacuum one index relation.
915 * Delete all the index entries pointing to tuples listed in
916 * vacrelstats->dead_tuples, and update running statistics.
919 lazy_vacuum_index(Relation indrel,
920 IndexBulkDeleteResult **stats,
921 LVRelStats *vacrelstats)
923 IndexVacuumInfo ivinfo;
926 pg_rusage_init(&ru0);
928 ivinfo.index = indrel;
929 ivinfo.analyze_only = false;
/* old_rel_tuples is only an estimate here, so flag it as such. */
930 ivinfo.estimated_count = true;
931 ivinfo.message_level = elevel;
932 ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
933 ivinfo.strategy = vac_strategy;
935 /* Do bulk deletion */
/* lazy_tid_reaped decides, per index entry, whether its heap TID is in
 * the dead_tuples list and hence deletable. */
936 *stats = index_bulk_delete(&ivinfo, *stats,
937 lazy_tid_reaped, (void *) vacrelstats);
940 (errmsg("scanned index \"%s\" to remove %d row versions",
941 RelationGetRelationName(indrel),
942 vacrelstats->num_dead_tuples),
943 errdetail("%s.", pg_rusage_show(&ru0))));
947 * lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
950 lazy_cleanup_index(Relation indrel,
951 IndexBulkDeleteResult *stats,
952 LVRelStats *vacrelstats)
954 IndexVacuumInfo ivinfo;
957 pg_rusage_init(&ru0);
959 ivinfo.index = indrel;
960 ivinfo.analyze_only = false;
/* Heap count is exact only when no pages were skipped via the VM. */
961 ivinfo.estimated_count = !vacrelstats->scanned_all;
962 ivinfo.message_level = elevel;
963 /* use rel_tuples only if we scanned all pages, else fall back */
964 ivinfo.num_heap_tuples = vacrelstats->scanned_all ? vacrelstats->rel_tuples : vacrelstats->old_rel_tuples;
965 ivinfo.strategy = vac_strategy;
967 stats = index_vacuum_cleanup(&ivinfo, stats);
973 * Now update statistics in pg_class, but only if the index says the count
976 if (!stats->estimated_count)
977 vac_update_relstats(indrel,
978 stats->num_pages, stats->num_index_tuples,
979 false, InvalidTransactionId);
982 (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
983 RelationGetRelationName(indrel),
984 stats->num_index_tuples,
986 errdetail("%.0f index row versions were removed.\n"
987 "%u index pages have been deleted, %u are currently reusable.\n"
989 stats->tuples_removed,
990 stats->pages_deleted, stats->pages_free,
991 pg_rusage_show(&ru0))));
997 * lazy_truncate_heap - try to truncate off any empty pages at the end
1000 lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
1002 BlockNumber old_rel_pages = vacrelstats->rel_pages;
1003 BlockNumber new_rel_pages;
1006 pg_rusage_init(&ru0);
1009 * We need full exclusive lock on the relation in order to do truncation.
1010 * If we can't get it, give up rather than waiting --- we don't want to
1011 * block other backends, and we don't want to deadlock (which is quite
1012 * possible considering we already hold a lower-grade lock).
1014 if (!ConditionalLockRelation(onerel, AccessExclusiveLock))
1018 * Now that we have exclusive lock, look to see if the rel has grown
1019 * whilst we were vacuuming with non-exclusive lock. If so, give up; the
1020 * newly added pages presumably contain non-deletable tuples.
1022 new_rel_pages = RelationGetNumberOfBlocks(onerel);
1023 if (new_rel_pages != old_rel_pages)
1025 /* might as well use the latest news when we update pg_class stats */
1026 vacrelstats->rel_pages = new_rel_pages;
1027 UnlockRelation(onerel, AccessExclusiveLock);
1032 * Scan backwards from the end to verify that the end pages actually
1033 * contain no tuples. This is *necessary*, not optional, because other
1034 * backends could have added tuples to these pages whilst we were
1037 new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1039 if (new_rel_pages >= old_rel_pages)
1041 /* can't do anything after all */
1042 UnlockRelation(onerel, AccessExclusiveLock);
/* Physically chop off everything from new_rel_pages to the end. */
1049 RelationTruncate(onerel, new_rel_pages);
1052 * We can release the exclusive lock as soon as we have truncated. Other
1053 * backends can't safely access the relation until they have processed the
1054 * smgr invalidation that smgrtruncate sent out ... but that should happen
1055 * as part of standard invalidation processing once they acquire lock on
1058 UnlockRelation(onerel, AccessExclusiveLock);
1060 /* update statistics */
1061 vacrelstats->rel_pages = new_rel_pages;
1062 vacrelstats->pages_removed = old_rel_pages - new_rel_pages;
1065 (errmsg("\"%s\": truncated %u to %u pages",
1066 RelationGetRelationName(onerel),
1067 old_rel_pages, new_rel_pages),
1069 pg_rusage_show(&ru0))));
1073 * Rescan end pages to verify that they are (still) empty of tuples.
1075 * Returns number of nondeletable pages (last nonempty page + 1).
1078 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
1082 /* Strange coding of loop control is needed because blkno is unsigned */
/* Walk backwards from the tail toward the last page known to hold tuples. */
1083 blkno = vacrelstats->rel_pages;
1084 while (blkno > vacrelstats->nonempty_pages)
1088 OffsetNumber offnum,
1093 * We don't insert a vacuum delay point here, because we have an
1094 * exclusive lock on the table which we want to hold for as short a
1095 * time as possible. We still need to check for interrupts however.
1097 CHECK_FOR_INTERRUPTS();
1101 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
1102 RBM_NORMAL, vac_strategy);
1104 /* In this phase we only need shared access to the buffer */
1105 LockBuffer(buf, BUFFER_LOCK_SHARE);
1107 page = BufferGetPage(buf);
1109 if (PageIsNew(page) || PageIsEmpty(page))
1111 /* PageIsNew probably shouldn't happen... */
1112 UnlockReleaseBuffer(buf);
1117 maxoff = PageGetMaxOffsetNumber(page);
1118 for (offnum = FirstOffsetNumber;
1120 offnum = OffsetNumberNext(offnum))
1124 itemid = PageGetItemId(page, offnum);
1127 * Note: any non-unused item should be taken as a reason to keep
1128 * this page. We formerly thought that DEAD tuples could be
1129 * thrown away, but that's not so, because we'd not have cleaned
1130 * out their index entries.
1132 if (ItemIdIsUsed(itemid))
1135 break; /* can stop scanning */
1137 } /* scan along page */
1139 UnlockReleaseBuffer(buf);
1141 /* Done scanning if we found a tuple here */
1147 * If we fall out of the loop, all the previously-thought-to-be-empty
1148 * pages still are; we need not bother to look at the last known-nonempty
1151 return vacrelstats->nonempty_pages;
1155 * lazy_space_alloc - space allocation decisions for lazy vacuum
1157 * See the comments at the head of this file for rationale.
1160 lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
/* Two-pass (indexed) case: size the TID array from maintenance_work_mem,
 * clamped to INT_MAX, MaxAllocSize, and the table's worst-case tuple count. */
1164 if (vacrelstats->hasindex)
1166 maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
1167 maxtuples = Min(maxtuples, INT_MAX);
1168 maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
1170 /* curious coding here to ensure the multiplication can't overflow */
1171 if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
1172 maxtuples = relblocks * LAZY_ALLOC_TUPLES;
1174 /* stay sane if small maintenance_work_mem */
1175 maxtuples = Max(maxtuples, MaxHeapTuplesPerPage)
1179 maxtuples = MaxHeapTuplesPerPage;
1182 vacrelstats->num_dead_tuples = 0;
1183 vacrelstats->max_dead_tuples = (int) maxtuples;
1184 vacrelstats->dead_tuples = (ItemPointer)
1185 palloc(maxtuples * sizeof(ItemPointerData));
1189 * lazy_record_dead_tuple - remember one deletable tuple
1192 lazy_record_dead_tuple(LVRelStats *vacrelstats,
1193 ItemPointer itemptr)
1196 * The array shouldn't overflow under normal behavior, but perhaps it
1197 * could if we are given a really small maintenance_work_mem. In that
1198 * case, just forget the last few tuples (we'll get 'em next time).
/* Dropping entries on overflow is safe: the tuples stay dead and a later
 * vacuum pass will pick them up. */
1200 if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
1202 vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
1203 vacrelstats->num_dead_tuples++;
1208 * lazy_tid_reaped() -- is a particular tid deletable?
1210 * This has the right signature to be an IndexBulkDeleteCallback.
1212 * Assumes dead_tuples array is in sorted order.
1215 lazy_tid_reaped(ItemPointer itemptr, void *state)
1217 LVRelStats *vacrelstats = (LVRelStats *) state;
/* Binary search the TID-ordered dead_tuples array; the comparator is
 * presumably vac_cmp_itemptr (its argument line is elided in this view). */
1220 res = (ItemPointer) bsearch((void *) itemptr,
1221 (void *) vacrelstats->dead_tuples,
1222 vacrelstats->num_dead_tuples,
1223 sizeof(ItemPointerData),
1226 return (res != NULL);
1230 * Comparator routines for use with qsort() and bsearch().
1233 vac_cmp_itemptr(const void *left, const void *right)
1240 lblk = ItemPointerGetBlockNumber((ItemPointer) left);
1241 rblk = ItemPointerGetBlockNumber((ItemPointer) right);
1248 loff = ItemPointerGetOffsetNumber((ItemPointer) left);
1249 roff = ItemPointerGetOffsetNumber((ItemPointer) right);