1 /*-------------------------------------------------------------------------
2  *
3  * vacuumlazy.c
4  *        Concurrent ("lazy") vacuuming.
5  *
6  *
7  * The major space usage for LAZY VACUUM is storage for the array of dead
8  * tuple TIDs, with the next biggest need being storage for per-disk-page
9  * free space info.  We want to ensure we can vacuum even the very largest
10  * relations with finite memory space usage.  To do that, we set upper bounds
11  * on the number of tuples and pages we will keep track of at once.
12  *
13  * We are willing to use at most maintenance_work_mem (or perhaps
14  * autovacuum_work_mem) memory space to keep track of dead tuples.  We
15  * initially allocate an array of TIDs of that size, with an upper limit that
16  * depends on table size (this limit ensures we don't allocate a huge area
17  * uselessly for vacuuming small tables).  If the array threatens to overflow,
18  * we suspend the heap scan phase and perform a pass of index cleanup and page
19  * compaction, then resume the heap scan with an empty TID array.
20  *
21  * If we're processing a table with no indexes, we can just vacuum each page
22  * as we go; there's no need to save up multiple tuples to minimize the number
23  * of index scans performed.  So we don't use maintenance_work_mem memory for
24  * the TID array, just enough to hold as many heap tuples as fit on one page.
25  *
26  *
27  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
28  * Portions Copyright (c) 1994, Regents of the University of California
29  *
30  *
31  * IDENTIFICATION
32  *        src/backend/commands/vacuumlazy.c
33  *
34  *-------------------------------------------------------------------------
35  */
36 #include "postgres.h"
37
38 #include <math.h>
39
40 #include "access/genam.h"
41 #include "access/heapam.h"
42 #include "access/heapam_xlog.h"
43 #include "access/htup_details.h"
44 #include "access/multixact.h"
45 #include "access/transam.h"
46 #include "access/visibilitymap.h"
47 #include "access/xlog.h"
48 #include "catalog/catalog.h"
49 #include "catalog/storage.h"
50 #include "commands/dbcommands.h"
51 #include "commands/progress.h"
52 #include "commands/vacuum.h"
53 #include "miscadmin.h"
54 #include "pgstat.h"
55 #include "portability/instr_time.h"
56 #include "postmaster/autovacuum.h"
57 #include "storage/bufmgr.h"
58 #include "storage/freespace.h"
59 #include "storage/lmgr.h"
60 #include "utils/lsyscache.h"
61 #include "utils/memutils.h"
62 #include "utils/pg_rusage.h"
63 #include "utils/timestamp.h"
64 #include "utils/tqual.h"
65
66
67 /*
68  * Space/time tradeoff parameters: do these need to be user-tunable?
69  *
70  * To consider truncating the relation, we want there to be at least
71  * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
72  * is less) potentially-freeable pages.
73  */
74 #define REL_TRUNCATE_MINIMUM    1000
75 #define REL_TRUNCATE_FRACTION   16
76
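/*
 * A minimal, unused sketch of the rule above, restated over plain block
 * counts: truncation is worth considering once the number of potentially
 * freeable pages at the end of the relation reaches the smaller of
 * REL_TRUNCATE_MINIMUM and relsize / REL_TRUNCATE_FRACTION.  The helper
 * name is ours; should_attempt_truncation() below applies essentially the
 * same test to the fields of LVRelStats.
 */
static inline bool
truncate_threshold_met(BlockNumber rel_pages, BlockNumber nonempty_pages)
{
    BlockNumber possibly_freeable = rel_pages - nonempty_pages;

    return possibly_freeable > 0 &&
        (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
         possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
}
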
77 /*
78  * Timing parameters for truncate locking heuristics.
79  *
80  * These were not exposed as user tunable GUC values because it didn't seem
81  * that the potential for improvement was great enough to merit the cost of
82  * supporting them.
83  */
84 #define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL             20      /* ms */
85 #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL              50      /* ms */
86 #define VACUUM_TRUNCATE_LOCK_TIMEOUT                    5000    /* ms */
87
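/*
 * A simplified, unused sketch of how lazy_truncate_heap() applies the
 * timing parameters above while trying to take AccessExclusiveLock before
 * truncating: retry the conditional lock request every
 * VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL ms, and give up once roughly
 * VACUUM_TRUNCATE_LOCK_TIMEOUT ms have gone by, so other sessions are never
 * blocked for long.  The helper name is ours, for illustration only.
 */
static inline bool
example_try_truncate_lock(Relation onerel)
{
    int         lock_retry = 0;

    while (!ConditionalLockRelation(onerel, AccessExclusiveLock))
    {
        if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
                            VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
            return false;       /* caller should give up on truncating */
        pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
    }
    return true;
}
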
88 /*
89  * Guesstimation of number of dead tuples per page.  This is used to
90  * provide an upper limit to memory allocated when vacuuming small
91  * tables.
92  */
93 #define LAZY_ALLOC_TUPLES               MaxHeapTuplesPerPage
94
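/*
 * An unused sketch of the dead-tuple array sizing rule described in the
 * header comment: spend at most the work-mem budget (maintenance_work_mem
 * or autovacuum_work_mem, given here in kilobytes) on TIDs, never more than
 * LAZY_ALLOC_TUPLES per heap page, and never less than one page's worth;
 * with no indexes, one page's worth is all we need.  The authoritative
 * logic lives in lazy_space_alloc() below; the helper name is ours.
 */
static inline long
example_max_dead_tuples(long work_mem_kb, BlockNumber relblocks, bool hasindex)
{
    long        maxtuples;

    if (!hasindex)
        return MaxHeapTuplesPerPage;

    maxtuples = (work_mem_kb * 1024L) / sizeof(ItemPointerData);
    maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));

    /* don't allocate a huge array uselessly for a small table */
    if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
        maxtuples = relblocks * LAZY_ALLOC_TUPLES;

    /* stay sane if the work-mem budget is tiny */
    return Max(maxtuples, MaxHeapTuplesPerPage);
}
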
95 /*
96  * Before we consider skipping a page that's marked as clean in the
97  * visibility map, we must've seen at least this many clean pages.
98  */
99 #define SKIP_PAGES_THRESHOLD    ((BlockNumber) 32)
100
101 /*
102  * Size of the prefetch window for lazy vacuum backwards truncation scan.
103  * Needs to be a power of 2.
104  */
105 #define PREFETCH_SIZE                   ((BlockNumber) 32)
106
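/*
 * A short illustration of why PREFETCH_SIZE must be a power of 2: the
 * backwards scan in count_nondeletable_pages() prefetches blocks in aligned
 * windows of PREFETCH_SIZE, and with a power of 2 the start of the window
 * containing a block can be found with a mask instead of a division.  The
 * helper name is ours, for illustration only.
 */
static inline BlockNumber
example_prefetch_window_start(BlockNumber blkno)
{
    return blkno & ~(PREFETCH_SIZE - 1);
}
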
107 typedef struct LVRelStats
108 {
109         /* hasindex = true means two-pass strategy; false means one-pass */
110         bool            hasindex;
111         /* Overall statistics about rel */
112         BlockNumber old_rel_pages;      /* previous value of pg_class.relpages */
113         BlockNumber rel_pages;          /* total number of pages */
114         BlockNumber scanned_pages;      /* number of pages we examined */
115         BlockNumber pinskipped_pages;   /* # of pages we skipped due to a pin */
116         BlockNumber frozenskipped_pages;        /* # of frozen pages we skipped */
117         BlockNumber tupcount_pages; /* pages whose tuples we counted */
118         double          scanned_tuples; /* counts only tuples on tupcount_pages */
119         double          old_rel_tuples; /* previous value of pg_class.reltuples */
120         double          new_rel_tuples; /* new estimated total # of tuples */
121         double          new_dead_tuples;        /* new estimated total # of dead tuples */
122         BlockNumber pages_removed;
123         double          tuples_deleted;
124         BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
125         /* List of TIDs of tuples we intend to delete */
126         /* NB: this list is ordered by TID address */
127         int                     num_dead_tuples;        /* current # of entries */
128         int                     max_dead_tuples;        /* # slots allocated in array */
129         ItemPointer dead_tuples;        /* array of ItemPointerData */
130         int                     num_index_scans;
131         TransactionId latestRemovedXid;
132         bool            lock_waiter_detected;
133 } LVRelStats;
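
/*
 * Because dead_tuples is filled in heap-scan order, it is sorted by TID
 * (block number first, then offset number), which lets lazy_tid_reaped()
 * locate entries with a binary search while vacuuming indexes.  The
 * comparator below is an unused, illustrative equivalent of
 * vac_cmp_itemptr() defined later in this file; the name is ours.
 */
static inline int
example_tid_compare(ItemPointer a, ItemPointer b)
{
    BlockNumber ablk = ItemPointerGetBlockNumber(a);
    BlockNumber bblk = ItemPointerGetBlockNumber(b);

    if (ablk != bblk)
        return (ablk < bblk) ? -1 : 1;
    if (ItemPointerGetOffsetNumber(a) != ItemPointerGetOffsetNumber(b))
        return (ItemPointerGetOffsetNumber(a) <
                ItemPointerGetOffsetNumber(b)) ? -1 : 1;
    return 0;
}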
134
135
136 /* A few variables that don't seem worth passing around as parameters */
137 static int      elevel = -1;
138
139 static TransactionId OldestXmin;
140 static TransactionId FreezeLimit;
141 static MultiXactId MultiXactCutoff;
142
143 static BufferAccessStrategy vac_strategy;
144
145
146 /* non-export function prototypes */
147 static void lazy_scan_heap(Relation onerel, int options,
148                            LVRelStats *vacrelstats, Relation *Irel, int nindexes,
149                            bool aggressive);
150 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
151 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
152 static void lazy_vacuum_index(Relation indrel,
153                                   IndexBulkDeleteResult **stats,
154                                   LVRelStats *vacrelstats);
155 static void lazy_cleanup_index(Relation indrel,
156                                    IndexBulkDeleteResult *stats,
157                                    LVRelStats *vacrelstats);
158 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
159                                  int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
160 static bool should_attempt_truncation(LVRelStats *vacrelstats);
161 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
162 static BlockNumber count_nondeletable_pages(Relation onerel,
163                                                  LVRelStats *vacrelstats);
164 static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
165 static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
166                                            ItemPointer itemptr);
167 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
168 static int      vac_cmp_itemptr(const void *left, const void *right);
169 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
170                                          TransactionId *visibility_cutoff_xid, bool *all_frozen);
171
172
173 /*
174  *      lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
175  *
176  *              This routine vacuums a single heap, cleans out its indexes, and
177  *              updates its relpages and reltuples statistics.
178  *
179  *              At entry, we have already established a transaction and opened
180  *              and locked the relation.
181  */
182 void
183 lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
184                                 BufferAccessStrategy bstrategy)
185 {
186         LVRelStats *vacrelstats;
187         Relation   *Irel;
188         int                     nindexes;
189         PGRUsage        ru0;
190         TimestampTz starttime = 0;
191         long            secs;
192         int                     usecs;
193         double          read_rate,
194                                 write_rate;
195         bool            aggressive;             /* should we scan all unfrozen pages? */
196         bool            scanned_all_unfrozen;   /* actually scanned all such pages? */
197         TransactionId xidFullScanLimit;
198         MultiXactId mxactFullScanLimit;
199         BlockNumber new_rel_pages;
200         double          new_rel_tuples;
201         BlockNumber new_rel_allvisible;
202         double          new_live_tuples;
203         TransactionId new_frozen_xid;
204         MultiXactId new_min_multi;
205
206         Assert(params != NULL);
207
208         /* measure elapsed time iff autovacuum logging requires it */
209         if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
210         {
211                 pg_rusage_init(&ru0);
212                 starttime = GetCurrentTimestamp();
213         }
214
215         if (options & VACOPT_VERBOSE)
216                 elevel = INFO;
217         else
218                 elevel = DEBUG2;
219
220         pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
221                                                                   RelationGetRelid(onerel));
222
223         vac_strategy = bstrategy;
224
225         vacuum_set_xid_limits(onerel,
226                                                   params->freeze_min_age,
227                                                   params->freeze_table_age,
228                                                   params->multixact_freeze_min_age,
229                                                   params->multixact_freeze_table_age,
230                                                   &OldestXmin, &FreezeLimit, &xidFullScanLimit,
231                                                   &MultiXactCutoff, &mxactFullScanLimit);
232
233         /*
234          * We request an aggressive scan if the table's frozen Xid is now older
235          * than or equal to the requested Xid full-table scan limit; or if the
236          * table's minimum MultiXactId is older than or equal to the requested
237          * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
238          */
239         aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
240                                                                                            xidFullScanLimit);
241         aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
242                                                                                           mxactFullScanLimit);
243         if (options & VACOPT_DISABLE_PAGE_SKIPPING)
244                 aggressive = true;
245
246         vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
247
248         vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
249         vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
250         vacrelstats->num_index_scans = 0;
251         vacrelstats->pages_removed = 0;
252         vacrelstats->lock_waiter_detected = false;
253
254         /* Open all indexes of the relation */
255         vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
256         vacrelstats->hasindex = (nindexes > 0);
257
258         /* Do the vacuuming */
259         lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);
260
261         /* Done with indexes */
262         vac_close_indexes(nindexes, Irel, NoLock);
263
264         /*
265          * Compute whether we actually scanned all of the unfrozen pages. If we did,
266          * we can adjust relfrozenxid and relminmxid.
267          *
268          * NB: We need to check this before truncating the relation, because that
269          * will change ->rel_pages.
270          */
271         if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
272                 < vacrelstats->rel_pages)
273         {
274                 Assert(!aggressive);
275                 scanned_all_unfrozen = false;
276         }
277         else
278                 scanned_all_unfrozen = true;
279
280         /*
281          * Optionally truncate the relation.
282          */
283         if (should_attempt_truncation(vacrelstats))
284                 lazy_truncate_heap(onerel, vacrelstats);
285
286         /* Report that we are now doing final cleanup */
287         pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
288                                                                  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
289
290         /* Vacuum the Free Space Map */
291         FreeSpaceMapVacuum(onerel);
292
293         /*
294          * Update statistics in pg_class.
295          *
296          * A corner case here is that if we scanned no pages at all because every
297          * page is all-visible, we should not update relpages/reltuples, because
298          * we have no new information to contribute.  In particular this keeps us
299          * from replacing relpages=reltuples=0 (which means "unknown tuple
300          * density") with nonzero relpages and reltuples=0 (which means "zero
301          * tuple density") unless there's some actual evidence for the latter.
302          *
303          * It's important that we use tupcount_pages and not scanned_pages for the
304          * check described above; scanned_pages counts pages where we could not
305          * get cleanup lock, and which were processed only for frozenxid purposes.
306          *
307          * We do update relallvisible even in the corner case, since if the table
308          * is all-visible we'd definitely like to know that.  But clamp the value
309          * to be not more than what we're setting relpages to.
310          *
311          * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
312          * since then we don't know for certain that all tuples have a newer xmin.
313          */
314         new_rel_pages = vacrelstats->rel_pages;
315         new_rel_tuples = vacrelstats->new_rel_tuples;
316         if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
317         {
318                 new_rel_pages = vacrelstats->old_rel_pages;
319                 new_rel_tuples = vacrelstats->old_rel_tuples;
320         }
321
322         visibilitymap_count(onerel, &new_rel_allvisible, NULL);
323         if (new_rel_allvisible > new_rel_pages)
324                 new_rel_allvisible = new_rel_pages;
325
326         new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
327         new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
328
329         vac_update_relstats(onerel,
330                                                 new_rel_pages,
331                                                 new_rel_tuples,
332                                                 new_rel_allvisible,
333                                                 vacrelstats->hasindex,
334                                                 new_frozen_xid,
335                                                 new_min_multi,
336                                                 false);
337
338         /* report results to the stats collector, too */
339         new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
340         if (new_live_tuples < 0)
341                 new_live_tuples = 0;    /* just in case */
342
343         pgstat_report_vacuum(RelationGetRelid(onerel),
344                                                  onerel->rd_rel->relisshared,
345                                                  new_live_tuples,
346                                                  vacrelstats->new_dead_tuples);
347         pgstat_progress_end_command();
348
349         /* and log the action if appropriate */
350         if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
351         {
352                 TimestampTz endtime = GetCurrentTimestamp();
353
354                 if (params->log_min_duration == 0 ||
355                         TimestampDifferenceExceeds(starttime, endtime,
356                                                                            params->log_min_duration))
357                 {
358                         StringInfoData buf;
359
360                         TimestampDifference(starttime, endtime, &secs, &usecs);
361
362                         read_rate = 0;
363                         write_rate = 0;
364                         if ((secs > 0) || (usecs > 0))
365                         {
366                                 read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
367                                         (secs + usecs / 1000000.0);
368                                 write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
369                                         (secs + usecs / 1000000.0);
370                         }
371
372                         /*
373                          * This is pretty messy, but we split it up so that we can skip
374                          * emitting individual parts of the message when not applicable.
375                          */
376                         initStringInfo(&buf);
377                         appendStringInfo(&buf, _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"),
378                                                          get_database_name(MyDatabaseId),
379                                                          get_namespace_name(RelationGetNamespace(onerel)),
380                                                          RelationGetRelationName(onerel),
381                                                          vacrelstats->num_index_scans);
382                         appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
383                                                          vacrelstats->pages_removed,
384                                                          vacrelstats->rel_pages,
385                                                          vacrelstats->pinskipped_pages,
386                                                          vacrelstats->frozenskipped_pages);
387                         appendStringInfo(&buf,
388                                                          _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
389                                                          vacrelstats->tuples_deleted,
390                                                          vacrelstats->new_rel_tuples,
391                                                          vacrelstats->new_dead_tuples,
392                                                          OldestXmin);
393                         appendStringInfo(&buf,
394                                                  _("buffer usage: %d hits, %d misses, %d dirtied\n"),
395                                                          VacuumPageHit,
396                                                          VacuumPageMiss,
397                                                          VacuumPageDirty);
398                         appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
399                                                          read_rate, write_rate);
400                         appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
401
402                         ereport(LOG,
403                                         (errmsg_internal("%s", buf.data)));
404                         pfree(buf.data);
405                 }
406         }
407 }
408
409 /*
410  * For Hot Standby we need to know the highest transaction id that will
411  * be removed by any change. VACUUM proceeds in a number of passes so
412  * we need to consider how each pass operates. The first phase runs
413  * heap_page_prune(), which can issue XLOG_HEAP2_CLEAN records as it
414  * progresses - these will have a latestRemovedXid on each record.
415  * In some cases this removes all of the tuples to be removed, though
416  * often we have dead tuples with index pointers so we must remember them
417  * for removal in phase 3. Index records for those rows are removed
418  * in phase 2 and index blocks do not have MVCC information attached.
419  * So before we can allow removal of any index tuples we need to issue
420  * a WAL record containing the latestRemovedXid of rows that will be
421  * removed in phase three. This allows recovery queries to block at the
422  * correct place, i.e. before phase two, rather than during phase three
423  * which would be after the rows have become inaccessible.
424  */
425 static void
426 vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
427 {
428         /*
429          * Skip this for relations for which no WAL is to be written, or if we're
430          * not trying to support archive recovery.
431          */
432         if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
433                 return;
434
435         /*
436          * No need to write the record at all unless it contains a valid value
437          */
438         if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
439                 (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
440 }
441
442 /*
443  *      lazy_scan_heap() -- scan an open heap relation
444  *
445  *              This routine prunes each page in the heap, which will among other
446  *              things truncate dead tuples to dead line pointers, defragment the
447  *              page, and set commit status bits (see heap_page_prune).  It also builds
448  *              lists of dead tuples and pages with free space, calculates statistics
449  *              on the number of live tuples in the heap, and marks pages as
450  *              all-visible if appropriate.  When done, or when we run low on space for
451  *              dead-tuple TIDs, invoke vacuuming of indexes and call lazy_vacuum_heap
452  *              to reclaim dead line pointers.
453  *
454  *              If there are no indexes then we can reclaim line pointers on the fly;
455  *              dead line pointers need only be retained until all index pointers that
456  *              reference them have been killed.
457  */
458 static void
459 lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
460                            Relation *Irel, int nindexes, bool aggressive)
461 {
462         BlockNumber nblocks,
463                                 blkno;
464         HeapTupleData tuple;
465         char       *relname;
466         BlockNumber empty_pages,
467                                 vacuumed_pages;
468         double          num_tuples,
469                                 tups_vacuumed,
470                                 nkeep,
471                                 nunused;
472         IndexBulkDeleteResult **indstats;
473         int                     i;
474         PGRUsage        ru0;
475         Buffer          vmbuffer = InvalidBuffer;
476         BlockNumber next_unskippable_block;
477         bool            skipping_blocks;
478         xl_heap_freeze_tuple *frozen;
479         StringInfoData buf;
480         const int       initprog_index[] = {
481                 PROGRESS_VACUUM_PHASE,
482                 PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
483                 PROGRESS_VACUUM_MAX_DEAD_TUPLES
484         };
485         int64           initprog_val[3];
486
487         pg_rusage_init(&ru0);
488
489         relname = RelationGetRelationName(onerel);
490         ereport(elevel,
491                         (errmsg("vacuuming \"%s.%s\"",
492                                         get_namespace_name(RelationGetNamespace(onerel)),
493                                         relname)));
494
495         empty_pages = vacuumed_pages = 0;
496         num_tuples = tups_vacuumed = nkeep = nunused = 0;
497
498         indstats = (IndexBulkDeleteResult **)
499                 palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
500
501         nblocks = RelationGetNumberOfBlocks(onerel);
502         vacrelstats->rel_pages = nblocks;
503         vacrelstats->scanned_pages = 0;
504         vacrelstats->tupcount_pages = 0;
505         vacrelstats->nonempty_pages = 0;
506         vacrelstats->latestRemovedXid = InvalidTransactionId;
507
508         lazy_space_alloc(vacrelstats, nblocks);
509         frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
510
511         /* Report that we're scanning the heap, advertising total # of blocks */
512         initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
513         initprog_val[1] = nblocks;
514         initprog_val[2] = vacrelstats->max_dead_tuples;
515         pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
516
517         /*
518          * Except when aggressive is set, we want to skip pages that are
519          * all-visible according to the visibility map, but only when we can skip
520          * at least SKIP_PAGES_THRESHOLD consecutive pages.  Since we're reading
521          * sequentially, the OS should be doing readahead for us, so there's no
522          * gain in skipping a page now and then; that's likely to disable
523          * readahead and so be counterproductive. Also, skipping even a single
524          * page means that we can't update relfrozenxid, so we only want to do it
525          * if we can skip a goodly number of pages.
526          *
527          * When aggressive is set, we can't skip pages just because they are
528          * all-visible, but we can still skip pages that are all-frozen, since
529          * such pages do not need freezing and do not affect the value that we can
530          * safely set for relfrozenxid or relminmxid.
531          *
532          * Before entering the main loop, establish the invariant that
533          * next_unskippable_block is the next block number >= blkno that we
534          * can't skip based on the visibility map: not all-visible for a
535          * regular scan, or not all-frozen for an aggressive scan.  We set it to
536          * nblocks if there's no such block.  We also set up the skipping_blocks
537          * flag correctly at this stage.
538          *
539          * Note: The value returned by visibilitymap_get_status could be slightly
540          * out-of-date, since we make this test before reading the corresponding
541          * heap page or locking the buffer.  This is OK.  If we mistakenly think
542          * that the page is all-visible or all-frozen when in fact the flag's just
543          * been cleared, we might fail to vacuum the page.  It's easy to see that
544          * skipping a page when aggressive is not set is not a very big deal; we
545          * might leave some dead tuples lying around, but the next vacuum will
546          * find them.  But even when aggressive *is* set, it's still OK if we miss
547          * a page whose all-frozen marking has just been cleared.  Any new XIDs
548          * just added to that page are necessarily newer than the GlobalXmin we
549          * computed, so they'll have no effect on the value to which we can safely
550          * set relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
551          *
552          * We will scan the table's last page, at least to the extent of
553          * determining whether it has tuples or not, even if it should be skipped
554          * according to the above rules; except when we've already determined that
555          * it's not worth trying to truncate the table.  This avoids having
556          * lazy_truncate_heap() take access-exclusive lock on the table to attempt
557          * a truncation that just fails immediately because there are tuples in
558          * the last page.  This is worth avoiding mainly because such a lock must
559          * be replayed on any hot standby, where it can be disruptive.
560          */
561         next_unskippable_block = 0;
562         if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
563         {
564                 while (next_unskippable_block < nblocks)
565                 {
566                         uint8           vmstatus;
567
568                         vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
569                                                                                                 &vmbuffer);
570                         if (aggressive)
571                         {
572                                 if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
573                                         break;
574                         }
575                         else
576                         {
577                                 if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
578                                         break;
579                         }
580                         vacuum_delay_point();
581                         next_unskippable_block++;
582                 }
583         }
584
585         if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
586                 skipping_blocks = true;
587         else
588                 skipping_blocks = false;
589
590         for (blkno = 0; blkno < nblocks; blkno++)
591         {
592                 Buffer          buf;
593                 Page            page;
594                 OffsetNumber offnum,
595                                         maxoff;
596                 bool            tupgone,
597                                         hastup;
598                 int                     prev_dead_count;
599                 int                     nfrozen;
600                 Size            freespace;
601                 bool            all_visible_according_to_vm = false;
602                 bool            all_visible;
603                 bool            all_frozen = true;      /* provided all_visible is also true */
604                 bool            has_dead_tuples;
605                 TransactionId visibility_cutoff_xid = InvalidTransactionId;
606
607                 /* see note above about forcing scanning of last page */
608 #define FORCE_CHECK_PAGE() \
609                 (blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))
610
611                 pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
612
613                 if (blkno == next_unskippable_block)
614                 {
615                         /* Time to advance next_unskippable_block */
616                         next_unskippable_block++;
617                         if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
618                         {
619                                 while (next_unskippable_block < nblocks)
620                                 {
621                                         uint8           vmskipflags;
622
623                                         vmskipflags = visibilitymap_get_status(onerel,
624                                                                                                           next_unskippable_block,
625                                                                                                                    &vmbuffer);
626                                         if (aggressive)
627                                         {
628                                                 if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
629                                                         break;
630                                         }
631                                         else
632                                         {
633                                                 if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
634                                                         break;
635                                         }
636                                         vacuum_delay_point();
637                                         next_unskippable_block++;
638                                 }
639                         }
640
641                         /*
642                          * We know we can't skip the current block.  But set up
643                          * skipping_blocks to do the right thing for the
644                          * following blocks.
645                          */
646                         if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
647                                 skipping_blocks = true;
648                         else
649                                 skipping_blocks = false;
650
651                         /*
652                          * Normally, the fact that we can't skip this block must mean that
653                          * it's not all-visible.  But in an aggressive vacuum we know only
654                          * that it's not all-frozen, so it might still be all-visible.
655                          */
656                         if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
657                                 all_visible_according_to_vm = true;
658                 }
659                 else
660                 {
661                         /*
662                          * The current block is potentially skippable; if we've seen a
663                          * long enough run of skippable blocks to justify skipping it, and
664                          * we're not forced to check it, then go ahead and skip.
665                          * Otherwise, the page must be at least all-visible if not
666                          * all-frozen, so we can set all_visible_according_to_vm = true.
667                          */
668                         if (skipping_blocks && !FORCE_CHECK_PAGE())
669                         {
670                                 /*
671                                  * Tricky, tricky.  If this is an aggressive vacuum, the page
672                                  * must have been all-frozen at the time we checked whether it
673                                  * was skippable, but it might not be any more.  We must be
674                                  * careful to count it as a skipped all-frozen page in that
675                                  * case, or else we'll think we can't update relfrozenxid and
676                                  * relminmxid.  If it's not an aggressive vacuum, we don't
677                                  * know whether it was all-frozen, so we have to recheck; but
678                                  * in this case an approximate answer is OK.
679                                  */
680                                 if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
681                                         vacrelstats->frozenskipped_pages++;
682                                 continue;
683                         }
684                         all_visible_according_to_vm = true;
685                 }
686
687                 vacuum_delay_point();
688
689                 /*
690                  * If we are close to overrunning the available space for dead-tuple
691                  * TIDs, pause and do a cycle of vacuuming before we tackle this page.
692                  */
693                 if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
694                         vacrelstats->num_dead_tuples > 0)
695                 {
696                         const int       hvp_index[] = {
697                                 PROGRESS_VACUUM_PHASE,
698                                 PROGRESS_VACUUM_NUM_INDEX_VACUUMS
699                         };
700                         int64           hvp_val[2];
701
702                         /*
703                          * Before beginning index vacuuming, we release any pin we may
704                          * hold on the visibility map page.  This isn't necessary for
705                          * correctness, but we do it anyway to avoid holding the pin
706                          * across a lengthy, unrelated operation.
707                          */
708                         if (BufferIsValid(vmbuffer))
709                         {
710                                 ReleaseBuffer(vmbuffer);
711                                 vmbuffer = InvalidBuffer;
712                         }
713
714                         /* Log cleanup info before we touch indexes */
715                         vacuum_log_cleanup_info(onerel, vacrelstats);
716
717                         /* Report that we are now vacuuming indexes */
718                         pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
719                                                                                  PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
720
721                         /* Remove index entries */
722                         for (i = 0; i < nindexes; i++)
723                                 lazy_vacuum_index(Irel[i],
724                                                                   &indstats[i],
725                                                                   vacrelstats);
726
727                         /*
728                          * Report that we are now vacuuming the heap.  We also increase
729                          * the number of index scans here; note that by using
730                          * pgstat_progress_update_multi_param we can update both
731                          * parameters atomically.
732                          */
733                         hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
734                         hvp_val[1] = vacrelstats->num_index_scans + 1;
735                         pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
736
737                         /* Remove tuples from heap */
738                         lazy_vacuum_heap(onerel, vacrelstats);
739
740                         /*
741                          * Forget the now-vacuumed tuples, and press on, but be careful
742                          * not to reset latestRemovedXid since we want that value to be
743                          * valid.
744                          */
745                         vacrelstats->num_dead_tuples = 0;
746                         vacrelstats->num_index_scans++;
747
748                         /* Report that we are once again scanning the heap */
749                         pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
750                                                                                  PROGRESS_VACUUM_PHASE_SCAN_HEAP);
751                 }
752
753                 /*
754                  * Pin the visibility map page in case we need to mark the page
755                  * all-visible.  In most cases this will be very cheap, because we'll
756                  * already have the correct page pinned anyway.  However, it's
757                  * possible that (a) next_unskippable_block is covered by a different
758                  * VM page than the current block or (b) we released our pin and did a
759                  * cycle of index vacuuming.
761                  */
762                 visibilitymap_pin(onerel, blkno, &vmbuffer);
763
764                 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
765                                                                  RBM_NORMAL, vac_strategy);
766
767                 /* We need buffer cleanup lock so that we can prune HOT chains. */
768                 if (!ConditionalLockBufferForCleanup(buf))
769                 {
770                         /*
771                          * If we're not performing an aggressive scan to guard against XID
772                          * wraparound, and we don't want to forcibly check the page, then
773                          * it's OK to skip vacuuming pages we get a lock conflict on. They
774                          * will be dealt with in some future vacuum.
775                          */
776                         if (!aggressive && !FORCE_CHECK_PAGE())
777                         {
778                                 ReleaseBuffer(buf);
779                                 vacrelstats->pinskipped_pages++;
780                                 continue;
781                         }
782
783                         /*
784                          * Read the page with share lock to see if any xids on it need to
785                          * be frozen.  If not we just skip the page, after updating our
786                          * scan statistics.  If there are some, we wait for cleanup lock.
787                          *
788                          * We could defer the lock request further by remembering the page
789                          * and coming back to it later, or we could even register
790                          * ourselves for multiple buffers and then service whichever one
791                          * is received first.  For now, this seems good enough.
792                          *
793                          * If we get here with aggressive false, then we're just forcibly
794                          * checking the page, and so we don't want to insist on getting
795                          * the lock; we only need to know if the page contains tuples, so
796                          * that we can update nonempty_pages correctly.  It's convenient
797                          * to use lazy_check_needs_freeze() for both situations, though.
798                          */
799                         LockBuffer(buf, BUFFER_LOCK_SHARE);
800                         if (!lazy_check_needs_freeze(buf, &hastup))
801                         {
802                                 UnlockReleaseBuffer(buf);
803                                 vacrelstats->scanned_pages++;
804                                 vacrelstats->pinskipped_pages++;
805                                 if (hastup)
806                                         vacrelstats->nonempty_pages = blkno + 1;
807                                 continue;
808                         }
809                         if (!aggressive)
810                         {
811                                 /*
812                                  * Here, we must not advance scanned_pages; that would amount
813                                  * to claiming that the page contains no freezable tuples.
814                                  */
815                                 UnlockReleaseBuffer(buf);
816                                 vacrelstats->pinskipped_pages++;
817                                 if (hastup)
818                                         vacrelstats->nonempty_pages = blkno + 1;
819                                 continue;
820                         }
821                         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
822                         LockBufferForCleanup(buf);
823                         /* drop through to normal processing */
824                 }
825
826                 vacrelstats->scanned_pages++;
827                 vacrelstats->tupcount_pages++;
828
829                 page = BufferGetPage(buf);
830
831                 if (PageIsNew(page))
832                 {
833                         /*
834                          * An all-zeroes page could be left over if a backend extends the
835                          * relation but crashes before initializing the page. Reclaim such
836                          * pages for use.
837                          *
838                          * We have to be careful here because we could be looking at a
839                          * page that someone has just added to the relation and not yet
840                          * been able to initialize (see RelationGetBufferForTuple). To
841                          * protect against that, release the buffer lock, grab the
842                          * relation extension lock momentarily, and re-lock the buffer. If
843                          * the page is still uninitialized by then, it must be left over
844                          * from a crashed backend, and we can initialize it.
845                          *
846                          * We don't really need the relation lock when this is a new or
847                          * temp relation, but it's probably not worth the code space to
848                          * check that, since this surely isn't a critical path.
849                          *
850                          * Note: the comparable code in vacuum.c need not worry because
851                          * it's got exclusive lock on the whole relation.
852                          */
853                         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
854                         LockRelationForExtension(onerel, ExclusiveLock);
855                         UnlockRelationForExtension(onerel, ExclusiveLock);
856                         LockBufferForCleanup(buf);
857                         if (PageIsNew(page))
858                         {
859                                 ereport(WARNING,
860                                 (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
861                                                 relname, blkno)));
862                                 PageInit(page, BufferGetPageSize(buf), 0);
863                                 empty_pages++;
864                         }
865                         freespace = PageGetHeapFreeSpace(page);
866                         MarkBufferDirty(buf);
867                         UnlockReleaseBuffer(buf);
868
869                         RecordPageWithFreeSpace(onerel, blkno, freespace);
870                         continue;
871                 }
872
873                 if (PageIsEmpty(page))
874                 {
875                         empty_pages++;
876                         freespace = PageGetHeapFreeSpace(page);
877
878                         /* empty pages are always all-visible and all-frozen */
879                         if (!PageIsAllVisible(page))
880                         {
881                                 START_CRIT_SECTION();
882
883                                 /* mark buffer dirty before writing a WAL record */
884                                 MarkBufferDirty(buf);
885
886                                 /*
887                                  * It's possible that another backend has extended the heap,
888                                  * initialized the page, and then failed to WAL-log the page
889                                  * due to an ERROR.  Since heap extension is not WAL-logged,
890                                  * recovery might try to replay our record setting the page
891                                  * all-visible and find that the page isn't initialized, which
892                                  * will cause a PANIC.  To prevent that, check whether the
893                                  * page has been previously WAL-logged, and if not, do that
894                                  * now.
895                                  */
896                                 if (RelationNeedsWAL(onerel) &&
897                                         PageGetLSN(page) == InvalidXLogRecPtr)
898                                         log_newpage_buffer(buf, true);
899
900                                 PageSetAllVisible(page);
901                                 visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
902                                                                   vmbuffer, InvalidTransactionId,
903                                            VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
904                                 END_CRIT_SECTION();
905                         }
906
907                         UnlockReleaseBuffer(buf);
908                         RecordPageWithFreeSpace(onerel, blkno, freespace);
909                         continue;
910                 }
911
912                 /*
913                  * Prune all HOT-update chains in this page.
914                  *
915                  * We count tuples removed by the pruning step as removed by VACUUM.
916                  */
917                 tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
918                                                                                  &vacrelstats->latestRemovedXid);
919
920                 /*
921                  * Now scan the page to collect vacuumable items and check for tuples
922                  * requiring freezing.
923                  */
924                 all_visible = true;
925                 has_dead_tuples = false;
926                 nfrozen = 0;
927                 hastup = false;
928                 prev_dead_count = vacrelstats->num_dead_tuples;
929                 maxoff = PageGetMaxOffsetNumber(page);
930
931                 /*
932                  * Note: If you change anything in the loop below, also look at
933                  * heap_page_is_all_visible to see if that needs to be changed.
934                  */
935                 for (offnum = FirstOffsetNumber;
936                          offnum <= maxoff;
937                          offnum = OffsetNumberNext(offnum))
938                 {
939                         ItemId          itemid;
940
941                         itemid = PageGetItemId(page, offnum);
942
943                         /* Unused items require no processing, but we count 'em */
944                         if (!ItemIdIsUsed(itemid))
945                         {
946                                 nunused += 1;
947                                 continue;
948                         }
949
950                         /* Redirect items mustn't be touched */
951                         if (ItemIdIsRedirected(itemid))
952                         {
953                                 hastup = true;  /* this page won't be truncatable */
954                                 continue;
955                         }
956
957                         ItemPointerSet(&(tuple.t_self), blkno, offnum);
958
959                         /*
960                          * DEAD item pointers are to be vacuumed normally; but we don't
961                          * count them in tups_vacuumed, else we'd be double-counting (at
962                          * least in the common case where heap_page_prune() just freed up
963                          * a non-HOT tuple).
964                          */
965                         if (ItemIdIsDead(itemid))
966                         {
967                                 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
968                                 all_visible = false;
969                                 continue;
970                         }
971
972                         Assert(ItemIdIsNormal(itemid));
973
974                         tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
975                         tuple.t_len = ItemIdGetLength(itemid);
976                         tuple.t_tableOid = RelationGetRelid(onerel);
977
978                         tupgone = false;
979
980                         switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
981                         {
982                                 case HEAPTUPLE_DEAD:
983
984                                         /*
985                                          * Ordinarily, DEAD tuples would have been removed by
986                                          * heap_page_prune(), but it's possible that the tuple
987                                          * state changed since heap_page_prune() looked.  In
988                                          * particular an INSERT_IN_PROGRESS tuple could have
989                                          * changed to DEAD if the inserter aborted.  So this
990                                          * cannot be considered an error condition.
991                                          *
992                                          * If the tuple is HOT-updated then it must only be
993                                          * removed by a prune operation; so we keep it just as if
994                                          * it were RECENTLY_DEAD.  Also, if it's a heap-only
995                                          * tuple, we choose to keep it, because it'll be a lot
996                                          * cheaper to get rid of it in the next pruning pass than
997                                          * to treat it like an indexed tuple.
998                                          */
999                                         if (HeapTupleIsHotUpdated(&tuple) ||
1000                                                 HeapTupleIsHeapOnly(&tuple))
1001                                                 nkeep += 1;
1002                                         else
1003                                                 tupgone = true; /* we can delete the tuple */
1004                                         all_visible = false;
1005                                         break;
1006                                 case HEAPTUPLE_LIVE:
1007                                         /* Tuple is good --- but let's do some validity checks */
1008                                         if (onerel->rd_rel->relhasoids &&
1009                                                 !OidIsValid(HeapTupleGetOid(&tuple)))
1010                                                 elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
1011                                                          relname, blkno, offnum);
1012
1013                                         /*
1014                                          * Is the tuple definitely visible to all transactions?
1015                                          *
1016                                          * NB: Like with per-tuple hint bits, we can't set the
1017                                          * PD_ALL_VISIBLE flag if the inserter committed
1018                                          * asynchronously. See SetHintBits for more info. Check
1019                                          * that the tuple is hinted xmin-committed because of
1020                                          * that.
1021                                          */
1022                                         if (all_visible)
1023                                         {
1024                                                 TransactionId xmin;
1025
1026                                                 if (!HeapTupleHeaderXminCommitted(tuple.t_data))
1027                                                 {
1028                                                         all_visible = false;
1029                                                         break;
1030                                                 }
1031
1032                                                 /*
1033                                                  * The inserter definitely committed. But is it old
1034                                                  * enough that everyone sees it as committed?
1035                                                  */
1036                                                 xmin = HeapTupleHeaderGetXmin(tuple.t_data);
1037                                                 if (!TransactionIdPrecedes(xmin, OldestXmin))
1038                                                 {
1039                                                         all_visible = false;
1040                                                         break;
1041                                                 }
1042
1043                                                 /* Track newest xmin on page. */
1044                                                 if (TransactionIdFollows(xmin, visibility_cutoff_xid))
1045                                                         visibility_cutoff_xid = xmin;
1046                                         }
1047                                         break;
1048                                 case HEAPTUPLE_RECENTLY_DEAD:
1049
1050                                         /*
1051                                          * If the tuple is recently deleted then we must not remove
1052                                          * it from the relation.
1053                                          */
1054                                         nkeep += 1;
1055                                         all_visible = false;
1056                                         break;
1057                                 case HEAPTUPLE_INSERT_IN_PROGRESS:
1058                                         /* This is an expected case during concurrent vacuum */
1059                                         all_visible = false;
1060                                         break;
1061                                 case HEAPTUPLE_DELETE_IN_PROGRESS:
1062                                         /* This is an expected case during concurrent vacuum */
1063                                         all_visible = false;
1064                                         break;
1065                                 default:
1066                                         elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1067                                         break;
1068                         }
1069
1070                         if (tupgone)
1071                         {
1072                                 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
1073                                 HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
1074                                                                                          &vacrelstats->latestRemovedXid);
1075                                 tups_vacuumed += 1;
1076                                 has_dead_tuples = true;
1077                         }
1078                         else
1079                         {
1080                                 bool            tuple_totally_frozen;
1081
1082                                 num_tuples += 1;
1083                                 hastup = true;
1084
1085                                 /*
1086                                  * Each non-removable tuple must be checked to see if it needs
1087                                  * freezing.  Note we already have exclusive buffer lock.
1088                                  */
1089                                 if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
1090                                                                                    MultiXactCutoff, &frozen[nfrozen],
1091                                                                                           &tuple_totally_frozen))
1092                                         frozen[nfrozen++].offset = offnum;
1093
1094                                 if (!tuple_totally_frozen)
1095                                         all_frozen = false;
1096                         }
1097                 }                                               /* scan along page */
1098
1099                 /*
1100                  * If we froze any tuples, mark the buffer dirty, and write a WAL
1101                  * record recording the changes.  We must log the changes to be
1102                  * crash-safe against future truncation of CLOG.
1103                  */
1104                 if (nfrozen > 0)
1105                 {
1106                         START_CRIT_SECTION();
1107
1108                         MarkBufferDirty(buf);
1109
1110                         /* execute collected freezes */
1111                         for (i = 0; i < nfrozen; i++)
1112                         {
1113                                 ItemId          itemid;
1114                                 HeapTupleHeader htup;
1115
1116                                 itemid = PageGetItemId(page, frozen[i].offset);
1117                                 htup = (HeapTupleHeader) PageGetItem(page, itemid);
1118
1119                                 heap_execute_freeze_tuple(htup, &frozen[i]);
1120                         }
1121
1122                         /* Now WAL-log freezing if necessary */
1123                         if (RelationNeedsWAL(onerel))
1124                         {
1125                                 XLogRecPtr      recptr;
1126
1127                                 recptr = log_heap_freeze(onerel, buf, FreezeLimit,
1128                                                                                  frozen, nfrozen);
1129                                 PageSetLSN(page, recptr);
1130                         }
1131
1132                         END_CRIT_SECTION();
1133                 }
1134
1135                 /*
1136                  * If there are no indexes then we can vacuum the page right now
1137                  * instead of doing a second scan.
1138                  */
1139                 if (nindexes == 0 &&
1140                         vacrelstats->num_dead_tuples > 0)
1141                 {
1142                         /* Remove tuples from heap */
1143                         lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
1144                         has_dead_tuples = false;
1145
1146                         /*
1147                          * Forget the now-vacuumed tuples, and press on, but be careful
1148                          * not to reset latestRemovedXid since we want that value to be
1149                          * valid.
1150                          */
1151                         vacrelstats->num_dead_tuples = 0;
1152                         vacuumed_pages++;
1153                 }
1154
1155                 freespace = PageGetHeapFreeSpace(page);
1156
1157                 /* mark page all-visible, if appropriate */
1158                 if (all_visible && !all_visible_according_to_vm)
1159                 {
1160                         uint8           flags = VISIBILITYMAP_ALL_VISIBLE;
1161
1162                         if (all_frozen)
1163                                 flags |= VISIBILITYMAP_ALL_FROZEN;
1164
1165                         /*
1166                          * It should never be the case that the visibility map bit is set
1167                          * while the page-level bit is clear, but the reverse is allowed
1168                          * (if checksums are not enabled).  Regardless, set both bits
1169                          * so that we get back in sync.
1170                          *
1171                          * NB: If the heap page is all-visible but the VM bit is not set,
1172                          * we don't need to dirty the heap page.  However, if checksums
1173                          * are enabled, we do need to make sure that the heap page is
1174                          * dirtied before passing it to visibilitymap_set(), because it
1175                          * may be logged.  Given that this situation should only happen in
1176                          * rare cases after a crash, it is not worth optimizing.
1177                          */
1178                         PageSetAllVisible(page);
1179                         MarkBufferDirty(buf);
1180                         visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1181                                                           vmbuffer, visibility_cutoff_xid, flags);
1182                 }
1183
1184                 /*
1185                  * As of PostgreSQL 9.2, the visibility map bit should never be set if
1186                  * the page-level bit is clear.  However, it's possible that the bit
1187                  * got cleared after we checked it and before we took the buffer
1188                  * content lock, so we must recheck before jumping to the conclusion
1189                  * that something bad has happened.
1190                  */
1191                 else if (all_visible_according_to_vm && !PageIsAllVisible(page)
1192                                  && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
1193                 {
1194                         elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
1195                                  relname, blkno);
1196                         visibilitymap_clear(onerel, blkno, vmbuffer,
1197                                                                 VISIBILITYMAP_VALID_BITS);
1198                 }
1199
1200                 /*
1201                  * It's possible for the value returned by GetOldestXmin() to move
1202                  * backwards, so it's not wrong for us to see tuples that appear to
1203                  * not be visible to everyone yet, while PD_ALL_VISIBLE is already
1204                  * set. The real safe xmin value never moves backwards, but
1205                  * GetOldestXmin() is conservative and sometimes returns a value
1206                  * that's unnecessarily small, so if we see that contradiction it just
1207                  * means that the tuples that we think are not visible to everyone yet
1208                  * actually are, and the PD_ALL_VISIBLE flag is correct.
1209                  *
1210                  * There should never be dead tuples on a page with PD_ALL_VISIBLE
1211                  * set, however.
1212                  */
1213                 else if (PageIsAllVisible(page) && has_dead_tuples)
1214                 {
1215                         elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
1216                                  relname, blkno);
1217                         PageClearAllVisible(page);
1218                         MarkBufferDirty(buf);
1219                         visibilitymap_clear(onerel, blkno, vmbuffer,
1220                                                                 VISIBILITYMAP_VALID_BITS);
1221                 }
1222
1223                 /*
1224                  * If the all-visible page turns out to be all-frozen but is not yet
1225                  * marked as such, mark it now.  Note that all_frozen is only valid
1226                  * if all_visible is true, so we must check both.
1227                  */
1228                 else if (all_visible_according_to_vm && all_visible && all_frozen &&
1229                                  !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
1230                 {
1231                         /*
1232                          * We can pass InvalidTransactionId as the cutoff XID here,
1233                          * because setting the all-frozen bit doesn't cause recovery
1234                          * conflicts.
1235                          */
1236                         visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
1237                                                           vmbuffer, InvalidTransactionId,
1238                                                           VISIBILITYMAP_ALL_FROZEN);
1239                 }
1240
1241                 UnlockReleaseBuffer(buf);
1242
1243                 /* Remember the location of the last page with nonremovable tuples */
1244                 if (hastup)
1245                         vacrelstats->nonempty_pages = blkno + 1;
1246
1247                 /*
1248                  * If we remembered any tuples for deletion, then the page will be
1249                  * visited again by lazy_vacuum_heap, which will compute and record
1250                  * its post-compaction free space.  If not, then we're done with this
1251                  * page, so remember its free space as-is.  (This path will always be
1252                  * taken if there are no indexes.)
1253                  */
1254                 if (vacrelstats->num_dead_tuples == prev_dead_count)
1255                         RecordPageWithFreeSpace(onerel, blkno, freespace);
1256         }
1257
1258         /* report that everything is scanned and vacuumed */
1259         pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
1260
1261         pfree(frozen);
1262
1263         /* save stats for use later */
1264         vacrelstats->scanned_tuples = num_tuples;
1265         vacrelstats->tuples_deleted = tups_vacuumed;
1266         vacrelstats->new_dead_tuples = nkeep;
1267
1268         /* now we can compute the new value for pg_class.reltuples */
1269         vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
1270                                                                                                                  nblocks,
1271                                                                                                  vacrelstats->tupcount_pages,
1272                                                                                                                  num_tuples);
1273
1274         /*
1275          * Release any remaining pin on visibility map page.
1276          */
1277         if (BufferIsValid(vmbuffer))
1278         {
1279                 ReleaseBuffer(vmbuffer);
1280                 vmbuffer = InvalidBuffer;
1281         }
1282
1283         /* If any tuples need to be deleted, perform final vacuum cycle */
1284         /* XXX put a threshold on min number of tuples here? */
1285         if (vacrelstats->num_dead_tuples > 0)
1286         {
1287                 const int       hvp_index[] = {
1288                         PROGRESS_VACUUM_PHASE,
1289                         PROGRESS_VACUUM_NUM_INDEX_VACUUMS
1290                 };
1291                 int64           hvp_val[2];
1292
1293                 /* Log cleanup info before we touch indexes */
1294                 vacuum_log_cleanup_info(onerel, vacrelstats);
1295
1296                 /* Report that we are now vacuuming indexes */
1297                 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1298                                                                          PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
1299
1300                 /* Remove index entries */
1301                 for (i = 0; i < nindexes; i++)
1302                         lazy_vacuum_index(Irel[i],
1303                                                           &indstats[i],
1304                                                           vacrelstats);
1305
1306                 /* Report that we are now vacuuming the heap */
1307                 hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
1308                 hvp_val[1] = vacrelstats->num_index_scans + 1;
1309                 pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
1310
1311                 /* Remove tuples from heap */
1312                 pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1313                                                                          PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
1314                 lazy_vacuum_heap(onerel, vacrelstats);
1315                 vacrelstats->num_index_scans++;
1316         }
1317
1318         /* report all blocks vacuumed; and that we're cleaning up */
1319         pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1320         pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1321                                                                  PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
1322
1323         /* Do post-vacuum cleanup and statistics update for each index */
1324         for (i = 0; i < nindexes; i++)
1325                 lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
1326
1327         /* If no indexes, make log report that lazy_vacuum_heap would've made */
1328         if (vacuumed_pages)
1329                 ereport(elevel,
1330                                 (errmsg("\"%s\": removed %.0f row versions in %u pages",
1331                                                 RelationGetRelationName(onerel),
1332                                                 tups_vacuumed, vacuumed_pages)));
1333
1334         /*
1335          * This is pretty messy, but we split it up so that we can skip emitting
1336          * individual parts of the message when not applicable.
1337          */
1338         initStringInfo(&buf);
1339         appendStringInfo(&buf,
1340                 _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
1341                                          nkeep, OldestXmin);
1342         appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
1343                                          nunused);
1344         appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
1345                                                                         "Skipped %u pages due to buffer pins, ",
1346                                                                         vacrelstats->pinskipped_pages),
1347                                          vacrelstats->pinskipped_pages);
1348         appendStringInfo(&buf, ngettext("%u frozen page.\n",
1349                                                                         "%u frozen pages.\n",
1350                                                                         vacrelstats->frozenskipped_pages),
1351                                          vacrelstats->frozenskipped_pages);
1352         appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
1353                                                                         "%u pages are entirely empty.\n",
1354                                                                         empty_pages),
1355                                          empty_pages);
1356         appendStringInfo(&buf, "%s.", pg_rusage_show(&ru0));
1357
1358         ereport(elevel,
1359                         (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
1360                                         RelationGetRelationName(onerel),
1361                                         tups_vacuumed, num_tuples,
1362                                         vacrelstats->scanned_pages, nblocks),
1363                          errdetail_internal("%s", buf.data)));
1364         pfree(buf.data);
1365 }
1366
1367
1368 /*
1369  *      lazy_vacuum_heap() -- second pass over the heap
1370  *
1371  *              This routine marks dead tuples as unused and compacts out free
1372  *              space on their pages.  Pages not having dead tuples recorded from
1373  *              lazy_scan_heap are not visited at all.
1374  *
1375  * Note: the reason for doing this as a second pass is we cannot remove
1376  * the tuples until we've removed their index entries, and we want to
1377  * process index entry removal in batches as large as possible.
1378  */
1379 static void
1380 lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
1381 {
1382         int                     tupindex;
1383         int                     npages;
1384         PGRUsage        ru0;
1385         Buffer          vmbuffer = InvalidBuffer;
1386
1387         pg_rusage_init(&ru0);
1388         npages = 0;
1389
1390         tupindex = 0;
1391         while (tupindex < vacrelstats->num_dead_tuples)
1392         {
1393                 BlockNumber tblk;
1394                 Buffer          buf;
1395                 Page            page;
1396                 Size            freespace;
1397
1398                 vacuum_delay_point();
1399
1400                 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1401                 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
1402                                                                  vac_strategy);
1403                 if (!ConditionalLockBufferForCleanup(buf))
1404                 {
1405                         ReleaseBuffer(buf);
1406                         ++tupindex;
1407                         continue;
1408                 }
1409                 tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
1410                                                                         &vmbuffer);
1411
1412                 /* Now that we've compacted the page, record its available space */
1413                 page = BufferGetPage(buf);
1414                 freespace = PageGetHeapFreeSpace(page);
1415
1416                 UnlockReleaseBuffer(buf);
1417                 RecordPageWithFreeSpace(onerel, tblk, freespace);
1418                 npages++;
1419         }
1420
1421         if (BufferIsValid(vmbuffer))
1422         {
1423                 ReleaseBuffer(vmbuffer);
1424                 vmbuffer = InvalidBuffer;
1425         }
1426
1427         ereport(elevel,
1428                         (errmsg("\"%s\": removed %d row versions in %d pages",
1429                                         RelationGetRelationName(onerel),
1430                                         tupindex, npages),
1431                          errdetail_internal("%s", pg_rusage_show(&ru0))));
1432 }
1433
1434 /*
1435  *      lazy_vacuum_page() -- free dead tuples on a page
1436  *                                       and repair its fragmentation.
1437  *
1438  * Caller must hold pin and buffer cleanup lock on the buffer.
1439  *
1440  * tupindex is the index in vacrelstats->dead_tuples of the first dead
1441  * tuple for this page.  We assume the rest follow sequentially.
1442  * The return value is the first tupindex after the tuples of this page.
1443  */
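/*
 * For illustration only (hypothetical TIDs): if dead_tuples contains
 * (5,1) (5,4) (5,7) (8,2) ... and we are called with blkno = 5 and
 * tupindex pointing at (5,1), the loop below marks offsets 1, 4 and 7
 * unused and returns the index of (8,2).
 */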
1444 static int
1445 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
1446                                  int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
1447 {
1448         Page            page = BufferGetPage(buffer);
1449         OffsetNumber unused[MaxOffsetNumber];
1450         int                     uncnt = 0;
1451         TransactionId visibility_cutoff_xid;
1452         bool            all_frozen;
1453
1454         pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
1455
1456         START_CRIT_SECTION();
1457
1458         for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
1459         {
1460                 BlockNumber tblk;
1461                 OffsetNumber toff;
1462                 ItemId          itemid;
1463
1464                 tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
1465                 if (tblk != blkno)
1466                         break;                          /* past end of tuples for this block */
1467                 toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
1468                 itemid = PageGetItemId(page, toff);
1469                 ItemIdSetUnused(itemid);
1470                 unused[uncnt++] = toff;
1471         }
1472
1473         PageRepairFragmentation(page);
1474
1475         /*
1476          * Mark buffer dirty before we write WAL.
1477          */
1478         MarkBufferDirty(buffer);
1479
1480         /* XLOG stuff */
1481         if (RelationNeedsWAL(onerel))
1482         {
1483                 XLogRecPtr      recptr;
1484
1485                 recptr = log_heap_clean(onerel, buffer,
1486                                                                 NULL, 0, NULL, 0,
1487                                                                 unused, uncnt,
1488                                                                 vacrelstats->latestRemovedXid);
1489                 PageSetLSN(page, recptr);
1490         }
1491
1492         /*
1493          * End the critical section, so we can safely do visibility tests (which
1494          * possibly need to perform IO and allocate memory!). If we crash now the
1495          * page (including the corresponding vm bit) might not be marked all
1496          * visible, but that's fine. A later vacuum will fix that.
1497          */
1498         END_CRIT_SECTION();
1499
1500         /*
1501          * Now that we have removed the dead tuples from the page, once again
1502          * check if the page has become all-visible.  The page is already marked
1503          * dirty, exclusively locked, and, if needed, a full page image has been
1504          * emitted in the log_heap_clean() above.
1505          */
1506         if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
1507                                                                  &all_frozen))
1508                 PageSetAllVisible(page);
1509
1510         /*
1511          * All the changes to the heap page have been done. If the all-visible
1512          * flag is now set, also set the VM all-visible bit (and, if possible, the
1513          * all-frozen bit) unless this has already been done previously.
1514          */
1515         if (PageIsAllVisible(page))
1516         {
1517                 uint8           vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
1518                 uint8           flags = 0;
1519
1520                 /* Set the VM all-visible and all-frozen bits, if not already set */
1521                 if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
1522                         flags |= VISIBILITYMAP_ALL_VISIBLE;
1523                 if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
1524                         flags |= VISIBILITYMAP_ALL_FROZEN;
1525
1526                 Assert(BufferIsValid(*vmbuffer));
1527                 if (flags != 0)
1528                         visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
1529                                                           *vmbuffer, visibility_cutoff_xid, flags);
1530         }
1531
1532         return tupindex;
1533 }
1534
1535 /*
1536  *      lazy_check_needs_freeze() -- scan page to see if any tuples
1537  *                                       need to be cleaned to avoid wraparound
1538  *
1539  * Returns true if the page needs to be vacuumed using cleanup lock.
1540  * Also returns a flag indicating whether page contains any tuples at all.
1541  */
1542 static bool
1543 lazy_check_needs_freeze(Buffer buf, bool *hastup)
1544 {
1545         Page            page = BufferGetPage(buf);
1546         OffsetNumber offnum,
1547                                 maxoff;
1548         HeapTupleHeader tupleheader;
1549
1550         *hastup = false;
1551
1552         /* If we hit an uninitialized page, we want to force vacuuming it. */
1553         if (PageIsNew(page))
1554                 return true;
1555
1556         /* Quick out for ordinary empty page. */
1557         if (PageIsEmpty(page))
1558                 return false;
1559
1560         maxoff = PageGetMaxOffsetNumber(page);
1561         for (offnum = FirstOffsetNumber;
1562                  offnum <= maxoff;
1563                  offnum = OffsetNumberNext(offnum))
1564         {
1565                 ItemId          itemid;
1566
1567                 itemid = PageGetItemId(page, offnum);
1568
1569                 /* this should match hastup test in count_nondeletable_pages() */
1570                 if (ItemIdIsUsed(itemid))
1571                         *hastup = true;
1572
1573                 /* dead and redirect items never need freezing */
1574                 if (!ItemIdIsNormal(itemid))
1575                         continue;
1576
1577                 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1578
1579                 if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1580                                                                         MultiXactCutoff, buf))
1581                         return true;
1582         }                                                       /* scan along page */
1583
1584         return false;
1585 }
1586
1587
1588 /*
1589  *      lazy_vacuum_index() -- vacuum one index relation.
1590  *
1591  *              Delete all the index entries pointing to tuples listed in
1592  *              vacrelstats->dead_tuples, and update running statistics.
1593  */
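/*
 * Note that index_bulk_delete() consults the callback once per index
 * entry it scans, so with N dead heap TIDs each probe costs roughly
 * O(log N) thanks to the bsearch() in lazy_tid_reaped() below.
 */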
1594 static void
1595 lazy_vacuum_index(Relation indrel,
1596                                   IndexBulkDeleteResult **stats,
1597                                   LVRelStats *vacrelstats)
1598 {
1599         IndexVacuumInfo ivinfo;
1600         PGRUsage        ru0;
1601
1602         pg_rusage_init(&ru0);
1603
1604         ivinfo.index = indrel;
1605         ivinfo.analyze_only = false;
1606         ivinfo.estimated_count = true;
1607         ivinfo.message_level = elevel;
1608         ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
1609         ivinfo.strategy = vac_strategy;
1610
1611         /* Do bulk deletion */
1612         *stats = index_bulk_delete(&ivinfo, *stats,
1613                                                            lazy_tid_reaped, (void *) vacrelstats);
1614
1615         ereport(elevel,
1616                         (errmsg("scanned index \"%s\" to remove %d row versions",
1617                                         RelationGetRelationName(indrel),
1618                                         vacrelstats->num_dead_tuples),
1619                          errdetail_internal("%s", pg_rusage_show(&ru0))));
1620 }
1621
1622 /*
1623  *      lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
1624  */
1625 static void
1626 lazy_cleanup_index(Relation indrel,
1627                                    IndexBulkDeleteResult *stats,
1628                                    LVRelStats *vacrelstats)
1629 {
1630         IndexVacuumInfo ivinfo;
1631         PGRUsage        ru0;
1632
1633         pg_rusage_init(&ru0);
1634
1635         ivinfo.index = indrel;
1636         ivinfo.analyze_only = false;
1637         ivinfo.estimated_count = (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
1638         ivinfo.message_level = elevel;
1639         ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
1640         ivinfo.strategy = vac_strategy;
1641
1642         stats = index_vacuum_cleanup(&ivinfo, stats);
1643
1644         if (!stats)
1645                 return;
1646
1647         /*
1648          * Now update statistics in pg_class, but only if the index says the count
1649          * is accurate.
1650          */
1651         if (!stats->estimated_count)
1652                 vac_update_relstats(indrel,
1653                                                         stats->num_pages,
1654                                                         stats->num_index_tuples,
1655                                                         0,
1656                                                         false,
1657                                                         InvalidTransactionId,
1658                                                         InvalidMultiXactId,
1659                                                         false);
1660
1661         ereport(elevel,
1662                         (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
1663                                         RelationGetRelationName(indrel),
1664                                         stats->num_index_tuples,
1665                                         stats->num_pages),
1666                          errdetail("%.0f index row versions were removed.\n"
1667                          "%u index pages have been deleted, %u are currently reusable.\n"
1668                                            "%s.",
1669                                            stats->tuples_removed,
1670                                            stats->pages_deleted, stats->pages_free,
1671                                            pg_rusage_show(&ru0))));
1672
1673         pfree(stats);
1674 }
1675
1676 /*
1677  * should_attempt_truncation - should we attempt to truncate the heap?
1678  *
1679  * Don't even think about it unless we have a shot at releasing a goodly
1680  * number of pages.  Otherwise, the time taken isn't worth it.
1681  *
1682  * Also don't attempt it if we are doing early pruning/vacuuming, because a
1683  * scan which cannot find a truncated heap page cannot determine that the
1684  * snapshot is too old to read that page.  We might be able to get away with
1685  * truncating all except one of the pages, setting its LSN to (at least) the
1686  * maximum of the truncated range if we also treated an index leaf tuple
1687  * pointing to a missing heap page as something to trigger the "snapshot too
1688  * old" error, but that seems fragile and seems like it deserves its own patch
1689  * if we consider it.
1690  *
1691  * This is split out so that we can test whether truncation is going to be
1692  * called for before we actually do it.  If you change the logic here, be
1693  * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
1694  */
1695 static bool
1696 should_attempt_truncation(LVRelStats *vacrelstats)
1697 {
1698         BlockNumber possibly_freeable;
1699
1700         possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
1701         if (possibly_freeable > 0 &&
1702                 (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
1703           possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
1704                 old_snapshot_threshold < 0)
1705                 return true;
1706         else
1707                 return false;
1708 }
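/*
 * In other words, the threshold is effectively
 * Min(REL_TRUNCATE_MINIMUM, rel_pages / REL_TRUNCATE_FRACTION) potentially
 * freeable tail pages: small relations must have at least
 * 1/REL_TRUNCATE_FRACTION of their pages freeable at the end, while for
 * large relations REL_TRUNCATE_MINIMUM pages are enough.  Truncation is
 * never attempted while early pruning/vacuuming (old_snapshot_threshold
 * >= 0) is in use.
 */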
1709
1710 /*
1711  * lazy_truncate_heap - try to truncate off any empty pages at the end
1712  */
1713 static void
1714 lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
1715 {
1716         BlockNumber old_rel_pages = vacrelstats->rel_pages;
1717         BlockNumber new_rel_pages;
1718         PGRUsage        ru0;
1719         int                     lock_retry;
1720
1721         pg_rusage_init(&ru0);
1722
1723         /* Report that we are now truncating */
1724         pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
1725                                                                  PROGRESS_VACUUM_PHASE_TRUNCATE);
1726
1727         /*
1728          * Loop until no more truncating can be done.
1729          */
1730         do
1731         {
1732                 /*
1733                  * We need full exclusive lock on the relation in order to do
1734                  * truncation. If we can't get it, give up rather than waiting --- we
1735                  * don't want to block other backends, and we don't want to deadlock
1736                  * (which is quite possible considering we already hold a lower-grade
1737                  * lock).
1738                  */
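                /*
                 * In effect this loop polls for the lock: ConditionalLockRelation()
                 * never blocks, we sleep VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL between
                 * attempts, and we give up once roughly VACUUM_TRUNCATE_LOCK_TIMEOUT
                 * worth of retries has elapsed, so a busy relation simply skips
                 * truncation rather than stalling other backends.
                 */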
1739                 vacrelstats->lock_waiter_detected = false;
1740                 lock_retry = 0;
1741                 while (true)
1742                 {
1743                         if (ConditionalLockRelation(onerel, AccessExclusiveLock))
1744                                 break;
1745
1746                         /*
1747                          * Check for interrupts while trying to (re-)acquire the exclusive
1748                          * lock.
1749                          */
1750                         CHECK_FOR_INTERRUPTS();
1751
1752                         if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
1753                                                                 VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
1754                         {
1755                                 /*
1756                                  * We failed to establish the lock in the specified number of
1757                                  * retries. This means we give up truncating.
1758                                  */
1759                                 vacrelstats->lock_waiter_detected = true;
1760                                 ereport(elevel,
1761                                                 (errmsg("\"%s\": stopping truncate due to conflicting lock request",
1762                                                                 RelationGetRelationName(onerel))));
1763                                 return;
1764                         }
1765
1766                         pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
1767                 }
1768
1769                 /*
1770                  * Now that we have exclusive lock, look to see if the rel has grown
1771                  * whilst we were vacuuming with non-exclusive lock.  If so, give up;
1772                  * the newly added pages presumably contain non-deletable tuples.
1773                  */
1774                 new_rel_pages = RelationGetNumberOfBlocks(onerel);
1775                 if (new_rel_pages != old_rel_pages)
1776                 {
1777                         /*
1778                          * Note: we intentionally don't update vacrelstats->rel_pages with
1779                          * the new rel size here.  If we did, it would amount to assuming
1780                          * that the new pages are empty, which is unlikely. Leaving the
1781                          * numbers alone amounts to assuming that the new pages have the
1782                          * same tuple density as existing ones, which is less unlikely.
1783                          */
1784                         UnlockRelation(onerel, AccessExclusiveLock);
1785                         return;
1786                 }
1787
1788                 /*
1789                  * Scan backwards from the end to verify that the end pages actually
1790                  * contain no tuples.  This is *necessary*, not optional, because
1791                  * other backends could have added tuples to these pages whilst we
1792                  * were vacuuming.
1793                  */
1794                 new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
1795
1796                 if (new_rel_pages >= old_rel_pages)
1797                 {
1798                         /* can't do anything after all */
1799                         UnlockRelation(onerel, AccessExclusiveLock);
1800                         return;
1801                 }
1802
1803                 /*
1804                  * Okay to truncate.
1805                  */
1806                 RelationTruncate(onerel, new_rel_pages);
1807
1808                 /*
1809                  * We can release the exclusive lock as soon as we have truncated.
1810                  * Other backends can't safely access the relation until they have
1811                  * processed the smgr invalidation that smgrtruncate sent out ... but
1812                  * that should happen as part of standard invalidation processing once
1813                  * they acquire lock on the relation.
1814                  */
1815                 UnlockRelation(onerel, AccessExclusiveLock);
1816
1817                 /*
1818                  * Update statistics.  Here, it *is* correct to adjust rel_pages
1819                  * without also touching reltuples, since the tuple count wasn't
1820                  * changed by the truncation.
1821                  */
1822                 vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
1823                 vacrelstats->rel_pages = new_rel_pages;
1824
1825                 ereport(elevel,
1826                                 (errmsg("\"%s\": truncated %u to %u pages",
1827                                                 RelationGetRelationName(onerel),
1828                                                 old_rel_pages, new_rel_pages),
1829                                  errdetail_internal("%s",
1830                                                                         pg_rusage_show(&ru0))));
1831                 old_rel_pages = new_rel_pages;
1832         } while (new_rel_pages > vacrelstats->nonempty_pages &&
1833                          vacrelstats->lock_waiter_detected);
1834 }
1835
1836 /*
1837  * Rescan end pages to verify that they are (still) empty of tuples.
1838  *
1839  * Returns number of nondeletable pages (last nonempty page + 1).
1840  */
1841 static BlockNumber
1842 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
1843 {
1844         BlockNumber blkno;
1845         BlockNumber prefetchedUntil;
1846         instr_time      starttime;
1847
1848         /* Initialize the starttime used to check for conflicting lock requests */
1849         INSTR_TIME_SET_CURRENT(starttime);
1850
1851         /*
1852          * Start checking blocks at what we believe relation end to be and move
1853          * backwards.  (Strange coding of loop control is needed because blkno is
1854          * unsigned.)  To make the scan faster, we prefetch a few blocks at a time
1855          * in forward direction, so that OS-level readahead can kick in.
1856          */
1857         blkno = vacrelstats->rel_pages;
1858         StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
1859                                          "prefetch size must be power of 2");
1860         prefetchedUntil = InvalidBlockNumber;
1861         while (blkno > vacrelstats->nonempty_pages)
1862         {
1863                 Buffer          buf;
1864                 Page            page;
1865                 OffsetNumber offnum,
1866                                         maxoff;
1867                 bool            hastup;
1868
1869                 /*
1870                  * Check if another process requests a lock on our relation. We are
1871                  * holding an AccessExclusiveLock here, so they will be waiting. We
1872                  * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
1873                  * only check if that interval has elapsed once every 32 blocks to
1874                  * keep the number of system calls and actual shared lock table
1875                  * lookups to a minimum.
1876                  */
1877                 if ((blkno % 32) == 0)
1878                 {
1879                         instr_time      currenttime;
1880                         instr_time      elapsed;
1881
1882                         INSTR_TIME_SET_CURRENT(currenttime);
1883                         elapsed = currenttime;
1884                         INSTR_TIME_SUBTRACT(elapsed, starttime);
1885                         if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
1886                                 >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
1887                         {
1888                                 if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
1889                                 {
1890                                         ereport(elevel,
1891                                                         (errmsg("\"%s\": suspending truncate due to conflicting lock request",
1892                                                                         RelationGetRelationName(onerel))));
1893
1894                                         vacrelstats->lock_waiter_detected = true;
1895                                         return blkno;
1896                                 }
1897                                 starttime = currenttime;
1898                         }
1899                 }
1900
1901                 /*
1902                  * We don't insert a vacuum delay point here, because we have an
1903                  * exclusive lock on the table which we want to hold for as short a
1904                  * time as possible.  We still need to check for interrupts however.
1905                  */
1906                 CHECK_FOR_INTERRUPTS();
1907
1908                 blkno--;
1909
1910                 /* If we haven't prefetched this lot yet, do so now. */
1911                 if (prefetchedUntil > blkno)
1912                 {
1913                         BlockNumber prefetchStart;
1914                         BlockNumber pblkno;
1915
1916                         prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
1917                         for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
1918                         {
1919                                 PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
1920                                 CHECK_FOR_INTERRUPTS();
1921                         }
1922                         prefetchedUntil = prefetchStart;
1923                 }
1924
1925                 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
1926                                                                  RBM_NORMAL, vac_strategy);
1927
1928                 /* In this phase we only need shared access to the buffer */
1929                 LockBuffer(buf, BUFFER_LOCK_SHARE);
1930
1931                 page = BufferGetPage(buf);
1932
1933                 if (PageIsNew(page) || PageIsEmpty(page))
1934                 {
1935                         /* PageIsNew probably shouldn't happen... */
1936                         UnlockReleaseBuffer(buf);
1937                         continue;
1938                 }
1939
1940                 hastup = false;
1941                 maxoff = PageGetMaxOffsetNumber(page);
1942                 for (offnum = FirstOffsetNumber;
1943                          offnum <= maxoff;
1944                          offnum = OffsetNumberNext(offnum))
1945                 {
1946                         ItemId          itemid;
1947
1948                         itemid = PageGetItemId(page, offnum);
1949
1950                         /*
1951                          * Note: any non-unused item should be taken as a reason to keep
1952                          * this page.  We formerly thought that DEAD tuples could be
1953                          * thrown away, but that's not so, because we'd not have cleaned
1954                          * out their index entries.
1955                          */
1956                         if (ItemIdIsUsed(itemid))
1957                         {
1958                                 hastup = true;
1959                                 break;                  /* can stop scanning */
1960                         }
1961                 }                                               /* scan along page */
1962
1963                 UnlockReleaseBuffer(buf);
1964
1965                 /* Done scanning if we found a tuple here */
1966                 if (hastup)
1967                         return blkno + 1;
1968         }
1969
1970         /*
1971          * If we fall out of the loop, all the previously-thought-to-be-empty
1972          * pages still are; we need not bother to look at the last known-nonempty
1973          * page.
1974          */
1975         return vacrelstats->nonempty_pages;
1976 }
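/*
 * A minimal sketch of the prefetch-window arithmetic used above, assuming
 * only that the window size is a power of two: rounding the current block
 * down to a window boundary yields a short ascending run of block numbers
 * ending at blkno, so OS readahead still sees forward I/O even though the
 * scan itself walks backwards.
 */
static BlockNumber
prefetch_window_start_sketch(BlockNumber blkno, BlockNumber window)
{
        Assert((window & (window - 1)) == 0);   /* window must be a power of two */
        return blkno & ~(window - 1);
}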
1977
1978 /*
1979  * lazy_space_alloc - space allocation decisions for lazy vacuum
1980  *
1981  * See the comments at the head of this file for rationale.
1982  */
1983 static void
1984 lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
1985 {
1986         long            maxtuples;
1987         int                     vac_work_mem = IsAutoVacuumWorkerProcess() &&
1988                 autovacuum_work_mem != -1 ?
1989                 autovacuum_work_mem : maintenance_work_mem;
1990
1991         if (vacrelstats->hasindex)
1992         {
1993                 maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
1994                 maxtuples = Min(maxtuples, INT_MAX);
1995                 maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
1996
1997                 /* curious coding here to ensure the multiplication can't overflow */
1998                 if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
1999                         maxtuples = relblocks * LAZY_ALLOC_TUPLES;
2000
2001                 /* stay sane if small maintenance_work_mem */
2002                 maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
2003         }
2004         else
2005         {
2006                 maxtuples = MaxHeapTuplesPerPage;
2007         }
2008
2009         vacrelstats->num_dead_tuples = 0;
2010         vacrelstats->max_dead_tuples = (int) maxtuples;
2011         vacrelstats->dead_tuples = (ItemPointer)
2012                 palloc(maxtuples * sizeof(ItemPointerData));
2013 }
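/*
 * A rough worked example of the sizing above, assuming the default 8kB
 * block size and sizeof(ItemPointerData) == 6: with 64MB of work memory,
 * 65536kB * 1024 / 6 allows roughly 11 million dead-tuple TIDs.  For a
 * small table the relblocks * LAZY_ALLOC_TUPLES cap applies first, so a
 * few-thousand-page relation allocates only a few MB no matter how large
 * maintenance_work_mem is.
 */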
2014
2015 /*
2016  * lazy_record_dead_tuple - remember one deletable tuple
2017  */
2018 static void
2019 lazy_record_dead_tuple(LVRelStats *vacrelstats,
2020                                            ItemPointer itemptr)
2021 {
2022         /*
2023          * The array shouldn't overflow under normal behavior, but perhaps it
2024          * could if we are given a really small maintenance_work_mem. In that
2025          * case, just forget the last few tuples (we'll get 'em next time).
2026          */
2027         if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
2028         {
2029                 vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
2030                 vacrelstats->num_dead_tuples++;
2031                 pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
2032                                                                          vacrelstats->num_dead_tuples);
2033         }
2034 }
2035
2036 /*
2037  *      lazy_tid_reaped() -- is a particular tid deletable?
2038  *
2039  *              This has the right signature to be an IndexBulkDeleteCallback.
2040  *
2041  *              Assumes dead_tuples array is in sorted order.
2042  */
2043 static bool
2044 lazy_tid_reaped(ItemPointer itemptr, void *state)
2045 {
2046         LVRelStats *vacrelstats = (LVRelStats *) state;
2047         ItemPointer res;
2048
2049         res = (ItemPointer) bsearch((void *) itemptr,
2050                                                                 (void *) vacrelstats->dead_tuples,
2051                                                                 vacrelstats->num_dead_tuples,
2052                                                                 sizeof(ItemPointerData),
2053                                                                 vac_cmp_itemptr);
2054
2055         return (res != NULL);
2056 }
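/*
 * Hypothetical usage sketch (this helper is purely illustrative): probing
 * the dead-TID list directly, the same way index_bulk_delete() does
 * through the callback above.
 */
static bool
tid_is_dead_sketch(LVRelStats *vacrelstats, BlockNumber blk, OffsetNumber off)
{
        ItemPointerData tid;

        ItemPointerSet(&tid, blk, off);
        return lazy_tid_reaped(&tid, (void *) vacrelstats);
}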
2057
2058 /*
2059  * Comparator routines for use with qsort() and bsearch().
2060  */
2061 static int
2062 vac_cmp_itemptr(const void *left, const void *right)
2063 {
2064         BlockNumber lblk,
2065                                 rblk;
2066         OffsetNumber loff,
2067                                 roff;
2068
2069         lblk = ItemPointerGetBlockNumber((ItemPointer) left);
2070         rblk = ItemPointerGetBlockNumber((ItemPointer) right);
2071
2072         if (lblk < rblk)
2073                 return -1;
2074         if (lblk > rblk)
2075                 return 1;
2076
2077         loff = ItemPointerGetOffsetNumber((ItemPointer) left);
2078         roff = ItemPointerGetOffsetNumber((ItemPointer) right);
2079
2080         if (loff < roff)
2081                 return -1;
2082         if (loff > roff)
2083                 return 1;
2084
2085         return 0;
2086 }
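/*
 * Hypothetical usage sketch: the comparator orders TIDs by (block, offset),
 * which is the order lazy_scan_heap() naturally appends them in, so the
 * bsearch() in lazy_tid_reaped() needs no explicit sort.  If an unordered
 * TID array ever had to be searched this way, it could first be sorted
 * like so:
 */
static void
sort_dead_tuples_sketch(ItemPointerData *tids, int ntids)
{
        qsort(tids, ntids, sizeof(ItemPointerData), vac_cmp_itemptr);
}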
2087
2088 /*
2089  * Check if every tuple in the given page is visible to all current and future
2090  * transactions. Also return the visibility_cutoff_xid which is the highest
2091  * xmin amongst the visible tuples.  Set *all_frozen to true if every tuple
2092  * on this page is frozen.
2093  */
2094 static bool
2095 heap_page_is_all_visible(Relation rel, Buffer buf,
2096                                                  TransactionId *visibility_cutoff_xid,
2097                                                  bool *all_frozen)
2098 {
2099         Page            page = BufferGetPage(buf);
2100         BlockNumber blockno = BufferGetBlockNumber(buf);
2101         OffsetNumber offnum,
2102                                 maxoff;
2103         bool            all_visible = true;
2104
2105         *visibility_cutoff_xid = InvalidTransactionId;
2106         *all_frozen = true;
2107
2108         /*
2109          * This is a stripped down version of the line pointer scan in
2110          * lazy_scan_heap(). So if you change anything here, also check that code.
2111          */
2112         maxoff = PageGetMaxOffsetNumber(page);
2113         for (offnum = FirstOffsetNumber;
2114                  offnum <= maxoff && all_visible;
2115                  offnum = OffsetNumberNext(offnum))
2116         {
2117                 ItemId          itemid;
2118                 HeapTupleData tuple;
2119
2120                 itemid = PageGetItemId(page, offnum);
2121
2122                 /* Unused or redirect line pointers are of no interest */
2123                 if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
2124                         continue;
2125
2126                 ItemPointerSet(&(tuple.t_self), blockno, offnum);
2127
2128                 /*
2129                  * Dead line pointers can have index entries pointing to them, so
2130                  * they can't be treated as visible.
2131                  */
2132                 if (ItemIdIsDead(itemid))
2133                 {
2134                         all_visible = false;
2135                         *all_frozen = false;
2136                         break;
2137                 }
2138
2139                 Assert(ItemIdIsNormal(itemid));
2140
2141                 tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2142                 tuple.t_len = ItemIdGetLength(itemid);
2143                 tuple.t_tableOid = RelationGetRelid(rel);
2144
2145                 switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
2146                 {
2147                         case HEAPTUPLE_LIVE:
2148                                 {
2149                                         TransactionId xmin;
2150
2151                                         /* Check comments in lazy_scan_heap. */
2152                                         if (!HeapTupleHeaderXminCommitted(tuple.t_data))
2153                                         {
2154                                                 all_visible = false;
2155                                                 *all_frozen = false;
2156                                                 break;
2157                                         }
2158
2159                                         /*
2160                                          * The inserter definitely committed. But is it old enough
2161                                          * that everyone sees it as committed?
2162                                          */
2163                                         xmin = HeapTupleHeaderGetXmin(tuple.t_data);
2164                                         if (!TransactionIdPrecedes(xmin, OldestXmin))
2165                                         {
2166                                                 all_visible = false;
2167                                                 *all_frozen = false;
2168                                                 break;
2169                                         }
2170
2171                                         /* Track newest xmin on page. */
2172                                         if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
2173                                                 *visibility_cutoff_xid = xmin;
2174
2175                                         /* Check whether this tuple is already frozen or not */
2176                                         if (all_visible && *all_frozen &&
2177                                                 heap_tuple_needs_eventual_freeze(tuple.t_data))
2178                                                 *all_frozen = false;
2179                                 }
2180                                 break;
2181
2182                         case HEAPTUPLE_DEAD:
2183                         case HEAPTUPLE_RECENTLY_DEAD:
2184                         case HEAPTUPLE_INSERT_IN_PROGRESS:
2185                         case HEAPTUPLE_DELETE_IN_PROGRESS:
2186                                 {
2187                                         all_visible = false;
2188                                         *all_frozen = false;
2189                                         break;
2190                                 }
2191                         default:
2192                                 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
2193                                 break;
2194                 }
2195         }                                                       /* scan along page */
2196
2197         return all_visible;
2198 }