/*-------------------------------------------------------------------------
 *
 * vacuum.c
 *	  The postgres vacuum cleaner.
 *
 * This file includes the "full" version of VACUUM, as well as control code
 * used by all three of full VACUUM, lazy VACUUM, and ANALYZE.  See
 * vacuumlazy.c and analyze.c for the rest of the code for the latter two.
 *
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.379 2008/10/31 15:05:00 heikki Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/clog.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/transam.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "catalog/namespace.h"
#include "catalog/pg_database.h"
#include "catalog/pg_namespace.h"
#include "commands/dbcommands.h"
#include "commands/vacuum.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/flatfiles.h"
#include "utils/fmgroids.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
/*
 * GUC parameters
 */
int			vacuum_freeze_min_age;
/*
 * VacPage structures keep track of each page on which we find useful
 * amounts of free space.
 */
typedef struct VacPageData
{
	BlockNumber blkno;			/* BlockNumber of this Page */
	Size		free;			/* FreeSpace on this Page */
	uint16		offsets_used;	/* Number of OffNums used by vacuum */
	uint16		offsets_free;	/* Number of OffNums free or to be free */
	OffsetNumber offsets[1];	/* Array of free OffNums */
} VacPageData;

typedef VacPageData *VacPage;
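/*
 * Note: offsets[] is a variable-length array sized at allocation time.
 * scan_heap() builds each entry in a maximal-sized workspace and then
 * copy_vac_page() makes a right-sized copy for the lists consumed by
 * repair_frag() and vacuum_heap().
 */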
typedef struct VacPageListData
{
	BlockNumber empty_end_pages;	/* Number of "empty" end-pages */
	int			num_pages;		/* Number of pages in pagedesc */
	int			num_allocated_pages;	/* Number of allocated pages in
										 * pagedesc */
	VacPage    *pagedesc;		/* Descriptions of pages */
} VacPageListData;

typedef VacPageListData *VacPageList;
/*
 * The "vtlinks" array keeps information about each recently-updated tuple
 * ("recent" meaning its XMAX is too new to let us recycle the tuple).
 * We store the tuple's own TID as well as its t_ctid (its link to the next
 * newer tuple version).  Searching in this array allows us to follow update
 * chains backwards from newer to older tuples.  When we move a member of an
 * update chain, we must move *all* the live members of the chain, so that we
 * can maintain their t_ctid link relationships (we must not just overwrite
 * t_ctid in an existing tuple).
 *
 * Note: because t_ctid links can be stale (this would only occur if a prior
 * VACUUM crashed partway through), it is possible that new_tid points to an
 * empty slot or unrelated tuple.  We have to check the linkage as we follow
 * it, just as is done in EvalPlanQual.
 */
typedef struct VTupleLinkData
{
	ItemPointerData new_tid;	/* t_ctid of an updated tuple */
	ItemPointerData this_tid;	/* t_self of the tuple */
} VTupleLinkData;

typedef VTupleLinkData *VTupleLink;
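/*
 * The vtlinks array is kept sorted by new_tid (scan_heap() qsorts it with
 * vac_cmp_vtlinks) so that repair_frag() can locate a chain member's parent
 * with vac_bsearch() while walking update chains backwards.
 */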
/*
 * We use an array of VTupleMoveData to plan a chain tuple move fully
 * before we do it.
 */
typedef struct VTupleMoveData
{
	ItemPointerData tid;		/* tuple ID */
	VacPage		vacpage;		/* where to move it to */
	bool		cleanVpd;		/* clean vacpage before using? */
} VTupleMoveData;

typedef VTupleMoveData *VTupleMove;
/*
 * VRelStats contains the data acquired by scan_heap for use later
 */
typedef struct VRelStats
{
	/* miscellaneous statistics */
	BlockNumber rel_pages;		/* pages in relation */
	double		rel_tuples;		/* tuples that remain after vacuuming */
	double		rel_indexed_tuples;		/* indexed tuples that remain */
	Size		min_tlen;		/* min surviving tuple size */
	Size		max_tlen;		/* max surviving tuple size */
	bool		hasindex;
	/* vtlinks array for tuple chain following - sorted by new_tid */
	int			num_vtlinks;
	VTupleLink	vtlinks;
} VRelStats;
/*----------------------------------------------------------------------
 * ExecContext:
 *
 * As these variables always appear together, we put them into one struct
 * and pull initialization and cleanup into separate routines.
 * ExecContext is used by repair_frag() and move_xxx_tuple().  More
 * accurately: it is *used* only in move_xxx_tuple(), but because this
 * routine is called many times, we initialize the struct just once in
 * repair_frag() and pass it on to move_xxx_tuple().
 */
typedef struct ExecContextData
{
	ResultRelInfo *resultRelInfo;
	EState	   *estate;
	TupleTableSlot *slot;
} ExecContextData;

typedef ExecContextData *ExecContext;
static void
ExecContext_Init(ExecContext ec, Relation rel)
{
	TupleDesc	tupdesc = RelationGetDescr(rel);

	/*
	 * We need a ResultRelInfo and an EState so we can use the regular
	 * executor's index-entry-making machinery.
	 */
	ec->estate = CreateExecutorState();

	ec->resultRelInfo = makeNode(ResultRelInfo);
	ec->resultRelInfo->ri_RangeTableIndex = 1;	/* dummy */
	ec->resultRelInfo->ri_RelationDesc = rel;
	ec->resultRelInfo->ri_TrigDesc = NULL;		/* we don't fire triggers */

	ExecOpenIndices(ec->resultRelInfo);

	ec->estate->es_result_relations = ec->resultRelInfo;
	ec->estate->es_num_result_relations = 1;
	ec->estate->es_result_relation_info = ec->resultRelInfo;

	/* Set up a tuple slot too */
	ec->slot = MakeSingleTupleTableSlot(tupdesc);
}
static void
ExecContext_Finish(ExecContext ec)
{
	ExecDropSingleTupleTableSlot(ec->slot);
	ExecCloseIndices(ec->resultRelInfo);
	FreeExecutorState(ec->estate);
}

/*
 * End of ExecContext Implementation
 *----------------------------------------------------------------------
 */
/* A few variables that don't seem worth passing around as parameters */
static MemoryContext vac_context = NULL;
static int	elevel = -1;
static TransactionId OldestXmin;
static TransactionId FreezeLimit;
static BufferAccessStrategy vac_strategy;
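/*
 * OldestXmin is the cutoff below which deleted tuples are considered DEAD
 * rather than RECENTLY_DEAD, and FreezeLimit is the cutoff below which live
 * tuples' xmins are replaced with FrozenTransactionId.  Both are computed by
 * vacuum_set_xid_limits() once per vacuumed relation.
 */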
/* non-export function prototypes */
static List *get_rel_oids(Oid relid, const RangeVar *vacrel,
			 const char *stmttype);
static void vac_truncate_clog(TransactionId frozenXID);
static void vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast,
		   bool for_wraparound);
static void full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt);
static void scan_heap(VRelStats *vacrelstats, Relation onerel,
		  VacPageList vacuum_pages, VacPageList fraged_pages);
static void repair_frag(VRelStats *vacrelstats, Relation onerel,
			VacPageList vacuum_pages, VacPageList fraged_pages,
			int nindexes, Relation *Irel);
static void move_chain_tuple(Relation rel,
				 Buffer old_buf, Page old_page, HeapTuple old_tup,
				 Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
				 ExecContext ec, ItemPointer ctid, bool cleanVpd);
static void move_plain_tuple(Relation rel,
				 Buffer old_buf, Page old_page, HeapTuple old_tup,
				 Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
				 ExecContext ec);
static void update_hint_bits(Relation rel, VacPageList fraged_pages,
				 int num_fraged_pages, BlockNumber last_move_dest_block,
				 int num_moved);
static void vacuum_heap(VRelStats *vacrelstats, Relation onerel,
			VacPageList vacpagelist);
static void vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage);
static void vacuum_index(VacPageList vacpagelist, Relation indrel,
			 double num_tuples, int keep_tuples);
static void scan_index(Relation indrel, double num_tuples);
static bool tid_reaped(ItemPointer itemptr, void *state);
static void vac_update_fsm(Relation onerel, VacPageList fraged_pages,
			   BlockNumber rel_pages);
static VacPage copy_vac_page(VacPage vacpage);
static void vpage_insert(VacPageList vacpagelist, VacPage vpnew);
static void *vac_bsearch(const void *key, const void *base,
			size_t nelem, size_t size,
			int (*compar) (const void *, const void *));
static int	vac_cmp_blk(const void *left, const void *right);
static int	vac_cmp_offno(const void *left, const void *right);
static int	vac_cmp_vtlinks(const void *left, const void *right);
static bool enough_space(VacPage vacpage, Size len);
static Size PageGetFreeSpaceWithFillFactor(Relation relation, Page page);
/****************************************************************************
 *																			*
 *			Code common to all flavors of VACUUM and ANALYZE				*
 *																			*
 ****************************************************************************
 */
/*
 * Primary entry point for VACUUM and ANALYZE commands.
 *
 * relid is normally InvalidOid; if it is not, then it provides the relation
 * OID to be processed, and vacstmt->relation is ignored.  (The non-invalid
 * case is currently only used by autovacuum.)
 *
 * do_toast is passed as FALSE by autovacuum, because it processes TOAST
 * tables separately.
 *
 * for_wraparound is used by autovacuum to let us know when it's forcing
 * a vacuum for wraparound, which should not be auto-cancelled.
 *
 * bstrategy is normally given as NULL, but in autovacuum it can be passed
 * in to use the same buffer strategy object across multiple vacuum() calls.
 *
 * isTopLevel should be passed down from ProcessUtility.
 *
 * It is the caller's responsibility that vacstmt and bstrategy
 * (if given) be allocated in a memory context that won't disappear
 * at transaction commit.
 */
void
vacuum(VacuumStmt *vacstmt, Oid relid, bool do_toast,
	   BufferAccessStrategy bstrategy, bool for_wraparound, bool isTopLevel)
{
	const char *stmttype = vacstmt->vacuum ? "VACUUM" : "ANALYZE";
	volatile MemoryContext anl_context = NULL;
	volatile bool all_rels,
				in_outer_xact,
				use_own_xacts;
	List	   *relations;

	if (vacstmt->verbose)
		elevel = INFO;
	else
		elevel = DEBUG2;

	/*
	 * We cannot run VACUUM inside a user transaction block; if we were inside
	 * a transaction, then our commit- and start-transaction-command calls
	 * would not have the intended effect!  Furthermore, the forced commit that
	 * occurs before truncating the relation's file would have the effect of
	 * committing the rest of the user's transaction too, which would
	 * certainly not be the desired behavior.  (This only applies to VACUUM
	 * FULL, though.  We could in theory run lazy VACUUM inside a transaction
	 * block, but we choose to disallow that case because we'd rather commit
	 * as soon as possible after finishing the vacuum.  This is mainly so that
	 * we can let go the AccessExclusiveLock that we may be holding.)
	 *
	 * ANALYZE (without VACUUM) can run either way.
	 */
	if (vacstmt->vacuum)
	{
		PreventTransactionChain(isTopLevel, stmttype);
		in_outer_xact = false;
	}
	else
		in_outer_xact = IsInTransactionChain(isTopLevel);

	/*
	 * Send info about dead objects to the statistics collector, unless we are
	 * in autovacuum --- autovacuum.c does this for itself.
	 */
	if (vacstmt->vacuum && !IsAutoVacuumWorkerProcess())
		pgstat_vacuum_stat();

	/*
	 * Create special memory context for cross-transaction storage.
	 *
	 * Since it is a child of PortalContext, it will go away eventually even
	 * if we suffer an error; there's no need for special abort cleanup logic.
	 */
	vac_context = AllocSetContextCreate(PortalContext,
										"Vacuum",
										ALLOCSET_DEFAULT_MINSIZE,
										ALLOCSET_DEFAULT_INITSIZE,
										ALLOCSET_DEFAULT_MAXSIZE);

	/*
	 * If caller didn't give us a buffer strategy object, make one in the
	 * cross-transaction memory context.
	 */
	if (bstrategy == NULL)
	{
		MemoryContext old_context = MemoryContextSwitchTo(vac_context);

		bstrategy = GetAccessStrategy(BAS_VACUUM);
		MemoryContextSwitchTo(old_context);
	}
	vac_strategy = bstrategy;
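	/*
	 * Note: the BAS_VACUUM strategy confines vacuum's buffer usage to a
	 * small ring of buffers, so scanning a large relation does not evict
	 * the rest of shared_buffers.
	 */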
	/* Remember whether we are processing everything in the DB */
	all_rels = (!OidIsValid(relid) && vacstmt->relation == NULL);

	/*
	 * Build list of relations to process, unless caller gave us one. (If we
	 * build one, we put it in vac_context for safekeeping.)
	 */
	relations = get_rel_oids(relid, vacstmt->relation, stmttype);

	/*
	 * Decide whether we need to start/commit our own transactions.
	 *
	 * For VACUUM (with or without ANALYZE): always do so, so that we can
	 * release locks as soon as possible.  (We could possibly use the outer
	 * transaction for a one-table VACUUM, but handling TOAST tables would be
	 * problematic.)
	 *
	 * For ANALYZE (no VACUUM): if inside a transaction block, we cannot
	 * start/commit our own transactions.  Also, there's no need to do so if
	 * only processing one relation.  For multiple relations when not within a
	 * transaction block, and also in an autovacuum worker, use own
	 * transactions so we can release locks sooner.
	 */
	if (vacstmt->vacuum)
		use_own_xacts = true;
	else
	{
		Assert(vacstmt->analyze);
		if (IsAutoVacuumWorkerProcess())
			use_own_xacts = true;
		else if (in_outer_xact)
			use_own_xacts = false;
		else if (list_length(relations) > 1)
			use_own_xacts = true;
		else
			use_own_xacts = false;
	}

	/*
	 * If we are running ANALYZE without per-table transactions, we'll need a
	 * memory context with table lifetime.
	 */
	if (!use_own_xacts)
		anl_context = AllocSetContextCreate(PortalContext,
											"Analyze",
											ALLOCSET_DEFAULT_MINSIZE,
											ALLOCSET_DEFAULT_INITSIZE,
											ALLOCSET_DEFAULT_MAXSIZE);
	/*
	 * vacuum_rel expects to be entered with no transaction active; it will
	 * start and commit its own transaction.  But we are called by an SQL
	 * command, and so we are executing inside a transaction already.  We
	 * commit the transaction started in PostgresMain() here, and start
	 * another one before exiting to match the commit waiting for us back in
	 * PostgresMain().
	 */
	if (use_own_xacts)
	{
		/* ActiveSnapshot is not set by autovacuum */
		if (ActiveSnapshotSet())
			PopActiveSnapshot();

		/* matches the StartTransaction in PostgresMain() */
		CommitTransactionCommand();
	}
	/* Turn vacuum cost accounting on or off */
	PG_TRY();
	{
		ListCell   *cur;

		VacuumCostActive = (VacuumCostDelay > 0);
		VacuumCostBalance = 0;
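		/*
		 * While VacuumCostActive is set, vacuum_delay_point() accumulates
		 * per-page costs in VacuumCostBalance and naps for VacuumCostDelay
		 * milliseconds whenever the balance reaches VacuumCostLimit,
		 * throttling vacuum's I/O rate.
		 */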
		/*
		 * Loop to process each selected relation.
		 */
		foreach(cur, relations)
		{
			Oid			relid = lfirst_oid(cur);

			if (vacstmt->vacuum)
				vacuum_rel(relid, vacstmt, do_toast, for_wraparound);

			if (vacstmt->analyze)
			{
				MemoryContext old_context = NULL;

				/*
				 * If using separate xacts, start one for analyze. Otherwise,
				 * we can use the outer transaction, but we still need to call
				 * analyze_rel in a memory context that will be cleaned up on
				 * return (else we leak memory while processing multiple
				 * tables).
				 */
				if (use_own_xacts)
				{
					StartTransactionCommand();
					/* functions in indexes may want a snapshot set */
					PushActiveSnapshot(GetTransactionSnapshot());
				}
				else
					old_context = MemoryContextSwitchTo(anl_context);

				analyze_rel(relid, vacstmt, vac_strategy);

				if (use_own_xacts)
				{
					PopActiveSnapshot();
					CommitTransactionCommand();
				}
				else
				{
					MemoryContextSwitchTo(old_context);
					MemoryContextResetAndDeleteChildren(anl_context);
				}
			}
		}
	}
	PG_CATCH();
	{
		/* Make sure cost accounting is turned off after error */
		VacuumCostActive = false;
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* Turn off vacuum cost accounting */
	VacuumCostActive = false;
	/*
	 * Finish up processing.
	 */
	if (use_own_xacts)
	{
		/* here, we are not in a transaction */

		/*
		 * This matches the CommitTransaction waiting for us in
		 * PostgresMain().
		 */
		StartTransactionCommand();
	}

	if (vacstmt->vacuum && !IsAutoVacuumWorkerProcess())
	{
		/*
		 * Update pg_database.datfrozenxid, and truncate pg_clog if possible.
		 * (autovacuum.c does this for itself.)
		 */
		vac_update_datfrozenxid();
	}

	/*
	 * Clean up working storage --- note we must do this after
	 * StartTransactionCommand, else we might be trying to delete the active
	 * context!
	 */
	MemoryContextDelete(vac_context);
	vac_context = NULL;

	if (anl_context)
		MemoryContextDelete(anl_context);
}
/*
 * Build a list of Oids for each relation to be processed
 *
 * The list is built in vac_context so that it will survive across our
 * per-relation transactions.
 */
static List *
get_rel_oids(Oid relid, const RangeVar *vacrel, const char *stmttype)
{
	List	   *oid_list = NIL;
	MemoryContext oldcontext;

	/* OID supplied by VACUUM's caller? */
	if (OidIsValid(relid))
	{
		oldcontext = MemoryContextSwitchTo(vac_context);
		oid_list = lappend_oid(oid_list, relid);
		MemoryContextSwitchTo(oldcontext);
	}
	else if (vacrel)
	{
		/* Process a specific relation */
		relid = RangeVarGetRelid(vacrel, false);

		/* Make a relation list entry for this guy */
		oldcontext = MemoryContextSwitchTo(vac_context);
		oid_list = lappend_oid(oid_list, relid);
		MemoryContextSwitchTo(oldcontext);
	}
	else
	{
		/* Process all plain relations listed in pg_class */
		Relation	pgclass;
		HeapScanDesc scan;
		HeapTuple	tuple;
		ScanKeyData key;

		ScanKeyInit(&key,
					Anum_pg_class_relkind,
					BTEqualStrategyNumber, F_CHAREQ,
					CharGetDatum(RELKIND_RELATION));

		pgclass = heap_open(RelationRelationId, AccessShareLock);

		scan = heap_beginscan(pgclass, SnapshotNow, 1, &key);

		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
		{
			/* Make a relation list entry for this guy */
			oldcontext = MemoryContextSwitchTo(vac_context);
			oid_list = lappend_oid(oid_list, HeapTupleGetOid(tuple));
			MemoryContextSwitchTo(oldcontext);
		}

		heap_endscan(scan);
		heap_close(pgclass, AccessShareLock);
	}

	return oid_list;
}
/*
 * vacuum_set_xid_limits() -- compute oldest-Xmin and freeze cutoff points
 */
void
vacuum_set_xid_limits(int freeze_min_age, bool sharedRel,
					  TransactionId *oldestXmin,
					  TransactionId *freezeLimit)
{
	int			freezemin;
	TransactionId limit;
	TransactionId safeLimit;

	/*
	 * We can always ignore processes running lazy vacuum.  This is because we
	 * use these values only for deciding which tuples we must keep in the
	 * tables.  Since lazy vacuum doesn't write its XID anywhere, it's safe to
	 * ignore it.  In theory it could be problematic to ignore lazy vacuums on
	 * a full vacuum, but keep in mind that only one vacuum process can be
	 * working on a particular table at any time, and that each vacuum is
	 * always an independent transaction.
	 */
	*oldestXmin = GetOldestXmin(sharedRel, true);

	Assert(TransactionIdIsNormal(*oldestXmin));

	/*
	 * Determine the minimum freeze age to use: as specified by the caller, or
	 * vacuum_freeze_min_age, but in any case not more than half
	 * autovacuum_freeze_max_age, so that autovacuums to prevent XID
	 * wraparound won't occur too frequently.
	 */
	freezemin = freeze_min_age;
	if (freezemin < 0)
		freezemin = vacuum_freeze_min_age;
	freezemin = Min(freezemin, autovacuum_freeze_max_age / 2);
	Assert(freezemin >= 0);

	/*
	 * Compute the cutoff XID, being careful not to generate a "permanent" XID
	 */
	limit = *oldestXmin - freezemin;
	if (!TransactionIdIsNormal(limit))
		limit = FirstNormalTransactionId;
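	/*
	 * For illustration: with *oldestXmin = 10000 and freezemin = 2000, the
	 * cutoff becomes limit = 8000, so tuples whose xmin precedes 8000 will
	 * have it replaced with FrozenTransactionId and thus survive XID
	 * wraparound.  The TransactionIdIsNormal() check above guards against
	 * the subtraction wrapping into the reserved XIDs (InvalidTransactionId,
	 * BootstrapTransactionId, FrozenTransactionId).
	 */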
	/*
	 * If oldestXmin is very far back (in practice, more than
	 * autovacuum_freeze_max_age / 2 XIDs old), complain and force a minimum
	 * freeze age of zero.
	 */
	safeLimit = ReadNewTransactionId() - autovacuum_freeze_max_age;
	if (!TransactionIdIsNormal(safeLimit))
		safeLimit = FirstNormalTransactionId;

	if (TransactionIdPrecedes(limit, safeLimit))
	{
		ereport(WARNING,
				(errmsg("oldest xmin is far in the past"),
				 errhint("Close open transactions soon to avoid wraparound problems.")));
		limit = *oldestXmin;
	}

	*freezeLimit = limit;
}
/*
 *	vac_update_relstats() -- update statistics for one relation
 *
 *		Update the whole-relation statistics that are kept in its pg_class
 *		row.  There are additional stats that will be updated if we are
 *		doing ANALYZE, but we always update these stats.  This routine works
 *		for both index and heap relation entries in pg_class.
 *
 *		We violate transaction semantics here by overwriting the rel's
 *		existing pg_class tuple with the new values.  This is reasonably
 *		safe since the new values are correct whether or not this transaction
 *		commits.  The reason for this is that if we updated these tuples in
 *		the usual way, vacuuming pg_class itself wouldn't work very well ---
 *		by the time we got done with a vacuum cycle, most of the tuples in
 *		pg_class would've been obsoleted.  Of course, this only works for
 *		fixed-size never-null columns, but these are.
 *
 *		Another reason for doing it this way is that when we are in a lazy
 *		VACUUM and have PROC_IN_VACUUM set, we mustn't do any updates ---
 *		somebody vacuuming pg_class might think they could delete a tuple
 *		marked with xmin = our xid.
 *
 *		This routine is shared by full VACUUM, lazy VACUUM, and stand-alone
 *		ANALYZE.
 */
void
vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
					bool hasindex, TransactionId frozenxid)
{
	Relation	rd;
	HeapTuple	ctup;
	Form_pg_class pgcform;
	bool		dirty;

	rd = heap_open(RelationRelationId, RowExclusiveLock);

	/* Fetch a copy of the tuple to scribble on */
	ctup = SearchSysCacheCopy(RELOID,
							  ObjectIdGetDatum(relid),
							  0, 0, 0);
	if (!HeapTupleIsValid(ctup))
		elog(ERROR, "pg_class entry for relid %u vanished during vacuuming",
			 relid);
	pgcform = (Form_pg_class) GETSTRUCT(ctup);

	/* Apply required updates, if any, to copied tuple */

	dirty = false;
	if (pgcform->relpages != (int32) num_pages)
	{
		pgcform->relpages = (int32) num_pages;
		dirty = true;
	}
	if (pgcform->reltuples != (float4) num_tuples)
	{
		pgcform->reltuples = (float4) num_tuples;
		dirty = true;
	}
	if (pgcform->relhasindex != hasindex)
	{
		pgcform->relhasindex = hasindex;
		dirty = true;
	}

	/*
	 * If we have discovered that there are no indexes, then there's no
	 * primary key either.  This could be done more thoroughly...
	 */
	if (!hasindex)
	{
		if (pgcform->relhaspkey)
		{
			pgcform->relhaspkey = false;
			dirty = true;
		}
	}

	/*
	 * relfrozenxid should never go backward.  Caller can pass
	 * InvalidTransactionId if it has no new data.
	 */
	if (TransactionIdIsNormal(frozenxid) &&
		TransactionIdPrecedes(pgcform->relfrozenxid, frozenxid))
	{
		pgcform->relfrozenxid = frozenxid;
		dirty = true;
	}

	/*
	 * If anything changed, write out the tuple.  Even if nothing changed,
	 * force relcache invalidation so all backends reset their rd_targblock
	 * --- otherwise it might point to a page we truncated away.
	 */
	if (dirty)
	{
		heap_inplace_update(rd, ctup);
		/* the above sends a cache inval message */
	}
	else
	{
		/* no need to change tuple, but force relcache inval anyway */
		CacheInvalidateRelcacheByTuple(ctup);
	}

	heap_close(rd, RowExclusiveLock);
}
/*
 *	vac_update_datfrozenxid() -- update pg_database.datfrozenxid for our DB
 *
 *		Update pg_database's datfrozenxid entry for our database to be the
 *		minimum of the pg_class.relfrozenxid values.  If we are able to
 *		advance pg_database.datfrozenxid, also try to truncate pg_clog.
 *
 *		We violate transaction semantics here by overwriting the database's
 *		existing pg_database tuple with the new value.  This is reasonably
 *		safe since the new value is correct whether or not this transaction
 *		commits.  As with vac_update_relstats, this avoids leaving dead tuples
 *		behind after a VACUUM.
 *
 *		This routine is shared by full and lazy VACUUM.
 */
void
vac_update_datfrozenxid(void)
{
	HeapTuple	tuple;
	Form_pg_database dbform;
	Relation	relation;
	SysScanDesc scan;
	HeapTuple	classTup;
	TransactionId newFrozenXid;
	bool		dirty = false;

	/*
	 * Initialize the "min" calculation with GetOldestXmin, which is a
	 * reasonable approximation to the minimum relfrozenxid for not-yet-
	 * committed pg_class entries for new tables; see AddNewRelationTuple().
	 * So we cannot produce a wrong minimum by starting with this.
	 */
	newFrozenXid = GetOldestXmin(true, true);

	/*
	 * We must seqscan pg_class to find the minimum Xid, because there is no
	 * index that can help us here.
	 */
	relation = heap_open(RelationRelationId, AccessShareLock);

	scan = systable_beginscan(relation, InvalidOid, false,
							  SnapshotNow, 0, NULL);

	while ((classTup = systable_getnext(scan)) != NULL)
	{
		Form_pg_class classForm = (Form_pg_class) GETSTRUCT(classTup);

		/*
		 * Only consider heap and TOAST tables (anything else should have
		 * InvalidTransactionId in relfrozenxid anyway.)
		 */
		if (classForm->relkind != RELKIND_RELATION &&
			classForm->relkind != RELKIND_TOASTVALUE)
			continue;

		Assert(TransactionIdIsNormal(classForm->relfrozenxid));

		if (TransactionIdPrecedes(classForm->relfrozenxid, newFrozenXid))
			newFrozenXid = classForm->relfrozenxid;
	}

	/* we're done with pg_class */
	systable_endscan(scan);
	heap_close(relation, AccessShareLock);

	Assert(TransactionIdIsNormal(newFrozenXid));

	/* Now fetch the pg_database tuple we need to update. */
	relation = heap_open(DatabaseRelationId, RowExclusiveLock);

	/* Fetch a copy of the tuple to scribble on */
	tuple = SearchSysCacheCopy(DATABASEOID,
							   ObjectIdGetDatum(MyDatabaseId),
							   0, 0, 0);
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "could not find tuple for database %u", MyDatabaseId);
	dbform = (Form_pg_database) GETSTRUCT(tuple);

	/*
	 * Don't allow datfrozenxid to go backward (probably can't happen anyway);
	 * and detect the common case where it doesn't go forward either.
	 */
	if (TransactionIdPrecedes(dbform->datfrozenxid, newFrozenXid))
	{
		dbform->datfrozenxid = newFrozenXid;
		dirty = true;
	}

	if (dirty)
		heap_inplace_update(relation, tuple);

	heap_freetuple(tuple);
	heap_close(relation, RowExclusiveLock);

	/*
	 * If we were able to advance datfrozenxid, mark the flat-file copy of
	 * pg_database for update at commit, and see if we can truncate pg_clog.
	 */
	if (dirty)
	{
		database_file_update_needed();
		vac_truncate_clog(newFrozenXid);
	}
}
/*
 *	vac_truncate_clog() -- attempt to truncate the commit log
 *
 *		Scan pg_database to determine the system-wide oldest datfrozenxid,
 *		and use it to truncate the transaction commit log (pg_clog).
 *		Also update the XID wrap limit info maintained by varsup.c.
 *
 *		The passed XID is simply the one I just wrote into my pg_database
 *		entry.  It's used to initialize the "min" calculation.
 *
 *		This routine is shared by full and lazy VACUUM.  Note that it's
 *		only invoked when we've managed to change our DB's datfrozenxid
 *		entry.
 */
static void
vac_truncate_clog(TransactionId frozenXID)
{
	TransactionId myXID = GetCurrentTransactionId();
	Relation	relation;
	HeapScanDesc scan;
	HeapTuple	tuple;
	NameData	oldest_datname;
	bool		frozenAlreadyWrapped = false;

	/* init oldest_datname to sync with my frozenXID */
	namestrcpy(&oldest_datname, get_database_name(MyDatabaseId));

	/*
	 * Scan pg_database to compute the minimum datfrozenxid
	 *
	 * Note: we need not worry about a race condition with new entries being
	 * inserted by CREATE DATABASE.  Any such entry will have a copy of some
	 * existing DB's datfrozenxid, and that source DB cannot be ours because
	 * of the interlock against copying a DB containing an active backend.
	 * Hence the new entry will not reduce the minimum.  Also, if two VACUUMs
	 * concurrently modify the datfrozenxid's of different databases, the
	 * worst possible outcome is that pg_clog is not truncated as aggressively
	 * as it could be.
	 */
	relation = heap_open(DatabaseRelationId, AccessShareLock);

	scan = heap_beginscan(relation, SnapshotNow, 0, NULL);

	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		Form_pg_database dbform = (Form_pg_database) GETSTRUCT(tuple);

		Assert(TransactionIdIsNormal(dbform->datfrozenxid));

		if (TransactionIdPrecedes(myXID, dbform->datfrozenxid))
			frozenAlreadyWrapped = true;
		else if (TransactionIdPrecedes(dbform->datfrozenxid, frozenXID))
		{
			frozenXID = dbform->datfrozenxid;
			namecpy(&oldest_datname, &dbform->datname);
		}
	}

	heap_endscan(scan);
	heap_close(relation, AccessShareLock);

	/*
	 * Do not truncate CLOG if we seem to have suffered wraparound already;
	 * the computed minimum XID might be bogus.  This case should now be
	 * impossible due to the defenses in GetNewTransactionId, but we keep the
	 * test anyway.
	 */
	if (frozenAlreadyWrapped)
	{
		ereport(WARNING,
				(errmsg("some databases have not been vacuumed in over 2 billion transactions"),
				 errdetail("You might have already suffered transaction-wraparound data loss.")));
		return;
	}

	/* Truncate CLOG to the oldest frozenxid */
	TruncateCLOG(frozenXID);

	/*
	 * Update the wrap limit for GetNewTransactionId.  Note: this function
	 * will also signal the postmaster for an(other) autovac cycle if needed.
	 */
	SetTransactionIdLimit(frozenXID, &oldest_datname);
}
/****************************************************************************
 *																			*
 *			Code common to both flavors of VACUUM							*
 *																			*
 ****************************************************************************
 */
/*
 *	vacuum_rel() -- vacuum one heap relation
 *
 *		Doing one heap at a time incurs extra overhead, since we need to
 *		check that the heap exists again just before we vacuum it.  The
 *		reason that we do this is so that vacuuming can be spread across
 *		many small transactions.  Otherwise, two-phase locking would require
 *		us to lock the entire database during one pass of the vacuum cleaner.
 *
 *		At entry and exit, we are not inside a transaction.
 */
static void
vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
{
	LOCKMODE	lmode;
	Relation	onerel;
	LockRelId	onerelid;
	Oid			toast_relid;
	Oid			save_userid;
	bool		save_secdefcxt;

	/* Begin a transaction for vacuuming this relation */
	StartTransactionCommand();

	/*
	 * Functions in indexes may want a snapshot set.  Also, setting
	 * a snapshot ensures that RecentGlobalXmin is kept truly recent.
	 */
	PushActiveSnapshot(GetTransactionSnapshot());

	if (!vacstmt->full)
	{
		/*
		 * In lazy vacuum, we can set the PROC_IN_VACUUM flag, which lets
		 * other concurrent VACUUMs know that they can ignore this one while
		 * determining their OldestXmin.  (The reason we don't set it during
		 * a full VACUUM is exactly that we may have to run user-defined
		 * functions for functional indexes, and we want to make sure that if
		 * they use the snapshot set above, any tuples it requires can't get
		 * removed from other tables.  An index function that depends on the
		 * contents of other tables is arguably broken, but we won't break it
		 * here by violating transaction semantics.)
		 *
		 * We also set the VACUUM_FOR_WRAPAROUND flag, which is passed down
		 * by autovacuum; it's used to avoid cancelling a vacuum that was
		 * invoked in an emergency.
		 *
		 * Note: these flags remain set until CommitTransaction or
		 * AbortTransaction.  We don't want to clear them until we reset
		 * MyProc->xid/xmin, else OldestXmin might appear to go backwards,
		 * which is probably Not Good.
		 */
		LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
		MyProc->vacuumFlags |= PROC_IN_VACUUM;
		if (for_wraparound)
			MyProc->vacuumFlags |= PROC_VACUUM_FOR_WRAPAROUND;
		LWLockRelease(ProcArrayLock);
	}

	/*
	 * Check for user-requested abort.  Note we want this to be inside a
	 * transaction, so xact.c doesn't issue useless WARNING.
	 */
	CHECK_FOR_INTERRUPTS();

	/*
	 * Determine the type of lock we want --- hard exclusive lock for a FULL
	 * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either
	 * way, we can be sure that no other backend is vacuuming the same table.
	 */
	lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;

	/*
	 * Open the relation and get the appropriate lock on it.
	 *
	 * There's a race condition here: the rel may have gone away since the
	 * last time we saw it.  If so, we don't need to vacuum it.
	 */
	onerel = try_relation_open(relid, lmode);

	if (!onerel)
	{
		PopActiveSnapshot();
		CommitTransactionCommand();
		return;
	}
	/*
	 * Check permissions.
	 *
	 * We allow the user to vacuum a table if he is superuser, the table
	 * owner, or the database owner (but in the latter case, only if it's not
	 * a shared relation).  pg_class_ownercheck includes the superuser case.
	 *
	 * Note we choose to treat permissions failure as a WARNING and keep
	 * trying to vacuum the rest of the DB --- is this appropriate?
	 */
	if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
		  (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared)))
	{
		if (onerel->rd_rel->relisshared)
			ereport(WARNING,
				  (errmsg("skipping \"%s\" --- only superuser can vacuum it",
						  RelationGetRelationName(onerel))));
		else if (onerel->rd_rel->relnamespace == PG_CATALOG_NAMESPACE)
			ereport(WARNING,
					(errmsg("skipping \"%s\" --- only superuser or database owner can vacuum it",
							RelationGetRelationName(onerel))));
		else
			ereport(WARNING,
					(errmsg("skipping \"%s\" --- only table or database owner can vacuum it",
							RelationGetRelationName(onerel))));
		relation_close(onerel, lmode);
		PopActiveSnapshot();
		CommitTransactionCommand();
		return;
	}

	/*
	 * Check that it's a vacuumable table; we used to do this in
	 * get_rel_oids() but it seems safer to check after we've locked the
	 * relation.
	 */
	if (onerel->rd_rel->relkind != RELKIND_RELATION &&
		onerel->rd_rel->relkind != RELKIND_TOASTVALUE)
	{
		ereport(WARNING,
				(errmsg("skipping \"%s\" --- cannot vacuum indexes, views, or special system tables",
						RelationGetRelationName(onerel))));
		relation_close(onerel, lmode);
		PopActiveSnapshot();
		CommitTransactionCommand();
		return;
	}

	/*
	 * Silently ignore tables that are temp tables of other backends ---
	 * trying to vacuum these will lead to great unhappiness, since their
	 * contents are probably not up-to-date on disk.  (We don't throw a
	 * warning here; it would just lead to chatter during a database-wide
	 * VACUUM.)
	 */
	if (isOtherTempNamespace(RelationGetNamespace(onerel)))
	{
		relation_close(onerel, lmode);
		PopActiveSnapshot();
		CommitTransactionCommand();
		return;
	}

	/*
	 * Get a session-level lock too. This will protect our access to the
	 * relation across multiple transactions, so that we can vacuum the
	 * relation's TOAST table (if any) secure in the knowledge that no one is
	 * deleting the parent relation.
	 *
	 * NOTE: this cannot block, even if someone else is waiting for access,
	 * because the lock manager knows that both lock requests are from the
	 * same process.
	 */
	onerelid = onerel->rd_lockInfo.lockRelId;
	LockRelationIdForSession(&onerelid, lmode);

	/*
	 * Remember the relation's TOAST relation for later, if the caller asked
	 * us to process it.
	 */
	if (do_toast)
		toast_relid = onerel->rd_rel->reltoastrelid;
	else
		toast_relid = InvalidOid;

	/*
	 * Switch to the table owner's userid, so that any index functions are
	 * run as that user.  (This is unnecessary, but harmless, for lazy
	 * VACUUM.)
	 */
	GetUserIdAndContext(&save_userid, &save_secdefcxt);
	SetUserIdAndContext(onerel->rd_rel->relowner, true);

	/*
	 * Do the actual work --- either FULL or "lazy" vacuum
	 */
	if (vacstmt->full)
		full_vacuum_rel(onerel, vacstmt);
	else
		lazy_vacuum_rel(onerel, vacstmt, vac_strategy);

	/* Restore userid */
	SetUserIdAndContext(save_userid, save_secdefcxt);

	/* all done with this class, but hold lock until commit */
	relation_close(onerel, NoLock);

	/*
	 * Complete the transaction and free all temporary memory used.
	 */
	PopActiveSnapshot();
	CommitTransactionCommand();

	/*
	 * If the relation has a secondary toast rel, vacuum that too while we
	 * still hold the session lock on the master table.  Note however that
	 * "analyze" will not get done on the toast table.  This is good, because
	 * the toaster always uses hardcoded index access and statistics are
	 * totally unimportant for toast relations.
	 */
	if (toast_relid != InvalidOid)
		vacuum_rel(toast_relid, vacstmt, false, for_wraparound);

	/*
	 * Now release the session-level lock on the master table.
	 */
	UnlockRelationIdForSession(&onerelid, lmode);
}
/****************************************************************************
 *																			*
 *			Code for VACUUM FULL (only)										*
 *																			*
 ****************************************************************************
 */
/*
 *	full_vacuum_rel() -- perform FULL VACUUM for one heap relation
 *
 *		This routine vacuums a single heap, cleans out its indexes, and
 *		updates its num_pages and num_tuples statistics.
 *
 *		At entry, we have already established a transaction and opened
 *		and locked the relation.
 */
static void
full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
{
	VacPageListData vacuum_pages;		/* List of pages to vacuum and/or
										 * clean indexes */
	VacPageListData fraged_pages;		/* List of pages with space enough for
										 * re-using */
	Relation   *Irel;
	int			nindexes,
				i;
	VRelStats  *vacrelstats;

	vacuum_set_xid_limits(vacstmt->freeze_min_age, onerel->rd_rel->relisshared,
						  &OldestXmin, &FreezeLimit);

	/*
	 * Flush any previous async-commit transactions.  This does not guarantee
	 * that we will be able to set hint bits for tuples they inserted, but it
	 * improves the probability, especially in simple sequential-commands
	 * cases.  See scan_heap() and repair_frag() for more about this.
	 */
	XLogAsyncCommitFlush();

	/*
	 * Set up statistics-gathering machinery.
	 */
	vacrelstats = (VRelStats *) palloc(sizeof(VRelStats));
	vacrelstats->rel_pages = 0;
	vacrelstats->rel_tuples = 0;
	vacrelstats->rel_indexed_tuples = 0;
	vacrelstats->hasindex = false;

	/* scan the heap */
	vacuum_pages.num_pages = fraged_pages.num_pages = 0;
	scan_heap(vacrelstats, onerel, &vacuum_pages, &fraged_pages);

	/* Now open all indexes of the relation */
	vac_open_indexes(onerel, AccessExclusiveLock, &nindexes, &Irel);
	if (nindexes > 0)
		vacrelstats->hasindex = true;

	/* Clean/scan index relation(s) */
	if (Irel != NULL)
	{
		if (vacuum_pages.num_pages > 0)
		{
			for (i = 0; i < nindexes; i++)
				vacuum_index(&vacuum_pages, Irel[i],
							 vacrelstats->rel_indexed_tuples, 0);
		}
		else
		{
			/* just scan indexes to update statistics */
			for (i = 0; i < nindexes; i++)
				scan_index(Irel[i], vacrelstats->rel_indexed_tuples);
		}
	}

	if (fraged_pages.num_pages > 0)
	{
		/* Try to shrink heap */
		repair_frag(vacrelstats, onerel, &vacuum_pages, &fraged_pages,
					nindexes, Irel);
		vac_close_indexes(nindexes, Irel, NoLock);
	}
	else
	{
		vac_close_indexes(nindexes, Irel, NoLock);
		if (vacuum_pages.num_pages > 0)
		{
			/* Clean pages from vacuum_pages list */
			vacuum_heap(vacrelstats, onerel, &vacuum_pages);
		}
	}

	/* update the free space map with final free space info, and vacuum it */
	vac_update_fsm(onerel, &fraged_pages, vacrelstats->rel_pages);
	FreeSpaceMapVacuum(onerel);

	/* update statistics in pg_class */
	vac_update_relstats(RelationGetRelid(onerel), vacrelstats->rel_pages,
						vacrelstats->rel_tuples, vacrelstats->hasindex,
						FreezeLimit);

	/* report results to the stats collector, too */
	pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared,
						 vacstmt->analyze, vacrelstats->rel_tuples);
}
/*
 *	scan_heap() -- scan an open heap relation
 *
 *		This routine sets commit status bits, constructs vacuum_pages (list
 *		of pages we need to compact free space on and/or clean indexes of
 *		deleted tuples), constructs fraged_pages (list of pages with free
 *		space that tuples could be moved into), and calculates statistics
 *		on the number of live tuples in the heap.
 */
static void
scan_heap(VRelStats *vacrelstats, Relation onerel,
		  VacPageList vacuum_pages, VacPageList fraged_pages)
{
	BlockNumber nblocks,
				blkno;
	char	   *relname;
	VacPage		vacpage;
	BlockNumber empty_pages,
				empty_end_pages;
	double		num_tuples,
				num_indexed_tuples,
				tups_vacuumed,
				nkeep,
				nunused;
	double		free_space,
				usable_free_space;
	Size		min_tlen = MaxHeapTupleSize;
	Size		max_tlen = 0;
	bool		do_shrinking = true;
	VTupleLink	vtlinks = (VTupleLink) palloc(100 * sizeof(VTupleLinkData));
	int			num_vtlinks = 0;
	int			free_vtlinks = 100;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);

	relname = RelationGetRelationName(onerel);
	ereport(elevel,
			(errmsg("vacuuming \"%s.%s\"",
					get_namespace_name(RelationGetNamespace(onerel)),
					relname)));

	empty_pages = empty_end_pages = 0;
	num_tuples = num_indexed_tuples = tups_vacuumed = nkeep = nunused = 0;
	free_space = 0;

	nblocks = RelationGetNumberOfBlocks(onerel);

	/*
	 * We initially create each VacPage item in a maximal-sized workspace,
	 * then copy the workspace into a just-large-enough copy.
	 */
	vacpage = (VacPage) palloc(sizeof(VacPageData) + MaxOffsetNumber * sizeof(OffsetNumber));
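	/*
	 * Sizing the workspace for MaxOffsetNumber entries guarantees that the
	 * offsets[] array can record every line pointer a page could possibly
	 * hold; copy_vac_page() later trims each entry to its actual size.
	 */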
	for (blkno = 0; blkno < nblocks; blkno++)
	{
		Page		page,
					tempPage = NULL;
		bool		do_reap,
					do_frag;
		Buffer		buf;
		OffsetNumber offnum,
					maxoff;
		bool		notup;
		OffsetNumber frozen[MaxOffsetNumber];
		int			nfrozen;

		vacuum_delay_point();

		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL,
								 vac_strategy);
		page = BufferGetPage(buf);

		/*
		 * Since we are holding exclusive lock on the relation, no other
		 * backend can be accessing the page; however it is possible that the
		 * background writer will try to write the page if it's already marked
		 * dirty.  To ensure that invalid data doesn't get written to disk, we
		 * must take exclusive buffer lock wherever we potentially modify
		 * pages.  In fact, we insist on cleanup lock so that we can safely
		 * call heap_page_prune().  (This might be overkill, since the
		 * bgwriter pays no attention to individual tuples, but on the other
		 * hand it's unlikely that the bgwriter has this particular page
		 * pinned at this instant.  So violating the coding rule would buy us
		 * little anyway.)
		 */
		LockBufferForCleanup(buf);

		vacpage->blkno = blkno;
		vacpage->offsets_used = 0;
		vacpage->offsets_free = 0;
		if (PageIsNew(page))
		{
			VacPage		vacpagecopy;

			ereport(WARNING,
			   (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
					   relname, blkno)));
			PageInit(page, BufferGetPageSize(buf), 0);
			MarkBufferDirty(buf);
			vacpage->free = PageGetFreeSpaceWithFillFactor(onerel, page);
			free_space += vacpage->free;
			empty_pages++;
			empty_end_pages++;
			vacpagecopy = copy_vac_page(vacpage);
			vpage_insert(vacuum_pages, vacpagecopy);
			vpage_insert(fraged_pages, vacpagecopy);
			UnlockReleaseBuffer(buf);
			continue;
		}

		if (PageIsEmpty(page))
		{
			VacPage		vacpagecopy;

			vacpage->free = PageGetFreeSpaceWithFillFactor(onerel, page);
			free_space += vacpage->free;
			empty_pages++;
			empty_end_pages++;
			vacpagecopy = copy_vac_page(vacpage);
			vpage_insert(vacuum_pages, vacpagecopy);
			vpage_insert(fraged_pages, vacpagecopy);
			UnlockReleaseBuffer(buf);
			continue;
		}
		/*
		 * Prune all HOT-update chains in this page.
		 *
		 * We use the redirect_move option so that redirecting line pointers
		 * get collapsed out; this allows us to not worry about them below.
		 *
		 * We count tuples removed by the pruning step as removed by VACUUM.
		 */
		tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin,
										 true, false);
		/*
		 * Now scan the page to collect vacuumable items and check for tuples
		 * requiring freezing.
		 */
		nfrozen = 0;
		notup = true;
		maxoff = PageGetMaxOffsetNumber(page);
		for (offnum = FirstOffsetNumber;
			 offnum <= maxoff;
			 offnum = OffsetNumberNext(offnum))
		{
			ItemId		itemid = PageGetItemId(page, offnum);
			bool		tupgone = false;
			HeapTupleData tuple;

			/*
			 * Collect un-used items too - it's possible to have indexes
			 * pointing here after crash.  (That's an ancient comment and is
			 * likely obsolete with WAL, but we might as well continue to
			 * check for such problems.)
			 */
			if (!ItemIdIsUsed(itemid))
			{
				vacpage->offsets[vacpage->offsets_free++] = offnum;
				nunused += 1;
				continue;
			}

			/*
			 * DEAD item pointers are to be vacuumed normally; but we don't
			 * count them in tups_vacuumed, else we'd be double-counting (at
			 * least in the common case where heap_page_prune() just freed up
			 * a non-HOT tuple).
			 */
			if (ItemIdIsDead(itemid))
			{
				vacpage->offsets[vacpage->offsets_free++] = offnum;
				continue;
			}

			/* Shouldn't have any redirected items anymore */
			if (!ItemIdIsNormal(itemid))
				elog(ERROR, "relation \"%s\" TID %u/%u: unexpected redirect item",
					 relname, blkno, offnum);

			tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
			tuple.t_len = ItemIdGetLength(itemid);
			ItemPointerSet(&(tuple.t_self), blkno, offnum);
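			/*
			 * Classify the tuple with HeapTupleSatisfiesVacuum().  Any
			 * visibility state that leaves a tuple's fate uncertain also
			 * forces do_shrinking off, so that repair_frag() is skipped
			 * for this relation.
			 */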
			switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf))
			{
				case HEAPTUPLE_LIVE:
					/* Tuple is good --- but let's do some validity checks */
					if (onerel->rd_rel->relhasoids &&
						!OidIsValid(HeapTupleGetOid(&tuple)))
						elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
							 relname, blkno, offnum);

					/*
					 * The shrinkage phase of VACUUM FULL requires that all
					 * live tuples have XMIN_COMMITTED set --- see comments in
					 * repair_frag()'s walk-along-page loop.  Use of async
					 * commit may prevent HeapTupleSatisfiesVacuum from
					 * setting the bit for a recently committed tuple.  Rather
					 * than trying to handle this corner case, we just give up
					 * on shrinking the relation.
					 */
					if (do_shrinking &&
						!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
					{
						ereport(LOG,
								(errmsg("relation \"%s\" TID %u/%u: XMIN_COMMITTED not set for transaction %u --- cannot shrink relation",
										relname, blkno, offnum,
									 HeapTupleHeaderGetXmin(tuple.t_data))));
						do_shrinking = false;
					}
					break;
				case HEAPTUPLE_DEAD:

					/*
					 * Ordinarily, DEAD tuples would have been removed by
					 * heap_page_prune(), but it's possible that the tuple
					 * state changed since heap_page_prune() looked.  In
					 * particular an INSERT_IN_PROGRESS tuple could have
					 * changed to DEAD if the inserter aborted.  So this
					 * cannot be considered an error condition, though it does
					 * suggest that someone released a lock early.
					 *
					 * If the tuple is HOT-updated then it must only be
					 * removed by a prune operation; so we keep it as if it
					 * were RECENTLY_DEAD, and abandon shrinking.  (XXX is it
					 * worth trying to make the shrinking code smart enough to
					 * handle this?  It's an unusual corner case.)
					 *
					 * DEAD heap-only tuples can safely be removed if they
					 * aren't themselves HOT-updated, although this is a bit
					 * inefficient since we'll uselessly try to remove index
					 * entries for them.
					 */
					if (HeapTupleIsHotUpdated(&tuple))
					{
						nkeep += 1;
						if (do_shrinking)
							ereport(LOG,
									(errmsg("relation \"%s\" TID %u/%u: dead HOT-updated tuple --- cannot shrink relation",
											relname, blkno, offnum)));
						do_shrinking = false;
					}
					else
					{
						tupgone = true; /* we can delete the tuple */

						/*
						 * We need not require XMIN_COMMITTED or
						 * XMAX_COMMITTED to be set, since we will remove the
						 * tuple without any further examination of its hint
						 * bits.
						 */
					}
					break;
				case HEAPTUPLE_RECENTLY_DEAD:

					/*
					 * If tuple is recently deleted then we must not remove it
					 * from relation.
					 */
					nkeep += 1;

					/*
					 * As with the LIVE case, shrinkage requires
					 * XMIN_COMMITTED to be set.
					 */
					if (do_shrinking &&
						!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
					{
						ereport(LOG,
								(errmsg("relation \"%s\" TID %u/%u: XMIN_COMMITTED not set for transaction %u --- cannot shrink relation",
										relname, blkno, offnum,
									 HeapTupleHeaderGetXmin(tuple.t_data))));
						do_shrinking = false;
					}

					/*
					 * If we are shrinking and this tuple is an updated one,
					 * then remember it to construct updated-tuple
					 * dependencies.
					 */
					if (do_shrinking &&
						!(ItemPointerEquals(&(tuple.t_self),
											&(tuple.t_data->t_ctid))))
					{
						if (free_vtlinks == 0)
						{
							free_vtlinks = 1000;
							vtlinks = (VTupleLink) repalloc(vtlinks,
											   (free_vtlinks + num_vtlinks) *
													 sizeof(VTupleLinkData));
						}
						vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
						vtlinks[num_vtlinks].this_tid = tuple.t_self;
						num_vtlinks++;
						free_vtlinks--;
					}
					break;
				case HEAPTUPLE_INSERT_IN_PROGRESS:

					/*
					 * This should not happen, since we hold exclusive lock on
					 * the relation; shouldn't we raise an error?  (Actually,
					 * it can happen in system catalogs, since we tend to
					 * release write lock before commit there.)  As above, we
					 * can't apply repair_frag() if the tuple state is
					 * uncertain.
					 */
					if (do_shrinking)
						ereport(LOG,
								(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- cannot shrink relation",
										relname, blkno, offnum,
									 HeapTupleHeaderGetXmin(tuple.t_data))));
					do_shrinking = false;
					break;
				case HEAPTUPLE_DELETE_IN_PROGRESS:

					/*
					 * This should not happen, since we hold exclusive lock on
					 * the relation; shouldn't we raise an error?  (Actually,
					 * it can happen in system catalogs, since we tend to
					 * release write lock before commit there.)  As above, we
					 * can't apply repair_frag() if the tuple state is
					 * uncertain.
					 */
					if (do_shrinking)
						ereport(LOG,
								(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- cannot shrink relation",
										relname, blkno, offnum,
									 HeapTupleHeaderGetXmax(tuple.t_data))));
					do_shrinking = false;
					break;
				default:
					elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
					break;
			}
			if (tupgone)
			{
				ItemId		lpp;

				/*
				 * Here we are building a temporary copy of the page with dead
				 * tuples removed.  Below we will apply
				 * PageRepairFragmentation to the copy, so that we can
				 * determine how much space will be available after removal of
				 * dead tuples.  But note we are NOT changing the real page
				 * yet...
				 */
				if (tempPage == NULL)
				{
					Size		pageSize;

					pageSize = PageGetPageSize(page);
					tempPage = (Page) palloc(pageSize);
					memcpy(tempPage, page, pageSize);
				}

				/* mark it unused on the temp page */
				lpp = PageGetItemId(tempPage, offnum);
				ItemIdSetUnused(lpp);

				vacpage->offsets[vacpage->offsets_free++] = offnum;
				tups_vacuumed += 1;
			}
			else
			{
				num_tuples += 1;
				if (!HeapTupleIsHeapOnly(&tuple))
					num_indexed_tuples += 1;
				notup = false;
				if (tuple.t_len < min_tlen)
					min_tlen = tuple.t_len;
				if (tuple.t_len > max_tlen)
					max_tlen = tuple.t_len;

				/*
				 * Each non-removable tuple must be checked to see if it needs
				 * freezing.
				 */
				if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
									  InvalidBuffer))
					frozen[nfrozen++] = offnum;
			}
		}						/* scan along page */
		if (tempPage != NULL)
		{
			/* Some tuples are removable; figure free space after removal */
			PageRepairFragmentation(tempPage);
			vacpage->free = PageGetFreeSpaceWithFillFactor(onerel, tempPage);
			pfree(tempPage);
			do_reap = true;
		}
		else
		{
			/* Just use current available space */
			vacpage->free = PageGetFreeSpaceWithFillFactor(onerel, page);
			/* Need to reap the page if it has UNUSED or DEAD line pointers */
			do_reap = (vacpage->offsets_free > 0);
		}

		free_space += vacpage->free;

		/*
		 * Add the page to vacuum_pages if it requires reaping, and add it to
		 * fraged_pages if it has a useful amount of free space.  "Useful"
		 * means enough for a minimal-sized tuple.  But we don't know that
		 * accurately near the start of the relation, so add pages
		 * unconditionally if they have >= BLCKSZ/10 free space.  Also
		 * forcibly add pages with no live tuples, to avoid confusing the
		 * empty_end_pages logic.  (In the presence of unreasonably small
		 * fillfactor, it seems possible that such pages might not pass
		 * the free-space test, but they had better be in the list anyway.)
		 */
		do_frag = (vacpage->free >= min_tlen || vacpage->free >= BLCKSZ / 10 ||
				   notup);

		if (do_reap || do_frag)
		{
			VacPage		vacpagecopy = copy_vac_page(vacpage);

			if (do_reap)
				vpage_insert(vacuum_pages, vacpagecopy);
			if (do_frag)
				vpage_insert(fraged_pages, vacpagecopy);
		}

		/*
		 * Include the page in empty_end_pages if it will be empty after
		 * vacuuming; this is to keep us from using it as a move destination.
		 * Note that such pages are guaranteed to be in fraged_pages.
		 */
		if (notup)
		{
			empty_pages++;
			empty_end_pages++;
		}
		else
			empty_end_pages = 0;
		/*
		 * If we froze any tuples, mark the buffer dirty, and write a WAL
		 * record recording the changes.  We must log the changes to be
		 * crash-safe against future truncation of CLOG.
		 */
		if (nfrozen > 0)
		{
			MarkBufferDirty(buf);
			/* no XLOG for temp tables, though */
			if (!onerel->rd_istemp)
			{
				XLogRecPtr	recptr;

				recptr = log_heap_freeze(onerel, buf, FreezeLimit,
										 frozen, nfrozen);
				PageSetLSN(page, recptr);
				PageSetTLI(page, ThisTimeLineID);
			}
		}

		UnlockReleaseBuffer(buf);
	}

	pfree(vacpage);
	/* save stats in the rel list for use later */
	vacrelstats->rel_tuples = num_tuples;
	vacrelstats->rel_indexed_tuples = num_indexed_tuples;
	vacrelstats->rel_pages = nblocks;
	if (num_tuples == 0)
		min_tlen = max_tlen = 0;
	vacrelstats->min_tlen = min_tlen;
	vacrelstats->max_tlen = max_tlen;

	vacuum_pages->empty_end_pages = empty_end_pages;
	fraged_pages->empty_end_pages = empty_end_pages;
	/*
	 * Clear the fraged_pages list if we found we couldn't shrink.  Else,
	 * remove any "empty" end-pages from the list, and compute usable free
	 * space = free space in remaining pages.
	 */
	if (do_shrinking)
	{
		int			i;

		Assert((BlockNumber) fraged_pages->num_pages >= empty_end_pages);
		fraged_pages->num_pages -= empty_end_pages;
		usable_free_space = 0;
		for (i = 0; i < fraged_pages->num_pages; i++)
			usable_free_space += fraged_pages->pagedesc[i]->free;
	}
	else
	{
		fraged_pages->num_pages = 0;
		usable_free_space = 0;
	}

	/* don't bother to save vtlinks if we will not call repair_frag */
	if (fraged_pages->num_pages > 0 && num_vtlinks > 0)
	{
		qsort((char *) vtlinks, num_vtlinks, sizeof(VTupleLinkData),
			  vac_cmp_vtlinks);
		vacrelstats->vtlinks = vtlinks;
		vacrelstats->num_vtlinks = num_vtlinks;
	}
	else
	{
		vacrelstats->vtlinks = NULL;
		vacrelstats->num_vtlinks = 0;
		pfree(vtlinks);
	}
	ereport(elevel,
			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages",
					RelationGetRelationName(onerel),
					tups_vacuumed, num_tuples, nblocks),
			 errdetail("%.0f dead row versions cannot be removed yet.\n"
					   "Nonremovable row versions range from %lu to %lu bytes long.\n"
					   "There were %.0f unused item pointers.\n"
					   "Total free space (including removable row versions) is %.0f bytes.\n"
					   "%u pages are or will become empty, including %u at the end of the table.\n"
					   "%u pages containing %.0f free bytes are potential move destinations.\n"
					   "%s.",
					   nkeep,
					   (unsigned long) min_tlen, (unsigned long) max_tlen,
					   nunused,
					   free_space,
					   empty_pages, empty_end_pages,
					   fraged_pages->num_pages, usable_free_space,
					   pg_rusage_show(&ru0))));
}
/*
 *	repair_frag() -- try to repair relation's fragmentation
 *
 *		This routine marks dead tuples as unused and tries to re-use dead
 *		space by moving tuples (and inserting indexes if needed).  It
 *		constructs an Nvacpagelist list of freed pages (moved tuples) and
 *		cleans indexes for them after committing the current transaction (in
 *		a hackish manner --- without losing locks and freeing memory!).  It
 *		truncates the relation if some end-blocks have gone away.
 */
static void
repair_frag(VRelStats *vacrelstats, Relation onerel,
			VacPageList vacuum_pages, VacPageList fraged_pages,
			int nindexes, Relation *Irel)
{
	TransactionId myXID = GetCurrentTransactionId();
	Buffer		dst_buffer = InvalidBuffer;
	BlockNumber nblocks,
				blkno;
	BlockNumber last_move_dest_block = 0,
				last_vacuum_block;
	Page		dst_page = NULL;
	ExecContextData ec;
	VacPageListData Nvacpagelist;
	VacPage		dst_vacpage = NULL,
				last_vacuum_page,
				vacpage,
			   *curpage;
	int			i;
	int			num_moved = 0,
				num_fraged_pages,
				vacuumed_pages;
	int			keep_tuples = 0;
	int			keep_indexed_tuples = 0;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);

	ExecContext_Init(&ec, onerel);

	Nvacpagelist.num_pages = 0;
	num_fraged_pages = fraged_pages->num_pages;
	Assert((BlockNumber) vacuum_pages->num_pages >= vacuum_pages->empty_end_pages);
	vacuumed_pages = vacuum_pages->num_pages - vacuum_pages->empty_end_pages;
	if (vacuumed_pages > 0)
	{
		/* get last reaped page from vacuum_pages */
		last_vacuum_page = vacuum_pages->pagedesc[vacuumed_pages - 1];
		last_vacuum_block = last_vacuum_page->blkno;
	}
	else
	{
		last_vacuum_page = NULL;
		last_vacuum_block = InvalidBlockNumber;
	}

	vacpage = (VacPage) palloc(sizeof(VacPageData) + MaxOffsetNumber * sizeof(OffsetNumber));
	vacpage->offsets_used = vacpage->offsets_free = 0;
	/*
	 * Scan pages backwards from the last nonempty page, trying to move tuples
	 * down to lower pages.  Quit when we reach a page that we have moved any
	 * tuples onto, or the first page if we haven't moved anything, or when we
	 * find a page we cannot completely empty (this last condition is handled
	 * by "break" statements within the loop).
	 *
	 * NB: this code depends on the vacuum_pages and fraged_pages lists being
	 * in order by blkno.
	 */
	nblocks = vacrelstats->rel_pages;
	for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
		 blkno > last_move_dest_block;
		 blkno--)
	{
		Buffer		buf;
		Page		page;
		OffsetNumber offnum,
					maxoff;
		bool		isempty,
					chain_tuple_moved;

		vacuum_delay_point();
		/*
		 * Forget fraged_pages pages at or after this one; they're no longer
		 * useful as move targets, since we only want to move down.  Note that
		 * since we stop the outer loop at last_move_dest_block, pages removed
		 * here cannot have had anything moved onto them already.
		 *
		 * Also note that we don't change the stored fraged_pages list, only
		 * our local variable num_fraged_pages; so the forgotten pages are
		 * still available to be loaded into the free space map later.
		 */
		while (num_fraged_pages > 0 &&
			   fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
		{
			Assert(fraged_pages->pagedesc[num_fraged_pages - 1]->offsets_used == 0);
			--num_fraged_pages;
		}

		/*
		 * Process this page of relation.
		 */
		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL,
								 vac_strategy);
		page = BufferGetPage(buf);

		vacpage->offsets_free = 0;

		isempty = PageIsEmpty(page);
		/* Is the page in the vacuum_pages list? */
		if (blkno == last_vacuum_block)
		{
			if (last_vacuum_page->offsets_free > 0)
			{
				/* there are dead tuples on this page - clean them */
				Assert(!isempty);
				LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
				vacuum_page(onerel, buf, last_vacuum_page);
				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
			}
			else
				Assert(isempty);
			--vacuumed_pages;
			if (vacuumed_pages > 0)
			{
				/* get prev reaped page from vacuum_pages */
				last_vacuum_page = vacuum_pages->pagedesc[vacuumed_pages - 1];
				last_vacuum_block = last_vacuum_page->blkno;
			}
			else
			{
				last_vacuum_page = NULL;
				last_vacuum_block = InvalidBlockNumber;
			}
		}
		chain_tuple_moved = false;		/* no one chain-tuple was moved off
										 * this page, yet */
		vacpage->blkno = blkno;
		maxoff = PageGetMaxOffsetNumber(page);
		for (offnum = FirstOffsetNumber;
			 offnum <= maxoff;
			 offnum = OffsetNumberNext(offnum))
		{
			Size		tuple_len;
			HeapTupleData tuple;
			ItemId		itemid = PageGetItemId(page, offnum);

			if (!ItemIdIsUsed(itemid))
				continue;

			if (ItemIdIsDead(itemid))
			{
				/* just remember it for vacuum_page() */
				vacpage->offsets[vacpage->offsets_free++] = offnum;
				continue;
			}

			/* Shouldn't have any redirected items now */
			Assert(ItemIdIsNormal(itemid));

			tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
			tuple_len = tuple.t_len = ItemIdGetLength(itemid);
			ItemPointerSet(&(tuple.t_self), blkno, offnum);
			/*
			 * VACUUM FULL has an exclusive lock on the relation.  So
			 * normally no other transaction can have pending INSERTs or
			 * DELETEs in this relation.  A tuple is either:
			 *		(a) live (XMIN_COMMITTED)
			 *		(b) known dead (XMIN_INVALID, or XMAX_COMMITTED and xmax
			 *			is visible to all active transactions)
			 *		(c) inserted and deleted (XMIN_COMMITTED+XMAX_COMMITTED)
			 *			but at least one active transaction does not see the
			 *			deleting transaction (ie, it's RECENTLY_DEAD)
			 *		(d) moved by the currently running VACUUM
			 *		(e) inserted or deleted by a not yet committed transaction,
			 *			or by a transaction we couldn't set XMIN_COMMITTED for.
			 * In case (e) we wouldn't be in repair_frag() at all, because
			 * scan_heap() detects those cases and shuts off shrinking.
			 * We can't see case (b) here either, because such tuples were
			 * already removed by vacuum_page().  Cases (a) and (c) are
			 * normal and will have XMIN_COMMITTED set.  Case (d) is only
			 * possible if a whole tuple chain has been moved while
			 * processing this or a higher numbered block.
			 */
2016 if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
2018 if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
2019 elog(ERROR, "HEAP_MOVED_IN was not expected");
2020 if (!(tuple.t_data->t_infomask & HEAP_MOVED_OFF))
2021 elog(ERROR, "HEAP_MOVED_OFF was expected");
2024 * MOVED_OFF by another VACUUM would have caused the
2025 * visibility check to set XMIN_COMMITTED or XMIN_INVALID.
2027 if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
2028 elog(ERROR, "invalid XVAC in tuple header");
2031 * If I have already moved this (chain) tuple, then I have to
2032 * check whether it is recorded in vacpage or not - i.e. whether
2033 * it was moved while cleaning this page or some previous one.
2036 /* Can't we Assert(keep_tuples > 0) here? */
2037 if (keep_tuples == 0)
2039 if (chain_tuple_moved)
2041 /* some chains were moved while cleaning this page */
2042 Assert(vacpage->offsets_free > 0);
2043 for (i = 0; i < vacpage->offsets_free; i++)
2045 if (vacpage->offsets[i] == offnum)
2048 if (i >= vacpage->offsets_free) /* not found */
2050 vacpage->offsets[vacpage->offsets_free++] = offnum;
2053 * If this is not a heap-only tuple, there must be an
2054 * index entry for this item which will be removed in
2055 * the index cleanup. Decrement the
2056 * keep_indexed_tuples count to remember this.
2058 if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
2059 keep_indexed_tuples--;
2065 vacpage->offsets[vacpage->offsets_free++] = offnum;
2068 * If this is not a heap-only tuple, there must be an
2069 * index entry for this item which will be removed in the
2070 * index cleanup. Decrement the keep_indexed_tuples count to remember this.
2073 if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
2074 keep_indexed_tuples--;
2081 * If this tuple is in a chain of tuples created in updates by
2082 * "recent" transactions then we have to move the whole chain of
2083 * tuples to other places, so that we can write new t_ctid links
2084 * that preserve the chain relationship.
2086 * This test is complicated. Read it as "if tuple is a recently
2087 * created updated version, OR if it is an obsoleted version". (In
2088 * the second half of the test, we needn't make any check on XMAX
2089 * --- it must be recently obsoleted, else scan_heap would have
2090 * deemed it removable.)
2092 * NOTE: this test is not 100% accurate: it is possible for a
2093 * tuple to be an updated one with recent xmin, and yet not match
2094 * any new_tid entry in the vtlinks list. Presumably there was
2095 * once a parent tuple with xmax matching the xmin, but it's
2096 * possible that that tuple has been removed --- for example, if
2097 * it had xmin = xmax and wasn't itself an updated version, then
2098 * HeapTupleSatisfiesVacuum would deem it removable as soon as the
2099 * xmin xact completes.
2101 * To be on the safe side, we abandon the repair_frag process if
2102 * we cannot find the parent tuple in vtlinks. This may be overly
2103 * conservative; AFAICS it would be safe to move the chain.
2105 * Also, because we distinguish DEAD and RECENTLY_DEAD tuples
2106 * using OldestXmin, which is a rather coarse test, it is quite
2107 * possible to have an update chain in which a tuple we think is
2108 * RECENTLY_DEAD links forward to one that is definitely DEAD. In
2109 * such a case the RECENTLY_DEAD tuple must actually be dead, but
2110 * it seems too complicated to try to make VACUUM remove it. We
2111 * treat each contiguous set of RECENTLY_DEAD tuples as a
2112 * separately movable chain, ignoring any intervening DEAD ones.
2114 if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
2115 !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
2117 (!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID |
2119 !(ItemPointerEquals(&(tuple.t_self),
2120 &(tuple.t_data->t_ctid)))))
2123 bool freeCbuf = false;
2124 bool chain_move_failed = false;
2125 bool moved_target = false;
2126 ItemPointerData Ctid;
2127 HeapTupleData tp = tuple;
2128 Size tlen = tuple_len;
2132 VacPage to_vacpage = NULL;
2136 if (dst_buffer != InvalidBuffer)
2138 ReleaseBuffer(dst_buffer);
2139 dst_buffer = InvalidBuffer;
2142 /* Quick exit if we have no vtlinks to search in */
2143 if (vacrelstats->vtlinks == NULL)
2145 elog(DEBUG2, "parent item in update-chain not found --- cannot continue repair_frag");
2146 break; /* out of walk-along-page loop */
2150 * If this tuple is at the beginning or middle of the chain, we
2151 * have to move to the end of the chain. As with any t_ctid
2152 * chase, we have to verify that each new tuple is really the
2153 * descendant of the tuple we came from; however, here we need
2154 * even more than the normal amount of paranoia. If t_ctid
2155 * links forward to a tuple determined to be DEAD, then
2156 * depending on where that tuple is, it might already have
2157 * been removed, and perhaps even replaced by a MOVED_IN
2158 * tuple. We don't want to include any DEAD tuples in the
2159 * chain, so we have to recheck HeapTupleSatisfiesVacuum.
2161 while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
2163 !(ItemPointerEquals(&(tp.t_self),
2164 &(tp.t_data->t_ctid))))
2166 ItemPointerData nextTid;
2167 TransactionId priorXmax;
2170 OffsetNumber nextOffnum;
2172 HeapTupleHeader nextTdata;
2173 HTSV_Result nextTstatus;
2175 nextTid = tp.t_data->t_ctid;
2176 priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
2177 /* assume block# is OK (see heap_fetch comments) */
2178 nextBuf = ReadBufferExtended(onerel, MAIN_FORKNUM,
2179 ItemPointerGetBlockNumber(&nextTid),
2180 RBM_NORMAL, vac_strategy);
2181 nextPage = BufferGetPage(nextBuf);
2182 /* If bogus or unused slot, assume tp is end of chain */
2183 nextOffnum = ItemPointerGetOffsetNumber(&nextTid);
2184 if (nextOffnum < FirstOffsetNumber ||
2185 nextOffnum > PageGetMaxOffsetNumber(nextPage))
2187 ReleaseBuffer(nextBuf);
2190 nextItemid = PageGetItemId(nextPage, nextOffnum);
2191 if (!ItemIdIsNormal(nextItemid))
2193 ReleaseBuffer(nextBuf);
2196 /* if not matching XMIN, assume tp is end of chain */
2197 nextTdata = (HeapTupleHeader) PageGetItem(nextPage,
2199 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(nextTdata),
2202 ReleaseBuffer(nextBuf);
2207 * Must check for DEAD or MOVED_IN tuple, too. This could
2208 * potentially update hint bits, so we'd better hold the
2209 * buffer content lock.
2211 LockBuffer(nextBuf, BUFFER_LOCK_SHARE);
2212 nextTstatus = HeapTupleSatisfiesVacuum(nextTdata,
2215 if (nextTstatus == HEAPTUPLE_DEAD ||
2216 nextTstatus == HEAPTUPLE_INSERT_IN_PROGRESS)
2218 UnlockReleaseBuffer(nextBuf);
2221 LockBuffer(nextBuf, BUFFER_LOCK_UNLOCK);
2222 /* if it's MOVED_OFF we should have moved this one with it */
2223 if (nextTstatus == HEAPTUPLE_DELETE_IN_PROGRESS)
2224 elog(ERROR, "updated tuple is already HEAP_MOVED_OFF");
2225 /* OK, switch our attention to the next tuple in chain */
2226 tp.t_data = nextTdata;
2227 tp.t_self = nextTid;
2228 tlen = tp.t_len = ItemIdGetLength(nextItemid);
2230 ReleaseBuffer(Cbuf);
2235 /* Set up workspace for planning the chain move */
2236 vtmove = (VTupleMove) palloc(100 * sizeof(VTupleMoveData));
2241 * Now, walk backwards up the chain (towards older tuples) and
2242 * check whether all items in the chain can be moved. We record all
2243 * the moves that need to be made in the vtmove array.
2250 HeapTupleHeader PTdata;
2251 VTupleLinkData vtld,
2254 /* Identify a target page to move this tuple to */
2255 if (to_vacpage == NULL ||
2256 !enough_space(to_vacpage, tlen))
2258 for (i = 0; i < num_fraged_pages; i++)
2260 if (enough_space(fraged_pages->pagedesc[i], tlen))
2264 if (i == num_fraged_pages)
2266 /* can't move item anywhere */
2267 chain_move_failed = true;
2268 break; /* out of check-all-items loop */
2271 to_vacpage = fraged_pages->pagedesc[to_item];
2273 to_vacpage->free -= MAXALIGN(tlen);
2274 if (to_vacpage->offsets_used >= to_vacpage->offsets_free)
2275 to_vacpage->free -= sizeof(ItemIdData);
2276 (to_vacpage->offsets_used)++;
2278 /* Add an entry to vtmove list */
2279 if (free_vtmove == 0)
2282 vtmove = (VTupleMove)
2284 (free_vtmove + num_vtmove) *
2285 sizeof(VTupleMoveData));
2287 vtmove[num_vtmove].tid = tp.t_self;
2288 vtmove[num_vtmove].vacpage = to_vacpage;
2289 if (to_vacpage->offsets_used == 1)
2290 vtmove[num_vtmove].cleanVpd = true;
2292 vtmove[num_vtmove].cleanVpd = false;
2296 /* Remember if we reached the original target tuple */
2297 if (ItemPointerGetBlockNumber(&tp.t_self) == blkno &&
2298 ItemPointerGetOffsetNumber(&tp.t_self) == offnum)
2299 moved_target = true;
2301 /* Done if at beginning of chain */
2302 if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
2303 TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
2305 break; /* out of check-all-items loop */
2307 /* Move to tuple with prior row version */
2308 vtld.new_tid = tp.t_self;
2310 vac_bsearch((void *) &vtld,
2311 (void *) (vacrelstats->vtlinks),
2312 vacrelstats->num_vtlinks,
2313 sizeof(VTupleLinkData),
2317 /* see discussion above */
2318 elog(DEBUG2, "parent item in update-chain not found --- cannot continue repair_frag");
2319 chain_move_failed = true;
2320 break; /* out of check-all-items loop */
2322 tp.t_self = vtlp->this_tid;
2323 Pbuf = ReadBufferExtended(onerel, MAIN_FORKNUM,
2324 ItemPointerGetBlockNumber(&(tp.t_self)),
2325 RBM_NORMAL, vac_strategy);
2326 Ppage = BufferGetPage(Pbuf);
2327 Pitemid = PageGetItemId(Ppage,
2328 ItemPointerGetOffsetNumber(&(tp.t_self)));
2329 /* this can't happen since we saw tuple earlier: */
2330 if (!ItemIdIsNormal(Pitemid))
2331 elog(ERROR, "parent itemid marked as unused");
2332 PTdata = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
2334 /* ctid should not have changed since we saved it */
2335 Assert(ItemPointerEquals(&(vtld.new_tid),
2336 &(PTdata->t_ctid)));
2339 * Read above about cases when !ItemIdIsUsed(nextItemid)
2340 * (child item is removed)... Because at the moment we don't
2341 * remove the useless part of an update chain, it's possible
2342 * to find a non-matching parent row here. As in the case
2343 * which caused this problem, we stop shrinking here. We could
2344 * try to find the real parent row, but we don't want to do
2345 * that, because a real solution will be implemented later
2346 * anyway, and we are too close to the 6.5 release.
2347 * - vadim 06/11/99
2349 if ((PTdata->t_infomask & HEAP_XMAX_IS_MULTI) ||
2350 !(TransactionIdEquals(HeapTupleHeaderGetXmax(PTdata),
2351 HeapTupleHeaderGetXmin(tp.t_data))))
2353 ReleaseBuffer(Pbuf);
2354 elog(DEBUG2, "too old parent tuple found --- cannot continue repair_frag");
2355 chain_move_failed = true;
2356 break; /* out of check-all-items loop */
2359 tlen = tp.t_len = ItemIdGetLength(Pitemid);
2361 ReleaseBuffer(Cbuf);
2364 } /* end of check-all-items loop */
2367 ReleaseBuffer(Cbuf);
2370 /* Double-check that we will move the current target tuple */
2371 if (!moved_target && !chain_move_failed)
2373 elog(DEBUG2, "failed to chain back to target --- cannot continue repair_frag");
2374 chain_move_failed = true;
2377 if (chain_move_failed)
2380 * Undo changes to offsets_used state. We don't bother
2381 * cleaning up the amount-free state, since we're not
2382 * going to do any further tuple motion.
2384 for (i = 0; i < num_vtmove; i++)
2386 Assert(vtmove[i].vacpage->offsets_used > 0);
2387 (vtmove[i].vacpage->offsets_used)--;
2390 break; /* out of walk-along-page loop */
2394 * Okay, move the whole tuple chain in reverse order.
2396 * Ctid tracks the new location of the previously-moved tuple.
2398 ItemPointerSetInvalid(&Ctid);
2399 for (ti = 0; ti < num_vtmove; ti++)
2401 VacPage destvacpage = vtmove[ti].vacpage;
2405 /* Get page to move from */
2406 tuple.t_self = vtmove[ti].tid;
2407 Cbuf = ReadBufferExtended(onerel, MAIN_FORKNUM,
2408 ItemPointerGetBlockNumber(&(tuple.t_self)),
2409 RBM_NORMAL, vac_strategy);
2411 /* Get page to move to */
2412 dst_buffer = ReadBufferExtended(onerel, MAIN_FORKNUM,
2414 RBM_NORMAL, vac_strategy);
2416 LockBuffer(dst_buffer, BUFFER_LOCK_EXCLUSIVE);
2417 if (dst_buffer != Cbuf)
2418 LockBuffer(Cbuf, BUFFER_LOCK_EXCLUSIVE);
2420 dst_page = BufferGetPage(dst_buffer);
2421 Cpage = BufferGetPage(Cbuf);
2423 Citemid = PageGetItemId(Cpage,
2424 ItemPointerGetOffsetNumber(&(tuple.t_self)));
2425 tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
2426 tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
2428 move_chain_tuple(onerel, Cbuf, Cpage, &tuple,
2429 dst_buffer, dst_page, destvacpage,
2430 &ec, &Ctid, vtmove[ti].cleanVpd);
2433 * If the tuple we are moving is a heap-only tuple, this
2434 * move will generate an additional index entry, so
2435 * increment the rel_indexed_tuples count.
2437 if (HeapTupleHeaderIsHeapOnly(tuple.t_data))
2438 vacrelstats->rel_indexed_tuples++;
2441 if (destvacpage->blkno > last_move_dest_block)
2442 last_move_dest_block = destvacpage->blkno;
2445 * Remember that we moved a tuple from the current page
2446 * (the corresponding index tuple will be cleaned).
2449 vacpage->offsets[vacpage->offsets_free++] =
2450 ItemPointerGetOffsetNumber(&(tuple.t_self));
2454 * When we move tuple chains, we may need to move
2455 * tuples from a block that we haven't yet scanned in
2456 * the outer walk-along-the-relation loop. Note that
2457 * we can't be moving a tuple from a block that we
2458 * have already scanned because if such a tuple
2459 * exists, then we must have moved the chain along
2460 * with that tuple when we scanned that block. IOW the
2461 * test of (Cbuf != buf) guarantees that the tuple we
2462 * are looking at right now is in a block which is yet to be scanned.
2465 * We maintain two counters to correctly count the
2466 * moved-off tuples from blocks that are not yet
2467 * scanned (keep_tuples) and how many of them have
2468 * index pointers (keep_indexed_tuples). The main
2469 * reason to track the latter is to help verify that
2470 * indexes have the expected number of entries when
2471 * all the dust settles.
2473 if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
2474 keep_indexed_tuples++;
2478 ReleaseBuffer(dst_buffer);
2479 ReleaseBuffer(Cbuf);
2480 } /* end of move-the-tuple-chain loop */
2482 dst_buffer = InvalidBuffer;
2484 chain_tuple_moved = true;
2486 /* advance to next tuple in walk-along-page loop */
2488 } /* end of is-tuple-in-chain test */
2490 /* try to find new page for this tuple */
2491 if (dst_buffer == InvalidBuffer ||
2492 !enough_space(dst_vacpage, tuple_len))
2494 if (dst_buffer != InvalidBuffer)
2496 ReleaseBuffer(dst_buffer);
2497 dst_buffer = InvalidBuffer;
2499 for (i = 0; i < num_fraged_pages; i++)
2501 if (enough_space(fraged_pages->pagedesc[i], tuple_len))
2504 if (i == num_fraged_pages)
2505 break; /* can't move item anywhere */
2506 dst_vacpage = fraged_pages->pagedesc[i];
2507 dst_buffer = ReadBufferExtended(onerel, MAIN_FORKNUM,
2509 RBM_NORMAL, vac_strategy);
2510 LockBuffer(dst_buffer, BUFFER_LOCK_EXCLUSIVE);
2511 dst_page = BufferGetPage(dst_buffer);
2512 /* if this page was not used before - clean it */
2513 if (!PageIsEmpty(dst_page) && dst_vacpage->offsets_used == 0)
2514 vacuum_page(onerel, dst_buffer, dst_vacpage);
2517 LockBuffer(dst_buffer, BUFFER_LOCK_EXCLUSIVE);
2519 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2521 move_plain_tuple(onerel, buf, page, &tuple,
2522 dst_buffer, dst_page, dst_vacpage, &ec);
2525 * If the tuple we are moving is a heap-only tuple, this move will
2526 * generate an additional index entry, so increment the
2527 * rel_indexed_tuples count.
2529 if (HeapTupleHeaderIsHeapOnly(tuple.t_data))
2530 vacrelstats->rel_indexed_tuples++;
2533 if (dst_vacpage->blkno > last_move_dest_block)
2534 last_move_dest_block = dst_vacpage->blkno;
2537 * Remember that we moved a tuple from the current page
2538 * (the corresponding index tuple will be cleaned).
2540 vacpage->offsets[vacpage->offsets_free++] = offnum;
2541 } /* walk along page */
2544 * If we broke out of the walk-along-page loop early (ie, still have
2545 * offnum <= maxoff), then we failed to move some tuple off this page.
2546 * No point in shrinking any more, so clean up and exit the per-page loop.
2549 if (offnum < maxoff && keep_tuples > 0)
2554 * Fix vacpage state for any unvisited tuples remaining on page
2556 for (off = OffsetNumberNext(offnum);
2558 off = OffsetNumberNext(off))
2560 ItemId itemid = PageGetItemId(page, off);
2561 HeapTupleHeader htup;
2563 if (!ItemIdIsUsed(itemid))
2565 /* Shouldn't be any DEAD or REDIRECT items anymore */
2566 Assert(ItemIdIsNormal(itemid));
2568 htup = (HeapTupleHeader) PageGetItem(page, itemid);
2569 if (htup->t_infomask & HEAP_XMIN_COMMITTED)
2573 * See comments in the walk-along-page loop above about why
2574 * only MOVED_OFF tuples should be found here.
2576 if (htup->t_infomask & HEAP_MOVED_IN)
2577 elog(ERROR, "HEAP_MOVED_IN was not expected");
2578 if (!(htup->t_infomask & HEAP_MOVED_OFF))
2579 elog(ERROR, "HEAP_MOVED_OFF was expected");
2580 if (HeapTupleHeaderGetXvac(htup) != myXID)
2581 elog(ERROR, "invalid XVAC in tuple header");
2583 if (chain_tuple_moved)
2585 /* some chains were moved while cleaning this page */
2586 Assert(vacpage->offsets_free > 0);
2587 for (i = 0; i < vacpage->offsets_free; i++)
2589 if (vacpage->offsets[i] == off)
2592 if (i >= vacpage->offsets_free) /* not found */
2594 vacpage->offsets[vacpage->offsets_free++] = off;
2595 Assert(keep_tuples > 0);
2598 * If this is not a heap-only tuple, there must be an
2599 * index entry for this item which will be removed in
2600 * the index cleanup. Decrement the
2601 * keep_indexed_tuples count to remember this.
2603 if (!HeapTupleHeaderIsHeapOnly(htup))
2604 keep_indexed_tuples--;
2610 vacpage->offsets[vacpage->offsets_free++] = off;
2611 Assert(keep_tuples > 0);
2612 if (!HeapTupleHeaderIsHeapOnly(htup))
2613 keep_indexed_tuples--;
2619 if (vacpage->offsets_free > 0) /* some tuples were moved */
2621 if (chain_tuple_moved) /* else - they are ordered */
2623 qsort((char *) (vacpage->offsets), vacpage->offsets_free,
2624 sizeof(OffsetNumber), vac_cmp_offno);
2626 vpage_insert(&Nvacpagelist, copy_vac_page(vacpage));
2631 if (offnum <= maxoff)
2632 break; /* had to quit early, see above note */
2634 } /* walk along relation */
2636 blkno++; /* new number of blocks */
2638 if (dst_buffer != InvalidBuffer)
2640 Assert(num_moved > 0);
2641 ReleaseBuffer(dst_buffer);
2647 * We have to commit our tuple moves before we truncate the
2648 * relation. Ideally we should do Commit/StartTransactionCommand
2649 * here, relying on the session-level table lock to protect our
2650 * exclusive access to the relation. However, that would require a
2651 * lot of extra code to close and re-open the relation, indexes, etc.
2652 * For now, a quick hack: record status of current transaction as
2653 * committed, and continue. We force the commit to be synchronous so
2654 * that it's down to disk before we truncate. (Note: tqual.c knows
2655 * that VACUUM FULL always uses sync commit, too.) The transaction
2656 * continues to be shown as running in the ProcArray.
2658 * XXX This desperately needs to be revisited. Any failure after this
2659 * point will result in a PANIC "cannot abort transaction nnn, it was
2660 * already committed"!
2663 (void) RecordTransactionCommit();
2667 * We are not going to move any more tuples across pages, but we still
2668 * need to apply vacuum_page to compact free space in the remaining pages
2669 * in vacuum_pages list. Note that some of these pages may also be in the
2670 * fraged_pages list, and may have had tuples moved onto them; if so, we
2671 * already did vacuum_page and needn't do it again.
2673 for (i = 0, curpage = vacuum_pages->pagedesc;
2677 vacuum_delay_point();
2679 Assert((*curpage)->blkno < blkno);
2680 if ((*curpage)->offsets_used == 0)
2685 /* this page was not used as a move target, so must clean it */
2686 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, (*curpage)->blkno,
2687 RBM_NORMAL, vac_strategy);
2688 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2689 page = BufferGetPage(buf);
2690 if (!PageIsEmpty(page))
2691 vacuum_page(onerel, buf, *curpage);
2692 UnlockReleaseBuffer(buf);
2697 * Now scan all the pages that we moved tuples onto and update tuple
2698 * status bits. This is not really necessary, but will save time for
2699 * future transactions examining these tuples.
2701 update_hint_bits(onerel, fraged_pages, num_fraged_pages,
2702 last_move_dest_block, num_moved);
2705 * It'd be cleaner to make this report at the bottom of this routine, but
2706 * then the rusage would double-count the second pass of index vacuuming.
2707 * So do it here and ignore the relatively small amount of processing that occurs below.
2711 (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
2712 RelationGetRelationName(onerel),
2713 num_moved, nblocks, blkno),
2715 pg_rusage_show(&ru0))));
2718 * Reflect the motion of system tuples to catalog cache here.
2720 CommandCounterIncrement();
2722 if (Nvacpagelist.num_pages > 0)
2724 /* vacuum indexes again if needed */
2731 /* re-sort Nvacpagelist.pagedesc: it was built in descending blkno order, so reverse it */
2732 for (vpleft = Nvacpagelist.pagedesc,
2733 vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
2734 vpleft < vpright; vpleft++, vpright--)
2742 * keep_tuples is the number of tuples that have been moved off a
2743 * page during chain moves but not been scanned over subsequently.
2744 * The tuple ids of these tuples are not recorded as free offsets
2745 * for any VacPage, so they will not be cleared from the indexes.
2746 * keep_indexed_tuples is the portion of these that are expected
2747 * to have index entries.
2749 Assert(keep_tuples >= 0);
2750 for (i = 0; i < nindexes; i++)
2751 vacuum_index(&Nvacpagelist, Irel[i],
2752 vacrelstats->rel_indexed_tuples,
2753 keep_indexed_tuples);
2757 * Clean moved-off tuples from last page in Nvacpagelist list.
2759 * We need only do this in this one page, because higher-numbered
2760 * pages are going to be truncated from the relation entirely. But see
2761 * comments for update_hint_bits().
2763 if (vacpage->blkno == (blkno - 1) &&
2764 vacpage->offsets_free > 0)
2768 OffsetNumber unused[MaxOffsetNumber];
2769 OffsetNumber offnum,
2774 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, vacpage->blkno,
2775 RBM_NORMAL, vac_strategy);
2776 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
2777 page = BufferGetPage(buf);
2778 maxoff = PageGetMaxOffsetNumber(page);
2779 for (offnum = FirstOffsetNumber;
2781 offnum = OffsetNumberNext(offnum))
2783 ItemId itemid = PageGetItemId(page, offnum);
2784 HeapTupleHeader htup;
2786 if (!ItemIdIsUsed(itemid))
2788 /* Shouldn't be any DEAD or REDIRECT items anymore */
2789 Assert(ItemIdIsNormal(itemid));
2791 htup = (HeapTupleHeader) PageGetItem(page, itemid);
2792 if (htup->t_infomask & HEAP_XMIN_COMMITTED)
2796 * See comments in the walk-along-page loop above about why
2797 * only MOVED_OFF tuples should be found here.
2799 if (htup->t_infomask & HEAP_MOVED_IN)
2800 elog(ERROR, "HEAP_MOVED_IN was not expected");
2801 if (!(htup->t_infomask & HEAP_MOVED_OFF))
2802 elog(ERROR, "HEAP_MOVED_OFF was expected");
2803 if (HeapTupleHeaderGetXvac(htup) != myXID)
2804 elog(ERROR, "invalid XVAC in tuple header");
2806 ItemIdSetUnused(itemid);
2809 unused[uncnt++] = offnum;
2811 Assert(vacpage->offsets_free == num_tuples);
2813 START_CRIT_SECTION();
2815 PageRepairFragmentation(page);
2817 MarkBufferDirty(buf);
2820 if (!onerel->rd_istemp)
2824 recptr = log_heap_clean(onerel, buf,
2828 PageSetLSN(page, recptr);
2829 PageSetTLI(page, ThisTimeLineID);
2834 UnlockReleaseBuffer(buf);
2837 /* now free the new list of reaped pages */
2838 curpage = Nvacpagelist.pagedesc;
2839 for (i = 0; i < Nvacpagelist.num_pages; i++, curpage++)
2841 pfree(Nvacpagelist.pagedesc);
2844 /* Truncate relation, if needed */
2845 if (blkno < nblocks)
2847 FreeSpaceMapTruncateRel(onerel, blkno);
2848 RelationTruncate(onerel, blkno);
2849 vacrelstats->rel_pages = blkno; /* set new number of blocks */
2854 if (vacrelstats->vtlinks != NULL)
2855 pfree(vacrelstats->vtlinks);
2857 ExecContext_Finish(&ec);
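/*
 * Illustrative summary (an addition for exposition, not original code) of
 * the shape of the pass above, under the simplifying assumption that no
 * update chains are involved:
 *
 *	for (blkno = last non-empty block; blkno > last_move_dest_block; blkno--)
 *	{
 *		forget fraged_pages entries at or after blkno (we only move down);
 *		for each used, non-dead item on the page:
 *			find the first entry in fraged_pages with enough_space(),
 *			move the tuple there with move_plain_tuple(), and record its
 *			old offset in vacpage so the index entries can be cleaned;
 *	}
 *	force a synchronous commit of the moves, vacuum_page() the remaining
 *	reaped pages, update hint bits on the move destinations, vacuum the
 *	indexes again if needed, and truncate the relation to blkno pages.
 */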
2861 * move_chain_tuple() -- move one tuple that is part of a tuple chain
2863 * This routine moves old_tup from old_page to dst_page.
2864 * old_page and dst_page might be the same page.
2865 * On entry old_buf and dst_buf are locked exclusively, both locks (or
2866 * the single lock, if this is an intra-page move) are released before exit.
2869 * Yes, a routine with ten parameters is ugly, but it's still better
2870 * than having these 120 lines of code in repair_frag() which is
2871 * already too long and almost unreadable.
2874 move_chain_tuple(Relation rel,
2875 Buffer old_buf, Page old_page, HeapTuple old_tup,
2876 Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
2877 ExecContext ec, ItemPointer ctid, bool cleanVpd)
2879 TransactionId myXID = GetCurrentTransactionId();
2880 HeapTupleData newtup;
2881 OffsetNumber newoff;
2883 Size tuple_len = old_tup->t_len;
2886 * make a modifiable copy of the source tuple.
2888 heap_copytuple_with_tuple(old_tup, &newtup);
2891 * register invalidation of source tuple in catcaches.
2893 CacheInvalidateHeapTuple(rel, old_tup);
2895 /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
2896 START_CRIT_SECTION();
2899 * mark the source tuple MOVED_OFF.
2901 old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
2904 old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
2905 HeapTupleHeaderSetXvac(old_tup->t_data, myXID);
2908 * If this page was not used before - clean it.
2910 * NOTE: a nasty bug used to lurk here. It is possible for the source and
2911 * destination pages to be the same (since this tuple-chain member can be
2912 * on a page lower than the one we're currently processing in the outer
2913 * loop). If that's true, then after vacuum_page() the source tuple will
2914 * have been moved, and tuple.t_data will be pointing at garbage.
2915 * Therefore we must do everything that uses old_tup->t_data BEFORE this step!
2918 * This path is different from the other callers of vacuum_page, because
2919 * we have already incremented the vacpage's offsets_used field to account
2920 * for the tuple(s) we expect to move onto the page. Therefore
2921 * vacuum_page's check for offsets_used == 0 is wrong. But since that's a
2922 * good debugging check for all other callers, we work around it here
2923 * rather than remove it.
2925 if (!PageIsEmpty(dst_page) && cleanVpd)
2927 int sv_offsets_used = dst_vacpage->offsets_used;
2929 dst_vacpage->offsets_used = 0;
2930 vacuum_page(rel, dst_buf, dst_vacpage);
2931 dst_vacpage->offsets_used = sv_offsets_used;
2935 * Update the state of the copied tuple, and store it on the destination
2936 * page. The copied tuple is never part of a HOT chain.
2938 newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
2941 newtup.t_data->t_infomask |= HEAP_MOVED_IN;
2942 HeapTupleHeaderClearHotUpdated(newtup.t_data);
2943 HeapTupleHeaderClearHeapOnly(newtup.t_data);
2944 HeapTupleHeaderSetXvac(newtup.t_data, myXID);
2945 newoff = PageAddItem(dst_page, (Item) newtup.t_data, tuple_len,
2946 InvalidOffsetNumber, false, true);
2947 if (newoff == InvalidOffsetNumber)
2948 elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain",
2949 (unsigned long) tuple_len, dst_vacpage->blkno);
2950 newitemid = PageGetItemId(dst_page, newoff);
2951 /* drop temporary copy, and point to the version on the dest page */
2952 pfree(newtup.t_data);
2953 newtup.t_data = (HeapTupleHeader) PageGetItem(dst_page, newitemid);
2955 ItemPointerSet(&(newtup.t_self), dst_vacpage->blkno, newoff);
2958 * Set new tuple's t_ctid pointing to itself if last tuple in chain, and
2959 * to next tuple in chain otherwise. (Since we move the chain in reverse
2960 * order, this is actually the previously processed tuple.)
2962 if (!ItemPointerIsValid(ctid))
2963 newtup.t_data->t_ctid = newtup.t_self;
2965 newtup.t_data->t_ctid = *ctid;
2966 *ctid = newtup.t_self;
2968 MarkBufferDirty(dst_buf);
2969 if (dst_buf != old_buf)
2970 MarkBufferDirty(old_buf);
2973 if (!rel->rd_istemp)
2975 XLogRecPtr recptr = log_heap_move(rel, old_buf, old_tup->t_self,
2978 if (old_buf != dst_buf)
2980 PageSetLSN(old_page, recptr);
2981 PageSetTLI(old_page, ThisTimeLineID);
2983 PageSetLSN(dst_page, recptr);
2984 PageSetTLI(dst_page, ThisTimeLineID);
2989 LockBuffer(dst_buf, BUFFER_LOCK_UNLOCK);
2990 if (dst_buf != old_buf)
2991 LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);
2993 /* Create index entries for the moved tuple */
2994 if (ec->resultRelInfo->ri_NumIndices > 0)
2996 ExecStoreTuple(&newtup, ec->slot, InvalidBuffer, false);
2997 ExecInsertIndexTuples(ec->slot, &(newtup.t_self), ec->estate, true);
2998 ResetPerTupleExprContext(ec->estate);
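/*
 * Illustrative sketch (an addition for exposition, not original code): the
 * t_ctid wiring performed above when a chain is moved in reverse order.
 * "prev_new" plays the role of repair_frag's Ctid variable: invalid for the
 * chain's newest member, then the new TID of the previously-moved (newer)
 * member for each older one.
 */
#ifdef NOT_USED
static void
link_moved_chain_member(HeapTupleHeader moved, ItemPointer self,
						ItemPointer prev_new)
{
	if (!ItemPointerIsValid(prev_new))
		moved->t_ctid = *self;		/* newest version points at itself */
	else
		moved->t_ctid = *prev_new;	/* points at the next-newer version */
	*prev_new = *self;				/* remember for the next (older) member */
}
#endif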
3003 * move_plain_tuple() -- move one tuple that is not part of a chain
3005 * This routine moves old_tup from old_page to dst_page.
3006 * On entry old_buf and dst_buf are locked exclusively, both locks are
3007 * released before exit.
3009 * Yes, a routine with eight parameters is ugly, but it's still better
3010 * than having these 90 lines of code in repair_frag() which is already
3011 * too long and almost unreadable.
3014 move_plain_tuple(Relation rel,
3015 Buffer old_buf, Page old_page, HeapTuple old_tup,
3016 Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
3019 TransactionId myXID = GetCurrentTransactionId();
3020 HeapTupleData newtup;
3021 OffsetNumber newoff;
3023 Size tuple_len = old_tup->t_len;
3026 heap_copytuple_with_tuple(old_tup, &newtup);
3029 * register invalidation of source tuple in catcaches.
3031 * (Note: we do not need to register the copied tuple, because we are not
3032 * changing the tuple contents and so there cannot be any need to flush
3033 * negative catcache entries.)
3035 CacheInvalidateHeapTuple(rel, old_tup);
3037 /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
3038 START_CRIT_SECTION();
3041 * Mark new tuple as MOVED_IN by me; also mark it not HOT.
3043 newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
3046 newtup.t_data->t_infomask |= HEAP_MOVED_IN;
3047 HeapTupleHeaderClearHotUpdated(newtup.t_data);
3048 HeapTupleHeaderClearHeapOnly(newtup.t_data);
3049 HeapTupleHeaderSetXvac(newtup.t_data, myXID);
3051 /* add tuple to the page */
3052 newoff = PageAddItem(dst_page, (Item) newtup.t_data, tuple_len,
3053 InvalidOffsetNumber, false, true);
3054 if (newoff == InvalidOffsetNumber)
3055 elog(PANIC, "failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
3056 (unsigned long) tuple_len,
3057 dst_vacpage->blkno, (unsigned long) dst_vacpage->free,
3058 dst_vacpage->offsets_used, dst_vacpage->offsets_free);
3059 newitemid = PageGetItemId(dst_page, newoff);
3060 pfree(newtup.t_data);
3061 newtup.t_data = (HeapTupleHeader) PageGetItem(dst_page, newitemid);
3062 ItemPointerSet(&(newtup.t_data->t_ctid), dst_vacpage->blkno, newoff);
3063 newtup.t_self = newtup.t_data->t_ctid;
3066 * Mark old tuple as MOVED_OFF by me.
3068 old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
3071 old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
3072 HeapTupleHeaderSetXvac(old_tup->t_data, myXID);
3074 MarkBufferDirty(dst_buf);
3075 MarkBufferDirty(old_buf);
3078 if (!rel->rd_istemp)
3080 XLogRecPtr recptr = log_heap_move(rel, old_buf, old_tup->t_self,
3083 PageSetLSN(old_page, recptr);
3084 PageSetTLI(old_page, ThisTimeLineID);
3085 PageSetLSN(dst_page, recptr);
3086 PageSetTLI(dst_page, ThisTimeLineID);
3091 dst_vacpage->free = PageGetFreeSpaceWithFillFactor(rel, dst_page);
3092 LockBuffer(dst_buf, BUFFER_LOCK_UNLOCK);
3093 LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);
3095 dst_vacpage->offsets_used++;
3097 /* insert index tuples if needed */
3098 if (ec->resultRelInfo->ri_NumIndices > 0)
3100 ExecStoreTuple(&newtup, ec->slot, InvalidBuffer, false);
3101 ExecInsertIndexTuples(ec->slot, &(newtup.t_self), ec->estate, true);
3102 ResetPerTupleExprContext(ec->estate);
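/*
 * Condensed restatement (an addition for exposition, not original code) of
 * the infomask/XVAC protocol used by both move_chain_tuple() and
 * move_plain_tuple() above: the source copy is stamped MOVED_OFF and the
 * destination copy MOVED_IN, each with XVAC set to the moving VACUUM's XID,
 * so that tqual.c can later judge both copies by whether that XID committed.
 */
#ifdef NOT_USED
static void
mark_moved_pair(HeapTupleHeader src, HeapTupleHeader dst, TransactionId xvac)
{
	src->t_infomask &= ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID |
						 HEAP_MOVED_IN);
	src->t_infomask |= HEAP_MOVED_OFF;
	HeapTupleHeaderSetXvac(src, xvac);

	dst->t_infomask &= ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID |
						 HEAP_MOVED_OFF);
	dst->t_infomask |= HEAP_MOVED_IN;
	HeapTupleHeaderSetXvac(dst, xvac);
}
#endif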
3107 * update_hint_bits() -- update hint bits in destination pages
3109 * Scan all the pages that we moved tuples onto and update tuple status bits.
3110 * This is not really necessary, but it will save time for future transactions
3111 * examining these tuples.
3113 * This pass guarantees that all HEAP_MOVED_IN tuples are marked as
3114 * XMIN_COMMITTED, so that future tqual tests won't need to check their XVAC.
3116 * BUT NOTICE that this code fails to clear HEAP_MOVED_OFF tuples from
3117 * pages that were move source pages but not move dest pages. The bulk
3118 * of the move source pages will be physically truncated from the relation,
3119 * and the last page remaining in the rel will be fixed separately in
3120 * repair_frag(), so the only cases where a MOVED_OFF tuple won't get its
3121 * hint bits updated are tuples that are moved as part of a chain and were
3122 * on pages that were neither move destinations nor at the end of the rel.
3123 * To completely ensure that no MOVED_OFF tuples remain unmarked, we'd have
3124 * to remember and revisit those pages too.
3126 * One wonders whether it wouldn't be better to skip this work entirely,
3127 * and let the tuple status updates happen someplace that's not holding an
3128 * exclusive lock on the relation.
3131 update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
3132 BlockNumber last_move_dest_block, int num_moved)
3134 TransactionId myXID = GetCurrentTransactionId();
3135 int checked_moved = 0;
3139 for (i = 0, curpage = fraged_pages->pagedesc;
3140 i < num_fraged_pages;
3145 OffsetNumber max_offset;
3149 vacuum_delay_point();
3151 if ((*curpage)->blkno > last_move_dest_block)
3152 break; /* no need to scan any further */
3153 if ((*curpage)->offsets_used == 0)
3154 continue; /* this page was never used as a move dest */
3155 buf = ReadBufferExtended(rel, MAIN_FORKNUM, (*curpage)->blkno,
3156 RBM_NORMAL, vac_strategy);
3157 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3158 page = BufferGetPage(buf);
3159 max_offset = PageGetMaxOffsetNumber(page);
3160 for (off = FirstOffsetNumber;
3162 off = OffsetNumberNext(off))
3164 ItemId itemid = PageGetItemId(page, off);
3165 HeapTupleHeader htup;
3167 if (!ItemIdIsUsed(itemid))
3169 /* Shouldn't be any DEAD or REDIRECT items anymore */
3170 Assert(ItemIdIsNormal(itemid));
3172 htup = (HeapTupleHeader) PageGetItem(page, itemid);
3173 if (htup->t_infomask & HEAP_XMIN_COMMITTED)
3177 * Here we may see either MOVED_OFF or MOVED_IN tuples.
3179 if (!(htup->t_infomask & HEAP_MOVED))
3180 elog(ERROR, "HEAP_MOVED_OFF/HEAP_MOVED_IN was expected");
3181 if (HeapTupleHeaderGetXvac(htup) != myXID)
3182 elog(ERROR, "invalid XVAC in tuple header");
3184 if (htup->t_infomask & HEAP_MOVED_IN)
3186 htup->t_infomask |= HEAP_XMIN_COMMITTED;
3187 htup->t_infomask &= ~HEAP_MOVED;
3191 htup->t_infomask |= HEAP_XMIN_INVALID;
3193 MarkBufferDirty(buf);
3194 UnlockReleaseBuffer(buf);
3195 Assert((*curpage)->offsets_used == num_tuples);
3196 checked_moved += num_tuples;
3198 Assert(num_moved == checked_moved);
3202 * vacuum_heap() -- free dead tuples
3204 * This routine marks dead tuples as unused and truncates relation
3205 * if there are "empty" end-blocks.
3208 vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
3212 BlockNumber relblocks;
3216 nblocks = vacuum_pages->num_pages;
3217 nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with them */
3219 for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++)
3221 vacuum_delay_point();
3223 if ((*vacpage)->offsets_free > 0)
3225 buf = ReadBufferExtended(onerel, MAIN_FORKNUM, (*vacpage)->blkno,
3226 RBM_NORMAL, vac_strategy);
3227 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3228 vacuum_page(onerel, buf, *vacpage);
3229 UnlockReleaseBuffer(buf);
3233 /* Truncate relation if there are some empty end-pages */
3234 Assert(vacrelstats->rel_pages >= vacuum_pages->empty_end_pages);
3235 if (vacuum_pages->empty_end_pages > 0)
3237 relblocks = vacrelstats->rel_pages - vacuum_pages->empty_end_pages;
3239 (errmsg("\"%s\": truncated %u to %u pages",
3240 RelationGetRelationName(onerel),
3241 vacrelstats->rel_pages, relblocks)));
3242 FreeSpaceMapTruncateRel(onerel, relblocks);
3243 RelationTruncate(onerel, relblocks);
3244 vacrelstats->rel_pages = relblocks; /* set new number of blocks */
3249 * vacuum_page() -- free dead tuples on a page
3250 * and repair its fragmentation.
3252 * Caller must hold pin and lock on buffer.
3255 vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
3257 Page page = BufferGetPage(buffer);
3260 /* There shouldn't be any tuples moved onto the page yet! */
3261 Assert(vacpage->offsets_used == 0);
3263 START_CRIT_SECTION();
3265 for (i = 0; i < vacpage->offsets_free; i++)
3267 ItemId itemid = PageGetItemId(page, vacpage->offsets[i]);
3269 ItemIdSetUnused(itemid);
3272 PageRepairFragmentation(page);
3274 MarkBufferDirty(buffer);
3277 if (!onerel->rd_istemp)
3281 recptr = log_heap_clean(onerel, buffer,
3283 vacpage->offsets, vacpage->offsets_free,
3285 PageSetLSN(page, recptr);
3286 PageSetTLI(page, ThisTimeLineID);
3293 * scan_index() -- scan one index relation to update pg_class statistics.
3295 * We use this when we have no deletions to do.
3298 scan_index(Relation indrel, double num_tuples)
3300 IndexBulkDeleteResult *stats;
3301 IndexVacuumInfo ivinfo;
3304 pg_rusage_init(&ru0);
3306 ivinfo.index = indrel;
3307 ivinfo.vacuum_full = true;
3308 ivinfo.message_level = elevel;
3309 ivinfo.num_heap_tuples = num_tuples;
3310 ivinfo.strategy = vac_strategy;
3312 stats = index_vacuum_cleanup(&ivinfo, NULL);
3317 /* now update statistics in pg_class */
3318 vac_update_relstats(RelationGetRelid(indrel),
3319 stats->num_pages, stats->num_index_tuples,
3320 false, InvalidTransactionId);
3323 (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
3324 RelationGetRelationName(indrel),
3325 stats->num_index_tuples,
3327 errdetail("%u index pages have been deleted, %u are currently reusable.\n"
3329 stats->pages_deleted, stats->pages_free,
3330 pg_rusage_show(&ru0))));
3333 * Check for tuple count mismatch. If the index is partial, then it's OK
3334 * for it to have fewer tuples than the heap; else we got trouble.
3336 if (stats->num_index_tuples != num_tuples)
3338 if (stats->num_index_tuples > num_tuples ||
3339 !vac_is_partial_index(indrel))
3341 (errmsg("index \"%s\" contains %.0f row versions, but table contains %.0f row versions",
3342 RelationGetRelationName(indrel),
3343 stats->num_index_tuples, num_tuples),
3344 errhint("Rebuild the index with REINDEX.")));
3351 * vacuum_index() -- vacuum one index relation.
3353 * vacpagelist is the VacPageList of the heap we're currently vacuuming.
3354 * It's locked. Indrel is an index relation on the vacuumed heap.
3356 * We don't bother to set locks on the index relation here, since
3357 * the parent table is exclusive-locked already.
3359 * Finally, we arrange to update the index relation's statistics in pg_class.
3363 vacuum_index(VacPageList vacpagelist, Relation indrel,
3364 double num_tuples, int keep_tuples)
3366 IndexBulkDeleteResult *stats;
3367 IndexVacuumInfo ivinfo;
3370 pg_rusage_init(&ru0);
3372 ivinfo.index = indrel;
3373 ivinfo.vacuum_full = true;
3374 ivinfo.message_level = elevel;
3375 ivinfo.num_heap_tuples = num_tuples + keep_tuples;
3376 ivinfo.strategy = vac_strategy;
3378 /* Do bulk deletion */
3379 stats = index_bulk_delete(&ivinfo, NULL, tid_reaped, (void *) vacpagelist);
3381 /* Do post-VACUUM cleanup */
3382 stats = index_vacuum_cleanup(&ivinfo, stats);
3387 /* now update statistics in pg_class */
3388 vac_update_relstats(RelationGetRelid(indrel),
3389 stats->num_pages, stats->num_index_tuples,
3390 false, InvalidTransactionId);
3393 (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
3394 RelationGetRelationName(indrel),
3395 stats->num_index_tuples,
3397 errdetail("%.0f index row versions were removed.\n"
3398 "%u index pages have been deleted, %u are currently reusable.\n"
3400 stats->tuples_removed,
3401 stats->pages_deleted, stats->pages_free,
3402 pg_rusage_show(&ru0))));
3405 * Check for tuple count mismatch. If the index is partial, then it's OK
3406 * for it to have fewer tuples than the heap; else we got trouble.
3408 if (stats->num_index_tuples != num_tuples + keep_tuples)
3410 if (stats->num_index_tuples > num_tuples + keep_tuples ||
3411 !vac_is_partial_index(indrel))
3413 (errmsg("index \"%s\" contains %.0f row versions, but table contains %.0f row versions",
3414 RelationGetRelationName(indrel),
3415 stats->num_index_tuples, num_tuples + keep_tuples),
3416 errhint("Rebuild the index with REINDEX.")));
3423 * tid_reaped() -- is a particular tid reaped?
3425 * This has the right signature to be an IndexBulkDeleteCallback.
3427 * vacpagelist->pagedesc is assumed to be sorted in blkno order.
3430 tid_reaped(ItemPointer itemptr, void *state)
3432 VacPageList vacpagelist = (VacPageList) state;
3433 OffsetNumber ioffno;
3437 VacPageData vacpage;
3439 vacpage.blkno = ItemPointerGetBlockNumber(itemptr);
3440 ioffno = ItemPointerGetOffsetNumber(itemptr);
3443 vpp = (VacPage *) vac_bsearch((void *) &vp,
3444 (void *) (vacpagelist->pagedesc),
3445 vacpagelist->num_pages,
3452 /* ok - we are on a partially or fully reaped page */
3455 if (vp->offsets_free == 0)
3457 /* this is EmptyPage, so claim all tuples on it are reaped!!! */
3461 voff = (OffsetNumber *) vac_bsearch((void *) &ioffno,
3462 (void *) (vp->offsets),
3464 sizeof(OffsetNumber),
3475 * Update the Free Space Map with the info we now have about free space in
3479 vac_update_fsm(Relation onerel, VacPageList fraged_pages,
3480 BlockNumber rel_pages)
3482 int nPages = fraged_pages->num_pages;
3483 VacPage *pagedesc = fraged_pages->pagedesc;
3486 for (i = 0; i < nPages; i++)
3489 * fraged_pages may contain entries for pages that we later decided to
3490 * truncate from the relation; don't enter them into the free space
3493 if (pagedesc[i]->blkno >= rel_pages)
3496 RecordPageWithFreeSpace(onerel, pagedesc[i]->blkno, pagedesc[i]->free);
3501 /* Copy a VacPage structure */
3503 copy_vac_page(VacPage vacpage)
3507 /* allocate a VacPageData entry */
3508 newvacpage = (VacPage) palloc(sizeof(VacPageData) +
3509 vacpage->offsets_free * sizeof(OffsetNumber));
3512 if (vacpage->offsets_free > 0)
3513 memcpy(newvacpage->offsets, vacpage->offsets,
3514 vacpage->offsets_free * sizeof(OffsetNumber));
3515 newvacpage->blkno = vacpage->blkno;
3516 newvacpage->free = vacpage->free;
3517 newvacpage->offsets_used = vacpage->offsets_used;
3518 newvacpage->offsets_free = vacpage->offsets_free;
3524 * Add a VacPage pointer to a VacPageList.
3526 * As a side effect of the way that scan_heap works,
3527 * higher pages come after lower pages in the array
3528 * (and highest tid on a page is last).
3531 vpage_insert(VacPageList vacpagelist, VacPage vpnew)
3533 #define PG_NPAGEDESC 1024
3535 /* allocate a VacPage entry if needed */
3536 if (vacpagelist->num_pages == 0)
3538 vacpagelist->pagedesc = (VacPage *) palloc(PG_NPAGEDESC * sizeof(VacPage));
3539 vacpagelist->num_allocated_pages = PG_NPAGEDESC;
3541 else if (vacpagelist->num_pages >= vacpagelist->num_allocated_pages)
3543 vacpagelist->num_allocated_pages *= 2;
3544 vacpagelist->pagedesc = (VacPage *) repalloc(vacpagelist->pagedesc, vacpagelist->num_allocated_pages * sizeof(VacPage));
3546 vacpagelist->pagedesc[vacpagelist->num_pages] = vpnew;
3547 (vacpagelist->num_pages)++;
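/*
 * Worked example (an addition for exposition): the array starts with room
 * for PG_NPAGEDESC (1024) entries and doubles whenever it fills, so
 * inserting the 1025th page triggers a repalloc to 2048 entries, the
 * 2049th to 4096, and so on -- amortized O(1) per insertion.
 */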
3551 * vac_bsearch: just like standard C library routine bsearch(),
3552 * except that we first test to see whether the target key is outside
3553 * the range of the table entries. This case is handled relatively slowly
3554 * by the normal binary search algorithm (ie, no faster than any other key)
3555 * but it occurs often enough in VACUUM to be worth optimizing.
3558 vac_bsearch(const void *key, const void *base,
3559 size_t nelem, size_t size,
3560 int (*compar) (const void *, const void *))
3567 res = compar(key, base);
3571 return (void *) base;
3574 last = (const void *) ((const char *) base + (nelem - 1) * size);
3575 res = compar(key, last);
3579 return (void *) last;
3582 return NULL; /* already checked 'em all */
3583 return bsearch(key, base, nelem, size, compar);
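/*
 * Illustrative usage sketch (an addition for exposition, not original
 * code): finding the VacPage for a given block, the same lookup that
 * tid_reaped() performs. Note the key handed to vac_cmp_blk must be a
 * pointer to a VacPage, since the comparator dereferences one level.
 */
#ifdef NOT_USED
static VacPage
find_vac_page(VacPageList vacpagelist, BlockNumber blkno)
{
	VacPageData keydata;
	VacPage		key = &keydata;
	VacPage    *vpp;

	keydata.blkno = blkno;
	vpp = (VacPage *) vac_bsearch((void *) &key,
								  (void *) (vacpagelist->pagedesc),
								  vacpagelist->num_pages,
								  sizeof(VacPage),
								  vac_cmp_blk);
	return vpp ? (*vpp) : NULL;
}
#endif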
3587 * Comparator routines for use with qsort() and bsearch().
3590 vac_cmp_blk(const void *left, const void *right)
3595 lblk = (*((VacPage *) left))->blkno;
3596 rblk = (*((VacPage *) right))->blkno;
3606 vac_cmp_offno(const void *left, const void *right)
3608 if (*(OffsetNumber *) left < *(OffsetNumber *) right)
3610 if (*(OffsetNumber *) left == *(OffsetNumber *) right)
3616 vac_cmp_vtlinks(const void *left, const void *right)
3618 if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi <
3619 ((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
3621 if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi >
3622 ((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
3624 /* bi_hi-es are equal */
3625 if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo <
3626 ((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
3628 if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo >
3629 ((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
3631 /* bi_lo-es are equal */
3632 if (((VTupleLink) left)->new_tid.ip_posid <
3633 ((VTupleLink) right)->new_tid.ip_posid)
3635 if (((VTupleLink) left)->new_tid.ip_posid >
3636 ((VTupleLink) right)->new_tid.ip_posid)
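/*
 * Note (an addition for exposition): the comparator above orders TIDs
 * lexicographically by (block hi-word, block lo-word, offset), which is the
 * same as ordering by (BlockNumber, OffsetNumber); e.g. (block 0, offset 5)
 * sorts before (block 1, offset 2). The vtlinks array searched with
 * vac_bsearch() in repair_frag() must therefore have been sorted with this
 * same comparator during the scan phase.
 */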
3643 * Open all the indexes of the given relation, obtaining the specified kind
3644 * of lock on each. Return an array of Relation pointers for the indexes
3645 * into *Irel, and the number of indexes into *nindexes.
3648 vac_open_indexes(Relation relation, LOCKMODE lockmode,
3649 int *nindexes, Relation **Irel)
3652 ListCell *indexoidscan;
3655 Assert(lockmode != NoLock);
3657 indexoidlist = RelationGetIndexList(relation);
3659 *nindexes = list_length(indexoidlist);
3662 *Irel = (Relation *) palloc(*nindexes * sizeof(Relation));
3667 foreach(indexoidscan, indexoidlist)
3669 Oid indexoid = lfirst_oid(indexoidscan);
3671 (*Irel)[i++] = index_open(indexoid, lockmode);
3674 list_free(indexoidlist);
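/*
 * Illustrative usage sketch (an addition for exposition, not original
 * code): the open/work/close pattern these helpers support. Passing NoLock
 * to vac_close_indexes keeps the index locks until transaction end.
 */
#ifdef NOT_USED
static void
example_scan_all_indexes(Relation heaprel, double num_tuples)
{
	Relation   *Irel;
	int			nindexes;
	int			i;

	vac_open_indexes(heaprel, AccessExclusiveLock, &nindexes, &Irel);
	for (i = 0; i < nindexes; i++)
		scan_index(Irel[i], num_tuples);
	vac_close_indexes(nindexes, Irel, NoLock);
}
#endif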
3678 * Release the resources acquired by vac_open_indexes. Optionally release
3679 * the locks (say NoLock to keep 'em).
3682 vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
3689 Relation ind = Irel[nindexes];
3691 index_close(ind, lockmode);
3698 * Is an index partial (ie, could it contain fewer tuples than the heap?)
3701 vac_is_partial_index(Relation indrel)
3704 * If the index's AM doesn't support nulls, it's partial for our purposes
3706 if (!indrel->rd_am->amindexnulls)
3709 /* Otherwise, look to see if there's a partial-index predicate */
3710 if (!heap_attisnull(indrel->rd_indextuple, Anum_pg_index_indpred))
3718 enough_space(VacPage vacpage, Size len)
3720 len = MAXALIGN(len);
3722 if (len > vacpage->free)
3725 /* if there are free itemid(s) and len <= free_space... */
3726 if (vacpage->offsets_used < vacpage->offsets_free)
3729 /* noff_used >= noff_free and so we'll have to allocate a new itemid */
3730 if (len + sizeof(ItemIdData) <= vacpage->free)
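/*
 * Worked example (an addition for exposition), assuming 8-byte MAXALIGN
 * and sizeof(ItemIdData) == 4: for len = 100, MAXALIGN(len) = 104. With
 * free = 106 and a spare (already-free) line pointer, the tuple fits
 * (104 <= 106); without a spare line pointer we also need 4 bytes for a
 * new ItemIdData, and 104 + 4 > 106, so it does not.
 */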
3737 PageGetFreeSpaceWithFillFactor(Relation relation, Page page)
3740 * It is correct to use PageGetExactFreeSpace() here, *not*
3741 * PageGetHeapFreeSpace(). This is because (a) we do our own, exact
3742 * accounting for whether line pointers must be added, and (b) we will
3743 * recycle any LP_DEAD line pointers before starting to add rows to a
3744 * page, but that may not have happened yet at the time this function is
3745 * applied to a page, which means PageGetHeapFreeSpace()'s protection
3746 * against too many line pointers on a page could fire incorrectly. We do
3747 * not need that protection here: since VACUUM FULL always recycles all
3748 * dead line pointers first, it'd be physically impossible to insert more
3749 * than MaxHeapTuplesPerPage tuples anyway.
3751 Size freespace = PageGetExactFreeSpace(page);
3754 targetfree = RelationGetTargetPageFreeSpace(relation,
3755 HEAP_DEFAULT_FILLFACTOR);
3756 if (freespace > targetfree)
3757 return freespace - targetfree;
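/*
 * Worked example (an addition for exposition): with 8K blocks and
 * fillfactor = 90, RelationGetTargetPageFreeSpace() reserves
 * 8192 * (100 - 90) / 100 = 819 bytes. If PageGetExactFreeSpace() reports
 * 3000 bytes, this function returns 3000 - 819 = 2181; if it reports only
 * 500 bytes, the page is already past its fillfactor target and the
 * function returns 0.
 */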
3763 * vacuum_delay_point --- check for interrupts and cost-based delay.
3765 * This should be called in each major loop of VACUUM processing,
3766 * typically once per page processed.
3769 vacuum_delay_point(void)
3771 /* Always check for interrupts */
3772 CHECK_FOR_INTERRUPTS();
3774 /* Nap if appropriate */
3775 if (VacuumCostActive && !InterruptPending &&
3776 VacuumCostBalance >= VacuumCostLimit)
3780 msec = VacuumCostDelay * VacuumCostBalance / VacuumCostLimit;
3781 if (msec > VacuumCostDelay * 4)
3782 msec = VacuumCostDelay * 4;
3784 pg_usleep(msec * 1000L);
3786 VacuumCostBalance = 0;
3788 /* update balance values for workers */
3789 AutoVacuumUpdateDelay();
3791 /* Might have gotten an interrupt while sleeping */
3792 CHECK_FOR_INTERRUPTS();
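/*
 * Worked example (an addition for exposition): with vacuum_cost_delay =
 * 20ms and vacuum_cost_limit = 200, arriving here with a cost balance of
 * 400 yields msec = 20 * 400 / 200 = 40, so we nap for 40ms; a balance of
 * 1200 would yield 120ms but is clamped to 4 * 20 = 80ms. Either way the
 * balance is then reset to zero.
 */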