Repair some REINDEX problems per recent discussions.

diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index cda893fab75e007fdf010b8fe60e14fa49b217e2..e626848f12b6bf237d19fdbeef06b4cf35696c2b 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -8,12 +8,12 @@
  * vacuumlazy.c and analyze.c for the rest of the code for the latter two.
  *
  *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.235 2002/08/30 22:18:05 tgl Exp $
+ *       $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.260 2003/09/24 18:54:01 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -100,14 +100,11 @@ typedef struct VRelStats
 
 static MemoryContext vac_context = NULL;
 
-static int elevel = -1;
+static int     elevel = -1;
 
 static TransactionId OldestXmin;
 static TransactionId FreezeLimit;
 
-static TransactionId initialOldestXmin;
-static TransactionId initialFreezeLimit;
-
 
 /* non-export function prototypes */
 static List *getrels(const RangeVar *vacrel, const char *stmttype);
@@ -116,7 +113,7 @@ static void vac_update_dbstats(Oid dbid,
                                   TransactionId frozenXID);
 static void vac_truncate_clog(TransactionId vacuumXID,
                                  TransactionId frozenXID);
-static void vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind);
+static bool vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind);
 static void full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt);
 static void scan_heap(VRelStats *vacrelstats, Relation onerel,
                  VacPageList vacuum_pages, VacPageList fraged_pages);
@@ -160,13 +157,16 @@ vacuum(VacuumStmt *vacstmt)
 {
        const char *stmttype = vacstmt->vacuum ? "VACUUM" : "ANALYZE";
        MemoryContext anl_context = NULL;
+       TransactionId initialOldestXmin = InvalidTransactionId;
+       TransactionId initialFreezeLimit = InvalidTransactionId;
+       bool            all_rels;
        List       *vrl,
                           *cur;
 
        if (vacstmt->verbose)
                elevel = INFO;
        else
-               elevel = DEBUG1;
+               elevel = DEBUG2;
 
        /*
         * We cannot run VACUUM inside a user transaction block; if we were
@@ -177,12 +177,8 @@ vacuum(VacuumStmt *vacstmt)
         * user's transaction too, which would certainly not be the desired
         * behavior.
         */
-       if (vacstmt->vacuum && IsTransactionBlock())
-               elog(ERROR, "%s cannot run inside a BEGIN/END block", stmttype);
-
-       /* Running VACUUM from a function would free the function context */
-       if (vacstmt->vacuum && !MemoryContextContains(QueryContext, vacstmt))
-               elog(ERROR, "%s cannot be executed from a function", stmttype);
+       if (vacstmt->vacuum)
+               PreventTransactionChain((void *) vacstmt, stmttype);
 
        /*
         * Send info about dead objects to the statistics collector
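
PreventTransactionChain() replaces the two hand-rolled tests in the old code with a single xact.c entry point. A minimal sketch of the two cases such a guard has to cover, assuming the 7.4 memory-context layout (PortalContext) that this commit switches to; the real function in src/backend/access/transam/xact.c handles additional cases:

    static void
    prevent_xact_chain_sketch(void *stmtNode, const char *stmtType)
    {
        /* Case 1: inside an explicit BEGIN/END block */
        if (IsTransactionBlock())
            ereport(ERROR,
                    (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
                     errmsg("%s cannot run inside a transaction block",
                            stmtType)));

        /*
         * Case 2: the statement node was not palloc'd in PortalContext,
         * so we were probably reached from a function; committing here
         * would commit the caller's transaction too.
         */
        if (!MemoryContextContains(PortalContext, stmtNode))
            ereport(ERROR,
                    (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
                     errmsg("%s cannot be executed from a function",
                            stmtType)));
    }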
@@ -193,27 +189,31 @@ vacuum(VacuumStmt *vacstmt)
        /*
         * Create special memory context for cross-transaction storage.
         *
-        * Since it is a child of QueryContext, it will go away eventually even
+        * Since it is a child of PortalContext, it will go away eventually even
         * if we suffer an error; there's no need for special abort cleanup
         * logic.
         */
-       vac_context = AllocSetContextCreate(QueryContext,
+       vac_context = AllocSetContextCreate(PortalContext,
                                                                                "Vacuum",
                                                                                ALLOCSET_DEFAULT_MINSIZE,
                                                                                ALLOCSET_DEFAULT_INITSIZE,
                                                                                ALLOCSET_DEFAULT_MAXSIZE);
 
        /*
-        * If we are running only ANALYZE, we don't need per-table transactions,
-        * but we still need a memory context with table lifetime.
+        * If we are running only ANALYZE, we don't need per-table
+        * transactions, but we still need a memory context with table
+        * lifetime.
         */
        if (vacstmt->analyze && !vacstmt->vacuum)
-               anl_context = AllocSetContextCreate(QueryContext,
+               anl_context = AllocSetContextCreate(PortalContext,
                                                                                        "Analyze",
                                                                                        ALLOCSET_DEFAULT_MINSIZE,
                                                                                        ALLOCSET_DEFAULT_INITSIZE,
                                                                                        ALLOCSET_DEFAULT_MAXSIZE);
 
+       /* Assume we are processing everything unless one table is mentioned */
+       all_rels = (vacstmt->relation == NULL);
+
        /* Build list of relations to process (note this lives in vac_context) */
        vrl = getrels(vacstmt->relation, stmttype);
 
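
For reference, the cross-transaction memory-context lifecycle the code above relies on, as a sketch (the context name is illustrative): a child of PortalContext is reclaimed automatically if an error aborts processing, so no special cleanup hooks are needed.

    MemoryContext mycontext;
    MemoryContext oldcontext;

    mycontext = AllocSetContextCreate(PortalContext,
                                      "MyWorkspace",
                                      ALLOCSET_DEFAULT_MINSIZE,
                                      ALLOCSET_DEFAULT_INITSIZE,
                                      ALLOCSET_DEFAULT_MAXSIZE);
    oldcontext = MemoryContextSwitchTo(mycontext);
    /* palloc() results made here survive transaction boundaries */
    MemoryContextSwitchTo(oldcontext);
    MemoryContextDelete(mycontext);     /* or let the portal clean it up */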
@@ -221,59 +221,61 @@ vacuum(VacuumStmt *vacstmt)
         * Formerly, there was code here to prevent more than one VACUUM from
         * executing concurrently in the same database.  However, there's no
         * good reason to prevent that, and manually removing lockfiles after
-        * a vacuum crash was a pain for dbadmins.  So, forget about lockfiles,
-        * and just rely on the locks we grab on each target table
+        * a vacuum crash was a pain for dbadmins.      So, forget about
+        * lockfiles, and just rely on the locks we grab on each target table
         * to ensure that there aren't two VACUUMs running on the same table
         * at the same time.
         */
 
        /*
-        * The strangeness with committing and starting transactions here is due
-        * to wanting to run each table's VACUUM as a separate transaction, so
-        * that we don't hold locks unnecessarily long.  Also, if we are doing
-        * VACUUM ANALYZE, the ANALYZE part runs as a separate transaction from
-        * the VACUUM to further reduce locking.
+        * The strangeness with committing and starting transactions here is
+        * due to wanting to run each table's VACUUM as a separate
+        * transaction, so that we don't hold locks unnecessarily long.  Also,
+        * if we are doing VACUUM ANALYZE, the ANALYZE part runs as a separate
+        * transaction from the VACUUM to further reduce locking.
         *
         * vacuum_rel expects to be entered with no transaction active; it will
         * start and commit its own transaction.  But we are called by an SQL
         * command, and so we are executing inside a transaction already.  We
         * commit the transaction started in PostgresMain() here, and start
-        * another one before exiting to match the commit waiting for us back in
-        * PostgresMain().
+        * another one before exiting to match the commit waiting for us back
+        * in PostgresMain().
         *
         * In the case of an ANALYZE statement (no vacuum, just analyze) it's
-        * okay to run the whole thing in the outer transaction, and so we skip
-        * transaction start/stop operations.
+        * okay to run the whole thing in the outer transaction, and so we
+        * skip transaction start/stop operations.
         */
        if (vacstmt->vacuum)
        {
-               if (vacstmt->relation == NULL)
+               if (all_rels)
                {
                        /*
                         * It's a database-wide VACUUM.
                         *
                         * Compute the initially applicable OldestXmin and FreezeLimit
                         * XIDs, so that we can record these values at the end of the
-                        * VACUUM. Note that individual tables may well be processed with
-                        * newer values, but we can guarantee that no (non-shared)
-                        * relations are processed with older ones.
+                        * VACUUM. Note that individual tables may well be processed
+                        * with newer values, but we can guarantee that no
+                        * (non-shared) relations are processed with older ones.
                         *
-                        * It is okay to record non-shared values in pg_database, even though
-                        * we may vacuum shared relations with older cutoffs, because only
-                        * the minimum of the values present in pg_database matters.  We
-                        * can be sure that shared relations have at some time been
-                        * vacuumed with cutoffs no worse than the global minimum; for, if
-                        * there is a backend in some other DB with xmin = OLDXMIN that's
-                        * determining the cutoff with which we vacuum shared relations,
-                        * it is not possible for that database to have a cutoff newer
-                        * than OLDXMIN recorded in pg_database.
+                        * It is okay to record non-shared values in pg_database, even
+                        * though we may vacuum shared relations with older cutoffs,
+                        * because only the minimum of the values present in
+                        * pg_database matters.  We can be sure that shared relations
+                        * have at some time been vacuumed with cutoffs no worse than
+                        * the global minimum; for, if there is a backend in some
+                        * other DB with xmin = OLDXMIN that's determining the cutoff
+                        * with which we vacuum shared relations, it is not possible
+                        * for that database to have a cutoff newer than OLDXMIN
+                        * recorded in pg_database.
                         */
                        vacuum_set_xid_limits(vacstmt, false,
-                                                                 &initialOldestXmin, &initialFreezeLimit);
+                                                                 &initialOldestXmin,
+                                                                 &initialFreezeLimit);
                }
 
                /* matches the StartTransaction in PostgresMain() */
-               CommitTransactionCommand(true);
+               CommitTransactionCommand();
        }
 
        /*
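
The commit/start "strangeness" the comment describes reduces to this skeleton (a hypothetical pseudo-driver, not the actual vacuum() control flow; table_list is a stand-in name):

    /* close the transaction PostgresMain() opened for us */
    CommitTransactionCommand();

    foreach(cur, table_list)
    {
        StartTransactionCommand();      /* one transaction per table... */
        /* ...vacuum one relation under its own lock... */
        CommitTransactionCommand();     /* ...so locks are held briefly */
    }

    /* matches the CommitTransaction waiting in PostgresMain() */
    StartTransactionCommand();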
@@ -281,29 +283,37 @@ vacuum(VacuumStmt *vacstmt)
         */
        foreach(cur, vrl)
        {
-               Oid             relid = (Oid) lfirsti(cur);
+               Oid                     relid = lfirsto(cur);
 
                if (vacstmt->vacuum)
-                       vacuum_rel(relid, vacstmt, RELKIND_RELATION);
+               {
+                       if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
+                               all_rels = false;               /* forget about updating dbstats */
+               }
                if (vacstmt->analyze)
                {
                        MemoryContext old_context = NULL;
 
                        /*
-                        * If we vacuumed, use new transaction for analyze.  Otherwise,
+                        * If we vacuumed, use new transaction for analyze. Otherwise,
                         * we can use the outer transaction, but we still need to call
                         * analyze_rel in a memory context that will be cleaned up on
-                        * return (else we leak memory while processing multiple tables).
+                        * return (else we leak memory while processing multiple
+                        * tables).
                         */
                        if (vacstmt->vacuum)
-                               StartTransactionCommand(true);
+                       {
+                               StartTransactionCommand();
+                               SetQuerySnapshot();             /* might be needed for functions
+                                                                                * in indexes */
+                       }
                        else
                                old_context = MemoryContextSwitchTo(anl_context);
 
                        analyze_rel(relid, vacstmt);
 
                        if (vacstmt->vacuum)
-                               CommitTransactionCommand(true);
+                               CommitTransactionCommand();
                        else
                        {
                                MemoryContextSwitchTo(old_context);
@@ -320,18 +330,24 @@ vacuum(VacuumStmt *vacstmt)
                /* here, we are not in a transaction */
 
                /*
-                * This matches the CommitTransaction waiting for us in PostgresMain().
-                * We tell xact.c not to chain the upcoming commit, so that a VACUUM
-                * doesn't start a transaction block, even when autocommit is off.
+                * This matches the CommitTransaction waiting for us in
+                * PostgresMain().
                 */
-               StartTransactionCommand(true);
+               StartTransactionCommand();
 
                /*
-                * If we did a database-wide VACUUM, update the database's pg_database
-                * row with info about the transaction IDs used, and try to truncate
-                * pg_clog.
+                * If it was a database-wide VACUUM, print FSM usage statistics
+                * (we don't make you be superuser to see these).
                 */
                if (vacstmt->relation == NULL)
+                       PrintFreeSpaceMapStatistics(elevel);
+
+               /*
+                * If we completed a database-wide VACUUM without skipping any
+                * relations, update the database's pg_database row with info
+                * about the transaction IDs used, and try to truncate pg_clog.
+                */
+               if (all_rels)
                {
                        vac_update_dbstats(MyDatabaseId,
                                                           initialOldestXmin, initialFreezeLimit);
@@ -366,13 +382,13 @@ getrels(const RangeVar *vacrel, const char *stmttype)
        if (vacrel)
        {
                /* Process specific relation */
-               Oid             relid;
+               Oid                     relid;
 
                relid = RangeVarGetRelid(vacrel, false);
 
                /* Make a relation list entry for this guy */
                oldcontext = MemoryContextSwitchTo(vac_context);
-               vrl = lappendi(vrl, relid);
+               vrl = lappendo(vrl, relid);
                MemoryContextSwitchTo(oldcontext);
        }
        else
@@ -396,8 +412,7 @@ getrels(const RangeVar *vacrel, const char *stmttype)
                {
                        /* Make a relation list entry for this guy */
                        oldcontext = MemoryContextSwitchTo(vac_context);
-                       AssertTupleDescHasOid(pgclass->rd_att);
-                       vrl = lappendi(vrl, HeapTupleGetOid(tuple));
+                       vrl = lappendo(vrl, HeapTupleGetOid(tuple));
                        MemoryContextSwitchTo(oldcontext);
                }
 
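
Both getrels() hunks swap the integer list primitives (lappendi/lfirsti) for the OID-typed ones, so OIDs are stored as OIDs rather than squeezed through int. The idiom, sketched:

    List       *vrl = NIL;
    List       *cur;

    vrl = lappendo(vrl, relid);             /* append an Oid */
    foreach(cur, vrl)
    {
        Oid         onereloid = lfirsto(cur);   /* fetch it back as an Oid */

        /* ... process onereloid ... */
    }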
@@ -447,7 +462,9 @@ vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
         */
        if (TransactionIdFollows(limit, *oldestXmin))
        {
-               elog(WARNING, "oldest Xmin is far in the past --- close open transactions soon to avoid wraparound problems");
+               ereport(WARNING,
+                               (errmsg("oldest Xmin is far in the past"),
+                                errhint("Close open transactions soon to avoid wraparound problems.")));
                limit = *oldestXmin;
        }
 
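
The TransactionIdFollows() test above stays valid across XID wraparound because normal XIDs compare circularly, by signed 32-bit distance. A sketch of the rule (the real TransactionIdPrecedes/TransactionIdFollows also special-case the permanent XIDs below FirstNormalTransactionId):

    static bool
    xid_precedes_sketch(TransactionId id1, TransactionId id2)
    {
        int32       diff = (int32) (id1 - id2);

        return (diff < 0);      /* id1 is "older", even across wraparound */
    }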
@@ -518,9 +535,9 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
 
        /*
         * Invalidate the tuple in the catcaches; this also arranges to flush
-        * the relation's relcache entry.  (If we fail to commit for some reason,
-        * no flush will occur, but no great harm is done since there are no
-        * noncritical state updates here.)
+        * the relation's relcache entry.  (If we fail to commit for some
+        * reason, no flush will occur, but no great harm is done since there
+        * are no noncritical state updates here.)
         */
        CacheInvalidateHeapTuple(rd, &rtup);
 
@@ -569,7 +586,7 @@ vac_update_dbstats(Oid dbid,
        tuple = heap_getnext(scan, ForwardScanDirection);
 
        if (!HeapTupleIsValid(tuple))
-               elog(ERROR, "database %u does not exist", dbid);
+               elog(ERROR, "could not find tuple for database %u", dbid);
 
        dbform = (Form_pg_database) GETSTRUCT(tuple);
 
@@ -648,13 +665,14 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
        heap_close(relation, AccessShareLock);
 
        /*
-        * Do not truncate CLOG if we seem to have suffered wraparound already;
-        * the computed minimum XID might be bogus.
+        * Do not truncate CLOG if we seem to have suffered wraparound
+        * already; the computed minimum XID might be bogus.
         */
        if (vacuumAlreadyWrapped)
        {
-               elog(WARNING, "Some databases have not been vacuumed in over 2 billion transactions."
-                        "\n\tYou may have already suffered transaction-wraparound data loss.");
+               ereport(WARNING,
+                               (errmsg("some databases have not been vacuumed in over 2 billion transactions"),
+                                errdetail("You may have already suffered transaction-wraparound data loss.")));
                return;
        }
 
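
This hunk shows the message-style conversion applied throughout the commit: elog() calls with embedded newlines become ereport() calls in which errmsg() carries a terse, lower-case primary line while errdetail() and errhint() carry complete sentences. An illustrative template, not actual source text:

    ereport(WARNING,
            (errmsg("terse primary message, lower case, no period"),
             errdetail("Factual detail, written as full sentences."),
             errhint("Advice to the user, also full sentences.")));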
@@ -664,17 +682,20 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
        /* Give warning about impending wraparound problems */
        if (frozenAlreadyWrapped)
        {
-               elog(WARNING, "Some databases have not been vacuumed in over 1 billion transactions."
-                        "\n\tBetter vacuum them soon, or you may have a wraparound failure.");
+               ereport(WARNING,
+                               (errmsg("some databases have not been vacuumed in over 1 billion transactions"),
+                                errhint("Better vacuum them soon, or you may have a wraparound failure.")));
        }
        else
        {
                age = (int32) (myXID - frozenXID);
                if (age > (int32) ((MaxTransactionId >> 3) * 3))
-                       elog(WARNING, "Some databases have not been vacuumed in %d transactions."
-                                "\n\tBetter vacuum them within %d transactions,"
-                                "\n\tor you may have a wraparound failure.",
-                                age, (int32) (MaxTransactionId >> 1) - age);
+                       ereport(WARNING,
+                                       (errmsg("some databases have not been vacuumed in %d transactions",
+                                                       age),
+                                        errhint("Better vacuum them within %d transactions, "
+                                                        "or you may have a wraparound failure.",
+                                                        (int32) (MaxTransactionId >> 1) - age)));
        }
 }
 
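
The warning thresholds in vac_truncate_clog() fall out of 32-bit XID arithmetic. A standalone snippet (illustrative only, substituting the literal value of MaxTransactionId) that works them out:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int max_xid = 0xFFFFFFFFu;             /* MaxTransactionId */
        int     warn_age = (int) ((max_xid >> 3) * 3);  /* 1610612733 */
        int     horizon = (int) (max_xid >> 1);         /* 2147483647 */

        printf("warn once a database's age exceeds %d transactions\n",
               warn_age);
        printf("suggest vacuuming within %d transactions\n",
               horizon - warn_age);
        return 0;
    }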
@@ -690,6 +711,11 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
 /*
  *     vacuum_rel() -- vacuum one heap relation
  *
+ *             Returns TRUE if we actually processed the relation (or can ignore it
+ *             for some reason), FALSE if we failed to process it due to permissions
+ *             or other reasons.  (A FALSE result really means that some data
+ *             may have been left unvacuumed, so we can't update XID stats.)
+ *
  *             Doing one heap at a time incurs extra overhead, since we need to
  *             check that the heap exists again just before we vacuum it.      The
  *             reason that we do this is so that vacuuming can be spread across
@@ -698,16 +724,19 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
  *
  *             At entry and exit, we are not inside a transaction.
  */
-static void
+static bool
 vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
 {
        LOCKMODE        lmode;
        Relation        onerel;
        LockRelId       onerelid;
        Oid                     toast_relid;
+       bool            result;
 
        /* Begin a transaction for vacuuming this relation */
-       StartTransactionCommand(true);
+       StartTransactionCommand();
+       SetQuerySnapshot();                     /* might be needed for functions in
+                                                                * indexes */
 
        /*
         * Check for user-requested abort.      Note we want this to be inside a
@@ -723,8 +752,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
                                                          ObjectIdGetDatum(relid),
                                                          0, 0, 0))
        {
-               CommitTransactionCommand(true);
-               return;
+               CommitTransactionCommand();
+               return true;                    /* okay 'cause no data there */
        }
 
        /*
@@ -741,7 +770,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
         *
         * We allow the user to vacuum a table if he is superuser, the table
         * owner, or the database owner (but in the latter case, only if it's
-        * not a shared relation).      pg_class_ownercheck includes the superuser case.
+        * not a shared relation).      pg_class_ownercheck includes the superuser
+        * case.
         *
         * Note we choose to treat permissions failure as a WARNING and keep
         * trying to vacuum the rest of the DB --- is this appropriate?
@@ -749,13 +779,14 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
        onerel = relation_open(relid, lmode);
 
        if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
-                 (is_dbadmin(MyDatabaseId) && !onerel->rd_rel->relisshared)))
+                 (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared)))
        {
-               elog(WARNING, "Skipping \"%s\" --- only table or database owner can VACUUM it",
-                        RelationGetRelationName(onerel));
+               ereport(WARNING,
+                               (errmsg("skipping \"%s\" --- only table or database owner can VACUUM it",
+                                               RelationGetRelationName(onerel))));
                relation_close(onerel, lmode);
-               CommitTransactionCommand(true);
-               return;
+               CommitTransactionCommand();
+               return false;
        }
 
        /*
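
Condensing the rule from the comment and the check above ("allowed" is a name introduced here for illustration): superusers and table owners may always vacuum; database owners may vacuum anything in their database except shared catalogs.

    bool        allowed;

    allowed = pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
              (pg_database_ownercheck(MyDatabaseId, GetUserId()) &&
               !onerel->rd_rel->relisshared);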
@@ -764,11 +795,27 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
         */
        if (onerel->rd_rel->relkind != expected_relkind)
        {
-               elog(WARNING, "Skipping \"%s\" --- can not process indexes, views or special system tables",
-                        RelationGetRelationName(onerel));
+               ereport(WARNING,
+                               (errmsg("skipping \"%s\" --- cannot VACUUM indexes, views or special system tables",
+                                               RelationGetRelationName(onerel))));
                relation_close(onerel, lmode);
-               CommitTransactionCommand(true);
-               return;
+               CommitTransactionCommand();
+               return false;
+       }
+
+       /*
+        * Silently ignore tables that are temp tables of other backends ---
+        * trying to vacuum these will lead to great unhappiness, since their
+        * contents are probably not up-to-date on disk.  (We don't throw a
+        * warning here; it would just lead to chatter during a database-wide
+        * VACUUM.)
+        */
+       if (isOtherTempNamespace(RelationGetNamespace(onerel)))
+       {
+               relation_close(onerel, lmode);
+               CommitTransactionCommand();
+               return true;                    /* assume no long-lived data in temp
+                                                                * tables */
        }
 
        /*
@@ -797,13 +844,15 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
        else
                lazy_vacuum_rel(onerel, vacstmt);
 
+       result = true;                          /* did the vacuum */
+
        /* all done with this class, but hold lock until commit */
        relation_close(onerel, NoLock);
 
        /*
         * Complete the transaction and free all temporary memory used.
         */
-       CommitTransactionCommand(true);
+       CommitTransactionCommand();
 
        /*
         * If the relation has a secondary toast rel, vacuum that too while we
@@ -813,12 +862,17 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
         * statistics are totally unimportant for toast relations.
         */
        if (toast_relid != InvalidOid)
-               vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE);
+       {
+               if (!vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
+                       result = false;         /* failed to vacuum the TOAST table? */
+       }
 
        /*
         * Now release the session-level lock on the master table.
         */
        UnlockRelationForSession(&onerelid, lmode);
+
+       return result;
 }
 
 
@@ -850,11 +904,6 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
        int                     nindexes,
                                i;
        VRelStats  *vacrelstats;
-       bool            reindex = false;
-
-       if (IsIgnoringSystemIndexes() &&
-               IsSystemRelation(onerel))
-               reindex = true;
 
        vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
                                                  &OldestXmin, &FreezeLimit);
@@ -873,27 +922,9 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
 
        /* Now open all indexes of the relation */
        vac_open_indexes(onerel, &nindexes, &Irel);
-       if (!Irel)
-               reindex = false;
-       else if (!RelationGetForm(onerel)->relhasindex)
-               reindex = true;
        if (nindexes > 0)
                vacrelstats->hasindex = true;
 
-#ifdef NOT_USED
-
-       /*
-        * reindex in VACUUM is dangerous under WAL. ifdef out until it
-        * becomes safe.
-        */
-       if (reindex)
-       {
-               vac_close_indexes(nindexes, Irel);
-               Irel = (Relation *) NULL;
-               activate_indexes_of_a_table(RelationGetRelid(onerel), false);
-       }
-#endif   /* NOT_USED */
-
        /* Clean/scan index relation(s) */
        if (Irel != (Relation *) NULL)
        {
@@ -936,16 +967,10 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
                         */
                        i = FlushRelationBuffers(onerel, vacrelstats->rel_pages);
                        if (i < 0)
-                               elog(ERROR, "VACUUM (full_vacuum_rel): FlushRelationBuffers returned %d",
-                                        i);
+                               elog(ERROR, "FlushRelationBuffers returned %d", i);
                }
        }
 
-#ifdef NOT_USED
-       if (reindex)
-               activate_indexes_of_a_table(RelationGetRelid(onerel), true);
-#endif   /* NOT_USED */
-
        /* update shared free space map with final free space info */
        vac_update_fsm(onerel, &fraged_pages, vacrelstats->rel_pages);
 
@@ -982,15 +1007,13 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
        VacPage         vacpage,
                                vacpagecopy;
        BlockNumber empty_pages,
-                               new_pages,
-                               changed_pages,
                                empty_end_pages;
        double          num_tuples,
                                tups_vacuumed,
                                nkeep,
                                nunused;
-       double          free_size,
-                               usable_free_size;
+       double          free_space,
+                               usable_free_space;
        Size            min_tlen = MaxTupleSize;
        Size            max_tlen = 0;
        int                     i;
@@ -1003,13 +1026,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
        vac_init_rusage(&ru0);
 
        relname = RelationGetRelationName(onerel);
-       elog(elevel, "--Relation %s.%s--",
-                get_namespace_name(RelationGetNamespace(onerel)),
-                relname);
+       ereport(elevel,
+                       (errmsg("vacuuming \"%s.%s\"",
+                                       get_namespace_name(RelationGetNamespace(onerel)),
+                                       relname)));
 
-       empty_pages = new_pages = changed_pages = empty_end_pages = 0;
+       empty_pages = empty_end_pages = 0;
        num_tuples = tups_vacuumed = nkeep = nunused = 0;
-       free_size = 0;
+       free_space = 0;
 
        nblocks = RelationGetNumberOfBlocks(onerel);
 
@@ -1037,12 +1061,13 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
 
                if (PageIsNew(page))
                {
-                       elog(WARNING, "Rel %s: Uninitialized page %u - fixing",
-                                relname, blkno);
+                       ereport(WARNING,
+                       (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+                                       relname, blkno)));
                        PageInit(page, BufferGetPageSize(buf), 0);
                        vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
-                       free_size += (vacpage->free - sizeof(ItemIdData));
-                       new_pages++;
+                       free_space += vacpage->free;
+                       empty_pages++;
                        empty_end_pages++;
                        vacpagecopy = copy_vac_page(vacpage);
                        vpage_insert(vacuum_pages, vacpagecopy);
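
Both page hunks compute per-page free space the same way: on a slotted heap page it is the hole between the line-pointer array, which grows up from pd_lower, and the tuple data, which grows down from pd_upper. Sketch:

    Size        page_free;

    page_free = ((PageHeader) page)->pd_upper -
                ((PageHeader) page)->pd_lower;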
@@ -1054,7 +1079,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                if (PageIsEmpty(page))
                {
                        vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
-                       free_size += (vacpage->free - sizeof(ItemIdData));
+                       free_space += vacpage->free;
                        empty_pages++;
                        empty_end_pages++;
                        vacpagecopy = copy_vac_page(vacpage);
@@ -1150,9 +1175,12 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                        /*
                                         * This should not happen, since we hold exclusive
                                         * lock on the relation; shouldn't we raise an error?
+                                        * (Actually, it can happen in system catalogs, since
+                                        * we tend to release write lock before commit there.)
                                         */
-                                       elog(WARNING, "Rel %s: TID %u/%u: InsertTransactionInProgress %u - can't shrink relation",
-                                                relname, blkno, offnum, HeapTupleHeaderGetXmin(tuple.t_data));
+                                       ereport(NOTICE,
+                                                       (errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
+                                                                       relname, blkno, offnum, HeapTupleHeaderGetXmin(tuple.t_data))));
                                        do_shrinking = false;
                                        break;
                                case HEAPTUPLE_DELETE_IN_PROGRESS:
@@ -1160,13 +1188,16 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                        /*
                                         * This should not happen, since we hold exclusive
                                         * lock on the relation; shouldn't we raise an error?
+                                        * (Actually, it can happen in system catalogs, since
+                                        * we tend to release write lock before commit there.)
                                         */
-                                       elog(WARNING, "Rel %s: TID %u/%u: DeleteTransactionInProgress %u - can't shrink relation",
-                                                relname, blkno, offnum, HeapTupleHeaderGetXmax(tuple.t_data));
+                                       ereport(NOTICE,
+                                                       (errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
+                                                                       relname, blkno, offnum, HeapTupleHeaderGetXmax(tuple.t_data))));
                                        do_shrinking = false;
                                        break;
                                default:
-                                       elog(ERROR, "Unexpected HeapTupleSatisfiesVacuum result");
+                                       elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
                                        break;
                        }
 
@@ -1179,8 +1210,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                         */
                        if (onerel->rd_rel->relhasoids &&
                                !OidIsValid(HeapTupleGetOid(&tuple)))
-                               elog(WARNING, "Rel %s: TID %u/%u: OID IS INVALID. TUPGONE %d.",
-                                        relname, blkno, offnum, (int) tupgone);
+                               elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
+                                        relname, blkno, offnum);
 
                        if (tupgone)
                        {
@@ -1237,7 +1268,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                        do_reap = (vacpage->offsets_free > 0);
                }
 
-               free_size += vacpage->free;
+               free_space += vacpage->free;
 
                /*
                 * Add the page to fraged_pages if it has a useful amount of free
@@ -1256,16 +1287,21 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                vpage_insert(fraged_pages, vacpagecopy);
                }
 
+               /*
+                * Include the page in empty_end_pages if it will be empty after
+                * vacuuming; this is to keep us from using it as a move
+                * destination.
+                */
                if (notup)
+               {
+                       empty_pages++;
                        empty_end_pages++;
+               }
                else
                        empty_end_pages = 0;
 
                if (pgchanged)
-               {
                        WriteBuffer(buf);
-                       changed_pages++;
-               }
                else
                        ReleaseBuffer(buf);
        }
@@ -1292,14 +1328,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
        {
                Assert((BlockNumber) fraged_pages->num_pages >= empty_end_pages);
                fraged_pages->num_pages -= empty_end_pages;
-               usable_free_size = 0;
+               usable_free_space = 0;
                for (i = 0; i < fraged_pages->num_pages; i++)
-                       usable_free_size += fraged_pages->pagedesc[i]->free;
+                       usable_free_space += fraged_pages->pagedesc[i]->free;
        }
        else
        {
                fraged_pages->num_pages = 0;
-               usable_free_size = 0;
+               usable_free_space = 0;
        }
 
        /* don't bother to save vtlinks if we will not call repair_frag */
@@ -1317,16 +1353,24 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                pfree(vtlinks);
        }
 
-       elog(elevel, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; \
-Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, MaxLen %lu; \
-Re-using: Free/Avail. Space %.0f/%.0f; EndEmpty/Avail. Pages %u/%u.\n\t%s",
-                nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
-                new_pages, num_tuples, tups_vacuumed,
-                nkeep, vacrelstats->num_vtlinks,
-                nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
-                free_size, usable_free_size,
-                empty_end_pages, fraged_pages->num_pages,
-                vac_show_rusage(&ru0));
+       ereport(elevel,
+                       (errmsg("\"%s\": found %.0f removable, %.0f nonremovable tuples in %u pages",
+                                       RelationGetRelationName(onerel),
+                                       tups_vacuumed, num_tuples, nblocks),
+                        errdetail("%.0f dead tuples cannot be removed yet.\n"
+                               "Nonremovable tuples range from %lu to %lu bytes long.\n"
+                                          "There were %.0f unused item pointers.\n"
+                "Total free space (including removable tuples) is %.0f bytes.\n"
+                                          "%u pages are or will become empty, including %u at the end of the table.\n"
+                                          "%u pages containing %.0f free bytes are potential move destinations.\n"
+                                          "%s",
+                                          nkeep,
+                                          (unsigned long) min_tlen, (unsigned long) max_tlen,
+                                          nunused,
+                                          free_space,
+                                          empty_pages, empty_end_pages,
+                                          fraged_pages->num_pages, usable_free_space,
+                                          vac_show_rusage(&ru0))));
 }
 
 
@@ -1398,6 +1442,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
         * We need a ResultRelInfo and an EState so we can use the regular
         * executor's index-entry-making machinery.
         */
+       estate = CreateExecutorState();
+
        resultRelInfo = makeNode(ResultRelInfo);
        resultRelInfo->ri_RangeTableIndex = 1;          /* dummy */
        resultRelInfo->ri_RelationDesc = onerel;
@@ -1405,7 +1451,6 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
 
        ExecOpenIndices(resultRelInfo);
 
-       estate = CreateExecutorState();
        estate->es_result_relations = resultRelInfo;
        estate->es_num_result_relations = 1;
        estate->es_result_relation_info = resultRelInfo;
@@ -1549,7 +1594,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                                {
                                        if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
-                                               elog(ERROR, "Invalid XVAC in tuple header");
+                                               elog(ERROR, "invalid XVAC in tuple header");
                                        if (keep_tuples == 0)
                                                continue;
                                        if (chain_tuple_moved)          /* some chains was moved
@@ -1582,21 +1627,23 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                         * by "recent" transactions then we have to move all chain of
                         * tuples to another places.
                         *
-                        * NOTE: this test is not 100% accurate: it is possible for
-                        * tuple to be an updated one with recent xmin, and yet not
-                        * have a corresponding tuple in the vtlinks list.  Presumably
+                        * NOTE: this test is not 100% accurate: it is possible for a
+                        * tuple to be an updated one with recent xmin, and yet not
+                        * have a corresponding tuple in the vtlinks list.      Presumably
                         * there was once a parent tuple with xmax matching the xmin,
                         * but it's possible that that tuple has been removed --- for
-                        * example, if it had xmin = xmax then HeapTupleSatisfiesVacuum
-                        * would deem it removable as soon as the xmin xact completes.
+                        * example, if it had xmin = xmax then
+                        * HeapTupleSatisfiesVacuum would deem it removable as soon as
+                        * the xmin xact completes.
                         *
                         * To be on the safe side, we abandon the repair_frag process if
-                        * we cannot find the parent tuple in vtlinks.  This may be overly
-                        * conservative; AFAICS it would be safe to move the chain.
+                        * we cannot find the parent tuple in vtlinks.  This may be
+                        * overly conservative; AFAICS it would be safe to move the
+                        * chain.
                         */
                        if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
                         !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
-                                               OldestXmin)) ||
+                                                                       OldestXmin)) ||
                                (!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID |
                                                                                           HEAP_MARKED_FOR_UPDATE)) &&
                                 !(ItemPointerEquals(&(tuple.t_self),
@@ -1626,8 +1673,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                /* Quick exit if we have no vtlinks to search in */
                                if (vacrelstats->vtlinks == NULL)
                                {
-                                       elog(WARNING, "Parent item in update-chain not found - can't continue repair_frag");
-                                       break;  /* out of walk-along-page loop */
+                                       elog(DEBUG2, "parent item in update-chain not found --- can't continue repair_frag");
+                                       break;          /* out of walk-along-page loop */
                                }
 
                                vtmove = (VTupleMove) palloc(100 * sizeof(VTupleMoveData));
@@ -1639,7 +1686,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                 * we have to move to the end of chain.
                                 */
                                while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
-                                                                                                 HEAP_MARKED_FOR_UPDATE)) &&
+                                                                                         HEAP_MARKED_FOR_UPDATE)) &&
                                           !(ItemPointerEquals(&(tp.t_self),
                                                                                   &(tp.t_data->t_ctid))))
                                {
@@ -1663,7 +1710,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                 * in scan_heap(), but it's not implemented at the
                                                 * moment and so we just stop shrinking here.
                                                 */
-                                               elog(WARNING, "Child itemid in update-chain marked as unused - can't continue repair_frag");
+                                               elog(DEBUG2, "child itemid in update-chain marked as unused --- can't continue repair_frag");
                                                chain_move_failed = true;
                                                break;  /* out of loop to move to chain end */
                                        }
@@ -1705,14 +1752,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                {
                                                        /* can't move item anywhere */
                                                        chain_move_failed = true;
-                                                       break; /* out of check-all-items loop */
+                                                       break;          /* out of check-all-items loop */
                                                }
                                                to_item = i;
                                                to_vacpage = fraged_pages->pagedesc[to_item];
                                        }
                                        to_vacpage->free -= MAXALIGN(tlen);
                                        if (to_vacpage->offsets_used >= to_vacpage->offsets_free)
-                                               to_vacpage->free -= MAXALIGN(sizeof(ItemIdData));
+                                               to_vacpage->free -= sizeof(ItemIdData);
                                        (to_vacpage->offsets_used)++;
                                        if (free_vtmove == 0)
                                        {
@@ -1733,8 +1780,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
 
                                        /* At beginning of chain? */
                                        if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
-                                           TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
-                                                                 OldestXmin))
+                                               TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
+                                                                                         OldestXmin))
                                                break;
 
                                        /* No, move to tuple with prior row version */
@@ -1748,19 +1795,19 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                        if (vtlp == NULL)
                                        {
                                                /* see discussion above */
-                                               elog(WARNING, "Parent item in update-chain not found - can't continue repair_frag");
+                                               elog(DEBUG2, "parent item in update-chain not found --- can't continue repair_frag");
                                                chain_move_failed = true;
-                                               break; /* out of check-all-items loop */
+                                               break;  /* out of check-all-items loop */
                                        }
                                        tp.t_self = vtlp->this_tid;
                                        Pbuf = ReadBuffer(onerel,
-                                                                         ItemPointerGetBlockNumber(&(tp.t_self)));
+                                                               ItemPointerGetBlockNumber(&(tp.t_self)));
                                        Ppage = BufferGetPage(Pbuf);
                                        Pitemid = PageGetItemId(Ppage,
-                                                                                       ItemPointerGetOffsetNumber(&(tp.t_self)));
+                                                          ItemPointerGetOffsetNumber(&(tp.t_self)));
                                        /* this can't happen since we saw tuple earlier: */
                                        if (!ItemIdIsUsed(Pitemid))
-                                               elog(ERROR, "Parent itemid marked as unused");
+                                               elog(ERROR, "parent itemid marked as unused");
                                        Ptp.t_datamcxt = NULL;
                                        Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
 
@@ -1769,25 +1816,24 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                                                         &(Ptp.t_data->t_ctid)));
 
                                        /*
-                                        * Read above about cases when
-                                        * !ItemIdIsUsed(Citemid) (child item is
-                                        * removed)... Due to the fact that at the moment
-                                        * we don't remove unuseful part of update-chain,
-                                        * it's possible to get too old parent row here.
-                                        * Like as in the case which caused this problem,
-                                        * we stop shrinking here. I could try to find
-                                        * real parent row but want not to do it because
-                                        * of real solution will be implemented anyway,
-                                        * later, and we are too close to 6.5 release. -
-                                        * vadim 06/11/99
+                                        * Read above about cases when !ItemIdIsUsed(Citemid)
+                                        * (child item is removed)... Due to the fact that at
+                                        * the moment we don't remove unuseful part of
+                                        * update-chain, it's possible to get too old parent
+                                        * row here. Like as in the case which caused this
+                                        * problem, we stop shrinking here. I could try to
+                                        * find real parent row but want not to do it because
+                                        * of real solution will be implemented anyway, later,
+                                        * and we are too close to 6.5 release. - vadim
+                                        * 06/11/99
                                         */
                                        if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
-                                                                                         HeapTupleHeaderGetXmin(tp.t_data))))
+                                                                        HeapTupleHeaderGetXmin(tp.t_data))))
                                        {
                                                ReleaseBuffer(Pbuf);
-                                               elog(WARNING, "Too old parent tuple found - can't continue repair_frag");
+                                               elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag");
                                                chain_move_failed = true;
-                                               break; /* out of check-all-items loop */
+                                               break;  /* out of check-all-items loop */
                                        }
                                        tp.t_datamcxt = Ptp.t_datamcxt;
                                        tp.t_data = Ptp.t_data;
@@ -1796,7 +1842,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                ReleaseBuffer(Cbuf);
                                        Cbuf = Pbuf;
                                        freeCbuf = true;
-                               } /* end of check-all-items loop */
+                               }                               /* end of check-all-items loop */
 
                                if (freeCbuf)
                                        ReleaseBuffer(Cbuf);
@@ -1805,9 +1851,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                if (chain_move_failed)
                                {
                                        /*
-                                        * Undo changes to offsets_used state.  We don't bother
-                                        * cleaning up the amount-free state, since we're not
-                                        * going to do any further tuple motion.
+                                        * Undo changes to offsets_used state.  We don't
+                                        * bother cleaning up the amount-free state, since
+                                        * we're not going to do any further tuple motion.
                                         */
                                        for (i = 0; i < num_vtmove; i++)
                                        {
@@ -1858,11 +1904,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                         */
                                        CacheInvalidateHeapTuple(onerel, &tuple);
 
-                                       /* NO ELOG(ERROR) TILL CHANGES ARE LOGGED */
+                                       /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
                                        START_CRIT_SECTION();
 
-                                       tuple.t_data->t_infomask &=
-                                               ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
+                                       tuple.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
+                                                                                                 HEAP_XMIN_INVALID |
+                                                                                                 HEAP_MOVED_IN);
                                        tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
                                        HeapTupleHeaderSetXvac(tuple.t_data, myXID);
 
@@ -1901,15 +1948,19 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                         * Update the state of the copied tuple, and store it
                                         * on the destination page.
                                         */
-                                       newtup.t_data->t_infomask &=
-                                               ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
+                                       newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
+                                                                                                  HEAP_XMIN_INVALID |
+                                                                                                  HEAP_MOVED_OFF);
                                        newtup.t_data->t_infomask |= HEAP_MOVED_IN;
                                        HeapTupleHeaderSetXvac(newtup.t_data, myXID);
-                                       newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
-                                                                                InvalidOffsetNumber, LP_USED);
+                                       newoff = PageAddItem(ToPage,
+                                                                                (Item) newtup.t_data,
+                                                                                tuple_len,
+                                                                                InvalidOffsetNumber,
+                                                                                LP_USED);
                                        if (newoff == InvalidOffsetNumber)
                                        {
-                                               elog(PANIC, "moving chain: failed to add item with len = %lu to page %u",
+                                               elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain",
                                                  (unsigned long) tuple_len, destvacpage->blkno);
                                        }
                                        newitemid = PageGetItemId(ToPage, newoff);
@@ -1935,7 +1986,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                        }
                                        else
                                        {
-                                               /* No XLOG record, but still need to flag that XID exists on disk */
+                                               /*
+                                                * No XLOG record, but still need to flag that XID
+                                                * exists on disk
+                                                */
                                                MyXactMadeTempRelUpdate = true;
                                        }
 
@@ -1981,7 +2035,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
 
                                        WriteBuffer(cur_buffer);
                                        WriteBuffer(Cbuf);
-                               } /* end of move-the-tuple-chain loop */
+                               }                               /* end of move-the-tuple-chain loop */
 
                                cur_buffer = InvalidBuffer;
                                pfree(vtmove);
@@ -1989,7 +2043,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
 
                                /* advance to next tuple in walk-along-page loop */
                                continue;
-                       } /* end of is-tuple-in-chain test */
+                       }                                       /* end of is-tuple-in-chain test */
 
                        /* try to find new page for this tuple */
                        if (cur_buffer == InvalidBuffer ||
@@ -2027,22 +2081,21 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                        /*
                         * register invalidation of source tuple in catcaches.
                         *
-                        * (Note: we do not need to register the copied tuple,
-                        * because we are not changing the tuple contents and
-                        * so there cannot be any need to flush negative
-                        * catcache entries.)
+                        * (Note: we do not need to register the copied tuple, because we
+                        * are not changing the tuple contents and so there cannot be
+                        * any need to flush negative catcache entries.)
                         */
                        CacheInvalidateHeapTuple(onerel, &tuple);
 
-                       /* NO ELOG(ERROR) TILL CHANGES ARE LOGGED */
+                       /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
                        START_CRIT_SECTION();
 
                        /*
-                        * Mark new tuple as moved_in by vacuum and store vacuum XID
-                        * in t_cid !!!
+                        * Mark new tuple as MOVED_IN by me.
                         */
-                       newtup.t_data->t_infomask &=
-                               ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
+                       newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
+                                                                                  HEAP_XMIN_INVALID |
+                                                                                  HEAP_MOVED_OFF);
                        newtup.t_data->t_infomask |= HEAP_MOVED_IN;
                        HeapTupleHeaderSetXvac(newtup.t_data, myXID);
 
@@ -2064,11 +2117,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                        newtup.t_self = newtup.t_data->t_ctid;
 
                        /*
-                        * Mark old tuple as moved_off by vacuum and store vacuum XID
-                        * in t_cid !!!
+                        * Mark old tuple as MOVED_OFF by me.
                         */
-                       tuple.t_data->t_infomask &=
-                               ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
+                       tuple.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
+                                                                                 HEAP_XMIN_INVALID |
+                                                                                 HEAP_MOVED_IN);
                        tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
                        HeapTupleHeaderSetXvac(tuple.t_data, myXID);
 
@@ -2086,7 +2139,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                        }
                        else
                        {
-                               /* No XLOG record, but still need to flag that XID exists on disk */
+                               /*
+                                * No XLOG record, but still need to flag that XID exists
+                                * on disk
+                                */
                                MyXactMadeTempRelUpdate = true;
                        }
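This WAL-or-flag branch appears after every data change in repair_frag. Condensed, its full shape is the following sketch (log_heap_move's argument list mirrors the calls made elsewhere in this function; treat it as illustrative, not verbatim):

    if (!onerel->rd_istemp)
    {
        XLogRecPtr  recptr;

        recptr = log_heap_move(onerel, Cbuf, tuple.t_self,
                               cur_buffer, &newtup);
        /* stamp both source and destination pages for recovery */
        PageSetLSN(Cpage, recptr);
        PageSetSUI(Cpage, ThisStartUpID);
        PageSetLSN(ToPage, recptr);
        PageSetSUI(ToPage, ThisStartUpID);
    }
    else
    {
        /* temp rels are never WAL-logged, but the XID is now on disk */
        MyXactMadeTempRelUpdate = true;
    }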
 
@@ -2112,8 +2168,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                }                                               /* walk along page */
 
                /*
-                * If we broke out of the walk-along-page loop early (ie, still have
-                * offnum <= maxoff), then we failed to move some tuple off
+                * If we broke out of the walk-along-page loop early (ie, still
+                * have offnum <= maxoff), then we failed to move some tuple off
                 * this page.  No point in shrinking any more, so clean up and
                 * exit the per-page loop.
                 */
@@ -2122,7 +2178,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                        OffsetNumber off;
 
                        /*
-                        * Fix vacpage state for any unvisited tuples remaining on page
+                        * Fix vacpage state for any unvisited tuples remaining on
+                        * page
                         */
                        for (off = OffsetNumberNext(offnum);
                                 off <= maxoff;
@@ -2136,11 +2193,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                if (tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
                                        continue;
                                if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
-                                       elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
+                                       elog(ERROR, "HEAP_MOVED_IN was not expected");
                                if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                                {
                                        if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
-                                               elog(ERROR, "Invalid XVAC in tuple header (4)");
+                                               elog(ERROR, "invalid XVAC in tuple header");
                                        /* some chains were moved while */
                                        if (chain_tuple_moved)
                                        {                       /* cleaning this page */
@@ -2165,7 +2222,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                        }
                                }
                                else
-                                       elog(ERROR, "HEAP_MOVED_OFF was expected (2)");
+                                       elog(ERROR, "HEAP_MOVED_OFF was expected");
                        }
                }
 
@@ -2242,7 +2299,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
         * status bits.  This is not really necessary, but will save time for
         * future transactions examining these tuples.
         *
-        * XXX WARNING that this code fails to clear HEAP_MOVED_OFF tuples from
+        * XXX NOTICE that this code fails to clear HEAP_MOVED_OFF tuples from
         * pages that were move source pages but not move dest pages.  One
         * also wonders whether it wouldn't be better to skip this step and
         * let the tuple status updates happen someplace that's not holding an
@@ -2278,7 +2335,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                if (!(tuple.t_data->t_infomask & HEAP_MOVED))
                                        elog(ERROR, "HEAP_MOVED_OFF/HEAP_MOVED_IN was expected");
                                if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
-                                       elog(ERROR, "Invalid XVAC in tuple header (2)");
+                                       elog(ERROR, "invalid XVAC in tuple header");
                                if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
                                {
                                        tuple.t_data->t_infomask |= HEAP_XMIN_COMMITTED;
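The matching else branch falls outside this hunk's context; the second pass resolves the two flags as sketched below (assumed shape, consistent with the checks above): a MOVED_IN copy becomes plainly visible, a MOVED_OFF original plainly dead.

    if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
        tuple.t_data->t_infomask |= HEAP_XMIN_COMMITTED;  /* copy is live */
    else
        tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;    /* original is dead */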
@@ -2296,10 +2353,18 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
        }
        Assert(num_moved == checked_moved);
 
-       elog(elevel, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s",
-                RelationGetRelationName(onerel),
-                nblocks, blkno, num_moved,
-                vac_show_rusage(&ru0));
+       /*
+        * It'd be cleaner to make this report at the bottom of this routine,
+        * but then the rusage would double-count the second pass of index
+        * vacuuming.  So do it here and ignore the relatively small amount of
+        * processing that occurs below.
+        */
+       ereport(elevel,
+                       (errmsg("\"%s\": moved %u tuples, truncated %u to %u pages",
+                                       RelationGetRelationName(onerel),
+                                       num_moved, nblocks, blkno),
+                        errdetail("%s",
+                                          vac_show_rusage(&ru0))));
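The converted messages split the headline (errmsg) from resource accounting (errdetail). The rusage bracket is the same everywhere in this file: snapshot at the start of a phase, render at the report. A minimal sketch (the message text here is illustrative only):

    VacRUsage   ru0;

    vac_init_rusage(&ru0);          /* snapshot CPU/elapsed at phase start */
    /* ... perform the phase's work ... */
    ereport(elevel,
            (errmsg("phase done"),  /* illustrative headline */
             errdetail("%s", vac_show_rusage(&ru0))));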
 
        /*
         * Reflect the motion of system tuples to catalog cache here.
@@ -2334,8 +2399,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                if (vacpage->blkno == (blkno - 1) &&
                        vacpage->offsets_free > 0)
                {
-                       OffsetNumber unbuf[BLCKSZ / sizeof(OffsetNumber)];
-                       OffsetNumber *unused = unbuf;
+                       OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
                        int                     uncnt;
 
                        buf = ReadBuffer(onerel, vacpage->blkno);
@@ -2358,12 +2422,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                        if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                                        {
                                                if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
-                                                       elog(ERROR, "Invalid XVAC in tuple header (3)");
+                                                       elog(ERROR, "invalid XVAC in tuple header");
                                                itemid->lp_flags &= ~LP_USED;
                                                num_tuples++;
                                        }
                                        else
-                                               elog(ERROR, "HEAP_MOVED_OFF was expected (3)");
+                                               elog(ERROR, "HEAP_MOVED_OFF was expected");
                                }
 
                        }
@@ -2378,14 +2442,16 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                        {
                                XLogRecPtr      recptr;
 
-                               recptr = log_heap_clean(onerel, buf, (char *) unused,
-                                                 (char *) (&(unused[uncnt])) - (char *) unused);
+                               recptr = log_heap_clean(onerel, buf, unused, uncnt);
                                PageSetLSN(page, recptr);
                                PageSetSUI(page, ThisStartUpID);
                        }
                        else
                        {
-                               /* No XLOG record, but still need to flag that XID exists on disk */
+                               /*
+                                * No XLOG record, but still need to flag that XID exists
+                                * on disk
+                                */
                                MyXactMadeTempRelUpdate = true;
                        }
 
@@ -2410,8 +2476,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
         */
        i = FlushRelationBuffers(onerel, blkno);
        if (i < 0)
-               elog(ERROR, "VACUUM (repair_frag): FlushRelationBuffers returned %d",
-                        i);
+               elog(ERROR, "FlushRelationBuffers returned %d", i);
 
        /* truncate relation, if needed */
        if (blkno < nblocks)
@@ -2430,6 +2495,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
        ExecDropTupleTable(tupleTable, true);
 
        ExecCloseIndices(resultRelInfo);
+
+       FreeExecutorState(estate);
 }
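The added FreeExecutorState call closes the executor-state bracket that repair_frag uses for index insertions. Abridged, the lifecycle looks like this (names as in the patch, intermediate steps elided):

    EState     *estate = CreateExecutorState();

    /*
     * ExecOpenIndices(resultRelInfo); then, per moved tuple,
     * ExecInsertIndexTuples() against the open indexes ...
     */

    ExecCloseIndices(resultRelInfo);
    FreeExecutorState(estate);      /* new in this patch: frees the
                                     * per-query memory context */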
 
 /*
@@ -2474,15 +2541,15 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
 
        i = FlushRelationBuffers(onerel, relblocks);
        if (i < 0)
-               elog(ERROR, "VACUUM (vacuum_heap): FlushRelationBuffers returned %d",
-                        i);
+               elog(ERROR, "FlushRelationBuffers returned %d", i);
 
        /* truncate relation if there are some empty end-pages */
        if (vacuum_pages->empty_end_pages > 0)
        {
-               elog(elevel, "Rel %s: Pages: %u --> %u.",
-                        RelationGetRelationName(onerel),
-                        vacrelstats->rel_pages, relblocks);
+               ereport(elevel,
+                               (errmsg("\"%s\": truncated %u to %u pages",
+                                               RelationGetRelationName(onerel),
+                                               vacrelstats->rel_pages, relblocks)));
                relblocks = smgrtruncate(DEFAULT_SMGR, onerel, relblocks);
                onerel->rd_nblocks = relblocks; /* update relcache immediately */
                onerel->rd_targblock = InvalidBlockNumber;
@@ -2498,8 +2565,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
 static void
 vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
 {
-       OffsetNumber unbuf[BLCKSZ / sizeof(OffsetNumber)];
-       OffsetNumber *unused = unbuf;
+       OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
        int                     uncnt;
        Page            page = BufferGetPage(buffer);
        ItemId          itemid;
@@ -2523,8 +2589,7 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
        {
                XLogRecPtr      recptr;
 
-               recptr = log_heap_clean(onerel, buffer, (char *) unused,
-                                                 (char *) (&(unused[uncnt])) - (char *) unused);
+               recptr = log_heap_clean(onerel, buffer, unused, uncnt);
                PageSetLSN(page, recptr);
                PageSetSUI(page, ThisStartUpID);
        }
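log_heap_clean now takes the OffsetNumber array plus an element count instead of a raw byte span. In vacuum_page the count comes from PageRepairFragmentation, the step this hunk's context omits; a sketch, with signatures as used in this file:

    OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
    int          uncnt;

    /* dead item ids were already flagged ~LP_USED above */
    uncnt = PageRepairFragmentation(page, unused);

    if (!onerel->rd_istemp)
    {
        XLogRecPtr  recptr = log_heap_clean(onerel, buffer, unused, uncnt);

        PageSetLSN(page, recptr);
        PageSetSUI(page, ThisStartUpID);
    }
    else
        MyXactMadeTempRelUpdate = true;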
@@ -2546,17 +2611,25 @@ static void
 scan_index(Relation indrel, double num_tuples)
 {
        IndexBulkDeleteResult *stats;
+       IndexVacuumCleanupInfo vcinfo;
        VacRUsage       ru0;
 
        vac_init_rusage(&ru0);
 
        /*
-        * Even though we're not planning to delete anything, use the
-        * ambulkdelete call, so that the scan happens within the index AM for
-        * more speed.
+        * Even though we're not planning to delete anything, we use the
+        * ambulkdelete call, because (a) the scan happens within the index AM
+        * for more speed, and (b) it may want to pass private statistics to
+        * the amvacuumcleanup call.
         */
        stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
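The second argument is the reaped-tuple callback: it returns true when a heap TID is dead and its index entries should be removed. The delete-nothing stub used here always answers no, so the AM scans but deletes nothing (sketch of the stub defined elsewhere in this file):

    static bool
    dummy_tid_reaped(ItemPointer itemptr, void *state)
    {
        return false;           /* delete nothing; we only want the scan */
    }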
 
+       /* Do post-VACUUM cleanup, even though we deleted nothing */
+       vcinfo.vacuum_full = true;
+       vcinfo.message_level = elevel;
+
+       stats = index_vacuum_cleanup(indrel, &vcinfo, stats);
+
        if (!stats)
                return;
 
@@ -2565,10 +2638,15 @@ scan_index(Relation indrel, double num_tuples)
                                                stats->num_pages, stats->num_index_tuples,
                                                false);
 
-       elog(elevel, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
-                RelationGetRelationName(indrel),
-                stats->num_pages, stats->num_index_tuples,
-                vac_show_rusage(&ru0));
+       ereport(elevel,
+                       (errmsg("index \"%s\" now contains %.0f tuples in %u pages",
+                                       RelationGetRelationName(indrel),
+                                       stats->num_index_tuples,
+                                       stats->num_pages),
+                        errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+                                          "%s",
+                                          stats->pages_deleted, stats->pages_free,
+                                          vac_show_rusage(&ru0))));
 
        /*
         * Check for tuple count mismatch.      If the index is partial, then it's
@@ -2578,10 +2656,11 @@ scan_index(Relation indrel, double num_tuples)
        {
                if (stats->num_index_tuples > num_tuples ||
                        !vac_is_partial_index(indrel))
-                       elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f).\
-\n\tRecreate the index.",
-                                RelationGetRelationName(indrel),
-                                stats->num_index_tuples, num_tuples);
+                       ereport(WARNING,
+                                       (errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
+                                                       RelationGetRelationName(indrel),
+                                                       stats->num_index_tuples, num_tuples),
+                                        errhint("Rebuild the index with REINDEX.")));
        }
 
        pfree(stats);
@@ -2604,6 +2683,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
                         double num_tuples, int keep_tuples)
 {
        IndexBulkDeleteResult *stats;
+       IndexVacuumCleanupInfo vcinfo;
        VacRUsage       ru0;
 
        vac_init_rusage(&ru0);
@@ -2611,6 +2691,12 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
        /* Do bulk deletion */
        stats = index_bulk_delete(indrel, tid_reaped, (void *) vacpagelist);
 
+       /* Do post-VACUUM cleanup */
+       vcinfo.vacuum_full = true;
+       vcinfo.message_level = elevel;
+
+       stats = index_vacuum_cleanup(indrel, &vcinfo, stats);
+
        if (!stats)
                return;
 
@@ -2619,10 +2705,17 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
                                                stats->num_pages, stats->num_index_tuples,
                                                false);
 
-       elog(elevel, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
-                RelationGetRelationName(indrel), stats->num_pages,
-                stats->num_index_tuples - keep_tuples, stats->tuples_removed,
-                vac_show_rusage(&ru0));
+       ereport(elevel,
+                       (errmsg("index \"%s\" now contains %.0f tuples in %u pages",
+                                       RelationGetRelationName(indrel),
+                                       stats->num_index_tuples,
+                                       stats->num_pages),
+                        errdetail("%.0f index tuples were removed.\n"
+                "%u index pages have been deleted, %u are currently reusable.\n"
+                                          "%s",
+                                          stats->tuples_removed,
+                                          stats->pages_deleted, stats->pages_free,
+                                          vac_show_rusage(&ru0))));
 
        /*
         * Check for tuple count mismatch.      If the index is partial, then it's
@@ -2632,10 +2725,11 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
        {
                if (stats->num_index_tuples > num_tuples + keep_tuples ||
                        !vac_is_partial_index(indrel))
-                       elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f).\
-\n\tRecreate the index.",
-                                RelationGetRelationName(indrel),
-                                stats->num_index_tuples, num_tuples);
+                       ereport(WARNING,
+                                       (errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
+                                                       RelationGetRelationName(indrel),
+                                         stats->num_index_tuples, num_tuples + keep_tuples),
+                                        errhint("Rebuild the index with REINDEX.")));
        }
 
        pfree(stats);
@@ -2711,36 +2805,51 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
                           BlockNumber rel_pages)
 {
        int                     nPages = fraged_pages->num_pages;
+       VacPage    *pagedesc = fraged_pages->pagedesc;
+       Size            threshold;
+       PageFreeSpaceInfo *pageSpaces;
+       int                     outPages;
        int                     i;
-       BlockNumber *pages;
-       Size       *spaceAvail;
+
+       /*
+        * We only report pages with free space at least equal to the average
+        * request size --- this avoids cluttering FSM with uselessly-small
+        * bits of space.  Although FSM would discard pages with little free
+        * space anyway, it's important to do this prefiltering because (a) it
+        * reduces the time spent holding the FSM lock in
+        * RecordRelationFreeSpace, and (b) FSM uses the number of pages
+        * reported as a statistic for guiding space management.  If we didn't
+        * threshold our reports the same way vacuumlazy.c does, we'd be
+        * skewing that statistic.
+        */
+       threshold = GetAvgFSMRequestSize(&onerel->rd_node);
 
        /* +1 to avoid palloc(0) */
-       pages = (BlockNumber *) palloc((nPages + 1) * sizeof(BlockNumber));
-       spaceAvail = (Size *) palloc((nPages + 1) * sizeof(Size));
+       pageSpaces = (PageFreeSpaceInfo *)
+               palloc((nPages + 1) * sizeof(PageFreeSpaceInfo));
+       outPages = 0;
 
        for (i = 0; i < nPages; i++)
        {
-               pages[i] = fraged_pages->pagedesc[i]->blkno;
-               spaceAvail[i] = fraged_pages->pagedesc[i]->free;
-
                /*
                 * fraged_pages may contain entries for pages that we later
                 * decided to truncate from the relation; don't enter them into
-                * the map!
+                * the free space map!
                 */
-               if (pages[i] >= rel_pages)
-               {
-                       nPages = i;
+               if (pagedesc[i]->blkno >= rel_pages)
                        break;
+
+               if (pagedesc[i]->free >= threshold)
+               {
+                       pageSpaces[outPages].blkno = pagedesc[i]->blkno;
+                       pageSpaces[outPages].avail = pagedesc[i]->free;
+                       outPages++;
                }
        }
 
-       MultiRecordFreeSpace(&onerel->rd_node,
-                                                0, MaxBlockNumber,
-                                                nPages, pages, spaceAvail);
-       pfree(pages);
-       pfree(spaceAvail);
+       RecordRelationFreeSpace(&onerel->rd_node, outPages, pageSpaces);
+
+       pfree(pageSpaces);
 }
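For context, the consumer side of these reports, sketched with this release's free-space-map API: before extending a relation, an inserter asks the map for a page with enough room, so pages filtered out by the threshold above are simply never offered.

    /* len stands for the space the caller needs (illustrative) */
    BlockNumber targetBlock = GetPageWithFreeSpace(&onerel->rd_node, len);

    if (targetBlock == InvalidBlockNumber)
    {
        /* FSM knows no page with enough room; caller extends the relation */
    }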
 
 /* Copy a VacPage structure */
@@ -2903,7 +3012,7 @@ vac_open_indexes(Relation relation, int *nindexes, Relation **Irel)
        i = 0;
        foreach(indexoidscan, indexoidlist)
        {
-               Oid                     indexoid = lfirsti(indexoidscan);
+               Oid                     indexoid = lfirsto(indexoidscan);
 
                (*Irel)[i] = index_open(indexoid);
                i++;
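lfirsti reads an integer list cell; OID lists now use lfirsto, so OIDs are no longer squeezed through int. The loop's full shape, per this function (sketch):

    List       *indexoidlist = RelationGetIndexList(relation);
    List       *indexoidscan;

    foreach(indexoidscan, indexoidlist)
    {
        Oid         indexoid = lfirsto(indexoidscan);   /* OID accessor */

        /* ... index_open(indexoid) ... */
    }
    freeList(indexoidlist);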
@@ -2939,7 +3048,10 @@ vac_is_partial_index(Relation indrel)
                return true;
 
        /* Otherwise, look to see if there's a partial-index predicate */
-       return (VARSIZE(&indrel->rd_index->indpred) > VARHDRSZ);
+       if (!heap_attisnull(indrel->rd_indextuple, Anum_pg_index_indpred))
+               return true;
+
+       return false;
 }
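The predicate used to be tested by poking at the varlena size of the in-struct field; since a variable-width catalog column is better examined through the tuple itself, the check is now a plain null test. Annotated (sketch):

    /*
     * Old test: inspect the varlena header of the fixed-struct field.
     *     return (VARSIZE(&indrel->rd_index->indpred) > VARHDRSZ);
     * New test: a partial index is one whose indpred column is non-null.
     */
    return !heap_attisnull(indrel->rd_indextuple, Anum_pg_index_indpred);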