Repair some REINDEX problems per recent discussions.
src/backend/commands/vacuum.c
index c1b17bba86b2a1ad12b43a4f7a4bdc1ee786493d..e626848f12b6bf237d19fdbeef06b4cf35696c2b 100644
@@ -8,12 +8,12 @@
  * vacuumlazy.c and analyze.c for the rest of the code for the latter two.
  *
  *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.247 2003/02/09 06:56:27 tgl Exp $
+ *       $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.260 2003/09/24 18:54:01 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -166,7 +166,7 @@ vacuum(VacuumStmt *vacstmt)
        if (vacstmt->verbose)
                elevel = INFO;
        else
-               elevel = DEBUG1;
+               elevel = DEBUG2;
 
        /*
         * We cannot run VACUUM inside a user transaction block; if we were
@@ -189,11 +189,11 @@ vacuum(VacuumStmt *vacstmt)
        /*
         * Create special memory context for cross-transaction storage.
         *
-        * Since it is a child of QueryContext, it will go away eventually even
+        * Since it is a child of PortalContext, it will go away eventually even
         * if we suffer an error; there's no need for special abort cleanup
         * logic.
         */
-       vac_context = AllocSetContextCreate(QueryContext,
+       vac_context = AllocSetContextCreate(PortalContext,
                                                                                "Vacuum",
                                                                                ALLOCSET_DEFAULT_MINSIZE,
                                                                                ALLOCSET_DEFAULT_INITSIZE,
@@ -205,7 +205,7 @@ vacuum(VacuumStmt *vacstmt)
         * lifetime.
         */
        if (vacstmt->analyze && !vacstmt->vacuum)
-               anl_context = AllocSetContextCreate(QueryContext,
+               anl_context = AllocSetContextCreate(PortalContext,
                                                                                        "Analyze",
                                                                                        ALLOCSET_DEFAULT_MINSIZE,
                                                                                        ALLOCSET_DEFAULT_INITSIZE,
@@ -275,7 +275,7 @@ vacuum(VacuumStmt *vacstmt)
                }
 
                /* matches the StartTransaction in PostgresMain() */
-               CommitTransactionCommand(true);
+               CommitTransactionCommand();
        }
 
        /*
@@ -287,24 +287,25 @@ vacuum(VacuumStmt *vacstmt)
 
                if (vacstmt->vacuum)
                {
-                       if (! vacuum_rel(relid, vacstmt, RELKIND_RELATION))
-                               all_rels = false; /* forget about updating dbstats */
+                       if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
+                               all_rels = false;               /* forget about updating dbstats */
                }
                if (vacstmt->analyze)
                {
                        MemoryContext old_context = NULL;
 
                        /*
-                        * If we vacuumed, use new transaction for analyze.
-                        * Otherwise, we can use the outer transaction, but we still
-                        * need to call analyze_rel in a memory context that will be
-                        * cleaned up on return (else we leak memory while processing
-                        * multiple tables).
+                        * If we vacuumed, use new transaction for analyze. Otherwise,
+                        * we can use the outer transaction, but we still need to call
+                        * analyze_rel in a memory context that will be cleaned up on
+                        * return (else we leak memory while processing multiple
+                        * tables).
                         */
                        if (vacstmt->vacuum)
                        {
-                               StartTransactionCommand(true);
-                               SetQuerySnapshot();     /* might be needed for functional index */
+                               StartTransactionCommand();
+                               SetQuerySnapshot();             /* might be needed for functions
+                                                                                * in indexes */
                        }
                        else
                                old_context = MemoryContextSwitchTo(anl_context);
@@ -312,7 +313,7 @@ vacuum(VacuumStmt *vacstmt)
                        analyze_rel(relid, vacstmt);
 
                        if (vacstmt->vacuum)
-                               CommitTransactionCommand(true);
+                               CommitTransactionCommand();
                        else
                        {
                                MemoryContextSwitchTo(old_context);
@@ -330,11 +331,16 @@ vacuum(VacuumStmt *vacstmt)
 
                /*
                 * This matches the CommitTransaction waiting for us in
-                * PostgresMain(). We tell xact.c not to chain the upcoming
-                * commit, so that a VACUUM doesn't start a transaction block,
-                * even when autocommit is off.
+                * PostgresMain().
                 */
-               StartTransactionCommand(true);
+               StartTransactionCommand();
+
+               /*
+                * If it was a database-wide VACUUM, print FSM usage statistics
+                * (we don't make you be superuser to see these).
+                */
+               if (vacstmt->relation == NULL)
+                       PrintFreeSpaceMapStatistics(elevel);
 
                /*
                 * If we completed a database-wide VACUUM without skipping any
@@ -456,7 +462,9 @@ vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
         */
        if (TransactionIdFollows(limit, *oldestXmin))
        {
-               elog(WARNING, "oldest Xmin is far in the past --- close open transactions soon to avoid wraparound problems");
+               ereport(WARNING,
+                               (errmsg("oldest Xmin is far in the past"),
+                                errhint("Close open transactions soon to avoid wraparound problems.")));
                limit = *oldestXmin;
        }
 
@@ -578,7 +586,7 @@ vac_update_dbstats(Oid dbid,
        tuple = heap_getnext(scan, ForwardScanDirection);
 
        if (!HeapTupleIsValid(tuple))
-               elog(ERROR, "database %u does not exist", dbid);
+               elog(ERROR, "could not find tuple for database %u", dbid);
 
        dbform = (Form_pg_database) GETSTRUCT(tuple);
 
@@ -662,8 +670,9 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
         */
        if (vacuumAlreadyWrapped)
        {
-               elog(WARNING, "Some databases have not been vacuumed in over 2 billion transactions."
-                        "\n\tYou may have already suffered transaction-wraparound data loss.");
+               ereport(WARNING,
+                               (errmsg("some databases have not been vacuumed in over 2 billion transactions"),
+                                errdetail("You may have already suffered transaction-wraparound data loss.")));
                return;
        }
 
@@ -673,17 +682,20 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
        /* Give warning about impending wraparound problems */
        if (frozenAlreadyWrapped)
        {
-               elog(WARNING, "Some databases have not been vacuumed in over 1 billion transactions."
-                        "\n\tBetter vacuum them soon, or you may have a wraparound failure.");
+               ereport(WARNING,
+                               (errmsg("some databases have not been vacuumed in over 1 billion transactions"),
+                                errhint("Better vacuum them soon, or you may have a wraparound failure.")));
        }
        else
        {
                age = (int32) (myXID - frozenXID);
                if (age > (int32) ((MaxTransactionId >> 3) * 3))
-                       elog(WARNING, "Some databases have not been vacuumed in %d transactions."
-                                "\n\tBetter vacuum them within %d transactions,"
-                                "\n\tor you may have a wraparound failure.",
-                                age, (int32) (MaxTransactionId >> 1) - age);
+                       ereport(WARNING,
+                                       (errmsg("some databases have not been vacuumed in %d transactions",
+                                                       age),
+                                        errhint("Better vacuum them within %d transactions, "
+                                                        "or you may have a wraparound failure.",
+                                                        (int32) (MaxTransactionId >> 1) - age)));
        }
 }
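For context, splitting the old multi-line elog() into errmsg()/errhint() means the client receives the primary message and the hint as separate fields. In a psql session the warning above renders roughly like this (formatting approximate, numbers invented for illustration):

    WARNING:  some databases have not been vacuumed in 1600000000 transactions
    HINT:  Better vacuum them within 547483647 transactions, or you may have a wraparound failure.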
 
@@ -722,8 +734,9 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
        bool            result;
 
        /* Begin a transaction for vacuuming this relation */
-       StartTransactionCommand(true);
-       SetQuerySnapshot();                     /* might be needed for functional index */
+       StartTransactionCommand();
+       SetQuerySnapshot();                     /* might be needed for functions in
+                                                                * indexes */
 
        /*
         * Check for user-requested abort.      Note we want this to be inside a
@@ -739,7 +752,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
                                                          ObjectIdGetDatum(relid),
                                                          0, 0, 0))
        {
-               CommitTransactionCommand(true);
+               CommitTransactionCommand();
                return true;                    /* okay 'cause no data there */
        }
 
@@ -766,12 +779,13 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
        onerel = relation_open(relid, lmode);
 
        if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
-                 (is_dbadmin(MyDatabaseId) && !onerel->rd_rel->relisshared)))
+                 (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared)))
        {
-               elog(WARNING, "Skipping \"%s\" --- only table or database owner can VACUUM it",
-                        RelationGetRelationName(onerel));
+               ereport(WARNING,
+                               (errmsg("skipping \"%s\" --- only table or database owner can VACUUM it",
+                                               RelationGetRelationName(onerel))));
                relation_close(onerel, lmode);
-               CommitTransactionCommand(true);
+               CommitTransactionCommand();
                return false;
        }
 
@@ -781,10 +795,11 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
         */
        if (onerel->rd_rel->relkind != expected_relkind)
        {
-               elog(WARNING, "Skipping \"%s\" --- can not process indexes, views or special system tables",
-                        RelationGetRelationName(onerel));
+               ereport(WARNING,
+                               (errmsg("skipping \"%s\" --- cannot VACUUM indexes, views or special system tables",
+                                               RelationGetRelationName(onerel))));
                relation_close(onerel, lmode);
-               CommitTransactionCommand(true);
+               CommitTransactionCommand();
                return false;
        }
 
@@ -798,8 +813,9 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
        if (isOtherTempNamespace(RelationGetNamespace(onerel)))
        {
                relation_close(onerel, lmode);
-               CommitTransactionCommand(true);
-               return true;                    /* assume no long-lived data in temp tables */
+               CommitTransactionCommand();
+               return true;                    /* assume no long-lived data in temp
+                                                                * tables */
        }
 
        /*
@@ -836,7 +852,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
        /*
         * Complete the transaction and free all temporary memory used.
         */
-       CommitTransactionCommand(true);
+       CommitTransactionCommand();
 
        /*
         * If the relation has a secondary toast rel, vacuum that too while we
@@ -847,7 +863,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
         */
        if (toast_relid != InvalidOid)
        {
-               if (! vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
+               if (!vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
                        result = false;         /* failed to vacuum the TOAST table? */
        }
 
@@ -888,11 +904,6 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
        int                     nindexes,
                                i;
        VRelStats  *vacrelstats;
-       bool            reindex = false;
-
-       if (IsIgnoringSystemIndexes() &&
-               IsSystemRelation(onerel))
-               reindex = true;
 
        vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
                                                  &OldestXmin, &FreezeLimit);
@@ -911,27 +922,9 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
 
        /* Now open all indexes of the relation */
        vac_open_indexes(onerel, &nindexes, &Irel);
-       if (!Irel)
-               reindex = false;
-       else if (!RelationGetForm(onerel)->relhasindex)
-               reindex = true;
        if (nindexes > 0)
                vacrelstats->hasindex = true;
 
-#ifdef NOT_USED
-
-       /*
-        * reindex in VACUUM is dangerous under WAL. ifdef out until it
-        * becomes safe.
-        */
-       if (reindex)
-       {
-               vac_close_indexes(nindexes, Irel);
-               Irel = (Relation *) NULL;
-               activate_indexes_of_a_table(onerel, false);
-       }
-#endif   /* NOT_USED */
-
        /* Clean/scan index relation(s) */
        if (Irel != (Relation *) NULL)
        {
@@ -974,16 +967,10 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
                         */
                        i = FlushRelationBuffers(onerel, vacrelstats->rel_pages);
                        if (i < 0)
-                               elog(ERROR, "VACUUM (full_vacuum_rel): FlushRelationBuffers returned %d",
-                                        i);
+                               elog(ERROR, "FlushRelationBuffers returned %d", i);
                }
        }
 
-#ifdef NOT_USED
-       if (reindex)
-               activate_indexes_of_a_table(onerel, true);
-#endif   /* NOT_USED */
-
        /* update shared free space map with final free space info */
        vac_update_fsm(onerel, &fraged_pages, vacrelstats->rel_pages);
 
@@ -1020,15 +1007,13 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
        VacPage         vacpage,
                                vacpagecopy;
        BlockNumber empty_pages,
-                               new_pages,
-                               changed_pages,
                                empty_end_pages;
        double          num_tuples,
                                tups_vacuumed,
                                nkeep,
                                nunused;
-       double          free_size,
-                               usable_free_size;
+       double          free_space,
+                               usable_free_space;
        Size            min_tlen = MaxTupleSize;
        Size            max_tlen = 0;
        int                     i;
@@ -1041,13 +1026,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
        vac_init_rusage(&ru0);
 
        relname = RelationGetRelationName(onerel);
-       elog(elevel, "--Relation %s.%s--",
-                get_namespace_name(RelationGetNamespace(onerel)),
-                relname);
+       ereport(elevel,
+                       (errmsg("vacuuming \"%s.%s\"",
+                                       get_namespace_name(RelationGetNamespace(onerel)),
+                                       relname)));
 
-       empty_pages = new_pages = changed_pages = empty_end_pages = 0;
+       empty_pages = empty_end_pages = 0;
        num_tuples = tups_vacuumed = nkeep = nunused = 0;
-       free_size = 0;
+       free_space = 0;
 
        nblocks = RelationGetNumberOfBlocks(onerel);
 
@@ -1075,12 +1061,13 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
 
                if (PageIsNew(page))
                {
-                       elog(WARNING, "Rel %s: Uninitialized page %u - fixing",
-                                relname, blkno);
+                       ereport(WARNING,
+                       (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+                                       relname, blkno)));
                        PageInit(page, BufferGetPageSize(buf), 0);
                        vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
-                       free_size += vacpage->free;
-                       new_pages++;
+                       free_space += vacpage->free;
+                       empty_pages++;
                        empty_end_pages++;
                        vacpagecopy = copy_vac_page(vacpage);
                        vpage_insert(vacuum_pages, vacpagecopy);
@@ -1092,7 +1079,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                if (PageIsEmpty(page))
                {
                        vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
-                       free_size += vacpage->free;
+                       free_space += vacpage->free;
                        empty_pages++;
                        empty_end_pages++;
                        vacpagecopy = copy_vac_page(vacpage);
@@ -1188,9 +1175,12 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                        /*
                                         * This should not happen, since we hold exclusive
                                         * lock on the relation; shouldn't we raise an error?
+                                        * (Actually, it can happen in system catalogs, since
+                                        * we tend to release write lock before commit there.)
                                         */
-                                       elog(WARNING, "Rel %s: TID %u/%u: InsertTransactionInProgress %u - can't shrink relation",
-                                                relname, blkno, offnum, HeapTupleHeaderGetXmin(tuple.t_data));
+                                       ereport(NOTICE,
+                                                       (errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
+                                                                       relname, blkno, offnum, HeapTupleHeaderGetXmin(tuple.t_data))));
                                        do_shrinking = false;
                                        break;
                                case HEAPTUPLE_DELETE_IN_PROGRESS:
@@ -1198,13 +1188,16 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                        /*
                                         * This should not happen, since we hold exclusive
                                         * lock on the relation; shouldn't we raise an error?
+                                        * (Actually, it can happen in system catalogs, since
+                                        * we tend to release write lock before commit there.)
                                         */
-                                       elog(WARNING, "Rel %s: TID %u/%u: DeleteTransactionInProgress %u - can't shrink relation",
-                                                relname, blkno, offnum, HeapTupleHeaderGetXmax(tuple.t_data));
+                                       ereport(NOTICE,
+                                                       (errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
+                                                                       relname, blkno, offnum, HeapTupleHeaderGetXmax(tuple.t_data))));
                                        do_shrinking = false;
                                        break;
                                default:
-                                       elog(ERROR, "Unexpected HeapTupleSatisfiesVacuum result");
+                                       elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
                                        break;
                        }
 
@@ -1217,8 +1210,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                         */
                        if (onerel->rd_rel->relhasoids &&
                                !OidIsValid(HeapTupleGetOid(&tuple)))
-                               elog(WARNING, "Rel %s: TID %u/%u: OID IS INVALID. TUPGONE %d.",
-                                        relname, blkno, offnum, (int) tupgone);
+                               elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
+                                        relname, blkno, offnum);
 
                        if (tupgone)
                        {
@@ -1275,7 +1268,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                        do_reap = (vacpage->offsets_free > 0);
                }
 
-               free_size += vacpage->free;
+               free_space += vacpage->free;
 
                /*
                 * Add the page to fraged_pages if it has a useful amount of free
@@ -1294,16 +1287,21 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                vpage_insert(fraged_pages, vacpagecopy);
                }
 
+               /*
+                * Include the page in empty_end_pages if it will be empty after
+                * vacuuming; this is to keep us from using it as a move
+                * destination.
+                */
                if (notup)
+               {
+                       empty_pages++;
                        empty_end_pages++;
+               }
                else
                        empty_end_pages = 0;
 
                if (pgchanged)
-               {
                        WriteBuffer(buf);
-                       changed_pages++;
-               }
                else
                        ReleaseBuffer(buf);
        }
@@ -1330,14 +1328,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
        {
                Assert((BlockNumber) fraged_pages->num_pages >= empty_end_pages);
                fraged_pages->num_pages -= empty_end_pages;
-               usable_free_size = 0;
+               usable_free_space = 0;
                for (i = 0; i < fraged_pages->num_pages; i++)
-                       usable_free_size += fraged_pages->pagedesc[i]->free;
+                       usable_free_space += fraged_pages->pagedesc[i]->free;
        }
        else
        {
                fraged_pages->num_pages = 0;
-               usable_free_size = 0;
+               usable_free_space = 0;
        }
 
        /* don't bother to save vtlinks if we will not call repair_frag */
@@ -1355,17 +1353,24 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                pfree(vtlinks);
        }
 
-       elog(elevel, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; "
-                "Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, "
-                "MaxLen %lu; Re-using: Free/Avail. Space %.0f/%.0f; "
-                "EndEmpty/Avail. Pages %u/%u.\n\t%s",
-                nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
-                new_pages, num_tuples, tups_vacuumed,
-                nkeep, vacrelstats->num_vtlinks,
-                nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
-                free_size, usable_free_size,
-                empty_end_pages, fraged_pages->num_pages,
-                vac_show_rusage(&ru0));
+       ereport(elevel,
+                       (errmsg("\"%s\": found %.0f removable, %.0f nonremovable tuples in %u pages",
+                                       RelationGetRelationName(onerel),
+                                       tups_vacuumed, num_tuples, nblocks),
+                        errdetail("%.0f dead tuples cannot be removed yet.\n"
+                               "Nonremovable tuples range from %lu to %lu bytes long.\n"
+                                          "There were %.0f unused item pointers.\n"
+                "Total free space (including removable tuples) is %.0f bytes.\n"
+                                          "%u pages are or will become empty, including %u at the end of the table.\n"
+                                          "%u pages containing %.0f free bytes are potential move destinations.\n"
+                                          "%s",
+                                          nkeep,
+                                          (unsigned long) min_tlen, (unsigned long) max_tlen,
+                                          nunused,
+                                          free_space,
+                                          empty_pages, empty_end_pages,
+                                          fraged_pages->num_pages, usable_free_space,
+                                          vac_show_rusage(&ru0))));
 }
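With the per-table report rewritten this way, a VACUUM VERBOSE of a single table produces output along these lines (values invented; the wording follows the errmsg/errdetail format strings in the hunk above, and the final line is the CPU/elapsed-time summary from vac_show_rusage):

    INFO:  vacuuming "public.mytable"
    INFO:  "mytable": found 1500 removable, 25000 nonremovable tuples in 320 pages
    DETAIL:  12 dead tuples cannot be removed yet.
    Nonremovable tuples range from 44 to 196 bytes long.
    There were 350 unused item pointers.
    Total free space (including removable tuples) is 425000 bytes.
    8 pages are or will become empty, including 0 at the end of the table.
    96 pages containing 310000 free bytes are potential move destinations.
    (plus the CPU/elapsed-time summary from vac_show_rusage)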
 
 
@@ -1589,7 +1594,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                                {
                                        if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
-                                               elog(ERROR, "Invalid XVAC in tuple header");
+                                               elog(ERROR, "invalid XVAC in tuple header");
                                        if (keep_tuples == 0)
                                                continue;
                                        if (chain_tuple_moved)          /* some chains was moved
@@ -1668,7 +1673,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                /* Quick exit if we have no vtlinks to search in */
                                if (vacrelstats->vtlinks == NULL)
                                {
-                                       elog(DEBUG1, "Parent item in update-chain not found - can't continue repair_frag");
+                                       elog(DEBUG2, "parent item in update-chain not found --- can't continue repair_frag");
                                        break;          /* out of walk-along-page loop */
                                }
 
@@ -1705,7 +1710,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                 * in scan_heap(), but it's not implemented at the
                                                 * moment and so we just stop shrinking here.
                                                 */
-                                               elog(DEBUG1, "Child itemid in update-chain marked as unused - can't continue repair_frag");
+                                               elog(DEBUG2, "child itemid in update-chain marked as unused --- can't continue repair_frag");
                                                chain_move_failed = true;
                                                break;  /* out of loop to move to chain end */
                                        }
@@ -1790,7 +1795,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                        if (vtlp == NULL)
                                        {
                                                /* see discussion above */
-                                               elog(DEBUG1, "Parent item in update-chain not found - can't continue repair_frag");
+                                               elog(DEBUG2, "parent item in update-chain not found --- can't continue repair_frag");
                                                chain_move_failed = true;
                                                break;  /* out of check-all-items loop */
                                        }
@@ -1802,7 +1807,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                           ItemPointerGetOffsetNumber(&(tp.t_self)));
                                        /* this can't happen since we saw tuple earlier: */
                                        if (!ItemIdIsUsed(Pitemid))
-                                               elog(ERROR, "Parent itemid marked as unused");
+                                               elog(ERROR, "parent itemid marked as unused");
                                        Ptp.t_datamcxt = NULL;
                                        Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
 
@@ -1826,7 +1831,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                                         HeapTupleHeaderGetXmin(tp.t_data))))
                                        {
                                                ReleaseBuffer(Pbuf);
-                                               elog(DEBUG1, "Too old parent tuple found - can't continue repair_frag");
+                                               elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag");
                                                chain_move_failed = true;
                                                break;  /* out of check-all-items loop */
                                        }
@@ -1899,7 +1904,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                         */
                                        CacheInvalidateHeapTuple(onerel, &tuple);
 
-                                       /* NO ELOG(ERROR) TILL CHANGES ARE LOGGED */
+                                       /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
                                        START_CRIT_SECTION();
 
                                        tuple.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
@@ -1955,7 +1960,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                                                 LP_USED);
                                        if (newoff == InvalidOffsetNumber)
                                        {
-                                               elog(PANIC, "moving chain: failed to add item with len = %lu to page %u",
+                                               elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain",
                                                  (unsigned long) tuple_len, destvacpage->blkno);
                                        }
                                        newitemid = PageGetItemId(ToPage, newoff);
@@ -2082,7 +2087,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                         */
                        CacheInvalidateHeapTuple(onerel, &tuple);
 
-                       /* NO ELOG(ERROR) TILL CHANGES ARE LOGGED */
+                       /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
                        START_CRIT_SECTION();
 
                        /*
@@ -2188,11 +2193,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                if (tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
                                        continue;
                                if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
-                                       elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
+                                       elog(ERROR, "HEAP_MOVED_IN was not expected");
                                if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                                {
                                        if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
-                                               elog(ERROR, "Invalid XVAC in tuple header (4)");
+                                               elog(ERROR, "invalid XVAC in tuple header");
                                        /* some chains was moved while */
                                        if (chain_tuple_moved)
                                        {                       /* cleaning this page */
@@ -2217,7 +2222,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                        }
                                }
                                else
-                                       elog(ERROR, "HEAP_MOVED_OFF was expected (2)");
+                                       elog(ERROR, "HEAP_MOVED_OFF was expected");
                        }
                }
 
@@ -2330,7 +2335,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                if (!(tuple.t_data->t_infomask & HEAP_MOVED))
                                        elog(ERROR, "HEAP_MOVED_OFF/HEAP_MOVED_IN was expected");
                                if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
-                                       elog(ERROR, "Invalid XVAC in tuple header (2)");
+                                       elog(ERROR, "invalid XVAC in tuple header");
                                if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
                                {
                                        tuple.t_data->t_infomask |= HEAP_XMIN_COMMITTED;
@@ -2348,10 +2353,18 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
        }
        Assert(num_moved == checked_moved);
 
-       elog(elevel, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s",
-                RelationGetRelationName(onerel),
-                nblocks, blkno, num_moved,
-                vac_show_rusage(&ru0));
+       /*
+        * It'd be cleaner to make this report at the bottom of this routine,
+        * but then the rusage would double-count the second pass of index
+        * vacuuming.  So do it here and ignore the relatively small amount of
+        * processing that occurs below.
+        */
+       ereport(elevel,
+                       (errmsg("\"%s\": moved %u tuples, truncated %u to %u pages",
+                                       RelationGetRelationName(onerel),
+                                       num_moved, nblocks, blkno),
+                        errdetail("%s",
+                                          vac_show_rusage(&ru0))));
 
        /*
         * Reflect the motion of system tuples to catalog cache here.
@@ -2386,8 +2399,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                if (vacpage->blkno == (blkno - 1) &&
                        vacpage->offsets_free > 0)
                {
-                       OffsetNumber unbuf[BLCKSZ / sizeof(OffsetNumber)];
-                       OffsetNumber *unused = unbuf;
+                       OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
                        int                     uncnt;
 
                        buf = ReadBuffer(onerel, vacpage->blkno);
@@ -2410,12 +2422,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                        if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
                                        {
                                                if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
-                                                       elog(ERROR, "Invalid XVAC in tuple header (3)");
+                                                       elog(ERROR, "invalid XVAC in tuple header");
                                                itemid->lp_flags &= ~LP_USED;
                                                num_tuples++;
                                        }
                                        else
-                                               elog(ERROR, "HEAP_MOVED_OFF was expected (3)");
+                                               elog(ERROR, "HEAP_MOVED_OFF was expected");
                                }
 
                        }
@@ -2430,8 +2442,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                        {
                                XLogRecPtr      recptr;
 
-                               recptr = log_heap_clean(onerel, buf, (char *) unused,
-                                                 (char *) (&(unused[uncnt])) - (char *) unused);
+                               recptr = log_heap_clean(onerel, buf, unused, uncnt);
                                PageSetLSN(page, recptr);
                                PageSetSUI(page, ThisStartUpID);
                        }
@@ -2465,8 +2476,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
         */
        i = FlushRelationBuffers(onerel, blkno);
        if (i < 0)
-               elog(ERROR, "VACUUM (repair_frag): FlushRelationBuffers returned %d",
-                        i);
+               elog(ERROR, "FlushRelationBuffers returned %d", i);
 
        /* truncate relation, if needed */
        if (blkno < nblocks)
@@ -2531,15 +2541,15 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
 
        i = FlushRelationBuffers(onerel, relblocks);
        if (i < 0)
-               elog(ERROR, "VACUUM (vacuum_heap): FlushRelationBuffers returned %d",
-                        i);
+               elog(ERROR, "FlushRelationBuffers returned %d", i);
 
        /* truncate relation if there are some empty end-pages */
        if (vacuum_pages->empty_end_pages > 0)
        {
-               elog(elevel, "Rel %s: Pages: %u --> %u.",
-                        RelationGetRelationName(onerel),
-                        vacrelstats->rel_pages, relblocks);
+               ereport(elevel,
+                               (errmsg("\"%s\": truncated %u to %u pages",
+                                               RelationGetRelationName(onerel),
+                                               vacrelstats->rel_pages, relblocks)));
                relblocks = smgrtruncate(DEFAULT_SMGR, onerel, relblocks);
                onerel->rd_nblocks = relblocks; /* update relcache immediately */
                onerel->rd_targblock = InvalidBlockNumber;
@@ -2555,8 +2565,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
 static void
 vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
 {
-       OffsetNumber unbuf[BLCKSZ / sizeof(OffsetNumber)];
-       OffsetNumber *unused = unbuf;
+       OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
        int                     uncnt;
        Page            page = BufferGetPage(buffer);
        ItemId          itemid;
@@ -2580,8 +2589,7 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
        {
                XLogRecPtr      recptr;
 
-               recptr = log_heap_clean(onerel, buffer, (char *) unused,
-                                                 (char *) (&(unused[uncnt])) - (char *) unused);
+               recptr = log_heap_clean(onerel, buffer, unused, uncnt);
                PageSetLSN(page, recptr);
                PageSetSUI(page, ThisStartUpID);
        }
@@ -2603,17 +2611,25 @@ static void
 scan_index(Relation indrel, double num_tuples)
 {
        IndexBulkDeleteResult *stats;
+       IndexVacuumCleanupInfo vcinfo;
        VacRUsage       ru0;
 
        vac_init_rusage(&ru0);
 
        /*
-        * Even though we're not planning to delete anything, use the
-        * ambulkdelete call, so that the scan happens within the index AM for
-        * more speed.
+        * Even though we're not planning to delete anything, we use the
+        * ambulkdelete call, because (a) the scan happens within the index AM
+        * for more speed, and (b) it may want to pass private statistics to
+        * the amvacuumcleanup call.
         */
        stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
 
+       /* Do post-VACUUM cleanup, even though we deleted nothing */
+       vcinfo.vacuum_full = true;
+       vcinfo.message_level = elevel;
+
+       stats = index_vacuum_cleanup(indrel, &vcinfo, stats);
+
        if (!stats)
                return;
 
@@ -2622,10 +2638,15 @@ scan_index(Relation indrel, double num_tuples)
                                                stats->num_pages, stats->num_index_tuples,
                                                false);
 
-       elog(elevel, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
-                RelationGetRelationName(indrel),
-                stats->num_pages, stats->num_index_tuples,
-                vac_show_rusage(&ru0));
+       ereport(elevel,
+                       (errmsg("index \"%s\" now contains %.0f tuples in %u pages",
+                                       RelationGetRelationName(indrel),
+                                       stats->num_index_tuples,
+                                       stats->num_pages),
+                        errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+                                          "%s",
+                                          stats->pages_deleted, stats->pages_free,
+                                          vac_show_rusage(&ru0))));
 
        /*
         * Check for tuple count mismatch.      If the index is partial, then it's
@@ -2635,10 +2656,11 @@ scan_index(Relation indrel, double num_tuples)
        {
                if (stats->num_index_tuples > num_tuples ||
                        !vac_is_partial_index(indrel))
-                       elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f)."
-                                "\n\tRecreate the index.",
-                                RelationGetRelationName(indrel),
-                                stats->num_index_tuples, num_tuples);
+                       ereport(WARNING,
+                                       (errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
+                                                       RelationGetRelationName(indrel),
+                                                       stats->num_index_tuples, num_tuples),
+                                        errhint("Rebuild the index with REINDEX.")));
        }
 
        pfree(stats);
@@ -2661,6 +2683,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
                         double num_tuples, int keep_tuples)
 {
        IndexBulkDeleteResult *stats;
+       IndexVacuumCleanupInfo vcinfo;
        VacRUsage       ru0;
 
        vac_init_rusage(&ru0);
@@ -2668,6 +2691,12 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
        /* Do bulk deletion */
        stats = index_bulk_delete(indrel, tid_reaped, (void *) vacpagelist);
 
+       /* Do post-VACUUM cleanup */
+       vcinfo.vacuum_full = true;
+       vcinfo.message_level = elevel;
+
+       stats = index_vacuum_cleanup(indrel, &vcinfo, stats);
+
        if (!stats)
                return;
 
@@ -2676,10 +2705,17 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
                                                stats->num_pages, stats->num_index_tuples,
                                                false);
 
-       elog(elevel, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
-                RelationGetRelationName(indrel), stats->num_pages,
-                stats->num_index_tuples - keep_tuples, stats->tuples_removed,
-                vac_show_rusage(&ru0));
+       ereport(elevel,
+                       (errmsg("index \"%s\" now contains %.0f tuples in %u pages",
+                                       RelationGetRelationName(indrel),
+                                       stats->num_index_tuples,
+                                       stats->num_pages),
+                        errdetail("%.0f index tuples were removed.\n"
+                "%u index pages have been deleted, %u are currently reusable.\n"
+                                          "%s",
+                                          stats->tuples_removed,
+                                          stats->pages_deleted, stats->pages_free,
+                                          vac_show_rusage(&ru0))));
 
        /*
         * Check for tuple count mismatch.      If the index is partial, then it's
@@ -2689,10 +2725,11 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
        {
                if (stats->num_index_tuples > num_tuples + keep_tuples ||
                        !vac_is_partial_index(indrel))
-                       elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f)."
-                                "\n\tRecreate the index.",
-                                RelationGetRelationName(indrel),
-                                stats->num_index_tuples, num_tuples);
+                       ereport(WARNING,
+                                       (errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
+                                                       RelationGetRelationName(indrel),
+                                         stats->num_index_tuples, num_tuples + keep_tuples),
+                                        errhint("Rebuild the index with REINDEX.")));
        }
 
        pfree(stats);
@@ -2768,31 +2805,49 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
                           BlockNumber rel_pages)
 {
        int                     nPages = fraged_pages->num_pages;
-       int                     i;
+       VacPage    *pagedesc = fraged_pages->pagedesc;
+       Size            threshold;
        PageFreeSpaceInfo *pageSpaces;
+       int                     outPages;
+       int                     i;
+
+       /*
+        * We only report pages with free space at least equal to the average
+        * request size --- this avoids cluttering FSM with uselessly-small
+        * bits of space.  Although FSM would discard pages with little free
+        * space anyway, it's important to do this prefiltering because (a) it
+        * reduces the time spent holding the FSM lock in
+        * RecordRelationFreeSpace, and (b) FSM uses the number of pages
+        * reported as a statistic for guiding space management.  If we didn't
+        * threshold our reports the same way vacuumlazy.c does, we'd be
+        * skewing that statistic.
+        */
+       threshold = GetAvgFSMRequestSize(&onerel->rd_node);
 
        /* +1 to avoid palloc(0) */
        pageSpaces = (PageFreeSpaceInfo *)
                palloc((nPages + 1) * sizeof(PageFreeSpaceInfo));
+       outPages = 0;
 
        for (i = 0; i < nPages; i++)
        {
-               pageSpaces[i].blkno = fraged_pages->pagedesc[i]->blkno;
-               pageSpaces[i].avail = fraged_pages->pagedesc[i]->free;
-
                /*
                 * fraged_pages may contain entries for pages that we later
                 * decided to truncate from the relation; don't enter them into
                 * the free space map!
                 */
-               if (pageSpaces[i].blkno >= rel_pages)
-               {
-                       nPages = i;
+               if (pagedesc[i]->blkno >= rel_pages)
                        break;
+
+               if (pagedesc[i]->free >= threshold)
+               {
+                       pageSpaces[outPages].blkno = pagedesc[i]->blkno;
+                       pageSpaces[outPages].avail = pagedesc[i]->free;
+                       outPages++;
                }
        }
 
-       MultiRecordFreeSpace(&onerel->rd_node, 0, nPages, pageSpaces);
+       RecordRelationFreeSpace(&onerel->rd_node, outPages, pageSpaces);
 
        pfree(pageSpaces);
 }
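To make the new thresholding concrete (numbers invented for illustration): if GetAvgFSMRequestSize() reports an average request of 128 bytes for the relation, a fraged page with 500 free bytes is copied into pageSpaces[] and handed to RecordRelationFreeSpace(), a page with only 40 free bytes is dropped before it ever reaches the FSM, and the scan stops at the first page whose block number is at or beyond rel_pages, since such pages are about to be truncated away.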
@@ -2993,7 +3048,10 @@ vac_is_partial_index(Relation indrel)
                return true;
 
        /* Otherwise, look to see if there's a partial-index predicate */
-       return (VARSIZE(&indrel->rd_index->indpred) > VARHDRSZ);
+       if (!heap_attisnull(indrel->rd_indextuple, Anum_pg_index_indpred))
+               return true;
+
+       return false;
 }