* vacuumlazy.c and analyze.c for the rest of the code for the latter two.
*
*
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.251 2003/03/04 21:51:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.260 2003/09/24 18:54:01 tgl Exp $
*
*-------------------------------------------------------------------------
*/
if (vacstmt->verbose)
elevel = INFO;
else
- elevel = DEBUG1;
+ elevel = DEBUG2;
/*
* We cannot run VACUUM inside a user transaction block; if we were
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of QueryContext, it will go away eventually even
+ * Since it is a child of PortalContext, it will go away eventually even
* if we suffer an error; there's no need for special abort cleanup
* logic.
*/
- vac_context = AllocSetContextCreate(QueryContext,
+ vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
* lifetime.
*/
if (vacstmt->analyze && !vacstmt->vacuum)
- anl_context = AllocSetContextCreate(QueryContext,
+ anl_context = AllocSetContextCreate(PortalContext,
"Analyze",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
}
/* matches the StartTransaction in PostgresMain() */
- CommitTransactionCommand(true);
+ CommitTransactionCommand();
}
/*
if (vacstmt->vacuum)
{
- if (! vacuum_rel(relid, vacstmt, RELKIND_RELATION))
- all_rels = false; /* forget about updating dbstats */
+ if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
+ all_rels = false; /* forget about updating dbstats */
}
if (vacstmt->analyze)
{
MemoryContext old_context = NULL;
/*
- * If we vacuumed, use new transaction for analyze.
- * Otherwise, we can use the outer transaction, but we still
- * need to call analyze_rel in a memory context that will be
- * cleaned up on return (else we leak memory while processing
- * multiple tables).
+ * If we vacuumed, use new transaction for analyze. Otherwise,
+ * we can use the outer transaction, but we still need to call
+ * analyze_rel in a memory context that will be cleaned up on
+ * return (else we leak memory while processing multiple
+ * tables).
*/
if (vacstmt->vacuum)
{
- StartTransactionCommand(true);
- SetQuerySnapshot(); /* might be needed for functional index */
+ StartTransactionCommand();
+ SetQuerySnapshot(); /* might be needed for functions
+ * in indexes */
}
else
old_context = MemoryContextSwitchTo(anl_context);
analyze_rel(relid, vacstmt);
if (vacstmt->vacuum)
- CommitTransactionCommand(true);
+ CommitTransactionCommand();
else
{
MemoryContextSwitchTo(old_context);
/*
* This matches the CommitTransaction waiting for us in
- * PostgresMain(). We tell xact.c not to chain the upcoming
- * commit, so that a VACUUM doesn't start a transaction block,
- * even when autocommit is off.
+ * PostgresMain().
*/
- StartTransactionCommand(true);
+ StartTransactionCommand();
/*
* If it was a database-wide VACUUM, print FSM usage statistics
*/
if (TransactionIdFollows(limit, *oldestXmin))
{
- elog(WARNING, "oldest Xmin is far in the past --- close open transactions soon to avoid wraparound problems");
+ ereport(WARNING,
+ (errmsg("oldest Xmin is far in the past"),
+ errhint("Close open transactions soon to avoid wraparound problems.")));
limit = *oldestXmin;
}
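/*
 * A minimal sketch of the ereport() style used throughout this patch.
 * errmsg(), errdetail(), and errhint() are the standard elog.h
 * auxiliaries; the message text below is illustrative only, not from
 * this file:
 *
 *	ereport(WARNING,
 *			(errmsg("primary message: terse, lowercase, no period"),
 *			 errdetail("Detail is a complete sentence with a period."),
 *			 errhint("Hint suggests what the user could do about it.")));
 *
 * Unlike a single elog() string, the separate fields let clients
 * display or suppress detail and hint independently.
 */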
tuple = heap_getnext(scan, ForwardScanDirection);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "database %u does not exist", dbid);
+ elog(ERROR, "could not find tuple for database %u", dbid);
dbform = (Form_pg_database) GETSTRUCT(tuple);
*/
if (vacuumAlreadyWrapped)
{
- elog(WARNING, "Some databases have not been vacuumed in over 2 billion transactions."
- "\n\tYou may have already suffered transaction-wraparound data loss.");
+ ereport(WARNING,
+ (errmsg("some databases have not been vacuumed in over 2 billion transactions"),
+ errdetail("You may have already suffered transaction-wraparound data loss.")));
return;
}
/* Give warning about impending wraparound problems */
if (frozenAlreadyWrapped)
{
- elog(WARNING, "Some databases have not been vacuumed in over 1 billion transactions."
- "\n\tBetter vacuum them soon, or you may have a wraparound failure.");
+ ereport(WARNING,
+ (errmsg("some databases have not been vacuumed in over 1 billion transactions"),
+ errhint("Better vacuum them soon, or you may have a wraparound failure.")));
}
else
{
age = (int32) (myXID - frozenXID);
if (age > (int32) ((MaxTransactionId >> 3) * 3))
- elog(WARNING, "Some databases have not been vacuumed in %d transactions."
- "\n\tBetter vacuum them within %d transactions,"
- "\n\tor you may have a wraparound failure.",
- age, (int32) (MaxTransactionId >> 1) - age);
+ ereport(WARNING,
+ (errmsg("some databases have not been vacuumed in %d transactions",
+ age),
+ errhint("Better vacuum them within %d transactions, "
+ "or you may have a wraparound failure.",
+ (int32) (MaxTransactionId >> 1) - age)));
}
}
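/*
 * Worked numbers for the age test above, assuming 32-bit transaction
 * IDs (MaxTransactionId = 0xFFFFFFFF):
 *
 *	warning threshold:  (MaxTransactionId >> 3) * 3  ~= 1.6 billion xacts
 *	wraparound horizon:  MaxTransactionId >> 1       ~= 2.1 billion xacts
 *
 * hence the hint reports (MaxTransactionId >> 1) - age, the headroom
 * remaining before a wraparound failure.
 */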
bool result;
/* Begin a transaction for vacuuming this relation */
- StartTransactionCommand(true);
- SetQuerySnapshot(); /* might be needed for functional index */
+ StartTransactionCommand();
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
/*
* Check for user-requested abort. Note we want this to be inside a
ObjectIdGetDatum(relid),
0, 0, 0))
{
- CommitTransactionCommand(true);
+ CommitTransactionCommand();
return true; /* okay 'cause no data there */
}
onerel = relation_open(relid, lmode);
if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
- (is_dbadmin(MyDatabaseId) && !onerel->rd_rel->relisshared)))
+ (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared)))
{
- elog(WARNING, "Skipping \"%s\" --- only table or database owner can VACUUM it",
- RelationGetRelationName(onerel));
+ ereport(WARNING,
+ (errmsg("skipping \"%s\" --- only table or database owner can VACUUM it",
+ RelationGetRelationName(onerel))));
relation_close(onerel, lmode);
- CommitTransactionCommand(true);
+ CommitTransactionCommand();
return false;
}
*/
if (onerel->rd_rel->relkind != expected_relkind)
{
- elog(WARNING, "Skipping \"%s\" --- can not process indexes, views or special system tables",
- RelationGetRelationName(onerel));
+ ereport(WARNING,
+ (errmsg("skipping \"%s\" --- cannot VACUUM indexes, views or special system tables",
+ RelationGetRelationName(onerel))));
relation_close(onerel, lmode);
- CommitTransactionCommand(true);
+ CommitTransactionCommand();
return false;
}
if (isOtherTempNamespace(RelationGetNamespace(onerel)))
{
relation_close(onerel, lmode);
- CommitTransactionCommand(true);
- return true; /* assume no long-lived data in temp tables */
+ CommitTransactionCommand();
+ return true; /* assume no long-lived data in temp
+ * tables */
}
/*
/*
* Complete the transaction and free all temporary memory used.
*/
- CommitTransactionCommand(true);
+ CommitTransactionCommand();
/*
* If the relation has a secondary toast rel, vacuum that too while we
*/
if (toast_relid != InvalidOid)
{
- if (! vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
+ if (!vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
result = false; /* failed to vacuum the TOAST table? */
}
int nindexes,
i;
VRelStats *vacrelstats;
- bool reindex = false;
-
- if (IsIgnoringSystemIndexes() &&
- IsSystemRelation(onerel))
- reindex = true;
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit);
/* Now open all indexes of the relation */
vac_open_indexes(onerel, &nindexes, &Irel);
- if (!Irel)
- reindex = false;
- else if (!RelationGetForm(onerel)->relhasindex)
- reindex = true;
if (nindexes > 0)
vacrelstats->hasindex = true;
-#ifdef NOT_USED
-
- /*
- * reindex in VACUUM is dangerous under WAL. ifdef out until it
- * becomes safe.
- */
- if (reindex)
- {
- vac_close_indexes(nindexes, Irel);
- Irel = (Relation *) NULL;
- activate_indexes_of_a_table(onerel, false);
- }
-#endif /* NOT_USED */
-
/* Clean/scan index relation(s) */
if (Irel != (Relation *) NULL)
{
*/
i = FlushRelationBuffers(onerel, vacrelstats->rel_pages);
if (i < 0)
- elog(ERROR, "VACUUM (full_vacuum_rel): FlushRelationBuffers returned %d",
- i);
+ elog(ERROR, "FlushRelationBuffers returned %d", i);
}
}
-#ifdef NOT_USED
- if (reindex)
- activate_indexes_of_a_table(onerel, true);
-#endif /* NOT_USED */
-
/* update shared free space map with final free space info */
vac_update_fsm(onerel, &fraged_pages, vacrelstats->rel_pages);
VacPage vacpage,
vacpagecopy;
BlockNumber empty_pages,
- new_pages,
- changed_pages,
empty_end_pages;
double num_tuples,
tups_vacuumed,
nkeep,
nunused;
- double free_size,
- usable_free_size;
+ double free_space,
+ usable_free_space;
Size min_tlen = MaxTupleSize;
Size max_tlen = 0;
int i;
vac_init_rusage(&ru0);
relname = RelationGetRelationName(onerel);
- elog(elevel, "--Relation %s.%s--",
- get_namespace_name(RelationGetNamespace(onerel)),
- relname);
+ ereport(elevel,
+ (errmsg("vacuuming \"%s.%s\"",
+ get_namespace_name(RelationGetNamespace(onerel)),
+ relname)));
- empty_pages = new_pages = changed_pages = empty_end_pages = 0;
+ empty_pages = empty_end_pages = 0;
num_tuples = tups_vacuumed = nkeep = nunused = 0;
- free_size = 0;
+ free_space = 0;
nblocks = RelationGetNumberOfBlocks(onerel);
if (PageIsNew(page))
{
- elog(WARNING, "Rel %s: Uninitialized page %u - fixing",
- relname, blkno);
+ ereport(WARNING,
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
- free_size += vacpage->free;
- new_pages++;
+ free_space += vacpage->free;
+ empty_pages++;
empty_end_pages++;
vacpagecopy = copy_vac_page(vacpage);
vpage_insert(vacuum_pages, vacpagecopy);
if (PageIsEmpty(page))
{
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
- free_size += vacpage->free;
+ free_space += vacpage->free;
empty_pages++;
empty_end_pages++;
vacpagecopy = copy_vac_page(vacpage);
/*
* This should not happen, since we hold exclusive
* lock on the relation; shouldn't we raise an error?
+ * (Actually, it can happen in system catalogs, since
+ * we tend to release write lock before commit there.)
*/
- elog(WARNING, "Rel %s: TID %u/%u: InsertTransactionInProgress %u - can't shrink relation",
- relname, blkno, offnum, HeapTupleHeaderGetXmin(tuple.t_data));
+ ereport(NOTICE,
+ (errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
+ relname, blkno, offnum, HeapTupleHeaderGetXmin(tuple.t_data))));
do_shrinking = false;
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
* This should not happen, since we hold exclusive
* lock on the relation; shouldn't we raise an error?
+ * (Actually, it can happen in system catalogs, since
+ * we tend to release write lock before commit there.)
*/
- elog(WARNING, "Rel %s: TID %u/%u: DeleteTransactionInProgress %u - can't shrink relation",
- relname, blkno, offnum, HeapTupleHeaderGetXmax(tuple.t_data));
+ ereport(NOTICE,
+ (errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
+ relname, blkno, offnum, HeapTupleHeaderGetXmax(tuple.t_data))));
do_shrinking = false;
break;
default:
- elog(ERROR, "Unexpected HeapTupleSatisfiesVacuum result");
+ elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
break;
}
*/
if (onerel->rd_rel->relhasoids &&
!OidIsValid(HeapTupleGetOid(&tuple)))
- elog(WARNING, "Rel %s: TID %u/%u: OID IS INVALID. TUPGONE %d.",
- relname, blkno, offnum, (int) tupgone);
+ elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
+ relname, blkno, offnum);
if (tupgone)
{
do_reap = (vacpage->offsets_free > 0);
}
- free_size += vacpage->free;
+ free_space += vacpage->free;
/*
* Add the page to fraged_pages if it has a useful amount of free
vpage_insert(fraged_pages, vacpagecopy);
}
+ /*
+ * Include the page in empty_end_pages if it will be empty after
+ * vacuuming; this is to keep us from using it as a move
+ * destination.
+ */
if (notup)
+ {
+ empty_pages++;
empty_end_pages++;
+ }
else
empty_end_pages = 0;
if (pgchanged)
- {
WriteBuffer(buf);
- changed_pages++;
- }
else
ReleaseBuffer(buf);
}
{
Assert((BlockNumber) fraged_pages->num_pages >= empty_end_pages);
fraged_pages->num_pages -= empty_end_pages;
- usable_free_size = 0;
+ usable_free_space = 0;
for (i = 0; i < fraged_pages->num_pages; i++)
- usable_free_size += fraged_pages->pagedesc[i]->free;
+ usable_free_space += fraged_pages->pagedesc[i]->free;
}
else
{
fraged_pages->num_pages = 0;
- usable_free_size = 0;
+ usable_free_space = 0;
}
/* don't bother to save vtlinks if we will not call repair_frag */
pfree(vtlinks);
}
- elog(elevel, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; "
- "Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, "
- "MaxLen %lu; Re-using: Free/Avail. Space %.0f/%.0f; "
- "EndEmpty/Avail. Pages %u/%u.\n\t%s",
- nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
- new_pages, num_tuples, tups_vacuumed,
- nkeep, vacrelstats->num_vtlinks,
- nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
- free_size, usable_free_size,
- empty_end_pages, fraged_pages->num_pages,
- vac_show_rusage(&ru0));
+ ereport(elevel,
+ (errmsg("\"%s\": found %.0f removable, %.0f nonremovable tuples in %u pages",
+ RelationGetRelationName(onerel),
+ tups_vacuumed, num_tuples, nblocks),
+ errdetail("%.0f dead tuples cannot be removed yet.\n"
+ "Nonremovable tuples range from %lu to %lu bytes long.\n"
+ "There were %.0f unused item pointers.\n"
+ "Total free space (including removable tuples) is %.0f bytes.\n"
+ "%u pages are or will become empty, including %u at the end of the table.\n"
+ "%u pages containing %.0f free bytes are potential move destinations.\n"
+ "%s",
+ nkeep,
+ (unsigned long) min_tlen, (unsigned long) max_tlen,
+ nunused,
+ free_space,
+ empty_pages, empty_end_pages,
+ fraged_pages->num_pages, usable_free_space,
+ vac_show_rusage(&ru0))));
}
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
- elog(ERROR, "Invalid XVAC in tuple header");
+ elog(ERROR, "invalid XVAC in tuple header");
if (keep_tuples == 0)
continue;
if (chain_tuple_moved) /* some chains were moved
/* Quick exit if we have no vtlinks to search in */
if (vacrelstats->vtlinks == NULL)
{
- elog(DEBUG1, "Parent item in update-chain not found - can't continue repair_frag");
+ elog(DEBUG2, "parent item in update-chain not found --- can't continue repair_frag");
break; /* out of walk-along-page loop */
}
* in scan_heap(), but it's not implemented at the
* moment and so we just stop shrinking here.
*/
- elog(DEBUG1, "Child itemid in update-chain marked as unused - can't continue repair_frag");
+ elog(DEBUG2, "child itemid in update-chain marked as unused --- can't continue repair_frag");
chain_move_failed = true;
break; /* out of loop to move to chain end */
}
if (vtlp == NULL)
{
/* see discussion above */
- elog(DEBUG1, "Parent item in update-chain not found - can't continue repair_frag");
+ elog(DEBUG2, "parent item in update-chain not found --- can't continue repair_frag");
chain_move_failed = true;
break; /* out of check-all-items loop */
}
ItemPointerGetOffsetNumber(&(tp.t_self)));
/* this can't happen since we saw tuple earlier: */
if (!ItemIdIsUsed(Pitemid))
- elog(ERROR, "Parent itemid marked as unused");
+ elog(ERROR, "parent itemid marked as unused");
Ptp.t_datamcxt = NULL;
Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
HeapTupleHeaderGetXmin(tp.t_data))))
{
ReleaseBuffer(Pbuf);
- elog(DEBUG1, "Too old parent tuple found - can't continue repair_frag");
+ elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag");
chain_move_failed = true;
break; /* out of check-all-items loop */
}
*/
CacheInvalidateHeapTuple(onerel, &tuple);
- /* NO ELOG(ERROR) TILL CHANGES ARE LOGGED */
+ /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
START_CRIT_SECTION();
tuple.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
LP_USED);
if (newoff == InvalidOffsetNumber)
{
- elog(PANIC, "moving chain: failed to add item with len = %lu to page %u",
+ elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain",
(unsigned long) tuple_len, destvacpage->blkno);
}
newitemid = PageGetItemId(ToPage, newoff);
*/
CacheInvalidateHeapTuple(onerel, &tuple);
- /* NO ELOG(ERROR) TILL CHANGES ARE LOGGED */
+ /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
START_CRIT_SECTION();
/*
if (tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
continue;
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
- elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
+ elog(ERROR, "HEAP_MOVED_IN was not expected");
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
- elog(ERROR, "Invalid XVAC in tuple header (4)");
+ elog(ERROR, "invalid XVAC in tuple header");
/* some chains were moved while */
if (chain_tuple_moved)
{ /* cleaning this page */
}
}
else
- elog(ERROR, "HEAP_MOVED_OFF was expected (2)");
+ elog(ERROR, "HEAP_MOVED_OFF was expected");
}
}
if (!(tuple.t_data->t_infomask & HEAP_MOVED))
elog(ERROR, "HEAP_MOVED_OFF/HEAP_MOVED_IN was expected");
if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
- elog(ERROR, "Invalid XVAC in tuple header (2)");
+ elog(ERROR, "invalid XVAC in tuple header");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
{
tuple.t_data->t_infomask |= HEAP_XMIN_COMMITTED;
}
Assert(num_moved == checked_moved);
- elog(elevel, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s",
- RelationGetRelationName(onerel),
- nblocks, blkno, num_moved,
- vac_show_rusage(&ru0));
+ /*
+ * It'd be cleaner to make this report at the bottom of this routine,
+ * but then the rusage would double-count the second pass of index
+ * vacuuming. So do it here and ignore the relatively small amount of
+ * processing that occurs below.
+ */
+ ereport(elevel,
+ (errmsg("\"%s\": moved %u tuples, truncated %u to %u pages",
+ RelationGetRelationName(onerel),
+ num_moved, nblocks, blkno),
+ errdetail("%s",
+ vac_show_rusage(&ru0))));
/*
* Reflect the motion of system tuples to catalog cache here.
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
- elog(ERROR, "Invalid XVAC in tuple header (3)");
+ elog(ERROR, "invalid XVAC in tuple header");
itemid->lp_flags &= ~LP_USED;
num_tuples++;
}
else
- elog(ERROR, "HEAP_MOVED_OFF was expected (3)");
+ elog(ERROR, "HEAP_MOVED_OFF was expected");
}
}
*/
i = FlushRelationBuffers(onerel, blkno);
if (i < 0)
- elog(ERROR, "VACUUM (repair_frag): FlushRelationBuffers returned %d",
- i);
+ elog(ERROR, "FlushRelationBuffers returned %d", i);
/* truncate relation, if needed */
if (blkno < nblocks)
i = FlushRelationBuffers(onerel, relblocks);
if (i < 0)
- elog(ERROR, "VACUUM (vacuum_heap): FlushRelationBuffers returned %d",
- i);
+ elog(ERROR, "FlushRelationBuffers returned %d", i);
/* truncate relation if there are some empty end-pages */
if (vacuum_pages->empty_end_pages > 0)
{
- elog(elevel, "Rel %s: Pages: %u --> %u.",
- RelationGetRelationName(onerel),
- vacrelstats->rel_pages, relblocks);
+ ereport(elevel,
+ (errmsg("\"%s\": truncated %u to %u pages",
+ RelationGetRelationName(onerel),
+ vacrelstats->rel_pages, relblocks)));
relblocks = smgrtruncate(DEFAULT_SMGR, onerel, relblocks);
onerel->rd_nblocks = relblocks; /* update relcache immediately */
onerel->rd_targblock = InvalidBlockNumber;
stats->num_pages, stats->num_index_tuples,
false);
- elog(elevel, "Index %s: Pages %u, %u deleted, %u free; Tuples %.0f.\n\t%s",
- RelationGetRelationName(indrel),
- stats->num_pages, stats->pages_deleted, stats->pages_free,
- stats->num_index_tuples,
- vac_show_rusage(&ru0));
+ ereport(elevel,
+ (errmsg("index \"%s\" now contains %.0f tuples in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s",
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
/*
* Check for tuple count mismatch. If the index is partial, then it's
{
if (stats->num_index_tuples > num_tuples ||
!vac_is_partial_index(indrel))
- elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f)."
- "\n\tRecreate the index.",
- RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples);
+ ereport(WARNING,
+ (errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples, num_tuples),
+ errhint("Rebuild the index with REINDEX.")));
}
pfree(stats);
stats->num_pages, stats->num_index_tuples,
false);
- elog(elevel, "Index %s: Pages %u, %u deleted, %u free; Tuples %.0f: Deleted %.0f.\n\t%s",
- RelationGetRelationName(indrel),
- stats->num_pages, stats->pages_deleted, stats->pages_free,
- stats->num_index_tuples - keep_tuples, stats->tuples_removed,
- vac_show_rusage(&ru0));
+ ereport(elevel,
+ (errmsg("index \"%s\" now contains %.0f tuples in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index tuples were removed.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
+ "%s",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
/*
* Check for tuple count mismatch. If the index is partial, then it's
{
if (stats->num_index_tuples > num_tuples + keep_tuples ||
!vac_is_partial_index(indrel))
- elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f)."
- "\n\tRecreate the index.",
- RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples);
+ ereport(WARNING,
+ (errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples, num_tuples + keep_tuples),
+ errhint("Rebuild the index with REINDEX.")));
}
pfree(stats);
/*
* We only report pages with free space at least equal to the average
- * request size --- this avoids cluttering FSM with uselessly-small bits
- * of space. Although FSM would discard pages with little free space
- * anyway, it's important to do this prefiltering because (a) it reduces
- * the time spent holding the FSM lock in RecordRelationFreeSpace, and
- * (b) FSM uses the number of pages reported as a statistic for guiding
- * space management. If we didn't threshold our reports the same way
- * vacuumlazy.c does, we'd be skewing that statistic.
+ * request size --- this avoids cluttering FSM with uselessly-small
+ * bits of space. Although FSM would discard pages with little free
+ * space anyway, it's important to do this prefiltering because (a) it
+ * reduces the time spent holding the FSM lock in
+ * RecordRelationFreeSpace, and (b) FSM uses the number of pages
+ * reported as a statistic for guiding space management. If we didn't
+ * threshold our reports the same way vacuumlazy.c does, we'd be
+ * skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
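/*
 * Sketch of the filtering rule described above (the actual loop is
 * elided from this excerpt; "page" is an illustrative name, not from
 * this file):
 *
 *	if (page->free >= threshold)
 *		record the page for RecordRelationFreeSpace();
 *	else
 *		skip it --- too little space to be worth tracking.
 */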
return true;
/* Otherwise, look to see if there's a partial-index predicate */
- return (VARSIZE(&indrel->rd_index->indpred) > VARHDRSZ);
+ if (!heap_attisnull(indrel->rd_indextuple, Anum_pg_index_indpred))
+ return true;
+
+ return false;
}