* relations with finite memory space usage. To do that, we set upper bounds
* on the number of tuples and pages we will keep track of at once.
*
- * We are willing to use at most maintenance_work_mem memory space to keep
- * track of dead tuples. We initially allocate an array of TIDs of that size,
- * with an upper limit that depends on table size (this limit ensures we don't
- * allocate a huge area uselessly for vacuuming small tables). If the array
- * threatens to overflow, we suspend the heap scan phase and perform a pass of
- * index cleanup and page compaction, then resume the heap scan with an empty
- * TID array.
+ * We are willing to use at most maintenance_work_mem (or perhaps
+ * autovacuum_work_mem) memory space to keep track of dead tuples. We
+ * initially allocate an array of TIDs of that size, with an upper limit that
+ * depends on table size (this limit ensures we don't allocate a huge area
+ * uselessly for vacuuming small tables). If the array threatens to overflow,
+ * we suspend the heap scan phase and perform a pass of index cleanup and page
+ * compaction, then resume the heap scan with an empty TID array.
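+ * (A rough worked example, with illustrative numbers: each ItemPointerData
+ * is 6 bytes, so a 64MB work-mem budget can track about 11 million
+ * dead-tuple TIDs before an index-cleanup pass is forced.)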
*
* If we're processing a table with no indexes, we can just vacuum each page
* as we go; there's no need to save up multiple tuples to minimize the number
* of index scans performed.  So we don't use maintenance_work_mem memory for
* the TID array, just enough to hold as many heap tuples as fit on one page.
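+ * (In lazy_space_alloc below, the no-index case can accordingly size the
+ * array as just MaxHeapTuplesPerPage entries.)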
*
*
- * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
double scanned_tuples; /* counts only tuples on scanned pages */
double old_rel_tuples; /* previous value of pg_class.reltuples */
double new_rel_tuples; /* new estimated total # of tuples */
+ double new_dead_tuples; /* new estimated total # of dead tuples */
BlockNumber pages_removed;
double tuples_deleted;
BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
BlockNumber new_rel_pages;
double new_rel_tuples;
BlockNumber new_rel_allvisible;
+ double new_live_tuples;
TransactionId new_frozen_xid;
MultiXactId new_min_multi;
vac_strategy = bstrategy;
vacuum_set_xid_limits(vacstmt->freeze_min_age, vacstmt->freeze_table_age,
+ vacstmt->multixact_freeze_min_age,
+ vacstmt->multixact_freeze_table_age,
onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit, &xidFullScanLimit,
&MultiXactCutoff, &mxactFullScanLimit);
/*
* We request a full scan if either the table's frozen Xid is now older
* than or equal to the requested Xid full-table scan limit; or if the
- * table's minimum MultiXactId is older than or equal to the requested mxid
- * full-table scan limit.
+ * table's minimum MultiXactId is older than or equal to the requested
+ * mxid full-table scan limit.
*/
scan_all = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
xidFullScanLimit);
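+ /*
+  * A sketch of the matching mxid half of this test (assuming the
+  * relminmxid catalog field):
+  *
+  *	scan_all |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
+  *											mxactFullScanLimit);
+  */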
new_min_multi);
/* report results to the stats collector, too */
+ new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
+ if (new_live_tuples < 0)
+ new_live_tuples = 0; /* just in case */
+
pgstat_report_vacuum(RelationGetRelid(onerel),
onerel->rd_rel->relisshared,
- new_rel_tuples);
+ new_live_tuples,
+ vacrelstats->new_dead_tuples);
/* and log the action if appropriate */
if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
ereport(LOG,
(errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"
"pages: %d removed, %d remain\n"
- "tuples: %.0f removed, %.0f remain\n"
+ "tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable\n"
"buffer usage: %d hits, %d misses, %d dirtied\n"
"avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
"system usage: %s",
vacrelstats->rel_pages,
vacrelstats->tuples_deleted,
vacrelstats->new_rel_tuples,
+ vacrelstats->new_dead_tuples,
VacuumPageHit,
VacuumPageMiss,
VacuumPageDirty,
Buffer vmbuffer = InvalidBuffer;
BlockNumber next_not_all_visible_block;
bool skipping_all_visible_blocks;
+ xl_heap_freeze_tuple *frozen;
pg_rusage_init(&ru0);
vacrelstats->latestRemovedXid = InvalidTransactionId;
lazy_space_alloc(vacrelstats, nblocks);
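+ /*
+  * One freeze-plan slot per possible tuple on a page; nfrozen is reset
+  * for each page scanned, so the array is reused throughout the scan.
+  */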
+ frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
/*
* We want to skip pages that don't require vacuuming according to the
bool tupgone,
hastup;
int prev_dead_count;
- OffsetNumber frozen[MaxOffsetNumber];
int nfrozen;
Size freespace;
bool all_visible_according_to_vm;
* which will cause a PANIC. To prevent that, check whether
* the page has been previously WAL-logged, and if not, do that
* now.
- *
- * XXX: It would be nice to use a logging method supporting
- * standard buffers here since log_newpage_buffer() will write
- * the full block instead of omitting the hole.
*/
if (RelationNeedsWAL(onerel) &&
PageGetLSN(page) == InvalidXLogRecPtr)
- log_newpage_buffer(buf);
+ log_newpage_buffer(buf, true);
PageSetAllVisible(page);
visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
* NB: Like with per-tuple hint bits, we can't set the
* PD_ALL_VISIBLE flag if the inserter committed
* asynchronously. See SetHintBits for more info. Check
- * that the HEAP_XMIN_COMMITTED hint bit is set because of
- * that.
+ * that the tuple is hinted xmin-committed because
+ * of that.
*/
if (all_visible)
{
TransactionId xmin;
- if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
+ if (!HeapTupleHeaderXminCommitted(tuple.t_data))
{
all_visible = false;
break;
* Each non-removable tuple must be checked to see if it needs
* freezing. Note we already have exclusive buffer lock.
*/
- if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
- MultiXactCutoff))
- frozen[nfrozen++] = offnum;
+ if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
+ MultiXactCutoff, &frozen[nfrozen]))
+ frozen[nfrozen++].offset = offnum;
}
} /* scan along page */
*/
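+ /*
+  * Freezing was split into a prepare step (heap_prepare_freeze_tuple,
+  * above) and the execute step below so that only the actual page
+  * modification and its WAL logging need to run inside the critical
+  * section.
+  */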
if (nfrozen > 0)
{
+ START_CRIT_SECTION();
+
MarkBufferDirty(buf);
+
+ /* execute collected freezes */
+ for (i = 0; i < nfrozen; i++)
+ {
+ ItemId itemid;
+ HeapTupleHeader htup;
+
+ itemid = PageGetItemId(page, frozen[i].offset);
+ htup = (HeapTupleHeader) PageGetItem(page, itemid);
+
+ heap_execute_freeze_tuple(htup, &frozen[i]);
+ }
+
+ /* Now WAL-log freezing if necessary */
if (RelationNeedsWAL(onerel))
{
XLogRecPtr recptr;
recptr = log_heap_freeze(onerel, buf, FreezeLimit,
- MultiXactCutoff, frozen, nfrozen);
+ frozen, nfrozen);
PageSetLSN(page, recptr);
}
+
+ END_CRIT_SECTION();
}
/*
RecordPageWithFreeSpace(onerel, blkno, freespace);
}
+ pfree(frozen);
+
/* save stats for use later */
vacrelstats->scanned_tuples = num_tuples;
vacrelstats->tuples_deleted = tups_vacuumed;
+ vacrelstats->new_dead_tuples = nkeep;
/* now we can compute the new value for pg_class.reltuples */
vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
/*
* Mark buffer dirty before we write WAL.
- *
- * If checksums are enabled, visibilitymap_set() may log the heap page, so
- * we must mark heap buffer dirty before calling visibilitymap_set().
*/
MarkBufferDirty(buffer);
+ /* XLOG stuff */
+ if (RelationNeedsWAL(onerel))
+ {
+ XLogRecPtr recptr;
+
+ recptr = log_heap_clean(onerel, buffer,
+ NULL, 0, NULL, 0,
+ unused, uncnt,
+ vacrelstats->latestRemovedXid);
+ PageSetLSN(page, recptr);
+ }
+
/*
* Now that we have removed the dead tuples from the page, once again
* check if the page has become all-visible.
visibility_cutoff_xid);
}
- /* XLOG stuff */
- if (RelationNeedsWAL(onerel))
- {
- XLogRecPtr recptr;
-
- recptr = log_heap_clean(onerel, buffer,
- NULL, 0, NULL, 0,
- unused, uncnt,
- vacrelstats->latestRemovedXid);
- PageSetLSN(page, recptr);
- }
-
END_CRIT_SECTION();
return tupindex;
lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
{
long maxtuples;
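+ /* Autovacuum workers honor autovacuum_work_mem when it is set (!= -1). */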
+ int vac_work_mem = IsAutoVacuumWorkerProcess() &&
+ autovacuum_work_mem != -1 ?
+ autovacuum_work_mem : maintenance_work_mem;
if (vacrelstats->hasindex)
{
- maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
+ maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
maxtuples = Min(maxtuples, INT_MAX);
maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
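+ /*
+  * Illustrative cap: with 6-byte ItemPointerData entries, MaxAllocSize
+  * (just under 1GB) limits the array to roughly 179 million TIDs, no
+  * matter how large the work-mem setting is.
+  */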
TransactionId xmin;
/* Check comments in lazy_scan_heap. */
- if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
+ if (!HeapTupleHeaderXminCommitted(tuple.t_data))
{
all_visible = false;
break;