]> granicus.if.org Git - postgresql/blobdiff - src/backend/commands/vacuumlazy.c
Separate multixact freezing parameters from xid's
[postgresql] / src / backend / commands / vacuumlazy.c
index fe2d9e78fa8ef62ec35780ec4f266292bf3fabdb..d77892ee7f8ce0f1cfd5ca67b5f0d4e0b2b5e37d 100644 (file)
  * relations with finite memory space usage.  To do that, we set upper bounds
  * on the number of tuples and pages we will keep track of at once.
  *
- * We are willing to use at most maintenance_work_mem memory space to keep
- * track of dead tuples.  We initially allocate an array of TIDs of that size,
- * with an upper limit that depends on table size (this limit ensures we don't
- * allocate a huge area uselessly for vacuuming small tables). If the array
- * threatens to overflow, we suspend the heap scan phase and perform a pass of
- * index cleanup and page compaction, then resume the heap scan with an empty
- * TID array.
+ * We are willing to use at most maintenance_work_mem (or perhaps
+ * autovacuum_work_mem) memory space to keep track of dead tuples.  We
+ * initially allocate an array of TIDs of that size, with an upper limit that
+ * depends on table size (this limit ensures we don't allocate a huge area
+ * uselessly for vacuuming small tables).  If the array threatens to overflow,
+ * we suspend the heap scan phase and perform a pass of index cleanup and page
+ * compaction, then resume the heap scan with an empty TID array.
  *
  * If we're processing a table with no indexes, we can just vacuum each page
  * as we go; there's no need to save up multiple tuples to minimize the number
@@ -24,7 +24,7 @@
  * the TID array, just enough to hold as many heap tuples as fit on one page.
  *
  *
- * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
@@ -106,6 +106,7 @@ typedef struct LVRelStats
        double          scanned_tuples; /* counts only tuples on scanned pages */
        double          old_rel_tuples; /* previous value of pg_class.reltuples */
        double          new_rel_tuples; /* new estimated total # of tuples */
+       double          new_dead_tuples;        /* new estimated total # of dead tuples */
        BlockNumber pages_removed;
        double          tuples_deleted;
        BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
@@ -185,6 +186,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
        BlockNumber new_rel_pages;
        double          new_rel_tuples;
        BlockNumber new_rel_allvisible;
+       double          new_live_tuples;
        TransactionId new_frozen_xid;
        MultiXactId new_min_multi;
 
@@ -203,6 +205,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
        vac_strategy = bstrategy;
 
        vacuum_set_xid_limits(vacstmt->freeze_min_age, vacstmt->freeze_table_age,
+                                                 vacstmt->multixact_freeze_min_age,
+                                                 vacstmt->multixact_freeze_table_age,
                                                  onerel->rd_rel->relisshared,
                                                  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
                                                  &MultiXactCutoff, &mxactFullScanLimit);
@@ -210,8 +214,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
        /*
         * We request a full scan if either the table's frozen Xid is now older
         * than or equal to the requested Xid full-table scan limit; or if the
-        * table's minimum MultiXactId is older than or equal to the requested mxid
-        * full-table scan limit.
+        * table's minimum MultiXactId is older than or equal to the requested
+        * mxid full-table scan limit.
         */
        scan_all = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
                                                                                         xidFullScanLimit);
@@ -307,9 +311,14 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
                                                new_min_multi);
 
        /* report results to the stats collector, too */
+       new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
+       if (new_live_tuples < 0)
+               new_live_tuples = 0;    /* just in case */
+
        pgstat_report_vacuum(RelationGetRelid(onerel),
                                                 onerel->rd_rel->relisshared,
-                                                new_rel_tuples);
+                                                new_live_tuples,
+                                                vacrelstats->new_dead_tuples);
 
        /* and log the action if appropriate */
        if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
@@ -334,7 +343,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
                        ereport(LOG,
                                        (errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"
                                                        "pages: %d removed, %d remain\n"
-                                                       "tuples: %.0f removed, %.0f remain\n"
+                                                       "tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable\n"
                                                        "buffer usage: %d hits, %d misses, %d dirtied\n"
                                          "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
                                                        "system usage: %s",
@@ -346,6 +355,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
                                                        vacrelstats->rel_pages,
                                                        vacrelstats->tuples_deleted,
                                                        vacrelstats->new_rel_tuples,
+                                                       vacrelstats->new_dead_tuples,
                                                        VacuumPageHit,
                                                        VacuumPageMiss,
                                                        VacuumPageDirty,
@@ -424,6 +434,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
        Buffer          vmbuffer = InvalidBuffer;
        BlockNumber next_not_all_visible_block;
        bool            skipping_all_visible_blocks;
+       xl_heap_freeze_tuple *frozen;
 
        pg_rusage_init(&ru0);
 
@@ -446,6 +457,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
        vacrelstats->latestRemovedXid = InvalidTransactionId;
 
        lazy_space_alloc(vacrelstats, nblocks);
+       frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
 
        /*
         * We want to skip pages that don't require vacuuming according to the
@@ -500,7 +512,6 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                bool            tupgone,
                                        hastup;
                int                     prev_dead_count;
-               OffsetNumber frozen[MaxOffsetNumber];
                int                     nfrozen;
                Size            freespace;
                bool            all_visible_according_to_vm;
@@ -699,14 +710,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                                 * which will cause a PANIC.  To prevent that, check whether
                                 * the page has been previously WAL-logged, and if not, do that
                                 * now.
-                                *
-                                * XXX: It would be nice to use a logging method supporting
-                                * standard buffers here since log_newpage_buffer() will write
-                                * the full block instead of omitting the hole.
                                 */
                                if (RelationNeedsWAL(onerel) &&
                                        PageGetLSN(page) == InvalidXLogRecPtr)
-                                       log_newpage_buffer(buf);
+                                       log_newpage_buffer(buf, true);
 
                                PageSetAllVisible(page);
                                visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
@@ -826,14 +833,14 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                                         * NB: Like with per-tuple hint bits, we can't set the
                                         * PD_ALL_VISIBLE flag if the inserter committed
                                         * asynchronously. See SetHintBits for more info. Check
-                                        * that the HEAP_XMIN_COMMITTED hint bit is set because of
-                                        * that.
+                                        * that the tuple is hinted xmin-committed because
+                                        * of that.
                                         */
                                        if (all_visible)
                                        {
                                                TransactionId xmin;
 
-                                               if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
+                                               if (!HeapTupleHeaderXminCommitted(tuple.t_data))
                                                {
                                                        all_visible = false;
                                                        break;
@@ -894,9 +901,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                                 * Each non-removable tuple must be checked to see if it needs
                                 * freezing.  Note we already have exclusive buffer lock.
                                 */
-                               if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
-                                                                         MultiXactCutoff))
-                                       frozen[nfrozen++] = offnum;
+                               if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
+                                                                                 MultiXactCutoff, &frozen[nfrozen]))
+                                       frozen[nfrozen++].offset = offnum;
                        }
                }                                               /* scan along page */
 
@@ -907,15 +914,33 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                 */
                if (nfrozen > 0)
                {
+                       START_CRIT_SECTION();
+
                        MarkBufferDirty(buf);
+
+                       /* execute collected freezes */
+                       for (i = 0; i < nfrozen; i++)
+                       {
+                               ItemId          itemid;
+                               HeapTupleHeader htup;
+
+                               itemid = PageGetItemId(page, frozen[i].offset);
+                               htup = (HeapTupleHeader) PageGetItem(page, itemid);
+
+                               heap_execute_freeze_tuple(htup, &frozen[i]);
+                       }
+
+                       /* Now WAL-log freezing if necessary */
                        if (RelationNeedsWAL(onerel))
                        {
                                XLogRecPtr      recptr;
 
                                recptr = log_heap_freeze(onerel, buf, FreezeLimit,
-                                                                                MultiXactCutoff, frozen, nfrozen);
+                                                                                frozen, nfrozen);
                                PageSetLSN(page, recptr);
                        }
+
+                       END_CRIT_SECTION();
                }
 
                /*
@@ -1016,9 +1041,12 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                        RecordPageWithFreeSpace(onerel, blkno, freespace);
        }
 
+       pfree(frozen);
+
        /* save stats for use later */
        vacrelstats->scanned_tuples = num_tuples;
        vacrelstats->tuples_deleted = tups_vacuumed;
+       vacrelstats->new_dead_tuples = nkeep;
 
        /* now we can compute the new value for pg_class.reltuples */
        vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
@@ -1186,12 +1214,21 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 
        /*
         * Mark buffer dirty before we write WAL.
-        *
-        * If checksums are enabled, visibilitymap_set() may log the heap page, so
-        * we must mark heap buffer dirty before calling visibilitymap_set().
         */
        MarkBufferDirty(buffer);
 
+       /* XLOG stuff */
+       if (RelationNeedsWAL(onerel))
+       {
+               XLogRecPtr      recptr;
+
+               recptr = log_heap_clean(onerel, buffer,
+                                                               NULL, 0, NULL, 0,
+                                                               unused, uncnt,
+                                                               vacrelstats->latestRemovedXid);
+               PageSetLSN(page, recptr);
+       }
+
        /*
         * Now that we have removed the dead tuples from the page, once again
         * check if the page has become all-visible.
@@ -1205,18 +1242,6 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                                                  visibility_cutoff_xid);
        }
 
-       /* XLOG stuff */
-       if (RelationNeedsWAL(onerel))
-       {
-               XLogRecPtr      recptr;
-
-               recptr = log_heap_clean(onerel, buffer,
-                                                               NULL, 0, NULL, 0,
-                                                               unused, uncnt,
-                                                               vacrelstats->latestRemovedXid);
-               PageSetLSN(page, recptr);
-       }
-
        END_CRIT_SECTION();
 
        return tupindex;
@@ -1603,10 +1628,13 @@ static void
 lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
 {
        long            maxtuples;
+       int                     vac_work_mem =  IsAutoVacuumWorkerProcess() &&
+                                                                       autovacuum_work_mem != -1 ?
+                                                               autovacuum_work_mem : maintenance_work_mem;
 
        if (vacrelstats->hasindex)
        {
-               maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
+               maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
                maxtuples = Min(maxtuples, INT_MAX);
                maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
 
@@ -1757,7 +1785,7 @@ heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cut
                                        TransactionId xmin;
 
                                        /* Check comments in lazy_scan_heap. */
-                                       if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
+                                       if (!HeapTupleHeaderXminCommitted(tuple.t_data))
                                        {
                                                all_visible = false;
                                                break;