Modify BufferGetPage() to prepare for "snapshot too old" feature
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 390585bd2eba86f25506adea6b3cba363cf76d2a..19201b0bca5c9be7f2a36bb838414345aabe8385 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -3,7 +3,7 @@
  * pruneheap.c
  *       heap page pruning and HOT-chain management code
  *
- * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
 #include "access/heapam_xlog.h"
 #include "access/transam.h"
 #include "access/htup_details.h"
+#include "access/xlog.h"
+#include "catalog/catalog.h"
 #include "miscadmin.h"
 #include "pgstat.h"
 #include "storage/bufmgr.h"
+#include "utils/snapmgr.h"
 #include "utils/rel.h"
 #include "utils/tqual.h"
 
-
 /* Working data for heap_page_prune and subroutines */
 typedef struct
 {
@@ -70,10 +72,34 @@ static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum);
  * or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
  */
 void
-heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
+heap_page_prune_opt(Relation relation, Buffer buffer)
 {
-       Page            page = BufferGetPage(buffer);
+       Page            page = BufferGetPage(buffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
        Size            minfree;
+       TransactionId OldestXmin;
+
+       /*
+        * We can't write WAL in recovery mode, so there's no point trying to
+        * clean the page. The master will likely issue a cleaning WAL record soon
+        * anyway, so this is no particular loss.
+        */
+       if (RecoveryInProgress())
+               return;
+
+       /*
+        * Use the appropriate xmin horizon for this relation. If it's a proper
+        * catalog relation or a user defined, additional, catalog relation, we
+        * need to use the horizon that includes slots, otherwise the data-only
+        * horizon can be used. Note that the toast relation of user defined
+        * relations are *not* considered catalog relations.
+        */
+       if (IsCatalogRelation(relation) ||
+               RelationIsAccessibleInLogicalDecoding(relation))
+               OldestXmin = RecentGlobalXmin;
+       else
+               OldestXmin = RecentGlobalDataXmin;
+
+       Assert(TransactionIdIsValid(OldestXmin));
 
        /*
         * Let's see if we really need pruning.
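The four-argument BufferGetPage() is the point of this commit: code that hands pages to a scan can have the buffer access itself raise the "snapshot too old" error, while internal callers such as pruning opt out. A minimal sketch of the two call shapes; BGP_TEST_FOR_OLD_SNAPSHOT is assumed from the rest of the patch series and does not appear in this file:

    #include "postgres.h"
    #include "storage/bufmgr.h"
    #include "utils/snapmgr.h"

    /* Maintenance code (pruning, WAL replay) never tests the snapshot. */
    static Page
    page_for_maintenance(Buffer buf)
    {
        return BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
    }

    /*
     * A scan returning data to a query passes its snapshot and relation,
     * so a too-old snapshot errors out at page-access time (flag name
     * assumed from the rest of the patch series).
     */
    static Page
    page_for_scan(Buffer buf, Snapshot snapshot, Relation rel)
    {
        return BufferGetPage(buf, snapshot, rel, BGP_TEST_FOR_OLD_SNAPSHOT);
    }

Putting the test inside BufferGetPage() covers every page-level access path once, rather than requiring a separate check in each caller's tuple loop.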
@@ -84,14 +110,6 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
        if (!PageIsPrunable(page, OldestXmin))
                return;
 
-       /*
-        * We can't write WAL in recovery mode, so there's no point trying to
-        * clean the page. The master will likely issue a cleaning WAL record soon
-        * anyway, so this is no particular loss.
-        */
-       if (RecoveryInProgress())
-               return;
-
        /*
         * We prune when a previous UPDATE failed to find enough space on the page
         * for a new tuple version, or when free space falls below the relation's
@@ -100,7 +118,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
         * Checking free space here is questionable since we aren't holding any
         * lock on the buffer; in the worst case we could get a bogus answer. It's
         * unlikely to be *seriously* wrong, though, since reading either pd_lower
-        * or pd_upper is probably atomic.      Avoiding taking a lock seems more
+        * or pd_upper is probably atomic.  Avoiding taking a lock seems more
         * important than sometimes getting a wrong answer in what is after all
         * just a heuristic estimate.
         */
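Concretely, the heuristic described above is two cheap page-header tests made without any buffer lock; the check below this hunk reads, in essence (minfree derives from the relation's fillfactor, with a floor of a tenth of a block):

    minfree = RelationGetTargetPageFreeSpace(relation,
                                             HEAP_DEFAULT_FILLFACTOR);
    minfree = Max(minfree, BLCKSZ / 10);

    if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
    {
        /* worth pruning: try to get a cleanup lock and do the real work */
    }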
@@ -156,7 +174,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
                                bool report_stats, TransactionId *latestRemovedXid)
 {
        int                     ndeleted = 0;
-       Page            page = BufferGetPage(buffer);
+       Page            page = BufferGetPage(buffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
        OffsetNumber offnum,
                                maxoff;
        PruneState      prstate;
@@ -173,7 +191,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
         * initialize the rest of our working state.
         */
        prstate.new_prune_xid = InvalidTransactionId;
-       prstate.latestRemovedXid = InvalidTransactionId;
+       prstate.latestRemovedXid = *latestRemovedXid;
        prstate.nredirected = prstate.ndead = prstate.nunused = 0;
        memset(prstate.marked, 0, sizeof(prstate.marked));
 
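Seeding prstate.latestRemovedXid from *latestRemovedXid instead of InvalidTransactionId makes the out-parameter an accumulator across calls: a caller that prunes many pages keeps one running maximum. A hedged sketch of that calling pattern (the loop and variable names are illustrative, not from this file):

    TransactionId latestRemovedXid = InvalidTransactionId;
    BlockNumber   blkno;
    int           ndeleted = 0;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        Buffer      buf = ReadBuffer(rel, blkno);

        LockBufferForCleanup(buf);
        ndeleted += heap_page_prune(rel, buf, OldestXmin,
                                    false,  /* report_stats */
                                    &latestRemovedXid);
        UnlockReleaseBuffer(buf);
    }
    /* latestRemovedXid now reflects every page pruned above */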
@@ -243,8 +261,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
                                                                        prstate.nowunused, prstate.nunused,
                                                                        prstate.latestRemovedXid);
 
-                       PageSetLSN(BufferGetPage(buffer), recptr);
-                       PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
+                       PageSetLSN(BufferGetPage(buffer, NULL, NULL,
+                                                                        BGP_NO_SNAPSHOT_TEST), recptr);
                }
        }
        else
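The WAL-logged branch follows the usual ordering: modify the page inside a critical section, dirty the buffer, emit the record, then stamp the page with the record's LSN. PageSetTLI() disappears because page headers no longer store a timeline ID (that header field was repurposed when data checksums were added). In outline:

    /* standard write-ahead-log ordering for a page modification */
    START_CRIT_SECTION();

    /* ... apply the changes to the page ... */
    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(relation))
    {
        XLogRecPtr  recptr;

        recptr = log_heap_clean(relation, buffer,
                                redirected, nredirected,
                                nowdead, ndead,
                                nowunused, nunused,
                                latestRemovedXid);
        PageSetLSN(page, recptr);   /* page must not hit disk before WAL */
    }

    END_CRIT_SECTION();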
@@ -263,7 +281,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
                {
                        ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
                        PageClearFull(page);
-                       SetBufferCommitInfoNeedsSave(buffer);
+                       MarkBufferDirtyHint(buffer, true);
                }
        }
 
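In the branch above nothing was pruned, so updating pd_prune_xid is handled like a hint bit: it is safe to lose in a crash, and no WAL record is written for it. MarkBufferDirtyHint() replaces the old SetBufferCommitInfoNeedsSave(); its second argument declares a standard page layout, which lets any checksum-driven full-page image omit the free-space hole. The same pattern, annotated:

    /* hint-style change: crash-safe to lose, so no WAL of its own */
    ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
    PageClearFull(page);

    /*
     * buffer_std = true: standard page layout, so a full-page image
     * (needed only when data checksums or wal_log_hints are on) can
     * skip the pd_lower..pd_upper hole.
     */
    MarkBufferDirtyHint(buffer, true);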
@@ -316,8 +334,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
  * OldestXmin is the cutoff XID used to identify dead tuples.
  *
  * We don't actually change the page here, except perhaps for hint-bit updates
- * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
- * prstate showing the changes to be made.     Items to be redirected are added
+ * caused by HeapTupleSatisfiesVacuum.  We just add entries to the arrays in
+ * prstate showing the changes to be made.  Items to be redirected are added
  * to the redirected[] array (two entries per redirection); items to be set to
  * LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
  * state are added to nowunused[].
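The arrays are flat work lists consumed later inside the critical section; a redirect takes two consecutive slots (old offset, new offset), the other kinds one. A sketch of the recording step, modeled on heap_prune_record_redirect() (the real helpers also set prstate->marked[] so no item is recorded twice):

    static void
    record_redirect(PruneState *prstate,
                    OffsetNumber offnum, OffsetNumber rdoffnum)
    {
        Assert(prstate->nredirected < MaxHeapTuplesPerPage);
        prstate->redirected[prstate->nredirected * 2] = offnum;
        prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
        prstate->nredirected++;
    }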
@@ -330,7 +348,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                                 PruneState *prstate)
 {
        int                     ndeleted = 0;
-       Page            dp = (Page) BufferGetPage(buffer);
+       Page            dp = BufferGetPage(buffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
        TransactionId priorXmax = InvalidTransactionId;
        ItemId          rootlp;
        HeapTupleHeader htup;
@@ -340,6 +358,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
        OffsetNumber chainitems[MaxHeapTuplesPerPage];
        int                     nchain = 0,
                                i;
+       HeapTupleData tup;
+
+       tup.t_tableOid = RelationGetRelid(relation);
 
        rootlp = PageGetItemId(dp, rootoffnum);
 
@@ -349,6 +370,11 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
        if (ItemIdIsNormal(rootlp))
        {
                htup = (HeapTupleHeader) PageGetItem(dp, rootlp);
+
+               tup.t_data = htup;
+               tup.t_len = ItemIdGetLength(rootlp);
+               ItemPointerSet(&(tup.t_self), BufferGetBlockNumber(buffer), rootoffnum);
+
                if (HeapTupleHeaderIsHeapOnly(htup))
                {
                        /*
@@ -359,7 +385,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                         * We need this primarily to handle aborted HOT updates, that is,
                         * XMIN_INVALID heap-only tuples.  Those might not be linked to by
                         * any chain, since the parent tuple might be re-updated before
-                        * any pruning occurs.  So we have to be able to reap them
+                        * any pruning occurs.  So we have to be able to reap them
                         * separately from chain-pruning.  (Note that
                         * HeapTupleHeaderIsHotUpdated will never return true for an
                         * XMIN_INVALID tuple, so this code will work even when there were
@@ -369,7 +395,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                         * either here or while following a chain below.  Whichever path
                         * gets there first will mark the tuple unused.
                         */
-                       if (HeapTupleSatisfiesVacuum(htup, OldestXmin, buffer)
+                       if (HeapTupleSatisfiesVacuum(&tup, OldestXmin, buffer)
                                == HEAPTUPLE_DEAD && !HeapTupleHeaderIsHotUpdated(htup))
                        {
                                heap_prune_record_unused(prstate, rootoffnum);
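All of the HeapTupleData bookkeeping added in this function exists because HeapTupleSatisfiesVacuum() now takes a full HeapTuple rather than a bare header, so each check must be preceded by filling in t_tableOid, t_data, t_len and t_self. The calling convention, condensed into a sketch:

    /* condensed sketch of the new visibility-check convention */
    static HTSV_Result
    check_tuple(Relation relation, Buffer buffer, Page dp,
                OffsetNumber offnum, TransactionId OldestXmin)
    {
        ItemId          lp = PageGetItemId(dp, offnum);
        HeapTupleData   tup;

        tup.t_tableOid = RelationGetRelid(relation);
        tup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
        tup.t_len = ItemIdGetLength(lp);
        ItemPointerSet(&tup.t_self, BufferGetBlockNumber(buffer), offnum);

        return HeapTupleSatisfiesVacuum(&tup, OldestXmin, buffer);
    }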
@@ -432,6 +458,10 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                Assert(ItemIdIsNormal(lp));
                htup = (HeapTupleHeader) PageGetItem(dp, lp);
 
+               tup.t_data = htup;
+               tup.t_len = ItemIdGetLength(lp);
+               ItemPointerSet(&(tup.t_self), BufferGetBlockNumber(buffer), offnum);
+
                /*
                 * Check the tuple XMIN against prior XMAX, if any
                 */
@@ -449,7 +479,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                 */
                tupdead = recent_dead = false;
 
-               switch (HeapTupleSatisfiesVacuum(htup, OldestXmin, buffer))
+               switch (HeapTupleSatisfiesVacuum(&tup, OldestXmin, buffer))
                {
                        case HEAPTUPLE_DEAD:
                                tupdead = true;
@@ -463,7 +493,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                                 * that the page is reconsidered for pruning in future.
                                 */
                                heap_prune_record_prunable(prstate,
-                                                                                  HeapTupleHeaderGetXmax(htup));
+                                                                                  HeapTupleHeaderGetUpdateXid(htup));
                                break;
 
                        case HEAPTUPLE_DELETE_IN_PROGRESS:
@@ -473,7 +503,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                                 * that the page is reconsidered for pruning in future.
                                 */
                                heap_prune_record_prunable(prstate,
-                                                                                  HeapTupleHeaderGetXmax(htup));
+                                                                                  HeapTupleHeaderGetUpdateXid(htup));
                                break;
 
                        case HEAPTUPLE_LIVE:
@@ -521,7 +551,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
                           BufferGetBlockNumber(buffer));
                offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
-               priorXmax = HeapTupleHeaderGetXmax(htup);
+               priorXmax = HeapTupleHeaderGetUpdateXid(htup);
        }
 
        /*
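The loop above depends on the HOT-chain invariants: a HOT update's t_ctid points at its successor on the same page, and the successor's xmin equals the updater's XID, which priorXmax carries between iterations so a recycled line pointer is not mistaken for a chain member. The skeleton of the walk (redirect and unused-item handling omitted):

    OffsetNumber    offnum = rootoffnum;
    TransactionId   priorXmax = InvalidTransactionId;

    for (;;)
    {
        ItemId          lp = PageGetItemId(dp, offnum);
        HeapTupleHeader htup = (HeapTupleHeader) PageGetItem(dp, lp);

        if (TransactionIdIsValid(priorXmax) &&
            !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
            break;              /* not part of this chain after all */

        /* ... classify the tuple with HeapTupleSatisfiesVacuum() ... */

        if (!HeapTupleHeaderIsHotUpdated(htup))
            break;              /* chain ends here */

        /* advance: the successor is on the same page by HOT's definition */
        offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
        priorXmax = HeapTupleHeaderGetUpdateXid(htup);
    }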
@@ -546,7 +576,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
 
                /*
                 * If the root entry had been a normal tuple, we are deleting it, so
-                * count it in the result.      But changing a redirect (even to DEAD
+                * count it in the result.  But changing a redirect (even to DEAD
                 * state) doesn't count.
                 */
                if (ItemIdIsNormal(rootlp))
@@ -635,7 +665,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
  * buffer, and is inside a critical section.
  *
  * This is split out because it is also used by heap_xlog_clean()
- * to replay the WAL record when needed after a crash. Note that the
+ * to replay the WAL record when needed after a crash.  Note that the
  * arguments are identical to those of log_heap_clean().
  */
 void
@@ -644,7 +674,8 @@ heap_page_prune_execute(Buffer buffer,
                                                OffsetNumber *nowdead, int ndead,
                                                OffsetNumber *nowunused, int nunused)
 {
-       Page            page = (Page) BufferGetPage(buffer);
+       Page            page = BufferGetPage(buffer, NULL, NULL,
+                                                                        BGP_NO_SNAPSHOT_TEST);
        OffsetNumber *offnum;
        int                     i;
 
@@ -746,7 +777,7 @@ heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
 
                        /* Set up to scan the HOT-chain */
                        nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
-                       priorXmax = HeapTupleHeaderGetXmax(htup);
+                       priorXmax = HeapTupleHeaderGetUpdateXid(htup);
                }
                else
                {
@@ -787,7 +818,7 @@ heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
                                break;
 
                        nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
-                       priorXmax = HeapTupleHeaderGetXmax(htup);
+                       priorXmax = HeapTupleHeaderGetUpdateXid(htup);
                }
        }
 }