/*-------------------------------------------------------------------------
 *
 * pruneheap.c
 *    heap page pruning and HOT-chain management code
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/heap/pruneheap.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/transam.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/tqual.h"


/* Working data for heap_page_prune and subroutines */
typedef struct
{
    TransactionId new_prune_xid;    /* new prune hint value for page */
    TransactionId latestRemovedXid; /* latest xid to be removed by this
                                     * prune */
    int         nredirected;        /* numbers of entries in arrays below */
    int         ndead;
    int         nunused;
    /* arrays that accumulate indexes of items to be changed */
    OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
    OffsetNumber nowdead[MaxHeapTuplesPerPage];
    OffsetNumber nowunused[MaxHeapTuplesPerPage];
    /* marked[i] is TRUE if item i is entered in one of the above arrays */
    bool        marked[MaxHeapTuplesPerPage + 1];
} PruneState;
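
/*
 * Layout note: redirected[] holds pairs of offsets, so with nredirected == 2
 * its contents are {from1, to1, from2, to2} (see heap_prune_record_redirect
 * below), while nowdead[] and nowunused[] hold one offset per entry.
 */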

/* Local functions */
static int heap_prune_chain(Relation relation, Buffer buffer,
                 OffsetNumber rootoffnum,
                 TransactionId OldestXmin,
                 PruneState *prstate);
static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
static void heap_prune_record_redirect(PruneState *prstate,
                           OffsetNumber offnum, OffsetNumber rdoffnum);
static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum);
static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum);


/*
 * Optionally prune and repair fragmentation in the specified page.
 *
 * This is an opportunistic function.  It will perform housekeeping
 * only if the page heuristically looks like a candidate for pruning and we
 * can acquire a buffer cleanup lock without blocking.
 *
 * Note: this is called quite often.  It's important that it fall out quickly
 * if there's not any use in pruning.
 *
 * Caller must have a pin on the buffer, and must *not* have a lock on it.
 *
 * OldestXmin is the cutoff XID used to distinguish whether tuples are DEAD
 * or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
 */
void
heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
{
    Page        page = BufferGetPage(buffer);
    Size        minfree;

    /*
     * Let's see if we really need pruning.
     *
     * Forget it if page is not hinted to contain something prunable that's
     * older than OldestXmin.
     */
    if (!PageIsPrunable(page, OldestXmin))
        return;

    /*
     * We can't write WAL in recovery mode, so there's no point trying to
     * clean the page. The master will likely issue a cleaning WAL record
     * soon anyway, so this is no particular loss.
     */
    if (RecoveryInProgress())
        return;

    /*
     * We prune when a previous UPDATE failed to find enough space on the
     * page for a new tuple version, or when free space falls below the
     * relation's fill-factor target (but not less than 10%).
     *
     * Checking free space here is questionable since we aren't holding any
     * lock on the buffer; in the worst case we could get a bogus answer.
     * It's unlikely to be *seriously* wrong, though, since reading either
     * pd_lower or pd_upper is probably atomic.  Avoiding taking a lock seems
     * more important than sometimes getting a wrong answer in what is after
     * all just a heuristic estimate.
     */
    minfree = RelationGetTargetPageFreeSpace(relation,
                                             HEAP_DEFAULT_FILLFACTOR);
    minfree = Max(minfree, BLCKSZ / 10);

    if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
    {
        /* OK, try to get exclusive buffer lock */
        if (!ConditionalLockBufferForCleanup(buffer))
            return;

        /*
         * Now that we have buffer lock, get accurate information about the
         * page's free space, and recheck the heuristic about whether to
         * prune. (We needn't recheck PageIsPrunable, since no one else could
         * have pruned while we hold pin.)
         */
        if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
        {
            TransactionId ignore = InvalidTransactionId;    /* return value not
                                                             * needed */

            /* OK to prune */
            (void) heap_page_prune(relation, buffer, OldestXmin, true, &ignore);
        }

        /* And release buffer lock */
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    }
}
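
/*
 * Typical call pattern (a sketch, not verbatim from any caller): heapam's
 * page-read paths invoke this opportunistically right after pinning a page
 * and before locking it, roughly like
 *
 *      buffer = ReadBuffer(relation, blkno);
 *      heap_page_prune_opt(relation, buffer, RecentGlobalXmin);
 *      LockBuffer(buffer, BUFFER_LOCK_SHARE);
 *
 * where RecentGlobalXmin serves as the OldestXmin cutoff.
 */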

/*
 * Prune and repair fragmentation in the specified page.
 *
 * Caller must have pin and buffer cleanup lock on the page.
 *
 * OldestXmin is the cutoff XID used to distinguish whether tuples are DEAD
 * or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
 *
 * If report_stats is true then we send the number of reclaimed heap-only
 * tuples to pgstats.  (This must be FALSE during vacuum, since vacuum will
 * send its own new total to pgstats, and we don't want this delta applied
 * on top of that.)
 *
 * Returns the number of tuples deleted from the page and sets
 * latestRemovedXid.
 */
int
heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
                bool report_stats, TransactionId *latestRemovedXid)
{
    int         ndeleted = 0;
    Page        page = BufferGetPage(buffer);
    OffsetNumber offnum,
                maxoff;
    PruneState  prstate;

    /*
     * Our strategy is to scan the page and make lists of items to change,
     * then apply the changes within a critical section.  This keeps as much
     * logic as possible out of the critical section, and also ensures that
     * WAL replay will work the same as the normal case.
     *
     * First, initialize the new pd_prune_xid value to zero (indicating no
     * prunable tuples).  If we find any tuples which may soon become
     * prunable, we will save the lowest relevant XID in new_prune_xid. Also
     * initialize the rest of our working state.
     */
    prstate.new_prune_xid = InvalidTransactionId;
    prstate.latestRemovedXid = InvalidTransactionId;
    prstate.nredirected = prstate.ndead = prstate.nunused = 0;
    memset(prstate.marked, 0, sizeof(prstate.marked));

    /* Scan the page */
    maxoff = PageGetMaxOffsetNumber(page);
    for (offnum = FirstOffsetNumber;
         offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
        ItemId      itemid;

        /* Ignore items already processed as part of an earlier chain */
        if (prstate.marked[offnum])
            continue;

        /* Nothing to do if slot is empty or already dead */
        itemid = PageGetItemId(page, offnum);
        if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
            continue;

        /* Process this item or chain of items */
        ndeleted += heap_prune_chain(relation, buffer, offnum,
                                     OldestXmin,
                                     &prstate);
    }

    /* Any error while applying the changes is critical */
    START_CRIT_SECTION();

    /* Have we found any prunable items? */
    if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
    {
        /*
         * Apply the planned item changes, then repair page fragmentation,
         * and update the page's hint bit about whether it has free line
         * pointers.
         */
        heap_page_prune_execute(buffer,
                                prstate.redirected, prstate.nredirected,
                                prstate.nowdead, prstate.ndead,
                                prstate.nowunused, prstate.nunused);

        /*
         * Update the page's pd_prune_xid field to either zero, or the lowest
         * XID of any soon-prunable tuple.
         */
        ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;

        /*
         * Also clear the "page is full" flag, since there's no point in
         * repeating the prune/defrag process until something else happens to
         * the page.
         */
        PageClearFull(page);

        MarkBufferDirty(buffer);

        /*
         * Emit a WAL HEAP_CLEAN record showing what we did
         */
        if (RelationNeedsWAL(relation))
        {
            XLogRecPtr  recptr;

            recptr = log_heap_clean(relation, buffer,
                                    prstate.redirected, prstate.nredirected,
                                    prstate.nowdead, prstate.ndead,
                                    prstate.nowunused, prstate.nunused,
                                    prstate.latestRemovedXid);

            PageSetLSN(BufferGetPage(buffer), recptr);
            PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
        }
    }
    else
    {
        /*
         * If we didn't prune anything, but have found a new value for the
         * pd_prune_xid field, update it and mark the buffer dirty. This is
         * treated as a non-WAL-logged hint.
         *
         * Also clear the "page is full" flag if it is set, since there's no
         * point in repeating the prune/defrag process until something else
         * happens to the page.
         */
        if (((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
            PageIsFull(page))
        {
            ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
            PageClearFull(page);
            SetBufferCommitInfoNeedsSave(buffer);
        }
    }

    END_CRIT_SECTION();

    /*
     * If requested, report the number of tuples reclaimed to pgstats. This
     * is ndeleted minus ndead, because we don't want to count a now-DEAD
     * root item as a deletion for this purpose.
     */
    if (report_stats && ndeleted > prstate.ndead)
        pgstat_update_heap_dead_tuples(relation, ndeleted - prstate.ndead);

    *latestRemovedXid = prstate.latestRemovedXid;

    /*
     * XXX Should we update the FSM information of this page ?
     *
     * There are two schools of thought here. We may not want to update FSM
     * information so that the page is not used for unrelated UPDATEs/INSERTs
     * and any free space in this page will remain available for further
     * UPDATEs in *this* page, thus improving chances for doing HOT updates.
     *
     * But for a large table and where a page does not receive further
     * UPDATEs for a long time, we might waste this space by not updating the
     * FSM information. The relation may get extended and fragmented further.
     *
     * One possibility is to leave "fillfactor" worth of space in this page
     * and update FSM with the remaining space.
     *
     * In any case, the current FSM implementation doesn't accept
     * one-page-at-a-time updates, so this is all academic for now.
     */

    return ndeleted;
}
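
/*
 * Usage sketch (hedged; mirrors how VACUUM drives this function): lazy
 * vacuum calls heap_page_prune with report_stats = false and collects the
 * returned latestRemovedXid, along the lines of
 *
 *      TransactionId latestRemovedXid = InvalidTransactionId;
 *
 *      ndeleted = heap_page_prune(onerel, buf, OldestXmin,
 *                                 false, &latestRemovedXid);
 *
 * while opportunistic pruning goes through heap_page_prune_opt above, which
 * passes report_stats = true.
 */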


/*
 * Prune specified item pointer or a HOT chain originating at that item.
 *
 * If the item is an index-referenced tuple (i.e. not a heap-only tuple),
 * the HOT chain is pruned by removing all DEAD tuples at the start of the
 * HOT chain.  We also prune any RECENTLY_DEAD tuples preceding a DEAD
 * tuple.  This is OK because a RECENTLY_DEAD tuple preceding a DEAD tuple
 * is really DEAD; the OldestXmin test is just too coarse to detect it.
 *
 * The root line pointer is redirected to the tuple immediately after the
 * latest DEAD tuple.  If all tuples in the chain are DEAD, the root line
 * pointer is marked LP_DEAD.  (This includes the case of a DEAD simple
 * tuple, which we treat as a chain of length 1.)
 *
 * OldestXmin is the cutoff XID used to identify dead tuples.
 *
 * We don't actually change the page here, except perhaps for hint-bit
 * updates caused by HeapTupleSatisfiesVacuum.  We just add entries to the
 * arrays in prstate showing the changes to be made.  Items to be redirected
 * are added to the redirected[] array (two entries per redirection); items
 * to be set to LP_DEAD state are added to nowdead[]; and items to be set to
 * LP_UNUSED state are added to nowunused[].
 *
 * Returns the number of tuples (to be) deleted from the page.
 */
static int
heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
                 TransactionId OldestXmin,
                 PruneState *prstate)
{
    int         ndeleted = 0;
    Page        dp = (Page) BufferGetPage(buffer);
    TransactionId priorXmax = InvalidTransactionId;
    ItemId      rootlp;
    HeapTupleHeader htup;
    OffsetNumber latestdead = InvalidOffsetNumber,
                maxoff = PageGetMaxOffsetNumber(dp),
                offnum;
    OffsetNumber chainitems[MaxHeapTuplesPerPage];
    int         nchain = 0,
                i;

    rootlp = PageGetItemId(dp, rootoffnum);

    /*
     * If it's a heap-only tuple, then it is not the start of a HOT chain.
     */
    if (ItemIdIsNormal(rootlp))
    {
        htup = (HeapTupleHeader) PageGetItem(dp, rootlp);
        if (HeapTupleHeaderIsHeapOnly(htup))
        {
            /*
             * If the tuple is DEAD and doesn't chain to anything else, mark
             * it unused immediately.  (If it does chain, we can only remove
             * it as part of pruning its chain.)
             *
             * We need this primarily to handle aborted HOT updates, that is,
             * XMIN_INVALID heap-only tuples.  Those might not be linked to
             * by any chain, since the parent tuple might be re-updated
             * before any pruning occurs.  So we have to be able to reap them
             * separately from chain-pruning.  (Note that
             * HeapTupleHeaderIsHotUpdated will never return true for an
             * XMIN_INVALID tuple, so this code will work even when there
             * were sequential updates within the aborted transaction.)
             *
             * Note that we might first arrive at a dead heap-only tuple
             * either here or while following a chain below.  Whichever path
             * gets there first will mark the tuple unused.
             */
            if (HeapTupleSatisfiesVacuum(htup, OldestXmin, buffer)
                == HEAPTUPLE_DEAD && !HeapTupleHeaderIsHotUpdated(htup))
            {
                heap_prune_record_unused(prstate, rootoffnum);
                HeapTupleHeaderAdvanceLatestRemovedXid(htup,
                                                 &prstate->latestRemovedXid);
                ndeleted++;
            }

            /* Nothing more to do */
            return ndeleted;
        }
    }

    /* Start from the root tuple */
    offnum = rootoffnum;

    /* while not end of the chain */
    for (;;)
    {
        ItemId      lp;
        bool        tupdead,
                    recent_dead;

        /* Some sanity checks */
        if (offnum < FirstOffsetNumber || offnum > maxoff)
            break;

        /* If the item is already processed, stop --- it must not be in the
         * same chain */
        if (prstate->marked[offnum])
            break;

        lp = PageGetItemId(dp, offnum);

        /* Unused item obviously isn't part of the chain */
        if (!ItemIdIsUsed(lp))
            break;

        /*
         * If we are looking at the redirected root line pointer, jump to the
         * first normal tuple in the chain.  If we find a redirect somewhere
         * else, stop --- it must not be in the same chain.
         */
        if (ItemIdIsRedirected(lp))
        {
            if (nchain > 0)
                break;          /* not at start of chain */
            chainitems[nchain++] = offnum;
            offnum = ItemIdGetRedirect(rootlp);
            continue;
        }

        /*
         * Likewise, a dead item pointer can't be part of the chain.  (We
         * already eliminated the case of a dead root tuple outside this
         * function.)
         */
        if (ItemIdIsDead(lp))
            break;

        Assert(ItemIdIsNormal(lp));
        htup = (HeapTupleHeader) PageGetItem(dp, lp);

        /*
         * Check the tuple XMIN against prior XMAX, if any
         */
        if (TransactionIdIsValid(priorXmax) &&
            !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
            break;

        /*
         * OK, this tuple is indeed a member of the chain.
         */
        chainitems[nchain++] = offnum;

        /*
         * Check tuple's visibility status.
         */
        tupdead = recent_dead = false;

        switch (HeapTupleSatisfiesVacuum(htup, OldestXmin, buffer))
        {
            case HEAPTUPLE_DEAD:
                tupdead = true;
                break;

            case HEAPTUPLE_RECENTLY_DEAD:
                recent_dead = true;

                /*
                 * This tuple may soon become DEAD.  Update the hint field so
                 * that the page is reconsidered for pruning in future.
                 */
                heap_prune_record_prunable(prstate,
                                           HeapTupleHeaderGetXmax(htup));
                break;

            case HEAPTUPLE_DELETE_IN_PROGRESS:

                /*
                 * This tuple may soon become DEAD.  Update the hint field so
                 * that the page is reconsidered for pruning in future.
                 */
                heap_prune_record_prunable(prstate,
                                           HeapTupleHeaderGetXmax(htup));
                break;

            case HEAPTUPLE_LIVE:
            case HEAPTUPLE_INSERT_IN_PROGRESS:

                /*
                 * If we wanted to optimize for aborts, we might consider
                 * marking the page prunable when we see INSERT_IN_PROGRESS.
                 * But we don't.  See related decisions about when to mark
                 * the page prunable in heapam.c.
                 */
                break;

            default:
                elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
                break;
        }

        /*
         * Remember the last DEAD tuple seen.  We will advance past
         * RECENTLY_DEAD tuples just in case there's a DEAD one after them;
         * but we can't advance past anything else.  (XXX is it really worth
         * continuing to scan beyond RECENTLY_DEAD?  The case where we will
         * find another DEAD tuple is a fairly unusual corner case.)
         */
        if (tupdead)
        {
            latestdead = offnum;
            HeapTupleHeaderAdvanceLatestRemovedXid(htup,
                                                 &prstate->latestRemovedXid);
        }
        else if (!recent_dead)
            break;

        /*
         * If the tuple is not HOT-updated, then we are at the end of this
         * HOT-update chain.
         */
        if (!HeapTupleHeaderIsHotUpdated(htup))
            break;

        /*
         * Advance to next chain member.
         */
        Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
               BufferGetBlockNumber(buffer));
        offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
        priorXmax = HeapTupleHeaderGetXmax(htup);
    }

    /*
     * If we found a DEAD tuple in the chain, adjust the HOT chain so that
     * all the DEAD tuples at the start of the chain are removed and the root
     * line pointer is appropriately redirected.
     */
    if (OffsetNumberIsValid(latestdead))
    {
        /*
         * Mark as unused each intermediate item that we are able to remove
         * from the chain.
         *
         * When the previous item is the last dead tuple seen, we are at the
         * right candidate for redirection.
         */
        for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
        {
            heap_prune_record_unused(prstate, chainitems[i]);
            ndeleted++;
        }

        /*
         * If the root entry had been a normal tuple, we are deleting it, so
         * count it in the result.  But changing a redirect (even to DEAD
         * state) doesn't count.
         */
        if (ItemIdIsNormal(rootlp))
            ndeleted++;

        /*
         * If the DEAD tuple is at the end of the chain, the entire chain is
         * dead and the root line pointer can be marked dead.  Otherwise just
         * redirect the root to the correct chain member.
         */
        if (i >= nchain)
            heap_prune_record_dead(prstate, rootoffnum);
        else
            heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
    }
    else if (nchain < 2 && ItemIdIsRedirected(rootlp))
    {
        /*
         * We found a redirect item that doesn't point to a valid follow-on
         * item.  This can happen if the loop in heap_page_prune caused us to
         * visit the dead successor of a redirect item before visiting the
         * redirect item.  We can clean up by setting the redirect item to
         * DEAD state.
         */
        heap_prune_record_dead(prstate, rootoffnum);
    }

    return ndeleted;
}
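
/*
 * Worked example (hypothetical offsets): given a HOT chain 1 -> 2 -> 3 where
 * the tuples at offsets 1 and 2 are DEAD and the tuple at offset 3 is LIVE,
 * heap_prune_chain records offset 1 as a redirect to offset 3, records
 * offset 2 as LP_UNUSED, and returns 2.  If the tuple at offset 3 were DEAD
 * as well, the whole chain would die: offsets 2 and 3 would become
 * LP_UNUSED and the root at offset 1 would be marked LP_DEAD, preserving a
 * line pointer for any index entries still referencing it until VACUUM
 * removes them.
 */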

/* Record lowest soon-prunable XID */
static void
heap_prune_record_prunable(PruneState *prstate, TransactionId xid)
{
    /*
     * This should exactly match the PageSetPrunable macro.  We can't store
     * directly into the page header yet, so we update working state.
     */
    Assert(TransactionIdIsNormal(xid));
    if (!TransactionIdIsValid(prstate->new_prune_xid) ||
        TransactionIdPrecedes(xid, prstate->new_prune_xid))
        prstate->new_prune_xid = xid;
}

/* Record item pointer to be redirected */
static void
heap_prune_record_redirect(PruneState *prstate,
                           OffsetNumber offnum, OffsetNumber rdoffnum)
{
    Assert(prstate->nredirected < MaxHeapTuplesPerPage);
    prstate->redirected[prstate->nredirected * 2] = offnum;
    prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
    prstate->nredirected++;
    Assert(!prstate->marked[offnum]);
    prstate->marked[offnum] = true;
    Assert(!prstate->marked[rdoffnum]);
    prstate->marked[rdoffnum] = true;
}

/* Record item pointer to be marked dead */
static void
heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)
{
    Assert(prstate->ndead < MaxHeapTuplesPerPage);
    prstate->nowdead[prstate->ndead] = offnum;
    prstate->ndead++;
    Assert(!prstate->marked[offnum]);
    prstate->marked[offnum] = true;
}

/* Record item pointer to be marked unused */
static void
heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
{
    Assert(prstate->nunused < MaxHeapTuplesPerPage);
    prstate->nowunused[prstate->nunused] = offnum;
    prstate->nunused++;
    Assert(!prstate->marked[offnum]);
    prstate->marked[offnum] = true;
}


/*
 * Perform the actual page changes needed by heap_page_prune.
 * It is expected that the caller has suitable pin and lock on the
 * buffer, and is inside a critical section.
 *
 * This is split out because it is also used by heap_xlog_clean()
 * to replay the WAL record when needed after a crash.  Note that the
 * arguments are identical to those of log_heap_clean().
 */
void
heap_page_prune_execute(Buffer buffer,
                        OffsetNumber *redirected, int nredirected,
                        OffsetNumber *nowdead, int ndead,
                        OffsetNumber *nowunused, int nunused)
{
    Page        page = (Page) BufferGetPage(buffer);
    OffsetNumber *offnum;
    int         i;

    /* Update all redirected line pointers */
    offnum = redirected;
    for (i = 0; i < nredirected; i++)
    {
        OffsetNumber fromoff = *offnum++;
        OffsetNumber tooff = *offnum++;
        ItemId      fromlp = PageGetItemId(page, fromoff);

        ItemIdSetRedirect(fromlp, tooff);
    }

    /* Update all now-dead line pointers */
    offnum = nowdead;
    for (i = 0; i < ndead; i++)
    {
        OffsetNumber off = *offnum++;
        ItemId      lp = PageGetItemId(page, off);

        ItemIdSetDead(lp);
    }

    /* Update all now-unused line pointers */
    offnum = nowunused;
    for (i = 0; i < nunused; i++)
    {
        OffsetNumber off = *offnum++;
        ItemId      lp = PageGetItemId(page, off);

        ItemIdSetUnused(lp);
    }

    /*
     * Finally, repair any fragmentation, and update the page's hint bit
     * about whether it has free pointers.
     */
    PageRepairFragmentation(page);
}


/*
 * For all items in this page, find their respective root line pointers.
 * If item k is part of a HOT-chain with root at item j, then we set
 * root_offsets[k - 1] = j.
 *
 * The passed-in root_offsets array must have MaxHeapTuplesPerPage entries.
 * We zero out all unused entries.
 *
 * The function must be called with at least share lock on the buffer, to
 * prevent concurrent prune operations.
 *
 * Note: The information collected here is valid only as long as the caller
 * holds a pin on the buffer. Once pin is released, a tuple might be pruned
 * and reused by a completely unrelated tuple.
 */
void
heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
{
    OffsetNumber offnum,
                maxoff;

    MemSet(root_offsets, 0, MaxHeapTuplesPerPage * sizeof(OffsetNumber));

    maxoff = PageGetMaxOffsetNumber(page);
    for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
    {
        ItemId      lp = PageGetItemId(page, offnum);
        HeapTupleHeader htup;
        OffsetNumber nextoffnum;
        TransactionId priorXmax;

        /* skip unused and dead items */
        if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
            continue;

        if (ItemIdIsNormal(lp))
        {
            htup = (HeapTupleHeader) PageGetItem(page, lp);

            /*
             * Check if this tuple is part of a HOT-chain rooted at some
             * other tuple. If so, skip it for now; we'll process it when we
             * find its root.
             */
            if (HeapTupleHeaderIsHeapOnly(htup))
                continue;

            /*
             * This is either a plain tuple or the root of a HOT-chain.
             * Remember it in the mapping.
             */
            root_offsets[offnum - 1] = offnum;

            /* If it's not the start of a HOT-chain, we're done with it */
            if (!HeapTupleHeaderIsHotUpdated(htup))
                continue;

            /* Set up to scan the HOT-chain */
            nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
            priorXmax = HeapTupleHeaderGetXmax(htup);
        }
        else
        {
            /* Must be a redirect item. We do not set its root_offsets entry */
            Assert(ItemIdIsRedirected(lp));
            /* Set up to scan the HOT-chain */
            nextoffnum = ItemIdGetRedirect(lp);
            priorXmax = InvalidTransactionId;
        }

        /*
         * Now follow the HOT-chain and collect other tuples in the chain.
         *
         * Note: Even though this is a nested loop, the complexity of the
         * function is O(N) because each tuple on the page is visited at most
         * twice: once in the outer loop and once while chasing HOT chains.
         */
        for (;;)
        {
            lp = PageGetItemId(page, nextoffnum);

            /* Check for broken chains */
            if (!ItemIdIsNormal(lp))
                break;

            htup = (HeapTupleHeader) PageGetItem(page, lp);

            if (TransactionIdIsValid(priorXmax) &&
                !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
                break;

            /* Remember the root line pointer for this item */
            root_offsets[nextoffnum - 1] = offnum;

            /* Advance to next chain member, if any */
            if (!HeapTupleHeaderIsHotUpdated(htup))
                break;

            nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
            priorXmax = HeapTupleHeaderGetXmax(htup);
        }
    }
}
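
/*
 * Usage sketch (hedged): index builds use this mapping so that index entries
 * for heap-only tuples point at their chain roots.  A caller in the
 * index-build heap scan looks roughly like
 *
 *      OffsetNumber root_offsets[MaxHeapTuplesPerPage];
 *
 *      LockBuffer(buffer, BUFFER_LOCK_SHARE);
 *      heap_get_root_tuples(BufferGetPage(buffer), root_offsets);
 *      LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 *
 * after which root_offsets[offnum - 1] gives the root line pointer for the
 * tuple at offset offnum, or zero (InvalidOffsetNumber) if no root was
 * recorded for that slot.
 */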