1 /*-------------------------------------------------------------------------
4 * heap access method code
6 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/access/heap/heapam.c
15 * relation_open - open any relation by relation OID
16 * relation_openrv - open any relation specified by a RangeVar
17 * relation_close - close any relation
18 * heap_open - open a heap relation by relation OID
19 * heap_openrv - open a heap relation specified by a RangeVar
20 * heap_close - (now just a macro for relation_close)
21 * heap_beginscan - begin relation scan
22 * heap_rescan - restart a relation scan
23 * heap_endscan - end relation scan
24 * heap_getnext - retrieve next tuple in scan
25 * heap_fetch - retrieve tuple with given tid
26 * heap_insert - insert tuple into a relation
27 * heap_multi_insert - insert multiple tuples into a relation
28 * heap_delete - delete a tuple from a relation
29 * heap_update - replace a tuple in a relation with another tuple
30 * heap_markpos - mark scan position
31 * heap_restrpos - restore position to marked location
32 * heap_sync - sync heap, for when no WAL has been written
35 * This file contains the heap_ routines which implement
36 * the POSTGRES heap access method used for all POSTGRES relations.
39 *-------------------------------------------------------------------------
43 #include "access/heapam.h"
44 #include "access/heapam_xlog.h"
45 #include "access/hio.h"
46 #include "access/multixact.h"
47 #include "access/relscan.h"
48 #include "access/sysattr.h"
49 #include "access/transam.h"
50 #include "access/tuptoaster.h"
51 #include "access/valid.h"
52 #include "access/visibilitymap.h"
53 #include "access/xact.h"
54 #include "access/xlog.h"
55 #include "access/xloginsert.h"
56 #include "access/xlogutils.h"
57 #include "catalog/catalog.h"
58 #include "catalog/namespace.h"
59 #include "miscadmin.h"
61 #include "storage/bufmgr.h"
62 #include "storage/freespace.h"
63 #include "storage/lmgr.h"
64 #include "storage/predicate.h"
65 #include "storage/procarray.h"
66 #include "storage/smgr.h"
67 #include "storage/standby.h"
68 #include "utils/datum.h"
69 #include "utils/inval.h"
70 #include "utils/lsyscache.h"
71 #include "utils/relcache.h"
72 #include "utils/snapmgr.h"
73 #include "utils/syscache.h"
74 #include "utils/tqual.h"
78 bool synchronize_seqscans = true;
81 static HeapScanDesc heap_beginscan_internal(Relation relation,
83 int nkeys, ScanKey key,
84 bool allow_strat, bool allow_sync,
85 bool is_bitmapscan, bool temp_snap);
86 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
87 TransactionId xid, CommandId cid, int options);
88 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
89 Buffer newbuf, HeapTuple oldtup,
90 HeapTuple newtup, HeapTuple old_key_tup,
91 bool all_visible_cleared, bool new_all_visible_cleared);
92 static void HeapSatisfiesHOTandKeyUpdate(Relation relation,
94 Bitmapset *key_attrs, Bitmapset *id_attrs,
95 bool *satisfies_hot, bool *satisfies_key,
97 HeapTuple oldtup, HeapTuple newtup);
98 static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
99 uint16 old_infomask2, TransactionId add_to_xmax,
100 LockTupleMode mode, bool is_update,
101 TransactionId *result_xmax, uint16 *result_infomask,
102 uint16 *result_infomask2);
103 static HTSU_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
104 ItemPointer ctid, TransactionId xid,
106 static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
107 uint16 *new_infomask2);
108 static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
110 static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
111 Relation rel, ItemPointer ctid, XLTW_Oper oper,
113 static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
114 uint16 infomask, Relation rel, int *remaining);
115 static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
116 static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
121 * Each tuple lock mode has a corresponding heavyweight lock, and one or two
122 * corresponding MultiXactStatuses (one to merely lock tuples, another one to
123 * update them). This table (and the macros below) helps us determine the
124 * heavyweight lock mode and MultiXactStatus values to use for any particular
125 * tuple lock strength.
127 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
137 tupleLockExtraInfo[MaxLockTupleMode + 1] =
139 { /* LockTupleKeyShare */
141 MultiXactStatusForKeyShare,
142 -1 /* KeyShare does not allow updating tuples */
144 { /* LockTupleShare */
146 MultiXactStatusForShare,
147 -1 /* Share does not allow updating tuples */
149 { /* LockTupleNoKeyExclusive */
151 MultiXactStatusForNoKeyUpdate,
152 MultiXactStatusNoKeyUpdate
154 { /* LockTupleExclusive */
156 MultiXactStatusForUpdate,
157 MultiXactStatusUpdate
161 /* Get the LOCKMODE for a given MultiXactStatus */
162 #define LOCKMODE_from_mxstatus(status) \
163 (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
166 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
167 * This is more readable than having every caller translate it to lock.h's
170 #define LockTupleTuplock(rel, tup, mode) \
171 LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
172 #define UnlockTupleTuplock(rel, tup, mode) \
173 UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
174 #define ConditionalLockTupleTuplock(rel, tup, mode) \
175 ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
178 * This table maps each MultiXactStatus value to the corresponding tuple
179 * lock strength.
181 static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
183 LockTupleKeyShare, /* ForKeyShare */
184 LockTupleShare, /* ForShare */
185 LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
186 LockTupleExclusive, /* ForUpdate */
187 LockTupleNoKeyExclusive, /* NoKeyUpdate */
188 LockTupleExclusive /* Update */
191 /* Get the LockTupleMode for a given MultiXactStatus */
192 #define TUPLOCK_from_mxstatus(status) \
193 (MultiXactStatusLock[(status)])
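/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * how the lookup tables and macros above fit together.  Starting from a
 * MultiXactStatus such as MultiXactStatusForShare, TUPLOCK_from_mxstatus()
 * recovers the LockTupleMode, and LOCKMODE_from_mxstatus() yields the
 * heavyweight lock mode recorded for that mode in tupleLockExtraInfo.
 * Guarded by NOT_USED so it is never compiled.
 */
#ifdef NOT_USED
static void
example_tuplock_lookup(void)
{
	/* ForShare maps back to the Share tuple lock strength ... */
	Assert(TUPLOCK_from_mxstatus(MultiXactStatusForShare) == LockTupleShare);

	/* ... and its heavyweight lock is the one stored for LockTupleShare */
	Assert(LOCKMODE_from_mxstatus(MultiXactStatusForShare) ==
		   tupleLockExtraInfo[LockTupleShare].hwlock);
}
#endif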
195 /* ----------------------------------------------------------------
196 * heap support routines
197 * ----------------------------------------------------------------
201 * initscan - scan code common to heap_beginscan and heap_rescan
205 initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
211 * Determine the number of blocks we have to scan.
213 * It is sufficient to do this once at scan start, since any tuples added
214 * while the scan is in progress will be invisible to my snapshot anyway.
215 * (That is not true when using a non-MVCC snapshot. However, we couldn't
216 * guarantee to return tuples added after scan start anyway, since they
217 * might go into pages we already scanned. To guarantee consistent
218 * results for a non-MVCC snapshot, the caller must hold some higher-level
219 * lock that ensures the interesting tuple(s) won't change.)
221 scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
224 * If the table is large relative to NBuffers, use a bulk-read access
225 * strategy and enable synchronized scanning (see syncscan.c). Although
226 * the thresholds for these features could be different, we make them the
227 * same so that there are only two behaviors to tune rather than four.
228 * (However, some callers need to be able to disable one or both of these
229 * behaviors, independently of the size of the table; also there is a GUC
230 * variable that can disable synchronized scanning.)
232 * During a rescan, don't make a new strategy object if we don't have to.
234 if (!RelationUsesLocalBuffers(scan->rs_rd) &&
235 scan->rs_nblocks > NBuffers / 4)
237 allow_strat = scan->rs_allow_strat;
238 allow_sync = scan->rs_allow_sync;
241 allow_strat = allow_sync = false;
245 if (scan->rs_strategy == NULL)
246 scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
250 if (scan->rs_strategy != NULL)
251 FreeAccessStrategy(scan->rs_strategy);
252 scan->rs_strategy = NULL;
258 * If rescan, keep the previous startblock setting so that rewinding a
259 * cursor doesn't generate surprising results. Reset the syncscan setting, though.
262 scan->rs_syncscan = (allow_sync && synchronize_seqscans);
264 else if (allow_sync && synchronize_seqscans)
266 scan->rs_syncscan = true;
267 scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
271 scan->rs_syncscan = false;
272 scan->rs_startblock = 0;
275 scan->rs_initblock = 0;
276 scan->rs_numblocks = InvalidBlockNumber;
277 scan->rs_inited = false;
278 scan->rs_ctup.t_data = NULL;
279 ItemPointerSetInvalid(&scan->rs_ctup.t_self);
280 scan->rs_cbuf = InvalidBuffer;
281 scan->rs_cblock = InvalidBlockNumber;
283 /* we don't have a marked position... */
284 ItemPointerSetInvalid(&(scan->rs_mctid));
286 /* page-at-a-time fields are always invalid when not rs_inited */
289 * copy the scan key, if appropriate
292 memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));
295 * Currently, we don't have a stats counter for bitmap heap scans (but the
296 * underlying bitmap index scans will be counted).
298 if (!scan->rs_bitmapscan)
299 pgstat_count_heap_scan(scan->rs_rd);
303 heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
305 scan->rs_startblock = startBlk;
306 scan->rs_initblock = startBlk;
307 scan->rs_numblocks = numBlks;
311 * heapgetpage - subroutine for heapgettup()
313 * This routine reads and pins the specified page of the relation.
314 * In page-at-a-time mode it performs additional work, namely determining
315 * which tuples on the page are visible.
318 heapgetpage(HeapScanDesc scan, BlockNumber page)
325 OffsetNumber lineoff;
329 Assert(page < scan->rs_nblocks);
331 /* release previous scan buffer, if any */
332 if (BufferIsValid(scan->rs_cbuf))
334 ReleaseBuffer(scan->rs_cbuf);
335 scan->rs_cbuf = InvalidBuffer;
339 * Be sure to check for interrupts at least once per page. Checks at
340 * higher code levels won't be able to stop a seqscan that encounters many
341 * pages' worth of consecutive dead tuples.
343 CHECK_FOR_INTERRUPTS();
345 /* read page using selected strategy */
346 scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page,
347 RBM_NORMAL, scan->rs_strategy);
348 scan->rs_cblock = page;
350 if (!scan->rs_pageatatime)
353 buffer = scan->rs_cbuf;
354 snapshot = scan->rs_snapshot;
357 * Prune and repair fragmentation for the whole page, if possible.
359 heap_page_prune_opt(scan->rs_rd, buffer);
362 * We must hold share lock on the buffer content while examining tuple
363 * visibility. Afterwards, however, the tuples we have found to be
364 * visible are guaranteed good as long as we hold the buffer pin.
366 LockBuffer(buffer, BUFFER_LOCK_SHARE);
368 dp = (Page) BufferGetPage(buffer);
369 lines = PageGetMaxOffsetNumber(dp);
373 * If the all-visible flag indicates that all tuples on the page are
374 * visible to everyone, we can skip the per-tuple visibility tests.
376 * Note: In hot standby, a tuple that's already visible to all
377 * transactions in the master might still be invisible to a read-only
378 * transaction in the standby. We partly handle this problem by tracking
379 * the minimum xmin of visible tuples as the cut-off XID while marking a
380 * page all-visible on master and WAL log that along with the visibility
381 * map SET operation. In hot standby, we wait for (or abort) all
382 * transactions that might not see one or more tuples on the
383 * page. That's how index-only scans work fine in hot standby. A crucial
384 * difference between index-only scans and heap scans is that the
385 * index-only scan completely relies on the visibility map, whereas a heap
386 * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
387 * the page-level flag can be trusted in the same way, because it might
388 * get propagated somehow without being explicitly WAL-logged, e.g. via a
389 * full page write. Until we can prove that beyond doubt, let's check each
390 * tuple for visibility the hard way.
392 all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
394 for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
398 if (ItemIdIsNormal(lpp))
400 HeapTupleData loctup;
403 loctup.t_tableOid = RelationGetRelid(scan->rs_rd);
404 loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
405 loctup.t_len = ItemIdGetLength(lpp);
406 ItemPointerSet(&(loctup.t_self), page, lineoff);
411 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
413 CheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
417 scan->rs_vistuples[ntup++] = lineoff;
421 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
423 Assert(ntup <= MaxHeapTuplesPerPage);
424 scan->rs_ntuples = ntup;
428 * heapgettup - fetch next heap tuple
430 * Initialize the scan if not already done; then advance to the next
431 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
432 * or set scan->rs_ctup.t_data = NULL if no more tuples.
434 * dir == NoMovementScanDirection means "re-fetch the tuple indicated by scan->rs_ctup".
437 * Note: the reason nkeys/key are passed separately, even though they are
438 * kept in the scan descriptor, is that the caller may not want us to check the scan keys.
441 * Note: when we fall off the end of the scan in either direction, we
442 * reset rs_inited. This means that a further request with the same
443 * scan direction will restart the scan, which is a bit odd, but a
444 * request with the opposite scan direction will start a fresh scan
445 * in the proper direction. The latter is required behavior for cursors,
446 * while the former case is generally undefined behavior in Postgres
447 * so we don't care too much.
451 heapgettup(HeapScanDesc scan,
456 HeapTuple tuple = &(scan->rs_ctup);
457 Snapshot snapshot = scan->rs_snapshot;
458 bool backward = ScanDirectionIsBackward(dir);
463 OffsetNumber lineoff;
468 * calculate next starting lineoff, given scan direction
470 if (ScanDirectionIsForward(dir))
472 if (!scan->rs_inited)
475 * return null immediately if relation is empty
477 if (scan->rs_nblocks == 0)
479 Assert(!BufferIsValid(scan->rs_cbuf));
480 tuple->t_data = NULL;
483 page = scan->rs_startblock; /* first page */
484 heapgetpage(scan, page);
485 lineoff = FirstOffsetNumber; /* first offnum */
486 scan->rs_inited = true;
490 /* continue from previously returned page/tuple */
491 page = scan->rs_cblock; /* current page */
492 lineoff = /* next offnum */
493 OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
496 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
498 dp = (Page) BufferGetPage(scan->rs_cbuf);
499 lines = PageGetMaxOffsetNumber(dp);
500 /* page and lineoff now reference the physically next tid */
502 linesleft = lines - lineoff + 1;
506 if (!scan->rs_inited)
509 * return null immediately if relation is empty
511 if (scan->rs_nblocks == 0)
513 Assert(!BufferIsValid(scan->rs_cbuf));
514 tuple->t_data = NULL;
519 * Disable reporting to syncscan logic in a backwards scan; it's
520 * not very likely anyone else is doing the same thing at the same
521 * time, and much more likely that we'll just bollix things for forward scanners.
524 scan->rs_syncscan = false;
525 /* start from last page of the scan */
526 if (scan->rs_startblock > 0)
527 page = scan->rs_startblock - 1;
529 page = scan->rs_nblocks - 1;
530 heapgetpage(scan, page);
534 /* continue from previously returned page/tuple */
535 page = scan->rs_cblock; /* current page */
538 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
540 dp = (Page) BufferGetPage(scan->rs_cbuf);
541 lines = PageGetMaxOffsetNumber(dp);
543 if (!scan->rs_inited)
545 lineoff = lines; /* final offnum */
546 scan->rs_inited = true;
550 lineoff = /* previous offnum */
551 OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
553 /* page and lineoff now reference the physically previous tid */
560 * ``no movement'' scan direction: refetch prior tuple
562 if (!scan->rs_inited)
564 Assert(!BufferIsValid(scan->rs_cbuf));
565 tuple->t_data = NULL;
569 page = ItemPointerGetBlockNumber(&(tuple->t_self));
570 if (page != scan->rs_cblock)
571 heapgetpage(scan, page);
573 /* Since the tuple was previously fetched, needn't lock page here */
574 dp = (Page) BufferGetPage(scan->rs_cbuf);
575 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
576 lpp = PageGetItemId(dp, lineoff);
577 Assert(ItemIdIsNormal(lpp));
579 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
580 tuple->t_len = ItemIdGetLength(lpp);
586 * advance the scan until we find a qualifying tuple or run out of stuff
589 lpp = PageGetItemId(dp, lineoff);
592 while (linesleft > 0)
594 if (ItemIdIsNormal(lpp))
598 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
599 tuple->t_len = ItemIdGetLength(lpp);
600 ItemPointerSet(&(tuple->t_self), page, lineoff);
603 * if current tuple qualifies, return it.
605 valid = HeapTupleSatisfiesVisibility(tuple,
609 CheckForSerializableConflictOut(valid, scan->rs_rd, tuple,
610 scan->rs_cbuf, snapshot);
612 if (valid && key != NULL)
613 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
618 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
624 * otherwise move to the next item on the page
629 --lpp; /* move back in this page's ItemId array */
634 ++lpp; /* move forward in this page's ItemId array */
640 * if we get here, it means we've exhausted the items on this page and
641 * it's time to move to the next.
643 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
646 * advance to next/prior page and detect end of scan
650 finished = (page == scan->rs_startblock) ||
651 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks <= 0 : false);
653 page = scan->rs_nblocks;
659 if (page >= scan->rs_nblocks)
661 finished = (page == scan->rs_startblock) ||
662 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks <= 0 : false);
665 * Report our new scan position for synchronization purposes. We
666 * don't do that when moving backwards, however. That would just
667 * mess up any other forward-moving scanners.
669 * Note: we do this before checking for end of scan so that the
670 * final state of the position hint is back at the start of the
671 * rel. That's not strictly necessary, but otherwise when you run
672 * the same query multiple times the starting position would shift
673 * a little bit backwards on every invocation, which is confusing.
674 * We don't guarantee any specific ordering in general, though.
676 if (scan->rs_syncscan)
677 ss_report_location(scan->rs_rd, page);
681 * return NULL if we've exhausted all the pages
685 if (BufferIsValid(scan->rs_cbuf))
686 ReleaseBuffer(scan->rs_cbuf);
687 scan->rs_cbuf = InvalidBuffer;
688 scan->rs_cblock = InvalidBlockNumber;
689 tuple->t_data = NULL;
690 scan->rs_inited = false;
694 heapgetpage(scan, page);
696 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
698 dp = (Page) BufferGetPage(scan->rs_cbuf);
699 lines = PageGetMaxOffsetNumber((Page) dp);
704 lpp = PageGetItemId(dp, lines);
708 lineoff = FirstOffsetNumber;
709 lpp = PageGetItemId(dp, FirstOffsetNumber);
715 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
717 * Same API as heapgettup, but used in page-at-a-time mode
719 * The internal logic is much the same as heapgettup's too, but there are some
720 * differences: we do not take the buffer content lock (that only needs to
721 * happen inside heapgetpage), and we iterate through just the tuples listed
722 * in rs_vistuples[] rather than all tuples on the page. Notice that
723 * lineindex is 0-based, whereas the corresponding loop variable lineoff in
724 * heapgettup is 1-based.
728 heapgettup_pagemode(HeapScanDesc scan,
733 HeapTuple tuple = &(scan->rs_ctup);
734 bool backward = ScanDirectionIsBackward(dir);
740 OffsetNumber lineoff;
745 * calculate next starting lineindex, given scan direction
747 if (ScanDirectionIsForward(dir))
749 if (!scan->rs_inited)
752 * return null immediately if relation is empty
754 if (scan->rs_nblocks == 0)
756 Assert(!BufferIsValid(scan->rs_cbuf));
757 tuple->t_data = NULL;
760 page = scan->rs_startblock; /* first page */
761 heapgetpage(scan, page);
763 scan->rs_inited = true;
767 /* continue from previously returned page/tuple */
768 page = scan->rs_cblock; /* current page */
769 lineindex = scan->rs_cindex + 1;
772 dp = (Page) BufferGetPage(scan->rs_cbuf);
773 lines = scan->rs_ntuples;
774 /* page and lineindex now reference the next visible tid */
776 linesleft = lines - lineindex;
780 if (!scan->rs_inited)
783 * return null immediately if relation is empty
785 if (scan->rs_nblocks == 0)
787 Assert(!BufferIsValid(scan->rs_cbuf));
788 tuple->t_data = NULL;
793 * Disable reporting to syncscan logic in a backwards scan; it's
794 * not very likely anyone else is doing the same thing at the same
795 * time, and much more likely that we'll just bollix things for forward scanners.
798 scan->rs_syncscan = false;
799 /* start from last page of the scan */
800 if (scan->rs_startblock > 0)
801 page = scan->rs_startblock - 1;
803 page = scan->rs_nblocks - 1;
804 heapgetpage(scan, page);
808 /* continue from previously returned page/tuple */
809 page = scan->rs_cblock; /* current page */
812 dp = (Page) BufferGetPage(scan->rs_cbuf);
813 lines = scan->rs_ntuples;
815 if (!scan->rs_inited)
817 lineindex = lines - 1;
818 scan->rs_inited = true;
822 lineindex = scan->rs_cindex - 1;
824 /* page and lineindex now reference the previous visible tid */
826 linesleft = lineindex + 1;
831 * ``no movement'' scan direction: refetch prior tuple
833 if (!scan->rs_inited)
835 Assert(!BufferIsValid(scan->rs_cbuf));
836 tuple->t_data = NULL;
840 page = ItemPointerGetBlockNumber(&(tuple->t_self));
841 if (page != scan->rs_cblock)
842 heapgetpage(scan, page);
844 /* Since the tuple was previously fetched, needn't lock page here */
845 dp = (Page) BufferGetPage(scan->rs_cbuf);
846 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
847 lpp = PageGetItemId(dp, lineoff);
848 Assert(ItemIdIsNormal(lpp));
850 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
851 tuple->t_len = ItemIdGetLength(lpp);
853 /* check that rs_cindex is in sync */
854 Assert(scan->rs_cindex < scan->rs_ntuples);
855 Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
861 * advance the scan until we find a qualifying tuple or run out of stuff
866 while (linesleft > 0)
868 lineoff = scan->rs_vistuples[lineindex];
869 lpp = PageGetItemId(dp, lineoff);
870 Assert(ItemIdIsNormal(lpp));
872 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
873 tuple->t_len = ItemIdGetLength(lpp);
874 ItemPointerSet(&(tuple->t_self), page, lineoff);
877 * if current tuple qualifies, return it.
883 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
887 scan->rs_cindex = lineindex;
893 scan->rs_cindex = lineindex;
898 * otherwise move to the next item on the page
908 * if we get here, it means we've exhausted the items on this page and
909 * it's time to move to the next.
913 finished = (page == scan->rs_startblock) ||
914 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks <= 0 : false);
916 page = scan->rs_nblocks;
922 if (page >= scan->rs_nblocks)
924 finished = (page == scan->rs_startblock) ||
925 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks <= 0 : false);
928 * Report our new scan position for synchronization purposes. We
929 * don't do that when moving backwards, however. That would just
930 * mess up any other forward-moving scanners.
932 * Note: we do this before checking for end of scan so that the
933 * final state of the position hint is back at the start of the
934 * rel. That's not strictly necessary, but otherwise when you run
935 * the same query multiple times the starting position would shift
936 * a little bit backwards on every invocation, which is confusing.
937 * We don't guarantee any specific ordering in general, though.
939 if (scan->rs_syncscan)
940 ss_report_location(scan->rs_rd, page);
944 * return NULL if we've exhausted all the pages
948 if (BufferIsValid(scan->rs_cbuf))
949 ReleaseBuffer(scan->rs_cbuf);
950 scan->rs_cbuf = InvalidBuffer;
951 scan->rs_cblock = InvalidBlockNumber;
952 tuple->t_data = NULL;
953 scan->rs_inited = false;
957 heapgetpage(scan, page);
959 dp = (Page) BufferGetPage(scan->rs_cbuf);
960 lines = scan->rs_ntuples;
963 lineindex = lines - 1;
970 #if defined(DISABLE_COMPLEX_MACRO)
972 * This is formatted oddly so that the correspondence to the macro
973 * definition in access/htup_details.h is maintained.
976 fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
983 HeapTupleNoNulls(tup) ?
985 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
987 fetchatt((tupleDesc)->attrs[(attnum) - 1],
988 (char *) (tup)->t_data + (tup)->t_data->t_hoff +
989 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
992 nocachegetattr((tup), (attnum), (tupleDesc))
996 att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
1003 nocachegetattr((tup), (attnum), (tupleDesc))
1013 #endif /* defined(DISABLE_COMPLEX_MACRO) */
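/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * typical use of fastgetattr() by a caller holding a tuple and its
 * descriptor.  Attribute numbers are 1-based; *isnull reports whether the
 * value was NULL.  Guarded by NOT_USED so it is never compiled.
 */
#ifdef NOT_USED
static void
example_fetch_first_attribute(HeapTuple tup, TupleDesc tupdesc)
{
	bool		isnull;
	Datum		value;

	value = fastgetattr(tup, 1, tupdesc, &isnull);
	if (!isnull)
	{
		/* use value here; its meaning depends on the attribute's type */
	}
}
#endif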
1016 /* ----------------------------------------------------------------
1017 * heap access method interface
1018 * ----------------------------------------------------------------
1022 * relation_open - open any relation by relation OID
1024 * If lockmode is not "NoLock", the specified kind of lock is
1025 * obtained on the relation. (Generally, NoLock should only be
1026 * used if the caller knows it has some appropriate lock on the
1027 * relation already.)
1029 * An error is raised if the relation does not exist.
1031 * NB: a "relation" is anything with a pg_class entry. The caller is
1032 * expected to check whether the relkind is something it can handle.
1036 relation_open(Oid relationId, LOCKMODE lockmode)
1040 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1042 /* Get the lock before trying to open the relcache entry */
1043 if (lockmode != NoLock)
1044 LockRelationOid(relationId, lockmode);
1046 /* The relcache does all the real work... */
1047 r = RelationIdGetRelation(relationId);
1049 if (!RelationIsValid(r))
1050 elog(ERROR, "could not open relation with OID %u", relationId);
1052 /* Make note that we've accessed a temporary relation */
1053 if (RelationUsesLocalBuffers(r))
1054 MyXactAccessedTempRel = true;
1056 pgstat_initstats(r);
1062 * try_relation_open - open any relation by relation OID
1064 * Same as relation_open, except return NULL instead of failing
1065 * if the relation does not exist.
1069 try_relation_open(Oid relationId, LOCKMODE lockmode)
1073 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1075 /* Get the lock first */
1076 if (lockmode != NoLock)
1077 LockRelationOid(relationId, lockmode);
1080 * Now that we have the lock, probe to see if the relation really exists
1083 if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relationId)))
1085 /* Release useless lock */
1086 if (lockmode != NoLock)
1087 UnlockRelationOid(relationId, lockmode);
1092 /* Should be safe to do a relcache load */
1093 r = RelationIdGetRelation(relationId);
1095 if (!RelationIsValid(r))
1096 elog(ERROR, "could not open relation with OID %u", relationId);
1098 /* Make note that we've accessed a temporary relation */
1099 if (RelationUsesLocalBuffers(r))
1100 MyXactAccessedTempRel = true;
1102 pgstat_initstats(r);
1108 * relation_openrv - open any relation specified by a RangeVar
1110 * Same as relation_open, but the relation is specified by a RangeVar.
1114 relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
1119 * Check for shared-cache-inval messages before trying to open the
1120 * relation. This is needed even if we already hold a lock on the
1121 * relation, because GRANT/REVOKE are executed without taking any lock on
1122 * the target relation, and we want to be sure we see current ACL
1123 * information. We can skip this if asked for NoLock, on the assumption
1124 * that such a call is not the first one in the current command, and so we
1125 * should be reasonably up-to-date already. (XXX this all could stand to
1126 * be redesigned, but for the moment we'll keep doing this like it's been
1127 * done historically.)
1129 if (lockmode != NoLock)
1130 AcceptInvalidationMessages();
1132 /* Look up and lock the appropriate relation using namespace search */
1133 relOid = RangeVarGetRelid(relation, lockmode, false);
1135 /* Let relation_open do the rest */
1136 return relation_open(relOid, NoLock);
1140 * relation_openrv_extended - open any relation specified by a RangeVar
1142 * Same as relation_openrv, but with an additional missing_ok argument
1143 * allowing a NULL return rather than an error if the relation is not
1144 * found. (Note that some other causes, such as permissions problems,
1145 * will still result in an ereport.)
1149 relation_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
1155 * Check for shared-cache-inval messages before trying to open the
1156 * relation. See comments in relation_openrv().
1158 if (lockmode != NoLock)
1159 AcceptInvalidationMessages();
1161 /* Look up and lock the appropriate relation using namespace search */
1162 relOid = RangeVarGetRelid(relation, lockmode, missing_ok);
1164 /* Return NULL on not-found */
1165 if (!OidIsValid(relOid))
1168 /* Let relation_open do the rest */
1169 return relation_open(relOid, NoLock);
1173 * relation_close - close any relation
1175 * If lockmode is not "NoLock", we then release the specified lock.
1177 * Note that it is often sensible to hold a lock beyond relation_close;
1178 * in that case, the lock is released automatically at xact end.
1182 relation_close(Relation relation, LOCKMODE lockmode)
1184 LockRelId relid = relation->rd_lockInfo.lockRelId;
1186 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1188 /* The relcache does the real work... */
1189 RelationClose(relation);
1191 if (lockmode != NoLock)
1192 UnlockRelationId(&relid, lockmode);
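/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the usual relation_open/relation_close pairing.  The lock taken at open
 * time is normally held until end of transaction, so the close typically
 * passes NoLock.  Guarded by NOT_USED so it is never compiled.
 */
#ifdef NOT_USED
static void
example_open_and_close(Oid relid)
{
	Relation	rel = relation_open(relid, AccessShareLock);

	/* ... work with rel ... */

	/* drop the relcache reference; the lock is held until xact end */
	relation_close(rel, NoLock);
}
#endif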
1197 * heap_open - open a heap relation by relation OID
1199 * This is essentially relation_open plus check that the relation
1200 * is not an index nor a composite type. (The caller should also
1201 * check that it's not a view or foreign table before assuming it has storage.)
1206 heap_open(Oid relationId, LOCKMODE lockmode)
1210 r = relation_open(relationId, lockmode);
1212 if (r->rd_rel->relkind == RELKIND_INDEX)
1214 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1215 errmsg("\"%s\" is an index",
1216 RelationGetRelationName(r))));
1217 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1219 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1220 errmsg("\"%s\" is a composite type",
1221 RelationGetRelationName(r))));
1227 * heap_openrv - open a heap relation specified
1228 * by a RangeVar node
1230 * As above, but relation is specified by a RangeVar.
1234 heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
1238 r = relation_openrv(relation, lockmode);
1240 if (r->rd_rel->relkind == RELKIND_INDEX)
1242 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1243 errmsg("\"%s\" is an index",
1244 RelationGetRelationName(r))));
1245 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1247 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1248 errmsg("\"%s\" is a composite type",
1249 RelationGetRelationName(r))));
1255 * heap_openrv_extended - open a heap relation specified
1256 * by a RangeVar node
1258 * As above, but optionally return NULL instead of failing for
1259 * relation-not-found.
1263 heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
1268 r = relation_openrv_extended(relation, lockmode, missing_ok);
1272 if (r->rd_rel->relkind == RELKIND_INDEX)
1274 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1275 errmsg("\"%s\" is an index",
1276 RelationGetRelationName(r))));
1277 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1279 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1280 errmsg("\"%s\" is a composite type",
1281 RelationGetRelationName(r))));
1289 * heap_beginscan - begin relation scan
1291 * heap_beginscan_strat offers an extended API that lets the caller control
1292 * whether a nondefault buffer access strategy can be used, and whether
1293 * syncscan can be chosen (possibly resulting in the scan not starting from
1294 * block zero). Both of these default to TRUE with plain heap_beginscan.
1296 * heap_beginscan_bm is an alternative entry point for setting up a
1297 * HeapScanDesc for a bitmap heap scan. Although that scan technology is
1298 * really quite unlike a standard seqscan, there is just enough commonality
1299 * to make it worth using the same data structure.
1303 heap_beginscan(Relation relation, Snapshot snapshot,
1304 int nkeys, ScanKey key)
1306 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1307 true, true, false, false);
1311 heap_beginscan_catalog(Relation relation, int nkeys, ScanKey key)
1313 Oid relid = RelationGetRelid(relation);
1314 Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1316 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1317 true, true, false, true);
1321 heap_beginscan_strat(Relation relation, Snapshot snapshot,
1322 int nkeys, ScanKey key,
1323 bool allow_strat, bool allow_sync)
1325 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1326 allow_strat, allow_sync, false, false);
1330 heap_beginscan_bm(Relation relation, Snapshot snapshot,
1331 int nkeys, ScanKey key)
1333 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1334 false, false, true, false);
1338 heap_beginscan_internal(Relation relation, Snapshot snapshot,
1339 int nkeys, ScanKey key,
1340 bool allow_strat, bool allow_sync,
1341 bool is_bitmapscan, bool temp_snap)
1346 * increment relation ref count while scanning relation
1348 * This is just to make really sure the relcache entry won't go away while
1349 * the scan has a pointer to it. Caller should be holding the rel open
1350 * anyway, so this is redundant in all normal scenarios...
1352 RelationIncrementReferenceCount(relation);
1355 * allocate and initialize scan descriptor
1357 scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1359 scan->rs_rd = relation;
1360 scan->rs_snapshot = snapshot;
1361 scan->rs_nkeys = nkeys;
1362 scan->rs_bitmapscan = is_bitmapscan;
1363 scan->rs_strategy = NULL; /* set in initscan */
1364 scan->rs_allow_strat = allow_strat;
1365 scan->rs_allow_sync = allow_sync;
1366 scan->rs_temp_snap = temp_snap;
1369 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1371 scan->rs_pageatatime = IsMVCCSnapshot(snapshot);
1374 * For a seqscan in a serializable transaction, acquire a predicate lock
1375 * on the entire relation. This is required not only to lock all the
1376 * matching tuples, but also to conflict with new insertions into the
1377 * table. In an indexscan, we take page locks on the index pages covering
1378 * the range specified in the scan qual, but in a heap scan there is
1379 * nothing more fine-grained to lock. A bitmap scan is a different story:
1380 * there we have already scanned the index and locked the index pages
1381 * covering the predicate. But in that case we still have to lock any
1382 * matching heap tuples.
1385 PredicateLockRelation(relation, snapshot);
1387 /* we only need to set this up once */
1388 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1391 * we do this here instead of in initscan() because heap_rescan also calls
1392 * initscan() and we don't want to allocate memory again
1395 scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1397 scan->rs_key = NULL;
1399 initscan(scan, key, false);
1405 * heap_rescan - restart a relation scan
1409 heap_rescan(HeapScanDesc scan,
1413 * unpin scan buffers
1415 if (BufferIsValid(scan->rs_cbuf))
1416 ReleaseBuffer(scan->rs_cbuf);
1419 * reinitialize scan descriptor
1421 initscan(scan, key, true);
1425 * heap_endscan - end relation scan
1427 * See how to integrate with index scans.
1428 * Check handling of reldesc caching.
1432 heap_endscan(HeapScanDesc scan)
1434 /* Note: no locking manipulations needed */
1437 * unpin scan buffers
1439 if (BufferIsValid(scan->rs_cbuf))
1440 ReleaseBuffer(scan->rs_cbuf);
1443 * decrement relation reference count and free scan descriptor storage
1445 RelationDecrementReferenceCount(scan->rs_rd);
1448 pfree(scan->rs_key);
1450 if (scan->rs_strategy != NULL)
1451 FreeAccessStrategy(scan->rs_strategy);
1453 if (scan->rs_temp_snap)
1454 UnregisterSnapshot(scan->rs_snapshot);
1460 * heap_getnext - retrieve next tuple in scan
1462 * Fix to work with index relations.
1463 * We don't return the buffer anymore, but you can get it from the
1464 * returned HeapTuple.
1469 #define HEAPDEBUG_1 \
1470 elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
1471 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
1472 #define HEAPDEBUG_2 \
1473 elog(DEBUG2, "heap_getnext returning EOS")
1474 #define HEAPDEBUG_3 \
1475 elog(DEBUG2, "heap_getnext returning tuple")
1480 #endif /* !defined(HEAPDEBUGALL) */
1484 heap_getnext(HeapScanDesc scan, ScanDirection direction)
1486 /* Note: no locking manipulations needed */
1488 HEAPDEBUG_1; /* heap_getnext( info ) */
1490 if (scan->rs_pageatatime)
1491 heapgettup_pagemode(scan, direction,
1492 scan->rs_nkeys, scan->rs_key);
1494 heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1496 if (scan->rs_ctup.t_data == NULL)
1498 HEAPDEBUG_2; /* heap_getnext returning EOS */
1503 * if we get here it means we have a new current scan tuple, so point to
1504 * the proper return buffer and return the tuple.
1506 HEAPDEBUG_3; /* heap_getnext returning tuple */
1508 pgstat_count_heap_getnext(scan->rs_rd);
1510 return &(scan->rs_ctup);
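/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the typical begin/getnext/end cycle over a whole relation, using a
 * caller-supplied snapshot.  heap_getnext returns NULL at end of scan, and
 * the returned tuple stays valid only until the next heap_getnext call.
 * Guarded by NOT_USED so it is never compiled.
 */
#ifdef NOT_USED
static void
example_seqscan(Relation rel, Snapshot snapshot)
{
	HeapScanDesc scan = heap_beginscan(rel, snapshot, 0, NULL);
	HeapTuple	tuple;

	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* inspect the tuple here; do not modify or free it */
	}

	heap_endscan(scan);
}
#endif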
1514 * heap_fetch - retrieve tuple with given tid
1516 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1517 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1518 * against the specified snapshot.
1520 * If successful (tuple found and passes snapshot time qual), then *userbuf
1521 * is set to the buffer holding the tuple and TRUE is returned. The caller
1522 * must unpin the buffer when done with the tuple.
1524 * If the tuple is not found (ie, item number references a deleted slot),
1525 * then tuple->t_data is set to NULL and FALSE is returned.
1527 * If the tuple is found but fails the time qual check, then FALSE is returned
1528 * but tuple->t_data is left pointing to the tuple.
1530 * keep_buf determines what is done with the buffer in the FALSE-result cases.
1531 * When the caller specifies keep_buf = true, we retain the pin on the buffer
1532 * and return it in *userbuf (so the caller must eventually unpin it); when
1533 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
1535 * stats_relation is the relation to charge the heap_fetch operation against
1536 * for statistical purposes. (This could be the heap rel itself, an
1537 * associated index, or NULL to not count the fetch at all.)
1539 * heap_fetch does not follow HOT chains: only the exact TID requested will be fetched.
1542 * It is somewhat inconsistent that we ereport() on invalid block number but
1543 * return false on invalid item number. There are a couple of reasons though.
1544 * One is that the caller can relatively easily check the block number for
1545 * validity, but cannot check the item number without reading the page
1546 * itself. Another is that when we are following a t_ctid link, we can be
1547 * reasonably confident that the page number is valid (since VACUUM shouldn't
1548 * truncate off the destination page without having killed the referencing
1549 * tuple first), but the item number might well not be good.
1552 heap_fetch(Relation relation,
1557 Relation stats_relation)
1559 ItemPointer tid = &(tuple->t_self);
1563 OffsetNumber offnum;
1567 * Fetch and pin the appropriate page of the relation.
1569 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1572 * Need share lock on buffer to examine tuple commit status.
1574 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1575 page = BufferGetPage(buffer);
1578 * We'd better check for out-of-range offnum in case of VACUUM since the TID was obtained.
1581 offnum = ItemPointerGetOffsetNumber(tid);
1582 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1584 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1589 ReleaseBuffer(buffer);
1590 *userbuf = InvalidBuffer;
1592 tuple->t_data = NULL;
1597 * get the item line pointer corresponding to the requested tid
1599 lp = PageGetItemId(page, offnum);
1602 * Must check for deleted tuple.
1604 if (!ItemIdIsNormal(lp))
1606 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1611 ReleaseBuffer(buffer);
1612 *userbuf = InvalidBuffer;
1614 tuple->t_data = NULL;
1619 * fill in *tuple fields
1621 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1622 tuple->t_len = ItemIdGetLength(lp);
1623 tuple->t_tableOid = RelationGetRelid(relation);
1626 * check time qualification of tuple, then release lock
1628 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1631 PredicateLockTuple(relation, tuple, snapshot);
1633 CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1635 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1640 * All checks passed, so return the tuple as valid. Caller is now
1641 * responsible for releasing the buffer.
1645 /* Count the successful fetch against appropriate rel, if any */
1646 if (stats_relation != NULL)
1647 pgstat_count_heap_fetch(stats_relation);
1652 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1657 ReleaseBuffer(buffer);
1658 *userbuf = InvalidBuffer;
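/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * fetching a single tuple by TID.  The argument order assumed here follows
 * the description above: snapshot, the tuple whose t_self names the TID,
 * an output buffer, keep_buf = false, and no stats relation.  On success the
 * caller must release the returned buffer pin.  Guarded by NOT_USED so it is
 * never compiled.
 */
#ifdef NOT_USED
static bool
example_fetch_by_tid(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	HeapTupleData tuple;
	Buffer		buffer;

	tuple.t_self = *tid;
	if (heap_fetch(rel, snapshot, &tuple, &buffer, false, NULL))
	{
		/* tuple is visible; use tuple.t_data, then drop the pin */
		ReleaseBuffer(buffer);
		return true;
	}
	return false;				/* not found, or failed the snapshot test */
}
#endif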
1665 * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1667 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1668 * of a HOT chain), and buffer is the buffer holding this tuple. We search
1669 * for the first chain member satisfying the given snapshot. If one is
1670 * found, we update *tid to reference that tuple's offset number, and
1671 * return TRUE. If no match, return FALSE without modifying *tid.
1673 * heapTuple is a caller-supplied buffer. When a match is found, we return
1674 * the tuple here, in addition to updating *tid. If no match is found, the
1675 * contents of this buffer on return are undefined.
1677 * If all_dead is not NULL, we check non-visible tuples to see if they are
1678 * globally dead; *all_dead is set TRUE if all members of the HOT chain
1679 * are vacuumable, FALSE if not.
1681 * Unlike heap_fetch, the caller must already have pin and (at least) share
1682 * lock on the buffer; it is still pinned/locked at exit. Also unlike
1683 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
1686 heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1687 Snapshot snapshot, HeapTuple heapTuple,
1688 bool *all_dead, bool first_call)
1690 Page dp = (Page) BufferGetPage(buffer);
1691 TransactionId prev_xmax = InvalidTransactionId;
1692 OffsetNumber offnum;
1693 bool at_chain_start;
1697 /* If this is not the first call, previous call returned a (live!) tuple */
1699 *all_dead = first_call;
1701 Assert(TransactionIdIsValid(RecentGlobalXmin));
1703 Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
1704 offnum = ItemPointerGetOffsetNumber(tid);
1705 at_chain_start = first_call;
1708 heapTuple->t_self = *tid;
1710 /* Scan through possible multiple members of HOT-chain */
1715 /* check for bogus TID */
1716 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1719 lp = PageGetItemId(dp, offnum);
1721 /* check for unused, dead, or redirected items */
1722 if (!ItemIdIsNormal(lp))
1724 /* We should only see a redirect at start of chain */
1725 if (ItemIdIsRedirected(lp) && at_chain_start)
1727 /* Follow the redirect */
1728 offnum = ItemIdGetRedirect(lp);
1729 at_chain_start = false;
1732 /* else must be end of chain */
1736 heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1737 heapTuple->t_len = ItemIdGetLength(lp);
1738 heapTuple->t_tableOid = RelationGetRelid(relation);
1739 ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
1742 * Shouldn't see a HEAP_ONLY tuple at chain start.
1744 if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1748 * The xmin should match the previous xmax value, else chain is broken.
1751 if (TransactionIdIsValid(prev_xmax) &&
1752 !TransactionIdEquals(prev_xmax,
1753 HeapTupleHeaderGetXmin(heapTuple->t_data)))
1757 * When first_call is true (and thus, skip is initially false) we'll
1758 * return the first tuple we find. But on later passes, heapTuple
1759 * will initially be pointing to the tuple we returned last time.
1760 * Returning it again would be incorrect (and would loop forever), so
1761 * we skip it and return the next match we find.
1766 * For the benefit of logical decoding, have t_self point at the
1767 * element of the HOT chain we're currently investigating instead
1768 * of the root tuple of the HOT chain. This is important because
1769 * the *Satisfies routine for historical mvcc snapshots needs the
1770 * correct tid to decide about the visibility in some cases.
1772 ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
1774 /* If it's visible per the snapshot, we must return it */
1775 valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1776 CheckForSerializableConflictOut(valid, relation, heapTuple,
1778 /* reset to original, non-redirected, tid */
1779 heapTuple->t_self = *tid;
1783 ItemPointerSetOffsetNumber(tid, offnum);
1784 PredicateLockTuple(relation, heapTuple, snapshot);
1793 * If we can't see it, maybe no one else can either. At caller
1794 * request, check whether all chain members are dead to all transactions.
1797 if (all_dead && *all_dead &&
1798 !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
1802 * Check to see if HOT chain continues past this tuple; if so fetch
1803 * the next offnum and loop around.
1805 if (HeapTupleIsHotUpdated(heapTuple))
1807 Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1808 ItemPointerGetBlockNumber(tid));
1809 offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1810 at_chain_start = false;
1811 prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1814 break; /* end of chain */
1821 * heap_hot_search - search HOT chain for tuple satisfying snapshot
1823 * This has the same API as heap_hot_search_buffer, except that the caller
1824 * does not provide the buffer containing the page; rather, we access it locally.
1828 heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
1833 HeapTupleData heapTuple;
1835 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1836 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1837 result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
1838 &heapTuple, all_dead, true);
1839 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1840 ReleaseBuffer(buffer);
1845 * heap_get_latest_tid - get the latest tid of a specified tuple
1847 * Actually, this gets the latest version that is visible according to
1848 * the passed snapshot. You can pass SnapshotDirty to get the very latest,
1849 * possibly uncommitted version.
1851 * *tid is both an input and an output parameter: it is updated to
1852 * show the latest version of the row. Note that it will not be changed
1853 * if no version of the row passes the snapshot test.
1856 heap_get_latest_tid(Relation relation,
1861 ItemPointerData ctid;
1862 TransactionId priorXmax;
1864 /* this is to avoid Assert failures on bad input */
1865 if (!ItemPointerIsValid(tid))
1869 * Since this can be called with user-supplied TID, don't trust the input
1870 * too much. (RelationGetNumberOfBlocks is an expensive check, so we
1871 * don't check t_ctid links again this way. Note that it would not do to
1872 * call it just once and save the result, either.)
1874 blk = ItemPointerGetBlockNumber(tid);
1875 if (blk >= RelationGetNumberOfBlocks(relation))
1876 elog(ERROR, "block number %u is out of range for relation \"%s\"",
1877 blk, RelationGetRelationName(relation));
1880 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1881 * need to examine, and *tid is the TID we will return if ctid turns out to be bogus.
1884 * Note that we will loop until we reach the end of the t_ctid chain.
1885 * Depending on the snapshot passed, there might be at most one visible
1886 * version of the row, but we don't try to optimize for that.
1889 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1894 OffsetNumber offnum;
1900 * Read, pin, and lock the page.
1902 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1903 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1904 page = BufferGetPage(buffer);
1907 * Check for bogus item number. This is not treated as an error
1908 * condition because it can happen while following a t_ctid link. We
1909 * just assume that the prior tid is OK and return it unchanged.
1911 offnum = ItemPointerGetOffsetNumber(&ctid);
1912 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1914 UnlockReleaseBuffer(buffer);
1917 lp = PageGetItemId(page, offnum);
1918 if (!ItemIdIsNormal(lp))
1920 UnlockReleaseBuffer(buffer);
1924 /* OK to access the tuple */
1926 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1927 tp.t_len = ItemIdGetLength(lp);
1928 tp.t_tableOid = RelationGetRelid(relation);
1931 * After following a t_ctid link, we might arrive at an unrelated
1932 * tuple. Check for XMIN match.
1934 if (TransactionIdIsValid(priorXmax) &&
1935 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1937 UnlockReleaseBuffer(buffer);
1942 * Check time qualification of tuple; if visible, set it as the new result candidate.
1945 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1946 CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1951 * If there's a valid t_ctid link, follow it, else we're done.
1953 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1954 HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
1955 ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1957 UnlockReleaseBuffer(buffer);
1961 ctid = tp.t_data->t_ctid;
1962 priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1963 UnlockReleaseBuffer(buffer);
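/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * since *tid is both input and output, a caller that wants to keep the
 * original TID works on a copy; if no version of the row is visible under
 * the snapshot, the copy is simply left unchanged.  Guarded by NOT_USED so
 * it is never compiled.
 */
#ifdef NOT_USED
static ItemPointerData
example_latest_version(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	ItemPointerData latest = *tid;

	heap_get_latest_tid(rel, snapshot, &latest);
	return latest;				/* newest visible version, or *tid unchanged */
}
#endif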
1969 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
1971 * This is called after we have waited for the XMAX transaction to terminate.
1972 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
1973 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
1974 * hint bit if possible --- but beware that that may not yet be possible,
1975 * if the transaction committed asynchronously.
1977 * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
1978 * even if it commits.
1980 * Hence callers should look only at XMAX_INVALID.
1982 * Note this is not allowed for tuples whose xmax is a multixact.
1985 UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
1987 Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
1988 Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
1990 if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
1992 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
1993 TransactionIdDidCommit(xid))
1994 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
1997 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
1998 InvalidTransactionId);
2004 * GetBulkInsertState - prepare status object for a bulk insert
2007 GetBulkInsertState(void)
2009 BulkInsertState bistate;
2011 bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2012 bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2013 bistate->current_buf = InvalidBuffer;
2018 * FreeBulkInsertState - clean up after finishing a bulk insert
2021 FreeBulkInsertState(BulkInsertState bistate)
2023 if (bistate->current_buf != InvalidBuffer)
2024 ReleaseBuffer(bistate->current_buf);
2025 FreeAccessStrategy(bistate->strategy);
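/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * typical lifecycle of a BulkInsertState around a run of heap_insert calls.
 * The tuples and command id are assumed to come from the caller; options = 0
 * requests default insert behavior.  Guarded by NOT_USED so it is never
 * compiled.
 */
#ifdef NOT_USED
static void
example_bulk_insert(Relation rel, HeapTuple *tuples, int ntuples, CommandId cid)
{
	BulkInsertState bistate = GetBulkInsertState();
	int			i;

	for (i = 0; i < ntuples; i++)
		heap_insert(rel, tuples[i], cid, 0, bistate);

	FreeBulkInsertState(bistate);
}
#endif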
2031 * heap_insert - insert tuple into a heap
2033 * The new tuple is stamped with the current transaction ID and the specified command ID.
2036 * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
2037 * logged in WAL, even for a non-temp relation. Safe usage of this behavior
2038 * requires that we arrange that all new tuples go into new pages not
2039 * containing any tuples from other transactions, and that the relation gets
2040 * fsync'd before commit. (See also heap_sync() comments)
2042 * The HEAP_INSERT_SKIP_FSM option is passed directly to
2043 * RelationGetBufferForTuple, which see for more info.
2045 * HEAP_INSERT_FROZEN should only be specified for inserts into
2046 * relfilenodes created during the current subtransaction and when
2047 * there are no prior snapshots or pre-existing portals open.
2048 * This causes rows to be frozen, which is an MVCC violation and
2049 * requires explicit options chosen by user.
2051 * Note that these options will be applied when inserting into the heap's
2052 * TOAST table, too, if the tuple requires any out-of-line data.
2054 * The BulkInsertState object (if any; bistate can be NULL for default
2055 * behavior) is also just passed through to RelationGetBufferForTuple.
2057 * The return value is the OID assigned to the tuple (either here or by the
2058 * caller), or InvalidOid if no OID. The header fields of *tup are updated
2059 * to match the stored tuple; in particular tup->t_self receives the actual
2060 * TID where the tuple was stored. But note that any toasting of fields
2061 * within the tuple data is NOT reflected into *tup.
2064 heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2065 int options, BulkInsertState bistate)
2067 TransactionId xid = GetCurrentTransactionId();
2070 Buffer vmbuffer = InvalidBuffer;
2071 bool all_visible_cleared = false;
2074 * Fill in tuple header fields, assign an OID, and toast the tuple if necessary.
2077 * Note: below this point, heaptup is the data we actually intend to store
2078 * into the relation; tup is the caller's original untoasted data.
2080 heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2083 * We're about to do the actual insert -- but check for conflict first, to
2084 * avoid possibly having to roll back work we've just done.
2086 * For a heap insert, we only need to check for table-level SSI locks. Our
2087 * new tuple can't possibly conflict with existing tuple locks, and heap
2088 * page locks are only consolidated versions of tuple locks; they do not
2089 * lock "gaps" as index page locks do. So we don't need to identify a
2090 * buffer before making the call.
2092 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2095 * Find buffer to insert this tuple into. If the page is all visible,
2096 * this will also pin the requisite visibility map page.
2098 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2099 InvalidBuffer, options, bistate,
2102 /* NO EREPORT(ERROR) from here till changes are logged */
2103 START_CRIT_SECTION();
2105 RelationPutHeapTuple(relation, buffer, heaptup);
2107 if (PageIsAllVisible(BufferGetPage(buffer)))
2109 all_visible_cleared = true;
2110 PageClearAllVisible(BufferGetPage(buffer));
2111 visibilitymap_clear(relation,
2112 ItemPointerGetBlockNumber(&(heaptup->t_self)),
2117 * XXX Should we set PageSetPrunable on this page?
2119 * The inserting transaction may eventually abort thus making this tuple
2120 * DEAD and hence available for pruning. Though we don't want to optimize
2121 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2122 * aborted tuple will never be pruned until next vacuum is triggered.
2124 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2127 MarkBufferDirty(buffer);
2130 if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2132 xl_heap_insert xlrec;
2133 xl_heap_header xlhdr;
2135 Page page = BufferGetPage(buffer);
2136 uint8 info = XLOG_HEAP_INSERT;
2140 * If this is a catalog, we need to transmit combocids to properly
2141 * decode, so log that as well.
2143 if (RelationIsAccessibleInLogicalDecoding(relation))
2144 log_heap_new_cid(relation, heaptup);
2147 * If this is the first and only tuple on the page, we can reinit the
2148 * page instead of restoring the whole thing. Set flag, and hide
2149 * buffer references from XLogInsert.
2151 if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2152 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2154 info |= XLOG_HEAP_INIT_PAGE;
2155 bufflags |= REGBUF_WILL_INIT;
2158 xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2159 xlrec.flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
2160 Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2163 * For logical decoding, we need the tuple even if we're doing a full
2164 * page write, so make sure it's included even if we take a full-page
2165 * image. (XXX We could alternatively store a pointer into the FPW).
2167 if (RelationIsLogicallyLogged(relation))
2169 xlrec.flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
2170 bufflags |= REGBUF_KEEP_DATA;
2174 XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2176 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2177 xlhdr.t_infomask = heaptup->t_data->t_infomask;
2178 xlhdr.t_hoff = heaptup->t_data->t_hoff;
2181 * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2182 * write the whole page to the xlog, we don't need to store
2183 * xl_heap_header in the xlog.
2185 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2186 XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2187 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2188 XLogRegisterBufData(0,
2189 (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits),
2190 heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits));
2192 recptr = XLogInsert(RM_HEAP_ID, info);
2194 PageSetLSN(page, recptr);
2199 UnlockReleaseBuffer(buffer);
2200 if (vmbuffer != InvalidBuffer)
2201 ReleaseBuffer(vmbuffer);
2204 * If tuple is cacheable, mark it for invalidation from the caches in case
2205 * we abort. Note it is OK to do this after releasing the buffer, because
2206 * the heaptup data structure is all in local memory, not in the shared buffer.
2209 CacheInvalidateHeapTuple(relation, heaptup, NULL);
2211 pgstat_count_heap_insert(relation, 1);
2214 * If heaptup is a private copy, release it. Don't forget to copy t_self
2215 * back to the caller's image, too.
2219 tup->t_self = heaptup->t_self;
2220 heap_freetuple(heaptup);
2223 return HeapTupleGetOid(tup);
2227 * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2228 * tuple header fields, assigns an OID, and toasts the tuple if necessary.
2229 * Returns a toasted version of the tuple if it was toasted, or the original
2230 * tuple if not. Note that in any case, the header fields are also set in
2231 * the original tuple.
2234 heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2235 CommandId cid, int options)
2237 if (relation->rd_rel->relhasoids)
2240 /* this is redundant with an Assert in HeapTupleSetOid */
2241 Assert(tup->t_data->t_infomask & HEAP_HASOID);
2245 * If the object id of this tuple has already been assigned, trust the
2246 * caller. There are a couple of ways this can happen. At initial db
2247 * creation, the backend program sets oids for tuples. When we define
2248 * an index, we set the oid. Finally, in the future, we may allow
2249 * users to set their own object ids in order to support a persistent
2250 * object store (objects need to contain pointers to one another).
2252 if (!OidIsValid(HeapTupleGetOid(tup)))
2253 HeapTupleSetOid(tup, GetNewOid(relation));
2257 /* check there is no space for an OID */
2258 Assert(!(tup->t_data->t_infomask & HEAP_HASOID));
2261 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2262 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2263 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2264 HeapTupleHeaderSetXmin(tup->t_data, xid);
2265 if (options & HEAP_INSERT_FROZEN)
2266 HeapTupleHeaderSetXminFrozen(tup->t_data);
2268 HeapTupleHeaderSetCmin(tup->t_data, cid);
2269 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2270 tup->t_tableOid = RelationGetRelid(relation);
2273 * If the new tuple is too big for storage or contains already toasted
2274 * out-of-line attributes from some other relation, invoke the toaster.
2276 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2277 relation->rd_rel->relkind != RELKIND_MATVIEW)
2279 /* toast table entries should never be recursively toasted */
2280 Assert(!HeapTupleHasExternal(tup));
2283 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2284 return toast_insert_or_update(relation, tup, NULL, options);
2290 * heap_multi_insert - insert multiple tuples into a heap
2292 * This is like heap_insert(), but inserts multiple tuples in one operation.
2293 * That's faster than calling heap_insert() in a loop, because when multiple
2294 * tuples can be inserted on a single page, we can write just a single WAL
2295 * record covering all of them, and only need to lock/unlock the page once.
2297 * Note: this leaks memory into the current memory context. You can create a
2298 * temporary context before calling this, if that's a problem.
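 *
 * For example, COPY FROM collects incoming rows into batches and inserts
 * each batch with a single call to this function.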
2301 heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
2302 CommandId cid, int options, BulkInsertState bistate)
2304 TransactionId xid = GetCurrentTransactionId();
2305 HeapTuple *heaptuples;
2308 char *scratch = NULL;
2312 bool need_tuple_data = RelationIsLogicallyLogged(relation);
2313 bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
2315 needwal = !(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation);
2316 saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2317 HEAP_DEFAULT_FILLFACTOR);
2319 /* Toast and set header data in all the tuples */
2320 heaptuples = palloc(ntuples * sizeof(HeapTuple));
2321 for (i = 0; i < ntuples; i++)
2322 heaptuples[i] = heap_prepare_insert(relation, tuples[i],
2326 * Allocate some memory to use for constructing the WAL record. Using
2327 * palloc() within a critical section is not safe, so we allocate this beforehand.
2331 scratch = palloc(BLCKSZ);
2334 * We're about to do the actual inserts -- but check for conflict first,
2335 * to avoid possibly having to roll back work we've just done.
2337 * For a heap insert, we only need to check for table-level SSI locks. Our
2338 * new tuple can't possibly conflict with existing tuple locks, and heap
2339 * page locks are only consolidated versions of tuple locks; they do not
2340 * lock "gaps" as index page locks do. So we don't need to identify a
2341 * buffer before making the call.
2343 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2346 while (ndone < ntuples)
2349 Buffer vmbuffer = InvalidBuffer;
2350 bool all_visible_cleared = false;
2353 CHECK_FOR_INTERRUPTS();
2356 * Find buffer where at least the next tuple will fit. If the page is
2357 * all-visible, this will also pin the requisite visibility map page.
2359 buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2360 InvalidBuffer, options, bistate,
2362 page = BufferGetPage(buffer);
2364 /* NO EREPORT(ERROR) from here till changes are logged */
2365 START_CRIT_SECTION();
2368 * RelationGetBufferForTuple has ensured that the first tuple fits.
2369 * Put that on the page, and then as many other tuples as fit.
2371 RelationPutHeapTuple(relation, buffer, heaptuples[ndone]);
2372 for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2374 HeapTuple heaptup = heaptuples[ndone + nthispage];
2376 if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2379 RelationPutHeapTuple(relation, buffer, heaptup);
2382 * We don't use heap_multi_insert for catalog tuples yet, but
2383 * better be prepared...
2385 if (needwal && need_cids)
2386 log_heap_new_cid(relation, heaptup);
2389 if (PageIsAllVisible(page))
2391 all_visible_cleared = true;
2392 PageClearAllVisible(page);
2393 visibilitymap_clear(relation,
2394 BufferGetBlockNumber(buffer),
2399 * XXX Should we set PageSetPrunable on this page ? See heap_insert()
2402 MarkBufferDirty(buffer);
2408 xl_heap_multi_insert *xlrec;
2409 uint8 info = XLOG_HEAP2_MULTI_INSERT;
2412 char *scratchptr = scratch;
2417 * If the page was previously empty, we can reinit the page
2418 * instead of restoring the whole thing.
2420 init = (ItemPointerGetOffsetNumber(&(heaptuples[ndone]->t_self)) == FirstOffsetNumber &&
2421 PageGetMaxOffsetNumber(page) == FirstOffsetNumber + nthispage - 1);
2423 /* allocate xl_heap_multi_insert struct from the scratch area */
2424 xlrec = (xl_heap_multi_insert *) scratchptr;
2425 scratchptr += SizeOfHeapMultiInsert;
2428 * Allocate the offsets array, unless we're reinitializing the page;
2429 * in that case the tuples are stored in order starting at
2430 * FirstOffsetNumber and we don't need to store the offsets explicitly.
2434 scratchptr += nthispage * sizeof(OffsetNumber);
2436 /* the rest of the scratch space is used for tuple data */
2437 tupledata = scratchptr;
2439 xlrec->flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
2440 xlrec->ntuples = nthispage;
2443 * Write out an xl_multi_insert_tuple and the tuple data itself for each tuple.
2446 for (i = 0; i < nthispage; i++)
2448 HeapTuple heaptup = heaptuples[ndone + i];
2449 xl_multi_insert_tuple *tuphdr;
2453 xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2454 /* xl_multi_insert_tuple needs two-byte alignment. */
2455 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2456 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2458 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2459 tuphdr->t_infomask = heaptup->t_data->t_infomask;
2460 tuphdr->t_hoff = heaptup->t_data->t_hoff;
2462 /* write bitmap [+ padding] [+ oid] + data */
2463 datalen = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
2465 (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits),
2467 tuphdr->datalen = datalen;
2468 scratchptr += datalen;
2470 totaldatalen = scratchptr - tupledata;
2471 Assert((scratchptr - scratch) < BLCKSZ);
2473 if (need_tuple_data)
2474 xlrec->flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
2477 * Signal that this is the last xl_heap_multi_insert record
2478 * emitted by this call to heap_multi_insert(). Needed for logical
2479 * decoding so it knows when to clean up temporary data.
2481 if (ndone + nthispage == ntuples)
2482 xlrec->flags |= XLOG_HEAP_LAST_MULTI_INSERT;
2486 info |= XLOG_HEAP_INIT_PAGE;
2487 bufflags |= REGBUF_WILL_INIT;
2491 * If we're doing logical decoding, include the new tuple data
2492 * even if we take a full-page image of the page.
2494 if (need_tuple_data)
2495 bufflags |= REGBUF_KEEP_DATA;
2498 XLogRegisterData((char *) xlrec, tupledata - scratch);
2499 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2501 XLogRegisterBufData(0, tupledata, totaldatalen);
2502 recptr = XLogInsert(RM_HEAP2_ID, info);
2504 PageSetLSN(page, recptr);
2509 UnlockReleaseBuffer(buffer);
2510 if (vmbuffer != InvalidBuffer)
2511 ReleaseBuffer(vmbuffer);
2517 * If tuples are cacheable, mark them for invalidation from the caches in
2518 * case we abort. Note it is OK to do this after releasing the buffer,
2519 * because the heaptuples data structure is all in local memory, not in
2520 * the shared buffer.
2522 if (IsCatalogRelation(relation))
2524 for (i = 0; i < ntuples; i++)
2525 CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2529 * Copy t_self fields back to the caller's original tuples. This does
2530 * nothing for untoasted tuples (tuples[i] == heaptuples[i]), but it's
2531 * probably faster to always copy than check.
2533 for (i = 0; i < ntuples; i++)
2534 tuples[i]->t_self = heaptuples[i]->t_self;
2536 pgstat_count_heap_insert(relation, ntuples);
2540 * simple_heap_insert - insert a tuple
2542 * Currently, this routine differs from heap_insert only in supplying
2543 * a default command ID and not allowing access to the speedup options.
2545 * This should be used rather than using heap_insert directly in most places
2546 * where we are modifying system catalogs.
2549 simple_heap_insert(Relation relation, HeapTuple tup)
2551 return heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
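
/*
 * Illustrative sketch only (not part of the original code): a typical
 * catalog-style insertion built on simple_heap_insert.  The relation OID,
 * the values/nulls arrays, and the CatalogUpdateIndexes() follow-up are
 * assumptions of the example, not requirements of this function.
 *
 *		Relation	rel = heap_open(relid, RowExclusiveLock);
 *		HeapTuple	tup = heap_form_tuple(RelationGetDescr(rel),
 *										  values, nulls);
 *
 *		simple_heap_insert(rel, tup);
 *		CatalogUpdateIndexes(rel, tup);		(keep catalog indexes current)
 *
 *		heap_freetuple(tup);
 *		heap_close(rel, RowExclusiveLock);
 */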
2555 * Given infomask/infomask2, compute the bits that must be saved in the
2556 * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2557 * xl_heap_lock_updated WAL records.
2559 * See fix_infomask_from_infobits.
2562 compute_infobits(uint16 infomask, uint16 infomask2)
2565 ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2566 ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2567 ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2568 /* note we ignore HEAP_XMAX_SHR_LOCK here */
2569 ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2570 ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2571 XLHL_KEYS_UPDATED : 0);
2575 * Given two versions of the same t_infomask for a tuple, compare them and
2576 * return whether the relevant status for a tuple Xmax has changed. This is
2577 * used after a buffer lock has been released and reacquired: we want to ensure
2578 * that the tuple state continues to be the same as it was when we previously examined it.
2581 * Note the Xmax field itself must be compared separately.
2584 xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2586 const uint16 interesting =
2587 HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2589 if ((new_infomask & interesting) != (old_infomask & interesting))
2596 * heap_delete - delete a tuple
2598 * NB: do not call this directly unless you are prepared to deal with
2599 * concurrent-update conditions. Use simple_heap_delete instead.
2601 * relation - table to be modified (caller must hold suitable lock)
2602 * tid - TID of tuple to be deleted
2603 * cid - delete command ID (used for visibility test, and stored into
2604 * cmax if successful)
2605 * crosscheck - if not InvalidSnapshot, also check tuple against this
2606 * wait - true if should wait for any conflicting update to commit/abort
2607 * hufd - output parameter, filled in failure cases (see below)
2609 * Normal, successful return value is HeapTupleMayBeUpdated, which
2610 * actually means we did delete it. Failure return codes are
2611 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2612 * (the last only possible if wait == false).
2614 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
2615 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
2616 * (the last only for HeapTupleSelfUpdated, since we
2617 * cannot obtain cmax from a combocid generated by another transaction).
2618 * See comments for struct HeapUpdateFailureData for additional info.
2621 heap_delete(Relation relation, ItemPointer tid,
2622 CommandId cid, Snapshot crosscheck, bool wait,
2623 HeapUpdateFailureData *hufd)
2626 TransactionId xid = GetCurrentTransactionId();
2632 Buffer vmbuffer = InvalidBuffer;
2633 TransactionId new_xmax;
2634 uint16 new_infomask,
2636 bool have_tuple_lock = false;
2638 bool all_visible_cleared = false;
2639 HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2640 bool old_key_copied = false;
2642 Assert(ItemPointerIsValid(tid));
2644 block = ItemPointerGetBlockNumber(tid);
2645 buffer = ReadBuffer(relation, block);
2646 page = BufferGetPage(buffer);
2649 * Before locking the buffer, pin the visibility map page if it appears to
2650 * be necessary. Since we haven't got the lock yet, someone else might be
2651 * in the middle of changing this, so we'll need to recheck after we have the lock.
2654 if (PageIsAllVisible(page))
2655 visibilitymap_pin(relation, block, &vmbuffer);
2657 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2660 * If we didn't pin the visibility map page and the page has become all
2661 * visible while we were busy locking the buffer, we'll have to unlock and
2662 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2663 * unfortunate, but hopefully shouldn't happen often.
2665 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2667 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2668 visibilitymap_pin(relation, block, &vmbuffer);
2669 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2672 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2673 Assert(ItemIdIsNormal(lp));
2675 tp.t_tableOid = RelationGetRelid(relation);
2676 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2677 tp.t_len = ItemIdGetLength(lp);
2681 result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2683 if (result == HeapTupleInvisible)
2685 UnlockReleaseBuffer(buffer);
2686 elog(ERROR, "attempted to delete invisible tuple");
2688 else if (result == HeapTupleBeingUpdated && wait)
2690 TransactionId xwait;
2693 /* must copy state data before unlocking buffer */
2694 xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2695 infomask = tp.t_data->t_infomask;
2697 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2700 * Acquire tuple lock to establish our priority for the tuple (see
2701 * heap_lock_tuple). LockTuple will release us when we are
2702 * next-in-line for the tuple.
2704 * If we are forced to "start over" below, we keep the tuple lock;
2705 * this arranges that we stay at the head of the line while rechecking tuple state.
2708 if (!have_tuple_lock)
2710 LockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2711 have_tuple_lock = true;
2715 * Sleep until concurrent transaction ends. Note that we don't care
2716 * which lock mode the locker has, because we need the strongest one.
2719 if (infomask & HEAP_XMAX_IS_MULTI)
2721 /* wait for multixact */
2722 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
2723 relation, &tp.t_data->t_ctid, XLTW_Delete,
2725 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2728 * If xwait had just locked the tuple then some other xact could
2729 * update this tuple before we get to this point. Check for xmax
2730 * change, and start over if so.
2732 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2733 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2738 * You might think the multixact is necessarily done here, but not
2739 * so: it could have surviving members, namely our own xact or
2740 * other subxacts of this backend. It is legal for us to delete
2741 * the tuple in either case, however (the latter case is
2742 * essentially a situation of upgrading our former shared lock to
2743 * exclusive). We don't bother changing the on-disk hint bits
2744 * since we are about to overwrite the xmax altogether.
2749 /* wait for regular transaction to end */
2750 XactLockTableWait(xwait, relation, &tp.t_data->t_ctid, XLTW_Delete);
2751 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2754 * xwait is done, but if xwait had just locked the tuple then some
2755 * other xact could update this tuple before we get to this point.
2756 * Check for xmax change, and start over if so.
2758 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2759 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2763 /* Otherwise check if it committed or aborted */
2764 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2768 * We may overwrite if previous xmax aborted, or if it committed but
2769 * only locked the tuple without updating it.
2771 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2772 HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
2773 HeapTupleHeaderIsOnlyLocked(tp.t_data))
2774 result = HeapTupleMayBeUpdated;
2776 result = HeapTupleUpdated;
2779 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
2781 /* Perform additional check for transaction-snapshot mode RI updates */
2782 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2783 result = HeapTupleUpdated;
2786 if (result != HeapTupleMayBeUpdated)
2788 Assert(result == HeapTupleSelfUpdated ||
2789 result == HeapTupleUpdated ||
2790 result == HeapTupleBeingUpdated);
2791 Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2792 hufd->ctid = tp.t_data->t_ctid;
2793 hufd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2794 if (result == HeapTupleSelfUpdated)
2795 hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2797 hufd->cmax = InvalidCommandId;
2798 UnlockReleaseBuffer(buffer);
2799 if (have_tuple_lock)
2800 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2801 if (vmbuffer != InvalidBuffer)
2802 ReleaseBuffer(vmbuffer);
2807 * We're about to do the actual delete -- check for conflict first, to
2808 * avoid possibly having to roll back work we've just done.
2810 CheckForSerializableConflictIn(relation, &tp, buffer);
2812 /* replace cid with a combo cid if necessary */
2813 HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2816 * Compute replica identity tuple before entering the critical section so
2817 * we don't PANIC upon a memory allocation failure.
2819 old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
2822 * If this is the first possibly-multixact-able operation in the current
2823 * transaction, set my per-backend OldestMemberMXactId setting. We can be
2824 * certain that the transaction will never become a member of any older
2825 * MultiXactIds than that. (We have to do this even if we end up just
2826 * using our own TransactionId below, since some other backend could
2827 * incorporate our XID into a MultiXact immediately afterwards.)
2829 MultiXactIdSetOldestMember();
2831 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
2832 tp.t_data->t_infomask, tp.t_data->t_infomask2,
2833 xid, LockTupleExclusive, true,
2834 &new_xmax, &new_infomask, &new_infomask2);
2836 START_CRIT_SECTION();
2839 * If this transaction commits, the tuple will become DEAD sooner or
2840 * later. Set flag that this page is a candidate for pruning once our xid
2841 * falls below the OldestXmin horizon. If the transaction finally aborts,
2842 * the subsequent page pruning will be a no-op and the hint will be cleared.
2845 PageSetPrunable(page, xid);
2847 if (PageIsAllVisible(page))
2849 all_visible_cleared = true;
2850 PageClearAllVisible(page);
2851 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2855 /* store transaction information of xact deleting the tuple */
2856 tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
2857 tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
2858 tp.t_data->t_infomask |= new_infomask;
2859 tp.t_data->t_infomask2 |= new_infomask2;
2860 HeapTupleHeaderClearHotUpdated(tp.t_data);
2861 HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
2862 HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2863 /* Make sure there is no forward chain link in t_ctid */
2864 tp.t_data->t_ctid = tp.t_self;
2866 MarkBufferDirty(buffer);
2869 if (RelationNeedsWAL(relation))
2871 xl_heap_delete xlrec;
2874 /* For logical decoding we need combocids to properly decode the catalog */
2875 if (RelationIsAccessibleInLogicalDecoding(relation))
2876 log_heap_new_cid(relation, &tp);
2878 xlrec.flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
2879 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
2880 tp.t_data->t_infomask2);
2881 xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
2882 xlrec.xmax = new_xmax;
2884 if (old_key_tuple != NULL)
2886 if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
2887 xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_TUPLE;
2889 xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_KEY;
2893 XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
2895 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2898 * Log replica identity of the deleted tuple if there is one
2900 if (old_key_tuple != NULL)
2902 xl_heap_header xlhdr;
2904 xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
2905 xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
2906 xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
2908 XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
2909 XLogRegisterData((char *) old_key_tuple->t_data
2910 + offsetof(HeapTupleHeaderData, t_bits),
2911 old_key_tuple->t_len
2912 - offsetof(HeapTupleHeaderData, t_bits));
2915 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
2917 PageSetLSN(page, recptr);
2922 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2924 if (vmbuffer != InvalidBuffer)
2925 ReleaseBuffer(vmbuffer);
2928 * If the tuple has toasted out-of-line attributes, we need to delete
2929 * those items too. We have to do this before releasing the buffer
2930 * because we need to look at the contents of the tuple, but it's OK to
2931 * release the content lock on the buffer first.
2933 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2934 relation->rd_rel->relkind != RELKIND_MATVIEW)
2936 /* toast table entries should never be recursively toasted */
2937 Assert(!HeapTupleHasExternal(&tp));
2939 else if (HeapTupleHasExternal(&tp))
2940 toast_delete(relation, &tp);
2943 * Mark tuple for invalidation from system caches at next command
2944 * boundary. We have to do this before releasing the buffer because we
2945 * need to look at the contents of the tuple.
2947 CacheInvalidateHeapTuple(relation, &tp, NULL);
2949 /* Now we can release the buffer */
2950 ReleaseBuffer(buffer);
2953 * Release the lmgr tuple lock, if we had it.
2955 if (have_tuple_lock)
2956 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2958 pgstat_count_heap_delete(relation);
2960 if (old_key_tuple != NULL && old_key_copied)
2961 heap_freetuple(old_key_tuple);
2963 return HeapTupleMayBeUpdated;
2967 * simple_heap_delete - delete a tuple
2969 * This routine may be used to delete a tuple when concurrent updates of
2970 * the target tuple are not expected (for example, because we have a lock
2971 * on the relation associated with the tuple). Any failure is reported via ereport().
2975 simple_heap_delete(Relation relation, ItemPointer tid)
2978 HeapUpdateFailureData hufd;
2980 result = heap_delete(relation, tid,
2981 GetCurrentCommandId(true), InvalidSnapshot,
2982 true /* wait for commit */ ,
2986 case HeapTupleSelfUpdated:
2987 /* Tuple was already updated in current command? */
2988 elog(ERROR, "tuple already updated by self");
2991 case HeapTupleMayBeUpdated:
2992 /* done successfully */
2995 case HeapTupleUpdated:
2996 elog(ERROR, "tuple concurrently updated");
3000 elog(ERROR, "unrecognized heap_delete status: %u", result);
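
/*
 * Illustrative sketch only (not part of the original code): deleting a row
 * whose TID was obtained from a preceding (sys)table scan; the scan setup is
 * an assumption of the example.
 *
 *		HeapTuple	tup = systable_getnext(scan);
 *
 *		if (HeapTupleIsValid(tup))
 *			simple_heap_delete(rel, &tup->t_self);
 */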
3006 * heap_update - replace a tuple
3008 * NB: do not call this directly unless you are prepared to deal with
3009 * concurrent-update conditions. Use simple_heap_update instead.
3011 * relation - table to be modified (caller must hold suitable lock)
3012 * otid - TID of old tuple to be replaced
3013 * newtup - newly constructed tuple data to store
3014 * cid - update command ID (used for visibility test, and stored into
3015 * cmax/cmin if successful)
3016 * crosscheck - if not InvalidSnapshot, also check old tuple against this
3017 * wait - true if should wait for any conflicting update to commit/abort
3018 * hufd - output parameter, filled in failure cases (see below)
3019 * lockmode - output parameter, filled with lock mode acquired on tuple
3021 * Normal, successful return value is HeapTupleMayBeUpdated, which
3022 * actually means we *did* update it. Failure return codes are
3023 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
3024 * (the last only possible if wait == false).
3026 * On success, the header fields of *newtup are updated to match the new
3027 * stored tuple; in particular, newtup->t_self is set to the TID where the
3028 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
3029 * update was done. However, any TOAST changes in the new tuple's
3030 * data are not reflected into *newtup.
3032 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
3033 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
3034 * (the last only for HeapTupleSelfUpdated, since we
3035 * cannot obtain cmax from a combocid generated by another transaction).
3036 * See comments for struct HeapUpdateFailureData for additional info.
3039 heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3040 CommandId cid, Snapshot crosscheck, bool wait,
3041 HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
3044 TransactionId xid = GetCurrentTransactionId();
3045 Bitmapset *hot_attrs;
3046 Bitmapset *key_attrs;
3047 Bitmapset *id_attrs;
3049 HeapTupleData oldtup;
3051 HeapTuple old_key_tuple = NULL;
3052 bool old_key_copied = false;
3055 MultiXactStatus mxact_status;
3058 vmbuffer = InvalidBuffer,
3059 vmbuffer_new = InvalidBuffer;
3064 bool have_tuple_lock = false;
3069 bool use_hot_update = false;
3071 bool all_visible_cleared = false;
3072 bool all_visible_cleared_new = false;
3073 bool checked_lockers;
3074 bool locker_remains;
3075 TransactionId xmax_new_tuple,
3077 uint16 infomask_old_tuple,
3078 infomask2_old_tuple,
3080 infomask2_new_tuple;
3082 Assert(ItemPointerIsValid(otid));
3085 * Fetch the list of attributes to be checked for HOT update. This is
3086 * wasted effort if we fail to update or have to put the new tuple on a
3087 * different page. But we must compute the list before obtaining buffer
3088 * lock --- in the worst case, if we are doing an update on one of the
3089 * relevant system catalogs, we could deadlock if we try to fetch the list
3090 * later. In any case, the relcache caches the data so this is usually pretty cheap.
3093 * Note that we get a copy here, so we need not worry about relcache flush
3094 * happening midway through.
3096 hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL);
3097 key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
3098 id_attrs = RelationGetIndexAttrBitmap(relation,
3099 INDEX_ATTR_BITMAP_IDENTITY_KEY);
3101 block = ItemPointerGetBlockNumber(otid);
3102 buffer = ReadBuffer(relation, block);
3103 page = BufferGetPage(buffer);
3106 * Before locking the buffer, pin the visibility map page if it appears to
3107 * be necessary. Since we haven't got the lock yet, someone else might be
3108 * in the middle of changing this, so we'll need to recheck after we have the lock.
3111 if (PageIsAllVisible(page))
3112 visibilitymap_pin(relation, block, &vmbuffer);
3114 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3116 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3117 Assert(ItemIdIsNormal(lp));
3120 * Fill in enough data in oldtup for HeapSatisfiesHOTandKeyUpdate to work
3123 oldtup.t_tableOid = RelationGetRelid(relation);
3124 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3125 oldtup.t_len = ItemIdGetLength(lp);
3126 oldtup.t_self = *otid;
3128 /* the new tuple is ready, except for this: */
3129 newtup->t_tableOid = RelationGetRelid(relation);
3131 /* Fill in OID for newtup */
3132 if (relation->rd_rel->relhasoids)
3135 /* this is redundant with an Assert in HeapTupleSetOid */
3136 Assert(newtup->t_data->t_infomask & HEAP_HASOID);
3138 HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
3143 /* check there is no space for an OID */
3143 Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
3147 * If we're not updating any "key" column, we can grab a weaker lock type.
3148 * This allows for more concurrency when we are running simultaneously
3149 * with foreign key checks.
3151 * Note that if a column gets detoasted while executing the update, but
3152 * the value ends up being the same, this test will fail and we will use
3153 * the stronger lock. This is acceptable; the important case to optimize
3154 * is updates that don't manipulate key columns, not those that
3155 * serendipitously arrive at the same key values.
3157 HeapSatisfiesHOTandKeyUpdate(relation, hot_attrs, key_attrs, id_attrs,
3158 &satisfies_hot, &satisfies_key,
3159 &satisfies_id, &oldtup, newtup);
3162 *lockmode = LockTupleNoKeyExclusive;
3163 mxact_status = MultiXactStatusNoKeyUpdate;
3167 * If this is the first possibly-multixact-able operation in the
3168 * current transaction, set my per-backend OldestMemberMXactId
3169 * setting. We can be certain that the transaction will never become a
3170 * member of any older MultiXactIds than that. (We have to do this
3171 * even if we end up just using our own TransactionId below, since
3172 * some other backend could incorporate our XID into a MultiXact
3173 * immediately afterwards.)
3175 MultiXactIdSetOldestMember();
3179 *lockmode = LockTupleExclusive;
3180 mxact_status = MultiXactStatusUpdate;
3185 * Note: beyond this point, use oldtup not otid to refer to old tuple.
3186 * otid may very well point at newtup->t_self, which we will overwrite
3187 * with the new tuple's location, so there's great risk of confusion if we use otid anymore.
3192 checked_lockers = false;
3193 locker_remains = false;
3194 result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3196 /* see below about the "no wait" case */
3197 Assert(result != HeapTupleBeingUpdated || wait);
3199 if (result == HeapTupleInvisible)
3201 UnlockReleaseBuffer(buffer);
3202 elog(ERROR, "attempted to update invisible tuple");
3204 else if (result == HeapTupleBeingUpdated && wait)
3206 TransactionId xwait;
3208 bool can_continue = false;
3210 checked_lockers = true;
3213 * XXX note that we don't consider the "no wait" case here. This
3214 * isn't a problem currently because no caller uses that case, but it
3215 * should be fixed if such a caller is introduced. It wasn't a
3216 * problem previously because this code would always wait, but now
3217 * that some tuple locks do not conflict with one of the lock modes we
3218 * use, it is possible that this case is interesting to handle specially.
3221 * This may cause failures with third-party code that calls
3222 * heap_update directly.
3225 /* must copy state data before unlocking buffer */
3226 xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3227 infomask = oldtup.t_data->t_infomask;
3229 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3232 * Acquire tuple lock to establish our priority for the tuple (see
3233 * heap_lock_tuple). LockTuple will release us when we are
3234 * next-in-line for the tuple.
3236 * If we are forced to "start over" below, we keep the tuple lock;
3237 * this arranges that we stay at the head of the line while rechecking tuple state.
3240 if (!have_tuple_lock)
3242 LockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3243 have_tuple_lock = true;
3247 * Now we have to do something about the existing locker. If it's a
3248 * multi, sleep on it; we might be awakened before it is completely
3249 * gone (or even not sleep at all in some cases); we need to preserve
3250 * it as locker, unless it is gone completely.
3252 * If it's not a multi, we need to check for sleeping conditions
3253 * before actually going to sleep. If the update doesn't conflict
3254 * with the locks, we just continue without sleeping (but making sure it is preserved).
3257 if (infomask & HEAP_XMAX_IS_MULTI)
3259 TransactionId update_xact;
3262 /* wait for multixact */
3263 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3264 relation, &oldtup.t_data->t_ctid, XLTW_Update,
3266 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3269 * If xwait had just locked the tuple then some other xact could
3270 * update this tuple before we get to this point. Check for xmax
3271 * change, and start over if so.
3273 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3274 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3279 * Note that the multixact may not be done by now. It could have
3280 * surviving members; our own xact or other subxacts of this
3281 * backend, and also any other concurrent transaction that locked
3282 * the tuple with KeyShare if we only got TupleLockUpdate. If
3283 * this is the case, we have to be careful to mark the updated
3284 * tuple with the surviving members in Xmax.
3286 * Note that there could have been another update in the
3287 * MultiXact. In that case, we need to check whether it committed
3288 * or aborted. If it aborted we are safe to update it again;
3289 * otherwise there is an update conflict, and we have to return
3290 * HeapTupleUpdated below.
3292 * In the LockTupleExclusive case, we still need to preserve the
3293 * surviving members: those would include the tuple locks we had
3294 * before this one, which are important to keep in case this subxact aborts.
3297 update_xact = InvalidTransactionId;
3298 if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3299 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3302 * There was no UPDATE in the MultiXact; or it aborted. No
3303 * TransactionIdIsInProgress() call needed here, since we called
3304 * MultiXactIdWait() above.
3306 if (!TransactionIdIsValid(update_xact) ||
3307 TransactionIdDidAbort(update_xact))
3308 can_continue = true;
3310 locker_remains = remain != 0;
3315 * If it's just a key-share locker, and we're not changing the key
3316 * columns, we don't need to wait for it to end; but we need to
3317 * preserve it as locker.
3319 if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3321 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3324 * recheck the locker; if someone else changed the tuple while
3325 * we weren't looking, start over.
3327 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3328 !TransactionIdEquals(
3329 HeapTupleHeaderGetRawXmax(oldtup.t_data),
3333 can_continue = true;
3334 locker_remains = true;
3338 /* wait for regular transaction to end */
3339 XactLockTableWait(xwait, relation, &oldtup.t_data->t_ctid,
3341 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3344 * xwait is done, but if xwait had just locked the tuple then
3345 * some other xact could update this tuple before we get to
3346 * this point. Check for xmax change, and start over if so.
3348 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3349 !TransactionIdEquals(
3350 HeapTupleHeaderGetRawXmax(oldtup.t_data),
3354 /* Otherwise check if it committed or aborted */
3355 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3356 if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3357 can_continue = true;
3361 result = can_continue ? HeapTupleMayBeUpdated : HeapTupleUpdated;
3364 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3366 /* Perform additional check for transaction-snapshot mode RI updates */
3367 if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3368 result = HeapTupleUpdated;
3371 if (result != HeapTupleMayBeUpdated)
3373 Assert(result == HeapTupleSelfUpdated ||
3374 result == HeapTupleUpdated ||
3375 result == HeapTupleBeingUpdated);
3376 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3377 hufd->ctid = oldtup.t_data->t_ctid;
3378 hufd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3379 if (result == HeapTupleSelfUpdated)
3380 hufd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3382 hufd->cmax = InvalidCommandId;
3383 UnlockReleaseBuffer(buffer);
3384 if (have_tuple_lock)
3385 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3386 if (vmbuffer != InvalidBuffer)
3387 ReleaseBuffer(vmbuffer);
3388 bms_free(hot_attrs);
3389 bms_free(key_attrs);
3394 * If we didn't pin the visibility map page and the page has become all
3395 * visible while we were busy locking the buffer, or during some
3396 * subsequent window during which we had it unlocked, we'll have to unlock
3397 * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3398 * bit unfortunate, especially since we'll now have to recheck whether the
3399 * tuple has been locked or updated under us, but hopefully it won't
3400 * happen very often.
3402 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3404 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3405 visibilitymap_pin(relation, block, &vmbuffer);
3406 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3411 * We're about to do the actual update -- check for conflict first, to
3412 * avoid possibly having to roll back work we've just done.
3414 CheckForSerializableConflictIn(relation, &oldtup, buffer);
3416 /* Fill in transaction status data */
3419 * If the tuple we're updating is locked, we need to preserve the locking
3420 * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3422 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3423 oldtup.t_data->t_infomask,
3424 oldtup.t_data->t_infomask2,
3425 xid, *lockmode, true,
3426 &xmax_old_tuple, &infomask_old_tuple,
3427 &infomask2_old_tuple);
3430 * And also prepare an Xmax value for the new copy of the tuple. If there
3431 * was no xmax previously, or there was one but all lockers are now gone,
3432 * then use InvalidXid; otherwise, get the xmax from the old tuple. (In
3433 * rare cases that might also be InvalidXid and yet not have the
3434 * HEAP_XMAX_INVALID bit set; that's fine.)
3436 if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3437 (checked_lockers && !locker_remains))
3438 xmax_new_tuple = InvalidTransactionId;
3440 xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3442 if (!TransactionIdIsValid(xmax_new_tuple))
3444 infomask_new_tuple = HEAP_XMAX_INVALID;
3445 infomask2_new_tuple = 0;
3450 * If we found a valid Xmax for the new tuple, then the infomask bits
3451 * to use on the new tuple depend on what was there on the old one.
3452 * Note that since we're doing an update, the only possibility is that
3453 * the lockers had FOR KEY SHARE lock.
3455 if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3457 GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3458 &infomask2_new_tuple);
3462 infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3463 infomask2_new_tuple = 0;
3468 * Prepare the new tuple with the appropriate initial values of Xmin and
3469 * Xmax, as well as initial infomask bits as computed above.
3471 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3472 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3473 HeapTupleHeaderSetXmin(newtup->t_data, xid);
3474 HeapTupleHeaderSetCmin(newtup->t_data, cid);
3475 newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3476 newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3477 HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3480 * Replace cid with a combo cid if necessary. Note that we already put
3481 * the plain cid into the new tuple.
3483 HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3486 * If the toaster needs to be activated, OR if the new tuple will not fit
3487 * on the same page as the old, then we need to release the content lock
3488 * (but not the pin!) on the old tuple's buffer while we are off doing
3489 * TOAST and/or table-file-extension work. We must mark the old tuple to
3490 * show that it's already being updated, else other processes may try to
3491 * update it themselves.
3493 * We need to invoke the toaster if there are already any out-of-line
3494 * toasted values present, or if the new tuple is over-threshold.
3496 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3497 relation->rd_rel->relkind != RELKIND_MATVIEW)
3499 /* toast table entries should never be recursively toasted */
3500 Assert(!HeapTupleHasExternal(&oldtup));
3501 Assert(!HeapTupleHasExternal(newtup));
3505 need_toast = (HeapTupleHasExternal(&oldtup) ||
3506 HeapTupleHasExternal(newtup) ||
3507 newtup->t_len > TOAST_TUPLE_THRESHOLD);
3509 pagefree = PageGetHeapFreeSpace(page);
3511 newtupsize = MAXALIGN(newtup->t_len);
3513 if (need_toast || newtupsize > pagefree)
3515 /* Clear obsolete visibility flags ... */
3516 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3517 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3518 HeapTupleClearHotUpdated(&oldtup);
3519 /* ... and store info about transaction updating this tuple */
3520 Assert(TransactionIdIsValid(xmax_old_tuple));
3521 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
3522 oldtup.t_data->t_infomask |= infomask_old_tuple;
3523 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
3524 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3525 /* temporarily make it look not-updated */
3526 oldtup.t_data->t_ctid = oldtup.t_self;
3527 already_marked = true;
3528 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3531 * Let the toaster do its thing, if needed.
3533 * Note: below this point, heaptup is the data we actually intend to
3534 * store into the relation; newtup is the caller's original untoasted
3539 /* Note we always use WAL and FSM during updates */
3540 heaptup = toast_insert_or_update(relation, newtup, &oldtup, 0);
3541 newtupsize = MAXALIGN(heaptup->t_len);
3547 * Now, do we need a new page for the tuple, or not? This is a bit
3548 * tricky since someone else could have added tuples to the page while
3549 * we weren't looking. We have to recheck the available space after
3550 * reacquiring the buffer lock. But don't bother to do that if the
3551 * former amount of free space is still not enough; it's unlikely
3552 * there's more free now than before.
3554 * What's more, if we need to get a new page, we will need to acquire
3555 * buffer locks on both old and new pages. To avoid deadlock against
3556 * some other backend trying to get the same two locks in the other
3557 * order, we must be consistent about the order we get the locks in.
3558 * We use the rule "lock the lower-numbered page of the relation
3559 * first". To implement this, we must do RelationGetBufferForTuple
3560 * while not holding the lock on the old page, and we must rely on it
3561 * to get the locks on both pages in the correct order.
3563 if (newtupsize > pagefree)
3565 /* Assume there's no chance to put heaptup on same page. */
3566 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3568 &vmbuffer_new, &vmbuffer);
3572 /* Re-acquire the lock on the old tuple's page. */
3573 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3574 /* Re-check using the up-to-date free space */
3575 pagefree = PageGetHeapFreeSpace(page);
3576 if (newtupsize > pagefree)
3579 * Rats, it doesn't fit anymore. We must now unlock and
3580 * relock to avoid deadlock. Fortunately, this path should seldom be taken.
3583 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3584 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3586 &vmbuffer_new, &vmbuffer);
3590 /* OK, it fits here, so we're done. */
3597 /* No TOAST work needed, and it'll fit on same page */
3598 already_marked = false;
3604 * We're about to create the new tuple -- check for conflict first, to
3605 * avoid possibly having to roll back work we've just done.
3607 * NOTE: For a tuple insert, we only need to check for table locks, since
3608 * predicate locking at the index level will cover ranges for anything
3609 * except a table scan. Therefore, only provide the relation.
3611 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
3614 * At this point newbuf and buffer are both pinned and locked, and newbuf
3615 * has enough space for the new tuple. If they are the same buffer, only one pin is held.
3619 if (newbuf == buffer)
3622 * Since the new tuple is going into the same page, we might be able
3623 * to do a HOT update. Check if any of the index columns have been
3624 * changed. If not, then HOT update is possible.
3627 use_hot_update = true;
3631 /* Set a hint that the old page could use prune/defrag */
3636 * Compute replica identity tuple before entering the critical section so
3637 * we don't PANIC upon a memory allocation failure.
3638 * ExtractReplicaIdentity() will return NULL if nothing needs to be logged.
3641 old_key_tuple = ExtractReplicaIdentity(relation, &oldtup, !satisfies_id, &old_key_copied);
3643 /* NO EREPORT(ERROR) from here till changes are logged */
3644 START_CRIT_SECTION();
3647 * If this transaction commits, the old tuple will become DEAD sooner or
3648 * later. Set flag that this page is a candidate for pruning once our xid
3649 * falls below the OldestXmin horizon. If the transaction finally aborts,
3650 * the subsequent page pruning will be a no-op and the hint will be cleared.
3653 * XXX Should we set hint on newbuf as well? If the transaction aborts,
3654 * there would be a prunable tuple in the newbuf; but for now we choose
3655 * not to optimize for aborts. Note that heap_xlog_update must be kept in
3656 * sync if this decision changes.
3658 PageSetPrunable(page, xid);
3662 /* Mark the old tuple as HOT-updated */
3663 HeapTupleSetHotUpdated(&oldtup);
3664 /* And mark the new tuple as heap-only */
3665 HeapTupleSetHeapOnly(heaptup);
3666 /* Mark the caller's copy too, in case different from heaptup */
3667 HeapTupleSetHeapOnly(newtup);
3671 /* Make sure tuples are correctly marked as not-HOT */
3672 HeapTupleClearHotUpdated(&oldtup);
3673 HeapTupleClearHeapOnly(heaptup);
3674 HeapTupleClearHeapOnly(newtup);
3677 RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
3679 if (!already_marked)
3681 /* Clear obsolete visibility flags ... */
3682 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3683 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3684 /* ... and store info about transaction updating this tuple */
3685 Assert(TransactionIdIsValid(xmax_old_tuple));
3686 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
3687 oldtup.t_data->t_infomask |= infomask_old_tuple;
3688 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
3689 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3692 /* record address of new tuple in t_ctid of old one */
3693 oldtup.t_data->t_ctid = heaptup->t_self;
3695 /* clear PD_ALL_VISIBLE flags */
3696 if (PageIsAllVisible(BufferGetPage(buffer)))
3698 all_visible_cleared = true;
3699 PageClearAllVisible(BufferGetPage(buffer));
3700 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3703 if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
3705 all_visible_cleared_new = true;
3706 PageClearAllVisible(BufferGetPage(newbuf));
3707 visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
3711 if (newbuf != buffer)
3712 MarkBufferDirty(newbuf);
3713 MarkBufferDirty(buffer);
3716 if (RelationNeedsWAL(relation))
3721 * For logical decoding we need combocids to properly decode the catalog.
3724 if (RelationIsAccessibleInLogicalDecoding(relation))
3726 log_heap_new_cid(relation, &oldtup);
3727 log_heap_new_cid(relation, heaptup);
3730 recptr = log_heap_update(relation, buffer,
3731 newbuf, &oldtup, heaptup,
3733 all_visible_cleared,
3734 all_visible_cleared_new);
3735 if (newbuf != buffer)
3737 PageSetLSN(BufferGetPage(newbuf), recptr);
3739 PageSetLSN(BufferGetPage(buffer), recptr);
3744 if (newbuf != buffer)
3745 LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
3746 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3749 * Mark old tuple for invalidation from system caches at next command
3750 * boundary, and mark the new tuple for invalidation in case we abort. We
3751 * have to do this before releasing the buffer because oldtup is in the
3752 * buffer. (heaptup is all in local memory, but it's necessary to process
3753 * both tuple versions in one call to inval.c so we can avoid redundant sinval messages.)
3756 CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
3758 /* Now we can release the buffer(s) */
3759 if (newbuf != buffer)
3760 ReleaseBuffer(newbuf);
3761 ReleaseBuffer(buffer);
3762 if (BufferIsValid(vmbuffer_new))
3763 ReleaseBuffer(vmbuffer_new);
3764 if (BufferIsValid(vmbuffer))
3765 ReleaseBuffer(vmbuffer);
3768 * Release the lmgr tuple lock, if we had it.
3770 if (have_tuple_lock)
3771 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3773 pgstat_count_heap_update(relation, use_hot_update);
3776 * If heaptup is a private copy, release it. Don't forget to copy t_self
3777 * back to the caller's image, too.
3779 if (heaptup != newtup)
3781 newtup->t_self = heaptup->t_self;
3782 heap_freetuple(heaptup);
3785 if (old_key_tuple != NULL && old_key_copied)
3786 heap_freetuple(old_key_tuple);
3788 bms_free(hot_attrs);
3789 bms_free(key_attrs);
3791 return HeapTupleMayBeUpdated;
3795 * Check if the specified attribute's value is same in both given tuples.
3796 * Subroutine for HeapSatisfiesHOTandKeyUpdate.
3799 heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
3800 HeapTuple tup1, HeapTuple tup2)
3806 Form_pg_attribute att;
3809 * If it's a whole-tuple reference, say "not equal". It's not really
3810 * worth supporting this case, since it could only succeed after a no-op
3811 * update, which is hardly a case worth optimizing for.
3817 * Likewise, automatically say "not equal" for any system attribute other
3818 * than OID and tableOID; we cannot expect these to be consistent in a HOT
3819 * chain, or even to be set correctly yet in the new tuple.
3823 if (attrnum != ObjectIdAttributeNumber &&
3824 attrnum != TableOidAttributeNumber)
3829 * Extract the corresponding values. XXX this is pretty inefficient if
3830 * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
3831 * a single heap_deform_tuple call on each tuple, instead? But that
3832 * doesn't work for system columns ...
3834 value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
3835 value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
3838 * If one value is NULL and the other is not, then they are certainly not equal.
3841 if (isnull1 != isnull2)
3845 * If both are NULL, they can be considered equal.
3851 * We do simple binary comparison of the two datums. This may be overly
3852 * strict because there can be multiple binary representations for the
3853 * same logical value. But we should be OK as long as there are no false
3854 * positives. Using a type-specific equality operator is messy because
3855 * there could be multiple notions of equality in different operator
3856 * classes; furthermore, we cannot safely invoke user-defined functions
3857 * while holding exclusive buffer lock.
3861 /* The only allowed system columns are OIDs, so do this */
3862 return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
3866 Assert(attrnum <= tupdesc->natts);
3867 att = tupdesc->attrs[attrnum - 1];
3868 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
3873 * Check which columns are being updated.
3875 * This simultaneously checks conditions for HOT updates, for FOR KEY
3876 * SHARE updates, and REPLICA IDENTITY concerns. Since much of the time they
3877 * will be checking very similar sets of columns, and doing the same tests on
3878 * them, it makes sense to optimize and do them together.
3880 * We receive three bitmapsets comprising the three sets of columns we're
3881 * interested in. Note these are destructively modified; that is OK since
3882 * this is invoked at most once in heap_update.
3884 * hot_result is set to TRUE if it's okay to do a HOT update (i.e. it does not
3885 * modify indexed columns); key_result is set to TRUE if the update does not
3886 * modify columns used in the key; id_result is set to TRUE if the update does
3887 * not modify columns in any index marked as the REPLICA IDENTITY.
3890 HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs,
3891 Bitmapset *key_attrs, Bitmapset *id_attrs,
3892 bool *satisfies_hot, bool *satisfies_key,
3894 HeapTuple oldtup, HeapTuple newtup)
3896 int next_hot_attnum;
3897 int next_key_attnum;
3899 bool hot_result = true;
3900 bool key_result = true;
3901 bool id_result = true;
3903 /* If REPLICA IDENTITY is set to FULL, id_attrs will be empty. */
3904 Assert(bms_is_subset(id_attrs, key_attrs));
3905 Assert(bms_is_subset(key_attrs, hot_attrs));
3908 * If one of these sets contains no remaining bits, bms_first_member will
3909 * return -1, and after adding FirstLowInvalidHeapAttributeNumber (which
3910 * is negative!) we'll get an attribute number that can't possibly be
3911 * real, and thus won't match any actual attribute number.
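 *
 * For example, assuming FirstLowInvalidHeapAttributeNumber is -8 (see
 * access/sysattr.h), an exhausted set yields -1 + (-8) = -9, which is lower
 * than any system or user attribute number.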
3913 next_hot_attnum = bms_first_member(hot_attrs);
3914 next_hot_attnum += FirstLowInvalidHeapAttributeNumber;
3915 next_key_attnum = bms_first_member(key_attrs);
3916 next_key_attnum += FirstLowInvalidHeapAttributeNumber;
3917 next_id_attnum = bms_first_member(id_attrs);
3918 next_id_attnum += FirstLowInvalidHeapAttributeNumber;
3926 * Since the HOT attributes are a superset of the key attributes and
3927 * the key attributes are a superset of the id attributes, this logic
3928 * is guaranteed to identify the next column that needs to be checked.
3930 if (hot_result && next_hot_attnum > FirstLowInvalidHeapAttributeNumber)
3931 check_now = next_hot_attnum;
3932 else if (key_result && next_key_attnum > FirstLowInvalidHeapAttributeNumber)
3933 check_now = next_key_attnum;
3934 else if (id_result && next_id_attnum > FirstLowInvalidHeapAttributeNumber)
3935 check_now = next_id_attnum;
3939 /* See whether it changed. */
3940 changed = !heap_tuple_attr_equals(RelationGetDescr(relation),
3941 check_now, oldtup, newtup);
3944 if (check_now == next_hot_attnum)
3946 if (check_now == next_key_attnum)
3948 if (check_now == next_id_attnum)
3951 /* if all are false now, we can stop checking */
3952 if (!hot_result && !key_result && !id_result)
3957 * Advance the next attribute numbers for the sets that contain the
3958 * attribute we just checked. As we work our way through the columns,
3959 * the next_attnum values will rise; but when each set becomes empty,
3960 * bms_first_member() will return -1 and the attribute number will end
3961 * up with a value less than FirstLowInvalidHeapAttributeNumber.
3963 if (hot_result && check_now == next_hot_attnum)
3965 next_hot_attnum = bms_first_member(hot_attrs);
3966 next_hot_attnum += FirstLowInvalidHeapAttributeNumber;
3968 if (key_result && check_now == next_key_attnum)
3970 next_key_attnum = bms_first_member(key_attrs);
3971 next_key_attnum += FirstLowInvalidHeapAttributeNumber;
3973 if (id_result && check_now == next_id_attnum)
3975 next_id_attnum = bms_first_member(id_attrs);
3976 next_id_attnum += FirstLowInvalidHeapAttributeNumber;
3980 *satisfies_hot = hot_result;
3981 *satisfies_key = key_result;
3982 *satisfies_id = id_result;
3986 * simple_heap_update - replace a tuple
3988 * This routine may be used to update a tuple when concurrent updates of
3989 * the target tuple are not expected (for example, because we have a lock
3990 * on the relation associated with the tuple). Any failure is reported via ereport().
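*
* A minimal, hypothetical caller sketch, modelled on typical catalog-update
* code; the relation is assumed to be already opened as pg_class_rel, and
* the particular field being set is only illustrative:
*
*     tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
*     if (!HeapTupleIsValid(tup))
*         elog(ERROR, "cache lookup failed for relation %u", relid);
*     ((Form_pg_class) GETSTRUCT(tup))->relhasindex = true;
*     simple_heap_update(pg_class_rel, &tup->t_self, tup);
*     CatalogUpdateIndexes(pg_class_rel, tup);
*     heap_freetuple(tup);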
3994 simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
3997 HeapUpdateFailureData hufd;
3998 LockTupleMode lockmode;
4000 result = heap_update(relation, otid, tup,
4001 GetCurrentCommandId(true), InvalidSnapshot,
4002 true /* wait for commit */ ,
4006 case HeapTupleSelfUpdated:
4007 /* Tuple was already updated in current command? */
4008 elog(ERROR, "tuple already updated by self");
4011 case HeapTupleMayBeUpdated:
4012 /* done successfully */
4015 case HeapTupleUpdated:
4016 elog(ERROR, "tuple concurrently updated");
4020 elog(ERROR, "unrecognized heap_update status: %u", result);
4027 * Return the MultiXactStatus corresponding to the given tuple lock mode.
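*
* For illustration (a sketch assuming the standard tupleLockExtraInfo table):
* (LockTupleShare, false) maps to MultiXactStatusForShare, while
* (LockTupleExclusive, true) maps to MultiXactStatusUpdate; modes that have
* no update variant (e.g. LockTupleShare with is_update = true) yield -1 and
* trigger the error below.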
4029 static MultiXactStatus
4030 get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4035 retval = tupleLockExtraInfo[mode].updstatus;
4037 retval = tupleLockExtraInfo[mode].lockstatus;
4040 elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4041 is_update ? "true" : "false");
4043 return (MultiXactStatus) retval;
4048 * heap_lock_tuple - lock a tuple in shared or exclusive mode
4050 * Note that this acquires a buffer pin, which the caller must release.
4053 * relation: relation containing tuple (caller must hold suitable lock)
4054 * tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
4055 * cid: current command ID (used for visibility test, and stored into
4056 * tuple's cmax if lock is successful)
4057 * mode: indicates if shared or exclusive tuple lock is desired
4058 * wait_policy: what to do if tuple lock is not available
4059 * follow_updates: if true, follow the update chain to also lock descendant
4062 * Output parameters:
4063 * *tuple: all fields filled in
4064 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4065 * *hufd: filled in failure cases (see below)
4067 * Function result may be:
4068 * HeapTupleMayBeUpdated: lock was successfully acquired
4069 * HeapTupleSelfUpdated: lock failed because tuple updated by self
4070 * HeapTupleUpdated: lock failed because tuple updated by other xact
4071 * HeapTupleWouldBlock: lock couldn't be acquired and wait_policy is skip
4073 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
4074 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
4075 * (the last only for HeapTupleSelfUpdated, since we
4076 * cannot obtain cmax from a combocid generated by another transaction).
4077 * See comments for struct HeapUpdateFailureData for additional info.
4079 * See README.tuplock for a thorough explanation of this mechanism.
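*
* A minimal, hypothetical caller sketch (error handling elided; target_tid
* and the surrounding relation setup are assumptions, not shown here):
*
*     HeapTupleData locktup;
*     Buffer      buf;
*     HeapUpdateFailureData hufd;
*     HTSU_Result res;
*
*     ItemPointerCopy(&target_tid, &locktup.t_self);
*     res = heap_lock_tuple(relation, &locktup,
*                           GetCurrentCommandId(false),
*                           LockTupleExclusive, LockWaitBlock,
*                           false, &buf, &hufd);
*     ReleaseBuffer(buf);      /* drop the pin acquired for us */
*     if (res != HeapTupleMayBeUpdated)
*         ... handle HeapTupleSelfUpdated / HeapTupleUpdated / etc ...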
4082 heap_lock_tuple(Relation relation, HeapTuple tuple,
4083 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
4084 bool follow_updates,
4085 Buffer *buffer, HeapUpdateFailureData *hufd)
4088 ItemPointer tid = &(tuple->t_self);
4093 uint16 old_infomask,
4096 bool have_tuple_lock = false;
4098 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4099 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4101 page = BufferGetPage(*buffer);
4102 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4103 Assert(ItemIdIsNormal(lp));
4105 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4106 tuple->t_len = ItemIdGetLength(lp);
4107 tuple->t_tableOid = RelationGetRelid(relation);
4110 result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4112 if (result == HeapTupleInvisible)
4114 UnlockReleaseBuffer(*buffer);
4115 elog(ERROR, "attempted to lock invisible tuple");
4117 else if (result == HeapTupleBeingUpdated)
4119 TransactionId xwait;
4123 ItemPointerData t_ctid;
4125 /* must copy state data before unlocking buffer */
4126 xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4127 infomask = tuple->t_data->t_infomask;
4128 infomask2 = tuple->t_data->t_infomask2;
4129 ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4131 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4134 * If any subtransaction of the current top transaction already holds
4135 * a lock as strong or stronger than what we're requesting, we
4136 * effectively hold the desired lock already. We *must* succeed
4137 * without trying to take the tuple lock, else we will deadlock
4138 * against anyone wanting to acquire a stronger lock.
4140 if (infomask & HEAP_XMAX_IS_MULTI)
4144 MultiXactMember *members;
4147 * We don't need to allow old multixacts here; if that had been
4148 * the case, HeapTupleSatisfiesUpdate would have returned
4149 * MayBeUpdated and we wouldn't be here.
4152 GetMultiXactIdMembers(xwait, &members, false,
4153 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4155 for (i = 0; i < nmembers; i++)
4157 if (TransactionIdIsCurrentTransactionId(members[i].xid))
4159 LockTupleMode membermode;
4161 membermode = TUPLOCK_from_mxstatus(members[i].status);
4163 if (membermode >= mode)
4165 if (have_tuple_lock)
4166 UnlockTupleTuplock(relation, tid, mode);
4169 return HeapTupleMayBeUpdated;
4179 * Acquire tuple lock to establish our priority for the tuple.
4180 * LockTuple will release us when we are next-in-line for the tuple.
4181 * We must do this even if we are share-locking.
4183 * If we are forced to "start over" below, we keep the tuple lock;
4184 * this arranges that we stay at the head of the line while rechecking
4187 if (!have_tuple_lock)
4189 switch (wait_policy)
4192 LockTupleTuplock(relation, tid, mode);
4195 if (!ConditionalLockTupleTuplock(relation, tid, mode))
4197 result = HeapTupleWouldBlock;
4198 /* recovery code expects to have buffer lock held */
4199 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4204 if (!ConditionalLockTupleTuplock(relation, tid, mode))
4206 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4207 errmsg("could not obtain lock on row in relation \"%s\"",
4208 RelationGetRelationName(relation))));
4211 have_tuple_lock = true;
4215 * Initially assume that we will have to wait for the locking
4216 * transaction(s) to finish. We check various cases below in which
4217 * this can be turned off.
4219 require_sleep = true;
4220 if (mode == LockTupleKeyShare)
4223 * If we're requesting KeyShare, and there's no update present, we
4224 * don't need to wait. Even if there is an update, we can still
4225 * continue if the key hasn't been modified.
4227 * However, if there are updates, we need to walk the update chain
4228 * to mark future versions of the row as locked, too. That way,
4229 * if somebody deletes that future version, we're protected
4230 * against the key going away. This locking of future versions
4231 * could block momentarily, if a concurrent transaction is
4232 * deleting a key; or it could return a value to the effect that
4233 * the transaction deleting the key has already committed. So we
4234 * do this before re-locking the buffer; otherwise this would be
4235 * prone to deadlocks.
4237 * Note that the TID we're locking was grabbed before we unlocked
4238 * the buffer. For it to change while we're not looking, the
4239 * other properties we're testing for below after re-locking the
4240 * buffer would also change, in which case we would restart this
4243 if (!(infomask2 & HEAP_KEYS_UPDATED))
4247 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4250 * If there are updates, follow the update chain; bail out if
4251 * that cannot be done.
4253 if (follow_updates && updated)
4257 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4258 GetCurrentTransactionId(),
4260 if (res != HeapTupleMayBeUpdated)
4263 /* recovery code expects to have buffer lock held */
4264 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4269 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4272 * Make sure it's still an appropriate lock, else start over.
4273 * Also, if it wasn't updated before we released the lock, but
4274 * is updated now, we start over too; the reason is that we
4275 * now need to follow the update chain to lock the new
4278 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4279 ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4283 /* Things look okay, so we can skip sleeping */
4284 require_sleep = false;
4287 * Note we allow Xmax to change here; other updaters/lockers
4288 * could have modified it before we grabbed the buffer lock.
4289 * However, this is not a problem, because with the recheck we
4290 * just did we ensure that they still don't conflict with the
4295 else if (mode == LockTupleShare)
4298 * If we're requesting Share, we can similarly avoid sleeping if
4299 * there's no update and no exclusive lock present.
4301 if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4302 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4304 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4307 * Make sure it's still an appropriate lock, else start over.
4308 * See above about allowing xmax to change.
4310 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4311 HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4313 require_sleep = false;
4316 else if (mode == LockTupleNoKeyExclusive)
4319 * If we're requesting NoKeyExclusive, we might also be able to
4320 * avoid sleeping; just ensure that there's no other lock type
4321 * than KeyShare. Note that this is a bit more involved than just
4322 * checking hint bits -- we need to expand the multixact to figure
4323 * out lock modes for each one (unless there was only one such
4326 if (infomask & HEAP_XMAX_IS_MULTI)
4329 MultiXactMember *members;
4332 * We don't need to allow old multixacts here; if that had
4333 * been the case, HeapTupleSatisfiesUpdate would have returned
4334 * MayBeUpdated and we wouldn't be here.
4337 GetMultiXactIdMembers(xwait, &members, false,
4338 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4343 * No need to keep the previous xmax here. This is
4344 * unlikely to happen.
4346 require_sleep = false;
4351 bool allowed = true;
4353 for (i = 0; i < nmembers; i++)
4355 if (members[i].status != MultiXactStatusForKeyShare)
4364 * if the xmax changed under us in the meantime, start
4367 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4368 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4369 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4375 /* otherwise, we're good */
4376 require_sleep = false;
4382 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4384 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4386 /* if the xmax changed in the meantime, start over */
4387 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4388 !TransactionIdEquals(
4389 HeapTupleHeaderGetRawXmax(tuple->t_data),
4392 /* otherwise, we're good */
4393 require_sleep = false;
4398 * By here, we either have already acquired the buffer exclusive lock,
4399 * or we must wait for the locking transaction or multixact; so below
4400 * we ensure that we grab buffer lock after the sleep.
4405 if (infomask & HEAP_XMAX_IS_MULTI)
4407 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4409 /* We only ever lock tuples, never update them */
4410 if (status >= MultiXactStatusNoKeyUpdate)
4411 elog(ERROR, "invalid lock mode in heap_lock_tuple");
4413 /* wait for multixact to end, or die trying */
4414 switch (wait_policy)
4417 MultiXactIdWait((MultiXactId) xwait, status, infomask,
4418 relation, &tuple->t_data->t_ctid, XLTW_Lock, NULL);
4421 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4422 status, infomask, relation,
4425 result = HeapTupleWouldBlock;
4426 /* recovery code expects to have buffer lock held */
4427 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4432 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4433 status, infomask, relation,
4436 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4437 errmsg("could not obtain lock on row in relation \"%s\"",
4438 RelationGetRelationName(relation))));
4443 /* if there are updates, follow the update chain */
4444 if (follow_updates &&
4445 !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4449 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4450 GetCurrentTransactionId(),
4452 if (res != HeapTupleMayBeUpdated)
4455 /* recovery code expects to have buffer lock held */
4456 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4461 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4464 * If xwait had just locked the tuple then some other xact
4465 * could update this tuple before we get to this point. Check
4466 * for xmax change, and start over if so.
4468 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4469 !TransactionIdEquals(
4470 HeapTupleHeaderGetRawXmax(tuple->t_data),
4475 * Of course, the multixact might not be done here: if we're
4476 * requesting a light lock mode, other transactions with light
4477 * locks could still be alive, as well as locks owned by our
4478 * own xact or other subxacts of this backend. We need to
4479 * preserve the surviving MultiXact members. Note that it
4480 * isn't absolutely necessary in the latter case, but doing so
4486 /* wait for regular transaction to end, or die trying */
4487 switch (wait_policy)
4490 XactLockTableWait(xwait, relation, &tuple->t_data->t_ctid,
4494 if (!ConditionalXactLockTableWait(xwait))
4496 result = HeapTupleWouldBlock;
4497 /* recovery code expects to have buffer lock held */
4498 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4503 if (!ConditionalXactLockTableWait(xwait))
4505 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4506 errmsg("could not obtain lock on row in relation \"%s\"",
4507 RelationGetRelationName(relation))));
4511 /* if there are updates, follow the update chain */
4512 if (follow_updates &&
4513 !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4517 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4518 GetCurrentTransactionId(),
4520 if (res != HeapTupleMayBeUpdated)
4523 /* recovery code expects to have buffer lock held */
4524 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4529 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4532 * xwait is done, but if xwait had just locked the tuple then
4533 * some other xact could update this tuple before we get to
4534 * this point. Check for xmax change, and start over if so.
4536 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4537 !TransactionIdEquals(
4538 HeapTupleHeaderGetRawXmax(tuple->t_data),
4543 * Otherwise check if it committed or aborted. Note we cannot
4544 * be here if the tuple was only locked by somebody who didn't
4545 * conflict with us; that should have been handled above. So
4546 * that transaction must necessarily be gone by now.
4548 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
4552 /* By here, we're certain that we hold buffer exclusive lock again */
4555 * We may lock if previous xmax aborted, or if it committed but only
4556 * locked the tuple without updating it; or if we didn't have to wait
4557 * at all for whatever reason.
4559 if (!require_sleep ||
4560 (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
4561 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4562 HeapTupleHeaderIsOnlyLocked(tuple->t_data))
4563 result = HeapTupleMayBeUpdated;
4565 result = HeapTupleUpdated;
4569 if (result != HeapTupleMayBeUpdated)
4571 Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
4572 result == HeapTupleWouldBlock);
4573 Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
4574 hufd->ctid = tuple->t_data->t_ctid;
4575 hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
4576 if (result == HeapTupleSelfUpdated)
4577 hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
4579 hufd->cmax = InvalidCommandId;
4580 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4581 if (have_tuple_lock)
4582 UnlockTupleTuplock(relation, tid, mode);
4586 xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
4587 old_infomask = tuple->t_data->t_infomask;
4590 * We might already hold the desired lock (or stronger), possibly under a
4591 * different subtransaction of the current top transaction. If so, there
4592 * is no need to change state or issue a WAL record. We already handled
4593 * the case where this is true for xmax being a MultiXactId, so now check
4594 * for cases where it is a plain TransactionId.
4596 * Note in particular that this covers the case where we already hold
4597 * exclusive lock on the tuple and the caller only wants key share or
4598 * share lock. It would certainly not do to give up the exclusive lock.
4600 if (!(old_infomask & (HEAP_XMAX_INVALID |
4601 HEAP_XMAX_COMMITTED |
4602 HEAP_XMAX_IS_MULTI)) &&
4603 (mode == LockTupleKeyShare ?
4604 (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask) ||
4605 HEAP_XMAX_IS_SHR_LOCKED(old_infomask) ||
4606 HEAP_XMAX_IS_EXCL_LOCKED(old_infomask)) :
4607 mode == LockTupleShare ?
4608 (HEAP_XMAX_IS_SHR_LOCKED(old_infomask) ||
4609 HEAP_XMAX_IS_EXCL_LOCKED(old_infomask)) :
4610 (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))) &&
4611 TransactionIdIsCurrentTransactionId(xmax))
4613 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4614 /* Probably can't hold tuple lock here, but may as well check */
4615 if (have_tuple_lock)
4616 UnlockTupleTuplock(relation, tid, mode);
4617 return HeapTupleMayBeUpdated;
4621 * If this is the first possibly-multixact-able operation in the current
4622 * transaction, set my per-backend OldestMemberMXactId setting. We can be
4623 * certain that the transaction will never become a member of any older
4624 * MultiXactIds than that. (We have to do this even if we end up just
4625 * using our own TransactionId below, since some other backend could
4626 * incorporate our XID into a MultiXact immediately afterwards.)
4628 MultiXactIdSetOldestMember();
4631 * Compute the new xmax and infomask to store into the tuple. Note we do
4632 * not modify the tuple just yet, because that would leave it in the wrong
4633 * state if multixact.c elogs.
4635 compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
4636 GetCurrentTransactionId(), mode, false,
4637 &xid, &new_infomask, &new_infomask2);
4639 START_CRIT_SECTION();
4642 * Store transaction information of xact locking the tuple.
4644 * Note: Cmax is meaningless in this context, so don't set it; this avoids
4645 * possibly generating a useless combo CID. Moreover, if we're locking a
4646 * previously updated tuple, it's important to preserve the Cmax.
4648 * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
4649 * we would break the HOT chain.
4651 tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
4652 tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4653 tuple->t_data->t_infomask |= new_infomask;
4654 tuple->t_data->t_infomask2 |= new_infomask2;
4655 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
4656 HeapTupleHeaderClearHotUpdated(tuple->t_data);
4657 HeapTupleHeaderSetXmax(tuple->t_data, xid);
4660 * Make sure there is no forward chain link in t_ctid. Note that in the
4661 * cases where the tuple has been updated, we must not overwrite t_ctid,
4662 * because it was set by the updater. Moreover, if the tuple has been
4663 * updated, we need to follow the update chain to lock the new versions of
4664 * the tuple as well.
4666 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
4667 tuple->t_data->t_ctid = *tid;
4669 MarkBufferDirty(*buffer);
4672 * XLOG stuff. You might think that we don't need an XLOG record because
4673 * there is no state change worth restoring after a crash. You would be
4674 * wrong however: we have just written either a TransactionId or a
4675 * MultiXactId that may never have been seen on disk before, and we need
4676 * to make sure that there are XLOG entries covering those ID numbers.
4677 * Else the same IDs might be re-used after a crash, which would be
4678 * disastrous if this page made it to disk before the crash. Essentially
4679 * we have to enforce the WAL log-before-data rule even in this case.
4680 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
4681 * entries for everything anyway.)
4683 if (RelationNeedsWAL(relation))
4689 XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
4691 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
4692 xlrec.locking_xid = xid;
4693 xlrec.infobits_set = compute_infobits(new_infomask,
4694 tuple->t_data->t_infomask2);
4695 XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
4697 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
4699 PageSetLSN(page, recptr);
4704 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4707 * Don't update the visibility map here. Locking a tuple doesn't change
4712 * Now that we have successfully marked the tuple as locked, we can
4713 * release the lmgr tuple lock, if we had it.
4715 if (have_tuple_lock)
4716 UnlockTupleTuplock(relation, tid, mode);
4718 return HeapTupleMayBeUpdated;
4723 * Given an original set of Xmax and infomask, and a transaction (identified by
4724 * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
4725 * corresponding infomasks to use on the tuple.
4727 * Note that this might have side effects such as creating a new MultiXactId.
4729 * Most callers will have called HeapTupleSatisfiesUpdate before this function;
4730 * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
4731 * but it was not running anymore. There is a race condition, which is that the
4732 * MultiXactId may have finished since then, but that uncommon case is handled
4733 * either here, or within MultiXactIdExpand.
4735 * There is a similar race condition possible when the old xmax was a regular
4736 * TransactionId. We test TransactionIdIsInProgress again just to narrow the
4737 * window, but it's still possible to end up creating an unnecessary
4738 * MultiXactId. Fortunately this is harmless.
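*
* As a concrete illustration (a sketch of the simplest case only, not the
* multixact branches below): if old_infomask has HEAP_XMAX_INVALID set and
* the caller asks for LockTupleShare with is_update = false, the result is
* simply *result_xmax = add_to_xmax with an infomask containing
* HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_SHR_LOCK, and no MultiXactId is created.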
4741 compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
4742 uint16 old_infomask2, TransactionId add_to_xmax,
4743 LockTupleMode mode, bool is_update,
4744 TransactionId *result_xmax, uint16 *result_infomask,
4745 uint16 *result_infomask2)
4747 TransactionId new_xmax;
4748 uint16 new_infomask,
4751 Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
4756 if (old_infomask & HEAP_XMAX_INVALID)
4759 * No previous locker; we just insert our own TransactionId.
4761 * Note that it's critical that this case be the first one checked,
4762 * because there are several blocks below that come back to this one
4763 * to implement certain optimizations; old_infomask might contain
4764 * other dirty bits in those cases, but we don't really care.
4768 new_xmax = add_to_xmax;
4769 if (mode == LockTupleExclusive)
4770 new_infomask2 |= HEAP_KEYS_UPDATED;
4774 new_infomask |= HEAP_XMAX_LOCK_ONLY;
4777 case LockTupleKeyShare:
4778 new_xmax = add_to_xmax;
4779 new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
4781 case LockTupleShare:
4782 new_xmax = add_to_xmax;
4783 new_infomask |= HEAP_XMAX_SHR_LOCK;
4785 case LockTupleNoKeyExclusive:
4786 new_xmax = add_to_xmax;
4787 new_infomask |= HEAP_XMAX_EXCL_LOCK;
4789 case LockTupleExclusive:
4790 new_xmax = add_to_xmax;
4791 new_infomask |= HEAP_XMAX_EXCL_LOCK;
4792 new_infomask2 |= HEAP_KEYS_UPDATED;
4795 new_xmax = InvalidTransactionId; /* silence compiler */
4796 elog(ERROR, "invalid lock mode");
4800 else if (old_infomask & HEAP_XMAX_IS_MULTI)
4802 MultiXactStatus new_status;
4805 * Currently we don't allow XMAX_COMMITTED to be set for multis, so
4808 Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
4811 * A multixact together with LOCK_ONLY set but neither lock bit set
4812 * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
4813 * anymore. This check is critical for databases upgraded by
4814 * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
4815 * that such multis are never passed.
4817 if (!(old_infomask & HEAP_LOCK_MASK) &&
4818 HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
4820 old_infomask &= ~HEAP_XMAX_IS_MULTI;
4821 old_infomask |= HEAP_XMAX_INVALID;
4826 * If the XMAX is already a MultiXactId, then we need to expand it to
4827 * include add_to_xmax; but if all the members were lockers and are
4828 * all gone, we can do away with the IS_MULTI bit and just set
4829 * add_to_xmax as the only locker/updater. If all lockers are gone
4830 * and we have an updater that aborted, we can also do without a
4833 * The cost of doing GetMultiXactIdMembers would be paid by
4834 * MultiXactIdExpand if we weren't to do this, so this check is not
4835 * incurring extra work anyhow.
4837 if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
4839 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
4840 TransactionIdDidAbort(MultiXactIdGetUpdateXid(xmax,
4844 * Reset these bits and restart; otherwise fall through to
4845 * create a new multi below.
4847 old_infomask &= ~HEAP_XMAX_IS_MULTI;
4848 old_infomask |= HEAP_XMAX_INVALID;
4853 new_status = get_mxact_status_for_lock(mode, is_update);
4855 new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
4857 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4859 else if (old_infomask & HEAP_XMAX_COMMITTED)
4862 * It's a committed update, so we need to preserve it as the updater of
4865 MultiXactStatus status;
4866 MultiXactStatus new_status;
4868 if (old_infomask2 & HEAP_KEYS_UPDATED)
4869 status = MultiXactStatusUpdate;
4871 status = MultiXactStatusNoKeyUpdate;
4873 new_status = get_mxact_status_for_lock(mode, is_update);
4876 * since it's not running, it's obviously impossible for the old
4877 * updater to be identical to the current one, so we need not check
4878 * for that case as we do in the block above.
4880 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4881 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4883 else if (TransactionIdIsInProgress(xmax))
4886 * If the XMAX is a valid, in-progress TransactionId, then we need to
4887 * create a new MultiXactId that includes both the old locker or
4888 * updater and our own TransactionId.
4890 MultiXactStatus new_status;
4891 MultiXactStatus old_status;
4892 LockTupleMode old_mode;
4894 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
4896 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
4897 old_status = MultiXactStatusForKeyShare;
4898 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
4899 old_status = MultiXactStatusForShare;
4900 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
4902 if (old_infomask2 & HEAP_KEYS_UPDATED)
4903 old_status = MultiXactStatusForUpdate;
4905 old_status = MultiXactStatusForNoKeyUpdate;
4910 * LOCK_ONLY can be present alone only when a page has been
4911 * upgraded by pg_upgrade. But in that case,
4912 * TransactionIdIsInProgress() should have returned false. We
4913 * assume it's no longer locked in this case.
4915 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
4916 old_infomask |= HEAP_XMAX_INVALID;
4917 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
4923 /* it's an update, but which kind? */
4924 if (old_infomask2 & HEAP_KEYS_UPDATED)
4925 old_status = MultiXactStatusUpdate;
4927 old_status = MultiXactStatusNoKeyUpdate;
4930 old_mode = TUPLOCK_from_mxstatus(old_status);
4933 * If the lock to be acquired is for the same TransactionId as the
4934 * existing lock, there's an optimization possible: consider only the
4935 * strongest of both locks as the only one present, and restart.
4937 if (xmax == add_to_xmax)
4940 * Note that it's not possible for the original tuple to be
4941 * updated: we wouldn't be here because the tuple would have been
4942 * invisible and we wouldn't try to update it. As a subtlety,
4943 * this code can also run when traversing an update chain to lock
4944 * future versions of a tuple. But we wouldn't be here either,
4945 * because the add_to_xmax would be different from the original
4948 Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
4950 /* acquire the strongest of both */
4951 if (mode < old_mode)
4953 /* mustn't touch is_update */
4955 old_infomask |= HEAP_XMAX_INVALID;
4959 /* otherwise, just fall back to creating a new multixact */
4960 new_status = get_mxact_status_for_lock(mode, is_update);
4961 new_xmax = MultiXactIdCreate(xmax, old_status,
4962 add_to_xmax, new_status);
4963 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4965 else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
4966 TransactionIdDidCommit(xmax))
4969 * It's a committed update, so we must preserve it as the updater of the
4972 MultiXactStatus status;
4973 MultiXactStatus new_status;
4975 if (old_infomask2 & HEAP_KEYS_UPDATED)
4976 status = MultiXactStatusUpdate;
4978 status = MultiXactStatusNoKeyUpdate;
4980 new_status = get_mxact_status_for_lock(mode, is_update);
4983 * since it's not running, it's obviously impossible for the old
4984 * updater to be identical to the current one, so we need not check
4985 * for that case as we do in the block above.
4987 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4988 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4993 * Can get here iff the locking/updating transaction was running when
4994 * the infomask was extracted from the tuple, but finished before
4995 * TransactionIdIsInProgress got to run. Deal with it as if there was
4996 * no locker at all in the first place.
4998 old_infomask |= HEAP_XMAX_INVALID;
5002 *result_infomask = new_infomask;
5003 *result_infomask2 = new_infomask2;
5004 *result_xmax = new_xmax;
5008 * Subroutine for heap_lock_updated_tuple_rec.
5010 * Given a hypothetical multixact status held by the transaction identified
5011 * with the given xid, does the current transaction need to wait, fail, or can
5012 * it continue if it wanted to acquire a lock of the given mode? "needwait"
5013 * is set to true if waiting is necessary; if it can continue, then
5014 * HeapTupleMayBeUpdated is returned. In case of a conflict, a different
5015 * HeapTupleSatisfiesUpdate return code is returned.
5017 * The held status is said to be hypothetical because it might correspond to a
5018 * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5019 * way for simplicity of API.
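*
* For example (an illustrative sketch): if xid is still in progress holding
* MultiXactStatusForShare and the caller wants LockTupleKeyShare, the two
* modes do not conflict, so we return HeapTupleMayBeUpdated with *needwait
* set to false; had the caller wanted LockTupleNoKeyExclusive instead, the
* modes would conflict and *needwait would be set to true.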
5022 test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
5023 LockTupleMode mode, bool *needwait)
5025 MultiXactStatus wantedstatus;
5028 wantedstatus = get_mxact_status_for_lock(mode, false);
5031 * Note: we *must* check TransactionIdIsInProgress before
5032 * TransactionIdDidAbort/Commit; see comment at top of tqual.c for an
5035 if (TransactionIdIsCurrentTransactionId(xid))
5038 * Updated by our own transaction? Just return failure. This
5039 * shouldn't normally happen.
5041 return HeapTupleSelfUpdated;
5043 else if (TransactionIdIsInProgress(xid))
5046 * If the locking transaction is running, what we do depends on
5047 * whether the lock modes conflict: if they do, then we must wait for
5048 * it to finish; otherwise we can fall through to lock this tuple
5049 * version without waiting.
5051 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5052 LOCKMODE_from_mxstatus(wantedstatus)))
5058 * If we set needwait above, then this value doesn't matter;
5059 * otherwise, this value signals to caller that it's okay to proceed.
5061 return HeapTupleMayBeUpdated;
5063 else if (TransactionIdDidAbort(xid))
5064 return HeapTupleMayBeUpdated;
5065 else if (TransactionIdDidCommit(xid))
5068 * The other transaction committed. If it was only a locker, then the
5069 * lock is completely gone now and we can return success; but if it
5070 * was an update, then what we do depends on whether the two lock
5071 * modes conflict. If they conflict, then we must report error to
5072 * caller. But if they don't, we can fall through to allow the current
5073 * transaction to lock the tuple.
5075 * Note: the reason we worry about ISUPDATE here is because as soon as
5076 * a transaction ends, all its locks are gone and meaningless, and
5077 * thus we can ignore them; whereas its updates persist. In the
5078 * TransactionIdIsInProgress case, above, we don't need to check
5079 * because we know the lock is still "alive" and thus a conflict always
5080 * needs to be checked.
5082 if (!ISUPDATE_from_mxstatus(status))
5083 return HeapTupleMayBeUpdated;
5085 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5086 LOCKMODE_from_mxstatus(wantedstatus)))
5088 return HeapTupleUpdated;
5090 return HeapTupleMayBeUpdated;
5093 /* Not in progress, not aborted, not committed -- must have crashed */
5094 return HeapTupleMayBeUpdated;
5099 * Recursive part of heap_lock_updated_tuple
5101 * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5102 * xid with the given mode; if this tuple is updated, recurse to lock the new
5106 heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
5109 ItemPointerData tupid;
5110 HeapTupleData mytup;
5112 uint16 new_infomask,
5118 TransactionId priorXmax = InvalidTransactionId;
5120 ItemPointerCopy(tid, &tupid);
5125 new_xmax = InvalidTransactionId;
5126 ItemPointerCopy(&tupid, &(mytup.t_self));
5128 if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false, NULL))
5131 * if we fail to find the updated version of the tuple, it's
5132 * because it was vacuumed/pruned away after its creator
5133 * transaction aborted. So behave as if we got to the end of the
5134 * chain, and there's no further tuple to lock: return success to
5137 return HeapTupleMayBeUpdated;
5141 CHECK_FOR_INTERRUPTS();
5142 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5145 * Check the tuple XMIN against prior XMAX, if any. If we reached the
5146 * end of the chain, we're done, so return success.
5148 if (TransactionIdIsValid(priorXmax) &&
5149 !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5152 UnlockReleaseBuffer(buf);
5153 return HeapTupleMayBeUpdated;
5156 old_infomask = mytup.t_data->t_infomask;
5157 old_infomask2 = mytup.t_data->t_infomask2;
5158 xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5161 * If this tuple version has been updated or locked by some concurrent
5162 * transaction(s), what we do depends on whether our lock mode
5163 * conflicts with what those other transactions hold, and also on the
5166 if (!(old_infomask & HEAP_XMAX_INVALID))
5168 TransactionId rawxmax;
5171 rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5172 if (old_infomask & HEAP_XMAX_IS_MULTI)
5176 MultiXactMember *members;
5178 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5179 HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5180 for (i = 0; i < nmembers; i++)
5184 res = test_lockmode_for_conflict(members[i].status,
5190 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5191 XactLockTableWait(members[i].xid, rel,
5192 &mytup.t_data->t_ctid,
5197 if (res != HeapTupleMayBeUpdated)
5199 UnlockReleaseBuffer(buf);
5210 MultiXactStatus status;
5213 * For a non-multi Xmax, we first need to compute the
5214 * corresponding MultiXactStatus by using the infomask bits.
5216 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5218 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5219 status = MultiXactStatusForKeyShare;
5220 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5221 status = MultiXactStatusForShare;
5222 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5224 if (old_infomask2 & HEAP_KEYS_UPDATED)
5225 status = MultiXactStatusForUpdate;
5227 status = MultiXactStatusForNoKeyUpdate;
5232 * LOCK_ONLY present alone (a pg_upgraded tuple marked
5233 * as share-locked in the old cluster) shouldn't be
5234 * seen in the middle of an update chain.
5236 elog(ERROR, "invalid lock status in tuple");
5241 /* it's an update, but which kind? */
5242 if (old_infomask2 & HEAP_KEYS_UPDATED)
5243 status = MultiXactStatusUpdate;
5245 status = MultiXactStatusNoKeyUpdate;
5248 res = test_lockmode_for_conflict(status, rawxmax, mode,
5252 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5253 XactLockTableWait(rawxmax, rel, &mytup.t_data->t_ctid,
5257 if (res != HeapTupleMayBeUpdated)
5259 UnlockReleaseBuffer(buf);
5265 /* compute the new Xmax and infomask values for the tuple ... */
5266 compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
5268 &new_xmax, &new_infomask, &new_infomask2);
5270 START_CRIT_SECTION();
5272 /* ... and set them */
5273 HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
5274 mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
5275 mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5276 mytup.t_data->t_infomask |= new_infomask;
5277 mytup.t_data->t_infomask2 |= new_infomask2;
5279 MarkBufferDirty(buf);
5282 if (RelationNeedsWAL(rel))
5284 xl_heap_lock_updated xlrec;
5286 Page page = BufferGetPage(buf);
5289 XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
5291 xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
5292 xlrec.xmax = new_xmax;
5293 xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
5295 XLogRegisterData((char *) &xlrec, SizeOfHeapLockUpdated);
5297 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
5299 PageSetLSN(page, recptr);
5304 /* if we find the end of update chain, we're done. */
5305 if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
5306 ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
5307 HeapTupleHeaderIsOnlyLocked(mytup.t_data))
5309 UnlockReleaseBuffer(buf);
5310 return HeapTupleMayBeUpdated;
5313 /* tail recursion */
5314 priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
5315 ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
5316 UnlockReleaseBuffer(buf);
5321 * heap_lock_updated_tuple
5322 * Follow update chain when locking an updated tuple, acquiring locks (row
5323 * marks) on the updated versions.
5325 * The initial tuple is assumed to be already locked.
5327 * This function doesn't check visibility; it just unconditionally marks the
5328 * tuple(s) as locked. If any tuple in the updated chain is being deleted
5329 * concurrently (or updated with the key being modified), sleep until the
5330 * transaction doing it is finished.
5332 * Note that we don't acquire heavyweight tuple locks on the tuples we walk
5333 * when we have to wait for other transactions to release them, as opposed to
5334 * what heap_lock_tuple does. The reason is that having more than one
5335 * transaction walking the chain is probably uncommon enough that risk of
5336 * starvation is not likely: one of the preconditions for being here is that
5337 * the snapshot in use predates the update that created this tuple (because we
5338 * started at an earlier version of the tuple), but at the same time such a
5339 * transaction cannot be using repeatable read or serializable isolation
5340 * levels, because that would lead to a serializability failure.
5343 heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
5344 TransactionId xid, LockTupleMode mode)
5346 if (!ItemPointerEquals(&tuple->t_self, ctid))
5349 * If this is the first possibly-multixact-able operation in the
5350 * current transaction, set my per-backend OldestMemberMXactId
5351 * setting. We can be certain that the transaction will never become a
5352 * member of any older MultiXactIds than that. (We have to do this
5353 * even if we end up just using our own TransactionId below, since
5354 * some other backend could incorporate our XID into a MultiXact
5355 * immediately afterwards.)
5357 MultiXactIdSetOldestMember();
5359 return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
5362 /* nothing to lock */
5363 return HeapTupleMayBeUpdated;
5368 * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
5370 * Overwriting violates both MVCC and transactional safety, so the uses
5371 * of this function in Postgres are extremely limited. Nonetheless we
5372 * find some places to use it.
5374 * The tuple cannot change size, and therefore it's reasonable to assume
5375 * that its null bitmap (if any) doesn't change either. So we just
5376 * overwrite the data portion of the tuple without touching the null
5377 * bitmap or any of the header fields.
5379 * tuple is an in-memory tuple structure containing the data to be written
5380 * over the target tuple. Also, tuple->t_self identifies the target tuple.
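*
* A minimal, hypothetical caller sketch, modelled on how VACUUM refreshes
* pg_class statistics; the local variable names and the opened pg_class_rel
* relation are assumptions:
*
*     ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
*     if (!HeapTupleIsValid(ctup))
*         elog(ERROR, "pg_class entry for relid %u vanished", relid);
*     pgcform = (Form_pg_class) GETSTRUCT(ctup);
*     pgcform->relpages = (int32) num_pages;
*     pgcform->reltuples = (float4) num_tuples;
*     heap_inplace_update(pg_class_rel, ctup);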
5383 heap_inplace_update(Relation relation, HeapTuple tuple)
5387 OffsetNumber offnum;
5389 HeapTupleHeader htup;
5393 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
5394 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5395 page = (Page) BufferGetPage(buffer);
5397 offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
5398 if (PageGetMaxOffsetNumber(page) >= offnum)
5399 lp = PageGetItemId(page, offnum);
5401 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5402 elog(ERROR, "heap_inplace_update: invalid lp");
5404 htup = (HeapTupleHeader) PageGetItem(page, lp);
5406 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
5407 newlen = tuple->t_len - tuple->t_data->t_hoff;
5408 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
5409 elog(ERROR, "heap_inplace_update: wrong tuple length");
5411 /* NO EREPORT(ERROR) from here till changes are logged */
5412 START_CRIT_SECTION();
5414 memcpy((char *) htup + htup->t_hoff,
5415 (char *) tuple->t_data + tuple->t_data->t_hoff,
5418 MarkBufferDirty(buffer);
5421 if (RelationNeedsWAL(relation))
5423 xl_heap_inplace xlrec;
5426 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5429 XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
5431 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5432 XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
5434 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
5436 PageSetLSN(page, recptr);
5441 UnlockReleaseBuffer(buffer);
5444 * Send out shared cache inval if necessary. Note that because we only
5445 * pass the new version of the tuple, this mustn't be used for any
5446 * operations that could change catcache lookup keys. But we aren't
5447 * bothering with index updates either, so that's true a fortiori.
5449 if (!IsBootstrapProcessingMode())
5450 CacheInvalidateHeapTuple(relation, tuple, NULL);
5453 #define FRM_NOOP 0x0001
5454 #define FRM_INVALIDATE_XMAX 0x0002
5455 #define FRM_RETURN_IS_XID 0x0004
5456 #define FRM_RETURN_IS_MULTI 0x0008
5457 #define FRM_MARK_COMMITTED 0x0010
5461 * Determine what to do during freezing when a tuple is marked by a MultiXactId.
5464 * NB -- this might have the side-effect of creating a new MultiXactId!
5466 * "flags" is an output value; it's used to tell caller what to do on return.
5467 * Possible flags are:
5468 * FRM_NOOP
5469 * don't do anything -- keep existing Xmax
5470 * FRM_INVALIDATE_XMAX
5471 * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
5472 * FRM_RETURN_IS_XID
5473 * The Xid return value is a single update Xid to set as xmax.
5474 * FRM_MARK_COMMITTED
5475 * Xmax can be marked as HEAP_XMAX_COMMITTED
5476 * FRM_RETURN_IS_MULTI
5477 * The return value is a new MultiXactId to set as new Xmax.
5478 * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
5480 static TransactionId
5481 FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
5482 TransactionId cutoff_xid, MultiXactId cutoff_multi,
5485 TransactionId xid = InvalidTransactionId;
5487 MultiXactMember *members;
5491 MultiXactMember *newmembers;
5493 TransactionId update_xid;
5494 bool update_committed;
5499 /* We should only be called in Multis */
5500 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
5502 if (!MultiXactIdIsValid(multi))
5504 /* Ensure infomask bits are appropriately set/reset */
5505 *flags |= FRM_INVALIDATE_XMAX;
5506 return InvalidTransactionId;
5508 else if (MultiXactIdPrecedes(multi, cutoff_multi))
5511 * This old multi cannot possibly have members still running. If it
5512 * was a locker only, it can be removed without any further
5513 * consideration; but if it contained an update, we might need to
5516 * Don't assert MultiXactIdIsRunning if the multi came from a
5517 * pg_upgrade'd share-locked tuple, though, as doing that causes an
5518 * error to be raised unnecessarily.
5520 Assert((!(t_infomask & HEAP_LOCK_MASK) &&
5521 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)) ||
5522 !MultiXactIdIsRunning(multi,
5523 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)));
5524 if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
5526 *flags |= FRM_INVALIDATE_XMAX;
5527 xid = InvalidTransactionId; /* not strictly necessary */
5531 /* replace multi by update xid */
5532 xid = MultiXactIdGetUpdateXid(multi, t_infomask);
5534 /* wasn't only a lock, xid needs to be valid */
5535 Assert(TransactionIdIsValid(xid));
5538 * If the xid is older than the cutoff, it has to have aborted,
5539 * otherwise the tuple would have gotten pruned away.
5541 if (TransactionIdPrecedes(xid, cutoff_xid))
5543 Assert(!TransactionIdDidCommit(xid));
5544 *flags |= FRM_INVALIDATE_XMAX;
5545 xid = InvalidTransactionId; /* not strictly necessary */
5549 *flags |= FRM_RETURN_IS_XID;
5557 * This multixact may or may not have members still running, but
5558 * we know it's valid and is newer than the cutoff point for multis.
5559 * However, some member(s) of it may be below the cutoff for Xids, so we
5560 * need to walk the whole members array to figure out what to do, if
5564 allow_old = !(t_infomask & HEAP_LOCK_MASK) &&
5565 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask);
5567 GetMultiXactIdMembers(multi, &members, allow_old,
5568 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
5571 /* Nothing worth keeping */
5572 *flags |= FRM_INVALIDATE_XMAX;
5573 return InvalidTransactionId;
5576 /* is there anything older than the cutoff? */
5577 need_replace = false;
5578 for (i = 0; i < nmembers; i++)
5580 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
5582 need_replace = true;
5588 * In the simplest case, there is no member older than the cutoff; we can
5589 * keep the existing MultiXactId as is.
5595 return InvalidTransactionId;
5599 * If the multi needs to be updated, figure out which members we need
5603 newmembers = palloc(sizeof(MultiXactMember) * nmembers);
5604 has_lockers = false;
5605 update_xid = InvalidTransactionId;
5606 update_committed = false;
5608 for (i = 0; i < nmembers; i++)
5611 * Determine whether to keep this member or ignore it.
5613 if (ISUPDATE_from_mxstatus(members[i].status))
5615 TransactionId xid = members[i].xid;
5618 * It's an update; should we keep it? If the transaction is known
5619 * aborted or crashed then it's okay to ignore it, otherwise not.
5620 * Note that an updater older than cutoff_xid cannot possibly be
5621 * committed, because HeapTupleSatisfiesVacuum would have returned
5622 * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
5624 * As with all tuple visibility routines, it's critical to test
5625 * TransactionIdIsInProgress before TransactionIdDidCommit,
5626 * because of race conditions explained in detail in tqual.c.
5628 if (TransactionIdIsCurrentTransactionId(xid) ||
5629 TransactionIdIsInProgress(xid))
5631 Assert(!TransactionIdIsValid(update_xid));
5634 else if (TransactionIdDidCommit(xid))
5637 * The transaction committed, so we can tell caller to set
5638 * HEAP_XMAX_COMMITTED. (We can only do this because we know
5639 * the transaction is not running.)
5641 Assert(!TransactionIdIsValid(update_xid));
5642 update_committed = true;
5647 * Not in progress, not committed -- must be aborted or crashed;
5652 * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
5653 * update Xid cannot possibly be older than the xid cutoff.
5655 Assert(!TransactionIdIsValid(update_xid) ||
5656 !TransactionIdPrecedes(update_xid, cutoff_xid));
5659 * If we determined that it's an Xid corresponding to an update
5660 * that must be retained, additionally add it to the list of
5661 * members of the new Multi, in case we end up using that. (We
5662 * might still decide to use only an update Xid and not a multi,
5663 * but it's easier to maintain the list as we walk the old members
5666 if (TransactionIdIsValid(update_xid))
5667 newmembers[nnewmembers++] = members[i];
5671 /* We only keep lockers if they are still running */
5672 if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
5673 TransactionIdIsInProgress(members[i].xid))
5675 /* running locker cannot possibly be older than the cutoff */
5676 Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
5677 newmembers[nnewmembers++] = members[i];
5685 if (nnewmembers == 0)
5687 /* nothing worth keeping!? Tell caller to remove the whole thing */
5688 *flags |= FRM_INVALIDATE_XMAX;
5689 xid = InvalidTransactionId;
5691 else if (TransactionIdIsValid(update_xid) && !has_lockers)
5694 * If there's a single member and it's an update, pass it back alone
5695 * without creating a new Multi. (XXX we could do this when there's a
5696 * single remaining locker, too, but that would complicate the API too
5697 * much; moreover, the case with the single updater is more
5698 * interesting, because those are longer-lived.)
5700 Assert(nnewmembers == 1);
5701 *flags |= FRM_RETURN_IS_XID;
5702 if (update_committed)
5703 *flags |= FRM_MARK_COMMITTED;
5709 * Create a new multixact with the surviving members of the previous
5710 * one, to set as new Xmax in the tuple.
5712 xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
5713 *flags |= FRM_RETURN_IS_MULTI;
5722 * heap_prepare_freeze_tuple
5724 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
5725 * are older than the specified cutoff XID and cutoff MultiXactId. If so,
5726 * setup enough state (in the *frz output argument) to later execute and
5727 * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
5730 * Caller is responsible for setting the offset field, if appropriate.
5732 * It is assumed that the caller has checked the tuple with
5733 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
5734 * (else we should be removing the tuple, not freezing it).
5736 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
5737 * XID older than it could neither be running nor seen as running by any
5738 * open transaction. This ensures that the replacement will not change
5739 * anyone's idea of the tuple state.
5740 * Similarly, cutoff_multi must be less than or equal to the smallest
5741 * MultiXactId used by any transaction currently open.
5743 * If the tuple is in a shared buffer, caller must hold an exclusive lock on
5746 * NB: It is not enough to set hint bits to indicate something is
5747 * committed/invalid -- they might not be set on a standby, or after crash
5748 * recovery. We really need to remove old xids.
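*
* A minimal, hypothetical sketch of the intended call pattern, mirroring what
* lazy VACUUM does: collect freeze plans for the tuples on a page, then apply
* and WAL-log them inside one critical section.  The array bookkeeping and
* variable names here are assumptions:
*
*     if (heap_prepare_freeze_tuple(htup, FreezeLimit, MultiXactCutoff,
*                                   &frozen[nfrozen]))
*         frozen[nfrozen++].offset = offnum;
*     ...
*     START_CRIT_SECTION();
*     for (i = 0; i < nfrozen; i++)
*     {
*         ItemId itemid = PageGetItemId(page, frozen[i].offset);
*
*         heap_execute_freeze_tuple((HeapTupleHeader) PageGetItem(page, itemid),
*                                   &frozen[i]);
*     }
*     MarkBufferDirty(buf);
*     if (RelationNeedsWAL(rel))
*         PageSetLSN(page, log_heap_freeze(rel, buf, FreezeLimit,
*                                          frozen, nfrozen));
*     END_CRIT_SECTION();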
5751 heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
5752 TransactionId cutoff_multi,
5753 xl_heap_freeze_tuple *frz)
5756 bool changed = false;
5757 bool freeze_xmax = false;
5761 frz->t_infomask2 = tuple->t_infomask2;
5762 frz->t_infomask = tuple->t_infomask;
5763 frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
5766 xid = HeapTupleHeaderGetXmin(tuple);
5767 if (TransactionIdIsNormal(xid) &&
5768 TransactionIdPrecedes(xid, cutoff_xid))
5770 frz->t_infomask |= HEAP_XMIN_FROZEN;
5775 * Process xmax. To thoroughly examine the current Xmax value we need to
5776 * resolve a MultiXactId to its member Xids, in case some of them are
5777 * below the given cutoff for Xids. In that case, those values might need
5778 * freezing, too. Also, if a multi needs freezing, we cannot simply take
5779 * it out --- if there's a live updater Xid, it needs to be kept.
5781 * Make sure to keep heap_tuple_needs_freeze in sync with this.
5783 xid = HeapTupleHeaderGetRawXmax(tuple);
5785 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
5787 TransactionId newxmax;
5790 newxmax = FreezeMultiXactId(xid, tuple->t_infomask,
5791 cutoff_xid, cutoff_multi, &flags);
5793 if (flags & FRM_INVALIDATE_XMAX)
5795 else if (flags & FRM_RETURN_IS_XID)
5798 * NB -- some of these transformations are only valid because we
5799 * know the return Xid is a tuple updater (i.e. not merely a
5800 * locker.) Also note that the only reason we don't explicitly
5801 * worry about HEAP_KEYS_UPDATED is because it lives in
5802 * t_infomask2 rather than t_infomask.
5804 frz->t_infomask &= ~HEAP_XMAX_BITS;
5805 frz->xmax = newxmax;
5806 if (flags & FRM_MARK_COMMITTED)
5807 frz->t_infomask |= HEAP_XMAX_COMMITTED;
5810 else if (flags & FRM_RETURN_IS_MULTI)
5816 * We can't use GetMultiXactIdHintBits directly on the new multi
5817 * here; that routine initializes the masks to all zeroes, which
5818 * would lose other bits we need. Doing it this way ensures all
5819 * unrelated bits remain untouched.
5821 frz->t_infomask &= ~HEAP_XMAX_BITS;
5822 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5823 GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
5824 frz->t_infomask |= newbits;
5825 frz->t_infomask2 |= newbits2;
5827 frz->xmax = newxmax;
5833 Assert(flags & FRM_NOOP);
5836 else if (TransactionIdIsNormal(xid) &&
5837 TransactionIdPrecedes(xid, cutoff_xid))
5844 frz->xmax = InvalidTransactionId;
5847 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
5848 * LOCKED. Normalize to INVALID just to be sure no one gets confused.
5849 * Also get rid of the HEAP_KEYS_UPDATED bit.
5851 frz->t_infomask &= ~HEAP_XMAX_BITS;
5852 frz->t_infomask |= HEAP_XMAX_INVALID;
5853 frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
5854 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5859 * Old-style VACUUM FULL is gone, but we have to keep this code as long as
5860 * we support having MOVED_OFF/MOVED_IN tuples in the database.
5862 if (tuple->t_infomask & HEAP_MOVED)
5864 xid = HeapTupleHeaderGetXvac(tuple);
5865 if (TransactionIdIsNormal(xid) &&
5866 TransactionIdPrecedes(xid, cutoff_xid))
5869 * If a MOVED_OFF tuple is not dead, the xvac transaction must
5870 * have failed; whereas a non-dead MOVED_IN tuple must mean the
5871 * xvac transaction succeeded.
5873 if (tuple->t_infomask & HEAP_MOVED_OFF)
5874 frz->frzflags |= XLH_INVALID_XVAC;
5876 frz->frzflags |= XLH_FREEZE_XVAC;
5879 * Might as well fix the hint bits too; usually XMIN_COMMITTED
5880 * will already be set here, but there's a small chance not.
5882 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
5883 frz->t_infomask |= HEAP_XMIN_COMMITTED;
5892 * heap_execute_freeze_tuple
5893 * Execute the prepared freezing of a tuple.
5895 * Caller is responsible for ensuring that no other backend can access the
5896 * storage underlying this tuple, either by holding an exclusive lock on the
5897 * buffer containing it (which is what lazy VACUUM does), or by having it be
5898 * in private storage (which is what CLUSTER and friends do).
5900 * Note: it might seem we could make the changes without exclusive lock, since
5901 * TransactionId read/write is assumed atomic anyway. However there is a race
5902 * condition: someone who just fetched an old XID that we overwrite here could
5903 * conceivably not finish checking the XID against pg_clog before we finish
5904 * the VACUUM and perhaps truncate off the part of pg_clog he needs. Getting
5905 * exclusive lock ensures no other backend is in process of checking the
5906 * tuple status. Also, getting exclusive lock makes it safe to adjust the
5909 * NB: All code in here must be safe to execute during crash recovery!
5912 heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
5914 HeapTupleHeaderSetXmax(tuple, frz->xmax);
5916 if (frz->frzflags & XLH_FREEZE_XVAC)
5917 HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
5919 if (frz->frzflags & XLH_INVALID_XVAC)
5920 HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
5922 tuple->t_infomask = frz->t_infomask;
5923 tuple->t_infomask2 = frz->t_infomask2;
5928 * Freeze tuple in place, without WAL logging.
5930 * Useful for callers like CLUSTER that perform their own WAL logging.
5933 heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
5934 TransactionId cutoff_multi)
5936 xl_heap_freeze_tuple frz;
5939 do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
5943 * Note that because this is not a WAL-logged operation, we don't need to
5944 * fill in the offset in the freeze record.
5948 heap_execute_freeze_tuple(tuple, &frz);
5953 * For a given MultiXactId, return the hint bits that should be set in the
5956 * Normally this should be called for a multixact that was just created, and
5957 * so is on our local cache, so the GetMembers call is fast.
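*
* For example (an illustrative sketch): a freshly created multi whose members
* are {xid1: ForKeyShare, xid2: NoKeyUpdate} has LockTupleNoKeyExclusive as
* its strongest mode and contains an update, so the computed bits are
* HEAP_XMAX_IS_MULTI | HEAP_XMAX_EXCL_LOCK in *new_infomask, with neither
* HEAP_XMAX_LOCK_ONLY nor HEAP_KEYS_UPDATED set.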
5960 GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
5961 uint16 *new_infomask2)
5964 MultiXactMember *members;
5966 uint16 bits = HEAP_XMAX_IS_MULTI;
5968 bool has_update = false;
5969 LockTupleMode strongest = LockTupleKeyShare;
5972 * We only use this in multis we just created, so they cannot be values
5975 nmembers = GetMultiXactIdMembers(multi, &members, false, false);
5977 for (i = 0; i < nmembers; i++)
5982 * Remember the strongest lock mode held by any member of the
5985 mode = TUPLOCK_from_mxstatus(members[i].status);
5986 if (mode > strongest)
5989 /* See what other bits we need */
5990 switch (members[i].status)
5992 case MultiXactStatusForKeyShare:
5993 case MultiXactStatusForShare:
5994 case MultiXactStatusForNoKeyUpdate:
5997 case MultiXactStatusForUpdate:
5998 bits2 |= HEAP_KEYS_UPDATED;
6001 case MultiXactStatusNoKeyUpdate:
6005 case MultiXactStatusUpdate:
6006 bits2 |= HEAP_KEYS_UPDATED;
6012 if (strongest == LockTupleExclusive ||
6013 strongest == LockTupleNoKeyExclusive)
6014 bits |= HEAP_XMAX_EXCL_LOCK;
6015 else if (strongest == LockTupleShare)
6016 bits |= HEAP_XMAX_SHR_LOCK;
6017 else if (strongest == LockTupleKeyShare)
6018 bits |= HEAP_XMAX_KEYSHR_LOCK;
6021 bits |= HEAP_XMAX_LOCK_ONLY;
6026 *new_infomask = bits;
6027 *new_infomask2 = bits2;
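/*
 * Editorial worked example (not in the original source): for a multixact
 * whose members are {xid1: ForKeyShare, xid2: NoKeyUpdate}, there is an
 * updater, so HEAP_XMAX_LOCK_ONLY is not added; the strongest lock mode is
 * LockTupleNoKeyExclusive, so the result is
 *     *new_infomask  = HEAP_XMAX_IS_MULTI | HEAP_XMAX_EXCL_LOCK
 *     *new_infomask2 = unchanged (HEAP_KEYS_UPDATED is only added for
 *                      ForUpdate/Update members).
 */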
6031 * MultiXactIdGetUpdateXid
6033 * Given a multixact Xmax and corresponding infomask, which does not have the
6034 * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating transaction.
6037 * Caller is expected to check the status of the updating transaction, if necessary.
6040 static TransactionId
6041 MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
6043 TransactionId update_xact = InvalidTransactionId;
6044 MultiXactMember *members;
6047 Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
6048 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6051 * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from pre-pg_upgrade.
6054 nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
6060 for (i = 0; i < nmembers; i++)
6062 /* Ignore lockers */
6063 if (!ISUPDATE_from_mxstatus(members[i].status))
6066 /* there can be at most one updater */
6067 Assert(update_xact == InvalidTransactionId);
6068 update_xact = members[i].xid;
6069 #ifndef USE_ASSERT_CHECKING
6072 * in an assert-enabled build, walk the whole array to ensure
6073 * there's no other updater.
6086 * HeapTupleGetUpdateXid
6087 * As above, but use a HeapTupleHeader
6089 * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
6090 * checking the hint bits.
6093 HeapTupleGetUpdateXid(HeapTupleHeader tuple)
6095 return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tuple),
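/*
 * Editorial sketch (not part of the original source): roughly what the
 * HeapTupleHeaderGetUpdateXid() wrapper mentioned above does, i.e. return
 * the raw xmax unless the infomask indicates a multixact containing an
 * update, in which case it defers to HeapTupleGetUpdateXid().  See
 * access/htup_details.h for the authoritative definition.
 */
static TransactionId
get_update_xid_sketch(HeapTupleHeader tup)
{
	if (!(tup->t_infomask & HEAP_XMAX_INVALID) &&
		(tup->t_infomask & HEAP_XMAX_IS_MULTI) &&
		!(tup->t_infomask & HEAP_XMAX_LOCK_ONLY))
		return HeapTupleGetUpdateXid(tup);

	return HeapTupleHeaderGetRawXmax(tup);
}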
6100 * Do_MultiXactIdWait
6101 * Actual implementation for the two functions below.
6103 * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
6104 * needed to ensure we only sleep on conflicting members, and the infomask is
6105 * used to optimize multixact access in case it's a lock-only multi); 'nowait'
6106 * indicates whether to use conditional lock acquisition, to allow callers to
6107 * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
6108 * context information for error messages. 'remaining', if not NULL, receives
6109 * the number of members that are still running, including any (non-aborted)
6110 * subtransactions of our own transaction.
6112 * We do this by sleeping on each member using XactLockTableWait. Any
6113 * members that belong to the current backend are *not* waited for, however;
6114 * this would not merely be useless but would lead to Assert failure inside
6115 * XactLockTableWait. By the time this returns, it is certain that all
6116 * transactions *of other backends* that were members of the MultiXactId
6117 * that conflict with the requested status are dead (and no new ones can have
6118 * been added, since it is not legal to add members to an existing MultiXactId).
6121 * But by the time we finish sleeping, someone else may have changed the Xmax
6122 * of the containing tuple, so the caller needs to iterate on us somehow.
6124 * Note that in case we return false, the number of remaining members is
6125 * not to be trusted.
6128 Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
6129 uint16 infomask, bool nowait,
6130 Relation rel, ItemPointer ctid, XLTW_Oper oper,
6135 MultiXactMember *members;
6139 allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
6140 nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
6141 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6147 for (i = 0; i < nmembers; i++)
6149 TransactionId memxid = members[i].xid;
6150 MultiXactStatus memstatus = members[i].status;
6152 if (TransactionIdIsCurrentTransactionId(memxid))
6158 if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
6159 LOCKMODE_from_mxstatus(status)))
6161 if (remaining && TransactionIdIsInProgress(memxid))
6167 * This member conflicts with our multi, so we have to sleep (or
6168 * return failure, if asked to avoid waiting.)
6170 * Note that we don't set up an error context callback ourselves,
6171 * but instead we pass the info down to XactLockTableWait. This
6172 * might seem a bit wasteful because the context is set up and
6173 * torn down for each member of the multixact, but in reality it
6174 * should be barely noticeable, and it avoids duplicate code.
6178 result = ConditionalXactLockTableWait(memxid);
6183 XactLockTableWait(memxid, rel, ctid, oper);
6190 *remaining = remain;
6197 * Sleep on a MultiXactId.
6199 * By the time we finish sleeping, someone else may have changed the Xmax
6200 * of the containing tuple, so the caller needs to iterate on us somehow.
6202 * We return (in *remaining, if not NULL) the number of members that are still
6203 * running, including any (non-aborted) subtransactions of our own transaction.
6206 MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
6207 Relation rel, ItemPointer ctid, XLTW_Oper oper,
6210 (void) Do_MultiXactIdWait(multi, status, infomask, false,
6211 rel, ctid, oper, remaining);
6215 * ConditionalMultiXactIdWait
6216 * As above, but only lock if we can get the lock without blocking.
6218 * By the time we finish sleeping, someone else may have changed the Xmax
6219 * of the containing tuple, so the caller needs to iterate on us somehow.
6221 * If the multixact is now all gone, return true. Returns false if some
6222 * transactions might still be running.
6224 * We return (in *remaining, if not NULL) the number of members that are still
6225 * running, including any (non-aborted) subtransactions of our own transaction.
6228 ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
6229 uint16 infomask, Relation rel, int *remaining)
6231 return Do_MultiXactIdWait(multi, status, infomask, true,
6232 rel, NULL, XLTW_None, remaining);
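/*
 * Editorial sketch (not part of the original source): the "caller needs to
 * iterate" pattern described above, heavily simplified from heap_update /
 * heap_delete.  The function name, the choice of XLTW_Update, and the
 * surrounding buffer-lock discipline are illustrative assumptions only.
 */
static void
wait_on_multixact_sketch(Relation relation, Buffer buffer, HeapTuple tuple,
						 MultiXactStatus status)
{
	for (;;)
	{
		uint16		infomask = tuple->t_data->t_infomask;
		TransactionId xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);

		if (!(infomask & HEAP_XMAX_IS_MULTI))
			break;				/* plain XID; not handled in this sketch */

		/* Don't hold the buffer lock while sleeping on other backends. */
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		MultiXactIdWait((MultiXactId) xwait, status, infomask,
						relation, &tuple->t_self, XLTW_Update, NULL);
		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

		/*
		 * While we slept, the xmax may have been replaced by a new locker or
		 * updater; if it is unchanged we are done, otherwise examine it
		 * again from the top.
		 */
		if (TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
								xwait))
			break;
	}
}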
6236 * heap_tuple_needs_freeze
6238 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
6239 * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
6241 * It doesn't matter whether the tuple is alive or dead, we are checking
6242 * to see if a tuple needs to be removed or frozen to avoid wraparound.
6244 * NB: Cannot rely on hint bits here, they might not be set after a crash or
6248 heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
6249 MultiXactId cutoff_multi, Buffer buf)
6253 xid = HeapTupleHeaderGetXmin(tuple);
6254 if (TransactionIdIsNormal(xid) &&
6255 TransactionIdPrecedes(xid, cutoff_xid))
6259 * The considerations for multixacts are complicated; look at
6260 * heap_freeze_tuple for justifications. This routine had better be in
6261 * sync with that one!
6263 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
6267 multi = HeapTupleHeaderGetRawXmax(tuple);
6268 if (!MultiXactIdIsValid(multi))
6270 /* no xmax set, ignore */
6273 else if (MultiXactIdPrecedes(multi, cutoff_multi))
6277 MultiXactMember *members;
6282 /* need to check whether any member of the mxact is too old */
6284 allow_old = !(tuple->t_infomask & HEAP_LOCK_MASK) &&
6285 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask);
6286 nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
6287 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
6289 for (i = 0; i < nmembers; i++)
6291 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6303 xid = HeapTupleHeaderGetRawXmax(tuple);
6304 if (TransactionIdIsNormal(xid) &&
6305 TransactionIdPrecedes(xid, cutoff_xid))
6309 if (tuple->t_infomask & HEAP_MOVED)
6311 xid = HeapTupleHeaderGetXvac(tuple);
6312 if (TransactionIdIsNormal(xid) &&
6313 TransactionIdPrecedes(xid, cutoff_xid))
6321 * heap_markpos - mark scan position
6325 heap_markpos(HeapScanDesc scan)
6327 /* Note: no locking manipulations needed */
6329 if (scan->rs_ctup.t_data != NULL)
6331 scan->rs_mctid = scan->rs_ctup.t_self;
6332 if (scan->rs_pageatatime)
6333 scan->rs_mindex = scan->rs_cindex;
6336 ItemPointerSetInvalid(&scan->rs_mctid);
6340 * heap_restrpos - restore position to marked location
6344 heap_restrpos(HeapScanDesc scan)
6346 /* XXX no amrestrpos checking that ammarkpos called */
6348 if (!ItemPointerIsValid(&scan->rs_mctid))
6350 scan->rs_ctup.t_data = NULL;
6353 * unpin scan buffers
6355 if (BufferIsValid(scan->rs_cbuf))
6356 ReleaseBuffer(scan->rs_cbuf);
6357 scan->rs_cbuf = InvalidBuffer;
6358 scan->rs_cblock = InvalidBlockNumber;
6359 scan->rs_inited = false;
6364 * If we reached end of scan, rs_inited will now be false. We must
6365 * reset it to true to keep heapgettup from doing the wrong thing.
6367 scan->rs_inited = true;
6368 scan->rs_ctup.t_self = scan->rs_mctid;
6369 if (scan->rs_pageatatime)
6371 scan->rs_cindex = scan->rs_mindex;
6372 heapgettup_pagemode(scan,
6373 NoMovementScanDirection,
6374 0, /* needn't recheck scan keys */
6379 NoMovementScanDirection,
6380 0, /* needn't recheck scan keys */
6386 * If 'tuple' contains any visible XID greater than latestRemovedXid,
6387 * ratchet forwards latestRemovedXid to the greatest one found.
6388 * This is used as the basis for generating Hot Standby conflicts, so
6389 * if a tuple was never visible then removing it should not conflict with queries.
6393 HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
6394 TransactionId *latestRemovedXid)
6396 TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
6397 TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
6398 TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
6400 if (tuple->t_infomask & HEAP_MOVED)
6402 if (TransactionIdPrecedes(*latestRemovedXid, xvac))
6403 *latestRemovedXid = xvac;
6407 * Ignore tuples inserted by an aborted transaction or if the tuple was
6408 * updated/deleted by the inserting transaction.
6410 * Look for a committed hint bit, or if no xmin bit is set, check clog.
6411 * This needs to work on both master and standby, where it is used to
6412 * assess btree delete records.
6414 if (HeapTupleHeaderXminCommitted(tuple) ||
6415 (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
6418 TransactionIdFollows(xmax, *latestRemovedXid))
6419 *latestRemovedXid = xmax;
6422 /* *latestRemovedXid may still be invalid at end */
6426 * Perform XLogInsert to register a heap cleanup info message. These
6427 * messages are sent once per VACUUM and are required because
6428 * of the phasing of removal operations during a lazy VACUUM.
6429 * see comments for vacuum_log_cleanup_info().
6432 log_heap_cleanup_info(RelFileNode rnode, TransactionId latestRemovedXid)
6434 xl_heap_cleanup_info xlrec;
6438 xlrec.latestRemovedXid = latestRemovedXid;
6441 XLogRegisterData((char *) &xlrec, SizeOfHeapCleanupInfo);
6443 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEANUP_INFO);
6449 * Perform XLogInsert for a heap-clean operation. Caller must already
6450 * have modified the buffer and marked it dirty.
6452 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
6453 * zero-based tuple indexes. Now they are one-based like other uses of OffsetNumber.
6456 * We also include latestRemovedXid, which is the greatest XID present in
6457 * the removed tuples. That allows recovery processing to cancel or wait
6458 * for long standby queries that can still see these tuples.
6461 log_heap_clean(Relation reln, Buffer buffer,
6462 OffsetNumber *redirected, int nredirected,
6463 OffsetNumber *nowdead, int ndead,
6464 OffsetNumber *nowunused, int nunused,
6465 TransactionId latestRemovedXid)
6467 xl_heap_clean xlrec;
6470 /* Caller should not call me on a non-WAL-logged relation */
6471 Assert(RelationNeedsWAL(reln));
6473 xlrec.latestRemovedXid = latestRemovedXid;
6474 xlrec.nredirected = nredirected;
6475 xlrec.ndead = ndead;
6478 XLogRegisterData((char *) &xlrec, SizeOfHeapClean);
6480 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6483 * The OffsetNumber arrays are not actually in the buffer, but we pretend
6484 * that they are. When XLogInsert stores the whole buffer, the offset
6485 * arrays need not be stored too. Note that even if all three arrays are
6486 * empty, we want to expose the buffer as a candidate for whole-page
6487 * storage, since this record type implies a defragmentation operation
6488 * even if no item pointers changed state.
6490 if (nredirected > 0)
6491 XLogRegisterBufData(0, (char *) redirected,
6492 nredirected * sizeof(OffsetNumber) * 2);
6495 XLogRegisterBufData(0, (char *) nowdead,
6496 ndead * sizeof(OffsetNumber));
6499 XLogRegisterBufData(0, (char *) nowunused,
6500 nunused * sizeof(OffsetNumber));
6502 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEAN);
6508 * Perform XLogInsert for a heap-freeze operation. Caller must have already
6509 * modified the buffer and marked it dirty.
6512 log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
6513 xl_heap_freeze_tuple *tuples, int ntuples)
6515 xl_heap_freeze_page xlrec;
6518 /* Caller should not call me on a non-WAL-logged relation */
6519 Assert(RelationNeedsWAL(reln));
6520 /* nor when there are no tuples to freeze */
6521 Assert(ntuples > 0);
6523 xlrec.cutoff_xid = cutoff_xid;
6524 xlrec.ntuples = ntuples;
6527 XLogRegisterData((char *) &xlrec, SizeOfHeapFreezePage);
6530 * The freeze plan array is not actually in the buffer, but pretend that
6531 * it is. When XLogInsert stores the whole buffer, the freeze plan need
6532 * not be stored too.
6534 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6535 XLogRegisterBufData(0, (char *) tuples,
6536 ntuples * sizeof(xl_heap_freeze_tuple));
6538 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE);
6544 * Perform XLogInsert for a heap-visible operation. 'block' is the block
6545 * being marked all-visible, and vm_buffer is the buffer containing the
6546 * corresponding visibility map block. Both should have already been modified
6549 * If checksums are enabled, we also generate a full-page image of
6550 * heap_buffer, if necessary.
6553 log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
6554 TransactionId cutoff_xid)
6556 xl_heap_visible xlrec;
6560 Assert(BufferIsValid(heap_buffer));
6561 Assert(BufferIsValid(vm_buffer));
6563 xlrec.cutoff_xid = cutoff_xid;
6565 XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
6567 XLogRegisterBuffer(0, vm_buffer, 0);
6569 flags = REGBUF_STANDARD;
6570 if (!XLogHintBitIsNeeded())
6571 flags |= REGBUF_NO_IMAGE;
6572 XLogRegisterBuffer(1, heap_buffer, flags);
6574 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
6580 * Perform XLogInsert for a heap-update operation. Caller must already
6581 * have modified the buffer(s) and marked them dirty.
6584 log_heap_update(Relation reln, Buffer oldbuf,
6585 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
6586 HeapTuple old_key_tuple,
6587 bool all_visible_cleared, bool new_all_visible_cleared)
6589 xl_heap_update xlrec;
6590 xl_heap_header xlhdr;
6591 xl_heap_header xlhdr_idx;
6593 uint16 prefix_suffix[2];
6594 uint16 prefixlen = 0,
6597 Page page = BufferGetPage(newbuf);
6598 bool need_tuple_data = RelationIsLogicallyLogged(reln);
6602 /* Caller should not call me on a non-WAL-logged relation */
6603 Assert(RelationNeedsWAL(reln));
6607 if (HeapTupleIsHeapOnly(newtup))
6608 info = XLOG_HEAP_HOT_UPDATE;
6610 info = XLOG_HEAP_UPDATE;
6613 * If the old and new tuple are on the same page, we only need to log the
6614 * parts of the new tuple that were changed. That saves on the amount of
6615 * WAL we need to write. Currently, we just count any unchanged bytes in
6616 * the beginning and end of the tuple. That's quick to check, and
6617 * perfectly covers the common case that only one field is updated.
6619 * We could do this even if the old and new tuple are on different pages,
6620 * but only if we don't make a full-page image of the old page, which is
6621 * difficult to know in advance. Also, if the old tuple is corrupt for
6622 * some reason, it would allow the corruption to propagate to the new page,
6623 * so it seems best to avoid. Under the general assumption that most
6624 * updates tend to create the new tuple version on the same page, there
6625 * isn't much to be gained by doing this across pages anyway.
6627 * Skip this if we're taking a full-page image of the new page, as we
6628 * don't include the new tuple in the WAL record in that case. Also
6629 * disable if wal_level='logical', as logical decoding needs to be able to
6630 * read the new tuple in whole from the WAL record alone.
6632 if (oldbuf == newbuf && !need_tuple_data &&
6633 !XLogCheckBufferNeedsBackup(newbuf))
6635 char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
6636 char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
6637 int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
6638 int newlen = newtup->t_len - newtup->t_data->t_hoff;
6640 /* Check for common prefix between old and new tuple */
6641 for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
6643 if (newp[prefixlen] != oldp[prefixlen])
6648 * Storing the length of the prefix takes 2 bytes, so we need to save
6649 * at least 3 bytes or there's no point.
6654 /* Same for suffix */
6655 for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
6657 if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
6664 /* Prepare main WAL data chain */
6666 if (all_visible_cleared)
6667 xlrec.flags |= XLOG_HEAP_ALL_VISIBLE_CLEARED;
6668 if (new_all_visible_cleared)
6669 xlrec.flags |= XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED;
6671 xlrec.flags |= XLOG_HEAP_PREFIX_FROM_OLD;
6673 xlrec.flags |= XLOG_HEAP_SUFFIX_FROM_OLD;
6674 if (need_tuple_data)
6676 xlrec.flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
6679 if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
6680 xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_TUPLE;
6682 xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_KEY;
6686 /* If new tuple is the single and first tuple on page... */
6687 if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
6688 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
6690 info |= XLOG_HEAP_INIT_PAGE;
6696 /* Prepare WAL data for the old page */
6697 xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
6698 xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
6699 xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
6700 oldtup->t_data->t_infomask2);
6702 /* Prepare WAL data for the new page */
6703 xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
6704 xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
6706 bufflags = REGBUF_STANDARD;
6708 bufflags |= REGBUF_WILL_INIT;
6709 if (need_tuple_data)
6710 bufflags |= REGBUF_KEEP_DATA;
6712 XLogRegisterBuffer(0, newbuf, bufflags);
6713 if (oldbuf != newbuf)
6714 XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
6716 XLogRegisterData((char *) &xlrec, SizeOfHeapUpdate);
6719 * Prepare WAL data for the new tuple.
6721 if (prefixlen > 0 || suffixlen > 0)
6723 if (prefixlen > 0 && suffixlen > 0)
6725 prefix_suffix[0] = prefixlen;
6726 prefix_suffix[1] = suffixlen;
6727 XLogRegisterBufData(0, (char *) &prefix_suffix, sizeof(uint16) * 2);
6729 else if (prefixlen > 0)
6731 XLogRegisterBufData(0, (char *) &prefixlen, sizeof(uint16));
6735 XLogRegisterBufData(0, (char *) &suffixlen, sizeof(uint16));
6739 xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
6740 xlhdr.t_infomask = newtup->t_data->t_infomask;
6741 xlhdr.t_hoff = newtup->t_data->t_hoff;
6742 Assert(offsetof(HeapTupleHeaderData, t_bits) + prefixlen + suffixlen <= newtup->t_len);
6745 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
6747 * The 'data' doesn't include the common prefix or suffix.
6749 XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
6752 XLogRegisterBufData(0,
6753 ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits),
6754 newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - suffixlen);
6759 * Have to write the null bitmap and data after the common prefix as
6760 * two separate rdata entries.
6762 /* bitmap [+ padding] [+ oid] */
6763 if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) > 0)
6765 XLogRegisterBufData(0,
6766 ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits),
6767 newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits));
6770 /* data after common prefix */
6771 XLogRegisterBufData(0,
6772 ((char *) newtup->t_data) + newtup->t_data->t_hoff + prefixlen,
6773 newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
6776 /* We need to log a tuple identity */
6777 if (need_tuple_data && old_key_tuple)
6779 /* don't really need this, but it's more comfy to decode */
6780 xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
6781 xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
6782 xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
6784 XLogRegisterData((char *) &xlhdr_idx, SizeOfHeapHeader);
6786 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
6787 XLogRegisterData((char *) old_key_tuple->t_data + offsetof(HeapTupleHeaderData, t_bits),
6788 old_key_tuple->t_len - offsetof(HeapTupleHeaderData, t_bits));
6791 recptr = XLogInsert(RM_HEAP_ID, info);
6797 * Perform XLogInsert of a XLOG_HEAP2_NEW_CID record
6799 * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog tuples.
6803 log_heap_new_cid(Relation relation, HeapTuple tup)
6805 xl_heap_new_cid xlrec;
6808 HeapTupleHeader hdr = tup->t_data;
6810 Assert(ItemPointerIsValid(&tup->t_self));
6811 Assert(tup->t_tableOid != InvalidOid);
6813 xlrec.top_xid = GetTopTransactionId();
6814 xlrec.target_node = relation->rd_node;
6815 xlrec.target_tid = tup->t_self;
6818 * If the tuple got inserted & deleted in the same TX we definitely have a
6819 * combocid, set cmin and cmax.
6821 if (hdr->t_infomask & HEAP_COMBOCID)
6823 Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
6824 Assert(!HeapTupleHeaderXminInvalid(hdr));
6825 xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
6826 xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
6827 xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
6829 /* No combocid, so only cmin or cmax can be set by this TX */
6835 * We need to check for LOCK ONLY because multixacts might be
6836 * transferred to the new tuple in case of FOR KEY SHARE updates in
6837 * which case there will be an xmax, although the tuple just got inserted.
6840 if (hdr->t_infomask & HEAP_XMAX_INVALID ||
6841 HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
6843 xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
6844 xlrec.cmax = InvalidCommandId;
6846 /* Tuple from a different tx updated or deleted. */
6849 xlrec.cmin = InvalidCommandId;
6850 xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
6853 xlrec.combocid = InvalidCommandId;
6857 * Note that we don't need to register the buffer here, because this
6858 * operation does not modify the page. The insert/update/delete that
6859 * called us certainly did, but that's WAL-logged separately.
6862 XLogRegisterData((char *) &xlrec, SizeOfHeapNewCid);
6864 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
6870 * Build a heap tuple representing the configured REPLICA IDENTITY to represent
6871 * the old tuple in an UPDATE or DELETE.
6873 * Returns NULL if there's no need to log an identity or if there's no suitable
6874 * key in the Relation relation.
6877 ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *copy)
6879 TupleDesc desc = RelationGetDescr(relation);
6883 char replident = relation->rd_rel->relreplident;
6884 HeapTuple key_tuple = NULL;
6885 bool nulls[MaxHeapAttributeNumber];
6886 Datum values[MaxHeapAttributeNumber];
6891 if (!RelationIsLogicallyLogged(relation))
6894 if (replident == REPLICA_IDENTITY_NOTHING)
6897 if (replident == REPLICA_IDENTITY_FULL)
6900 * When logging the entire old tuple, it very well could contain
6901 * toasted columns. If so, force them to be inlined.
6903 if (HeapTupleHasExternal(tp))
6906 tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
6911 /* if the key hasn't changed and we're only logging the key, we're done */
6915 /* find the replica identity index */
6916 replidindex = RelationGetReplicaIndex(relation);
6917 if (!OidIsValid(replidindex))
6919 elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
6920 RelationGetRelationName(relation));
6924 idx_rel = RelationIdGetRelation(replidindex);
6925 idx_desc = RelationGetDescr(idx_rel);
6927 /* deform tuple, so we have fast access to columns */
6928 heap_deform_tuple(tp, desc, values, nulls);
6930 /* set all columns to NULL, regardless of whether they actually are */
6931 memset(nulls, 1, sizeof(nulls));
6934 * Now set all columns contained in the index to NOT NULL, they cannot
6935 * currently be NULL.
6937 for (natt = 0; natt < idx_desc->natts; natt++)
6939 int attno = idx_rel->rd_index->indkey.values[natt];
6944 * The OID column can appear in an index definition, but that's
6945 * OK, because we always copy the OID if present (see below). Other
6946 * system columns may not.
6948 if (attno == ObjectIdAttributeNumber)
6950 elog(ERROR, "system column in index");
6952 nulls[attno - 1] = false;
6955 key_tuple = heap_form_tuple(desc, values, nulls);
6957 RelationClose(idx_rel);
6960 * Always copy oids if the table has them, even if not included in the
6961 * index. The space in the logged tuple is used anyway, so there's little
6962 * point in not including the information.
6964 if (relation->rd_rel->relhasoids)
6965 HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
6968 * If the tuple, which by here only contains indexed columns, still has
6969 * toasted columns, force them to be inlined. This is somewhat unlikely
6970 * since there are limits on the size of indexed columns, so we don't
6971 * duplicate toast_flatten_tuple()'s functionality in the above loop over
6972 * the indexed columns, even if it would be more efficient.
6974 if (HeapTupleHasExternal(key_tuple))
6976 HeapTuple oldtup = key_tuple;
6978 key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
6979 heap_freetuple(oldtup);
6986 * Handles CLEANUP_INFO
6989 heap_xlog_cleanup_info(XLogReaderState *record)
6991 xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) XLogRecGetData(record);
6994 ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
6997 * Actual operation is a no-op. Record type exists to provide a means for
6998 * conflict processing to occur before we begin index vacuum actions. see
6999 * vacuumlazy.c and also comments in btvacuumpage()
7002 /* Backup blocks are not used in cleanup_info records */
7003 Assert(!XLogRecHasAnyBlockRefs(record));
7007 * Handles HEAP2_CLEAN record type
7010 heap_xlog_clean(XLogReaderState *record)
7012 XLogRecPtr lsn = record->EndRecPtr;
7013 xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
7018 XLogRedoAction action;
7020 XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
7023 * We're about to remove tuples. In Hot Standby mode, ensure that there are
7024 * no queries running for which the removed tuples are still visible.
7026 * Not all HEAP2_CLEAN records remove tuples with xids, so we only want to
7027 * conflict on the records that cause MVCC failures for user queries. If
7028 * latestRemovedXid is invalid, skip conflict processing.
7030 if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
7031 ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
7034 * If we have a full-page image, restore it (using a cleanup lock) and we're done.
7037 action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true,
7039 if (action == BLK_NEEDS_REDO)
7041 Page page = (Page) BufferGetPage(buffer);
7043 OffsetNumber *redirected;
7044 OffsetNumber *nowdead;
7045 OffsetNumber *nowunused;
7051 redirected = (OffsetNumber *) XLogRecGetBlockData(record, 0, &datalen);
7053 nredirected = xlrec->nredirected;
7054 ndead = xlrec->ndead;
7055 end = (OffsetNumber *) ((char *) redirected + datalen);
7056 nowdead = redirected + (nredirected * 2);
7057 nowunused = nowdead + ndead;
7058 nunused = (end - nowunused);
7059 Assert(nunused >= 0);
7061 /* Update all item pointers per the record, and repair fragmentation */
7062 heap_page_prune_execute(buffer,
7063 redirected, nredirected,
7065 nowunused, nunused);
7067 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7070 * Note: we don't worry about updating the page's prunability hints.
7071 * At worst this will cause an extra prune cycle to occur soon.
7074 PageSetLSN(page, lsn);
7075 MarkBufferDirty(buffer);
7077 if (BufferIsValid(buffer))
7078 UnlockReleaseBuffer(buffer);
7081 * Update the FSM as well.
7083 * XXX: Don't do this if the page was restored from full page image. We
7084 * don't bother to update the FSM in that case, it doesn't need to be
7085 * totally accurate anyway.
7087 if (action == BLK_NEEDS_REDO)
7088 XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
7092 * Replay XLOG_HEAP2_VISIBLE record.
7094 * The critical integrity requirement here is that we must never end up with
7095 * a situation where the visibility map bit is set, and the page-level
7096 * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
7097 * page modification would fail to clear the visibility map bit.
7100 heap_xlog_visible(XLogReaderState *record)
7102 XLogRecPtr lsn = record->EndRecPtr;
7103 xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
7104 Buffer vmbuffer = InvalidBuffer;
7109 XLogRedoAction action;
7111 XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
7114 * If there are any Hot Standby transactions running that have an xmin
7115 * horizon old enough that this page isn't all-visible for them, they
7116 * might incorrectly decide that an index-only scan can skip a heap fetch.
7118 * NB: It might be better to throw some kind of "soft" conflict here that
7119 * forces any index-only scan that is in flight to perform heap fetches,
7120 * rather than killing the transaction outright.
7123 ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
7126 * Read the heap page, if it still exists. If the heap file has been dropped or
7127 * truncated later in recovery, we don't need to update the page, but we'd
7128 * better still update the visibility map.
7130 action = XLogReadBufferForRedo(record, 1, &buffer);
7131 if (action == BLK_NEEDS_REDO)
7134 * We don't bump the LSN of the heap page when setting the visibility
7135 * map bit (unless checksums are enabled, in which case we must),
7136 * because that would generate an unworkable volume of full-page
7137 * writes. This exposes us to torn page hazards, but since we're not
7138 * inspecting the existing page contents in any way, we don't care.
7140 * However, all operations that clear the visibility map bit *do* bump
7141 * the LSN, and those operations will only be replayed if the XLOG LSN
7142 * follows the page LSN. Thus, if the page LSN has advanced past our
7143 * XLOG record's LSN, we mustn't mark the page all-visible, because
7144 * the subsequent update won't be replayed to clear the flag.
7146 page = BufferGetPage(buffer);
7147 PageSetAllVisible(page);
7148 MarkBufferDirty(buffer);
7150 else if (action == BLK_RESTORED)
7153 * If heap block was backed up, restore it. This can only happen with
7154 * checksums enabled.
7156 Assert(DataChecksumsEnabled());
7158 if (BufferIsValid(buffer))
7159 UnlockReleaseBuffer(buffer);
7162 * Even if we skipped the heap page update due to the LSN interlock, it's
7163 * still safe to update the visibility map. Any WAL record that clears
7164 * the visibility map bit does so before checking the page LSN, so any
7165 * bits that need to be cleared will still be cleared.
7167 if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
7168 &vmbuffer) == BLK_NEEDS_REDO)
7170 Page vmpage = BufferGetPage(vmbuffer);
7173 /* initialize the page if it was read as zeros */
7174 if (PageIsNew(vmpage))
7175 PageInit(vmpage, BLCKSZ, 0);
7178 * XLogReadBufferForRedoExtended locked the buffer. But visibilitymap_set
7179 * will handle locking itself.
7181 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
7183 reln = CreateFakeRelcacheEntry(rnode);
7184 visibilitymap_pin(reln, blkno, &vmbuffer);
7187 * Don't set the bit if replay has already passed this point.
7189 * It might be safe to do this unconditionally; if replay has passed
7190 * this point, we'll replay at least as far this time as we did
7191 * before, and if this bit needs to be cleared, the record responsible
7192 * for doing so will be replayed again and will clear it. For right
7193 * now, out of an abundance of conservatism, we use the same test here
7194 * we did for the heap page. If this results in a dropped bit, no
7195 * real harm is done; and the next VACUUM will fix it.
7197 if (lsn > PageGetLSN(vmpage))
7198 visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
7201 ReleaseBuffer(vmbuffer);
7202 FreeFakeRelcacheEntry(reln);
7204 else if (BufferIsValid(vmbuffer))
7205 UnlockReleaseBuffer(vmbuffer);
7209 * Replay XLOG_HEAP2_FREEZE_PAGE records
7212 heap_xlog_freeze_page(XLogReaderState *record)
7214 XLogRecPtr lsn = record->EndRecPtr;
7215 xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) XLogRecGetData(record);
7216 TransactionId cutoff_xid = xlrec->cutoff_xid;
7221 * In Hot Standby mode, ensure that there are no queries running that still
7222 * consider the frozen xids as running.
7228 XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
7229 ResolveRecoveryConflictWithSnapshot(cutoff_xid, rnode);
7232 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
7234 Page page = BufferGetPage(buffer);
7235 xl_heap_freeze_tuple *tuples;
7237 tuples = (xl_heap_freeze_tuple *) XLogRecGetBlockData(record, 0, NULL);
7239 /* now execute freeze plan for each frozen tuple */
7240 for (ntup = 0; ntup < xlrec->ntuples; ntup++)
7242 xl_heap_freeze_tuple *xlrec_tp;
7244 HeapTupleHeader tuple;
7246 xlrec_tp = &tuples[ntup];
7247 lp = PageGetItemId(page, xlrec_tp->offset); /* offsets are one-based */
7248 tuple = (HeapTupleHeader) PageGetItem(page, lp);
7250 heap_execute_freeze_tuple(tuple, xlrec_tp);
7253 PageSetLSN(page, lsn);
7254 MarkBufferDirty(buffer);
7256 if (BufferIsValid(buffer))
7257 UnlockReleaseBuffer(buffer);
7261 * Given an "infobits" field from an XLog record, set the correct bits in the
7262 * given infomask and infomask2 for the tuple touched by the record.
7264 * (This is the reverse of compute_infobits).
7267 fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
7269 *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
7270 HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
7271 *infomask2 &= ~HEAP_KEYS_UPDATED;
7273 if (infobits & XLHL_XMAX_IS_MULTI)
7274 *infomask |= HEAP_XMAX_IS_MULTI;
7275 if (infobits & XLHL_XMAX_LOCK_ONLY)
7276 *infomask |= HEAP_XMAX_LOCK_ONLY;
7277 if (infobits & XLHL_XMAX_EXCL_LOCK)
7278 *infomask |= HEAP_XMAX_EXCL_LOCK;
7279 /* note HEAP_XMAX_SHR_LOCK isn't considered here */
7280 if (infobits & XLHL_XMAX_KEYSHR_LOCK)
7281 *infomask |= HEAP_XMAX_KEYSHR_LOCK;
7283 if (infobits & XLHL_KEYS_UPDATED)
7284 *infomask2 |= HEAP_KEYS_UPDATED;
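/*
 * Editorial sketch (not part of the original source): the forward direction,
 * compute_infobits(), is defined earlier in this file; it packs roughly the
 * flag set below.  HEAP_XMAX_SHR_LOCK has no bit of its own because a share
 * lock is the combination of the EXCL and KEYSHR bits, which is why the note
 * above says it isn't considered here.
 */
static uint8
compute_infobits_sketch(uint16 infomask, uint16 infomask2)
{
	return
		((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
		((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
		((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
		((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
		((infomask2 & HEAP_KEYS_UPDATED) != 0 ? XLHL_KEYS_UPDATED : 0);
}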
7288 heap_xlog_delete(XLogReaderState *record)
7290 XLogRecPtr lsn = record->EndRecPtr;
7291 xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
7295 HeapTupleHeader htup;
7297 RelFileNode target_node;
7298 ItemPointerData target_tid;
7300 XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
7301 ItemPointerSetBlockNumber(&target_tid, blkno);
7302 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
7305 * The visibility map may need to be fixed even if the heap page is
7306 * already up-to-date.
7308 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7310 Relation reln = CreateFakeRelcacheEntry(target_node);
7311 Buffer vmbuffer = InvalidBuffer;
7313 visibilitymap_pin(reln, blkno, &vmbuffer);
7314 visibilitymap_clear(reln, blkno, vmbuffer);
7315 ReleaseBuffer(vmbuffer);
7316 FreeFakeRelcacheEntry(reln);
7319 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
7321 page = BufferGetPage(buffer);
7323 if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
7324 lp = PageGetItemId(page, xlrec->offnum);
7326 if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
7327 elog(PANIC, "heap_delete_redo: invalid lp");
7329 htup = (HeapTupleHeader) PageGetItem(page, lp);
7331 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
7332 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7333 HeapTupleHeaderClearHotUpdated(htup);
7334 fix_infomask_from_infobits(xlrec->infobits_set,
7335 &htup->t_infomask, &htup->t_infomask2);
7336 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
7337 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
7339 /* Mark the page as a candidate for pruning */
7340 PageSetPrunable(page, XLogRecGetXid(record));
7342 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7343 PageClearAllVisible(page);
7345 /* Make sure there is no forward chain link in t_ctid */
7346 htup->t_ctid = target_tid;
7347 PageSetLSN(page, lsn);
7348 MarkBufferDirty(buffer);
7350 if (BufferIsValid(buffer))
7351 UnlockReleaseBuffer(buffer);
7355 heap_xlog_insert(XLogReaderState *record)
7357 XLogRecPtr lsn = record->EndRecPtr;
7358 xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
7363 HeapTupleHeaderData hdr;
7364 char data[MaxHeapTupleSize];
7366 HeapTupleHeader htup;
7367 xl_heap_header xlhdr;
7370 RelFileNode target_node;
7372 ItemPointerData target_tid;
7373 XLogRedoAction action;
7375 XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
7376 ItemPointerSetBlockNumber(&target_tid, blkno);
7377 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
7380 * The visibility map may need to be fixed even if the heap page is
7381 * already up-to-date.
7383 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7385 Relation reln = CreateFakeRelcacheEntry(target_node);
7386 Buffer vmbuffer = InvalidBuffer;
7388 visibilitymap_pin(reln, blkno, &vmbuffer);
7389 visibilitymap_clear(reln, blkno, vmbuffer);
7390 ReleaseBuffer(vmbuffer);
7391 FreeFakeRelcacheEntry(reln);
7395 * If we inserted the first and only tuple on the page, re-initialize the
7396 * page from scratch.
7398 if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
7400 buffer = XLogInitBufferForRedo(record, 0);
7401 page = BufferGetPage(buffer);
7402 PageInit(page, BufferGetPageSize(buffer), 0);
7403 action = BLK_NEEDS_REDO;
7406 action = XLogReadBufferForRedo(record, 0, &buffer);
7407 if (action == BLK_NEEDS_REDO)
7412 page = BufferGetPage(buffer);
7414 if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
7415 elog(PANIC, "heap_insert_redo: invalid max offset number");
7417 data = XLogRecGetBlockData(record, 0, &datalen);
7419 newlen = datalen - SizeOfHeapHeader;
7420 Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
7421 memcpy((char *) &xlhdr, data, SizeOfHeapHeader);
7422 data += SizeOfHeapHeader;
7425 MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
7426 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
7427 memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
7430 newlen += offsetof(HeapTupleHeaderData, t_bits);
7431 htup->t_infomask2 = xlhdr.t_infomask2;
7432 htup->t_infomask = xlhdr.t_infomask;
7433 htup->t_hoff = xlhdr.t_hoff;
7434 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
7435 HeapTupleHeaderSetCmin(htup, FirstCommandId);
7436 htup->t_ctid = target_tid;
7438 if (PageAddItem(page, (Item) htup, newlen, xlrec->offnum,
7439 true, true) == InvalidOffsetNumber)
7440 elog(PANIC, "heap_insert_redo: failed to add tuple");
7442 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7444 PageSetLSN(page, lsn);
7446 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7447 PageClearAllVisible(page);
7449 MarkBufferDirty(buffer);
7451 if (BufferIsValid(buffer))
7452 UnlockReleaseBuffer(buffer);
7455 * If the page is running low on free space, update the FSM as well.
7456 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
7457 * better than that without knowing the fill-factor for the table.
7459 * XXX: Don't do this if the page was restored from full page image. We
7460 * don't bother to update the FSM in that case, it doesn't need to be
7461 * totally accurate anyway.
7463 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
7464 XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
7468 * Handles MULTI_INSERT record type.
7471 heap_xlog_multi_insert(XLogReaderState *record)
7473 XLogRecPtr lsn = record->EndRecPtr;
7474 xl_heap_multi_insert *xlrec;
7481 HeapTupleHeaderData hdr;
7482 char data[MaxHeapTupleSize];
7484 HeapTupleHeader htup;
7488 bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
7489 XLogRedoAction action;
7492 * Insertion doesn't overwrite MVCC data, so no conflict processing is required.
7495 xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
7497 XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
7500 * The visibility map may need to be fixed even if the heap page is
7501 * already up-to-date.
7503 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7505 Relation reln = CreateFakeRelcacheEntry(rnode);
7506 Buffer vmbuffer = InvalidBuffer;
7508 visibilitymap_pin(reln, blkno, &vmbuffer);
7509 visibilitymap_clear(reln, blkno, vmbuffer);
7510 ReleaseBuffer(vmbuffer);
7511 FreeFakeRelcacheEntry(reln);
7516 buffer = XLogInitBufferForRedo(record, 0);
7517 page = BufferGetPage(buffer);
7518 PageInit(page, BufferGetPageSize(buffer), 0);
7519 action = BLK_NEEDS_REDO;
7522 action = XLogReadBufferForRedo(record, 0, &buffer);
7523 if (action == BLK_NEEDS_REDO)
7529 /* Tuples are stored as block data */
7530 tupdata = XLogRecGetBlockData(record, 0, &len);
7531 endptr = tupdata + len;
7533 page = (Page) BufferGetPage(buffer);
7535 for (i = 0; i < xlrec->ntuples; i++)
7537 OffsetNumber offnum;
7538 xl_multi_insert_tuple *xlhdr;
7541 * If we're reinitializing the page, the tuples are stored in
7542 * order from FirstOffsetNumber. Otherwise there's an array of
7543 * offsets in the WAL record, and the tuples come after that.
7546 offnum = FirstOffsetNumber + i;
7548 offnum = xlrec->offsets[i];
7549 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
7550 elog(PANIC, "heap_multi_insert_redo: invalid max offset number");
7552 xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
7553 tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
7555 newlen = xlhdr->datalen;
7556 Assert(newlen <= MaxHeapTupleSize);
7558 MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
7559 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
7560 memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
7565 newlen += offsetof(HeapTupleHeaderData, t_bits);
7566 htup->t_infomask2 = xlhdr->t_infomask2;
7567 htup->t_infomask = xlhdr->t_infomask;
7568 htup->t_hoff = xlhdr->t_hoff;
7569 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
7570 HeapTupleHeaderSetCmin(htup, FirstCommandId);
7571 ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
7572 ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
7574 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
7575 if (offnum == InvalidOffsetNumber)
7576 elog(PANIC, "heap_multi_insert_redo: failed to add tuple");
7578 if (tupdata != endptr)
7579 elog(PANIC, "heap_multi_insert_redo: total tuple length mismatch");
7581 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7583 PageSetLSN(page, lsn);
7585 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7586 PageClearAllVisible(page);
7588 MarkBufferDirty(buffer);
7590 if (BufferIsValid(buffer))
7591 UnlockReleaseBuffer(buffer);
7594 * If the page is running low on free space, update the FSM as well.
7595 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
7596 * better than that without knowing the fill-factor for the table.
7598 * XXX: Don't do this if the page was restored from full page image. We
7599 * don't bother to update the FSM in that case, it doesn't need to be
7600 * totally accurate anyway.
7602 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
7603 XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
7607 * Handles UPDATE and HOT_UPDATE
7610 heap_xlog_update(XLogReaderState *record, bool hot_update)
7612 XLogRecPtr lsn = record->EndRecPtr;
7613 xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
7617 ItemPointerData newtid;
7621 OffsetNumber offnum;
7623 HeapTupleData oldtup;
7624 HeapTupleHeader htup;
7625 uint16 prefixlen = 0,
7630 HeapTupleHeaderData hdr;
7631 char data[MaxHeapTupleSize];
7633 xl_heap_header xlhdr;
7636 XLogRedoAction oldaction;
7637 XLogRedoAction newaction;
7639 /* initialize to keep the compiler quiet */
7640 oldtup.t_data = NULL;
7643 XLogRecGetBlockTag(record, 0, &rnode, NULL, &newblk);
7644 if (XLogRecGetBlockTag(record, 1, NULL, NULL, &oldblk))
7646 /* HOT updates are never done across pages */
7647 Assert(!hot_update);
7652 ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
7655 * The visibility map may need to be fixed even if the heap page is
7656 * already up-to-date.
7658 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7660 Relation reln = CreateFakeRelcacheEntry(rnode);
7661 Buffer vmbuffer = InvalidBuffer;
7663 visibilitymap_pin(reln, oldblk, &vmbuffer);
7664 visibilitymap_clear(reln, oldblk, vmbuffer);
7665 ReleaseBuffer(vmbuffer);
7666 FreeFakeRelcacheEntry(reln);
7670 * In normal operation, it is important to lock the two pages in
7671 * page-number order, to avoid possible deadlocks against other update
7672 * operations going the other way. However, during WAL replay there can
7673 * be no other update happening, so we don't need to worry about that. But
7674 * we *do* need to worry that we don't expose an inconsistent state to Hot
7675 * Standby queries --- so the original page can't be unlocked before we've
7676 * added the new tuple to the new page.
7679 /* Deal with old tuple version */
7680 oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
7682 if (oldaction == BLK_NEEDS_REDO)
7684 page = BufferGetPage(obuffer);
7685 offnum = xlrec->old_offnum;
7686 if (PageGetMaxOffsetNumber(page) >= offnum)
7687 lp = PageGetItemId(page, offnum);
7689 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
7690 elog(PANIC, "heap_update_redo: invalid lp");
7692 htup = (HeapTupleHeader) PageGetItem(page, lp);
7694 oldtup.t_data = htup;
7695 oldtup.t_len = ItemIdGetLength(lp);
7697 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
7698 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7700 HeapTupleHeaderSetHotUpdated(htup);
7702 HeapTupleHeaderClearHotUpdated(htup);
7703 fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
7704 &htup->t_infomask2);
7705 HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
7706 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
7707 /* Set forward chain link in t_ctid */
7708 htup->t_ctid = newtid;
7710 /* Mark the page as a candidate for pruning */
7711 PageSetPrunable(page, XLogRecGetXid(record));
7713 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7714 PageClearAllVisible(page);
7716 PageSetLSN(page, lsn);
7717 MarkBufferDirty(obuffer);
7721 * Read the page the new tuple goes into, if different from old.
7723 if (oldblk == newblk)
7726 newaction = oldaction;
7728 else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
7730 nbuffer = XLogInitBufferForRedo(record, 0);
7731 page = (Page) BufferGetPage(nbuffer);
7732 PageInit(page, BufferGetPageSize(nbuffer), 0);
7733 newaction = BLK_NEEDS_REDO;
7736 newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
7739 * The visibility map may need to be fixed even if the heap page is
7740 * already up-to-date.
7742 if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
7744 Relation reln = CreateFakeRelcacheEntry(rnode);
7745 Buffer vmbuffer = InvalidBuffer;
7747 visibilitymap_pin(reln, newblk, &vmbuffer);
7748 visibilitymap_clear(reln, newblk, vmbuffer);
7749 ReleaseBuffer(vmbuffer);
7750 FreeFakeRelcacheEntry(reln);
7753 /* Deal with new tuple */
7754 if (newaction == BLK_NEEDS_REDO)
7761 recdata = XLogRecGetBlockData(record, 0, &datalen);
7762 recdata_end = recdata + datalen;
7764 page = BufferGetPage(nbuffer);
7766 offnum = xlrec->new_offnum;
7767 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
7768 elog(PANIC, "heap_update_redo: invalid max offset number");
7770 if (xlrec->flags & XLOG_HEAP_PREFIX_FROM_OLD)
7772 Assert(newblk == oldblk);
7773 memcpy(&prefixlen, recdata, sizeof(uint16));
7774 recdata += sizeof(uint16);
7776 if (xlrec->flags & XLOG_HEAP_SUFFIX_FROM_OLD)
7778 Assert(newblk == oldblk);
7779 memcpy(&suffixlen, recdata, sizeof(uint16));
7780 recdata += sizeof(uint16);
7783 memcpy((char *) &xlhdr, recdata, SizeOfHeapHeader);
7784 recdata += SizeOfHeapHeader;
7786 tuplen = recdata_end - recdata;
7787 Assert(tuplen <= MaxHeapTupleSize);
7790 MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
7793 * Reconstruct the new tuple using the prefix and/or suffix from the
7794 * old tuple, and the data stored in the WAL record.
7796 newp = (char *) htup + offsetof(HeapTupleHeaderData, t_bits);
7801 /* copy bitmap [+ padding] [+ oid] from WAL record */
7802 len = xlhdr.t_hoff - offsetof(HeapTupleHeaderData, t_bits);
7803 memcpy(newp, recdata, len);
7807 /* copy prefix from old tuple */
7808 memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
7811 /* copy new tuple data from WAL record */
7812 len = tuplen - (xlhdr.t_hoff - offsetof(HeapTupleHeaderData, t_bits));
7813 memcpy(newp, recdata, len);
7820 * copy bitmap [+ padding] [+ oid] + data from record, all in one go.
7823 memcpy(newp, recdata, tuplen);
7827 Assert(recdata == recdata_end);
7829 /* copy suffix from old tuple */
7831 memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
7833 newlen = offsetof(HeapTupleHeaderData, t_bits) + tuplen + prefixlen + suffixlen;
7834 htup->t_infomask2 = xlhdr.t_infomask2;
7835 htup->t_infomask = xlhdr.t_infomask;
7836 htup->t_hoff = xlhdr.t_hoff;
7838 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
7839 HeapTupleHeaderSetCmin(htup, FirstCommandId);
7840 HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
7841 /* Make sure there is no forward chain link in t_ctid */
7842 htup->t_ctid = newtid;
7844 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
7845 if (offnum == InvalidOffsetNumber)
7846 elog(PANIC, "heap_update_redo: failed to add tuple");
7848 if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
7849 PageClearAllVisible(page);
7851 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7853 PageSetLSN(page, lsn);
7854 MarkBufferDirty(nbuffer);
7857 if (BufferIsValid(nbuffer) && nbuffer != obuffer)
7858 UnlockReleaseBuffer(nbuffer);
7859 if (BufferIsValid(obuffer))
7860 UnlockReleaseBuffer(obuffer);
7863 * If the new page is running low on free space, update the FSM as well.
7864 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
7865 * better than that without knowing the fill-factor for the table.
7867 * However, don't update the FSM on HOT updates, because after crash
7868 * recovery, either the old or the new tuple will certainly be dead and
7869 * prunable. After pruning, the page will have roughly as much free space
7870 * as it did before the update, assuming the new tuple is about the same
7871 * size as the old one.
7873 * XXX: Don't do this if the page was restored from full page image. We
7874 * don't bother to update the FSM in that case, it doesn't need to be
7875 * totally accurate anyway.
7877 if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
7878 XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
7882 heap_xlog_lock(XLogReaderState *record)
7884 XLogRecPtr lsn = record->EndRecPtr;
7885 xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
7888 OffsetNumber offnum;
7890 HeapTupleHeader htup;
7892 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
7894 page = (Page) BufferGetPage(buffer);
7896 offnum = xlrec->offnum;
7897 if (PageGetMaxOffsetNumber(page) >= offnum)
7898 lp = PageGetItemId(page, offnum);
7900 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
7901 elog(PANIC, "heap_lock_redo: invalid lp");
7903 htup = (HeapTupleHeader) PageGetItem(page, lp);
7905 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
7906 &htup->t_infomask2);
7909 * Clear relevant update flags, but only if the modified infomask says
7910 * there's no update.
7912 if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
7914 HeapTupleHeaderClearHotUpdated(htup);
7915 /* Make sure there is no forward chain link in t_ctid */
7916 ItemPointerSet(&htup->t_ctid,
7917 BufferGetBlockNumber(buffer),
7920 HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
7921 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
7922 PageSetLSN(page, lsn);
7923 MarkBufferDirty(buffer);
7925 if (BufferIsValid(buffer))
7926 UnlockReleaseBuffer(buffer);
7930 heap_xlog_lock_updated(XLogReaderState *record)
7932 XLogRecPtr lsn = record->EndRecPtr;
7933 xl_heap_lock_updated *xlrec;
7936 OffsetNumber offnum;
7938 HeapTupleHeader htup;
7940 xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);
7942 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
7944 page = BufferGetPage(buffer);
7946 offnum = xlrec->offnum;
7947 if (PageGetMaxOffsetNumber(page) >= offnum)
7948 lp = PageGetItemId(page, offnum);
7950 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
7951 elog(PANIC, "heap_xlog_lock_updated: invalid lp");
7953 htup = (HeapTupleHeader) PageGetItem(page, lp);
7955 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
7956 &htup->t_infomask2);
7957 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
7959 PageSetLSN(page, lsn);
7960 MarkBufferDirty(buffer);
7962 if (BufferIsValid(buffer))
7963 UnlockReleaseBuffer(buffer);
7967 heap_xlog_inplace(XLogReaderState *record)
7969 XLogRecPtr lsn = record->EndRecPtr;
7970 xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
7973 OffsetNumber offnum;
7975 HeapTupleHeader htup;
7979 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
7981 char *newtup = XLogRecGetBlockData(record, 0, &newlen);
7983 page = BufferGetPage(buffer);
7985 offnum = xlrec->offnum;
7986 if (PageGetMaxOffsetNumber(page) >= offnum)
7987 lp = PageGetItemId(page, offnum);
7989 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
7990 elog(PANIC, "heap_inplace_redo: invalid lp");
7992 htup = (HeapTupleHeader) PageGetItem(page, lp);
7994 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
7995 if (oldlen != newlen)
7996 elog(PANIC, "heap_inplace_redo: wrong tuple length");
7998 memcpy((char *) htup + htup->t_hoff, newtup, newlen);
8000 PageSetLSN(page, lsn);
8001 MarkBufferDirty(buffer);
8003 if (BufferIsValid(buffer))
8004 UnlockReleaseBuffer(buffer);
8008 heap_redo(XLogReaderState *record)
8010 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8013 * These operations don't overwrite MVCC data so no conflict processing is
8014 * required. The ones in heap2 rmgr do.
8017 switch (info & XLOG_HEAP_OPMASK)
8019 case XLOG_HEAP_INSERT:
8020 heap_xlog_insert(record);
8022 case XLOG_HEAP_DELETE:
8023 heap_xlog_delete(record);
8025 case XLOG_HEAP_UPDATE:
8026 heap_xlog_update(record, false);
8028 case XLOG_HEAP_HOT_UPDATE:
8029 heap_xlog_update(record, true);
8031 case XLOG_HEAP_LOCK:
8032 heap_xlog_lock(record);
8034 case XLOG_HEAP_INPLACE:
8035 heap_xlog_inplace(record);
8038 elog(PANIC, "heap_redo: unknown op code %u", info);
8043 heap2_redo(XLogReaderState *record)
8045 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8047 switch (info & XLOG_HEAP_OPMASK)
8049 case XLOG_HEAP2_CLEAN:
8050 heap_xlog_clean(record);
8052 case XLOG_HEAP2_FREEZE_PAGE:
8053 heap_xlog_freeze_page(record);
8055 case XLOG_HEAP2_CLEANUP_INFO:
8056 heap_xlog_cleanup_info(record);
8058 case XLOG_HEAP2_VISIBLE:
8059 heap_xlog_visible(record);
8061 case XLOG_HEAP2_MULTI_INSERT:
8062 heap_xlog_multi_insert(record);
8064 case XLOG_HEAP2_LOCK_UPDATED:
8065 heap_xlog_lock_updated(record);
8067 case XLOG_HEAP2_NEW_CID:
8070 * Nothing to do on a real replay, only used during logical decoding.
8074 case XLOG_HEAP2_REWRITE:
8075 heap_xlog_logical_rewrite(record);
8078 elog(PANIC, "heap2_redo: unknown op code %u", info);
8083 * heap_sync - sync a heap, for use when no WAL has been written
8085 * This forces the heap contents (including TOAST heap if any) down to disk.
8086 * If we skipped using WAL, and WAL is otherwise needed, we must force the
8087 * relation down to disk before it's safe to commit the transaction. This
8088 * requires writing out any dirty buffers and then doing a forced fsync.
8090 * Indexes are not touched. (Currently, index operations associated with
8091 * the commands that use this are WAL-logged and so do not need fsync.
8092 * That behavior might change someday, but in any case it's likely that
8093 * any fsync decisions required would be per-index and hence not appropriate to be done here.)
8097 heap_sync(Relation rel)
8099 /* non-WAL-logged tables never need fsync */
8100 if (!RelationNeedsWAL(rel))
8104 FlushRelationBuffers(rel);
8105 /* FlushRelationBuffers will have opened rd_smgr */
8106 smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);
8108 /* FSM is not critical, don't bother syncing it */
8110 /* toast heap, if any */
8111 if (OidIsValid(rel->rd_rel->reltoastrelid))
8115 toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
8116 FlushRelationBuffers(toastrel);
8117 smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
8118 heap_close(toastrel, AccessShareLock);
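/*
 * Editorial sketch (not part of the original source): the bulk-load pattern
 * that heap_sync() exists for.  COPY and similar commands may insert with
 * HEAP_INSERT_SKIP_WAL when the relation was created in the current
 * transaction and WAL is not otherwise needed, and then call heap_sync()
 * once before commit.  The condition shown is a simplification; see the
 * actual callers (e.g. copy.c) for the real test.
 */
static void
bulk_load_sketch(Relation rel, HeapTuple *tuples, int ntuples)
{
	int			options = HEAP_INSERT_SKIP_FSM;
	BulkInsertState bistate = GetBulkInsertState();
	int			i;

	if (!XLogIsNeeded() && rel->rd_createSubid != InvalidSubTransactionId)
		options |= HEAP_INSERT_SKIP_WAL;

	for (i = 0; i < ntuples; i++)
		(void) heap_insert(rel, tuples[i], GetCurrentCommandId(true),
						   options, bistate);

	FreeBulkInsertState(bistate);

	/* If WAL was skipped, force the data to disk before commit. */
	if (options & HEAP_INSERT_SKIP_WAL)
		heap_sync(rel);
}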