1 /*-------------------------------------------------------------------------
4 * heap access method code
6 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/access/heap/heapam.c
15 * relation_open - open any relation by relation OID
16 * relation_openrv - open any relation specified by a RangeVar
17 * relation_close - close any relation
18 * heap_open - open a heap relation by relation OID
19 * heap_openrv - open a heap relation specified by a RangeVar
20 * heap_close - (now just a macro for relation_close)
21 * heap_beginscan - begin relation scan
22 * heap_rescan - restart a relation scan
23 * heap_endscan - end relation scan
24 * heap_getnext - retrieve next tuple in scan
25 * heap_fetch - retrieve tuple with given tid
26 * heap_insert - insert tuple into a relation
27 * heap_multi_insert - insert multiple tuples into a relation
28 * heap_delete - delete a tuple from a relation
29 * heap_update - replace a tuple in a relation with another tuple
30 * heap_markpos - mark scan position
31 * heap_restrpos - restore position to marked location
32 * heap_sync - sync heap, for when no WAL has been written
35 * This file contains the heap_ routines which implement
36 * the POSTGRES heap access method used for all POSTGRES
39 *-------------------------------------------------------------------------
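/*
 * Illustrative caller-side sketch (not part of this file; "relid" is a
 * hypothetical relation OID): open a relation, scan it under the current
 * MVCC snapshot with the routines listed above, then clean up.
 *
 *		Relation	rel = heap_open(relid, AccessShareLock);
 *		HeapScanDesc scan = heap_beginscan(rel, GetTransactionSnapshot(),
 *										   0, NULL);
 *		HeapTuple	tuple;
 *
 *		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *		{
 *			... process tuple; it is only valid until the next call ...
 *		}
 *		heap_endscan(scan);
 *		heap_close(rel, AccessShareLock);
 */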
43 #include "access/heapam.h"
44 #include "access/heapam_xlog.h"
45 #include "access/hio.h"
46 #include "access/multixact.h"
47 #include "access/relscan.h"
48 #include "access/sysattr.h"
49 #include "access/transam.h"
50 #include "access/tuptoaster.h"
51 #include "access/valid.h"
52 #include "access/visibilitymap.h"
53 #include "access/xact.h"
54 #include "access/xlogutils.h"
55 #include "catalog/catalog.h"
56 #include "catalog/namespace.h"
57 #include "miscadmin.h"
59 #include "storage/bufmgr.h"
60 #include "storage/freespace.h"
61 #include "storage/lmgr.h"
62 #include "storage/predicate.h"
63 #include "storage/procarray.h"
64 #include "storage/smgr.h"
65 #include "storage/standby.h"
66 #include "utils/datum.h"
67 #include "utils/inval.h"
68 #include "utils/lsyscache.h"
69 #include "utils/relcache.h"
70 #include "utils/snapmgr.h"
71 #include "utils/syscache.h"
72 #include "utils/tqual.h"
76 bool synchronize_seqscans = true;
79 static HeapScanDesc heap_beginscan_internal(Relation relation,
81 int nkeys, ScanKey key,
82 bool allow_strat, bool allow_sync,
83 bool is_bitmapscan, bool temp_snap);
84 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
85 TransactionId xid, CommandId cid, int options);
86 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
87 Buffer newbuf, HeapTuple oldtup,
88 HeapTuple newtup, HeapTuple old_key_tup,
89 bool all_visible_cleared, bool new_all_visible_cleared);
90 static void HeapSatisfiesHOTandKeyUpdate(Relation relation,
92 Bitmapset *key_attrs, Bitmapset *id_attrs,
93 bool *satisfies_hot, bool *satisfies_key,
95 HeapTuple oldtup, HeapTuple newtup);
96 static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
97 uint16 old_infomask2, TransactionId add_to_xmax,
98 LockTupleMode mode, bool is_update,
99 TransactionId *result_xmax, uint16 *result_infomask,
100 uint16 *result_infomask2);
101 static HTSU_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
102 ItemPointer ctid, TransactionId xid,
104 static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
105 uint16 *new_infomask2);
106 static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
108 static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
109 Relation rel, ItemPointer ctid, XLTW_Oper oper,
111 static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
112 uint16 infomask, Relation rel, int *remaining);
113 static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
114 static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
119 * Each tuple lock mode has a corresponding heavyweight lock, and one or two
120 * corresponding MultiXactStatuses (one to merely lock tuples, another one to
121 * update them). This table (and the macros below) helps us determine the
122 * heavyweight lock mode and MultiXactStatus values to use for any particular
123 * tuple lock strength.
125 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
135 tupleLockExtraInfo[MaxLockTupleMode + 1] =
137 { /* LockTupleKeyShare */
139 MultiXactStatusForKeyShare,
140 -1 /* KeyShare does not allow updating tuples */
142 { /* LockTupleShare */
144 MultiXactStatusForShare,
145 -1 /* Share does not allow updating tuples */
147 { /* LockTupleNoKeyExclusive */
149 MultiXactStatusForNoKeyUpdate,
150 MultiXactStatusNoKeyUpdate
152 { /* LockTupleExclusive */
154 MultiXactStatusForUpdate,
155 MultiXactStatusUpdate
159 /* Get the LOCKMODE for a given MultiXactStatus */
160 #define LOCKMODE_from_mxstatus(status) \
161 (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
164 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
165 * This is more readable than having every caller translate it to lock.h's
168 #define LockTupleTuplock(rel, tup, mode) \
169 LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
170 #define UnlockTupleTuplock(rel, tup, mode) \
171 UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
172 #define ConditionalLockTupleTuplock(rel, tup, mode) \
173 ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
176 * This table maps each MultiXactStatus value to the corresponding
177 * tuple lock strength value.
179 static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
181 LockTupleKeyShare, /* ForKeyShare */
182 LockTupleShare, /* ForShare */
183 LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
184 LockTupleExclusive, /* ForUpdate */
185 LockTupleNoKeyExclusive, /* NoKeyUpdate */
186 LockTupleExclusive /* Update */
189 /* Get the LockTupleMode for a given MultiXactStatus */
190 #define TUPLOCK_from_mxstatus(status) \
191 (MultiXactStatusLock[(status)])
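/*
 * Illustrative round trip through these tables (a sketch, not used by the
 * code): a FOR SHARE tuple lock corresponds to LockTupleShare; stored in a
 * multixact it becomes MultiXactStatusForShare, and the macros above recover
 * the original strength and its heavyweight lock mode:
 *
 *		MultiXactStatus status = MultiXactStatusForShare;
 *		LockTupleMode	mode = TUPLOCK_from_mxstatus(status);
 *		LOCKMODE		hwlock = LOCKMODE_from_mxstatus(status);
 */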
193 /* ----------------------------------------------------------------
194 * heap support routines
195 * ----------------------------------------------------------------
199 * initscan - scan code common to heap_beginscan and heap_rescan
203 initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
209 * Determine the number of blocks we have to scan.
211 * It is sufficient to do this once at scan start, since any tuples added
212 * while the scan is in progress will be invisible to my snapshot anyway.
213 * (That is not true when using a non-MVCC snapshot. However, we couldn't
214 * guarantee to return tuples added after scan start anyway, since they
215 * might go into pages we already scanned. To guarantee consistent
216 * results for a non-MVCC snapshot, the caller must hold some higher-level
217 * lock that ensures the interesting tuple(s) won't change.)
219 scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
222 * If the table is large relative to NBuffers, use a bulk-read access
223 * strategy and enable synchronized scanning (see syncscan.c). Although
224 * the thresholds for these features could be different, we make them the
225 * same so that there are only two behaviors to tune rather than four.
226 * (However, some callers need to be able to disable one or both of these
227 * behaviors, independently of the size of the table; also there is a GUC
228 * variable that can disable synchronized scanning.)
230 * During a rescan, don't make a new strategy object if we don't have to.
232 if (!RelationUsesLocalBuffers(scan->rs_rd) &&
233 scan->rs_nblocks > NBuffers / 4)
235 allow_strat = scan->rs_allow_strat;
236 allow_sync = scan->rs_allow_sync;
239 allow_strat = allow_sync = false;
243 if (scan->rs_strategy == NULL)
244 scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
248 if (scan->rs_strategy != NULL)
249 FreeAccessStrategy(scan->rs_strategy);
250 scan->rs_strategy = NULL;
256 * If rescan, keep the previous startblock setting so that rewinding a
257 * cursor doesn't generate surprising results. Reset the syncscan
260 scan->rs_syncscan = (allow_sync && synchronize_seqscans);
262 else if (allow_sync && synchronize_seqscans)
264 scan->rs_syncscan = true;
265 scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
269 scan->rs_syncscan = false;
270 scan->rs_startblock = 0;
273 scan->rs_inited = false;
274 scan->rs_ctup.t_data = NULL;
275 ItemPointerSetInvalid(&scan->rs_ctup.t_self);
276 scan->rs_cbuf = InvalidBuffer;
277 scan->rs_cblock = InvalidBlockNumber;
279 /* we don't have a marked position... */
280 ItemPointerSetInvalid(&(scan->rs_mctid));
282 /* page-at-a-time fields are always invalid when not rs_inited */
285 * copy the scan key, if appropriate
288 memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));
291 * Currently, we don't have a stats counter for bitmap heap scans (but the
292 * underlying bitmap index scans will be counted).
294 if (!scan->rs_bitmapscan)
295 pgstat_count_heap_scan(scan->rs_rd);
299 * heapgetpage - subroutine for heapgettup()
301 * This routine reads and pins the specified page of the relation.
302 * In page-at-a-time mode it performs additional work, namely determining
303 * which tuples on the page are visible.
306 heapgetpage(HeapScanDesc scan, BlockNumber page)
313 OffsetNumber lineoff;
317 Assert(page < scan->rs_nblocks);
319 /* release previous scan buffer, if any */
320 if (BufferIsValid(scan->rs_cbuf))
322 ReleaseBuffer(scan->rs_cbuf);
323 scan->rs_cbuf = InvalidBuffer;
327 * Be sure to check for interrupts at least once per page. Checks at
328 * higher code levels won't be able to stop a seqscan that encounters many
329 * pages' worth of consecutive dead tuples.
331 CHECK_FOR_INTERRUPTS();
333 /* read page using selected strategy */
334 scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page,
335 RBM_NORMAL, scan->rs_strategy);
336 scan->rs_cblock = page;
338 if (!scan->rs_pageatatime)
341 buffer = scan->rs_cbuf;
342 snapshot = scan->rs_snapshot;
345 * Prune and repair fragmentation for the whole page, if possible.
347 heap_page_prune_opt(scan->rs_rd, buffer);
350 * We must hold share lock on the buffer content while examining tuple
351 * visibility. Afterwards, however, the tuples we have found to be
352 * visible are guaranteed good as long as we hold the buffer pin.
354 LockBuffer(buffer, BUFFER_LOCK_SHARE);
356 dp = (Page) BufferGetPage(buffer);
357 lines = PageGetMaxOffsetNumber(dp);
361 * If the all-visible flag indicates that all tuples on the page are
362 * visible to everyone, we can skip the per-tuple visibility tests.
364 * Note: In hot standby, a tuple that's already visible to all
365 * transactions in the master might still be invisible to a read-only
366 * transaction in the standby. We partly handle this problem by tracking
367 * the minimum xmin of visible tuples as the cut-off XID while marking a
368 * page all-visible on the master and WAL-logging that along with the
369 * visibility map SET operation. In hot standby, we wait for (or abort) all
370 * transactions that could potentially fail to see one or more tuples on the
371 * page. That's how index-only scans work fine in hot standby. A crucial
372 * difference between index-only scans and heap scans is that the
373 * index-only scan relies completely on the visibility map, whereas a heap
374 * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
375 * the page-level flag can be trusted in the same way, because it might
376 * get propagated somehow without being explicitly WAL-logged, e.g. via a
377 * full page write. Until we can prove that beyond doubt, let's check each
378 * tuple for visibility the hard way.
380 all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
382 for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
386 if (ItemIdIsNormal(lpp))
388 HeapTupleData loctup;
391 loctup.t_tableOid = RelationGetRelid(scan->rs_rd);
392 loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
393 loctup.t_len = ItemIdGetLength(lpp);
394 ItemPointerSet(&(loctup.t_self), page, lineoff);
399 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
401 CheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
405 scan->rs_vistuples[ntup++] = lineoff;
409 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
411 Assert(ntup <= MaxHeapTuplesPerPage);
412 scan->rs_ntuples = ntup;
416 * heapgettup - fetch next heap tuple
418 * Initialize the scan if not already done; then advance to the next
419 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
420 * or set scan->rs_ctup.t_data = NULL if no more tuples.
422 * dir == NoMovementScanDirection means "re-fetch the tuple indicated
425 * Note: the reason nkeys/key are passed separately, even though they are
426 * kept in the scan descriptor, is that the caller may not want us to check
429 * Note: when we fall off the end of the scan in either direction, we
430 * reset rs_inited. This means that a further request with the same
431 * scan direction will restart the scan, which is a bit odd, but a
432 * request with the opposite scan direction will start a fresh scan
433 * in the proper direction. The latter is required behavior for cursors,
434 * while the former case is generally undefined behavior in Postgres
435 * so we don't care too much.
439 heapgettup(HeapScanDesc scan,
444 HeapTuple tuple = &(scan->rs_ctup);
445 Snapshot snapshot = scan->rs_snapshot;
446 bool backward = ScanDirectionIsBackward(dir);
451 OffsetNumber lineoff;
456 * calculate next starting lineoff, given scan direction
458 if (ScanDirectionIsForward(dir))
460 if (!scan->rs_inited)
463 * return null immediately if relation is empty
465 if (scan->rs_nblocks == 0)
467 Assert(!BufferIsValid(scan->rs_cbuf));
468 tuple->t_data = NULL;
471 page = scan->rs_startblock; /* first page */
472 heapgetpage(scan, page);
473 lineoff = FirstOffsetNumber; /* first offnum */
474 scan->rs_inited = true;
478 /* continue from previously returned page/tuple */
479 page = scan->rs_cblock; /* current page */
480 lineoff = /* next offnum */
481 OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
484 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
486 dp = (Page) BufferGetPage(scan->rs_cbuf);
487 lines = PageGetMaxOffsetNumber(dp);
488 /* page and lineoff now reference the physically next tid */
490 linesleft = lines - lineoff + 1;
494 if (!scan->rs_inited)
497 * return null immediately if relation is empty
499 if (scan->rs_nblocks == 0)
501 Assert(!BufferIsValid(scan->rs_cbuf));
502 tuple->t_data = NULL;
507 * Disable reporting to syncscan logic in a backwards scan; it's
508 * not very likely anyone else is doing the same thing at the same
509 * time, and much more likely that we'll just bollix things for
512 scan->rs_syncscan = false;
513 /* start from last page of the scan */
514 if (scan->rs_startblock > 0)
515 page = scan->rs_startblock - 1;
517 page = scan->rs_nblocks - 1;
518 heapgetpage(scan, page);
522 /* continue from previously returned page/tuple */
523 page = scan->rs_cblock; /* current page */
526 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
528 dp = (Page) BufferGetPage(scan->rs_cbuf);
529 lines = PageGetMaxOffsetNumber(dp);
531 if (!scan->rs_inited)
533 lineoff = lines; /* final offnum */
534 scan->rs_inited = true;
538 lineoff = /* previous offnum */
539 OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
541 /* page and lineoff now reference the physically previous tid */
548 * ``no movement'' scan direction: refetch prior tuple
550 if (!scan->rs_inited)
552 Assert(!BufferIsValid(scan->rs_cbuf));
553 tuple->t_data = NULL;
557 page = ItemPointerGetBlockNumber(&(tuple->t_self));
558 if (page != scan->rs_cblock)
559 heapgetpage(scan, page);
561 /* Since the tuple was previously fetched, needn't lock page here */
562 dp = (Page) BufferGetPage(scan->rs_cbuf);
563 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
564 lpp = PageGetItemId(dp, lineoff);
565 Assert(ItemIdIsNormal(lpp));
567 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
568 tuple->t_len = ItemIdGetLength(lpp);
574 * advance the scan until we find a qualifying tuple or run out of stuff
577 lpp = PageGetItemId(dp, lineoff);
580 while (linesleft > 0)
582 if (ItemIdIsNormal(lpp))
586 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
587 tuple->t_len = ItemIdGetLength(lpp);
588 ItemPointerSet(&(tuple->t_self), page, lineoff);
591 * if current tuple qualifies, return it.
593 valid = HeapTupleSatisfiesVisibility(tuple,
597 CheckForSerializableConflictOut(valid, scan->rs_rd, tuple,
598 scan->rs_cbuf, snapshot);
600 if (valid && key != NULL)
601 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
606 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
612 * otherwise move to the next item on the page
617 --lpp; /* move back in this page's ItemId array */
622 ++lpp; /* move forward in this page's ItemId array */
628 * if we get here, it means we've exhausted the items on this page and
629 * it's time to move to the next.
631 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
634 * advance to next/prior page and detect end of scan
638 finished = (page == scan->rs_startblock);
640 page = scan->rs_nblocks;
646 if (page >= scan->rs_nblocks)
648 finished = (page == scan->rs_startblock);
651 * Report our new scan position for synchronization purposes. We
652 * don't do that when moving backwards, however. That would just
653 * mess up any other forward-moving scanners.
655 * Note: we do this before checking for end of scan so that the
656 * final state of the position hint is back at the start of the
657 * rel. That's not strictly necessary, but otherwise when you run
658 * the same query multiple times the starting position would shift
659 * a little bit backwards on every invocation, which is confusing.
660 * We don't guarantee any specific ordering in general, though.
662 if (scan->rs_syncscan)
663 ss_report_location(scan->rs_rd, page);
667 * return NULL if we've exhausted all the pages
671 if (BufferIsValid(scan->rs_cbuf))
672 ReleaseBuffer(scan->rs_cbuf);
673 scan->rs_cbuf = InvalidBuffer;
674 scan->rs_cblock = InvalidBlockNumber;
675 tuple->t_data = NULL;
676 scan->rs_inited = false;
680 heapgetpage(scan, page);
682 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
684 dp = (Page) BufferGetPage(scan->rs_cbuf);
685 lines = PageGetMaxOffsetNumber((Page) dp);
690 lpp = PageGetItemId(dp, lines);
694 lineoff = FirstOffsetNumber;
695 lpp = PageGetItemId(dp, FirstOffsetNumber);
701 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
703 * Same API as heapgettup, but used in page-at-a-time mode
705 * The internal logic is much the same as heapgettup's too, but there are some
706 * differences: we do not take the buffer content lock (that only needs to
707 * happen inside heapgetpage), and we iterate through just the tuples listed
708 * in rs_vistuples[] rather than all tuples on the page. Notice that
709 * lineindex is 0-based, where the corresponding loop variable lineoff in
710 * heapgettup is 1-based.
714 heapgettup_pagemode(HeapScanDesc scan,
719 HeapTuple tuple = &(scan->rs_ctup);
720 bool backward = ScanDirectionIsBackward(dir);
726 OffsetNumber lineoff;
731 * calculate next starting lineindex, given scan direction
733 if (ScanDirectionIsForward(dir))
735 if (!scan->rs_inited)
738 * return null immediately if relation is empty
740 if (scan->rs_nblocks == 0)
742 Assert(!BufferIsValid(scan->rs_cbuf));
743 tuple->t_data = NULL;
746 page = scan->rs_startblock; /* first page */
747 heapgetpage(scan, page);
749 scan->rs_inited = true;
753 /* continue from previously returned page/tuple */
754 page = scan->rs_cblock; /* current page */
755 lineindex = scan->rs_cindex + 1;
758 dp = (Page) BufferGetPage(scan->rs_cbuf);
759 lines = scan->rs_ntuples;
760 /* page and lineindex now reference the next visible tid */
762 linesleft = lines - lineindex;
766 if (!scan->rs_inited)
769 * return null immediately if relation is empty
771 if (scan->rs_nblocks == 0)
773 Assert(!BufferIsValid(scan->rs_cbuf));
774 tuple->t_data = NULL;
779 * Disable reporting to syncscan logic in a backwards scan; it's
780 * not very likely anyone else is doing the same thing at the same
781 * time, and much more likely that we'll just bollix things for
784 scan->rs_syncscan = false;
785 /* start from last page of the scan */
786 if (scan->rs_startblock > 0)
787 page = scan->rs_startblock - 1;
789 page = scan->rs_nblocks - 1;
790 heapgetpage(scan, page);
794 /* continue from previously returned page/tuple */
795 page = scan->rs_cblock; /* current page */
798 dp = (Page) BufferGetPage(scan->rs_cbuf);
799 lines = scan->rs_ntuples;
801 if (!scan->rs_inited)
803 lineindex = lines - 1;
804 scan->rs_inited = true;
808 lineindex = scan->rs_cindex - 1;
810 /* page and lineindex now reference the previous visible tid */
812 linesleft = lineindex + 1;
817 * ``no movement'' scan direction: refetch prior tuple
819 if (!scan->rs_inited)
821 Assert(!BufferIsValid(scan->rs_cbuf));
822 tuple->t_data = NULL;
826 page = ItemPointerGetBlockNumber(&(tuple->t_self));
827 if (page != scan->rs_cblock)
828 heapgetpage(scan, page);
830 /* Since the tuple was previously fetched, needn't lock page here */
831 dp = (Page) BufferGetPage(scan->rs_cbuf);
832 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
833 lpp = PageGetItemId(dp, lineoff);
834 Assert(ItemIdIsNormal(lpp));
836 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
837 tuple->t_len = ItemIdGetLength(lpp);
839 /* check that rs_cindex is in sync */
840 Assert(scan->rs_cindex < scan->rs_ntuples);
841 Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
847 * advance the scan until we find a qualifying tuple or run out of stuff
852 while (linesleft > 0)
854 lineoff = scan->rs_vistuples[lineindex];
855 lpp = PageGetItemId(dp, lineoff);
856 Assert(ItemIdIsNormal(lpp));
858 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
859 tuple->t_len = ItemIdGetLength(lpp);
860 ItemPointerSet(&(tuple->t_self), page, lineoff);
863 * if current tuple qualifies, return it.
869 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
873 scan->rs_cindex = lineindex;
879 scan->rs_cindex = lineindex;
884 * otherwise move to the next item on the page
894 * if we get here, it means we've exhausted the items on this page and
895 * it's time to move to the next.
899 finished = (page == scan->rs_startblock);
901 page = scan->rs_nblocks;
907 if (page >= scan->rs_nblocks)
909 finished = (page == scan->rs_startblock);
912 * Report our new scan position for synchronization purposes. We
913 * don't do that when moving backwards, however. That would just
914 * mess up any other forward-moving scanners.
916 * Note: we do this before checking for end of scan so that the
917 * final state of the position hint is back at the start of the
918 * rel. That's not strictly necessary, but otherwise when you run
919 * the same query multiple times the starting position would shift
920 * a little bit backwards on every invocation, which is confusing.
921 * We don't guarantee any specific ordering in general, though.
923 if (scan->rs_syncscan)
924 ss_report_location(scan->rs_rd, page);
928 * return NULL if we've exhausted all the pages
932 if (BufferIsValid(scan->rs_cbuf))
933 ReleaseBuffer(scan->rs_cbuf);
934 scan->rs_cbuf = InvalidBuffer;
935 scan->rs_cblock = InvalidBlockNumber;
936 tuple->t_data = NULL;
937 scan->rs_inited = false;
941 heapgetpage(scan, page);
943 dp = (Page) BufferGetPage(scan->rs_cbuf);
944 lines = scan->rs_ntuples;
947 lineindex = lines - 1;
954 #if defined(DISABLE_COMPLEX_MACRO)
956 * This is formatted oddly so that the correspondence to the macro
957 * definition in access/htup_details.h is maintained.
960 fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
967 HeapTupleNoNulls(tup) ?
969 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
971 fetchatt((tupleDesc)->attrs[(attnum) - 1],
972 (char *) (tup)->t_data + (tup)->t_data->t_hoff +
973 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
976 nocachegetattr((tup), (attnum), (tupleDesc))
980 att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
987 nocachegetattr((tup), (attnum), (tupleDesc))
997 #endif /* defined(DISABLE_COMPLEX_MACRO) */
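/*
 * Illustrative use of fastgetattr() (a sketch; "tup" and "tupleDesc" are
 * assumed to describe the same relation):
 *
 *		bool	isnull;
 *		Datum	value = fastgetattr(tup, 1, tupleDesc, &isnull);
 *
 *		if (!isnull)
 *			... interpret value according to the attribute's type ...
 *
 * Most callers should use the heap_getattr() macro instead, which also
 * handles system attributes.
 */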
1000 /* ----------------------------------------------------------------
1001 * heap access method interface
1002 * ----------------------------------------------------------------
1006 * relation_open - open any relation by relation OID
1008 * If lockmode is not "NoLock", the specified kind of lock is
1009 * obtained on the relation. (Generally, NoLock should only be
1010 * used if the caller knows it has some appropriate lock on the
1011 * relation already.)
1013 * An error is raised if the relation does not exist.
1015 * NB: a "relation" is anything with a pg_class entry. The caller is
1016 * expected to check whether the relkind is something it can handle.
1020 relation_open(Oid relationId, LOCKMODE lockmode)
1024 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1026 /* Get the lock before trying to open the relcache entry */
1027 if (lockmode != NoLock)
1028 LockRelationOid(relationId, lockmode);
1030 /* The relcache does all the real work... */
1031 r = RelationIdGetRelation(relationId);
1033 if (!RelationIsValid(r))
1034 elog(ERROR, "could not open relation with OID %u", relationId);
1036 /* Make note that we've accessed a temporary relation */
1037 if (RelationUsesLocalBuffers(r))
1038 MyXactAccessedTempRel = true;
1040 pgstat_initstats(r);
1046 * try_relation_open - open any relation by relation OID
1048 * Same as relation_open, except return NULL instead of failing
1049 * if the relation does not exist.
1053 try_relation_open(Oid relationId, LOCKMODE lockmode)
1057 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1059 /* Get the lock first */
1060 if (lockmode != NoLock)
1061 LockRelationOid(relationId, lockmode);
1064 * Now that we have the lock, probe to see if the relation really exists
1067 if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relationId)))
1069 /* Release useless lock */
1070 if (lockmode != NoLock)
1071 UnlockRelationOid(relationId, lockmode);
1076 /* Should be safe to do a relcache load */
1077 r = RelationIdGetRelation(relationId);
1079 if (!RelationIsValid(r))
1080 elog(ERROR, "could not open relation with OID %u", relationId);
1082 /* Make note that we've accessed a temporary relation */
1083 if (RelationUsesLocalBuffers(r))
1084 MyXactAccessedTempRel = true;
1086 pgstat_initstats(r);
1092 * relation_openrv - open any relation specified by a RangeVar
1094 * Same as relation_open, but the relation is specified by a RangeVar.
1098 relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
1103 * Check for shared-cache-inval messages before trying to open the
1104 * relation. This is needed even if we already hold a lock on the
1105 * relation, because GRANT/REVOKE are executed without taking any lock on
1106 * the target relation, and we want to be sure we see current ACL
1107 * information. We can skip this if asked for NoLock, on the assumption
1108 * that such a call is not the first one in the current command, and so we
1109 * should be reasonably up-to-date already. (XXX this all could stand to
1110 * be redesigned, but for the moment we'll keep doing this like it's been
1111 * done historically.)
1113 if (lockmode != NoLock)
1114 AcceptInvalidationMessages();
1116 /* Look up and lock the appropriate relation using namespace search */
1117 relOid = RangeVarGetRelid(relation, lockmode, false);
1119 /* Let relation_open do the rest */
1120 return relation_open(relOid, NoLock);
1124 * relation_openrv_extended - open any relation specified by a RangeVar
1126 * Same as relation_openrv, but with an additional missing_ok argument
1127 * allowing a NULL return rather than an error if the relation is not
1128 * found. (Note that some other causes, such as permissions problems,
1129 * will still result in an ereport.)
1133 relation_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
1139 * Check for shared-cache-inval messages before trying to open the
1140 * relation. See comments in relation_openrv().
1142 if (lockmode != NoLock)
1143 AcceptInvalidationMessages();
1145 /* Look up and lock the appropriate relation using namespace search */
1146 relOid = RangeVarGetRelid(relation, lockmode, missing_ok);
1148 /* Return NULL on not-found */
1149 if (!OidIsValid(relOid))
1152 /* Let relation_open do the rest */
1153 return relation_open(relOid, NoLock);
1157 * relation_close - close any relation
1159 * If lockmode is not "NoLock", we then release the specified lock.
1161 * Note that it is often sensible to hold a lock beyond relation_close;
1162 * in that case, the lock is released automatically at xact end.
1166 relation_close(Relation relation, LOCKMODE lockmode)
1168 LockRelId relid = relation->rd_lockInfo.lockRelId;
1170 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1172 /* The relcache does the real work... */
1173 RelationClose(relation);
1175 if (lockmode != NoLock)
1176 UnlockRelationId(&relid, lockmode);
1181 * heap_open - open a heap relation by relation OID
1183 * This is essentially relation_open plus a check that the relation
1184 * is not an index nor a composite type. (The caller should also
1185 * check that it's not a view or foreign table before assuming it has
1190 heap_open(Oid relationId, LOCKMODE lockmode)
1194 r = relation_open(relationId, lockmode);
1196 if (r->rd_rel->relkind == RELKIND_INDEX)
1198 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1199 errmsg("\"%s\" is an index",
1200 RelationGetRelationName(r))));
1201 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1203 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1204 errmsg("\"%s\" is a composite type",
1205 RelationGetRelationName(r))));
1211 * heap_openrv - open a heap relation specified
1212 * by a RangeVar node
1214 * As above, but relation is specified by a RangeVar.
1218 heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
1222 r = relation_openrv(relation, lockmode);
1224 if (r->rd_rel->relkind == RELKIND_INDEX)
1226 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1227 errmsg("\"%s\" is an index",
1228 RelationGetRelationName(r))));
1229 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1231 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1232 errmsg("\"%s\" is a composite type",
1233 RelationGetRelationName(r))));
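/*
 * Illustrative sketch (names are hypothetical): open a table by qualified
 * name via a RangeVar, then close it while keeping the lock until end of
 * transaction.
 *
 *		RangeVar   *rv = makeRangeVar("public", "my_table", -1);
 *		Relation	rel = heap_openrv(rv, RowExclusiveLock);
 *
 *		... do work on rel ...
 *		heap_close(rel, NoLock);	... lock is held until transaction end ...
 */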
1239 * heap_openrv_extended - open a heap relation specified
1240 * by a RangeVar node
1242 * As above, but optionally return NULL instead of failing for
1243 * relation-not-found.
1247 heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
1252 r = relation_openrv_extended(relation, lockmode, missing_ok);
1256 if (r->rd_rel->relkind == RELKIND_INDEX)
1258 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1259 errmsg("\"%s\" is an index",
1260 RelationGetRelationName(r))));
1261 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1263 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1264 errmsg("\"%s\" is a composite type",
1265 RelationGetRelationName(r))));
1273 * heap_beginscan - begin relation scan
1275 * heap_beginscan_strat offers an extended API that lets the caller control
1276 * whether a nondefault buffer access strategy can be used, and whether
1277 * syncscan can be chosen (possibly resulting in the scan not starting from
1278 * block zero). Both of these default to TRUE with plain heap_beginscan.
1280 * heap_beginscan_bm is an alternative entry point for setting up a
1281 * HeapScanDesc for a bitmap heap scan. Although that scan technology is
1282 * really quite unlike a standard seqscan, there is just enough commonality
1283 * to make it worth using the same data structure.
1287 heap_beginscan(Relation relation, Snapshot snapshot,
1288 int nkeys, ScanKey key)
1290 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1291 true, true, false, false);
1295 heap_beginscan_catalog(Relation relation, int nkeys, ScanKey key)
1297 Oid relid = RelationGetRelid(relation);
1298 Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1300 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1301 true, true, false, true);
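/*
 * Illustrative catalog-scan sketch (not part of this file): scan pg_class
 * with the snapshot chosen by heap_beginscan_catalog; heap_endscan later
 * unregisters that snapshot because rs_temp_snap is set.
 *
 *		Relation	rel = heap_open(RelationRelationId, AccessShareLock);
 *		HeapScanDesc scan = heap_beginscan_catalog(rel, 0, NULL);
 *		HeapTuple	tup;
 *
 *		while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *			... examine the pg_class row ...
 *		heap_endscan(scan);
 *		heap_close(rel, AccessShareLock);
 */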
1305 heap_beginscan_strat(Relation relation, Snapshot snapshot,
1306 int nkeys, ScanKey key,
1307 bool allow_strat, bool allow_sync)
1309 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1310 allow_strat, allow_sync, false, false);
1314 heap_beginscan_bm(Relation relation, Snapshot snapshot,
1315 int nkeys, ScanKey key)
1317 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1318 false, false, true, false);
1322 heap_beginscan_internal(Relation relation, Snapshot snapshot,
1323 int nkeys, ScanKey key,
1324 bool allow_strat, bool allow_sync,
1325 bool is_bitmapscan, bool temp_snap)
1330 * increment relation ref count while scanning relation
1332 * This is just to make really sure the relcache entry won't go away while
1333 * the scan has a pointer to it. Caller should be holding the rel open
1334 * anyway, so this is redundant in all normal scenarios...
1336 RelationIncrementReferenceCount(relation);
1339 * allocate and initialize scan descriptor
1341 scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1343 scan->rs_rd = relation;
1344 scan->rs_snapshot = snapshot;
1345 scan->rs_nkeys = nkeys;
1346 scan->rs_bitmapscan = is_bitmapscan;
1347 scan->rs_strategy = NULL; /* set in initscan */
1348 scan->rs_allow_strat = allow_strat;
1349 scan->rs_allow_sync = allow_sync;
1350 scan->rs_temp_snap = temp_snap;
1353 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1355 scan->rs_pageatatime = IsMVCCSnapshot(snapshot);
1358 * For a seqscan in a serializable transaction, acquire a predicate lock
1359 * on the entire relation. This is required not only to lock all the
1360 * matching tuples, but also to conflict with new insertions into the
1361 * table. In an indexscan, we take page locks on the index pages covering
1362 * the range specified in the scan qual, but in a heap scan there is
1363 * nothing more fine-grained to lock. A bitmap scan is a different story:
1364 * there we have already scanned the index and locked the index pages
1365 * covering the predicate. But in that case we still have to lock any
1366 * matching heap tuples.
1369 PredicateLockRelation(relation, snapshot);
1371 /* we only need to set this up once */
1372 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1375 * we do this here instead of in initscan() because heap_rescan also calls
1376 * initscan() and we don't want to allocate memory again
1379 scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1381 scan->rs_key = NULL;
1383 initscan(scan, key, false);
1389 * heap_rescan - restart a relation scan
1393 heap_rescan(HeapScanDesc scan,
1397 * unpin scan buffers
1399 if (BufferIsValid(scan->rs_cbuf))
1400 ReleaseBuffer(scan->rs_cbuf);
1403 * reinitialize scan descriptor
1405 initscan(scan, key, true);
1409 * heap_endscan - end relation scan
1411 * See how to integrate with index scans.
1412 * Check handling of reldesc caching.
1416 heap_endscan(HeapScanDesc scan)
1418 /* Note: no locking manipulations needed */
1421 * unpin scan buffers
1423 if (BufferIsValid(scan->rs_cbuf))
1424 ReleaseBuffer(scan->rs_cbuf);
1427 * decrement relation reference count and free scan descriptor storage
1429 RelationDecrementReferenceCount(scan->rs_rd);
1432 pfree(scan->rs_key);
1434 if (scan->rs_strategy != NULL)
1435 FreeAccessStrategy(scan->rs_strategy);
1437 if (scan->rs_temp_snap)
1438 UnregisterSnapshot(scan->rs_snapshot);
1444 * heap_getnext - retrieve next tuple in scan
1446 * Fix to work with index relations.
1447 * We don't return the buffer anymore, but you can get it from the
1448 * returned HeapTuple.
1453 #define HEAPDEBUG_1 \
1454 elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
1455 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
1456 #define HEAPDEBUG_2 \
1457 elog(DEBUG2, "heap_getnext returning EOS")
1458 #define HEAPDEBUG_3 \
1459 elog(DEBUG2, "heap_getnext returning tuple")
1464 #endif /* !defined(HEAPDEBUGALL) */
1468 heap_getnext(HeapScanDesc scan, ScanDirection direction)
1470 /* Note: no locking manipulations needed */
1472 HEAPDEBUG_1; /* heap_getnext( info ) */
1474 if (scan->rs_pageatatime)
1475 heapgettup_pagemode(scan, direction,
1476 scan->rs_nkeys, scan->rs_key);
1478 heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1480 if (scan->rs_ctup.t_data == NULL)
1482 HEAPDEBUG_2; /* heap_getnext returning EOS */
1487 * if we get here it means we have a new current scan tuple, so point to
1488 * the proper return buffer and return the tuple.
1490 HEAPDEBUG_3; /* heap_getnext returning tuple */
1492 pgstat_count_heap_getnext(scan->rs_rd);
1494 return &(scan->rs_ctup);
1498 * heap_fetch - retrieve tuple with given tid
1500 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1501 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1502 * against the specified snapshot.
1504 * If successful (tuple found and passes snapshot time qual), then *userbuf
1505 * is set to the buffer holding the tuple and TRUE is returned. The caller
1506 * must unpin the buffer when done with the tuple.
1508 * If the tuple is not found (ie, item number references a deleted slot),
1509 * then tuple->t_data is set to NULL and FALSE is returned.
1511 * If the tuple is found but fails the time qual check, then FALSE is returned
1512 * but tuple->t_data is left pointing to the tuple.
1514 * keep_buf determines what is done with the buffer in the FALSE-result cases.
1515 * When the caller specifies keep_buf = true, we retain the pin on the buffer
1516 * and return it in *userbuf (so the caller must eventually unpin it); when
1517 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
1519 * stats_relation is the relation to charge the heap_fetch operation against
1520 * for statistical purposes. (This could be the heap rel itself, an
1521 * associated index, or NULL to not count the fetch at all.)
1523 * heap_fetch does not follow HOT chains: only the exact TID requested will
1526 * It is somewhat inconsistent that we ereport() on invalid block number but
1527 * return false on invalid item number. There are a couple of reasons though.
1528 * One is that the caller can relatively easily check the block number for
1529 * validity, but cannot check the item number without reading the page
1530 * himself. Another is that when we are following a t_ctid link, we can be
1531 * reasonably confident that the page number is valid (since VACUUM shouldn't
1532 * truncate off the destination page without having killed the referencing
1533 * tuple first), but the item number might well not be good.
1536 heap_fetch(Relation relation,
1541 Relation stats_relation)
1543 ItemPointer tid = &(tuple->t_self);
1547 OffsetNumber offnum;
1551 * Fetch and pin the appropriate page of the relation.
1553 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1556 * Need share lock on buffer to examine tuple commit status.
1558 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1559 page = BufferGetPage(buffer);
1562 * We'd better check for out-of-range offnum in case of VACUUM since the
1565 offnum = ItemPointerGetOffsetNumber(tid);
1566 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1568 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1573 ReleaseBuffer(buffer);
1574 *userbuf = InvalidBuffer;
1576 tuple->t_data = NULL;
1581 * get the item line pointer corresponding to the requested tid
1583 lp = PageGetItemId(page, offnum);
1586 * Must check for deleted tuple.
1588 if (!ItemIdIsNormal(lp))
1590 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1595 ReleaseBuffer(buffer);
1596 *userbuf = InvalidBuffer;
1598 tuple->t_data = NULL;
1603 * fill in *tuple fields
1605 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1606 tuple->t_len = ItemIdGetLength(lp);
1607 tuple->t_tableOid = RelationGetRelid(relation);
1610 * check time qualification of tuple, then release lock
1612 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1615 PredicateLockTuple(relation, tuple, snapshot);
1617 CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1619 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1624 * All checks passed, so return the tuple as valid. Caller is now
1625 * responsible for releasing the buffer.
1629 /* Count the successful fetch against appropriate rel, if any */
1630 if (stats_relation != NULL)
1631 pgstat_count_heap_fetch(stats_relation);
1636 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1641 ReleaseBuffer(buffer);
1642 *userbuf = InvalidBuffer;
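/*
 * Illustrative heap_fetch() sketch (caller side; "rel", "snapshot" and
 * "tid" are assumed to be valid):
 *
 *		HeapTupleData tuple;
 *		Buffer		buf;
 *
 *		tuple.t_self = *tid;
 *		if (heap_fetch(rel, snapshot, &tuple, &buf, false, NULL))
 *		{
 *			... use tuple.t_data while holding the buffer pin ...
 *			ReleaseBuffer(buf);
 *		}
 */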
1649 * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1651 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1652 * of a HOT chain), and buffer is the buffer holding this tuple. We search
1653 * for the first chain member satisfying the given snapshot. If one is
1654 * found, we update *tid to reference that tuple's offset number, and
1655 * return TRUE. If no match, return FALSE without modifying *tid.
1657 * heapTuple is a caller-supplied buffer. When a match is found, we return
1658 * the tuple here, in addition to updating *tid. If no match is found, the
1659 * contents of this buffer on return are undefined.
1661 * If all_dead is not NULL, we check non-visible tuples to see if they are
1662 * globally dead; *all_dead is set TRUE if all members of the HOT chain
1663 * are vacuumable, FALSE if not.
1665 * Unlike heap_fetch, the caller must already have pin and (at least) share
1666 * lock on the buffer; it is still pinned/locked at exit. Also unlike
1667 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
1670 heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1671 Snapshot snapshot, HeapTuple heapTuple,
1672 bool *all_dead, bool first_call)
1674 Page dp = (Page) BufferGetPage(buffer);
1675 TransactionId prev_xmax = InvalidTransactionId;
1676 OffsetNumber offnum;
1677 bool at_chain_start;
1681 /* If this is not the first call, previous call returned a (live!) tuple */
1683 *all_dead = first_call;
1685 Assert(TransactionIdIsValid(RecentGlobalXmin));
1687 Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
1688 offnum = ItemPointerGetOffsetNumber(tid);
1689 at_chain_start = first_call;
1692 heapTuple->t_self = *tid;
1694 /* Scan through possible multiple members of HOT-chain */
1699 /* check for bogus TID */
1700 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1703 lp = PageGetItemId(dp, offnum);
1705 /* check for unused, dead, or redirected items */
1706 if (!ItemIdIsNormal(lp))
1708 /* We should only see a redirect at start of chain */
1709 if (ItemIdIsRedirected(lp) && at_chain_start)
1711 /* Follow the redirect */
1712 offnum = ItemIdGetRedirect(lp);
1713 at_chain_start = false;
1716 /* else must be end of chain */
1720 heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1721 heapTuple->t_len = ItemIdGetLength(lp);
1722 heapTuple->t_tableOid = RelationGetRelid(relation);
1723 ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
1726 * Shouldn't see a HEAP_ONLY tuple at chain start.
1728 if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1732 * The xmin should match the previous xmax value, else chain is
1735 if (TransactionIdIsValid(prev_xmax) &&
1736 !TransactionIdEquals(prev_xmax,
1737 HeapTupleHeaderGetXmin(heapTuple->t_data)))
1741 * When first_call is true (and thus, skip is initially false) we'll
1742 * return the first tuple we find. But on later passes, heapTuple
1743 * will initially be pointing to the tuple we returned last time.
1744 * Returning it again would be incorrect (and would loop forever), so
1745 * we skip it and return the next match we find.
1750 * For the benefit of logical decoding, have t_self point at the
1751 * element of the HOT chain we're currently investigating instead
1752 * of the root tuple of the HOT chain. This is important because
1753 * the *Satisfies routine for historical mvcc snapshots needs the
1754 * correct tid to decide about the visibility in some cases.
1756 ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
1758 /* If it's visible per the snapshot, we must return it */
1759 valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1760 CheckForSerializableConflictOut(valid, relation, heapTuple,
1762 /* reset to original, non-redirected, tid */
1763 heapTuple->t_self = *tid;
1767 ItemPointerSetOffsetNumber(tid, offnum);
1768 PredicateLockTuple(relation, heapTuple, snapshot);
1777 * If we can't see it, maybe no one else can either. At caller
1778 * request, check whether all chain members are dead to all
1781 if (all_dead && *all_dead &&
1782 !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
1786 * Check to see if HOT chain continues past this tuple; if so fetch
1787 * the next offnum and loop around.
1789 if (HeapTupleIsHotUpdated(heapTuple))
1791 Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1792 ItemPointerGetBlockNumber(tid));
1793 offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1794 at_chain_start = false;
1795 prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1798 break; /* end of chain */
1805 * heap_hot_search - search HOT chain for tuple satisfying snapshot
1807 * This has the same API as heap_hot_search_buffer, except that the caller
1808 * does not provide the buffer containing the page, rather we access it
1812 heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
1817 HeapTupleData heapTuple;
1819 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1820 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1821 result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
1822 &heapTuple, all_dead, true);
1823 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1824 ReleaseBuffer(buffer);
1829 * heap_get_latest_tid - get the latest tid of a specified tuple
1831 * Actually, this gets the latest version that is visible according to
1832 * the passed snapshot. You can pass SnapshotDirty to get the very latest,
1833 * possibly uncommitted version.
1835 * *tid is both an input and an output parameter: it is updated to
1836 * show the latest version of the row. Note that it will not be changed
1837 * if no version of the row passes the snapshot test.
1840 heap_get_latest_tid(Relation relation,
1845 ItemPointerData ctid;
1846 TransactionId priorXmax;
1848 /* this is to avoid Assert failures on bad input */
1849 if (!ItemPointerIsValid(tid))
1853 * Since this can be called with user-supplied TID, don't trust the input
1854 * too much. (RelationGetNumberOfBlocks is an expensive check, so we
1855 * don't check t_ctid links again this way. Note that it would not do to
1856 * call it just once and save the result, either.)
1858 blk = ItemPointerGetBlockNumber(tid);
1859 if (blk >= RelationGetNumberOfBlocks(relation))
1860 elog(ERROR, "block number %u is out of range for relation \"%s\"",
1861 blk, RelationGetRelationName(relation));
1864 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1865 * need to examine, and *tid is the TID we will return if ctid turns out
1868 * Note that we will loop until we reach the end of the t_ctid chain.
1869 * Depending on the snapshot passed, there might be at most one visible
1870 * version of the row, but we don't try to optimize for that.
1873 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1878 OffsetNumber offnum;
1884 * Read, pin, and lock the page.
1886 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1887 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1888 page = BufferGetPage(buffer);
1891 * Check for bogus item number. This is not treated as an error
1892 * condition because it can happen while following a t_ctid link. We
1893 * just assume that the prior tid is OK and return it unchanged.
1895 offnum = ItemPointerGetOffsetNumber(&ctid);
1896 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1898 UnlockReleaseBuffer(buffer);
1901 lp = PageGetItemId(page, offnum);
1902 if (!ItemIdIsNormal(lp))
1904 UnlockReleaseBuffer(buffer);
1908 /* OK to access the tuple */
1910 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1911 tp.t_len = ItemIdGetLength(lp);
1912 tp.t_tableOid = RelationGetRelid(relation);
1915 * After following a t_ctid link, we might arrive at an unrelated
1916 * tuple. Check for XMIN match.
1918 if (TransactionIdIsValid(priorXmax) &&
1919 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1921 UnlockReleaseBuffer(buffer);
1926 * Check time qualification of tuple; if visible, set it as the new
1929 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1930 CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1935 * If there's a valid t_ctid link, follow it, else we're done.
1937 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1938 HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
1939 ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1941 UnlockReleaseBuffer(buffer);
1945 ctid = tp.t_data->t_ctid;
1946 priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1947 UnlockReleaseBuffer(buffer);
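/*
 * Illustrative sketch (caller side; "rel", "snapshot" and "some_known_tid"
 * are assumed): chase the update chain of a row starting from a known TID.
 *
 *		ItemPointerData tid = some_known_tid;
 *
 *		heap_get_latest_tid(rel, snapshot, &tid);
 *		... tid now points at the latest visible version, if any ...
 */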
1953 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
1955 * This is called after we have waited for the XMAX transaction to terminate.
1956 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
1957 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
1958 * hint bit if possible --- but beware that that may not yet be possible,
1959 * if the transaction committed asynchronously.
1961 * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
1962 * even if it commits.
1964 * Hence callers should look only at XMAX_INVALID.
1966 * Note this is not allowed for tuples whose xmax is a multixact.
1969 UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
1971 Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
1972 Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
1974 if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
1976 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
1977 TransactionIdDidCommit(xid))
1978 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
1981 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
1982 InvalidTransactionId);
1988 * GetBulkInsertState - prepare status object for a bulk insert
1991 GetBulkInsertState(void)
1993 BulkInsertState bistate;
1995 bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
1996 bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
1997 bistate->current_buf = InvalidBuffer;
2002 * FreeBulkInsertState - clean up after finishing a bulk insert
2005 FreeBulkInsertState(BulkInsertState bistate)
2007 if (bistate->current_buf != InvalidBuffer)
2008 ReleaseBuffer(bistate->current_buf);
2009 FreeAccessStrategy(bistate->strategy);
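/*
 * Illustrative bulk-insert sketch (caller side; "rel", "ntuples" and
 * "tuples[i]" are hypothetical): reuse one BulkInsertState across many
 * heap_insert calls so the pinned buffer and BAS_BULKWRITE strategy carry
 * over from call to call.
 *
 *		BulkInsertState bistate = GetBulkInsertState();
 *		CommandId	cid = GetCurrentCommandId(true);
 *		int			i;
 *
 *		for (i = 0; i < ntuples; i++)
 *			heap_insert(rel, tuples[i], cid, 0, bistate);
 *		FreeBulkInsertState(bistate);
 */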
2015 * heap_insert - insert tuple into a heap
2017 * The new tuple is stamped with current transaction ID and the specified
2020 * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
2021 * logged in WAL, even for a non-temp relation. Safe usage of this behavior
2022 * requires that we arrange that all new tuples go into new pages not
2023 * containing any tuples from other transactions, and that the relation gets
2024 * fsync'd before commit. (See also heap_sync() comments)
2026 * The HEAP_INSERT_SKIP_FSM option is passed directly to
2027 * RelationGetBufferForTuple, which see for more info.
2029 * HEAP_INSERT_FROZEN should only be specified for inserts into
2030 * relfilenodes created during the current subtransaction and when
2031 * there are no prior snapshots or pre-existing portals open.
2032 * This causes rows to be frozen, which is an MVCC violation and
2033 * requires explicit options chosen by the user.
2035 * Note that these options will be applied when inserting into the heap's
2036 * TOAST table, too, if the tuple requires any out-of-line data.
2038 * The BulkInsertState object (if any; bistate can be NULL for default
2039 * behavior) is also just passed through to RelationGetBufferForTuple.
2041 * The return value is the OID assigned to the tuple (either here or by the
2042 * caller), or InvalidOid if no OID. The header fields of *tup are updated
2043 * to match the stored tuple; in particular tup->t_self receives the actual
2044 * TID where the tuple was stored. But note that any toasting of fields
2045 * within the tuple data is NOT reflected into *tup.
2048 heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2049 int options, BulkInsertState bistate)
2051 TransactionId xid = GetCurrentTransactionId();
2054 Buffer vmbuffer = InvalidBuffer;
2055 bool all_visible_cleared = false;
2058 * Fill in tuple header fields, assign an OID, and toast the tuple if
2061 * Note: below this point, heaptup is the data we actually intend to store
2062 * into the relation; tup is the caller's original untoasted data.
2064 heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2067 * We're about to do the actual insert -- but check for conflict first, to
2068 * avoid possibly having to roll back work we've just done.
2070 * For a heap insert, we only need to check for table-level SSI locks. Our
2071 * new tuple can't possibly conflict with existing tuple locks, and heap
2072 * page locks are only consolidated versions of tuple locks; they do not
2073 * lock "gaps" as index page locks do. So we don't need to identify a
2074 * buffer before making the call.
2076 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2079 * Find buffer to insert this tuple into. If the page is all visible,
2080 * this will also pin the requisite visibility map page.
2082 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2083 InvalidBuffer, options, bistate,
2086 /* NO EREPORT(ERROR) from here till changes are logged */
2087 START_CRIT_SECTION();
2089 RelationPutHeapTuple(relation, buffer, heaptup);
2091 if (PageIsAllVisible(BufferGetPage(buffer)))
2093 all_visible_cleared = true;
2094 PageClearAllVisible(BufferGetPage(buffer));
2095 visibilitymap_clear(relation,
2096 ItemPointerGetBlockNumber(&(heaptup->t_self)),
2101 * XXX Should we set PageSetPrunable on this page?
2103 * The inserting transaction may eventually abort, thus making this tuple
2104 * DEAD and hence available for pruning. Though we don't want to optimize
2105 * for aborts, if no other tuple on this page is UPDATEd/DELETEd, the
2106 * aborted tuple will never be pruned until the next vacuum is triggered.
2108 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2111 MarkBufferDirty(buffer);
2114 if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2116 xl_heap_insert xlrec;
2117 xl_heap_header xlhdr;
2119 XLogRecData rdata[4];
2120 Page page = BufferGetPage(buffer);
2121 uint8 info = XLOG_HEAP_INSERT;
2122 bool need_tuple_data;
2125 * For logical decoding, we need the tuple even if we're doing a full
2126 * page write, so make sure to log it separately. (XXX We could
2127 * alternatively store a pointer into the FPW).
2129 * Also, if this is a catalog, we need to transmit combocids to
2130 * properly decode, so log that as well.
2132 need_tuple_data = RelationIsLogicallyLogged(relation);
2133 if (RelationIsAccessibleInLogicalDecoding(relation))
2134 log_heap_new_cid(relation, heaptup);
2136 xlrec.flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
2137 xlrec.target.node = relation->rd_node;
2138 xlrec.target.tid = heaptup->t_self;
2139 rdata[0].data = (char *) &xlrec;
2140 rdata[0].len = SizeOfHeapInsert;
2141 rdata[0].buffer = InvalidBuffer;
2142 rdata[0].next = &(rdata[1]);
2144 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2145 xlhdr.t_infomask = heaptup->t_data->t_infomask;
2146 xlhdr.t_hoff = heaptup->t_data->t_hoff;
2149 * note we mark rdata[1] as belonging to buffer; if XLogInsert decides
2150 * to write the whole page to the xlog, we don't need to store
2151 * xl_heap_header in the xlog.
2153 rdata[1].data = (char *) &xlhdr;
2154 rdata[1].len = SizeOfHeapHeader;
2155 rdata[1].buffer = need_tuple_data ? InvalidBuffer : buffer;
2156 rdata[1].buffer_std = true;
2157 rdata[1].next = &(rdata[2]);
2159 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2160 rdata[2].data = (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits);
2161 rdata[2].len = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
2162 rdata[2].buffer = need_tuple_data ? InvalidBuffer : buffer;
2163 rdata[2].buffer_std = true;
2164 rdata[2].next = NULL;
2167 * Make a separate rdata entry for the tuple's buffer if we're doing
2168 * logical decoding, so that an eventual FPW doesn't remove the tuple's data.
2171 if (need_tuple_data)
2173 rdata[2].next = &(rdata[3]);
2175 rdata[3].data = NULL;
2177 rdata[3].buffer = buffer;
2178 rdata[3].buffer_std = true;
2179 rdata[3].next = NULL;
2181 xlrec.flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
2185 * If this is the first and only tuple on the page, we can reinit the
2186 * page instead of restoring the whole thing. Set flag, and hide
2187 * buffer references from XLogInsert.
2189 if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2190 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2192 info |= XLOG_HEAP_INIT_PAGE;
2193 rdata[1].buffer = rdata[2].buffer = rdata[3].buffer = InvalidBuffer;
2196 recptr = XLogInsert(RM_HEAP_ID, info, rdata);
2198 PageSetLSN(page, recptr);
2203 UnlockReleaseBuffer(buffer);
2204 if (vmbuffer != InvalidBuffer)
2205 ReleaseBuffer(vmbuffer);
2208 * If tuple is cachable, mark it for invalidation from the caches in case
2209 * we abort. Note it is OK to do this after releasing the buffer, because
2210 * the heaptup data structure is all in local memory, not in the shared buffer.
2213 CacheInvalidateHeapTuple(relation, heaptup, NULL);
2215 pgstat_count_heap_insert(relation, 1);
2218 * If heaptup is a private copy, release it. Don't forget to copy t_self
2219 * back to the caller's image, too.
2223 tup->t_self = heaptup->t_self;
2224 heap_freetuple(heaptup);
2227 return HeapTupleGetOid(tup);
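/*
 * Editor's addition (not part of the original file): a minimal sketch of a
 * bulk-loading caller driving heap_insert() with a BulkInsertState and the
 * HEAP_INSERT_SKIP_FSM option, roughly in the spirit of COPY FROM.  The
 * helper name, the caller-supplied tuple array, and the HEAPAM_USAGE_EXAMPLES
 * guard macro are hypothetical; error handling and index maintenance are
 * omitted.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_bulk_insert(Relation rel, HeapTuple *tups, int ntups)
{
	CommandId	cid = GetCurrentCommandId(true);
	BulkInsertState bistate = GetBulkInsertState();
	int			i;

	for (i = 0; i < ntups; i++)
	{
		/* skip FSM lookups so we keep extending the current block */
		heap_insert(rel, tups[i], cid, HEAP_INSERT_SKIP_FSM, bistate);
	}

	FreeBulkInsertState(bistate);
}
#endif   /* HEAPAM_USAGE_EXAMPLES */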
2231 * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2232 * tuple header fields, assigns an OID, and toasts the tuple if necessary.
2233 * Returns a toasted version of the tuple if it was toasted, or the original
2234 * tuple if not. Note that in any case, the header fields are also set in
2235 * the original tuple.
2238 heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2239 CommandId cid, int options)
2241 if (relation->rd_rel->relhasoids)
2244 /* this is redundant with an Assert in HeapTupleSetOid */
2245 Assert(tup->t_data->t_infomask & HEAP_HASOID);
2249 * If the object id of this tuple has already been assigned, trust the
2250 * caller. There are a couple of ways this can happen. At initial db
2251 * creation, the backend program sets oids for tuples. When we define
2252 * an index, we set the oid. Finally, in the future, we may allow
2253 * users to set their own object ids in order to support a persistent
2254 * object store (objects need to contain pointers to one another).
2256 if (!OidIsValid(HeapTupleGetOid(tup)))
2257 HeapTupleSetOid(tup, GetNewOid(relation));
2261 /* check there is no space for an OID */
2262 Assert(!(tup->t_data->t_infomask & HEAP_HASOID));
2265 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2266 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2267 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2268 HeapTupleHeaderSetXmin(tup->t_data, xid);
2269 if (options & HEAP_INSERT_FROZEN)
2270 HeapTupleHeaderSetXminFrozen(tup->t_data);
2272 HeapTupleHeaderSetCmin(tup->t_data, cid);
2273 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2274 tup->t_tableOid = RelationGetRelid(relation);
2277 * If the new tuple is too big for storage or contains already toasted
2278 * out-of-line attributes from some other relation, invoke the toaster.
2280 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2281 relation->rd_rel->relkind != RELKIND_MATVIEW)
2283 /* toast table entries should never be recursively toasted */
2284 Assert(!HeapTupleHasExternal(tup));
2287 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2288 return toast_insert_or_update(relation, tup, NULL, options);
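/*
 * Editor's note (not part of the original file): with the default 8 kB block
 * size, TOAST_TUPLE_THRESHOLD works out to roughly 2 kB, so the branch above
 * hands any wider tuple (or one already carrying out-of-line values) to
 * toast_insert_or_update() before it is placed on a page.
 */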
2294 * heap_multi_insert - insert multiple tuples into a heap
2296 * This is like heap_insert(), but inserts multiple tuples in one operation.
2297 * That's faster than calling heap_insert() in a loop, because when multiple
2298 * tuples can be inserted on a single page, we can write just a single WAL
2299 * record covering all of them, and only need to lock/unlock the page once.
2301 * Note: this leaks memory into the current memory context. You can create a
2302 * temporary context before calling this, if that's a problem.
2305 heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
2306 CommandId cid, int options, BulkInsertState bistate)
2308 TransactionId xid = GetCurrentTransactionId();
2309 HeapTuple *heaptuples;
2312 char *scratch = NULL;
2316 bool need_tuple_data = RelationIsLogicallyLogged(relation);
2317 bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
2319 needwal = !(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation);
2320 saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2321 HEAP_DEFAULT_FILLFACTOR);
2323 /* Toast and set header data in all the tuples */
2324 heaptuples = palloc(ntuples * sizeof(HeapTuple));
2325 for (i = 0; i < ntuples; i++)
2326 heaptuples[i] = heap_prepare_insert(relation, tuples[i],
2330 * Allocate some memory to use for constructing the WAL record. Using
2331 * palloc() within a critical section is not safe, so we allocate this beforehand.
2335 scratch = palloc(BLCKSZ);
2338 * We're about to do the actual inserts -- but check for conflict first,
2339 * to avoid possibly having to roll back work we've just done.
2341 * For a heap insert, we only need to check for table-level SSI locks. Our
2342 * new tuple can't possibly conflict with existing tuple locks, and heap
2343 * page locks are only consolidated versions of tuple locks; they do not
2344 * lock "gaps" as index page locks do. So we don't need to identify a
2345 * buffer before making the call.
2347 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2350 while (ndone < ntuples)
2353 Buffer vmbuffer = InvalidBuffer;
2354 bool all_visible_cleared = false;
2357 CHECK_FOR_INTERRUPTS();
2360 * Find buffer where at least the next tuple will fit. If the page is
2361 * all-visible, this will also pin the requisite visibility map page.
2363 buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2364 InvalidBuffer, options, bistate,
2366 page = BufferGetPage(buffer);
2368 /* NO EREPORT(ERROR) from here till changes are logged */
2369 START_CRIT_SECTION();
2372 * RelationGetBufferForTuple has ensured that the first tuple fits.
2373 * Put that on the page, and then as many other tuples as fit.
2375 RelationPutHeapTuple(relation, buffer, heaptuples[ndone]);
2376 for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2378 HeapTuple heaptup = heaptuples[ndone + nthispage];
2380 if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2383 RelationPutHeapTuple(relation, buffer, heaptup);
2386 if (PageIsAllVisible(page))
2388 all_visible_cleared = true;
2389 PageClearAllVisible(page);
2390 visibilitymap_clear(relation,
2391 BufferGetBlockNumber(buffer),
2396 * XXX Should we set PageSetPrunable on this page ? See heap_insert()
2399 MarkBufferDirty(buffer);
2405 xl_heap_multi_insert *xlrec;
2406 XLogRecData rdata[3];
2407 uint8 info = XLOG_HEAP2_MULTI_INSERT;
2410 char *scratchptr = scratch;
2414 * If the page was previously empty, we can reinit the page
2415 * instead of restoring the whole thing.
2417 init = (ItemPointerGetOffsetNumber(&(heaptuples[ndone]->t_self)) == FirstOffsetNumber &&
2418 PageGetMaxOffsetNumber(page) == FirstOffsetNumber + nthispage - 1);
2420 /* allocate xl_heap_multi_insert struct from the scratch area */
2421 xlrec = (xl_heap_multi_insert *) scratchptr;
2422 scratchptr += SizeOfHeapMultiInsert;
2425 * Allocate offsets array. Unless we're reinitializing the page,
2426 * in which case the tuples are stored in order starting at
2427 * FirstOffsetNumber and we don't need to store the offsets explicitly.
2431 scratchptr += nthispage * sizeof(OffsetNumber);
2433 /* the rest of the scratch space is used for tuple data */
2434 tupledata = scratchptr;
2436 xlrec->flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
2437 xlrec->node = relation->rd_node;
2438 xlrec->blkno = BufferGetBlockNumber(buffer);
2439 xlrec->ntuples = nthispage;
2442 * Write out an xl_multi_insert_tuple and the tuple data itself for each tuple.
2445 for (i = 0; i < nthispage; i++)
2447 HeapTuple heaptup = heaptuples[ndone + i];
2448 xl_multi_insert_tuple *tuphdr;
2452 xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2453 /* xl_multi_insert_tuple needs two-byte alignment. */
2454 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2455 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2457 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2458 tuphdr->t_infomask = heaptup->t_data->t_infomask;
2459 tuphdr->t_hoff = heaptup->t_data->t_hoff;
2461 /* write bitmap [+ padding] [+ oid] + data */
2462 datalen = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
2464 (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits),
2466 tuphdr->datalen = datalen;
2467 scratchptr += datalen;
2470 * We don't use heap_multi_insert for catalog tuples yet, but
2471 * better be prepared...
2474 log_heap_new_cid(relation, heaptup);
2476 totaldatalen = scratchptr - tupledata;
2477 Assert((scratchptr - scratch) < BLCKSZ);
2479 rdata[0].data = (char *) xlrec;
2480 rdata[0].len = tupledata - scratch;
2481 rdata[0].buffer = InvalidBuffer;
2482 rdata[0].next = &rdata[1];
2484 rdata[1].data = tupledata;
2485 rdata[1].len = totaldatalen;
2486 rdata[1].buffer = need_tuple_data ? InvalidBuffer : buffer;
2487 rdata[1].buffer_std = true;
2488 rdata[1].next = NULL;
2491 * Make a separate rdata entry for the tuple's buffer if we're
2492 * doing logical decoding, so that an eventual FPW doesn't remove the tuple's data.
2495 if (need_tuple_data)
2497 rdata[1].next = &(rdata[2]);
2499 rdata[2].data = NULL;
2501 rdata[2].buffer = buffer;
2502 rdata[2].buffer_std = true;
2503 rdata[2].next = NULL;
2504 xlrec->flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
2508 * If we're going to reinitialize the whole page using the WAL
2509 * record, hide buffer reference from XLogInsert.
2513 rdata[1].buffer = rdata[2].buffer = InvalidBuffer;
2514 info |= XLOG_HEAP_INIT_PAGE;
2518 * Signal that this is the last xl_heap_multi_insert record
2519 * emitted by this call to heap_multi_insert(). Needed for logical
2520 * decoding so it knows when to cleanup temporary data.
2522 if (ndone + nthispage == ntuples)
2523 xlrec->flags |= XLOG_HEAP_LAST_MULTI_INSERT;
2525 recptr = XLogInsert(RM_HEAP2_ID, info, rdata);
2527 PageSetLSN(page, recptr);
2532 UnlockReleaseBuffer(buffer);
2533 if (vmbuffer != InvalidBuffer)
2534 ReleaseBuffer(vmbuffer);
2540 * If tuples are cachable, mark them for invalidation from the caches in
2541 * case we abort. Note it is OK to do this after releasing the buffer,
2542 * because the heaptuples data structure is all in local memory, not in
2543 * the shared buffer.
2545 if (IsCatalogRelation(relation))
2547 for (i = 0; i < ntuples; i++)
2548 CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2552 * Copy t_self fields back to the caller's original tuples. This does
2553 * nothing for untoasted tuples (tuples[i] == heaptuples[i]), but it's
2554 * probably faster to always copy than check.
2556 for (i = 0; i < ntuples; i++)
2557 tuples[i]->t_self = heaptuples[i]->t_self;
2559 pgstat_count_heap_insert(relation, ntuples);
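/*
 * Editor's addition (not part of the original file): a sketch of a caller
 * batching already-formed tuples through heap_multi_insert(), the way COPY
 * FROM batches rows.  The helper name and the HEAPAM_USAGE_EXAMPLES guard are
 * hypothetical; options are left at zero and index maintenance is up to the
 * caller.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_multi_insert(Relation rel, HeapTuple *tups, int ntups)
{
	CommandId	cid = GetCurrentCommandId(true);
	BulkInsertState bistate = GetBulkInsertState();

	/* one call; one WAL record is emitted per page that gets filled */
	heap_multi_insert(rel, tups, ntups, cid, 0, bistate);

	FreeBulkInsertState(bistate);
}
#endif   /* HEAPAM_USAGE_EXAMPLES */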
2563 * simple_heap_insert - insert a tuple
2565 * Currently, this routine differs from heap_insert only in supplying
2566 * a default command ID and not allowing access to the speedup options.
2568 * This should be used rather than using heap_insert directly in most places
2569 * where we are modifying system catalogs.
2572 simple_heap_insert(Relation relation, HeapTuple tup)
2574 return heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
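/*
 * Editor's addition (not part of the original file): a hedged sketch of the
 * usual catalog-insertion pattern around simple_heap_insert().  The catalog
 * OID, value arrays, helper name, and HEAPAM_USAGE_EXAMPLES guard are
 * hypothetical; real catalog code must also maintain the catalog's indexes
 * (e.g. via CatalogUpdateIndexes() from catalog/indexing.h).
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static Oid
example_catalog_insert(Oid catalogOid, Datum *values, bool *nulls)
{
	Relation	rel = heap_open(catalogOid, RowExclusiveLock);
	HeapTuple	tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
	Oid			newOid = simple_heap_insert(rel, tup);

	heap_freetuple(tup);
	heap_close(rel, RowExclusiveLock);

	return newOid;
}
#endif   /* HEAPAM_USAGE_EXAMPLES */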
2578 * Given infomask/infomask2, compute the bits that must be saved in the
2579 * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2580 * xl_heap_lock_updated WAL records.
2582 * See fix_infomask_from_infobits.
2585 compute_infobits(uint16 infomask, uint16 infomask2)
2588 ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2589 ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2590 ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2591 /* note we ignore HEAP_XMAX_SHR_LOCK here */
2592 ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2593 ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2594 XLHL_KEYS_UPDATED : 0);
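/*
 * Editor's note (not part of the original file): for example, an xmax that is
 * a multixact whose members only hold FOR KEY SHARE locks carries
 * HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_KEYSHR_LOCK in the
 * infomask, which this function condenses to XLHL_XMAX_IS_MULTI |
 * XLHL_XMAX_LOCK_ONLY | XLHL_XMAX_KEYSHR_LOCK for the WAL record;
 * fix_infomask_from_infobits() performs the reverse expansion at redo time.
 */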
2598 * Given two versions of the same t_infomask for a tuple, compare them and
2599 * return whether the relevant status for a tuple Xmax has changed. This is
2600 * used after a buffer lock has been released and reacquired: we want to ensure
2601 * that the tuple state continues to be the same as it was when we previously obtained it.
2604 * Note the Xmax field itself must be compared separately.
2607 xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2609 const uint16 interesting =
2610 HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2612 if ((new_infomask & interesting) != (old_infomask & interesting))
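/*
 * Editor's note (not part of the original file): heap_delete() and
 * heap_update() below use this in a recheck idiom along these lines:
 *
 *		xwait = HeapTupleHeaderGetRawXmax(tup->t_data);
 *		infomask = tup->t_data->t_infomask;
 *		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 *		... sleep until xwait is done ...
 *		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *		if (xmax_infomask_changed(tup->t_data->t_infomask, infomask) ||
 *			!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tup->t_data),
 *								 xwait))
 *			... start over from the top ...
 *
 * i.e. both the infomask summary and the raw xmax are re-compared once the
 * buffer lock has been reacquired.
 */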
2619 * heap_delete - delete a tuple
2621 * NB: do not call this directly unless you are prepared to deal with
2622 * concurrent-update conditions. Use simple_heap_delete instead.
2624 * relation - table to be modified (caller must hold suitable lock)
2625 * tid - TID of tuple to be deleted
2626 * cid - delete command ID (used for visibility test, and stored into
2627 * cmax if successful)
2628 * crosscheck - if not InvalidSnapshot, also check tuple against this
2629 * wait - true if should wait for any conflicting update to commit/abort
2630 * hufd - output parameter, filled in failure cases (see below)
2632 * Normal, successful return value is HeapTupleMayBeUpdated, which
2633 * actually means we did delete it. Failure return codes are
2634 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2635 * (the last only possible if wait == false).
2637 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
2638 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
2639 * (the last only for HeapTupleSelfUpdated, since we
2640 * cannot obtain cmax from a combocid generated by another transaction).
2641 * See comments for struct HeapUpdateFailureData for additional info.
2644 heap_delete(Relation relation, ItemPointer tid,
2645 CommandId cid, Snapshot crosscheck, bool wait,
2646 HeapUpdateFailureData *hufd)
2649 TransactionId xid = GetCurrentTransactionId();
2655 Buffer vmbuffer = InvalidBuffer;
2656 TransactionId new_xmax;
2657 uint16 new_infomask,
2659 bool have_tuple_lock = false;
2661 bool all_visible_cleared = false;
2662 HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2663 bool old_key_copied = false;
2665 Assert(ItemPointerIsValid(tid));
2667 block = ItemPointerGetBlockNumber(tid);
2668 buffer = ReadBuffer(relation, block);
2669 page = BufferGetPage(buffer);
2672 * Before locking the buffer, pin the visibility map page if it appears to
2673 * be necessary. Since we haven't got the lock yet, someone else might be
2674 * in the middle of changing this, so we'll need to recheck after we have the lock.
2677 if (PageIsAllVisible(page))
2678 visibilitymap_pin(relation, block, &vmbuffer);
2680 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2683 * If we didn't pin the visibility map page and the page has become all
2684 * visible while we were busy locking the buffer, we'll have to unlock and
2685 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2686 * unfortunate, but hopefully shouldn't happen often.
2688 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2690 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2691 visibilitymap_pin(relation, block, &vmbuffer);
2692 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2695 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2696 Assert(ItemIdIsNormal(lp));
2698 tp.t_tableOid = RelationGetRelid(relation);
2699 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2700 tp.t_len = ItemIdGetLength(lp);
2704 result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2706 if (result == HeapTupleInvisible)
2708 UnlockReleaseBuffer(buffer);
2709 elog(ERROR, "attempted to delete invisible tuple");
2711 else if (result == HeapTupleBeingUpdated && wait)
2713 TransactionId xwait;
2716 /* must copy state data before unlocking buffer */
2717 xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2718 infomask = tp.t_data->t_infomask;
2720 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2723 * Acquire tuple lock to establish our priority for the tuple (see
2724 * heap_lock_tuple). LockTuple will release us when we are
2725 * next-in-line for the tuple.
2727 * If we are forced to "start over" below, we keep the tuple lock;
2728 * this arranges that we stay at the head of the line while rechecking tuple state.
2731 if (!have_tuple_lock)
2733 LockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2734 have_tuple_lock = true;
2738 * Sleep until concurrent transaction ends. Note that we don't care
2739 * which lock mode the locker has, because we need the strongest one.
2742 if (infomask & HEAP_XMAX_IS_MULTI)
2744 /* wait for multixact */
2745 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
2746 relation, &tp.t_data->t_ctid, XLTW_Delete,
2748 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2751 * If xwait had just locked the tuple then some other xact could
2752 * update this tuple before we get to this point. Check for xmax
2753 * change, and start over if so.
2755 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2756 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2761 * You might think the multixact is necessarily done here, but not
2762 * so: it could have surviving members, namely our own xact or
2763 * other subxacts of this backend. It is legal for us to delete
2764 * the tuple in either case, however (the latter case is
2765 * essentially a situation of upgrading our former shared lock to
2766 * exclusive). We don't bother changing the on-disk hint bits
2767 * since we are about to overwrite the xmax altogether.
2772 /* wait for regular transaction to end */
2773 XactLockTableWait(xwait, relation, &tp.t_data->t_ctid, XLTW_Delete);
2774 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2777 * xwait is done, but if xwait had just locked the tuple then some
2778 * other xact could update this tuple before we get to this point.
2779 * Check for xmax change, and start over if so.
2781 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2782 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2786 /* Otherwise check if it committed or aborted */
2787 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2791 * We may overwrite if previous xmax aborted, or if it committed but
2792 * only locked the tuple without updating it.
2794 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2795 HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
2796 HeapTupleHeaderIsOnlyLocked(tp.t_data))
2797 result = HeapTupleMayBeUpdated;
2799 result = HeapTupleUpdated;
2802 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
2804 /* Perform additional check for transaction-snapshot mode RI updates */
2805 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2806 result = HeapTupleUpdated;
2809 if (result != HeapTupleMayBeUpdated)
2811 Assert(result == HeapTupleSelfUpdated ||
2812 result == HeapTupleUpdated ||
2813 result == HeapTupleBeingUpdated);
2814 Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2815 hufd->ctid = tp.t_data->t_ctid;
2816 hufd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2817 if (result == HeapTupleSelfUpdated)
2818 hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2820 hufd->cmax = InvalidCommandId;
2821 UnlockReleaseBuffer(buffer);
2822 if (have_tuple_lock)
2823 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2824 if (vmbuffer != InvalidBuffer)
2825 ReleaseBuffer(vmbuffer);
2830 * We're about to do the actual delete -- check for conflict first, to
2831 * avoid possibly having to roll back work we've just done.
2833 CheckForSerializableConflictIn(relation, &tp, buffer);
2835 /* replace cid with a combo cid if necessary */
2836 HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2839 * Compute replica identity tuple before entering the critical section so
2840 * we don't PANIC upon a memory allocation failure.
2842 old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
2845 * If this is the first possibly-multixact-able operation in the current
2846 * transaction, set my per-backend OldestMemberMXactId setting. We can be
2847 * certain that the transaction will never become a member of any older
2848 * MultiXactIds than that. (We have to do this even if we end up just
2849 * using our own TransactionId below, since some other backend could
2850 * incorporate our XID into a MultiXact immediately afterwards.)
2852 MultiXactIdSetOldestMember();
2854 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
2855 tp.t_data->t_infomask, tp.t_data->t_infomask2,
2856 xid, LockTupleExclusive, true,
2857 &new_xmax, &new_infomask, &new_infomask2);
2859 START_CRIT_SECTION();
2862 * If this transaction commits, the tuple will become DEAD sooner or
2863 * later. Set flag that this page is a candidate for pruning once our xid
2864 * falls below the OldestXmin horizon. If the transaction finally aborts,
2865 * the subsequent page pruning will be a no-op and the hint will be cleared.
2868 PageSetPrunable(page, xid);
2870 if (PageIsAllVisible(page))
2872 all_visible_cleared = true;
2873 PageClearAllVisible(page);
2874 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2878 /* store transaction information of xact deleting the tuple */
2879 tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
2880 tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
2881 tp.t_data->t_infomask |= new_infomask;
2882 tp.t_data->t_infomask2 |= new_infomask2;
2883 HeapTupleHeaderClearHotUpdated(tp.t_data);
2884 HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
2885 HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2886 /* Make sure there is no forward chain link in t_ctid */
2887 tp.t_data->t_ctid = tp.t_self;
2889 MarkBufferDirty(buffer);
2892 if (RelationNeedsWAL(relation))
2894 xl_heap_delete xlrec;
2896 XLogRecData rdata[4];
2898 /* For logical decoding we need combocids to properly decode the catalog */
2899 if (RelationIsAccessibleInLogicalDecoding(relation))
2900 log_heap_new_cid(relation, &tp);
2902 xlrec.flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
2903 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
2904 tp.t_data->t_infomask2);
2905 xlrec.target.node = relation->rd_node;
2906 xlrec.target.tid = tp.t_self;
2907 xlrec.xmax = new_xmax;
2908 rdata[0].data = (char *) &xlrec;
2909 rdata[0].len = SizeOfHeapDelete;
2910 rdata[0].buffer = InvalidBuffer;
2911 rdata[0].next = &(rdata[1]);
2913 rdata[1].data = NULL;
2915 rdata[1].buffer = buffer;
2916 rdata[1].buffer_std = true;
2917 rdata[1].next = NULL;
2920 * Log replica identity of the deleted tuple if there is one
2922 if (old_key_tuple != NULL)
2924 xl_heap_header xlhdr;
2926 xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
2927 xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
2928 xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
2930 rdata[1].next = &(rdata[2]);
2931 rdata[2].data = (char *) &xlhdr;
2932 rdata[2].len = SizeOfHeapHeader;
2933 rdata[2].buffer = InvalidBuffer;
2934 rdata[2].next = NULL;
2936 rdata[2].next = &(rdata[3]);
2937 rdata[3].data = (char *) old_key_tuple->t_data
2938 + offsetof(HeapTupleHeaderData, t_bits);
2939 rdata[3].len = old_key_tuple->t_len
2940 - offsetof(HeapTupleHeaderData, t_bits);
2941 rdata[3].buffer = InvalidBuffer;
2942 rdata[3].next = NULL;
2944 if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
2945 xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_TUPLE;
2947 xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_KEY;
2950 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);
2952 PageSetLSN(page, recptr);
2957 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2959 if (vmbuffer != InvalidBuffer)
2960 ReleaseBuffer(vmbuffer);
2963 * If the tuple has toasted out-of-line attributes, we need to delete
2964 * those items too. We have to do this before releasing the buffer
2965 * because we need to look at the contents of the tuple, but it's OK to
2966 * release the content lock on the buffer first.
2968 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2969 relation->rd_rel->relkind != RELKIND_MATVIEW)
2971 /* toast table entries should never be recursively toasted */
2972 Assert(!HeapTupleHasExternal(&tp));
2974 else if (HeapTupleHasExternal(&tp))
2975 toast_delete(relation, &tp);
2978 * Mark tuple for invalidation from system caches at next command
2979 * boundary. We have to do this before releasing the buffer because we
2980 * need to look at the contents of the tuple.
2982 CacheInvalidateHeapTuple(relation, &tp, NULL);
2984 /* Now we can release the buffer */
2985 ReleaseBuffer(buffer);
2988 * Release the lmgr tuple lock, if we had it.
2990 if (have_tuple_lock)
2991 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2993 pgstat_count_heap_delete(relation);
2995 if (old_key_tuple != NULL && old_key_copied)
2996 heap_freetuple(old_key_tuple);
2998 return HeapTupleMayBeUpdated;
3002 * simple_heap_delete - delete a tuple
3004 * This routine may be used to delete a tuple when concurrent updates of
3005 * the target tuple are not expected (for example, because we have a lock
3006 * on the relation associated with the tuple). Any failure is reported via ereport().
3010 simple_heap_delete(Relation relation, ItemPointer tid)
3013 HeapUpdateFailureData hufd;
3015 result = heap_delete(relation, tid,
3016 GetCurrentCommandId(true), InvalidSnapshot,
3017 true /* wait for commit */ ,
3021 case HeapTupleSelfUpdated:
3022 /* Tuple was already updated in current command? */
3023 elog(ERROR, "tuple already updated by self");
3026 case HeapTupleMayBeUpdated:
3027 /* done successfully */
3030 case HeapTupleUpdated:
3031 elog(ERROR, "tuple concurrently updated");
3035 elog(ERROR, "unrecognized heap_delete status: %u", result);
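/*
 * Editor's addition (not part of the original file): a sketch of the common
 * catalog-cleanup pattern around simple_heap_delete(): scan for the rows of
 * interest and delete each one by TID.  The catalog OID, scan keys, helper
 * name, and HEAPAM_USAGE_EXAMPLES guard are hypothetical, and the systable_*
 * helpers come from access/genam.h, which this sketch assumes is in scope.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_catalog_delete(Oid catalogOid, ScanKey key, int nkeys)
{
	Relation	rel = heap_open(catalogOid, RowExclusiveLock);
	SysScanDesc scan;
	HeapTuple	tup;

	scan = systable_beginscan(rel, InvalidOid, false, NULL, nkeys, key);
	while (HeapTupleIsValid(tup = systable_getnext(scan)))
		simple_heap_delete(rel, &tup->t_self);
	systable_endscan(scan);

	heap_close(rel, RowExclusiveLock);
}
#endif   /* HEAPAM_USAGE_EXAMPLES */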
3041 * heap_update - replace a tuple
3043 * NB: do not call this directly unless you are prepared to deal with
3044 * concurrent-update conditions. Use simple_heap_update instead.
3046 * relation - table to be modified (caller must hold suitable lock)
3047 * otid - TID of old tuple to be replaced
3048 * newtup - newly constructed tuple data to store
3049 * cid - update command ID (used for visibility test, and stored into
3050 * cmax/cmin if successful)
3051 * crosscheck - if not InvalidSnapshot, also check old tuple against this
3052 * wait - true if should wait for any conflicting update to commit/abort
3053 * hufd - output parameter, filled in failure cases (see below)
3054 * lockmode - output parameter, filled with lock mode acquired on tuple
3056 * Normal, successful return value is HeapTupleMayBeUpdated, which
3057 * actually means we *did* update it. Failure return codes are
3058 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
3059 * (the last only possible if wait == false).
3061 * On success, the header fields of *newtup are updated to match the new
3062 * stored tuple; in particular, newtup->t_self is set to the TID where the
3063 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
3064 * update was done. However, any TOAST changes in the new tuple's
3065 * data are not reflected into *newtup.
3067 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
3068 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
3069 * (the last only for HeapTupleSelfUpdated, since we
3070 * cannot obtain cmax from a combocid generated by another transaction).
3071 * See comments for struct HeapUpdateFailureData for additional info.
3074 heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3075 CommandId cid, Snapshot crosscheck, bool wait,
3076 HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
3079 TransactionId xid = GetCurrentTransactionId();
3080 Bitmapset *hot_attrs;
3081 Bitmapset *key_attrs;
3082 Bitmapset *id_attrs;
3084 HeapTupleData oldtup;
3086 HeapTuple old_key_tuple = NULL;
3087 bool old_key_copied = false;
3090 MultiXactStatus mxact_status;
3093 vmbuffer = InvalidBuffer,
3094 vmbuffer_new = InvalidBuffer;
3099 bool have_tuple_lock = false;
3104 bool use_hot_update = false;
3106 bool all_visible_cleared = false;
3107 bool all_visible_cleared_new = false;
3108 bool checked_lockers;
3109 bool locker_remains;
3110 TransactionId xmax_new_tuple,
3112 uint16 infomask_old_tuple,
3113 infomask2_old_tuple,
3115 infomask2_new_tuple;
3117 Assert(ItemPointerIsValid(otid));
3120 * Fetch the list of attributes to be checked for HOT update. This is
3121 * wasted effort if we fail to update or have to put the new tuple on a
3122 * different page. But we must compute the list before obtaining buffer
3123 * lock --- in the worst case, if we are doing an update on one of the
3124 * relevant system catalogs, we could deadlock if we try to fetch the list
3125 * later. In any case, the relcache caches the data so this is usually pretty cheap.
3128 * Note that we get a copy here, so we need not worry about relcache flush
3129 * happening midway through.
3131 hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL);
3132 key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
3133 id_attrs = RelationGetIndexAttrBitmap(relation,
3134 INDEX_ATTR_BITMAP_IDENTITY_KEY);
3136 block = ItemPointerGetBlockNumber(otid);
3137 buffer = ReadBuffer(relation, block);
3138 page = BufferGetPage(buffer);
3141 * Before locking the buffer, pin the visibility map page if it appears to
3142 * be necessary. Since we haven't got the lock yet, someone else might be
3143 * in the middle of changing this, so we'll need to recheck after we have the lock.
3146 if (PageIsAllVisible(page))
3147 visibilitymap_pin(relation, block, &vmbuffer);
3149 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3151 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3152 Assert(ItemIdIsNormal(lp));
3155 * Fill in enough data in oldtup for HeapSatisfiesHOTandKeyUpdate to work properly.
3158 oldtup.t_tableOid = RelationGetRelid(relation);
3159 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3160 oldtup.t_len = ItemIdGetLength(lp);
3161 oldtup.t_self = *otid;
3163 /* the new tuple is ready, except for this: */
3164 newtup->t_tableOid = RelationGetRelid(relation);
3166 /* Fill in OID for newtup */
3167 if (relation->rd_rel->relhasoids)
3170 /* this is redundant with an Assert in HeapTupleSetOid */
3171 Assert(newtup->t_data->t_infomask & HEAP_HASOID);
3173 HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
3177 /* check there is no space for an OID */
3178 Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
3182 * If we're not updating any "key" column, we can grab a weaker lock type.
3183 * This allows for more concurrency when we are running simultaneously
3184 * with foreign key checks.
3186 * Note that if a column gets detoasted while executing the update, but
3187 * the value ends up being the same, this test will fail and we will use
3188 * the stronger lock. This is acceptable; the important case to optimize
3189 * is updates that don't manipulate key columns, not those that
3190 * serendipitously arrive at the same key values.
3192 HeapSatisfiesHOTandKeyUpdate(relation, hot_attrs, key_attrs, id_attrs,
3193 &satisfies_hot, &satisfies_key,
3194 &satisfies_id, &oldtup, newtup);
3197 *lockmode = LockTupleNoKeyExclusive;
3198 mxact_status = MultiXactStatusNoKeyUpdate;
3202 * If this is the first possibly-multixact-able operation in the
3203 * current transaction, set my per-backend OldestMemberMXactId
3204 * setting. We can be certain that the transaction will never become a
3205 * member of any older MultiXactIds than that. (We have to do this
3206 * even if we end up just using our own TransactionId below, since
3207 * some other backend could incorporate our XID into a MultiXact
3208 * immediately afterwards.)
3210 MultiXactIdSetOldestMember();
3214 *lockmode = LockTupleExclusive;
3215 mxact_status = MultiXactStatusUpdate;
3220 * Note: beyond this point, use oldtup not otid to refer to old tuple.
3221 * otid may very well point at newtup->t_self, which we will overwrite
3222 * with the new tuple's location, so there's great risk of confusion if we use otid anymore.
3227 checked_lockers = false;
3228 locker_remains = false;
3229 result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3231 /* see below about the "no wait" case */
3232 Assert(result != HeapTupleBeingUpdated || wait);
3234 if (result == HeapTupleInvisible)
3236 UnlockReleaseBuffer(buffer);
3237 elog(ERROR, "attempted to update invisible tuple");
3239 else if (result == HeapTupleBeingUpdated && wait)
3241 TransactionId xwait;
3243 bool can_continue = false;
3245 checked_lockers = true;
3248 * XXX note that we don't consider the "no wait" case here. This
3249 * isn't a problem currently because no caller uses that case, but it
3250 * should be fixed if such a caller is introduced. It wasn't a
3251 * problem previously because this code would always wait, but now
3252 * that some tuple locks do not conflict with one of the lock modes we
3253 * use, it is possible that this case is interesting to handle specially.
3256 * This may cause failures with third-party code that calls
3257 * heap_update directly.
3260 /* must copy state data before unlocking buffer */
3261 xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3262 infomask = oldtup.t_data->t_infomask;
3264 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3267 * Acquire tuple lock to establish our priority for the tuple (see
3268 * heap_lock_tuple). LockTuple will release us when we are
3269 * next-in-line for the tuple.
3271 * If we are forced to "start over" below, we keep the tuple lock;
3272 * this arranges that we stay at the head of the line while rechecking tuple state.
3275 if (!have_tuple_lock)
3277 LockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3278 have_tuple_lock = true;
3282 * Now we have to do something about the existing locker. If it's a
3283 * multi, sleep on it; we might be awakened before it is completely
3284 * gone (or even not sleep at all in some cases); we need to preserve
3285 * it as locker, unless it is gone completely.
3287 * If it's not a multi, we need to check for sleeping conditions
3288 * before actually going to sleep. If the update doesn't conflict
3289 * with the locks, we just continue without sleeping (but making sure it is preserved).
3292 if (infomask & HEAP_XMAX_IS_MULTI)
3294 TransactionId update_xact;
3297 /* wait for multixact */
3298 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3299 relation, &oldtup.t_data->t_ctid, XLTW_Update,
3301 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3304 * If xwait had just locked the tuple then some other xact could
3305 * update this tuple before we get to this point. Check for xmax
3306 * change, and start over if so.
3308 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3309 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3314 * Note that the multixact may not be done by now. It could have
3315 * surviving members; our own xact or other subxacts of this
3316 * backend, and also any other concurrent transaction that locked
3317 * the tuple with KeyShare if we only got TupleLockUpdate. If
3318 * this is the case, we have to be careful to mark the updated
3319 * tuple with the surviving members in Xmax.
3321 * Note that there could have been another update in the
3322 * MultiXact. In that case, we need to check whether it committed
3323 * or aborted. If it aborted we are safe to update it again;
3324 * otherwise there is an update conflict, and we have to return
3325 * HeapTupleUpdated below.
3327 * In the LockTupleExclusive case, we still need to preserve the
3328 * surviving members: those would include the tuple locks we had
3329 * before this one, which are important to keep in case this subxact aborts.
3332 update_xact = InvalidTransactionId;
3333 if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3334 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3337 * There was no UPDATE in the MultiXact; or it aborted. No
3338 * TransactionIdIsInProgress() call needed here, since we called
3339 * MultiXactIdWait() above.
3341 if (!TransactionIdIsValid(update_xact) ||
3342 TransactionIdDidAbort(update_xact))
3343 can_continue = true;
3345 locker_remains = remain != 0;
3350 * If it's just a key-share locker, and we're not changing the key
3351 * columns, we don't need to wait for it to end; but we need to
3352 * preserve it as locker.
3354 if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3356 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3359 * recheck the locker; if someone else changed the tuple while
3360 * we weren't looking, start over.
3362 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3363 !TransactionIdEquals(
3364 HeapTupleHeaderGetRawXmax(oldtup.t_data),
3368 can_continue = true;
3369 locker_remains = true;
3373 /* wait for regular transaction to end */
3374 XactLockTableWait(xwait, relation, &oldtup.t_data->t_ctid,
3376 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3379 * xwait is done, but if xwait had just locked the tuple then
3380 * some other xact could update this tuple before we get to
3381 * this point. Check for xmax change, and start over if so.
3383 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3384 !TransactionIdEquals(
3385 HeapTupleHeaderGetRawXmax(oldtup.t_data),
3389 /* Otherwise check if it committed or aborted */
3390 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3391 if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3392 can_continue = true;
3396 result = can_continue ? HeapTupleMayBeUpdated : HeapTupleUpdated;
3399 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3401 /* Perform additional check for transaction-snapshot mode RI updates */
3402 if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3403 result = HeapTupleUpdated;
3406 if (result != HeapTupleMayBeUpdated)
3408 Assert(result == HeapTupleSelfUpdated ||
3409 result == HeapTupleUpdated ||
3410 result == HeapTupleBeingUpdated);
3411 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3412 hufd->ctid = oldtup.t_data->t_ctid;
3413 hufd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3414 if (result == HeapTupleSelfUpdated)
3415 hufd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3417 hufd->cmax = InvalidCommandId;
3418 UnlockReleaseBuffer(buffer);
3419 if (have_tuple_lock)
3420 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3421 if (vmbuffer != InvalidBuffer)
3422 ReleaseBuffer(vmbuffer);
3423 bms_free(hot_attrs);
3424 bms_free(key_attrs);
3429 * If we didn't pin the visibility map page and the page has become all
3430 * visible while we were busy locking the buffer, or during some
3431 * subsequent window during which we had it unlocked, we'll have to unlock
3432 * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3433 * bit unfortunate, especially since we'll now have to recheck whether the
3434 * tuple has been locked or updated under us, but hopefully it won't
3435 * happen very often.
3437 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3439 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3440 visibilitymap_pin(relation, block, &vmbuffer);
3441 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3446 * We're about to do the actual update -- check for conflict first, to
3447 * avoid possibly having to roll back work we've just done.
3449 CheckForSerializableConflictIn(relation, &oldtup, buffer);
3451 /* Fill in transaction status data */
3454 * If the tuple we're updating is locked, we need to preserve the locking
3455 * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3457 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3458 oldtup.t_data->t_infomask,
3459 oldtup.t_data->t_infomask2,
3460 xid, *lockmode, true,
3461 &xmax_old_tuple, &infomask_old_tuple,
3462 &infomask2_old_tuple);
3465 * And also prepare an Xmax value for the new copy of the tuple. If there
3466 * was no xmax previously, or there was one but all lockers are now gone,
3467 * then use InvalidXid; otherwise, get the xmax from the old tuple. (In
3468 * rare cases that might also be InvalidXid and yet not have the
3469 * HEAP_XMAX_INVALID bit set; that's fine.)
3471 if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3472 (checked_lockers && !locker_remains))
3473 xmax_new_tuple = InvalidTransactionId;
3475 xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3477 if (!TransactionIdIsValid(xmax_new_tuple))
3479 infomask_new_tuple = HEAP_XMAX_INVALID;
3480 infomask2_new_tuple = 0;
3485 * If we found a valid Xmax for the new tuple, then the infomask bits
3486 * to use on the new tuple depend on what was there on the old one.
3487 * Note that since we're doing an update, the only possibility is that
3488 * the lockers had FOR KEY SHARE lock.
3490 if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3492 GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3493 &infomask2_new_tuple);
3497 infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3498 infomask2_new_tuple = 0;
3503 * Prepare the new tuple with the appropriate initial values of Xmin and
3504 * Xmax, as well as initial infomask bits as computed above.
3506 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3507 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3508 HeapTupleHeaderSetXmin(newtup->t_data, xid);
3509 HeapTupleHeaderSetCmin(newtup->t_data, cid);
3510 newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3511 newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3512 HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3515 * Replace cid with a combo cid if necessary. Note that we already put
3516 * the plain cid into the new tuple.
3518 HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3521 * If the toaster needs to be activated, OR if the new tuple will not fit
3522 * on the same page as the old, then we need to release the content lock
3523 * (but not the pin!) on the old tuple's buffer while we are off doing
3524 * TOAST and/or table-file-extension work. We must mark the old tuple to
3525 * show that it's already being updated, else other processes may try to
3526 * update it themselves.
3528 * We need to invoke the toaster if there are already any out-of-line
3529 * toasted values present, or if the new tuple is over-threshold.
3531 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3532 relation->rd_rel->relkind != RELKIND_MATVIEW)
3534 /* toast table entries should never be recursively toasted */
3535 Assert(!HeapTupleHasExternal(&oldtup));
3536 Assert(!HeapTupleHasExternal(newtup));
3540 need_toast = (HeapTupleHasExternal(&oldtup) ||
3541 HeapTupleHasExternal(newtup) ||
3542 newtup->t_len > TOAST_TUPLE_THRESHOLD);
3544 pagefree = PageGetHeapFreeSpace(page);
3546 newtupsize = MAXALIGN(newtup->t_len);
3548 if (need_toast || newtupsize > pagefree)
3550 /* Clear obsolete visibility flags ... */
3551 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3552 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3553 HeapTupleClearHotUpdated(&oldtup);
3554 /* ... and store info about transaction updating this tuple */
3555 Assert(TransactionIdIsValid(xmax_old_tuple));
3556 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
3557 oldtup.t_data->t_infomask |= infomask_old_tuple;
3558 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
3559 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3560 /* temporarily make it look not-updated */
3561 oldtup.t_data->t_ctid = oldtup.t_self;
3562 already_marked = true;
3563 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3566 * Let the toaster do its thing, if needed.
3568 * Note: below this point, heaptup is the data we actually intend to
3569 * store into the relation; newtup is the caller's original untoasted data.
3574 /* Note we always use WAL and FSM during updates */
3575 heaptup = toast_insert_or_update(relation, newtup, &oldtup, 0);
3576 newtupsize = MAXALIGN(heaptup->t_len);
3582 * Now, do we need a new page for the tuple, or not? This is a bit
3583 * tricky since someone else could have added tuples to the page while
3584 * we weren't looking. We have to recheck the available space after
3585 * reacquiring the buffer lock. But don't bother to do that if the
3586 * former amount of free space is still not enough; it's unlikely
3587 * there's more free now than before.
3589 * What's more, if we need to get a new page, we will need to acquire
3590 * buffer locks on both old and new pages. To avoid deadlock against
3591 * some other backend trying to get the same two locks in the other
3592 * order, we must be consistent about the order we get the locks in.
3593 * We use the rule "lock the lower-numbered page of the relation
3594 * first". To implement this, we must do RelationGetBufferForTuple
3595 * while not holding the lock on the old page, and we must rely on it
3596 * to get the locks on both pages in the correct order.
3598 if (newtupsize > pagefree)
3600 /* Assume there's no chance to put heaptup on same page. */
3601 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3603 &vmbuffer_new, &vmbuffer);
3607 /* Re-acquire the lock on the old tuple's page. */
3608 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3609 /* Re-check using the up-to-date free space */
3610 pagefree = PageGetHeapFreeSpace(page);
3611 if (newtupsize > pagefree)
3614 * Rats, it doesn't fit anymore. We must now unlock and
3615 * relock to avoid deadlock. Fortunately, this path should seldom be taken.
3618 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3619 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3621 &vmbuffer_new, &vmbuffer);
3625 /* OK, it fits here, so we're done. */
3632 /* No TOAST work needed, and it'll fit on same page */
3633 already_marked = false;
3639 * We're about to create the new tuple -- check for conflict first, to
3640 * avoid possibly having to roll back work we've just done.
3642 * NOTE: For a tuple insert, we only need to check for table locks, since
3643 * predicate locking at the index level will cover ranges for anything
3644 * except a table scan. Therefore, only provide the relation.
3646 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
3649 * At this point newbuf and buffer are both pinned and locked, and newbuf
3650 * has enough space for the new tuple. If they are the same buffer, only one pin is held.
3654 if (newbuf == buffer)
3657 * Since the new tuple is going into the same page, we might be able
3658 * to do a HOT update. Check if any of the index columns have been
3659 * changed. If not, then HOT update is possible.
3662 use_hot_update = true;
3666 /* Set a hint that the old page could use prune/defrag */
3671 * Compute replica identity tuple before entering the critical section so
3672 * we don't PANIC upon a memory allocation failure.
3673 * ExtractReplicaIdentity() will return NULL if nothing needs to be logged.
3676 old_key_tuple = ExtractReplicaIdentity(relation, &oldtup, !satisfies_id, &old_key_copied);
3678 /* NO EREPORT(ERROR) from here till changes are logged */
3679 START_CRIT_SECTION();
3682 * If this transaction commits, the old tuple will become DEAD sooner or
3683 * later. Set flag that this page is a candidate for pruning once our xid
3684 * falls below the OldestXmin horizon. If the transaction finally aborts,
3685 * the subsequent page pruning will be a no-op and the hint will be
3688 * XXX Should we set hint on newbuf as well? If the transaction aborts,
3689 * there would be a prunable tuple in the newbuf; but for now we choose
3690 * not to optimize for aborts. Note that heap_xlog_update must be kept in
3691 * sync if this decision changes.
3693 PageSetPrunable(page, xid);
3697 /* Mark the old tuple as HOT-updated */
3698 HeapTupleSetHotUpdated(&oldtup);
3699 /* And mark the new tuple as heap-only */
3700 HeapTupleSetHeapOnly(heaptup);
3701 /* Mark the caller's copy too, in case different from heaptup */
3702 HeapTupleSetHeapOnly(newtup);
3706 /* Make sure tuples are correctly marked as not-HOT */
3707 HeapTupleClearHotUpdated(&oldtup);
3708 HeapTupleClearHeapOnly(heaptup);
3709 HeapTupleClearHeapOnly(newtup);
3712 RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
3714 if (!already_marked)
3716 /* Clear obsolete visibility flags ... */
3717 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3718 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3719 /* ... and store info about transaction updating this tuple */
3720 Assert(TransactionIdIsValid(xmax_old_tuple));
3721 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
3722 oldtup.t_data->t_infomask |= infomask_old_tuple;
3723 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
3724 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3727 /* record address of new tuple in t_ctid of old one */
3728 oldtup.t_data->t_ctid = heaptup->t_self;
3730 /* clear PD_ALL_VISIBLE flags */
3731 if (PageIsAllVisible(BufferGetPage(buffer)))
3733 all_visible_cleared = true;
3734 PageClearAllVisible(BufferGetPage(buffer));
3735 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3738 if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
3740 all_visible_cleared_new = true;
3741 PageClearAllVisible(BufferGetPage(newbuf));
3742 visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
3746 if (newbuf != buffer)
3747 MarkBufferDirty(newbuf);
3748 MarkBufferDirty(buffer);
3751 if (RelationNeedsWAL(relation))
3756 * For logical decoding we need combocids to properly decode the catalog.
3759 if (RelationIsAccessibleInLogicalDecoding(relation))
3761 log_heap_new_cid(relation, &oldtup);
3762 log_heap_new_cid(relation, heaptup);
3765 recptr = log_heap_update(relation, buffer,
3766 newbuf, &oldtup, heaptup,
3768 all_visible_cleared,
3769 all_visible_cleared_new);
3770 if (newbuf != buffer)
3772 PageSetLSN(BufferGetPage(newbuf), recptr);
3774 PageSetLSN(BufferGetPage(buffer), recptr);
3779 if (newbuf != buffer)
3780 LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
3781 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3784 * Mark old tuple for invalidation from system caches at next command
3785 * boundary, and mark the new tuple for invalidation in case we abort. We
3786 * have to do this before releasing the buffer because oldtup is in the
3787 * buffer. (heaptup is all in local memory, but it's necessary to process
3788 * both tuple versions in one call to inval.c so we can avoid redundant cache lookups.)
3791 CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
3793 /* Now we can release the buffer(s) */
3794 if (newbuf != buffer)
3795 ReleaseBuffer(newbuf);
3796 ReleaseBuffer(buffer);
3797 if (BufferIsValid(vmbuffer_new))
3798 ReleaseBuffer(vmbuffer_new);
3799 if (BufferIsValid(vmbuffer))
3800 ReleaseBuffer(vmbuffer);
3803 * Release the lmgr tuple lock, if we had it.
3805 if (have_tuple_lock)
3806 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3808 pgstat_count_heap_update(relation, use_hot_update);
3811 * If heaptup is a private copy, release it. Don't forget to copy t_self
3812 * back to the caller's image, too.
3814 if (heaptup != newtup)
3816 newtup->t_self = heaptup->t_self;
3817 heap_freetuple(heaptup);
3820 if (old_key_tuple != NULL && old_key_copied)
3821 heap_freetuple(old_key_tuple);
3823 bms_free(hot_attrs);
3824 bms_free(key_attrs);
3826 return HeapTupleMayBeUpdated;
3830 * Check if the specified attribute's value is the same in both given tuples.
3831 * Subroutine for HeapSatisfiesHOTandKeyUpdate.
3834 heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
3835 HeapTuple tup1, HeapTuple tup2)
3841 Form_pg_attribute att;
3844 * If it's a whole-tuple reference, say "not equal". It's not really
3845 * worth supporting this case, since it could only succeed after a no-op
3846 * update, which is hardly a case worth optimizing for.
3852 * Likewise, automatically say "not equal" for any system attribute other
3853 * than OID and tableOID; we cannot expect these to be consistent in a HOT
3854 * chain, or even to be set correctly yet in the new tuple.
3858 if (attrnum != ObjectIdAttributeNumber &&
3859 attrnum != TableOidAttributeNumber)
3864 * Extract the corresponding values. XXX this is pretty inefficient if
3865 * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
3866 * a single heap_deform_tuple call on each tuple, instead? But that
3867 * doesn't work for system columns ...
3869 value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
3870 value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
3873 * If one value is NULL and the other is not, then they are certainly not equal.
3876 if (isnull1 != isnull2)
3880 * If both are NULL, they can be considered equal.
3886 * We do simple binary comparison of the two datums. This may be overly
3887 * strict because there can be multiple binary representations for the
3888 * same logical value. But we should be OK as long as there are no false
3889 * positives. Using a type-specific equality operator is messy because
3890 * there could be multiple notions of equality in different operator
3891 * classes; furthermore, we cannot safely invoke user-defined functions
3892 * while holding exclusive buffer lock.
3896 /* The only allowed system columns are OIDs, so do this */
3897 return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
3901 Assert(attrnum <= tupdesc->natts);
3902 att = tupdesc->attrs[attrnum - 1];
3903 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
3908 * Check which columns are being updated.
3910 * This simultaneously checks conditions for HOT updates, for FOR KEY
3911 * SHARE updates, and REPLICA IDENTITY concerns. Since much of the time they
3912 * will be checking very similar sets of columns, and doing the same tests on
3913 * them, it makes sense to optimize and do them together.
3915 * We receive three bitmapsets comprising the three sets of columns we're
3916 * interested in. Note these are destructively modified; that is OK since
3917 * this is invoked at most once in heap_update.
3919 * hot_result is set to TRUE if it's okay to do a HOT update (i.e. it does not
3920 * modify indexed columns); key_result is set to TRUE if the update does not
3921 * modify columns used in the key; id_result is set to TRUE if the update does
3922 * not modify columns in any index marked as the REPLICA IDENTITY.
3925 HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs,
3926 Bitmapset *key_attrs, Bitmapset *id_attrs,
3927 bool *satisfies_hot, bool *satisfies_key,
3929 HeapTuple oldtup, HeapTuple newtup)
3931 int next_hot_attnum;
3932 int next_key_attnum;
3934 bool hot_result = true;
3935 bool key_result = true;
3936 bool id_result = true;
3938 /* If REPLICA IDENTITY is set to FULL, id_attrs will be empty. */
3939 Assert(bms_is_subset(id_attrs, key_attrs));
3940 Assert(bms_is_subset(key_attrs, hot_attrs));
3943 * If one of these sets contains no remaining bits, bms_first_member will
3944 * return -1, and after adding FirstLowInvalidHeapAttributeNumber (which
3945 * is negative!) we'll get an attribute number that can't possibly be
3946 * real, and thus won't match any actual attribute number.
3948 next_hot_attnum = bms_first_member(hot_attrs);
3949 next_hot_attnum += FirstLowInvalidHeapAttributeNumber;
3950 next_key_attnum = bms_first_member(key_attrs);
3951 next_key_attnum += FirstLowInvalidHeapAttributeNumber;
3952 next_id_attnum = bms_first_member(id_attrs);
3953 next_id_attnum += FirstLowInvalidHeapAttributeNumber;
3961 * Since the HOT attributes are a superset of the key attributes and
3962 * the key attributes are a superset of the id attributes, this logic
3963 * is guaranteed to identify the next column that needs to be checked.
3965 if (hot_result && next_hot_attnum > FirstLowInvalidHeapAttributeNumber)
3966 check_now = next_hot_attnum;
3967 else if (key_result && next_key_attnum > FirstLowInvalidHeapAttributeNumber)
3968 check_now = next_key_attnum;
3969 else if (id_result && next_id_attnum > FirstLowInvalidHeapAttributeNumber)
3970 check_now = next_id_attnum;
3974 /* See whether it changed. */
3975 changed = !heap_tuple_attr_equals(RelationGetDescr(relation),
3976 check_now, oldtup, newtup);
3979 if (check_now == next_hot_attnum)
3981 if (check_now == next_key_attnum)
3983 if (check_now == next_id_attnum)
3986 /* if all are false now, we can stop checking */
3987 if (!hot_result && !key_result && !id_result)
3992 * Advance the next attribute numbers for the sets that contain the
3993 * attribute we just checked. As we work our way through the columns,
3994 * the next_attnum values will rise; but when each set becomes empty,
3995 * bms_first_member() will return -1 and the attribute number will end
3996 * up with a value less than FirstLowInvalidHeapAttributeNumber.
3998 if (hot_result && check_now == next_hot_attnum)
4000 next_hot_attnum = bms_first_member(hot_attrs);
4001 next_hot_attnum += FirstLowInvalidHeapAttributeNumber;
4003 if (key_result && check_now == next_key_attnum)
4005 next_key_attnum = bms_first_member(key_attrs);
4006 next_key_attnum += FirstLowInvalidHeapAttributeNumber;
4008 if (id_result && check_now == next_id_attnum)
4010 next_id_attnum = bms_first_member(id_attrs);
4011 next_id_attnum += FirstLowInvalidHeapAttributeNumber;
4015 *satisfies_hot = hot_result;
4016 *satisfies_key = key_result;
4017 *satisfies_id = id_result;
4021 * simple_heap_update - replace a tuple
4023 * This routine may be used to update a tuple when concurrent updates of
4024 * the target tuple are not expected (for example, because we have a lock
4025 * on the relation associated with the tuple). Any failure is reported
4026 * via ereport().
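/*
 * A typical catalog-maintenance caller looks roughly like this (sketch; how
 * the replacement tuple is built varies from caller to caller):
 *
 *		newtup = heap_modify_tuple(oldtup, RelationGetDescr(rel),
 *								   values, nulls, replaces);
 *		simple_heap_update(rel, &newtup->t_self, newtup);
 *		CatalogUpdateIndexes(rel, newtup);
 */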
4029 simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
4032 HeapUpdateFailureData hufd;
4033 LockTupleMode lockmode;
4035 result = heap_update(relation, otid, tup,
4036 GetCurrentCommandId(true), InvalidSnapshot,
4037 true /* wait for commit */ ,
4041 case HeapTupleSelfUpdated:
4042 /* Tuple was already updated in current command? */
4043 elog(ERROR, "tuple already updated by self");
4046 case HeapTupleMayBeUpdated:
4047 /* done successfully */
4050 case HeapTupleUpdated:
4051 elog(ERROR, "tuple concurrently updated");
4055 elog(ERROR, "unrecognized heap_update status: %u", result);
4062 * Return the MultiXactStatus corresponding to the given tuple lock mode.
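/*
 * Roughly, the mapping encoded in tupleLockExtraInfo works out to:
 *
 *		LockTupleKeyShare		-> MultiXactStatusForKeyShare	(lock only)
 *		LockTupleShare			-> MultiXactStatusForShare		(lock only)
 *		LockTupleNoKeyExclusive	-> MultiXactStatusForNoKeyUpdate / NoKeyUpdate
 *		LockTupleExclusive		-> MultiXactStatusForUpdate / Update
 *
 * where the second alternative applies when is_update is true; the two
 * weaker modes have no update variant, hence the error below.
 */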
4064 static MultiXactStatus
4065 get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4070 retval = tupleLockExtraInfo[mode].updstatus;
4072 retval = tupleLockExtraInfo[mode].lockstatus;
4075 elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4076 is_update ? "true" : "false");
4078 return (MultiXactStatus) retval;
4083 * heap_lock_tuple - lock a tuple in shared or exclusive mode
4085 * Note that this acquires a buffer pin, which the caller must release.
4088 * relation: relation containing tuple (caller must hold suitable lock)
4089 * tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
4090 * cid: current command ID (used for visibility test, and stored into
4091 * tuple's cmax if lock is successful)
4092 * mode: indicates if shared or exclusive tuple lock is desired
4093 * nowait: if true, ereport rather than blocking if lock not available
4094 * follow_updates: if true, follow the update chain to also lock descendant
4095 * versions of the tuple
4097 * Output parameters:
4098 * *tuple: all fields filled in
4099 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4100 * *hufd: filled in failure cases (see below)
4102 * Function result may be:
4103 * HeapTupleMayBeUpdated: lock was successfully acquired
4104 * HeapTupleSelfUpdated: lock failed because tuple updated by self
4105 * HeapTupleUpdated: lock failed because tuple updated by other xact
4107 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
4108 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
4109 * (the last only for HeapTupleSelfUpdated, since we
4110 * cannot obtain cmax from a combocid generated by another transaction).
4111 * See comments for struct HeapUpdateFailureData for additional info.
4113 * See README.tuplock for a thorough explanation of this mechanism.
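/*
 * Minimal caller sketch (hypothetical; real callers such as the EvalPlanQual
 * machinery wrap this in visibility checks and retry loops):
 *
 *		HeapTupleData locktup;
 *		Buffer		buf;
 *		HeapUpdateFailureData hufd;
 *		HTSU_Result res;
 *
 *		locktup.t_self = *tid;			(only t_self need be valid on entry)
 *		res = heap_lock_tuple(rel, &locktup,
 *							  GetCurrentCommandId(true),
 *							  LockTupleExclusive,
 *							  false,	(nowait)
 *							  true,		(follow_updates)
 *							  &buf, &hufd);
 *		ReleaseBuffer(buf);				(pin, but not lock, is held at exit)
 *
 * and then branch on res being HeapTupleMayBeUpdated, HeapTupleSelfUpdated,
 * or HeapTupleUpdated, consulting *hufd in the failure cases.
 */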
4116 heap_lock_tuple(Relation relation, HeapTuple tuple,
4117 CommandId cid, LockTupleMode mode, bool nowait,
4118 bool follow_updates,
4119 Buffer *buffer, HeapUpdateFailureData *hufd)
4122 ItemPointer tid = &(tuple->t_self);
4127 uint16 old_infomask,
4130 bool have_tuple_lock = false;
4132 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4133 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4135 page = BufferGetPage(*buffer);
4136 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4137 Assert(ItemIdIsNormal(lp));
4139 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4140 tuple->t_len = ItemIdGetLength(lp);
4141 tuple->t_tableOid = RelationGetRelid(relation);
4144 result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4146 if (result == HeapTupleInvisible)
4148 UnlockReleaseBuffer(*buffer);
4149 elog(ERROR, "attempted to lock invisible tuple");
4151 else if (result == HeapTupleBeingUpdated)
4153 TransactionId xwait;
4157 ItemPointerData t_ctid;
4159 /* must copy state data before unlocking buffer */
4160 xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4161 infomask = tuple->t_data->t_infomask;
4162 infomask2 = tuple->t_data->t_infomask2;
4163 ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4165 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4168 * If any subtransaction of the current top transaction already holds
4169 * a lock as strong or stronger than what we're requesting, we
4170 * effectively hold the desired lock already. We *must* succeed
4171 * without trying to take the tuple lock, else we will deadlock
4172 * against anyone wanting to acquire a stronger lock.
4174 if (infomask & HEAP_XMAX_IS_MULTI)
4178 MultiXactMember *members;
4181 * We don't need to allow old multixacts here; if that had been
4182 * the case, HeapTupleSatisfiesUpdate would have returned
4183 * MayBeUpdated and we wouldn't be here.
4186 GetMultiXactIdMembers(xwait, &members, false,
4187 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4189 for (i = 0; i < nmembers; i++)
4191 if (TransactionIdIsCurrentTransactionId(members[i].xid))
4193 LockTupleMode membermode;
4195 membermode = TUPLOCK_from_mxstatus(members[i].status);
4197 if (membermode >= mode)
4199 if (have_tuple_lock)
4200 UnlockTupleTuplock(relation, tid, mode);
4203 return HeapTupleMayBeUpdated;
4213 * Acquire tuple lock to establish our priority for the tuple.
4214 * LockTuple will release us when we are next-in-line for the tuple.
4215 * We must do this even if we are share-locking.
4217 * If we are forced to "start over" below, we keep the tuple lock;
4218 * this arranges that we stay at the head of the line while rechecking
4221 if (!have_tuple_lock)
4225 if (!ConditionalLockTupleTuplock(relation, tid, mode))
4227 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4228 errmsg("could not obtain lock on row in relation \"%s\"",
4229 RelationGetRelationName(relation))));
4232 LockTupleTuplock(relation, tid, mode);
4233 have_tuple_lock = true;
4237 * Initially assume that we will have to wait for the locking
4238 * transaction(s) to finish. We check various cases below in which
4239 * this can be turned off.
4241 require_sleep = true;
4242 if (mode == LockTupleKeyShare)
4245 * If we're requesting KeyShare, and there's no update present, we
4246 * don't need to wait. Even if there is an update, we can still
4247 * continue if the key hasn't been modified.
4249 * However, if there are updates, we need to walk the update chain
4250 * to mark future versions of the row as locked, too. That way,
4251 * if somebody deletes that future version, we're protected
4252 * against the key going away. This locking of future versions
4253 * could block momentarily, if a concurrent transaction is
4254 * deleting a key; or it could return a value to the effect that
4255 * the transaction deleting the key has already committed. So we
4256 * do this before re-locking the buffer; otherwise this would be
4257 * prone to deadlocks.
4259 * Note that the TID we're locking was grabbed before we unlocked
4260 * the buffer. For it to change while we're not looking, the
4261 * other properties we're testing for below after re-locking the
4262 * buffer would also change, in which case we would restart this
4265 if (!(infomask2 & HEAP_KEYS_UPDATED))
4269 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4272 * If there are updates, follow the update chain; bail out if
4273 * that cannot be done.
4275 if (follow_updates && updated)
4279 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4280 GetCurrentTransactionId(),
4282 if (res != HeapTupleMayBeUpdated)
4285 /* recovery code expects to have buffer lock held */
4286 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4291 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4294 * Make sure it's still an appropriate lock, else start over.
4295 * Also, if it wasn't updated before we released the lock, but
4296 * is updated now, we start over too; the reason is that we
4297 * now need to follow the update chain to lock the new
4300 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4301 ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4305 /* Things look okay, so we can skip sleeping */
4306 require_sleep = false;
4309 * Note we allow Xmax to change here; other updaters/lockers
4310 * could have modified it before we grabbed the buffer lock.
4311 * However, this is not a problem, because with the recheck we
4312 * just did we ensure that they still don't conflict with the
4317 else if (mode == LockTupleShare)
4320 * If we're requesting Share, we can similarly avoid sleeping if
4321 * there's no update and no exclusive lock present.
4323 if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4324 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4326 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4329 * Make sure it's still an appropriate lock, else start over.
4330 * See above about allowing xmax to change.
4332 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4333 HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4335 require_sleep = false;
4338 else if (mode == LockTupleNoKeyExclusive)
4341 * If we're requesting NoKeyExclusive, we might also be able to
4342 * avoid sleeping; just ensure that there's no other lock type
4343 * than KeyShare. Note that this is a bit more involved than just
4344 * checking hint bits -- we need to expand the multixact to figure
4345 * out lock modes for each one (unless there was only one such
4348 if (infomask & HEAP_XMAX_IS_MULTI)
4351 MultiXactMember *members;
4354 * We don't need to allow old multixacts here; if that had
4355 * been the case, HeapTupleSatisfiesUpdate would have returned
4356 * MayBeUpdated and we wouldn't be here.
4359 GetMultiXactIdMembers(xwait, &members, false,
4360 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4365 * No need to keep the previous xmax here. This is
4366 * unlikely to happen.
4368 require_sleep = false;
4373 bool allowed = true;
4375 for (i = 0; i < nmembers; i++)
4377 if (members[i].status != MultiXactStatusForKeyShare)
4386 * if the xmax changed under us in the meantime, start
4389 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4390 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4391 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4397 /* otherwise, we're good */
4398 require_sleep = false;
4404 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4406 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4408 /* if the xmax changed in the meantime, start over */
4409 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4410 !TransactionIdEquals(
4411 HeapTupleHeaderGetRawXmax(tuple->t_data),
4414 /* otherwise, we're good */
4415 require_sleep = false;
4420 * By here, we either have already acquired the buffer exclusive lock,
4421 * or we must wait for the locking transaction or multixact; so below
4422 * we ensure that we grab buffer lock after the sleep.
4427 if (infomask & HEAP_XMAX_IS_MULTI)
4429 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4431 /* We only ever lock tuples, never update them */
4432 if (status >= MultiXactStatusNoKeyUpdate)
4433 elog(ERROR, "invalid lock mode in heap_lock_tuple");
4435 /* wait for multixact to end */
4438 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4439 status, infomask, relation,
4442 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4443 errmsg("could not obtain lock on row in relation \"%s\"",
4444 RelationGetRelationName(relation))));
4447 MultiXactIdWait((MultiXactId) xwait, status, infomask,
4448 relation, &tuple->t_data->t_ctid,
4451 /* if there are updates, follow the update chain */
4452 if (follow_updates &&
4453 !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4457 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4458 GetCurrentTransactionId(),
4460 if (res != HeapTupleMayBeUpdated)
4463 /* recovery code expects to have buffer lock held */
4464 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4469 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4472 * If xwait had just locked the tuple then some other xact
4473 * could update this tuple before we get to this point. Check
4474 * for xmax change, and start over if so.
4476 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4477 !TransactionIdEquals(
4478 HeapTupleHeaderGetRawXmax(tuple->t_data),
4483 * Of course, the multixact might not be done here: if we're
4484 * requesting a light lock mode, other transactions with light
4485 * locks could still be alive, as well as locks owned by our
4486 * own xact or other subxacts of this backend. We need to
4487 * preserve the surviving MultiXact members. Note that it
4488 * isn't absolutely necessary in the latter case, but doing so
4494 /* wait for regular transaction to end */
4497 if (!ConditionalXactLockTableWait(xwait))
4499 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4500 errmsg("could not obtain lock on row in relation \"%s\"",
4501 RelationGetRelationName(relation))));
4504 XactLockTableWait(xwait, relation, &tuple->t_data->t_ctid,
4507 /* if there are updates, follow the update chain */
4508 if (follow_updates &&
4509 !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4513 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4514 GetCurrentTransactionId(),
4516 if (res != HeapTupleMayBeUpdated)
4519 /* recovery code expects to have buffer lock held */
4520 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4525 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4528 * xwait is done, but if xwait had just locked the tuple then
4529 * some other xact could update this tuple before we get to
4530 * this point. Check for xmax change, and start over if so.
4532 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4533 !TransactionIdEquals(
4534 HeapTupleHeaderGetRawXmax(tuple->t_data),
4539 * Otherwise check if it committed or aborted. Note we cannot
4540 * be here if the tuple was only locked by somebody who didn't
4541 * conflict with us; that should have been handled above. So
4542 * that transaction must necessarily be gone by now.
4544 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
4548 /* By here, we're certain that we hold buffer exclusive lock again */
4551 * We may lock if previous xmax aborted, or if it committed but only
4552 * locked the tuple without updating it; or if we didn't have to wait
4553 * at all for whatever reason.
4555 if (!require_sleep ||
4556 (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
4557 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4558 HeapTupleHeaderIsOnlyLocked(tuple->t_data))
4559 result = HeapTupleMayBeUpdated;
4561 result = HeapTupleUpdated;
4565 if (result != HeapTupleMayBeUpdated)
4567 Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated);
4568 Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
4569 hufd->ctid = tuple->t_data->t_ctid;
4570 hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
4571 if (result == HeapTupleSelfUpdated)
4572 hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
4574 hufd->cmax = InvalidCommandId;
4575 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4576 if (have_tuple_lock)
4577 UnlockTupleTuplock(relation, tid, mode);
4581 xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
4582 old_infomask = tuple->t_data->t_infomask;
4585 * We might already hold the desired lock (or stronger), possibly under a
4586 * different subtransaction of the current top transaction. If so, there
4587 * is no need to change state or issue a WAL record. We already handled
4588 * the case where this is true for xmax being a MultiXactId, so now check
4589 * for cases where it is a plain TransactionId.
4591 * Note in particular that this covers the case where we already hold
4592 * exclusive lock on the tuple and the caller only wants key share or
4593 * share lock. It would certainly not do to give up the exclusive lock.
4595 if (!(old_infomask & (HEAP_XMAX_INVALID |
4596 HEAP_XMAX_COMMITTED |
4597 HEAP_XMAX_IS_MULTI)) &&
4598 (mode == LockTupleKeyShare ?
4599 (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask) ||
4600 HEAP_XMAX_IS_SHR_LOCKED(old_infomask) ||
4601 HEAP_XMAX_IS_EXCL_LOCKED(old_infomask)) :
4602 mode == LockTupleShare ?
4603 (HEAP_XMAX_IS_SHR_LOCKED(old_infomask) ||
4604 HEAP_XMAX_IS_EXCL_LOCKED(old_infomask)) :
4605 (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))) &&
4606 TransactionIdIsCurrentTransactionId(xmax))
4608 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4609 /* Probably can't hold tuple lock here, but may as well check */
4610 if (have_tuple_lock)
4611 UnlockTupleTuplock(relation, tid, mode);
4612 return HeapTupleMayBeUpdated;
4616 * If this is the first possibly-multixact-able operation in the current
4617 * transaction, set my per-backend OldestMemberMXactId setting. We can be
4618 * certain that the transaction will never become a member of any older
4619 * MultiXactIds than that. (We have to do this even if we end up just
4620 * using our own TransactionId below, since some other backend could
4621 * incorporate our XID into a MultiXact immediately afterwards.)
4623 MultiXactIdSetOldestMember();
4626 * Compute the new xmax and infomask to store into the tuple. Note we do
4627 * not modify the tuple just yet, because that would leave it in the wrong
4628 * state if multixact.c elogs.
4630 compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
4631 GetCurrentTransactionId(), mode, false,
4632 &xid, &new_infomask, &new_infomask2);
4634 START_CRIT_SECTION();
4637 * Store transaction information of xact locking the tuple.
4639 * Note: Cmax is meaningless in this context, so don't set it; this avoids
4640 * possibly generating a useless combo CID. Moreover, if we're locking a
4641 * previously updated tuple, it's important to preserve the Cmax.
4643 * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
4644 * we would break the HOT chain.
4646 tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
4647 tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4648 tuple->t_data->t_infomask |= new_infomask;
4649 tuple->t_data->t_infomask2 |= new_infomask2;
4650 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
4651 HeapTupleHeaderClearHotUpdated(tuple->t_data);
4652 HeapTupleHeaderSetXmax(tuple->t_data, xid);
4655 * Make sure there is no forward chain link in t_ctid. Note that in the
4656 * cases where the tuple has been updated, we must not overwrite t_ctid,
4657 * because it was set by the updater. Moreover, if the tuple has been
4658 * updated, we need to follow the update chain to lock the new versions of
4659 * the tuple as well.
4661 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
4662 tuple->t_data->t_ctid = *tid;
4664 MarkBufferDirty(*buffer);
4667 * XLOG stuff. You might think that we don't need an XLOG record because
4668 * there is no state change worth restoring after a crash. You would be
4669 * wrong however: we have just written either a TransactionId or a
4670 * MultiXactId that may never have been seen on disk before, and we need
4671 * to make sure that there are XLOG entries covering those ID numbers.
4672 * Else the same IDs might be re-used after a crash, which would be
4673 * disastrous if this page made it to disk before the crash. Essentially
4674 * we have to enforce the WAL log-before-data rule even in this case.
4675 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
4676 * entries for everything anyway.)
4678 if (RelationNeedsWAL(relation))
4682 XLogRecData rdata[2];
4684 xlrec.target.node = relation->rd_node;
4685 xlrec.target.tid = tuple->t_self;
4686 xlrec.locking_xid = xid;
4687 xlrec.infobits_set = compute_infobits(new_infomask,
4688 tuple->t_data->t_infomask2);
4689 rdata[0].data = (char *) &xlrec;
4690 rdata[0].len = SizeOfHeapLock;
4691 rdata[0].buffer = InvalidBuffer;
4692 rdata[0].next = &(rdata[1]);
4694 rdata[1].data = NULL;
4696 rdata[1].buffer = *buffer;
4697 rdata[1].buffer_std = true;
4698 rdata[1].next = NULL;
4700 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK, rdata);
4702 PageSetLSN(page, recptr);
4707 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4710 * Don't update the visibility map here. Locking a tuple doesn't change
4715 * Now that we have successfully marked the tuple as locked, we can
4716 * release the lmgr tuple lock, if we had it.
4718 if (have_tuple_lock)
4719 UnlockTupleTuplock(relation, tid, mode);
4721 return HeapTupleMayBeUpdated;
4726 * Given an original set of Xmax and infomask, and a transaction (identified by
4727 * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
4728 * corresponding infomasks to use on the tuple.
4730 * Note that this might have side effects such as creating a new MultiXactId.
4732 * Most callers will have called HeapTupleSatisfiesUpdate before this function;
4733 * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
4734 * but it was not running anymore. There is a race condition, which is that the
4735 * MultiXactId may have finished since then, but that uncommon case is handled
4736 * either here, or within MultiXactIdExpand.
4738 * There is a similar race condition possible when the old xmax was a regular
4739 * TransactionId. We test TransactionIdIsInProgress again just to narrow the
4740 * window, but it's still possible to end up creating an unnecessary
4741 * MultiXactId. Fortunately this is harmless.
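/*
 * Two illustrative cases (roughly, per the logic below): if the old xmax is
 * invalid and the caller merely wants a share lock, the result is simply
 * add_to_xmax with HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_SHR_LOCK set; if instead
 * the old xmax is some other transaction's still-running key-share lock and
 * the caller wants an exclusive lock, a fresh MultiXactId containing both
 * transactions is created and returned as the new xmax.
 */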
4744 compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
4745 uint16 old_infomask2, TransactionId add_to_xmax,
4746 LockTupleMode mode, bool is_update,
4747 TransactionId *result_xmax, uint16 *result_infomask,
4748 uint16 *result_infomask2)
4750 TransactionId new_xmax;
4751 uint16 new_infomask,
4754 Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
4759 if (old_infomask & HEAP_XMAX_INVALID)
4762 * No previous locker; we just insert our own TransactionId.
4764 * Note that it's critical that this case be the first one checked,
4765 * because there are several blocks below that come back to this one
4766 * to implement certain optimizations; old_infomask might contain
4767 * other dirty bits in those cases, but we don't really care.
4771 new_xmax = add_to_xmax;
4772 if (mode == LockTupleExclusive)
4773 new_infomask2 |= HEAP_KEYS_UPDATED;
4777 new_infomask |= HEAP_XMAX_LOCK_ONLY;
4780 case LockTupleKeyShare:
4781 new_xmax = add_to_xmax;
4782 new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
4784 case LockTupleShare:
4785 new_xmax = add_to_xmax;
4786 new_infomask |= HEAP_XMAX_SHR_LOCK;
4788 case LockTupleNoKeyExclusive:
4789 new_xmax = add_to_xmax;
4790 new_infomask |= HEAP_XMAX_EXCL_LOCK;
4792 case LockTupleExclusive:
4793 new_xmax = add_to_xmax;
4794 new_infomask |= HEAP_XMAX_EXCL_LOCK;
4795 new_infomask2 |= HEAP_KEYS_UPDATED;
4798 new_xmax = InvalidTransactionId; /* silence compiler */
4799 elog(ERROR, "invalid lock mode");
4803 else if (old_infomask & HEAP_XMAX_IS_MULTI)
4805 MultiXactStatus new_status;
4808 * Currently we don't allow XMAX_COMMITTED to be set for multis, so
4811 Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
4814 * A multixact together with LOCK_ONLY set but neither lock bit set
4815 * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
4816 * anymore. This check is critical for databases upgraded by
4817 * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
4818 * that such multis are never passed.
4820 if (!(old_infomask & HEAP_LOCK_MASK) &&
4821 HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
4823 old_infomask &= ~HEAP_XMAX_IS_MULTI;
4824 old_infomask |= HEAP_XMAX_INVALID;
4829 * If the XMAX is already a MultiXactId, then we need to expand it to
4830 * include add_to_xmax; but if all the members were lockers and are
4831 * all gone, we can do away with the IS_MULTI bit and just set
4832 * add_to_xmax as the only locker/updater. If all lockers are gone
4833 * and we have an updater that aborted, we can also do without a
4836 * The cost of doing GetMultiXactIdMembers would be paid by
4837 * MultiXactIdExpand if we weren't to do this, so this check is not
4838 * incurring extra work anyhow.
4840 if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
4842 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
4843 TransactionIdDidAbort(MultiXactIdGetUpdateXid(xmax,
4847 * Reset these bits and restart; otherwise fall through to
4848 * create a new multi below.
4850 old_infomask &= ~HEAP_XMAX_IS_MULTI;
4851 old_infomask |= HEAP_XMAX_INVALID;
4856 new_status = get_mxact_status_for_lock(mode, is_update);
4858 new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
4860 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4862 else if (old_infomask & HEAP_XMAX_COMMITTED)
4865 * It's a committed update, so we need to preserve it as updater of
4868 MultiXactStatus status;
4869 MultiXactStatus new_status;
4871 if (old_infomask2 & HEAP_KEYS_UPDATED)
4872 status = MultiXactStatusUpdate;
4874 status = MultiXactStatusNoKeyUpdate;
4876 new_status = get_mxact_status_for_lock(mode, is_update);
4879 * since it's not running, it's obviously impossible for the old
4880 * updater to be identical to the current one, so we need not check
4881 * for that case as we do in the block above.
4883 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4884 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4886 else if (TransactionIdIsInProgress(xmax))
4889 * If the XMAX is a valid, in-progress TransactionId, then we need to
4890 * create a new MultiXactId that includes both the old locker or
4891 * updater and our own TransactionId.
4893 MultiXactStatus new_status;
4894 MultiXactStatus old_status;
4895 LockTupleMode old_mode;
4897 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
4899 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
4900 old_status = MultiXactStatusForKeyShare;
4901 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
4902 old_status = MultiXactStatusForShare;
4903 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
4905 if (old_infomask2 & HEAP_KEYS_UPDATED)
4906 old_status = MultiXactStatusForUpdate;
4908 old_status = MultiXactStatusForNoKeyUpdate;
4913 * LOCK_ONLY can be present alone only when a page has been
4914 * upgraded by pg_upgrade. But in that case,
4915 * TransactionIdIsInProgress() should have returned false. We
4916 * assume it's no longer locked in this case.
4918 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
4919 old_infomask |= HEAP_XMAX_INVALID;
4920 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
4926 /* it's an update, but which kind? */
4927 if (old_infomask2 & HEAP_KEYS_UPDATED)
4928 old_status = MultiXactStatusUpdate;
4930 old_status = MultiXactStatusNoKeyUpdate;
4933 old_mode = TUPLOCK_from_mxstatus(old_status);
4936 * If the lock to be acquired is for the same TransactionId as the
4937 * existing lock, there's an optimization possible: consider only the
4938 * strongest of both locks as the only one present, and restart.
4940 if (xmax == add_to_xmax)
4943 * Note that it's not possible for the original tuple to be
4944 * updated: we wouldn't be here because the tuple would have been
4945 * invisible and we wouldn't try to update it. As a subtlety,
4946 * this code can also run when traversing an update chain to lock
4947 * future versions of a tuple. But we wouldn't be here either,
4948 * because the add_to_xmax would be different from the original
4951 Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
4953 /* acquire the strongest of both */
4954 if (mode < old_mode)
4956 /* mustn't touch is_update */
4958 old_infomask |= HEAP_XMAX_INVALID;
4962 /* otherwise, just fall back to creating a new multixact */
4963 new_status = get_mxact_status_for_lock(mode, is_update);
4964 new_xmax = MultiXactIdCreate(xmax, old_status,
4965 add_to_xmax, new_status);
4966 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4968 else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
4969 TransactionIdDidCommit(xmax))
4972 * It's a committed update, so we must preserve it as updater of the
4975 MultiXactStatus status;
4976 MultiXactStatus new_status;
4978 if (old_infomask2 & HEAP_KEYS_UPDATED)
4979 status = MultiXactStatusUpdate;
4981 status = MultiXactStatusNoKeyUpdate;
4983 new_status = get_mxact_status_for_lock(mode, is_update);
4986 * since it's not running, it's obviously impossible for the old
4987 * updater to be identical to the current one, so we need not check
4988 * for that case as we do in the block above.
4990 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4991 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4996 * Can get here iff the locking/updating transaction was running when
4997 * the infomask was extracted from the tuple, but finished before
4998 * TransactionIdIsInProgress got to run. Deal with it as if there was
4999 * no locker at all in the first place.
5001 old_infomask |= HEAP_XMAX_INVALID;
5005 *result_infomask = new_infomask;
5006 *result_infomask2 = new_infomask2;
5007 *result_xmax = new_xmax;
5011 * Subroutine for heap_lock_updated_tuple_rec.
5013 * Given a hypothetical multixact status held by the transaction identified
5014 * with the given xid, does the current transaction need to wait, fail, or can
5015 * it continue if it wanted to acquire a lock of the given mode? "needwait"
5016 * is set to true if waiting is necessary; if it can continue, then
5017 * HeapTupleMayBeUpdated is returned. In case of a conflict, a different
5018 * HeapTupleSatisfiesUpdate return code is returned.
5020 * The held status is said to be hypothetical because it might correspond to a
5021 * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5022 * way for simplicity of API.
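/*
 * For example (sketch): if xid holds a key-share lock and is still in
 * progress, a caller wanting LockTupleNoKeyExclusive can proceed without
 * waiting (those modes do not conflict), while one wanting LockTupleExclusive
 * must wait; and if xid committed an update whose mode conflicts with the
 * requested one, the result is HeapTupleUpdated.
 */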
5025 test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
5026 LockTupleMode mode, bool *needwait)
5028 MultiXactStatus wantedstatus;
5031 wantedstatus = get_mxact_status_for_lock(mode, false);
5034 * Note: we *must* check TransactionIdIsInProgress before
5035 * TransactionIdDidAbort/Commit; see comment at top of tqual.c for an
5038 if (TransactionIdIsCurrentTransactionId(xid))
5041 * Updated by our own transaction? Just return failure. This
5042 * shouldn't normally happen.
5044 return HeapTupleSelfUpdated;
5046 else if (TransactionIdIsInProgress(xid))
5049 * If the locking transaction is running, what we do depends on
5050 * whether the lock modes conflict: if they do, then we must wait for
5051 * it to finish; otherwise we can fall through to lock this tuple
5052 * version without waiting.
5054 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5055 LOCKMODE_from_mxstatus(wantedstatus)))
5061 * If we set needwait above, then this value doesn't matter;
5062 * otherwise, this value signals to caller that it's okay to proceed.
5064 return HeapTupleMayBeUpdated;
5066 else if (TransactionIdDidAbort(xid))
5067 return HeapTupleMayBeUpdated;
5068 else if (TransactionIdDidCommit(xid))
5071 * The other transaction committed. If it was only a locker, then the
5072 * lock is completely gone now and we can return success; but if it
5073 * was an update, then what we do depends on whether the two lock
5074 * modes conflict. If they conflict, then we must report error to
5075 * caller. But if they don't, we can fall through to allow the current
5076 * transaction to lock the tuple.
5078 * Note: the reason we worry about ISUPDATE here is because as soon as
5079 * a transaction ends, all its locks are gone and meaningless, and
5080 * thus we can ignore them; whereas its updates persist. In the
5081 * TransactionIdIsInProgress case, above, we don't need to check
5082 * because we know the lock is still "alive" and thus a conflict needs
5083 * always be checked.
5085 if (!ISUPDATE_from_mxstatus(status))
5086 return HeapTupleMayBeUpdated;
5088 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5089 LOCKMODE_from_mxstatus(wantedstatus)))
5091 return HeapTupleUpdated;
5093 return HeapTupleMayBeUpdated;
5096 /* Not in progress, not aborted, not committed -- must have crashed */
5097 return HeapTupleMayBeUpdated;
5102 * Recursive part of heap_lock_updated_tuple
5104 * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5105 * xid with the given mode; if this tuple is updated, recurse to lock the new
5109 heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
5112 ItemPointerData tupid;
5113 HeapTupleData mytup;
5115 uint16 new_infomask,
5121 TransactionId priorXmax = InvalidTransactionId;
5123 ItemPointerCopy(tid, &tupid);
5128 new_xmax = InvalidTransactionId;
5129 ItemPointerCopy(&tupid, &(mytup.t_self));
5131 if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false, NULL))
5134 * if we fail to find the updated version of the tuple, it's
5135 * because it was vacuumed/pruned away after its creator
5136 * transaction aborted. So behave as if we got to the end of the
5137 * chain, and there's no further tuple to lock: return success to
5140 return HeapTupleMayBeUpdated;
5144 CHECK_FOR_INTERRUPTS();
5145 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5148 * Check the tuple XMIN against prior XMAX, if any. If we reached the
5149 * end of the chain, we're done, so return success.
5151 if (TransactionIdIsValid(priorXmax) &&
5152 !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5155 UnlockReleaseBuffer(buf);
5156 return HeapTupleMayBeUpdated;
5159 old_infomask = mytup.t_data->t_infomask;
5160 old_infomask2 = mytup.t_data->t_infomask2;
5161 xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5164 * If this tuple version has been updated or locked by some concurrent
5165 * transaction(s), what we do depends on whether our lock mode
5166 * conflicts with what those other transactions hold, and also on the
5169 if (!(old_infomask & HEAP_XMAX_INVALID))
5171 TransactionId rawxmax;
5174 rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5175 if (old_infomask & HEAP_XMAX_IS_MULTI)
5179 MultiXactMember *members;
5181 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5182 HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5183 for (i = 0; i < nmembers; i++)
5187 res = test_lockmode_for_conflict(members[i].status,
5193 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5194 XactLockTableWait(members[i].xid, rel,
5195 &mytup.t_data->t_ctid,
5200 if (res != HeapTupleMayBeUpdated)
5202 UnlockReleaseBuffer(buf);
5213 MultiXactStatus status;
5216 * For a non-multi Xmax, we first need to compute the
5217 * corresponding MultiXactStatus by using the infomask bits.
5219 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5221 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5222 status = MultiXactStatusForKeyShare;
5223 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5224 status = MultiXactStatusForShare;
5225 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5227 if (old_infomask2 & HEAP_KEYS_UPDATED)
5228 status = MultiXactStatusForUpdate;
5230 status = MultiXactStatusForNoKeyUpdate;
5235 * LOCK_ONLY present alone (a pg_upgraded tuple marked
5236 * as share-locked in the old cluster) shouldn't be
5237 * seen in the middle of an update chain.
5239 elog(ERROR, "invalid lock status in tuple");
5244 /* it's an update, but which kind? */
5245 if (old_infomask2 & HEAP_KEYS_UPDATED)
5246 status = MultiXactStatusUpdate;
5248 status = MultiXactStatusNoKeyUpdate;
5251 res = test_lockmode_for_conflict(status, rawxmax, mode,
5255 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5256 XactLockTableWait(rawxmax, rel, &mytup.t_data->t_ctid,
5260 if (res != HeapTupleMayBeUpdated)
5262 UnlockReleaseBuffer(buf);
5268 /* compute the new Xmax and infomask values for the tuple ... */
5269 compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
5271 &new_xmax, &new_infomask, &new_infomask2);
5273 START_CRIT_SECTION();
5275 /* ... and set them */
5276 HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
5277 mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
5278 mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5279 mytup.t_data->t_infomask |= new_infomask;
5280 mytup.t_data->t_infomask2 |= new_infomask2;
5282 MarkBufferDirty(buf);
5285 if (RelationNeedsWAL(rel))
5287 xl_heap_lock_updated xlrec;
5289 XLogRecData rdata[2];
5290 Page page = BufferGetPage(buf);
5292 xlrec.target.node = rel->rd_node;
5293 xlrec.target.tid = mytup.t_self;
5294 xlrec.xmax = new_xmax;
5295 xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
5297 rdata[0].data = (char *) &xlrec;
5298 rdata[0].len = SizeOfHeapLockUpdated;
5299 rdata[0].buffer = InvalidBuffer;
5300 rdata[0].next = &(rdata[1]);
5302 rdata[1].data = NULL;
5304 rdata[1].buffer = buf;
5305 rdata[1].buffer_std = true;
5306 rdata[1].next = NULL;
5308 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED, rdata);
5310 PageSetLSN(page, recptr);
5315 /* if we find the end of update chain, we're done. */
5316 if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
5317 ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
5318 HeapTupleHeaderIsOnlyLocked(mytup.t_data))
5320 UnlockReleaseBuffer(buf);
5321 return HeapTupleMayBeUpdated;
5324 /* tail recursion */
5325 priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
5326 ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
5327 UnlockReleaseBuffer(buf);
5332 * heap_lock_updated_tuple
5333 * Follow update chain when locking an updated tuple, acquiring locks (row
5334 * marks) on the updated versions.
5336 * The initial tuple is assumed to be already locked.
5338 * This function doesn't check visibility; it just unconditionally marks the
5339 * tuple(s) as locked. If any tuple in the updated chain is being deleted
5340 * concurrently (or updated with the key being modified), sleep until the
5341 * transaction doing it is finished.
5343 * Note that we don't acquire heavyweight tuple locks on the tuples we walk
5344 * when we have to wait for other transactions to release them, as opposed to
5345 * what heap_lock_tuple does. The reason is that having more than one
5346 * transaction walking the chain is probably uncommon enough that risk of
5347 * starvation is not likely: one of the preconditions for being here is that
5348 * the snapshot in use predates the update that created this tuple (because we
5349 * started at an earlier version of the tuple), but at the same time such a
5350 * transaction cannot be using repeatable read or serializable isolation
5351 * levels, because that would lead to a serializability failure.
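/*
 * Example (sketch): suppose version V1 of a row was updated to V2, and V2 to
 * V3, before we key-share-locked V1. This function walks V2 and V3 and marks
 * them as locked too, so that a later key-modifying update or delete of V3
 * conflicts with (and must wait for) our lock.
 */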
5354 heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
5355 TransactionId xid, LockTupleMode mode)
5357 if (!ItemPointerEquals(&tuple->t_self, ctid))
5360 * If this is the first possibly-multixact-able operation in the
5361 * current transaction, set my per-backend OldestMemberMXactId
5362 * setting. We can be certain that the transaction will never become a
5363 * member of any older MultiXactIds than that. (We have to do this
5364 * even if we end up just using our own TransactionId below, since
5365 * some other backend could incorporate our XID into a MultiXact
5366 * immediately afterwards.)
5368 MultiXactIdSetOldestMember();
5370 return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
5373 /* nothing to lock */
5374 return HeapTupleMayBeUpdated;
5379 * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
5381 * Overwriting violates both MVCC and transactional safety, so the uses
5382 * of this function in Postgres are extremely limited. Nonetheless we
5383 * find some places to use it.
5385 * The tuple cannot change size, and therefore it's reasonable to assume
5386 * that its null bitmap (if any) doesn't change either. So we just
5387 * overwrite the data portion of the tuple without touching the null
5388 * bitmap or any of the header fields.
5390 * tuple is an in-memory tuple structure containing the data to be written
5391 * over the target tuple. Also, tuple->t_self identifies the target tuple.
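/*
 * A typical use, loosely following how VACUUM refreshes pg_class statistics
 * (sketch; only fixed-width fields may be changed this way):
 *
 *		ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
 *		pgcform = (Form_pg_class) GETSTRUCT(ctup);
 *		pgcform->relpages = num_pages;
 *		pgcform->reltuples = num_tuples;
 *		heap_inplace_update(pg_class_rel, ctup);
 */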
5394 heap_inplace_update(Relation relation, HeapTuple tuple)
5398 OffsetNumber offnum;
5400 HeapTupleHeader htup;
5404 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
5405 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5406 page = (Page) BufferGetPage(buffer);
5408 offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
5409 if (PageGetMaxOffsetNumber(page) >= offnum)
5410 lp = PageGetItemId(page, offnum);
5412 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5413 elog(ERROR, "heap_inplace_update: invalid lp");
5415 htup = (HeapTupleHeader) PageGetItem(page, lp);
5417 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
5418 newlen = tuple->t_len - tuple->t_data->t_hoff;
5419 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
5420 elog(ERROR, "heap_inplace_update: wrong tuple length");
5422 /* NO EREPORT(ERROR) from here till changes are logged */
5423 START_CRIT_SECTION();
5425 memcpy((char *) htup + htup->t_hoff,
5426 (char *) tuple->t_data + tuple->t_data->t_hoff,
5429 MarkBufferDirty(buffer);
5432 if (RelationNeedsWAL(relation))
5434 xl_heap_inplace xlrec;
5436 XLogRecData rdata[2];
5438 xlrec.target.node = relation->rd_node;
5439 xlrec.target.tid = tuple->t_self;
5441 rdata[0].data = (char *) &xlrec;
5442 rdata[0].len = SizeOfHeapInplace;
5443 rdata[0].buffer = InvalidBuffer;
5444 rdata[0].next = &(rdata[1]);
5446 rdata[1].data = (char *) htup + htup->t_hoff;
5447 rdata[1].len = newlen;
5448 rdata[1].buffer = buffer;
5449 rdata[1].buffer_std = true;
5450 rdata[1].next = NULL;
5452 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE, rdata);
5454 PageSetLSN(page, recptr);
5459 UnlockReleaseBuffer(buffer);
5462 * Send out shared cache inval if necessary. Note that because we only
5463 * pass the new version of the tuple, this mustn't be used for any
5464 * operations that could change catcache lookup keys. But we aren't
5465 * bothering with index updates either, so that's true a fortiori.
5467 if (!IsBootstrapProcessingMode())
5468 CacheInvalidateHeapTuple(relation, tuple, NULL);
5471 #define FRM_NOOP 0x0001
5472 #define FRM_INVALIDATE_XMAX 0x0002
5473 #define FRM_RETURN_IS_XID 0x0004
5474 #define FRM_RETURN_IS_MULTI 0x0008
5475 #define FRM_MARK_COMMITTED 0x0010
5479 * Determine what to do during freezing when a tuple is marked by a
5482 * NB -- this might have the side-effect of creating a new MultiXactId!
5484 * "flags" is an output value; it's used to tell caller what to do on return.
5485 * Possible flags are:
5486 * FRM_NOOP
5487 * don't do anything -- keep existing Xmax
5488 * FRM_INVALIDATE_XMAX
5489 * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
5490 * FRM_RETURN_IS_XID
5491 * The Xid return value is a single update Xid to set as xmax.
5492 * FRM_MARK_COMMITTED
5493 * Xmax can be marked as HEAP_XMAX_COMMITTED
5494 * FRM_RETURN_IS_MULTI
5495 * The return value is a new MultiXactId to set as new Xmax.
5496 * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
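/*
 * Worked example (roughly): take a multi containing an aborted locker below
 * the xid cutoff plus an updater B. If B committed and no locker survives,
 * the result is B itself with FRM_RETURN_IS_XID | FRM_MARK_COMMITTED. If
 * instead B is still running alongside a surviving locker, a brand-new
 * MultiXactId containing just the surviving members is returned, with
 * FRM_RETURN_IS_MULTI.
 */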
5498 static TransactionId
5499 FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
5500 TransactionId cutoff_xid, MultiXactId cutoff_multi,
5503 TransactionId xid = InvalidTransactionId;
5505 MultiXactMember *members;
5509 MultiXactMember *newmembers;
5511 TransactionId update_xid;
5512 bool update_committed;
5517 /* We should only be called in Multis */
5518 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
5520 if (!MultiXactIdIsValid(multi))
5522 /* Ensure infomask bits are appropriately set/reset */
5523 *flags |= FRM_INVALIDATE_XMAX;
5524 return InvalidTransactionId;
5526 else if (MultiXactIdPrecedes(multi, cutoff_multi))
5529 * This old multi cannot possibly have members still running. If it
5530 * was a locker only, it can be removed without any further
5531 * consideration; but if it contained an update, we might need to
5534 * Don't assert MultiXactIdIsRunning if the multi came from a
5535 * pg_upgrade'd share-locked tuple, though, as doing that causes an
5536 * error to be raised unnecessarily.
5538 Assert((!(t_infomask & HEAP_LOCK_MASK) &&
5539 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)) ||
5540 !MultiXactIdIsRunning(multi,
5541 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)));
5542 if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
5544 *flags |= FRM_INVALIDATE_XMAX;
5545 xid = InvalidTransactionId; /* not strictly necessary */
5549 /* replace multi by update xid */
5550 xid = MultiXactIdGetUpdateXid(multi, t_infomask);
5552 /* wasn't only a lock, xid needs to be valid */
5553 Assert(TransactionIdIsValid(xid));
5556 * If the xid is older than the cutoff, it has to have aborted,
5557 * otherwise the tuple would have gotten pruned away.
5559 if (TransactionIdPrecedes(xid, cutoff_xid))
5561 Assert(!TransactionIdDidCommit(xid));
5562 *flags |= FRM_INVALIDATE_XMAX;
5563 xid = InvalidTransactionId; /* not strictly necessary */
5567 *flags |= FRM_RETURN_IS_XID;
5575 * This multixact might have or might not have members still running, but
5576 * we know it's valid and is newer than the cutoff point for multis.
5577 * However, some member(s) of it may be below the cutoff for Xids, so we
5578 * need to walk the whole members array to figure out what to do, if
5582 allow_old = !(t_infomask & HEAP_LOCK_MASK) &&
5583 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask);
5585 GetMultiXactIdMembers(multi, &members, allow_old,
5586 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
5589 /* Nothing worth keeping */
5590 *flags |= FRM_INVALIDATE_XMAX;
5591 return InvalidTransactionId;
5594 /* is there anything older than the cutoff? */
5595 need_replace = false;
5596 for (i = 0; i < nmembers; i++)
5598 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
5600 need_replace = true;
5606 * In the simplest case, there is no member older than the cutoff; we can
5607 * keep the existing MultiXactId as is.
5613 return InvalidTransactionId;
5617 * If the multi needs to be updated, figure out which members we need
5618 * to keep.
5621 newmembers = palloc(sizeof(MultiXactMember) * nmembers);
5622 has_lockers = false;
5623 update_xid = InvalidTransactionId;
5624 update_committed = false;
5626 for (i = 0; i < nmembers; i++)
5629 * Determine whether to keep this member or ignore it.
5631 if (ISUPDATE_from_mxstatus(members[i].status))
5633 TransactionId xid = members[i].xid;
5636 * It's an update; should we keep it? If the transaction is known
5637 * aborted or crashed then it's okay to ignore it, otherwise not.
5638 * Note that an updater older than cutoff_xid cannot possibly be
5639 * committed, because HeapTupleSatisfiesVacuum would have returned
5640 * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
5642 * As with all tuple visibility routines, it's critical to test
5643 * TransactionIdIsInProgress before TransactionIdDidCommit,
5644 * because of race conditions explained in detail in tqual.c.
5646 if (TransactionIdIsCurrentTransactionId(xid) ||
5647 TransactionIdIsInProgress(xid))
5649 Assert(!TransactionIdIsValid(update_xid));
5652 else if (TransactionIdDidCommit(xid))
5655 * The transaction committed, so we can tell caller to set
5656 * HEAP_XMAX_COMMITTED. (We can only do this because we know
5657 * the transaction is not running.)
5659 Assert(!TransactionIdIsValid(update_xid));
5660 update_committed = true;
5665 * Not in progress, not committed -- must be aborted or crashed;
5670 * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
5671 * update Xid cannot possibly be older than the xid cutoff.
5673 Assert(!TransactionIdIsValid(update_xid) ||
5674 !TransactionIdPrecedes(update_xid, cutoff_xid));
5677 * If we determined that it's an Xid corresponding to an update
5678 * that must be retained, additionally add it to the list of
5679 * members of the new Multi, in case we end up using that. (We
5680 * might still decide to use only an update Xid and not a multi,
5681 * but it's easier to maintain the list as we walk the old members
5684 if (TransactionIdIsValid(update_xid))
5685 newmembers[nnewmembers++] = members[i];
5689 /* We only keep lockers if they are still running */
5690 if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
5691 TransactionIdIsInProgress(members[i].xid))
5693 /* running locker cannot possibly be older than the cutoff */
5694 Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
5695 newmembers[nnewmembers++] = members[i];
5703 if (nnewmembers == 0)
5705 /* nothing worth keeping!? Tell caller to remove the whole thing */
5706 *flags |= FRM_INVALIDATE_XMAX;
5707 xid = InvalidTransactionId;
5709 else if (TransactionIdIsValid(update_xid) && !has_lockers)
5712 * If there's a single member and it's an update, pass it back alone
5713 * without creating a new Multi. (XXX we could do this when there's a
5714 * single remaining locker, too, but that would complicate the API too
5715 * much; moreover, the case with the single updater is more
5716 * interesting, because those are longer-lived.)
5718 Assert(nnewmembers == 1);
5719 *flags |= FRM_RETURN_IS_XID;
5720 if (update_committed)
5721 *flags |= FRM_MARK_COMMITTED;
5727 * Create a new multixact with the surviving members of the previous
5728 * one, to set as new Xmax in the tuple.
5730 xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
5731 *flags |= FRM_RETURN_IS_MULTI;
5740 * heap_prepare_freeze_tuple
5742 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
5743 * are older than the specified cutoff XID and cutoff MultiXactId. If so,
5744 * setup enough state (in the *frz output argument) to later execute and
5745 * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
5748 * Caller is responsible for setting the offset field, if appropriate.
5750 * It is assumed that the caller has checked the tuple with
5751 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
5752 * (else we should be removing the tuple, not freezing it).
5754 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
5755 * XID older than it could neither be running nor seen as running by any
5756 * open transaction. This ensures that the replacement will not change
5757 * anyone's idea of the tuple state.
5758 * Similarly, cutoff_multi must be less than or equal to the smallest
5759 * MultiXactId used by any transaction currently open.
5761 * If the tuple is in a shared buffer, caller must hold an exclusive lock on
5764 * NB: It is not enough to set hint bits to indicate something is
5765 * committed/invalid -- they might not be set on a standby, or after crash
5766 * recovery. We really need to remove old xids.
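/*
 * Sketch of how a WAL-logging caller (lazy VACUUM, roughly) drives the
 * prepare/execute split; the variable names here are illustrative:
 *
 *		if (heap_prepare_freeze_tuple(tuple, FreezeLimit, MultiXactCutoff,
 *									  &frozen[nfrozen]))
 *			frozen[nfrozen++].offset = offnum;
 *
 * and later, once per page, inside a critical section:
 *
 *		for (i = 0; i < nfrozen; i++)
 *			heap_execute_freeze_tuple(tuple at frozen[i].offset, &frozen[i]);
 *		recptr = log_heap_freeze(rel, buf, FreezeLimit, frozen, nfrozen);
 *		PageSetLSN(BufferGetPage(buf), recptr);
 */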
5769 heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
5770 TransactionId cutoff_multi,
5771 xl_heap_freeze_tuple *frz)
5774 bool changed = false;
5775 bool freeze_xmax = false;
5779 frz->t_infomask2 = tuple->t_infomask2;
5780 frz->t_infomask = tuple->t_infomask;
5781 frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
5784 xid = HeapTupleHeaderGetXmin(tuple);
5785 if (TransactionIdIsNormal(xid) &&
5786 TransactionIdPrecedes(xid, cutoff_xid))
5788 frz->t_infomask |= HEAP_XMIN_FROZEN;
5793 * Process xmax. To thoroughly examine the current Xmax value we need to
5794 * resolve a MultiXactId to its member Xids, in case some of them are
5795 * below the given cutoff for Xids. In that case, those values might need
5796 * freezing, too. Also, if a multi needs freezing, we cannot simply take
5797 * it out --- if there's a live updater Xid, it needs to be kept.
5799 * Make sure to keep heap_tuple_needs_freeze in sync with this.
5801 xid = HeapTupleHeaderGetRawXmax(tuple);
5803 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
5805 TransactionId newxmax;
5808 newxmax = FreezeMultiXactId(xid, tuple->t_infomask,
5809 cutoff_xid, cutoff_multi, &flags);
5811 if (flags & FRM_INVALIDATE_XMAX)
5813 else if (flags & FRM_RETURN_IS_XID)
5816 * NB -- some of these transformations are only valid because we
5817 * know the return Xid is a tuple updater (i.e. not merely a
5818 * locker.) Also note that the only reason we don't explicitly
5819 * worry about HEAP_KEYS_UPDATED is because it lives in
5820 * t_infomask2 rather than t_infomask.
5822 frz->t_infomask &= ~HEAP_XMAX_BITS;
5823 frz->xmax = newxmax;
5824 if (flags & FRM_MARK_COMMITTED)
5825 frz->t_infomask |= HEAP_XMAX_COMMITTED;
5828 else if (flags & FRM_RETURN_IS_MULTI)
5834 * We can't use GetMultiXactIdHintBits directly on the new multi
5835 * here; that routine initializes the masks to all zeroes, which
5836 * would lose other bits we need. Doing it this way ensures all
5837 * unrelated bits remain untouched.
5839 frz->t_infomask &= ~HEAP_XMAX_BITS;
5840 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5841 GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
5842 frz->t_infomask |= newbits;
5843 frz->t_infomask2 |= newbits2;
5845 frz->xmax = newxmax;
5851 Assert(flags & FRM_NOOP);
5854 else if (TransactionIdIsNormal(xid) &&
5855 TransactionIdPrecedes(xid, cutoff_xid))
5862 frz->xmax = InvalidTransactionId;
5865 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
5866 * LOCKED. Normalize to INVALID just to be sure no one gets confused.
5867 * Also get rid of the HEAP_KEYS_UPDATED bit.
5869 frz->t_infomask &= ~HEAP_XMAX_BITS;
5870 frz->t_infomask |= HEAP_XMAX_INVALID;
5871 frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
5872 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5877 * Old-style VACUUM FULL is gone, but we have to keep this code as long as
5878 * we support having MOVED_OFF/MOVED_IN tuples in the database.
5880 if (tuple->t_infomask & HEAP_MOVED)
5882 xid = HeapTupleHeaderGetXvac(tuple);
5883 if (TransactionIdIsNormal(xid) &&
5884 TransactionIdPrecedes(xid, cutoff_xid))
5887 * If a MOVED_OFF tuple is not dead, the xvac transaction must
5888 * have failed; whereas a non-dead MOVED_IN tuple must mean the
5889 * xvac transaction succeeded.
5891 if (tuple->t_infomask & HEAP_MOVED_OFF)
5892 frz->frzflags |= XLH_INVALID_XVAC;
5894 frz->frzflags |= XLH_FREEZE_XVAC;
5897 * Might as well fix the hint bits too; usually XMIN_COMMITTED
5898 * will already be set here, but there's a small chance not.
5900 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
5901 frz->t_infomask |= HEAP_XMIN_COMMITTED;
5910 * heap_execute_freeze_tuple
5911 * Execute the prepared freezing of a tuple.
5913 * Caller is responsible for ensuring that no other backend can access the
5914 * storage underlying this tuple, either by holding an exclusive lock on the
5915 * buffer containing it (which is what lazy VACUUM does), or by having it be
5916 * in private storage (which is what CLUSTER and friends do).
5918 * Note: it might seem we could make the changes without exclusive lock, since
5919 * TransactionId read/write is assumed atomic anyway. However there is a race
5920 * condition: someone who just fetched an old XID that we overwrite here could
5921 * conceivably not finish checking the XID against pg_clog before we finish
5922 * the VACUUM and perhaps truncate off the part of pg_clog he needs. Getting
5923 * exclusive lock ensures no other backend is in process of checking the
5924 * tuple status. Also, getting exclusive lock makes it safe to adjust the
5927 * NB: All code in here must be safe to execute during crash recovery!
5930 heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
5932 HeapTupleHeaderSetXmax(tuple, frz->xmax);
5934 if (frz->frzflags & XLH_FREEZE_XVAC)
5935 HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
5937 if (frz->frzflags & XLH_INVALID_XVAC)
5938 HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
5940 tuple->t_infomask = frz->t_infomask;
5941 tuple->t_infomask2 = frz->t_infomask2;
5946 * Freeze tuple in place, without WAL logging.
5948 * Useful for callers like CLUSTER that perform their own WAL logging.
5951 heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
5952 TransactionId cutoff_multi)
5954 xl_heap_freeze_tuple frz;
5957 do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
5961 * Note that because this is not a WAL-logged operation, we don't need to
5962 * fill in the offset in the freeze record.
5966 heap_execute_freeze_tuple(tuple, &frz);
5971 * For a given MultiXactId, return the hint bits that should be set in the
5972 * tuple's infomask.
5974 * Normally this should be called for a multixact that was just created, and
5975 * so is on our local cache, so the GetMembers call is fast.
5978 GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
5979 uint16 *new_infomask2)
5982 MultiXactMember *members;
5984 uint16 bits = HEAP_XMAX_IS_MULTI;
5986 bool has_update = false;
5987 LockTupleMode strongest = LockTupleKeyShare;
5990 * We only use this in multis we just created, so they cannot be values
5993 nmembers = GetMultiXactIdMembers(multi, &members, false, false);
5995 for (i = 0; i < nmembers; i++)
6000 * Remember the strongest lock mode held by any member of the
6003 mode = TUPLOCK_from_mxstatus(members[i].status);
6004 if (mode > strongest)
6007 /* See what other bits we need */
6008 switch (members[i].status)
6010 case MultiXactStatusForKeyShare:
6011 case MultiXactStatusForShare:
6012 case MultiXactStatusForNoKeyUpdate:
6015 case MultiXactStatusForUpdate:
6016 bits2 |= HEAP_KEYS_UPDATED;
6019 case MultiXactStatusNoKeyUpdate:
6023 case MultiXactStatusUpdate:
6024 bits2 |= HEAP_KEYS_UPDATED;
6030 if (strongest == LockTupleExclusive ||
6031 strongest == LockTupleNoKeyExclusive)
6032 bits |= HEAP_XMAX_EXCL_LOCK;
6033 else if (strongest == LockTupleShare)
6034 bits |= HEAP_XMAX_SHR_LOCK;
6035 else if (strongest == LockTupleKeyShare)
6036 bits |= HEAP_XMAX_KEYSHR_LOCK;
6039 bits |= HEAP_XMAX_LOCK_ONLY;
6044 *new_infomask = bits;
6045 *new_infomask2 = bits2;
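/*
 * Editor's note (worked example, not from the original source): for a
 * freshly created multi whose members are {xid 100, ForKeyShare} and
 * {xid 101, Update}, the loop above ends with has_update = true and
 * strongest = LockTupleExclusive, so the result is
 *
 *		*new_infomask  = HEAP_XMAX_IS_MULTI | HEAP_XMAX_EXCL_LOCK;
 *		*new_infomask2 = HEAP_KEYS_UPDATED;
 *
 * HEAP_XMAX_LOCK_ONLY is not included because an updating member exists.
 */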
6049 * MultiXactIdGetUpdateXid
6051 * Given a multixact Xmax and corresponding infomask, which does not have the
6052 * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
6055 * Caller is expected to check the status of the updating transaction, if
6058 static TransactionId
6059 MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
6061 TransactionId update_xact = InvalidTransactionId;
6062 MultiXactMember *members;
6065 Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
6066 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6069 * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
6072 nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
6078 for (i = 0; i < nmembers; i++)
6080 /* Ignore lockers */
6081 if (!ISUPDATE_from_mxstatus(members[i].status))
6084 /* there can be at most one updater */
6085 Assert(update_xact == InvalidTransactionId);
6086 update_xact = members[i].xid;
6087 #ifndef USE_ASSERT_CHECKING
6090 * in an assert-enabled build, walk the whole array to ensure
6091 * there's no other updater.
6104 * HeapTupleGetUpdateXid
6105 * As above, but use a HeapTupleHeader
6107 * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
6108 * checking the hint bits.
6111 HeapTupleGetUpdateXid(HeapTupleHeader tuple)
6113 return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tuple),
6118 * Do_MultiXactIdWait
6119 * Actual implementation for the two functions below.
6121 * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
6122 * needed to ensure we only sleep on conflicting members, and the infomask is
6123 * used to optimize multixact access in case it's a lock-only multi); 'nowait'
6124 * indicates whether to use conditional lock acquisition, to allow callers to
6125 * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
6126 * context information for error messages. 'remaining', if not NULL, receives
6127 * the number of members that are still running, including any (non-aborted)
6128 * subtransactions of our own transaction.
6130 * We do this by sleeping on each member using XactLockTableWait. Any
6131 * members that belong to the current backend are *not* waited for, however;
6132 * this would not merely be useless but would lead to Assert failure inside
6133 * XactLockTableWait. By the time this returns, it is certain that all
6134 * transactions *of other backends* that were members of the MultiXactId
6135 * that conflict with the requested status are dead (and no new ones can have
6136 * been added, since it is not legal to add members to an existing
6139 * But by the time we finish sleeping, someone else may have changed the Xmax
6140 * of the containing tuple, so the caller needs to iterate on us somehow.
6142 * Note that in case we return false, the number of remaining members is
6143 * not to be trusted.
6146 Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
6147 uint16 infomask, bool nowait,
6148 Relation rel, ItemPointer ctid, XLTW_Oper oper,
6153 MultiXactMember *members;
6157 allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
6158 nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
6159 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6165 for (i = 0; i < nmembers; i++)
6167 TransactionId memxid = members[i].xid;
6168 MultiXactStatus memstatus = members[i].status;
6170 if (TransactionIdIsCurrentTransactionId(memxid))
6176 if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
6177 LOCKMODE_from_mxstatus(status)))
6179 if (remaining && TransactionIdIsInProgress(memxid))
6185 * This member conflicts with our multi, so we have to sleep (or
6186 * return failure, if asked to avoid waiting).
6188 * Note that we don't set up an error context callback ourselves,
6189 * but instead we pass the info down to XactLockTableWait. This
6190 * might seem a bit wasteful because the context is set up and
6191 * torn down for each member of the multixact, but in reality it
6192 * should be barely noticeable, and it avoids duplicate code.
6196 result = ConditionalXactLockTableWait(memxid);
6201 XactLockTableWait(memxid, rel, ctid, oper);
6208 *remaining = remain;
6215 * Sleep on a MultiXactId.
6217 * By the time we finish sleeping, someone else may have changed the Xmax
6218 * of the containing tuple, so the caller needs to iterate on us somehow.
6220 * We return (in *remaining, if not NULL) the number of members that are still
6221 * running, including any (non-aborted) subtransactions of our own transaction.
6224 MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
6225 Relation rel, ItemPointer ctid, XLTW_Oper oper,
6228 (void) Do_MultiXactIdWait(multi, status, infomask, false,
6229 rel, ctid, oper, remaining);
6233 * ConditionalMultiXactIdWait
6234 * As above, but only lock if we can get the lock without blocking.
6236 * By the time we finish sleeping, someone else may have changed the Xmax
6237 * of the containing tuple, so the caller needs to iterate on us somehow.
6239 * Returns true if the multixact is now all gone; returns false if some
6240 * transactions might still be running.
6242 * We return (in *remaining, if not NULL) the number of members that are still
6243 * running, including any (non-aborted) subtransactions of our own transaction.
6246 ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
6247 uint16 infomask, Relation rel, int *remaining)
6249 return Do_MultiXactIdWait(multi, status, infomask, true,
6250 rel, NULL, XLTW_None, remaining);
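/*
 * Editor's note (illustrative caller pattern, not part of the original
 * file): because the tuple's Xmax can change while we sleep, callers
 * generally cannot assume a single wait suffices.  A rough shape of the
 * retry loop, with hypothetical variable names (real callers also recheck
 * visibility and the infomask before giving up or proceeding):
 *
 *		for (;;)
 *		{
 *			xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
 *			infomask = tuple->t_data->t_infomask;
 *			if (!(infomask & HEAP_XMAX_IS_MULTI))
 *				break;
 *			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 *			MultiXactIdWait((MultiXactId) xmax, status, infomask,
 *							rel, &tuple->t_self, oper, NULL);
 *			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 *		}
 */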
6254 * heap_tuple_needs_freeze
6256 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
6257 * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
6259 * It doesn't matter whether the tuple is alive or dead; we are checking
6260 * to see if a tuple needs to be removed or frozen to avoid wraparound.
6262 * NB: Cannot rely on hint bits here, they might not be set after a crash or
6266 heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
6267 MultiXactId cutoff_multi, Buffer buf)
6271 xid = HeapTupleHeaderGetXmin(tuple);
6272 if (TransactionIdIsNormal(xid) &&
6273 TransactionIdPrecedes(xid, cutoff_xid))
6277 * The considerations for multixacts are complicated; look at
6278 * heap_freeze_tuple for justifications. This routine had better be in
6279 * sync with that one!
6281 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
6285 multi = HeapTupleHeaderGetRawXmax(tuple);
6286 if (!MultiXactIdIsValid(multi))
6288 /* no xmax set, ignore */
6291 else if (MultiXactIdPrecedes(multi, cutoff_multi))
6295 MultiXactMember *members;
6300 /* need to check whether any member of the mxact is too old */
6302 allow_old = !(tuple->t_infomask & HEAP_LOCK_MASK) &&
6303 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask);
6304 nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
6305 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
6307 for (i = 0; i < nmembers; i++)
6309 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6321 xid = HeapTupleHeaderGetRawXmax(tuple);
6322 if (TransactionIdIsNormal(xid) &&
6323 TransactionIdPrecedes(xid, cutoff_xid))
6327 if (tuple->t_infomask & HEAP_MOVED)
6329 xid = HeapTupleHeaderGetXvac(tuple);
6330 if (TransactionIdIsNormal(xid) &&
6331 TransactionIdPrecedes(xid, cutoff_xid))
6339 * heap_markpos - mark scan position
6343 heap_markpos(HeapScanDesc scan)
6345 /* Note: no locking manipulations needed */
6347 if (scan->rs_ctup.t_data != NULL)
6349 scan->rs_mctid = scan->rs_ctup.t_self;
6350 if (scan->rs_pageatatime)
6351 scan->rs_mindex = scan->rs_cindex;
6354 ItemPointerSetInvalid(&scan->rs_mctid);
6358 * heap_restrpos - restore position to marked location
6362 heap_restrpos(HeapScanDesc scan)
6364 /* XXX no amrestrpos checking that ammarkpos called */
6366 if (!ItemPointerIsValid(&scan->rs_mctid))
6368 scan->rs_ctup.t_data = NULL;
6371 * unpin scan buffers
6373 if (BufferIsValid(scan->rs_cbuf))
6374 ReleaseBuffer(scan->rs_cbuf);
6375 scan->rs_cbuf = InvalidBuffer;
6376 scan->rs_cblock = InvalidBlockNumber;
6377 scan->rs_inited = false;
6382 * If we reached end of scan, rs_inited will now be false. We must
6383 * reset it to true to keep heapgettup from doing the wrong thing.
6385 scan->rs_inited = true;
6386 scan->rs_ctup.t_self = scan->rs_mctid;
6387 if (scan->rs_pageatatime)
6389 scan->rs_cindex = scan->rs_mindex;
6390 heapgettup_pagemode(scan,
6391 NoMovementScanDirection,
6392 0, /* needn't recheck scan keys */
6397 NoMovementScanDirection,
6398 0, /* needn't recheck scan keys */
6404 * If 'tuple' contains any visible XID greater than latestRemovedXid,
6405 * ratchet forwards latestRemovedXid to the greatest one found.
6406 * This is used as the basis for generating Hot Standby conflicts, so
6407 * if a tuple was never visible then removing it should not conflict
6411 HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
6412 TransactionId *latestRemovedXid)
6414 TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
6415 TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
6416 TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
6418 if (tuple->t_infomask & HEAP_MOVED)
6420 if (TransactionIdPrecedes(*latestRemovedXid, xvac))
6421 *latestRemovedXid = xvac;
6425 * Ignore tuples inserted by an aborted transaction or if the tuple was
6426 * updated/deleted by the inserting transaction.
6428 * Look for a committed hint bit, or if no xmin bit is set, check clog.
6429 * This needs to work on both master and standby, where it is used to
6430 * assess btree delete records.
6432 if (HeapTupleHeaderXminCommitted(tuple) ||
6433 (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
6436 TransactionIdFollows(xmax, *latestRemovedXid))
6437 *latestRemovedXid = xmax;
6440 /* *latestRemovedXid may still be invalid at end */
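/*
 * Editor's note (worked example, not from the original source): given a
 * tuple with committed xmin = 500 and updating xmax = 520, and a current
 * *latestRemovedXid of 510, the test above advances *latestRemovedXid to
 * 520; for a tuple whose inserting transaction aborted, the value is left
 * untouched.
 */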
6444 * Perform XLogInsert to register a heap cleanup info message. These
6445 * messages are sent once per VACUUM and are required because
6446 * of the phasing of removal operations during a lazy VACUUM.
6447 * See comments for vacuum_log_cleanup_info().
6450 log_heap_cleanup_info(RelFileNode rnode, TransactionId latestRemovedXid)
6452 xl_heap_cleanup_info xlrec;
6457 xlrec.latestRemovedXid = latestRemovedXid;
6459 rdata.data = (char *) &xlrec;
6460 rdata.len = SizeOfHeapCleanupInfo;
6461 rdata.buffer = InvalidBuffer;
6464 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEANUP_INFO, &rdata);
6470 * Perform XLogInsert for a heap-clean operation. Caller must already
6471 * have modified the buffer and marked it dirty.
6473 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
6474 * zero-based tuple indexes. Now they are one-based like other uses
6477 * We also include latestRemovedXid, which is the greatest XID present in
6478 * the removed tuples. That allows recovery processing to cancel or wait
6479 * for long standby queries that can still see these tuples.
6482 log_heap_clean(Relation reln, Buffer buffer,
6483 OffsetNumber *redirected, int nredirected,
6484 OffsetNumber *nowdead, int ndead,
6485 OffsetNumber *nowunused, int nunused,
6486 TransactionId latestRemovedXid)
6488 xl_heap_clean xlrec;
6491 XLogRecData rdata[4];
6493 /* Caller should not call me on a non-WAL-logged relation */
6494 Assert(RelationNeedsWAL(reln));
6496 xlrec.node = reln->rd_node;
6497 xlrec.block = BufferGetBlockNumber(buffer);
6498 xlrec.latestRemovedXid = latestRemovedXid;
6499 xlrec.nredirected = nredirected;
6500 xlrec.ndead = ndead;
6502 rdata[0].data = (char *) &xlrec;
6503 rdata[0].len = SizeOfHeapClean;
6504 rdata[0].buffer = InvalidBuffer;
6505 rdata[0].next = &(rdata[1]);
6508 * The OffsetNumber arrays are not actually in the buffer, but we pretend
6509 * that they are. When XLogInsert stores the whole buffer, the offset
6510 * arrays need not be stored too. Note that even if all three arrays are
6511 * empty, we want to expose the buffer as a candidate for whole-page
6512 * storage, since this record type implies a defragmentation operation
6513 * even if no item pointers changed state.
6515 if (nredirected > 0)
6517 rdata[1].data = (char *) redirected;
6518 rdata[1].len = nredirected * sizeof(OffsetNumber) * 2;
6522 rdata[1].data = NULL;
6525 rdata[1].buffer = buffer;
6526 rdata[1].buffer_std = true;
6527 rdata[1].next = &(rdata[2]);
6531 rdata[2].data = (char *) nowdead;
6532 rdata[2].len = ndead * sizeof(OffsetNumber);
6536 rdata[2].data = NULL;
6539 rdata[2].buffer = buffer;
6540 rdata[2].buffer_std = true;
6541 rdata[2].next = &(rdata[3]);
6545 rdata[3].data = (char *) nowunused;
6546 rdata[3].len = nunused * sizeof(OffsetNumber);
6550 rdata[3].data = NULL;
6553 rdata[3].buffer = buffer;
6554 rdata[3].buffer_std = true;
6555 rdata[3].next = NULL;
6557 info = XLOG_HEAP2_CLEAN;
6558 recptr = XLogInsert(RM_HEAP2_ID, info, rdata);
6564 * Perform XLogInsert for a heap-freeze operation. Caller must have already
6565 * modified the buffer and marked it dirty.
6568 log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
6569 xl_heap_freeze_tuple *tuples, int ntuples)
6571 xl_heap_freeze_page xlrec;
6573 XLogRecData rdata[2];
6575 /* Caller should not call me on a non-WAL-logged relation */
6576 Assert(RelationNeedsWAL(reln));
6577 /* nor when there are no tuples to freeze */
6578 Assert(ntuples > 0);
6580 xlrec.node = reln->rd_node;
6581 xlrec.block = BufferGetBlockNumber(buffer);
6582 xlrec.cutoff_xid = cutoff_xid;
6583 xlrec.ntuples = ntuples;
6585 rdata[0].data = (char *) &xlrec;
6586 rdata[0].len = SizeOfHeapFreezePage;
6587 rdata[0].buffer = InvalidBuffer;
6588 rdata[0].next = &(rdata[1]);
6591 * The freeze plan array is not actually in the buffer, but pretend that
6592 * it is. When XLogInsert stores the whole buffer, the freeze plan need
6593 * not be stored too.
6595 rdata[1].data = (char *) tuples;
6596 rdata[1].len = ntuples * sizeof(xl_heap_freeze_tuple);
6597 rdata[1].buffer = buffer;
6598 rdata[1].buffer_std = true;
6599 rdata[1].next = NULL;
6601 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE, rdata);
6607 * Perform XLogInsert for a heap-visible operation. 'block' is the block
6608 * being marked all-visible, and vm_buffer is the buffer containing the
6609 * corresponding visibility map block. Both should have already been modified
6612 * If checksums are enabled, we also add the heap_buffer to the chain to
6613 * protect it from being torn.
6616 log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
6617 TransactionId cutoff_xid)
6619 xl_heap_visible xlrec;
6621 XLogRecData rdata[3];
6623 Assert(BufferIsValid(heap_buffer));
6624 Assert(BufferIsValid(vm_buffer));
6627 xlrec.block = BufferGetBlockNumber(heap_buffer);
6628 xlrec.cutoff_xid = cutoff_xid;
6630 rdata[0].data = (char *) &xlrec;
6631 rdata[0].len = SizeOfHeapVisible;
6632 rdata[0].buffer = InvalidBuffer;
6633 rdata[0].next = &(rdata[1]);
6635 rdata[1].data = NULL;
6637 rdata[1].buffer = vm_buffer;
6638 rdata[1].buffer_std = false;
6639 rdata[1].next = NULL;
6641 if (XLogHintBitIsNeeded())
6643 rdata[1].next = &(rdata[2]);
6645 rdata[2].data = NULL;
6647 rdata[2].buffer = heap_buffer;
6648 rdata[2].buffer_std = true;
6649 rdata[2].next = NULL;
6652 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE, rdata);
6658 * Perform XLogInsert for a heap-update operation. Caller must already
6659 * have modified the buffer(s) and marked them dirty.
6662 log_heap_update(Relation reln, Buffer oldbuf,
6663 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
6664 HeapTuple old_key_tuple,
6665 bool all_visible_cleared, bool new_all_visible_cleared)
6667 xl_heap_update xlrec;
6668 xl_heap_header_len xlhdr;
6669 xl_heap_header_len xlhdr_idx;
6671 uint16 prefix_suffix[2];
6672 uint16 prefixlen = 0,
6675 XLogRecData rdata[9];
6676 Page page = BufferGetPage(newbuf);
6677 bool need_tuple_data = RelationIsLogicallyLogged(reln);
6681 /* Caller should not call me on a non-WAL-logged relation */
6682 Assert(RelationNeedsWAL(reln));
6684 if (HeapTupleIsHeapOnly(newtup))
6685 info = XLOG_HEAP_HOT_UPDATE;
6687 info = XLOG_HEAP_UPDATE;
6690 * If the old and new tuple are on the same page, we only need to log the
6691 * parts of the new tuple that were changed. That saves on the amount of
6692 * WAL we need to write. Currently, we just count any unchanged bytes in
6693 * the beginning and end of the tuple. That's quick to check, and
6694 * perfectly covers the common case that only one field is updated.
6696 * We could do this even if the old and new tuple are on different pages,
6697 * but only if we don't make a full-page image of the old page, which is
6698 * difficult to know in advance. Also, if the old tuple is corrupt for
6699 * some reason, it would allow the corruption to propagate to the new page,
6700 * so it seems best to avoid that. Under the general assumption that most
6701 * updates tend to create the new tuple version on the same page, there
6702 * isn't much to be gained by doing this across pages anyway.
6704 * Skip this if we're taking a full-page image of the new page, as we
6705 * don't include the new tuple in the WAL record in that case. Also
6706 * disable if wal_level='logical', as logical decoding needs to be able to
6707 * read the whole new tuple from the WAL record alone.
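/*
 * Editor's note (worked example, not from the original source): if the old
 * tuple's data area is the 12 bytes "AAAABBBBCCCC" and the new one's is
 * "AAAAXXXXCCCC", the loops below compute prefixlen = 4 and suffixlen = 4,
 * so only the 4 changed bytes plus two 2-byte length fields are logged for
 * the data area instead of all 12 bytes (the tuple header is logged as
 * usual).
 */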
6709 if (oldbuf == newbuf && !need_tuple_data &&
6710 !XLogCheckBufferNeedsBackup(newbuf))
6712 char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
6713 char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
6714 int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
6715 int newlen = newtup->t_len - newtup->t_data->t_hoff;
6717 /* Check for common prefix between old and new tuple */
6718 for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
6720 if (newp[prefixlen] != oldp[prefixlen])
6725 * Storing the length of the prefix takes 2 bytes, so we need to save
6726 * at least 3 bytes or there's no point.
6731 /* Same for suffix */
6732 for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
6734 if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
6741 xlrec.target.node = reln->rd_node;
6742 xlrec.target.tid = oldtup->t_self;
6743 xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
6744 xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
6745 oldtup->t_data->t_infomask2);
6746 xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
6748 if (all_visible_cleared)
6749 xlrec.flags |= XLOG_HEAP_ALL_VISIBLE_CLEARED;
6750 xlrec.newtid = newtup->t_self;
6751 if (new_all_visible_cleared)
6752 xlrec.flags |= XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED;
6754 xlrec.flags |= XLOG_HEAP_PREFIX_FROM_OLD;
6756 xlrec.flags |= XLOG_HEAP_SUFFIX_FROM_OLD;
6758 /* If the new tuple is the first and only tuple on the page... */
6759 if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
6760 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
6762 info |= XLOG_HEAP_INIT_PAGE;
6763 newbufref = InvalidBuffer;
6768 rdata[0].data = NULL;
6770 rdata[0].buffer = oldbuf;
6771 rdata[0].buffer_std = true;
6772 rdata[0].next = &(rdata[1]);
6774 rdata[1].data = (char *) &xlrec;
6775 rdata[1].len = SizeOfHeapUpdate;
6776 rdata[1].buffer = InvalidBuffer;
6777 rdata[1].next = &(rdata[2]);
6779 /* prefix and/or suffix length fields */
6780 if (prefixlen > 0 || suffixlen > 0)
6782 if (prefixlen > 0 && suffixlen > 0)
6784 prefix_suffix[0] = prefixlen;
6785 prefix_suffix[1] = suffixlen;
6786 rdata[2].data = (char *) &prefix_suffix;
6787 rdata[2].len = 2 * sizeof(uint16);
6789 else if (prefixlen > 0)
6791 rdata[2].data = (char *) &prefixlen;
6792 rdata[2].len = sizeof(uint16);
6796 rdata[2].data = (char *) &suffixlen;
6797 rdata[2].len = sizeof(uint16);
6799 rdata[2].buffer = newbufref;
6800 rdata[2].buffer_std = true;
6801 rdata[2].next = &(rdata[3]);
6807 xlhdr.header.t_infomask2 = newtup->t_data->t_infomask2;
6808 xlhdr.header.t_infomask = newtup->t_data->t_infomask;
6809 xlhdr.header.t_hoff = newtup->t_data->t_hoff;
6810 Assert(offsetof(HeapTupleHeaderData, t_bits) + prefixlen + suffixlen <= newtup->t_len);
6811 xlhdr.t_len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - prefixlen - suffixlen;
6814 * As with insert records, we need not store this rdata segment if we
6815 * decide to store the whole buffer instead, unless we're doing logical
6818 rdata[nr].data = (char *) &xlhdr;
6819 rdata[nr].len = SizeOfHeapHeaderLen;
6820 rdata[nr].buffer = need_tuple_data ? InvalidBuffer : newbufref;
6821 rdata[nr].buffer_std = true;
6822 rdata[nr].next = &(rdata[nr + 1]);
6826 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
6828 * The 'data' doesn't include the common prefix or suffix.
6832 rdata[nr].data = ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits);
6833 rdata[nr].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - suffixlen;
6834 rdata[nr].buffer = need_tuple_data ? InvalidBuffer : newbufref;
6835 rdata[nr].buffer_std = true;
6836 rdata[nr].next = NULL;
6842 * Have to write the null bitmap and data after the common prefix as
6843 * two separate rdata entries.
6845 /* bitmap [+ padding] [+ oid] */
6846 if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) > 0)
6848 rdata[nr - 1].next = &(rdata[nr]);
6849 rdata[nr].data = ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits);
6850 rdata[nr].len = newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits);
6851 rdata[nr].buffer = need_tuple_data ? InvalidBuffer : newbufref;
6852 rdata[nr].buffer_std = true;
6853 rdata[nr].next = NULL;
6857 /* data after common prefix */
6858 rdata[nr - 1].next = &(rdata[nr]);
6859 rdata[nr].data = ((char *) newtup->t_data) + newtup->t_data->t_hoff + prefixlen;
6860 rdata[nr].len = newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen;
6861 rdata[nr].buffer = need_tuple_data ? InvalidBuffer : newbufref;
6862 rdata[nr].buffer_std = true;
6863 rdata[nr].next = NULL;
6868 * Separate storage for the FPW buffer reference of the new page in the
6869 * wal_level >= logical case.
6871 if (need_tuple_data)
6873 rdata[nr - 1].next = &(rdata[nr]);
6875 rdata[nr].data = NULL;
6877 rdata[nr].buffer = newbufref;
6878 rdata[nr].buffer_std = true;
6879 rdata[nr].next = NULL;
6882 xlrec.flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
6884 /* We need to log a tuple identity */
6887 /* don't really need this, but it's more convenient to decode */
6888 xlhdr_idx.header.t_infomask2 = old_key_tuple->t_data->t_infomask2;
6889 xlhdr_idx.header.t_infomask = old_key_tuple->t_data->t_infomask;
6890 xlhdr_idx.header.t_hoff = old_key_tuple->t_data->t_hoff;
6891 xlhdr_idx.t_len = old_key_tuple->t_len;
6893 rdata[nr - 1].next = &(rdata[nr]);
6894 rdata[nr].data = (char *) &xlhdr_idx;
6895 rdata[nr].len = SizeOfHeapHeaderLen;
6896 rdata[nr].buffer = InvalidBuffer;
6897 rdata[nr].next = &(rdata[nr + 1]);
6900 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
6901 rdata[nr].data = (char *) old_key_tuple->t_data
6902 + offsetof(HeapTupleHeaderData, t_bits);
6903 rdata[nr].len = old_key_tuple->t_len
6904 - offsetof(HeapTupleHeaderData, t_bits);
6905 rdata[nr].buffer = InvalidBuffer;
6906 rdata[nr].next = NULL;
6909 if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
6910 xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_TUPLE;
6912 xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_KEY;
6916 recptr = XLogInsert(RM_HEAP_ID, info, rdata);
6922 * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
6924 * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog
6928 log_heap_new_cid(Relation relation, HeapTuple tup)
6930 xl_heap_new_cid xlrec;
6933 XLogRecData rdata[1];
6934 HeapTupleHeader hdr = tup->t_data;
6936 Assert(ItemPointerIsValid(&tup->t_self));
6937 Assert(tup->t_tableOid != InvalidOid);
6939 xlrec.top_xid = GetTopTransactionId();
6940 xlrec.target.node = relation->rd_node;
6941 xlrec.target.tid = tup->t_self;
6944 * If the tuple got inserted & deleted in the same TX, we definitely have a
6945 * combocid; set cmin and cmax.
6947 if (hdr->t_infomask & HEAP_COMBOCID)
6949 Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
6950 Assert(!HeapTupleHeaderXminInvalid(hdr));
6951 xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
6952 xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
6953 xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
6955 /* No combocid, so only cmin or cmax can be set by this TX */
6961 * We need to check for LOCK ONLY because multixacts might be
6962 * transferred to the new tuple in case of FOR KEY SHARE updates, in
6963 * which case there will be an xmax, although the tuple just got
6966 if (hdr->t_infomask & HEAP_XMAX_INVALID ||
6967 HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
6969 xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
6970 xlrec.cmax = InvalidCommandId;
6972 /* Tuple from a different tx updated or deleted. */
6975 xlrec.cmin = InvalidCommandId;
6976 xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
6979 xlrec.combocid = InvalidCommandId;
6982 rdata[0].data = (char *) &xlrec;
6983 rdata[0].len = SizeOfHeapNewCid;
6984 rdata[0].buffer = InvalidBuffer;
6985 rdata[0].next = NULL;
6987 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID, rdata);
6993 * Build a heap tuple representing the configured REPLICA IDENTITY to represent
6994 * the old tuple in an UPDATE or DELETE.
6996 * Returns NULL if there's no need to log an identity or if there's no suitable
6997 * key in the Relation relation.
7000 ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *copy)
7002 TupleDesc desc = RelationGetDescr(relation);
7006 char replident = relation->rd_rel->relreplident;
7007 HeapTuple key_tuple = NULL;
7008 bool nulls[MaxHeapAttributeNumber];
7009 Datum values[MaxHeapAttributeNumber];
7014 if (!RelationIsLogicallyLogged(relation))
7017 if (replident == REPLICA_IDENTITY_NOTHING)
7020 if (replident == REPLICA_IDENTITY_FULL)
7023 * When logging the entire old tuple, it very well could contain
7024 * toasted columns. If so, force them to be inlined.
7026 if (HeapTupleHasExternal(tp))
7029 tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7034 /* if the key hasn't changed and we're only logging the key, we're done */
7038 /* find the replica identity index */
7039 replidindex = RelationGetReplicaIndex(relation);
7040 if (!OidIsValid(replidindex))
7042 elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7043 RelationGetRelationName(relation));
7047 idx_rel = RelationIdGetRelation(replidindex);
7048 idx_desc = RelationGetDescr(idx_rel);
7050 /* deform tuple, so we have fast access to columns */
7051 heap_deform_tuple(tp, desc, values, nulls);
7053 /* set all columns to NULL, regardless of whether they actually are */
7054 memset(nulls, 1, sizeof(nulls));
7057 * Now set all columns contained in the index to NOT NULL; they cannot
7058 * currently be NULL.
7060 for (natt = 0; natt < idx_desc->natts; natt++)
7062 int attno = idx_rel->rd_index->indkey.values[natt];
7067 * The OID column can appear in an index definition, but that's
7068 * OK, because we always copy the OID if present (see below). Other
7069 * system columns may not.
7071 if (attno == ObjectIdAttributeNumber)
7073 elog(ERROR, "system column in index");
7075 nulls[attno - 1] = false;
7078 key_tuple = heap_form_tuple(desc, values, nulls);
7080 RelationClose(idx_rel);
7083 * Always copy oids if the table has them, even if not included in the
7084 * index. The space in the logged tuple is used anyway, so there's little
7085 * point in not including the information.
7087 if (relation->rd_rel->relhasoids)
7088 HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
7091 * If the tuple, which by here only contains indexed columns, still has
7092 * toasted columns, force them to be inlined. This is somewhat unlikely
7093 * since there are limits on the size of indexed columns, so we don't
7094 * duplicate toast_flatten_tuple()'s functionality in the above loop over
7095 * the indexed columns, even if it would be more efficient.
7097 if (HeapTupleHasExternal(key_tuple))
7099 HeapTuple oldtup = key_tuple;
7101 key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
7102 heap_freetuple(oldtup);
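/*
 * Editor's note (illustrative example, not from the original source): for a
 * table whose replica identity is a unique index over a single column "id",
 * the loop above leaves only "id" non-NULL in key_tuple (plus the OID, if
 * the table has OIDs), and that stripped-down tuple is what gets attached
 * to the update/delete WAL record as the old key.
 */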
7109 * Handles CLEANUP_INFO
7112 heap_xlog_cleanup_info(XLogRecPtr lsn, XLogRecord *record)
7114 xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) XLogRecGetData(record);
7117 ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
7120 * Actual operation is a no-op. Record type exists to provide a means for
7121 * conflict processing to occur before we begin index vacuum actions. See
7122 * vacuumlazy.c and also comments in btvacuumpage()
7125 /* Backup blocks are not used in cleanup_info records */
7126 Assert(!(record->xl_info & XLR_BKP_BLOCK_MASK));
7130 * Handles HEAP2_CLEAN record type
7133 heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
7135 xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
7140 XLogRedoAction action;
7142 rnode = xlrec->node;
7143 blkno = xlrec->block;
7146 * We're about to remove tuples. In Hot Standby mode, ensure that there are
7147 * no queries running for which the removed tuples are still visible.
7149 * Not all HEAP2_CLEAN records remove tuples with xids, so we only want to
7150 * conflict on the records that cause MVCC failures for user queries. If
7151 * latestRemovedXid is invalid, skip conflict processing.
7153 if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
7154 ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
7157 * If we have a full-page image, restore it (using a cleanup lock) and
7160 action = XLogReadBufferForRedoExtended(lsn, record, 0,
7161 rnode, MAIN_FORKNUM, blkno,
7162 RBM_NORMAL, true, &buffer);
7163 if (action == BLK_NEEDS_REDO)
7165 Page page = (Page) BufferGetPage(buffer);
7167 OffsetNumber *redirected;
7168 OffsetNumber *nowdead;
7169 OffsetNumber *nowunused;
7174 nredirected = xlrec->nredirected;
7175 ndead = xlrec->ndead;
7176 end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
7177 redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
7178 nowdead = redirected + (nredirected * 2);
7179 nowunused = nowdead + ndead;
7180 nunused = (end - nowunused);
7181 Assert(nunused >= 0);
7183 /* Update all item pointers per the record, and repair fragmentation */
7184 heap_page_prune_execute(buffer,
7185 redirected, nredirected,
7187 nowunused, nunused);
7189 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7192 * Note: we don't worry about updating the page's prunability hints.
7193 * At worst this will cause an extra prune cycle to occur soon.
7196 PageSetLSN(page, lsn);
7197 MarkBufferDirty(buffer);
7199 if (BufferIsValid(buffer))
7200 UnlockReleaseBuffer(buffer);
7203 * Update the FSM as well.
7205 * XXX: Don't do this if the page was restored from a full-page image. We
7206 * don't bother to update the FSM in that case; it doesn't need to be
7207 * totally accurate anyway.
7209 if (action == BLK_NEEDS_REDO)
7210 XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
7214 * Replay XLOG_HEAP2_VISIBLE record.
7216 * The critical integrity requirement here is that we must never end up with
7217 * a situation where the visibility map bit is set, and the page-level
7218 * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
7219 * page modification would fail to clear the visibility map bit.
7222 heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
7224 xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
7229 XLogRedoAction action;
7231 rnode = xlrec->node;
7232 blkno = xlrec->block;
7235 * If there are any Hot Standby transactions running that have an xmin
7236 * horizon old enough that this page isn't all-visible for them, they
7237 * might incorrectly decide that an index-only scan can skip a heap fetch.
7239 * NB: It might be better to throw some kind of "soft" conflict here that
7240 * forces any index-only scan that is in flight to perform heap fetches,
7241 * rather than killing the transaction outright.
7244 ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
7247 * Read the heap page, if it still exists. If the heap file has been dropped or
7248 * truncated later in recovery, we don't need to update the page, but we'd
7249 * better still update the visibility map.
7251 action = XLogReadBufferForRedo(lsn, record, 1, rnode, blkno, &buffer);
7252 if (action == BLK_NEEDS_REDO)
7255 * We don't bump the LSN of the heap page when setting the visibility
7256 * map bit (unless checksums are enabled, in which case we must),
7257 * because that would generate an unworkable volume of full-page
7258 * writes. This exposes us to torn page hazards, but since we're not
7259 * inspecting the existing page contents in any way, we don't care.
7261 * However, all operations that clear the visibility map bit *do* bump
7262 * the LSN, and those operations will only be replayed if the XLOG LSN
7263 * follows the page LSN. Thus, if the page LSN has advanced past our
7264 * XLOG record's LSN, we mustn't mark the page all-visible, because
7265 * the subsequent update won't be replayed to clear the flag.
7267 page = BufferGetPage(buffer);
7268 PageSetAllVisible(page);
7269 MarkBufferDirty(buffer);
7271 else if (action == BLK_RESTORED)
7274 * If heap block was backed up, restore it. This can only happen with
7275 * checksums enabled.
7277 Assert(DataChecksumsEnabled());
7279 if (BufferIsValid(buffer))
7280 UnlockReleaseBuffer(buffer);
7283 * Even if we skipped the heap page update due to the LSN interlock, it's
7284 * still safe to update the visibility map. Any WAL record that clears
7285 * the visibility map bit does so before checking the page LSN, so any
7286 * bits that need to be cleared will still be cleared.
7288 if (record->xl_info & XLR_BKP_BLOCK(0))
7289 (void) RestoreBackupBlock(lsn, record, 0, false, false);
7293 Buffer vmbuffer = InvalidBuffer;
7295 reln = CreateFakeRelcacheEntry(rnode);
7296 visibilitymap_pin(reln, blkno, &vmbuffer);
7299 * Don't set the bit if replay has already passed this point.
7301 * It might be safe to do this unconditionally; if replay has passed
7302 * this point, we'll replay at least as far this time as we did
7303 * before, and if this bit needs to be cleared, the record responsible
7304 * for doing so should be replayed again, and will clear it. For right
7305 * now, out of an abundance of conservatism, we use the same test here
7306 * we did for the heap page. If this results in a dropped bit, no
7307 * real harm is done; and the next VACUUM will fix it.
7309 if (lsn > PageGetLSN(BufferGetPage(vmbuffer)))
7310 visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
7313 ReleaseBuffer(vmbuffer);
7314 FreeFakeRelcacheEntry(reln);
7319 * Replay XLOG_HEAP2_FREEZE_PAGE records
7322 heap_xlog_freeze_page(XLogRecPtr lsn, XLogRecord *record)
7324 xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) XLogRecGetData(record);
7325 TransactionId cutoff_xid = xlrec->cutoff_xid;
7331 * In Hot Standby mode, ensure that there are no queries running which still
7332 * consider the frozen xids as running.
7335 ResolveRecoveryConflictWithSnapshot(cutoff_xid, xlrec->node);
7337 if (XLogReadBufferForRedo(lsn, record, 0, xlrec->node, xlrec->block,
7338 &buffer) == BLK_NEEDS_REDO)
7340 page = BufferGetPage(buffer);
7342 /* now execute freeze plan for each frozen tuple */
7343 for (ntup = 0; ntup < xlrec->ntuples; ntup++)
7345 xl_heap_freeze_tuple *xlrec_tp;
7347 HeapTupleHeader tuple;
7349 xlrec_tp = &xlrec->tuples[ntup];
7350 lp = PageGetItemId(page, xlrec_tp->offset); /* offsets are one-based */
7351 tuple = (HeapTupleHeader) PageGetItem(page, lp);
7353 heap_execute_freeze_tuple(tuple, xlrec_tp);
7356 PageSetLSN(page, lsn);
7357 MarkBufferDirty(buffer);
7359 if (BufferIsValid(buffer))
7360 UnlockReleaseBuffer(buffer);
7364 * Given an "infobits" field from an XLog record, set the correct bits in the
7365 * given infomask and infomask2 for the tuple touched by the record.
7367 * (This is the reverse of compute_infobits).
7370 fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
7372 *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
7373 HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
7374 *infomask2 &= ~HEAP_KEYS_UPDATED;
7376 if (infobits & XLHL_XMAX_IS_MULTI)
7377 *infomask |= HEAP_XMAX_IS_MULTI;
7378 if (infobits & XLHL_XMAX_LOCK_ONLY)
7379 *infomask |= HEAP_XMAX_LOCK_ONLY;
7380 if (infobits & XLHL_XMAX_EXCL_LOCK)
7381 *infomask |= HEAP_XMAX_EXCL_LOCK;
7382 /* note HEAP_XMAX_SHR_LOCK isn't considered here */
7383 if (infobits & XLHL_XMAX_KEYSHR_LOCK)
7384 *infomask |= HEAP_XMAX_KEYSHR_LOCK;
7386 if (infobits & XLHL_KEYS_UPDATED)
7387 *infomask2 |= HEAP_KEYS_UPDATED;
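/*
 * Editor's note (worked example, not from the original source): an infobits
 * value of XLHL_XMAX_IS_MULTI | XLHL_KEYS_UPDATED produces
 * HEAP_XMAX_IS_MULTI in *infomask and HEAP_KEYS_UPDATED in *infomask2,
 * with the lock-strength bits left cleared.
 */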
7391 heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
7393 xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
7396 OffsetNumber offnum;
7398 HeapTupleHeader htup;
7400 RelFileNode target_node;
7402 blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
7403 target_node = xlrec->target.node;
7406 * The visibility map may need to be fixed even if the heap page is
7407 * already up-to-date.
7409 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7411 Relation reln = CreateFakeRelcacheEntry(target_node);
7412 Buffer vmbuffer = InvalidBuffer;
7414 visibilitymap_pin(reln, blkno, &vmbuffer);
7415 visibilitymap_clear(reln, blkno, vmbuffer);
7416 ReleaseBuffer(vmbuffer);
7417 FreeFakeRelcacheEntry(reln);
7420 if (XLogReadBufferForRedo(lsn, record, 0, target_node, blkno, &buffer)
7423 page = (Page) BufferGetPage(buffer);
7425 offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
7426 if (PageGetMaxOffsetNumber(page) >= offnum)
7427 lp = PageGetItemId(page, offnum);
7429 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
7430 elog(PANIC, "heap_delete_redo: invalid lp");
7432 htup = (HeapTupleHeader) PageGetItem(page, lp);
7434 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
7435 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7436 HeapTupleHeaderClearHotUpdated(htup);
7437 fix_infomask_from_infobits(xlrec->infobits_set,
7438 &htup->t_infomask, &htup->t_infomask2);
7439 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
7440 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
7442 /* Mark the page as a candidate for pruning */
7443 PageSetPrunable(page, record->xl_xid);
7445 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7446 PageClearAllVisible(page);
7448 /* Make sure there is no forward chain link in t_ctid */
7449 htup->t_ctid = xlrec->target.tid;
7450 PageSetLSN(page, lsn);
7451 MarkBufferDirty(buffer);
7453 if (BufferIsValid(buffer))
7454 UnlockReleaseBuffer(buffer);
7458 heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
7460 xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
7463 OffsetNumber offnum;
7466 HeapTupleHeaderData hdr;
7467 char data[MaxHeapTupleSize];
7469 HeapTupleHeader htup;
7470 xl_heap_header xlhdr;
7473 RelFileNode target_node;
7475 XLogRedoAction action;
7477 target_node = xlrec->target.node;
7478 blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
7481 * The visibility map may need to be fixed even if the heap page is
7482 * already up-to-date.
7484 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7486 Relation reln = CreateFakeRelcacheEntry(target_node);
7487 Buffer vmbuffer = InvalidBuffer;
7489 visibilitymap_pin(reln, blkno, &vmbuffer);
7490 visibilitymap_clear(reln, blkno, vmbuffer);
7491 ReleaseBuffer(vmbuffer);
7492 FreeFakeRelcacheEntry(reln);
7496 * If we inserted the first and only tuple on the page, re-initialize
7497 * the page from scratch.
7499 if (record->xl_info & XLOG_HEAP_INIT_PAGE)
7501 XLogReadBufferForRedoExtended(lsn, record, 0,
7502 target_node, MAIN_FORKNUM, blkno,
7503 RBM_ZERO, false, &buffer);
7504 page = BufferGetPage(buffer);
7505 PageInit(page, BufferGetPageSize(buffer), 0);
7506 action = BLK_NEEDS_REDO;
7509 action = XLogReadBufferForRedo(lsn, record, 0, target_node, blkno,
7512 if (action == BLK_NEEDS_REDO)
7514 page = BufferGetPage(buffer);
7516 offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
7517 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
7518 elog(PANIC, "heap_insert_redo: invalid max offset number");
7520 newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
7521 Assert(newlen <= MaxHeapTupleSize);
7522 memcpy((char *) &xlhdr,
7523 (char *) xlrec + SizeOfHeapInsert,
7526 MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
7527 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
7528 memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
7529 (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
7531 newlen += offsetof(HeapTupleHeaderData, t_bits);
7532 htup->t_infomask2 = xlhdr.t_infomask2;
7533 htup->t_infomask = xlhdr.t_infomask;
7534 htup->t_hoff = xlhdr.t_hoff;
7535 HeapTupleHeaderSetXmin(htup, record->xl_xid);
7536 HeapTupleHeaderSetCmin(htup, FirstCommandId);
7537 htup->t_ctid = xlrec->target.tid;
7539 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
7540 if (offnum == InvalidOffsetNumber)
7541 elog(PANIC, "heap_insert_redo: failed to add tuple");
7543 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7545 PageSetLSN(page, lsn);
7547 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7548 PageClearAllVisible(page);
7550 MarkBufferDirty(buffer);
7552 if (BufferIsValid(buffer))
7553 UnlockReleaseBuffer(buffer);
7556 * If the page is running low on free space, update the FSM as well.
7557 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
7558 * better than that without knowing the fill-factor for the table.
7560 * XXX: Don't do this if the page was restored from a full-page image. We
7561 * don't bother to update the FSM in that case; it doesn't need to be
7562 * totally accurate anyway.
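/*
 * Editor's note (illustrative arithmetic, not from the original source):
 * with the default BLCKSZ of 8192 bytes, the "less than 20%" threshold
 * below works out to 8192 / 5 = 1638 bytes of free space.
 */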
7564 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
7565 XLogRecordPageWithFreeSpace(xlrec->target.node, blkno, freespace);
7569 * Handles MULTI_INSERT record type.
7572 heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
7574 char *recdata = XLogRecGetData(record);
7575 xl_heap_multi_insert *xlrec;
7582 HeapTupleHeaderData hdr;
7583 char data[MaxHeapTupleSize];
7585 HeapTupleHeader htup;
7589 bool isinit = (record->xl_info & XLOG_HEAP_INIT_PAGE) != 0;
7590 XLogRedoAction action;
7593 * Insertion doesn't overwrite MVCC data, so no conflict processing is
7597 xlrec = (xl_heap_multi_insert *) recdata;
7598 recdata += SizeOfHeapMultiInsert;
7600 rnode = xlrec->node;
7601 blkno = xlrec->blkno;
7604 * If we're reinitializing the page, the tuples are stored in order from
7605 * FirstOffsetNumber. Otherwise there's an array of offsets in the WAL
7609 recdata += sizeof(OffsetNumber) * xlrec->ntuples;
7612 * The visibility map may need to be fixed even if the heap page is
7613 * already up-to-date.
7615 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7617 Relation reln = CreateFakeRelcacheEntry(rnode);
7618 Buffer vmbuffer = InvalidBuffer;
7620 visibilitymap_pin(reln, blkno, &vmbuffer);
7621 visibilitymap_clear(reln, blkno, vmbuffer);
7622 ReleaseBuffer(vmbuffer);
7623 FreeFakeRelcacheEntry(reln);
7628 XLogReadBufferForRedoExtended(lsn, record, 0,
7629 rnode, MAIN_FORKNUM, blkno,
7630 RBM_ZERO, false, &buffer);
7631 page = BufferGetPage(buffer);
7632 PageInit(page, BufferGetPageSize(buffer), 0);
7633 action = BLK_NEEDS_REDO;
7636 action = XLogReadBufferForRedo(lsn, record, 0, rnode, blkno, &buffer);
7638 if (action == BLK_NEEDS_REDO)
7640 page = BufferGetPage(buffer);
7641 for (i = 0; i < xlrec->ntuples; i++)
7643 OffsetNumber offnum;
7644 xl_multi_insert_tuple *xlhdr;
7647 offnum = FirstOffsetNumber + i;
7649 offnum = xlrec->offsets[i];
7650 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
7651 elog(PANIC, "heap_multi_insert_redo: invalid max offset number");
7653 xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(recdata);
7654 recdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
7656 newlen = xlhdr->datalen;
7657 Assert(newlen <= MaxHeapTupleSize);
7659 MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
7660 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
7661 memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
7666 newlen += offsetof(HeapTupleHeaderData, t_bits);
7667 htup->t_infomask2 = xlhdr->t_infomask2;
7668 htup->t_infomask = xlhdr->t_infomask;
7669 htup->t_hoff = xlhdr->t_hoff;
7670 HeapTupleHeaderSetXmin(htup, record->xl_xid);
7671 HeapTupleHeaderSetCmin(htup, FirstCommandId);
7672 ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
7673 ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
7675 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
7676 if (offnum == InvalidOffsetNumber)
7677 elog(PANIC, "heap_multi_insert_redo: failed to add tuple");
7680 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7682 PageSetLSN(page, lsn);
7684 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7685 PageClearAllVisible(page);
7687 MarkBufferDirty(buffer);
7689 if (BufferIsValid(buffer))
7690 UnlockReleaseBuffer(buffer);
7693 * If the page is running low on free space, update the FSM as well.
7694 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
7695 * better than that without knowing the fill-factor for the table.
7697 * XXX: Don't do this if the page was restored from a full-page image. We
7698 * don't bother to update the FSM in that case; it doesn't need to be
7699 * totally accurate anyway.
7701 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
7702 XLogRecordPageWithFreeSpace(xlrec->node, blkno, freespace);
7706 * Handles UPDATE and HOT_UPDATE
7709 heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
7711 xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
7718 OffsetNumber offnum;
7720 HeapTupleData oldtup;
7721 HeapTupleHeader htup;
7723 uint16 prefixlen = 0,
7728 HeapTupleHeaderData hdr;
7729 char data[MaxHeapTupleSize];
7731 xl_heap_header_len xlhdr;
7734 XLogRedoAction oldaction;
7735 XLogRedoAction newaction;
7737 /* initialize to keep the compiler quiet */
7738 oldtup.t_data = NULL;
7741 rnode = xlrec->target.node;
7742 newblk = ItemPointerGetBlockNumber(&xlrec->newtid);
7743 oldblk = ItemPointerGetBlockNumber(&xlrec->target.tid);
7746 * The visibility map may need to be fixed even if the heap page is
7747 * already up-to-date.
7749 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7751 Relation reln = CreateFakeRelcacheEntry(rnode);
7752 Buffer vmbuffer = InvalidBuffer;
7754 visibilitymap_pin(reln, oldblk, &vmbuffer);
7755 visibilitymap_clear(reln, oldblk, vmbuffer);
7756 ReleaseBuffer(vmbuffer);
7757 FreeFakeRelcacheEntry(reln);
7761 * In normal operation, it is important to lock the two pages in
7762 * page-number order, to avoid possible deadlocks against other update
7763 * operations going the other way. However, during WAL replay there can
7764 * be no other update happening, so we don't need to worry about that. But
7765 * we *do* need to worry that we don't expose an inconsistent state to Hot
7766 * Standby queries --- so the original page can't be unlocked before we've
7767 * added the new tuple to the new page.
7770 /* Deal with old tuple version */
7771 oldaction = XLogReadBufferForRedo(lsn, record, 0, rnode, oldblk, &obuffer);
7772 if (oldaction == BLK_NEEDS_REDO)
7774 page = (Page) BufferGetPage(obuffer);
7776 offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
7777 if (PageGetMaxOffsetNumber(page) >= offnum)
7778 lp = PageGetItemId(page, offnum);
7780 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
7781 elog(PANIC, "heap_update_redo: invalid lp");
7783 htup = (HeapTupleHeader) PageGetItem(page, lp);
7785 oldtup.t_data = htup;
7786 oldtup.t_len = ItemIdGetLength(lp);
7788 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
7789 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7791 HeapTupleHeaderSetHotUpdated(htup);
7793 HeapTupleHeaderClearHotUpdated(htup);
7794 fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
7795 &htup->t_infomask2);
7796 HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
7797 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
7798 /* Set forward chain link in t_ctid */
7799 htup->t_ctid = xlrec->newtid;
7801 /* Mark the page as a candidate for pruning */
7802 PageSetPrunable(page, record->xl_xid);
7804 if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
7805 PageClearAllVisible(page);
7807 PageSetLSN(page, lsn);
7808 MarkBufferDirty(obuffer);
7812 * Read the page the new tuple goes into, if different from old.
7814 if (oldblk == newblk)
7817 newaction = oldaction;
7819 else if (record->xl_info & XLOG_HEAP_INIT_PAGE)
7821 XLogReadBufferForRedoExtended(lsn, record, 1,
7822 rnode, MAIN_FORKNUM, newblk,
7823 RBM_ZERO, false, &nbuffer);
7824 page = (Page) BufferGetPage(nbuffer);
7825 PageInit(page, BufferGetPageSize(nbuffer), 0);
7826 newaction = BLK_NEEDS_REDO;
7829 newaction = XLogReadBufferForRedo(lsn, record, 1, rnode, newblk,
7833 * The visibility map may need to be fixed even if the heap page is
7834 * already up-to-date.
7836 if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
7838 Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
7839 Buffer vmbuffer = InvalidBuffer;
7841 visibilitymap_pin(reln, newblk, &vmbuffer);
7842 visibilitymap_clear(reln, newblk, vmbuffer);
7843 ReleaseBuffer(vmbuffer);
7844 FreeFakeRelcacheEntry(reln);
7847 /* Deal with new tuple */
7848 if (newaction == BLK_NEEDS_REDO)
7850 page = (Page) BufferGetPage(nbuffer);
7852 offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
7853 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
7854 elog(PANIC, "heap_update_redo: invalid max offset number");
7856 recdata = (char *) xlrec + SizeOfHeapUpdate;
7858 if (xlrec->flags & XLOG_HEAP_PREFIX_FROM_OLD)
7860 Assert(newblk == oldblk);
7861 memcpy(&prefixlen, recdata, sizeof(uint16));
7862 recdata += sizeof(uint16);
7864 if (xlrec->flags & XLOG_HEAP_SUFFIX_FROM_OLD)
7866 Assert(newblk == oldblk);
7867 memcpy(&suffixlen, recdata, sizeof(uint16));
7868 recdata += sizeof(uint16);
7871 memcpy((char *) &xlhdr, recdata, SizeOfHeapHeaderLen);
7872 recdata += SizeOfHeapHeaderLen;
7874 Assert(xlhdr.t_len + prefixlen + suffixlen <= MaxHeapTupleSize);
7876 MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
7879 * Reconstruct the new tuple using the prefix and/or suffix from the
7880 * old tuple, and the data stored in the WAL record.
7882 newp = (char *) htup + offsetof(HeapTupleHeaderData, t_bits);
7887 /* copy bitmap [+ padding] [+ oid] from WAL record */
7888 len = xlhdr.header.t_hoff - offsetof(HeapTupleHeaderData, t_bits);
7889 memcpy(newp, recdata, len);
7893 /* copy prefix from old tuple */
7894 memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
7897 /* copy new tuple data from WAL record */
7898 len = xlhdr.t_len - (xlhdr.header.t_hoff - offsetof(HeapTupleHeaderData, t_bits));
7899 memcpy(newp, recdata, len);
7906 * copy bitmap [+ padding] [+ oid] + data from record, all in one
7909 memcpy(newp, recdata, xlhdr.t_len);
7910 recdata += xlhdr.t_len;
7911 newp += xlhdr.t_len;
7913 /* copy suffix from old tuple */
7915 memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
7917 newlen = offsetof(HeapTupleHeaderData, t_bits) + xlhdr.t_len + prefixlen + suffixlen;
7918 htup->t_infomask2 = xlhdr.header.t_infomask2;
7919 htup->t_infomask = xlhdr.header.t_infomask;
7920 htup->t_hoff = xlhdr.header.t_hoff;
7922 HeapTupleHeaderSetXmin(htup, record->xl_xid);
7923 HeapTupleHeaderSetCmin(htup, FirstCommandId);
7924 HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
7925 /* Make sure there is no forward chain link in t_ctid */
7926 htup->t_ctid = xlrec->newtid;
7928 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
7929 if (offnum == InvalidOffsetNumber)
7930 elog(PANIC, "heap_update_redo: failed to add tuple");
7932 if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
7933 PageClearAllVisible(page);
7935 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7937 PageSetLSN(page, lsn);
7938 MarkBufferDirty(nbuffer);
7940 if (BufferIsValid(nbuffer) && nbuffer != obuffer)
7941 UnlockReleaseBuffer(nbuffer);
7942 if (BufferIsValid(obuffer))
7943 UnlockReleaseBuffer(obuffer);
7946 * If the new page is running low on free space, update the FSM as well.
7947 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
7948 * better than that without knowing the fill-factor for the table.
7950 * However, don't update the FSM on HOT updates, because after crash
7951 * recovery, either the old or the new tuple will certainly be dead and
7952 * prunable. After pruning, the page will have roughly as much free space
7953 * as it did before the update, assuming the new tuple is about the same
7954 * size as the old one.
7956 * XXX: Don't do this if the page was restored from a full-page image. We
7957 * don't bother to update the FSM in that case; it doesn't need to be
7958 * totally accurate anyway.
7960 if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
7961 XLogRecordPageWithFreeSpace(xlrec->target.node,
7962 ItemPointerGetBlockNumber(&(xlrec->newtid)),
7967 heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
7969 xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
7972 OffsetNumber offnum;
7974 HeapTupleHeader htup;
7976 if (XLogReadBufferForRedo(lsn, record, 0, xlrec->target.node,
7977 ItemPointerGetBlockNumber(&xlrec->target.tid),
7978 &buffer) == BLK_NEEDS_REDO)
7980 page = (Page) BufferGetPage(buffer);
7982 offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
7983 if (PageGetMaxOffsetNumber(page) >= offnum)
7984 lp = PageGetItemId(page, offnum);
7986 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
7987 elog(PANIC, "heap_lock_redo: invalid lp");
7989 htup = (HeapTupleHeader) PageGetItem(page, lp);
7991 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
7992 &htup->t_infomask2);
7995 * Clear relevant update flags, but only if the modified infomask says
7996 * there's no update.
7998 if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
8000 HeapTupleHeaderClearHotUpdated(htup);
8001 /* Make sure there is no forward chain link in t_ctid */
8002 htup->t_ctid = xlrec->target.tid;
8004 HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
8005 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
8006 PageSetLSN(page, lsn);
8007 MarkBufferDirty(buffer);
8009 if (BufferIsValid(buffer))
8010 UnlockReleaseBuffer(buffer);
8014 heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
8016 xl_heap_lock_updated *xlrec =
8017 (xl_heap_lock_updated *) XLogRecGetData(record);
8020 OffsetNumber offnum;
8022 HeapTupleHeader htup;
8024 if (XLogReadBufferForRedo(lsn, record, 0, xlrec->target.node,
8025 ItemPointerGetBlockNumber(&(xlrec->target.tid)),
8026 &buffer) == BLK_NEEDS_REDO)
8028 page = BufferGetPage(buffer);
8029 offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
8030 if (PageGetMaxOffsetNumber(page) >= offnum)
8031 lp = PageGetItemId(page, offnum);
8033 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8034 elog(PANIC, "heap_xlog_lock_updated: invalid lp");
8036 htup = (HeapTupleHeader) PageGetItem(page, lp);
8038 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
8039 &htup->t_infomask2);
8040 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
8042 PageSetLSN(page, lsn);
8043 MarkBufferDirty(buffer);
8045 if (BufferIsValid(buffer))
8046 UnlockReleaseBuffer(buffer);
8050 heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
8052 xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
8055 OffsetNumber offnum;
8057 HeapTupleHeader htup;
8061 if (XLogReadBufferForRedo(lsn, record, 0, xlrec->target.node,
8062 ItemPointerGetBlockNumber(&(xlrec->target.tid)),
8063 &buffer) == BLK_NEEDS_REDO)
8065 page = BufferGetPage(buffer);
8067 offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
8068 if (PageGetMaxOffsetNumber(page) >= offnum)
8069 lp = PageGetItemId(page, offnum);
8071 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8072 elog(PANIC, "heap_inplace_redo: invalid lp");
8074 htup = (HeapTupleHeader) PageGetItem(page, lp);
8076 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
8077 newlen = record->xl_len - SizeOfHeapInplace;
8078 if (oldlen != newlen)
8079 elog(PANIC, "heap_inplace_redo: wrong tuple length");
8081 memcpy((char *) htup + htup->t_hoff,
8082 (char *) xlrec + SizeOfHeapInplace,
8085 PageSetLSN(page, lsn);
8086 MarkBufferDirty(buffer);
8088 if (BufferIsValid(buffer))
8089 UnlockReleaseBuffer(buffer);
8093 heap_redo(XLogRecPtr lsn, XLogRecord *record)
8095 uint8 info = record->xl_info & ~XLR_INFO_MASK;
8098 * These operations don't overwrite MVCC data, so no conflict processing is
8099 * required. The ones in heap2 rmgr do.
8102 switch (info & XLOG_HEAP_OPMASK)
8104 case XLOG_HEAP_INSERT:
8105 heap_xlog_insert(lsn, record);
8107 case XLOG_HEAP_DELETE:
8108 heap_xlog_delete(lsn, record);
8110 case XLOG_HEAP_UPDATE:
8111 heap_xlog_update(lsn, record, false);
8113 case XLOG_HEAP_HOT_UPDATE:
8114 heap_xlog_update(lsn, record, true);
8116 case XLOG_HEAP_LOCK:
8117 heap_xlog_lock(lsn, record);
8119 case XLOG_HEAP_INPLACE:
8120 heap_xlog_inplace(lsn, record);
8123 elog(PANIC, "heap_redo: unknown op code %u", info);
8128 heap2_redo(XLogRecPtr lsn, XLogRecord *record)
8130 uint8 info = record->xl_info & ~XLR_INFO_MASK;
8132 switch (info & XLOG_HEAP_OPMASK)
8134 case XLOG_HEAP2_CLEAN:
8135 heap_xlog_clean(lsn, record);
8137 case XLOG_HEAP2_FREEZE_PAGE:
8138 heap_xlog_freeze_page(lsn, record);
8140 case XLOG_HEAP2_CLEANUP_INFO:
8141 heap_xlog_cleanup_info(lsn, record);
8143 case XLOG_HEAP2_VISIBLE:
8144 heap_xlog_visible(lsn, record);
8146 case XLOG_HEAP2_MULTI_INSERT:
8147 heap_xlog_multi_insert(lsn, record);
8149 case XLOG_HEAP2_LOCK_UPDATED:
8150 heap_xlog_lock_updated(lsn, record);
8152 case XLOG_HEAP2_NEW_CID:
8155 * Nothing to do on a real replay, only used during logical
8159 case XLOG_HEAP2_REWRITE:
8160 heap_xlog_logical_rewrite(lsn, record);
8163 elog(PANIC, "heap2_redo: unknown op code %u", info);
8168 * heap_sync - sync a heap, for use when no WAL has been written
8170 * This forces the heap contents (including TOAST heap if any) down to disk.
8171 * If we skipped using WAL, and WAL is otherwise needed, we must force the
8172 * relation down to disk before it's safe to commit the transaction. This
8173 * requires writing out any dirty buffers and then doing a forced fsync.
8175 * Indexes are not touched. (Currently, index operations associated with
8176 * the commands that use this are WAL-logged and so do not need fsync.
8177 * That behavior might change someday, but in any case it's likely that
8178 * any fsync decisions required would be per-index and hence not appropriate
8182 heap_sync(Relation rel)
8184 /* non-WAL-logged tables never need fsync */
8185 if (!RelationNeedsWAL(rel))
8189 FlushRelationBuffers(rel);
8190 /* FlushRelationBuffers will have opened rd_smgr */
8191 smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);
8193 /* FSM is not critical, don't bother syncing it */
8195 /* toast heap, if any */
8196 if (OidIsValid(rel->rd_rel->reltoastrelid))
8200 toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
8201 FlushRelationBuffers(toastrel);
8202 smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
8203 heap_close(toastrel, AccessShareLock);