1 /*-------------------------------------------------------------------------
4 * heap access method code
6 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/access/heap/heapam.c
15 * heap_beginscan - begin relation scan
16 * heap_rescan - restart a relation scan
17 * heap_endscan - end relation scan
18 * heap_getnext - retrieve next tuple in scan
19 * heap_fetch - retrieve tuple with given tid
20 * heap_insert - insert tuple into a relation
21 * heap_multi_insert - insert multiple tuples into a relation
22 * heap_delete - delete a tuple from a relation
23 * heap_update - replace a tuple in a relation with another tuple
24 * heap_sync - sync heap, for when no WAL has been written
27 * This file contains the heap_ routines which implement
28 * the POSTGRES heap access method used for all POSTGRES
31 *-------------------------------------------------------------------------
35 #include "access/bufmask.h"
36 #include "access/genam.h"
37 #include "access/heapam.h"
38 #include "access/heapam_xlog.h"
39 #include "access/hio.h"
40 #include "access/multixact.h"
41 #include "access/parallel.h"
42 #include "access/relscan.h"
43 #include "access/sysattr.h"
44 #include "access/tableam.h"
45 #include "access/transam.h"
46 #include "access/tuptoaster.h"
47 #include "access/valid.h"
48 #include "access/visibilitymap.h"
49 #include "access/xact.h"
50 #include "access/xlog.h"
51 #include "access/xloginsert.h"
52 #include "access/xlogutils.h"
53 #include "catalog/catalog.h"
54 #include "miscadmin.h"
56 #include "port/atomics.h"
57 #include "storage/bufmgr.h"
58 #include "storage/freespace.h"
59 #include "storage/lmgr.h"
60 #include "storage/predicate.h"
61 #include "storage/procarray.h"
62 #include "storage/smgr.h"
63 #include "storage/spin.h"
64 #include "storage/standby.h"
65 #include "utils/datum.h"
66 #include "utils/inval.h"
67 #include "utils/lsyscache.h"
68 #include "utils/relcache.h"
69 #include "utils/snapmgr.h"
70 #include "utils/spccache.h"
73 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
74 TransactionId xid, CommandId cid, int options);
75 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
76 Buffer newbuf, HeapTuple oldtup,
77 HeapTuple newtup, HeapTuple old_key_tup,
78 bool all_visible_cleared, bool new_all_visible_cleared);
79 static Bitmapset *HeapDetermineModifiedColumns(Relation relation,
80 Bitmapset *interesting_cols,
81 HeapTuple oldtup, HeapTuple newtup);
82 static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
83 LockTupleMode mode, LockWaitPolicy wait_policy,
84 bool *have_tuple_lock);
85 static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
86 uint16 old_infomask2, TransactionId add_to_xmax,
87 LockTupleMode mode, bool is_update,
88 TransactionId *result_xmax, uint16 *result_infomask,
89 uint16 *result_infomask2);
90 static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
91 ItemPointer ctid, TransactionId xid,
93 static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
94 uint16 *new_infomask2);
95 static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
97 static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
98 LockTupleMode lockmode);
99 static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
100 Relation rel, ItemPointer ctid, XLTW_Oper oper,
102 static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
103 uint16 infomask, Relation rel, int *remaining);
104 static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
105 static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
110 * Each tuple lock mode has a corresponding heavyweight lock, and one or two
111 * corresponding MultiXactStatuses (one to merely lock tuples, another one to
112 * update them). This table (and the macros below) helps us determine the
113 * heavyweight lock mode and MultiXactStatus values to use for any particular
114 * tuple lock strength.
116 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
126 tupleLockExtraInfo[MaxLockTupleMode + 1] =
128 { /* LockTupleKeyShare */
130 MultiXactStatusForKeyShare,
131 -1 /* KeyShare does not allow updating tuples */
133 { /* LockTupleShare */
135 MultiXactStatusForShare,
136 -1 /* Share does not allow updating tuples */
138 { /* LockTupleNoKeyExclusive */
140 MultiXactStatusForNoKeyUpdate,
141 MultiXactStatusNoKeyUpdate
143 { /* LockTupleExclusive */
145 MultiXactStatusForUpdate,
146 MultiXactStatusUpdate
150 /* Get the LOCKMODE for a given MultiXactStatus */
151 #define LOCKMODE_from_mxstatus(status) \
152 (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
155 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
156 * This is more readable than having every caller translate it to lock.h's
159 #define LockTupleTuplock(rel, tup, mode) \
160 LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
161 #define UnlockTupleTuplock(rel, tup, mode) \
162 UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
163 #define ConditionalLockTupleTuplock(rel, tup, mode) \
164 ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
168 * heap_compute_xid_horizon_for_tuples and xid_horizon_prefetch_buffer use
169 * this structure to coordinate prefetching activity.
173 BlockNumber cur_hblkno;
176 ItemPointerData *tids;
177 } XidHorizonPrefetchState;
181 * This table maps each particular MultiXactStatus value to the
182 * corresponding tuple lock strength.
184 static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
186 LockTupleKeyShare, /* ForKeyShare */
187 LockTupleShare, /* ForShare */
188 LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
189 LockTupleExclusive, /* ForUpdate */
190 LockTupleNoKeyExclusive, /* NoKeyUpdate */
191 LockTupleExclusive /* Update */
194 /* Get the LockTupleMode for a given MultiXactStatus */
195 #define TUPLOCK_from_mxstatus(status) \
196 (MultiXactStatusLock[(status)])
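/*
 * For example, combining the table and macros above:
 * TUPLOCK_from_mxstatus(MultiXactStatusForShare) yields LockTupleShare, so
 * LOCKMODE_from_mxstatus(MultiXactStatusForShare) expands to
 * tupleLockExtraInfo[LockTupleShare].hwlock.
 */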
198 /* ----------------------------------------------------------------
199 * heap support routines
200 * ----------------------------------------------------------------
204 * initscan - scan code common to heap_beginscan and heap_rescan
208 initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
210 ParallelBlockTableScanDesc bpscan = NULL;
215 * Determine the number of blocks we have to scan.
217 * It is sufficient to do this once at scan start, since any tuples added
218 * while the scan is in progress will be invisible to my snapshot anyway.
219 * (That is not true when using a non-MVCC snapshot. However, we couldn't
220 * guarantee to return tuples added after scan start anyway, since they
221 * might go into pages we already scanned. To guarantee consistent
222 * results for a non-MVCC snapshot, the caller must hold some higher-level
223 * lock that ensures the interesting tuple(s) won't change.)
225 if (scan->rs_base.rs_parallel != NULL)
227 bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
228 scan->rs_nblocks = bpscan->phs_nblocks;
231 scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
234 * If the table is large relative to NBuffers, use a bulk-read access
235 * strategy and enable synchronized scanning (see syncscan.c). Although
236 * the thresholds for these features could be different, we make them the
237 * same so that there are only two behaviors to tune rather than four.
238 * (However, some callers need to be able to disable one or both of these
239 * behaviors, independently of the size of the table; also there is a GUC
240 * variable that can disable synchronized scanning.)
242 * Note that heap_parallelscan_initialize has a very similar test; if you
243 * change this, consider changing that one, too.
245 if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
246 scan->rs_nblocks > NBuffers / 4)
248 allow_strat = scan->rs_base.rs_allow_strat;
249 allow_sync = scan->rs_base.rs_allow_sync;
252 allow_strat = allow_sync = false;
256 /* During a rescan, keep the previous strategy object. */
257 if (scan->rs_strategy == NULL)
258 scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
262 if (scan->rs_strategy != NULL)
263 FreeAccessStrategy(scan->rs_strategy);
264 scan->rs_strategy = NULL;
267 if (scan->rs_base.rs_parallel != NULL)
269 /* For parallel scan, believe whatever ParallelTableScanDesc says. */
270 scan->rs_base.rs_syncscan = scan->rs_base.rs_parallel->phs_syncscan;
272 else if (keep_startblock)
275 * When rescanning, we want to keep the previous startblock setting,
276 * so that rewinding a cursor doesn't generate surprising results.
277 * Reset the active syncscan setting, though.
279 scan->rs_base.rs_syncscan = (allow_sync && synchronize_seqscans);
281 else if (allow_sync && synchronize_seqscans)
283 scan->rs_base.rs_syncscan = true;
284 scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
288 scan->rs_base.rs_syncscan = false;
289 scan->rs_startblock = 0;
292 scan->rs_numblocks = InvalidBlockNumber;
293 scan->rs_inited = false;
294 scan->rs_ctup.t_data = NULL;
295 ItemPointerSetInvalid(&scan->rs_ctup.t_self);
296 scan->rs_cbuf = InvalidBuffer;
297 scan->rs_cblock = InvalidBlockNumber;
299 /* page-at-a-time fields are always invalid when not rs_inited */
302 * copy the scan key, if appropriate
305 memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
308 * Currently, we don't have a stats counter for bitmap heap scans (but the
309 * underlying bitmap index scans will be counted) or sample scans (we only
310 * update stats for tuple fetches there)
312 if (!scan->rs_base.rs_bitmapscan && !scan->rs_base.rs_samplescan)
313 pgstat_count_heap_scan(scan->rs_base.rs_rd);
317 * heap_setscanlimits - restrict range of a heapscan
319 * startBlk is the page to start at
320 * numBlks is number of pages to scan (InvalidBlockNumber means "all")
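 *
 * For example, passing startBlk = 0 and numBlks = 1 would restrict the scan
 * to the relation's first block only (illustrative; per the Asserts below,
 * this must be done before the scan starts and with syncscan disabled).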
323 heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
325 HeapScanDesc scan = (HeapScanDesc) sscan;
327 Assert(!scan->rs_inited); /* else too late to change */
328 Assert(!scan->rs_base.rs_syncscan); /* else rs_startblock is significant */
330 /* Check startBlk is valid (but allow case of zero blocks...) */
331 Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
333 scan->rs_startblock = startBlk;
334 scan->rs_numblocks = numBlks;
338 * heapgetpage - subroutine for heapgettup()
340 * This routine reads and pins the specified page of the relation.
341 * In page-at-a-time mode it performs additional work, namely determining
342 * which tuples on the page are visible.
345 heapgetpage(TableScanDesc sscan, BlockNumber page)
347 HeapScanDesc scan = (HeapScanDesc) sscan;
353 OffsetNumber lineoff;
357 Assert(page < scan->rs_nblocks);
359 /* release previous scan buffer, if any */
360 if (BufferIsValid(scan->rs_cbuf))
362 ReleaseBuffer(scan->rs_cbuf);
363 scan->rs_cbuf = InvalidBuffer;
367 * Be sure to check for interrupts at least once per page. Checks at
368 * higher code levels won't be able to stop a seqscan that encounters many
369 * pages' worth of consecutive dead tuples.
371 CHECK_FOR_INTERRUPTS();
373 /* read page using selected strategy */
374 scan->rs_cbuf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM, page,
375 RBM_NORMAL, scan->rs_strategy);
376 scan->rs_cblock = page;
378 if (!scan->rs_base.rs_pageatatime)
381 buffer = scan->rs_cbuf;
382 snapshot = scan->rs_base.rs_snapshot;
385 * Prune and repair fragmentation for the whole page, if possible.
387 heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
390 * We must hold share lock on the buffer content while examining tuple
391 * visibility. Afterwards, however, the tuples we have found to be
392 * visible are guaranteed good as long as we hold the buffer pin.
394 LockBuffer(buffer, BUFFER_LOCK_SHARE);
396 dp = BufferGetPage(buffer);
397 TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
398 lines = PageGetMaxOffsetNumber(dp);
402 * If the all-visible flag indicates that all tuples on the page are
403 * visible to everyone, we can skip the per-tuple visibility tests.
405 * Note: In hot standby, a tuple that's already visible to all
406 * transactions in the master might still be invisible to a read-only
407 * transaction in the standby. We partly handle this problem by tracking
408 * the minimum xmin of visible tuples as the cut-off XID while marking a
409 * page all-visible on master and WAL log that along with the visibility
410 * map SET operation. In hot standby, we wait for (or abort) all
411 * transactions that potentially cannot see one or more tuples on the
412 * page. That's how index-only scans work fine in hot standby. A crucial
413 * difference between index-only scans and heap scans is that the
414 * index-only scan completely relies on the visibility map, whereas a heap
415 * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
416 * the page-level flag can be trusted in the same way, because it might
417 * get propagated somehow without being explicitly WAL-logged, e.g. via a
418 * full page write. Until we can prove that beyond doubt, let's check each
419 * tuple for visibility the hard way.
421 all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
423 for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
427 if (ItemIdIsNormal(lpp))
429 HeapTupleData loctup;
432 loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
433 loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
434 loctup.t_len = ItemIdGetLength(lpp);
435 ItemPointerSet(&(loctup.t_self), page, lineoff);
440 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
442 CheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
443 &loctup, buffer, snapshot);
446 scan->rs_vistuples[ntup++] = lineoff;
450 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
452 Assert(ntup <= MaxHeapTuplesPerPage);
453 scan->rs_ntuples = ntup;
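	/*
	 * rs_vistuples[0 .. rs_ntuples - 1] now lists the offsets of the visible
	 * tuples on this page; heapgettup_pagemode iterates over exactly this
	 * list rather than re-checking visibility.
	 */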
457 * heapgettup - fetch next heap tuple
459 * Initialize the scan if not already done; then advance to the next
460 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
461 * or set scan->rs_ctup.t_data = NULL if no more tuples.
463 * dir == NoMovementScanDirection means "re-fetch the tuple indicated
466 * Note: the reason nkeys/key are passed separately, even though they are
467 * kept in the scan descriptor, is that the caller may not want us to check
470 * Note: when we fall off the end of the scan in either direction, we
471 * reset rs_inited. This means that a further request with the same
472 * scan direction will restart the scan, which is a bit odd, but a
473 * request with the opposite scan direction will start a fresh scan
474 * in the proper direction. The latter is required behavior for cursors,
475 * while the former case is generally undefined behavior in Postgres
476 * so we don't care too much.
480 heapgettup(HeapScanDesc scan,
485 HeapTuple tuple = &(scan->rs_ctup);
486 Snapshot snapshot = scan->rs_base.rs_snapshot;
487 bool backward = ScanDirectionIsBackward(dir);
492 OffsetNumber lineoff;
497 * calculate next starting lineoff, given scan direction
499 if (ScanDirectionIsForward(dir))
501 if (!scan->rs_inited)
504 * return null immediately if relation is empty
506 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
508 Assert(!BufferIsValid(scan->rs_cbuf));
509 tuple->t_data = NULL;
512 if (scan->rs_base.rs_parallel != NULL)
514 ParallelBlockTableScanDesc pbscan =
515 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
517 table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
520 page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
523 /* Other processes might have already finished the scan. */
524 if (page == InvalidBlockNumber)
526 Assert(!BufferIsValid(scan->rs_cbuf));
527 tuple->t_data = NULL;
532 page = scan->rs_startblock; /* first page */
533 heapgetpage((TableScanDesc) scan, page);
534 lineoff = FirstOffsetNumber; /* first offnum */
535 scan->rs_inited = true;
539 /* continue from previously returned page/tuple */
540 page = scan->rs_cblock; /* current page */
541 lineoff = /* next offnum */
542 OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
545 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
547 dp = BufferGetPage(scan->rs_cbuf);
548 TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
549 lines = PageGetMaxOffsetNumber(dp);
550 /* page and lineoff now reference the physically next tid */
552 linesleft = lines - lineoff + 1;
556 /* backward parallel scan not supported */
557 Assert(scan->rs_base.rs_parallel == NULL);
559 if (!scan->rs_inited)
562 * return null immediately if relation is empty
564 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
566 Assert(!BufferIsValid(scan->rs_cbuf));
567 tuple->t_data = NULL;
572 * Disable reporting to syncscan logic in a backwards scan; it's
573 * not very likely anyone else is doing the same thing at the same
574 * time, and much more likely that we'll just bollix things for
577 scan->rs_base.rs_syncscan = false;
578 /* start from last page of the scan */
579 if (scan->rs_startblock > 0)
580 page = scan->rs_startblock - 1;
582 page = scan->rs_nblocks - 1;
583 heapgetpage((TableScanDesc) scan, page);
587 /* continue from previously returned page/tuple */
588 page = scan->rs_cblock; /* current page */
591 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
593 dp = BufferGetPage(scan->rs_cbuf);
594 TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
595 lines = PageGetMaxOffsetNumber(dp);
597 if (!scan->rs_inited)
599 lineoff = lines; /* final offnum */
600 scan->rs_inited = true;
604 lineoff = /* previous offnum */
605 OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
607 /* page and lineoff now reference the physically previous tid */
614 * ``no movement'' scan direction: refetch prior tuple
616 if (!scan->rs_inited)
618 Assert(!BufferIsValid(scan->rs_cbuf));
619 tuple->t_data = NULL;
623 page = ItemPointerGetBlockNumber(&(tuple->t_self));
624 if (page != scan->rs_cblock)
625 heapgetpage((TableScanDesc) scan, page);
627 /* Since the tuple was previously fetched, needn't lock page here */
628 dp = BufferGetPage(scan->rs_cbuf);
629 TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
630 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
631 lpp = PageGetItemId(dp, lineoff);
632 Assert(ItemIdIsNormal(lpp));
634 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
635 tuple->t_len = ItemIdGetLength(lpp);
641 * advance the scan until we find a qualifying tuple or run out of stuff
644 lpp = PageGetItemId(dp, lineoff);
647 while (linesleft > 0)
649 if (ItemIdIsNormal(lpp))
653 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
654 tuple->t_len = ItemIdGetLength(lpp);
655 ItemPointerSet(&(tuple->t_self), page, lineoff);
658 * if current tuple qualifies, return it.
660 valid = HeapTupleSatisfiesVisibility(tuple,
664 CheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
665 tuple, scan->rs_cbuf,
668 if (valid && key != NULL)
669 HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
674 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
680 * otherwise move to the next item on the page
685 --lpp; /* move back in this page's ItemId array */
690 ++lpp; /* move forward in this page's ItemId array */
696 * if we get here, it means we've exhausted the items on this page and
697 * it's time to move to the next.
699 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
702 * advance to next/prior page and detect end of scan
706 finished = (page == scan->rs_startblock) ||
707 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
709 page = scan->rs_nblocks;
712 else if (scan->rs_base.rs_parallel != NULL)
714 ParallelBlockTableScanDesc pbscan =
715 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
717 page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
719 finished = (page == InvalidBlockNumber);
724 if (page >= scan->rs_nblocks)
726 finished = (page == scan->rs_startblock) ||
727 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
730 * Report our new scan position for synchronization purposes. We
731 * don't do that when moving backwards, however. That would just
732 * mess up any other forward-moving scanners.
734 * Note: we do this before checking for end of scan so that the
735 * final state of the position hint is back at the start of the
736 * rel. That's not strictly necessary, but otherwise when you run
737 * the same query multiple times the starting position would shift
738 * a little bit backwards on every invocation, which is confusing.
739 * We don't guarantee any specific ordering in general, though.
741 if (scan->rs_base.rs_syncscan)
742 ss_report_location(scan->rs_base.rs_rd, page);
746 * return NULL if we've exhausted all the pages
750 if (BufferIsValid(scan->rs_cbuf))
751 ReleaseBuffer(scan->rs_cbuf);
752 scan->rs_cbuf = InvalidBuffer;
753 scan->rs_cblock = InvalidBlockNumber;
754 tuple->t_data = NULL;
755 scan->rs_inited = false;
759 heapgetpage((TableScanDesc) scan, page);
761 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
763 dp = BufferGetPage(scan->rs_cbuf);
764 TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
765 lines = PageGetMaxOffsetNumber((Page) dp);
770 lpp = PageGetItemId(dp, lines);
774 lineoff = FirstOffsetNumber;
775 lpp = PageGetItemId(dp, FirstOffsetNumber);
781 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
783 * Same API as heapgettup, but used in page-at-a-time mode
785 * The internal logic is much the same as heapgettup's too, but there are some
786 * differences: we do not take the buffer content lock (that only needs to
787 * happen inside heapgetpage), and we iterate through just the tuples listed
788 * in rs_vistuples[] rather than all tuples on the page. Notice that
789 * lineindex is 0-based, where the corresponding loop variable lineoff in
790 * heapgettup is 1-based.
794 heapgettup_pagemode(HeapScanDesc scan,
799 HeapTuple tuple = &(scan->rs_ctup);
800 bool backward = ScanDirectionIsBackward(dir);
806 OffsetNumber lineoff;
811 * calculate next starting lineindex, given scan direction
813 if (ScanDirectionIsForward(dir))
815 if (!scan->rs_inited)
818 * return null immediately if relation is empty
820 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
822 Assert(!BufferIsValid(scan->rs_cbuf));
823 tuple->t_data = NULL;
826 if (scan->rs_base.rs_parallel != NULL)
828 ParallelBlockTableScanDesc pbscan =
829 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
831 table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
834 page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
837 /* Other processes might have already finished the scan. */
838 if (page == InvalidBlockNumber)
840 Assert(!BufferIsValid(scan->rs_cbuf));
841 tuple->t_data = NULL;
846 page = scan->rs_startblock; /* first page */
847 heapgetpage((TableScanDesc) scan, page);
849 scan->rs_inited = true;
853 /* continue from previously returned page/tuple */
854 page = scan->rs_cblock; /* current page */
855 lineindex = scan->rs_cindex + 1;
858 dp = BufferGetPage(scan->rs_cbuf);
859 TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
860 lines = scan->rs_ntuples;
861 /* page and lineindex now reference the next visible tid */
863 linesleft = lines - lineindex;
867 /* backward parallel scan not supported */
868 Assert(scan->rs_base.rs_parallel == NULL);
870 if (!scan->rs_inited)
873 * return null immediately if relation is empty
875 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
877 Assert(!BufferIsValid(scan->rs_cbuf));
878 tuple->t_data = NULL;
883 * Disable reporting to syncscan logic in a backwards scan; it's
884 * not very likely anyone else is doing the same thing at the same
885 * time, and much more likely that we'll just bollix things for
888 scan->rs_base.rs_syncscan = false;
889 /* start from last page of the scan */
890 if (scan->rs_startblock > 0)
891 page = scan->rs_startblock - 1;
893 page = scan->rs_nblocks - 1;
894 heapgetpage((TableScanDesc) scan, page);
898 /* continue from previously returned page/tuple */
899 page = scan->rs_cblock; /* current page */
902 dp = BufferGetPage(scan->rs_cbuf);
903 TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
904 lines = scan->rs_ntuples;
906 if (!scan->rs_inited)
908 lineindex = lines - 1;
909 scan->rs_inited = true;
913 lineindex = scan->rs_cindex - 1;
915 /* page and lineindex now reference the previous visible tid */
917 linesleft = lineindex + 1;
922 * ``no movement'' scan direction: refetch prior tuple
924 if (!scan->rs_inited)
926 Assert(!BufferIsValid(scan->rs_cbuf));
927 tuple->t_data = NULL;
931 page = ItemPointerGetBlockNumber(&(tuple->t_self));
932 if (page != scan->rs_cblock)
933 heapgetpage((TableScanDesc) scan, page);
935 /* Since the tuple was previously fetched, needn't lock page here */
936 dp = BufferGetPage(scan->rs_cbuf);
937 TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
938 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
939 lpp = PageGetItemId(dp, lineoff);
940 Assert(ItemIdIsNormal(lpp));
942 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
943 tuple->t_len = ItemIdGetLength(lpp);
945 /* check that rs_cindex is in sync */
946 Assert(scan->rs_cindex < scan->rs_ntuples);
947 Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
953 * advance the scan until we find a qualifying tuple or run out of stuff
958 while (linesleft > 0)
960 lineoff = scan->rs_vistuples[lineindex];
961 lpp = PageGetItemId(dp, lineoff);
962 Assert(ItemIdIsNormal(lpp));
964 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
965 tuple->t_len = ItemIdGetLength(lpp);
966 ItemPointerSet(&(tuple->t_self), page, lineoff);
969 * if current tuple qualifies, return it.
975 HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
979 scan->rs_cindex = lineindex;
985 scan->rs_cindex = lineindex;
990 * otherwise move to the next item on the page
1000 * if we get here, it means we've exhausted the items on this page and
1001 * it's time to move to the next.
1005 finished = (page == scan->rs_startblock) ||
1006 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
1008 page = scan->rs_nblocks;
1011 else if (scan->rs_base.rs_parallel != NULL)
1013 ParallelBlockTableScanDesc pbscan =
1014 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
1016 page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
1018 finished = (page == InvalidBlockNumber);
1023 if (page >= scan->rs_nblocks)
1025 finished = (page == scan->rs_startblock) ||
1026 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
1029 * Report our new scan position for synchronization purposes. We
1030 * don't do that when moving backwards, however. That would just
1031 * mess up any other forward-moving scanners.
1033 * Note: we do this before checking for end of scan so that the
1034 * final state of the position hint is back at the start of the
1035 * rel. That's not strictly necessary, but otherwise when you run
1036 * the same query multiple times the starting position would shift
1037 * a little bit backwards on every invocation, which is confusing.
1038 * We don't guarantee any specific ordering in general, though.
1040 if (scan->rs_base.rs_syncscan)
1041 ss_report_location(scan->rs_base.rs_rd, page);
1045 * return NULL if we've exhausted all the pages
1049 if (BufferIsValid(scan->rs_cbuf))
1050 ReleaseBuffer(scan->rs_cbuf);
1051 scan->rs_cbuf = InvalidBuffer;
1052 scan->rs_cblock = InvalidBlockNumber;
1053 tuple->t_data = NULL;
1054 scan->rs_inited = false;
1058 heapgetpage((TableScanDesc) scan, page);
1060 dp = BufferGetPage(scan->rs_cbuf);
1061 TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
1062 lines = scan->rs_ntuples;
1065 lineindex = lines - 1;
1072 #if defined(DISABLE_COMPLEX_MACRO)
1074 * This is formatted so oddly so that the correspondence to the macro
1075 * definition in access/htup_details.h is maintained.
1078 fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
1084 (*(isnull) = false),
1085 HeapTupleNoNulls(tup) ?
1087 TupleDescAttr((tupleDesc), (attnum) - 1)->attcacheoff >= 0 ?
1089 fetchatt(TupleDescAttr((tupleDesc), (attnum) - 1),
1090 (char *) (tup)->t_data + (tup)->t_data->t_hoff +
1091 TupleDescAttr((tupleDesc), (attnum) - 1)->attcacheoff)
1094 nocachegetattr((tup), (attnum), (tupleDesc))
1098 att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
1105 nocachegetattr((tup), (attnum), (tupleDesc))
1115 #endif /* defined(DISABLE_COMPLEX_MACRO) */
1118 /* ----------------------------------------------------------------
1119 * heap access method interface
1120 * ----------------------------------------------------------------
1125 heap_beginscan(Relation relation, Snapshot snapshot,
1126 int nkeys, ScanKey key,
1127 ParallelTableScanDesc parallel_scan,
1130 bool allow_pagemode,
1138 * increment relation ref count while scanning relation
1140 * This is just to make really sure the relcache entry won't go away while
1141 * the scan has a pointer to it. Caller should be holding the rel open
1142 * anyway, so this is redundant in all normal scenarios...
1144 RelationIncrementReferenceCount(relation);
1147 * allocate and initialize scan descriptor
1149 scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1151 scan->rs_base.rs_rd = relation;
1152 scan->rs_base.rs_snapshot = snapshot;
1153 scan->rs_base.rs_nkeys = nkeys;
1154 scan->rs_base.rs_bitmapscan = is_bitmapscan;
1155 scan->rs_base.rs_samplescan = is_samplescan;
1156 scan->rs_strategy = NULL; /* set in initscan */
1157 scan->rs_base.rs_allow_strat = allow_strat;
1158 scan->rs_base.rs_allow_sync = allow_sync;
1159 scan->rs_base.rs_temp_snap = temp_snap;
1160 scan->rs_base.rs_parallel = parallel_scan;
1163 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1165 scan->rs_base.rs_pageatatime =
1166 allow_pagemode && snapshot && IsMVCCSnapshot(snapshot);
1169 * For a seqscan in a serializable transaction, acquire a predicate lock
1170 * on the entire relation. This is required not only to lock all the
1171 * matching tuples, but also to conflict with new insertions into the
1172 * table. In an indexscan, we take page locks on the index pages covering
1173 * the range specified in the scan qual, but in a heap scan there is
1174 * nothing more fine-grained to lock. A bitmap scan is a different story,
1175 * there we have already scanned the index and locked the index pages
1176 * covering the predicate. But in that case we still have to lock any
1177 * matching heap tuples.
1180 PredicateLockRelation(relation, snapshot);
1182 /* we only need to set this up once */
1183 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1186 * we do this here instead of in initscan() because heap_rescan also calls
1187 * initscan() and we don't want to allocate memory again
1190 scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1192 scan->rs_base.rs_key = NULL;
1194 initscan(scan, key, false);
1196 return (TableScanDesc) scan;
1200 heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
1201 bool allow_strat, bool allow_sync, bool allow_pagemode)
1203 HeapScanDesc scan = (HeapScanDesc) sscan;
1207 scan->rs_base.rs_allow_strat = allow_strat;
1208 scan->rs_base.rs_allow_sync = allow_sync;
1209 scan->rs_base.rs_pageatatime =
1210 allow_pagemode && IsMVCCSnapshot(scan->rs_base.rs_snapshot);
1214 * unpin scan buffers
1216 if (BufferIsValid(scan->rs_cbuf))
1217 ReleaseBuffer(scan->rs_cbuf);
1220 * reinitialize scan descriptor
1222 initscan(scan, key, true);
1226 heap_endscan(TableScanDesc sscan)
1228 HeapScanDesc scan = (HeapScanDesc) sscan;
1230 /* Note: no locking manipulations needed */
1233 * unpin scan buffers
1235 if (BufferIsValid(scan->rs_cbuf))
1236 ReleaseBuffer(scan->rs_cbuf);
1239 * decrement relation reference count and free scan descriptor storage
1241 RelationDecrementReferenceCount(scan->rs_base.rs_rd);
1243 if (scan->rs_base.rs_key)
1244 pfree(scan->rs_base.rs_key);
1246 if (scan->rs_strategy != NULL)
1247 FreeAccessStrategy(scan->rs_strategy);
1249 if (scan->rs_base.rs_temp_snap)
1250 UnregisterSnapshot(scan->rs_base.rs_snapshot);
1256 #define HEAPDEBUG_1 \
1257 elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
1258 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
1259 #define HEAPDEBUG_2 \
1260 elog(DEBUG2, "heap_getnext returning EOS")
1261 #define HEAPDEBUG_3 \
1262 elog(DEBUG2, "heap_getnext returning tuple")
1267 #endif /* !defined(HEAPDEBUGALL) */
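/*
 * A minimal caller-side sketch of the scan API implemented below (argument
 * lists abridged; "rel" and "snapshot" stand for an open relation and a
 * registered snapshot supplied by the caller):
 *
 *		TableScanDesc scan = heap_beginscan(rel, snapshot, 0, NULL, NULL, ...);
 *		HeapTuple	tup;
 *
 *		while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *		{
 *			... process tup; it is only valid until the next call ...
 *		}
 *		heap_endscan(scan);
 */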
1271 heap_getnext(TableScanDesc sscan, ScanDirection direction)
1273 HeapScanDesc scan = (HeapScanDesc) sscan;
1276 * This is still widely used directly, without going through table AM, so
1277 * add a safety check. It's possible we should, at a later point,
1278 * downgrade this to an assert. The reason for checking the AM routine,
1279 * rather than the AM oid, is that this allows writing regression tests
1280 * that create another AM reusing the heap handler.
1282 if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1284 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1285 errmsg("only heap AM is supported")));
1287 /* Note: no locking manipulations needed */
1289 HEAPDEBUG_1; /* heap_getnext( info ) */
1291 if (scan->rs_base.rs_pageatatime)
1292 heapgettup_pagemode(scan, direction,
1293 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1295 heapgettup(scan, direction,
1296 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1298 if (scan->rs_ctup.t_data == NULL)
1300 HEAPDEBUG_2; /* heap_getnext returning EOS */
1305 * if we get here it means we have a new current scan tuple, so point to
1306 * the proper return buffer and return the tuple.
1308 HEAPDEBUG_3; /* heap_getnext returning tuple */
1310 pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1312 return &scan->rs_ctup;
1315 #ifdef HEAPAMSLOTDEBUGALL
1316 #define HEAPAMSLOTDEBUG_1 \
1317 elog(DEBUG2, "heapam_getnextslot([%s,nkeys=%d],dir=%d) called", \
1318 RelationGetRelationName(scan->rs_base.rs_rd), scan->rs_base.rs_nkeys, (int) direction)
1319 #define HEAPAMSLOTDEBUG_2 \
1320 elog(DEBUG2, "heapam_getnextslot returning EOS")
1321 #define HEAPAMSLOTDEBUG_3 \
1322 elog(DEBUG2, "heapam_getnextslot returning tuple")
1324 #define HEAPAMSLOTDEBUG_1
1325 #define HEAPAMSLOTDEBUG_2
1326 #define HEAPAMSLOTDEBUG_3
1330 heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
1332 HeapScanDesc scan = (HeapScanDesc) sscan;
1334 /* Note: no locking manipulations needed */
1336 HEAPAMSLOTDEBUG_1; /* heap_getnextslot( info ) */
1338 if (scan->rs_base.rs_pageatatime)
1339 heapgettup_pagemode(scan, direction,
1340 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1342 heapgettup(scan, direction, scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1344 if (scan->rs_ctup.t_data == NULL)
1346 HEAPAMSLOTDEBUG_2; /* heap_getnextslot returning EOS */
1347 ExecClearTuple(slot);
1352 * if we get here it means we have a new current scan tuple, so point to
1353 * the proper return buffer and return the tuple.
1355 HEAPAMSLOTDEBUG_3; /* heap_getnextslot returning tuple */
1357 pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1359 ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1365 * heap_fetch - retrieve tuple with given tid
1367 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1368 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1369 * against the specified snapshot.
1371 * If successful (tuple found and passes snapshot time qual), then *userbuf
1372 * is set to the buffer holding the tuple and true is returned. The caller
1373 * must unpin the buffer when done with the tuple.
1375 * If the tuple is not found (ie, item number references a deleted slot),
1376 * then tuple->t_data is set to NULL and false is returned.
1378 * If the tuple is found but fails the time qual check, then false is returned
1379 * but tuple->t_data is left pointing to the tuple.
1381 * keep_buf determines what is done with the buffer in the false-result cases.
1382 * When the caller specifies keep_buf = true, we retain the pin on the buffer
1383 * and return it in *userbuf (so the caller must eventually unpin it); when
1384 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
1386 * stats_relation is the relation to charge the heap_fetch operation against
1387 * for statistical purposes. (This could be the heap rel itself, an
1388 * associated index, or NULL to not count the fetch at all.)
1390 * heap_fetch does not follow HOT chains: only the exact TID requested will
1393 * It is somewhat inconsistent that we ereport() on invalid block number but
1394 * return false on invalid item number. There are a couple of reasons though.
1395 * One is that the caller can relatively easily check the block number for
1396 * validity, but cannot check the item number without reading the page
1397 * himself. Another is that when we are following a t_ctid link, we can be
1398 * reasonably confident that the page number is valid (since VACUUM shouldn't
1399 * truncate off the destination page without having killed the referencing
1400 * tuple first), but the item number might well not be good.
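 *
 * A hypothetical caller-side sketch (argument list abridged; tupdata is a
 * HeapTupleData whose t_self has been set to the TID of interest):
 *
 *		if (heap_fetch(rel, snapshot, &tupdata, &buffer, ...))
 *		{
 *			... use tupdata while the buffer pin is held ...
 *			ReleaseBuffer(buffer);
 *		}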
1403 heap_fetch(Relation relation,
1408 ItemPointer tid = &(tuple->t_self);
1412 OffsetNumber offnum;
1416 * Fetch and pin the appropriate page of the relation.
1418 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1421 * Need share lock on buffer to examine tuple commit status.
1423 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1424 page = BufferGetPage(buffer);
1425 TestForOldSnapshot(snapshot, relation, page);
1428 * We'd better check for out-of-range offnum in case of VACUUM since the
1431 offnum = ItemPointerGetOffsetNumber(tid);
1432 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1434 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1435 ReleaseBuffer(buffer);
1436 *userbuf = InvalidBuffer;
1437 tuple->t_data = NULL;
1442 * get the item line pointer corresponding to the requested tid
1444 lp = PageGetItemId(page, offnum);
1447 * Must check for deleted tuple.
1449 if (!ItemIdIsNormal(lp))
1451 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1452 ReleaseBuffer(buffer);
1453 *userbuf = InvalidBuffer;
1454 tuple->t_data = NULL;
1459 * fill in *tuple fields
1461 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1462 tuple->t_len = ItemIdGetLength(lp);
1463 tuple->t_tableOid = RelationGetRelid(relation);
1466 * check tuple visibility, then release lock
1468 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1471 PredicateLockTuple(relation, tuple, snapshot);
1473 CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1475 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1480 * All checks passed, so return the tuple as valid. Caller is now
1481 * responsible for releasing the buffer.
1488 /* Tuple failed time qual */
1489 ReleaseBuffer(buffer);
1490 *userbuf = InvalidBuffer;
1496 * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1498 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1499 * of a HOT chain), and buffer is the buffer holding this tuple. We search
1500 * for the first chain member satisfying the given snapshot. If one is
1501 * found, we update *tid to reference that tuple's offset number, and
1502 * return true. If no match, return false without modifying *tid.
1504 * heapTuple is a caller-supplied buffer. When a match is found, we return
1505 * the tuple here, in addition to updating *tid. If no match is found, the
1506 * contents of this buffer on return are undefined.
1508 * If all_dead is not NULL, we check non-visible tuples to see if they are
1509 * globally dead; *all_dead is set true if all members of the HOT chain
1510 * are vacuumable, false if not.
1512 * Unlike heap_fetch, the caller must already have pin and (at least) share
1513 * lock on the buffer; it is still pinned/locked at exit. Also unlike
1514 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
1517 heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1518 Snapshot snapshot, HeapTuple heapTuple,
1519 bool *all_dead, bool first_call)
1521 Page dp = (Page) BufferGetPage(buffer);
1522 TransactionId prev_xmax = InvalidTransactionId;
1523 OffsetNumber offnum;
1524 bool at_chain_start;
1528 /* If this is not the first call, previous call returned a (live!) tuple */
1530 *all_dead = first_call;
1532 Assert(TransactionIdIsValid(RecentGlobalXmin));
1534 Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
1535 offnum = ItemPointerGetOffsetNumber(tid);
1536 at_chain_start = first_call;
1539 heapTuple->t_self = *tid;
1541 /* Scan through possible multiple members of HOT-chain */
1546 /* check for bogus TID */
1547 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1550 lp = PageGetItemId(dp, offnum);
1552 /* check for unused, dead, or redirected items */
1553 if (!ItemIdIsNormal(lp))
1555 /* We should only see a redirect at start of chain */
1556 if (ItemIdIsRedirected(lp) && at_chain_start)
1558 /* Follow the redirect */
1559 offnum = ItemIdGetRedirect(lp);
1560 at_chain_start = false;
1563 /* else must be end of chain */
1567 heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1568 heapTuple->t_len = ItemIdGetLength(lp);
1569 heapTuple->t_tableOid = RelationGetRelid(relation);
1570 ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
1573 * Shouldn't see a HEAP_ONLY tuple at chain start.
1575 if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1579 * The xmin should match the previous xmax value, else chain is
1582 if (TransactionIdIsValid(prev_xmax) &&
1583 !TransactionIdEquals(prev_xmax,
1584 HeapTupleHeaderGetXmin(heapTuple->t_data)))
1588 * When first_call is true (and thus, skip is initially false) we'll
1589 * return the first tuple we find. But on later passes, heapTuple
1590 * will initially be pointing to the tuple we returned last time.
1591 * Returning it again would be incorrect (and would loop forever), so
1592 * we skip it and return the next match we find.
1597 * For the benefit of logical decoding, have t_self point at the
1598 * element of the HOT chain we're currently investigating instead
1599 * of the root tuple of the HOT chain. This is important because
1600 * the *Satisfies routine for historical mvcc snapshots needs the
1601 * correct tid to decide about the visibility in some cases.
1603 ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
1605 /* If it's visible per the snapshot, we must return it */
1606 valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1607 CheckForSerializableConflictOut(valid, relation, heapTuple,
1609 /* reset to original, non-redirected, tid */
1610 heapTuple->t_self = *tid;
1614 ItemPointerSetOffsetNumber(tid, offnum);
1615 PredicateLockTuple(relation, heapTuple, snapshot);
1624 * If we can't see it, maybe no one else can either. At caller
1625 * request, check whether all chain members are dead to all
1628 * Note: if you change the criterion here for what is "dead", fix the
1629 * planner's get_actual_variable_range() function to match.
1631 if (all_dead && *all_dead &&
1632 !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
1636 * Check to see if HOT chain continues past this tuple; if so fetch
1637 * the next offnum and loop around.
1639 if (HeapTupleIsHotUpdated(heapTuple))
1641 Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1642 ItemPointerGetBlockNumber(tid));
1643 offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1644 at_chain_start = false;
1645 prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1648 break; /* end of chain */
1655 * heap_get_latest_tid - get the latest tid of a specified tuple
1657 * Actually, this gets the latest version that is visible according to
1658 * the passed snapshot. You can pass SnapshotDirty to get the very latest,
1659 * possibly uncommitted version.
1661 * *tid is both an input and an output parameter: it is updated to
1662 * show the latest version of the row. Note that it will not be changed
1663 * if no version of the row passes the snapshot test.
1666 heap_get_latest_tid(Relation relation,
1671 ItemPointerData ctid;
1672 TransactionId priorXmax;
1674 /* this is to avoid Assert failures on bad input */
1675 if (!ItemPointerIsValid(tid))
1679 * Since this can be called with user-supplied TID, don't trust the input
1680 * too much. (RelationGetNumberOfBlocks is an expensive check, so we
1681 * don't check t_ctid links again this way. Note that it would not do to
1682 * call it just once and save the result, either.)
1684 blk = ItemPointerGetBlockNumber(tid);
1685 if (blk >= RelationGetNumberOfBlocks(relation))
1686 elog(ERROR, "block number %u is out of range for relation \"%s\"",
1687 blk, RelationGetRelationName(relation));
1690 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1691 * need to examine, and *tid is the TID we will return if ctid turns out
1694 * Note that we will loop until we reach the end of the t_ctid chain.
1695 * Depending on the snapshot passed, there might be at most one visible
1696 * version of the row, but we don't try to optimize for that.
1699 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1704 OffsetNumber offnum;
1710 * Read, pin, and lock the page.
1712 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1713 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1714 page = BufferGetPage(buffer);
1715 TestForOldSnapshot(snapshot, relation, page);
1718 * Check for bogus item number. This is not treated as an error
1719 * condition because it can happen while following a t_ctid link. We
1720 * just assume that the prior tid is OK and return it unchanged.
1722 offnum = ItemPointerGetOffsetNumber(&ctid);
1723 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1725 UnlockReleaseBuffer(buffer);
1728 lp = PageGetItemId(page, offnum);
1729 if (!ItemIdIsNormal(lp))
1731 UnlockReleaseBuffer(buffer);
1735 /* OK to access the tuple */
1737 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1738 tp.t_len = ItemIdGetLength(lp);
1739 tp.t_tableOid = RelationGetRelid(relation);
1742 * After following a t_ctid link, we might arrive at an unrelated
1743 * tuple. Check for XMIN match.
1745 if (TransactionIdIsValid(priorXmax) &&
1746 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1748 UnlockReleaseBuffer(buffer);
1753 * Check tuple visibility; if visible, set it as the new result
1756 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1757 CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1762 * If there's a valid t_ctid link, follow it, else we're done.
1764 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1765 HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
1766 HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
1767 ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1769 UnlockReleaseBuffer(buffer);
1773 ctid = tp.t_data->t_ctid;
1774 priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1775 UnlockReleaseBuffer(buffer);
1781 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
1783 * This is called after we have waited for the XMAX transaction to terminate.
1784 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
1785 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
1786 * hint bit if possible --- but beware that that may not yet be possible,
1787 * if the transaction committed asynchronously.
1789 * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
1790 * even if it commits.
1792 * Hence callers should look only at XMAX_INVALID.
1794 * Note this is not allowed for tuples whose xmax is a multixact.
1797 UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
1799 Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
1800 Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
1802 if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
1804 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
1805 TransactionIdDidCommit(xid))
1806 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
1809 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
1810 InvalidTransactionId);
1816 * GetBulkInsertState - prepare status object for a bulk insert
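 *
 * A minimal (hypothetical) bulk-load sketch using the routines below,
 * assuming the caller holds an open Relation "rel" and a CommandId "cid":
 *
 *		BulkInsertState bistate = GetBulkInsertState();
 *
 *		for each tuple "tup" to load:
 *			heap_insert(rel, tup, cid, 0, bistate);
 *		FreeBulkInsertState(bistate);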
1819 GetBulkInsertState(void)
1821 BulkInsertState bistate;
1823 bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
1824 bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
1825 bistate->current_buf = InvalidBuffer;
1830 * FreeBulkInsertState - clean up after finishing a bulk insert
1833 FreeBulkInsertState(BulkInsertState bistate)
1835 if (bistate->current_buf != InvalidBuffer)
1836 ReleaseBuffer(bistate->current_buf);
1837 FreeAccessStrategy(bistate->strategy);
1842 * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
1845 ReleaseBulkInsertStatePin(BulkInsertState bistate)
1847 if (bistate->current_buf != InvalidBuffer)
1848 ReleaseBuffer(bistate->current_buf);
1849 bistate->current_buf = InvalidBuffer;
1854 * heap_insert - insert tuple into a heap
1856 * The new tuple is stamped with current transaction ID and the specified
1859 * See table_insert for comments about most of the input flags, except that
1860 * this routine directly takes a tuple rather than a slot.
1862 * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
1863 * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
1864 * implement table_insert_speculative().
1866 * On return the header fields of *tup are updated to match the stored tuple;
1867 * in particular tup->t_self receives the actual TID where the tuple was
1868 * stored. But note that any toasting of fields within the tuple data is NOT
1869 * reflected into *tup.
1872 heap_insert(Relation relation, HeapTuple tup, CommandId cid,
1873 int options, BulkInsertState bistate)
1875 TransactionId xid = GetCurrentTransactionId();
1878 Buffer vmbuffer = InvalidBuffer;
1879 bool all_visible_cleared = false;
1882 * Fill in tuple header fields and toast the tuple if necessary.
1884 * Note: below this point, heaptup is the data we actually intend to store
1885 * into the relation; tup is the caller's original untoasted data.
1887 heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
1890 * Find buffer to insert this tuple into. If the page is all visible,
1891 * this will also pin the requisite visibility map page.
1893 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
1894 InvalidBuffer, options, bistate,
1898 * We're about to do the actual insert -- but check for conflict first, to
1899 * avoid possibly having to roll back work we've just done.
1901 * This is safe without a recheck as long as there is no possibility of
1902 * another process scanning the page between this check and the insert
1903 * being visible to the scan (i.e., an exclusive buffer content lock is
1904 * continuously held from this point until the tuple insert is visible).
1906 * For a heap insert, we only need to check for table-level SSI locks. Our
1907 * new tuple can't possibly conflict with existing tuple locks, and heap
1908 * page locks are only consolidated versions of tuple locks; they do not
1909 * lock "gaps" as index page locks do. So we don't need to specify a
1910 * buffer when making the call, which makes for a faster check.
1912 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
1914 /* NO EREPORT(ERROR) from here till changes are logged */
1915 START_CRIT_SECTION();
1917 RelationPutHeapTuple(relation, buffer, heaptup,
1918 (options & HEAP_INSERT_SPECULATIVE) != 0);
1920 if (PageIsAllVisible(BufferGetPage(buffer)))
1922 all_visible_cleared = true;
1923 PageClearAllVisible(BufferGetPage(buffer));
1924 visibilitymap_clear(relation,
1925 ItemPointerGetBlockNumber(&(heaptup->t_self)),
1926 vmbuffer, VISIBILITYMAP_VALID_BITS);
1930 * XXX Should we set PageSetPrunable on this page ?
1932 * The inserting transaction may eventually abort thus making this tuple
1933 * DEAD and hence available for pruning. Though we don't want to optimize
1934 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
1935 * aborted tuple will never be pruned until next vacuum is triggered.
1937 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
1940 MarkBufferDirty(buffer);
1943 if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
1945 xl_heap_insert xlrec;
1946 xl_heap_header xlhdr;
1948 Page page = BufferGetPage(buffer);
1949 uint8 info = XLOG_HEAP_INSERT;
1953 * If this is a catalog, we need to transmit combocids to properly
1954 * decode, so log that as well.
1956 if (RelationIsAccessibleInLogicalDecoding(relation))
1957 log_heap_new_cid(relation, heaptup);
1960 * If this is the single and first tuple on page, we can reinit the
1961 * page instead of restoring the whole thing. Set flag, and hide
1962 * buffer references from XLogInsert.
1964 if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
1965 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
1967 info |= XLOG_HEAP_INIT_PAGE;
1968 bufflags |= REGBUF_WILL_INIT;
1971 xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
1973 if (all_visible_cleared)
1974 xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
1975 if (options & HEAP_INSERT_SPECULATIVE)
1976 xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
1977 Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
1980 * For logical decoding, we need the tuple even if we're doing a full
1981 * page write, so make sure it's included even if we take a full-page
1982 * image. (XXX We could alternatively store a pointer into the FPW).
1984 if (RelationIsLogicallyLogged(relation) &&
1985 !(options & HEAP_INSERT_NO_LOGICAL))
1987 xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
1988 bufflags |= REGBUF_KEEP_DATA;
1992 XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
1994 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
1995 xlhdr.t_infomask = heaptup->t_data->t_infomask;
1996 xlhdr.t_hoff = heaptup->t_data->t_hoff;
1999 * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2000 * write the whole page to the xlog, we don't need to store
2001 * xl_heap_header in the xlog.
2003 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2004 XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2005 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2006 XLogRegisterBufData(0,
2007 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2008 heaptup->t_len - SizeofHeapTupleHeader);
2010 /* filtering by origin on a row level is much more efficient */
2011 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2013 recptr = XLogInsert(RM_HEAP_ID, info);
2015 PageSetLSN(page, recptr);
2020 UnlockReleaseBuffer(buffer);
2021 if (vmbuffer != InvalidBuffer)
2022 ReleaseBuffer(vmbuffer);
2025 * If tuple is cachable, mark it for invalidation from the caches in case
2026 * we abort. Note it is OK to do this after releasing the buffer, because
2027 * the heaptup data structure is all in local memory, not in the shared
2030 CacheInvalidateHeapTuple(relation, heaptup, NULL);
2032 /* Note: speculative insertions are counted too, even if aborted later */
2033 pgstat_count_heap_insert(relation, 1);
2036 * If heaptup is a private copy, release it. Don't forget to copy t_self
2037 * back to the caller's image, too.
2041 tup->t_self = heaptup->t_self;
2042 heap_freetuple(heaptup);
2047 * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2048 * tuple header fields and toasts the tuple if necessary. Returns a toasted
2049 * version of the tuple if it was toasted, or the original tuple if not. Note
2050 * that in any case, the header fields are also set in the original tuple.
2053 heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2054 CommandId cid, int options)
2057 * Parallel operations are required to be strictly read-only in a parallel
2058 * worker. Parallel inserts are not safe even in the leader in the
2059 * general case, because group locking means that heavyweight locks for
2060 * relation extension or GIN page locks will not conflict between members
2061 * of a lock group, but we don't prohibit that case here because there are
2062 * useful special cases that we can safely allow, such as CREATE TABLE AS.
2064 if (IsParallelWorker())
2066 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2067 errmsg("cannot insert tuples in a parallel worker")));
2069 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2070 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2071 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2072 HeapTupleHeaderSetXmin(tup->t_data, xid);
2073 if (options & HEAP_INSERT_FROZEN)
2074 HeapTupleHeaderSetXminFrozen(tup->t_data);
2076 HeapTupleHeaderSetCmin(tup->t_data, cid);
2077 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2078 tup->t_tableOid = RelationGetRelid(relation);
2081 * If the new tuple is too big for storage or contains already toasted
2082 * out-of-line attributes from some other relation, invoke the toaster.
2084 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2085 relation->rd_rel->relkind != RELKIND_MATVIEW)
2087 /* toast table entries should never be recursively toasted */
2088 Assert(!HeapTupleHasExternal(tup));
2091 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2092 return toast_insert_or_update(relation, tup, NULL, options);
2098 * heap_multi_insert - insert multiple tuples into a heap
2100 * This is like heap_insert(), but inserts multiple tuples in one operation.
2101 * That's faster than calling heap_insert() in a loop, because when multiple
2102 * tuples can be inserted on a single page, we can write just a single WAL
2103 * record covering all of them, and only need to lock/unlock the page once.
2105 * Note: this leaks memory into the current memory context. You can create a
2106 * temporary context before calling this, if that's a problem.
2109 heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
2110 CommandId cid, int options, BulkInsertState bistate)
2112 TransactionId xid = GetCurrentTransactionId();
2113 HeapTuple *heaptuples;
2116 PGAlignedBlock scratch;
2120 bool need_tuple_data = RelationIsLogicallyLogged(relation);
2121 bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
2123 /* currently not needed (thus unsupported) for heap_multi_insert() */
2124 AssertArg(!(options & HEAP_INSERT_NO_LOGICAL));
2126 needwal = !(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation);
2127 saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2128 HEAP_DEFAULT_FILLFACTOR);
2130 /* Toast and set header data in all the slots */
2131 heaptuples = palloc(ntuples * sizeof(HeapTuple));
2132 for (i = 0; i < ntuples; i++)
2136 tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
2137 slots[i]->tts_tableOid = RelationGetRelid(relation);
2138 tuple->t_tableOid = slots[i]->tts_tableOid;
2139 heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
2144 * We're about to do the actual inserts -- but check for conflict first,
2145 * to minimize the possibility of having to roll back work we've just
2148 * A check here does not definitively prevent a serialization anomaly;
2149 * that check MUST be done at least past the point of acquiring an
2150 * exclusive buffer content lock on every buffer that will be affected,
2151 * and MAY be done after all inserts are reflected in the buffers and
2152 * those locks are released; otherwise there is a race condition. Since
2153 * multiple buffers can be locked and unlocked in the loop below, and it
2154 * would not be feasible to identify and lock all of those buffers before
2155 * the loop, we must do a final check at the end.
2157 * The check here could be omitted with no loss of correctness; it is
2158 * present strictly as an optimization.
2160 * For heap inserts, we only need to check for table-level SSI locks. Our
2161 * new tuples can't possibly conflict with existing tuple locks, and heap
2162 * page locks are only consolidated versions of tuple locks; they do not
2163 * lock "gaps" as index page locks do. So we don't need to specify a
2164 * buffer when making the call, which makes for a faster check.
2166 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2169 while (ndone < ntuples)
2172 Buffer vmbuffer = InvalidBuffer;
2173 bool all_visible_cleared = false;
2176 CHECK_FOR_INTERRUPTS();
2179 * Find buffer where at least the next tuple will fit. If the page is
2180 * all-visible, this will also pin the requisite visibility map page.
2182 buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2183 InvalidBuffer, options, bistate,
2185 page = BufferGetPage(buffer);
2187 /* NO EREPORT(ERROR) from here till changes are logged */
2188 START_CRIT_SECTION();
2191 * RelationGetBufferForTuple has ensured that the first tuple fits.
2192 * Put that on the page, and then as many other tuples as fit.
2194 RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2195 for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2197 HeapTuple heaptup = heaptuples[ndone + nthispage];
2199 if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2202 RelationPutHeapTuple(relation, buffer, heaptup, false);
2205 * We don't use heap_multi_insert for catalog tuples yet, but
2206 * better be prepared...
2208 if (needwal && need_cids)
2209 log_heap_new_cid(relation, heaptup);
2212 if (PageIsAllVisible(page))
2214 all_visible_cleared = true;
2215 PageClearAllVisible(page);
2216 visibilitymap_clear(relation,
2217 BufferGetBlockNumber(buffer),
2218 vmbuffer, VISIBILITYMAP_VALID_BITS);
2222 * XXX Should we set PageSetPrunable on this page? See heap_insert()
2225 MarkBufferDirty(buffer);
2231 xl_heap_multi_insert *xlrec;
2232 uint8 info = XLOG_HEAP2_MULTI_INSERT;
2235 char *scratchptr = scratch.data;
2240 * If the page was previously empty, we can reinit the page
2241 * instead of restoring the whole thing.
2243 init = (ItemPointerGetOffsetNumber(&(heaptuples[ndone]->t_self)) == FirstOffsetNumber &&
2244 PageGetMaxOffsetNumber(page) == FirstOffsetNumber + nthispage - 1);
2246 /* allocate xl_heap_multi_insert struct from the scratch area */
2247 xlrec = (xl_heap_multi_insert *) scratchptr;
2248 scratchptr += SizeOfHeapMultiInsert;
2251 * Allocate the offsets array, unless we're reinitializing the page,
2252 * in which case the tuples are stored in order starting at
2253 * FirstOffsetNumber and we don't need to store the offsets explicitly.
2257 scratchptr += nthispage * sizeof(OffsetNumber);
2259 /* the rest of the scratch space is used for tuple data */
2260 tupledata = scratchptr;
2262 xlrec->flags = all_visible_cleared ? XLH_INSERT_ALL_VISIBLE_CLEARED : 0;
2263 xlrec->ntuples = nthispage;
2266 * Write out an xl_multi_insert_tuple and the tuple data itself
2269 for (i = 0; i < nthispage; i++)
2271 HeapTuple heaptup = heaptuples[ndone + i];
2272 xl_multi_insert_tuple *tuphdr;
2276 xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2277 /* xl_multi_insert_tuple needs two-byte alignment. */
2278 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2279 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2281 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2282 tuphdr->t_infomask = heaptup->t_data->t_infomask;
2283 tuphdr->t_hoff = heaptup->t_data->t_hoff;
2285 /* write bitmap [+ padding] [+ oid] + data */
2286 datalen = heaptup->t_len - SizeofHeapTupleHeader;
2288 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2290 tuphdr->datalen = datalen;
2291 scratchptr += datalen;
2293 totaldatalen = scratchptr - tupledata;
2294 Assert((scratchptr - scratch.data) < BLCKSZ);
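/*
 * At this point the scratch area holds, in order: the xl_heap_multi_insert
 * struct, the offsets[] array (omitted when reinitializing the page), and
 * then, for each tuple, a SHORTALIGN'd xl_multi_insert_tuple header followed
 * immediately by that tuple's data.  Schematically:
 *
 *   [xl_heap_multi_insert][offsets[]]
 *       [xl_multi_insert_tuple][tuple 0 data]
 *       [xl_multi_insert_tuple][tuple 1 data] ...
 */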
2296 if (need_tuple_data)
2297 xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2300 * Signal that this is the last xl_heap_multi_insert record
2301 * emitted by this call to heap_multi_insert(). Needed for logical
2302 * decoding so it knows when to cleanup temporary data.
2304 if (ndone + nthispage == ntuples)
2305 xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
2309 info |= XLOG_HEAP_INIT_PAGE;
2310 bufflags |= REGBUF_WILL_INIT;
2314 * If we're doing logical decoding, include the new tuple data
2315 * even if we take a full-page image of the page.
2317 if (need_tuple_data)
2318 bufflags |= REGBUF_KEEP_DATA;
2321 XLogRegisterData((char *) xlrec, tupledata - scratch.data);
2322 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2324 XLogRegisterBufData(0, tupledata, totaldatalen);
2326 /* filtering by origin on a row level is much more efficient */
2327 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2329 recptr = XLogInsert(RM_HEAP2_ID, info);
2331 PageSetLSN(page, recptr);
2336 UnlockReleaseBuffer(buffer);
2337 if (vmbuffer != InvalidBuffer)
2338 ReleaseBuffer(vmbuffer);
2344 * We're done with the actual inserts. Check for conflicts again, to
2345 * ensure that all rw-conflicts in to these inserts are detected. Without
2346 * this final check, a sequential scan of the heap may have locked the
2347 * table after the "before" check, missing one opportunity to detect the
2348 * conflict, and then scanned the table before the new tuples were there,
2349 * missing the other chance to detect the conflict.
2351 * For heap inserts, we only need to check for table-level SSI locks. Our
2352 * new tuples can't possibly conflict with existing tuple locks, and heap
2353 * page locks are only consolidated versions of tuple locks; they do not
2354 * lock "gaps" as index page locks do. So we don't need to specify a
2355 * buffer when making the call.
2357 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2360 * If tuples are cachable, mark them for invalidation from the caches in
2361 * case we abort. Note it is OK to do this after releasing the buffer,
2362 * because the heaptuples data structure is all in local memory, not in
2363 * the shared buffer.
2365 if (IsCatalogRelation(relation))
2367 for (i = 0; i < ntuples; i++)
2368 CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2371 /* copy t_self fields back to the caller's slots */
2372 for (i = 0; i < ntuples; i++)
2373 slots[i]->tts_tid = heaptuples[i]->t_self;
2375 pgstat_count_heap_insert(relation, ntuples);
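/*
 * Illustrative sketch of one way to avoid the memory-leak behavior noted in
 * the header comment above: run heap_multi_insert() inside a short-lived
 * memory context and delete it afterwards.  AllocSetContextCreate and
 * ALLOCSET_DEFAULT_SIZES come from utils/memutils.h; the function name and
 * the already-filled slots array are hypothetical.
 */
static void
heap_multi_insert_tmpcxt_example(Relation rel, TupleTableSlot **slots,
								 int nslots)
{
	MemoryContext tmpcxt;
	MemoryContext oldcxt;

	tmpcxt = AllocSetContextCreate(CurrentMemoryContext,
								   "multi-insert example",
								   ALLOCSET_DEFAULT_SIZES);
	oldcxt = MemoryContextSwitchTo(tmpcxt);

	heap_multi_insert(rel, slots, nslots,
					  GetCurrentCommandId(true), 0, NULL);

	MemoryContextSwitchTo(oldcxt);
	MemoryContextDelete(tmpcxt);	/* releases anything leaked above */
}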
2379 * simple_heap_insert - insert a tuple
2381 * Currently, this routine differs from heap_insert only in supplying
2382 * a default command ID and not allowing access to the speedup options.
2384 * This should be used rather than using heap_insert directly in most places
2385 * where we are modifying system catalogs.
2388 simple_heap_insert(Relation relation, HeapTuple tup)
2390 heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
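/*
 * Minimal illustration of the usual calling pattern: form a tuple matching
 * the relation's descriptor and hand it to simple_heap_insert(), which fills
 * in tup->t_self.  For system catalogs, CatalogTupleInsert() is normally
 * used instead, since it also maintains the catalog's indexes.  The
 * values/isnull arrays are assumed to match the descriptor; the function
 * name is hypothetical.
 */
static void
simple_heap_insert_example(Relation rel, Datum *values, bool *isnull)
{
	HeapTuple	tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);

	simple_heap_insert(rel, tup);
	heap_freetuple(tup);
}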
2394 * Given infomask/infomask2, compute the bits that must be saved in the
2395 * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2396 * xl_heap_lock_updated WAL records.
2398 * See fix_infomask_from_infobits.
2401 compute_infobits(uint16 infomask, uint16 infomask2)
2404 ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2405 ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2406 ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2407 /* note we ignore HEAP_XMAX_SHR_LOCK here */
2408 ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2409 ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2410 XLHL_KEYS_UPDATED : 0);
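/*
 * Worked example of the mapping above: an infomask with HEAP_XMAX_IS_MULTI
 * and HEAP_XMAX_LOCK_ONLY set (and HEAP_KEYS_UPDATED clear in infomask2)
 * yields XLHL_XMAX_IS_MULTI | XLHL_XMAX_LOCK_ONLY.  HEAP_XMAX_SHR_LOCK needs
 * no infobit of its own because it is simply the combination of the
 * exclusive-lock and key-share-lock bits, both of which are saved
 * individually.
 */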
2414 * Given two versions of the same t_infomask for a tuple, compare them and
2415 * return whether the relevant status for a tuple Xmax has changed. This is
2416 * used after a buffer lock has been released and reacquired: we want to ensure
2417 * that the tuple state continues to be the same it was when we previously
2420 * Note the Xmax field itself must be compared separately.
2423 xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2425 const uint16 interesting =
2426 HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2428 if ((new_infomask & interesting) != (old_infomask & interesting))
2435 * heap_delete - delete a tuple
2437 * See table_delete() for an explanation of the parameters, except that this
2438 * routine directly takes a tuple rather than a slot.
2440 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2441 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
2442 * only for TM_SelfModified, since we cannot obtain cmax from a combocid
2443 * generated by another transaction).
2446 heap_delete(Relation relation, ItemPointer tid,
2447 CommandId cid, Snapshot crosscheck, bool wait,
2448 TM_FailureData *tmfd, bool changingPart)
2451 TransactionId xid = GetCurrentTransactionId();
2457 Buffer vmbuffer = InvalidBuffer;
2458 TransactionId new_xmax;
2459 uint16 new_infomask,
2461 bool have_tuple_lock = false;
2463 bool all_visible_cleared = false;
2464 HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2465 bool old_key_copied = false;
2467 Assert(ItemPointerIsValid(tid));
2470 * Forbid this during a parallel operation, lest it allocate a combocid.
2471 * Other workers might need that combocid for visibility checks, and we
2472 * have no provision for broadcasting it to them.
2474 if (IsInParallelMode())
2476 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2477 errmsg("cannot delete tuples during a parallel operation")));
2479 block = ItemPointerGetBlockNumber(tid);
2480 buffer = ReadBuffer(relation, block);
2481 page = BufferGetPage(buffer);
2484 * Before locking the buffer, pin the visibility map page if it appears to
2485 * be necessary. Since we haven't got the lock yet, someone else might be
2486 * in the middle of changing this, so we'll need to recheck after we have
2489 if (PageIsAllVisible(page))
2490 visibilitymap_pin(relation, block, &vmbuffer);
2492 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2495 * If we didn't pin the visibility map page and the page has become all
2496 * visible while we were busy locking the buffer, we'll have to unlock and
2497 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2498 * unfortunate, but hopefully shouldn't happen often.
2500 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2502 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2503 visibilitymap_pin(relation, block, &vmbuffer);
2504 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2507 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2508 Assert(ItemIdIsNormal(lp));
2510 tp.t_tableOid = RelationGetRelid(relation);
2511 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2512 tp.t_len = ItemIdGetLength(lp);
2516 result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2518 if (result == TM_Invisible)
2520 UnlockReleaseBuffer(buffer);
2522 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2523 errmsg("attempted to delete invisible tuple")));
2525 else if (result == TM_BeingModified && wait)
2527 TransactionId xwait;
2530 /* must copy state data before unlocking buffer */
2531 xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2532 infomask = tp.t_data->t_infomask;
2535 * Sleep until concurrent transaction ends -- except when there's a
2536 * single locker and it's our own transaction. Note we don't care
2537 * which lock mode the locker has, because we need the strongest one.
2539 * Before sleeping, we need to acquire tuple lock to establish our
2540 * priority for the tuple (see heap_lock_tuple). LockTuple will
2541 * release us when we are next-in-line for the tuple.
2543 * If we are forced to "start over" below, we keep the tuple lock;
2544 * this arranges that we stay at the head of the line while rechecking
2547 if (infomask & HEAP_XMAX_IS_MULTI)
2549 /* wait for multixact */
2550 if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2551 LockTupleExclusive))
2553 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2555 /* acquire tuple lock, if necessary */
2556 heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2557 LockWaitBlock, &have_tuple_lock);
2559 /* wait for multixact */
2560 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
2561 relation, &(tp.t_self), XLTW_Delete,
2563 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2566 * If xwait had just locked the tuple then some other xact
2567 * could update this tuple before we get to this point. Check
2568 * for xmax change, and start over if so.
2570 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2571 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2577 * You might think the multixact is necessarily done here, but not
2578 * so: it could have surviving members, namely our own xact or
2579 * other subxacts of this backend. It is legal for us to delete
2580 * the tuple in either case, however (the latter case is
2581 * essentially a situation of upgrading our former shared lock to
2582 * exclusive). We don't bother changing the on-disk hint bits
2583 * since we are about to overwrite the xmax altogether.
2586 else if (!TransactionIdIsCurrentTransactionId(xwait))
2589 * Wait for regular transaction to end; but first, acquire tuple
2592 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2593 heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2594 LockWaitBlock, &have_tuple_lock);
2595 XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2596 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2599 * xwait is done, but if xwait had just locked the tuple then some
2600 * other xact could update this tuple before we get to this point.
2601 * Check for xmax change, and start over if so.
2603 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2604 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2608 /* Otherwise check if it committed or aborted */
2609 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2613 * We may overwrite if previous xmax aborted, or if it committed but
2614 * only locked the tuple without updating it.
2616 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2617 HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
2618 HeapTupleHeaderIsOnlyLocked(tp.t_data))
2620 else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid) ||
2621 HeapTupleHeaderIndicatesMovedPartitions(tp.t_data))
2622 result = TM_Updated;
2624 result = TM_Deleted;
2627 if (crosscheck != InvalidSnapshot && result == TM_Ok)
2629 /* Perform additional check for transaction-snapshot mode RI updates */
2630 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2631 result = TM_Updated;
2634 if (result != TM_Ok)
2636 Assert(result == TM_SelfModified ||
2637 result == TM_Updated ||
2638 result == TM_Deleted ||
2639 result == TM_BeingModified);
2640 Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2641 Assert(result != TM_Updated ||
2642 !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
2643 tmfd->ctid = tp.t_data->t_ctid;
2644 tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2645 if (result == TM_SelfModified)
2646 tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2648 tmfd->cmax = InvalidCommandId;
2649 UnlockReleaseBuffer(buffer);
2650 if (have_tuple_lock)
2651 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2652 if (vmbuffer != InvalidBuffer)
2653 ReleaseBuffer(vmbuffer);
2658 * We're about to do the actual delete -- check for conflict first, to
2659 * avoid possibly having to roll back work we've just done.
2661 * This is safe without a recheck as long as there is no possibility of
2662 * another process scanning the page between this check and the delete
2663 * being visible to the scan (i.e., an exclusive buffer content lock is
2664 * continuously held from this point until the tuple delete is visible).
2666 CheckForSerializableConflictIn(relation, &tp, buffer);
2668 /* replace cid with a combo cid if necessary */
2669 HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2672 * Compute replica identity tuple before entering the critical section so
2673 * we don't PANIC upon a memory allocation failure.
2675 old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
2678 * If this is the first possibly-multixact-able operation in the current
2679 * transaction, set my per-backend OldestMemberMXactId setting. We can be
2680 * certain that the transaction will never become a member of any older
2681 * MultiXactIds than that. (We have to do this even if we end up just
2682 * using our own TransactionId below, since some other backend could
2683 * incorporate our XID into a MultiXact immediately afterwards.)
2685 MultiXactIdSetOldestMember();
2687 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
2688 tp.t_data->t_infomask, tp.t_data->t_infomask2,
2689 xid, LockTupleExclusive, true,
2690 &new_xmax, &new_infomask, &new_infomask2);
2692 START_CRIT_SECTION();
2695 * If this transaction commits, the tuple will become DEAD sooner or
2696 * later. Set flag that this page is a candidate for pruning once our xid
2697 * falls below the OldestXmin horizon. If the transaction finally aborts,
2698 * the subsequent page pruning will be a no-op and the hint will be
2701 PageSetPrunable(page, xid);
2703 if (PageIsAllVisible(page))
2705 all_visible_cleared = true;
2706 PageClearAllVisible(page);
2707 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2708 vmbuffer, VISIBILITYMAP_VALID_BITS);
2711 /* store transaction information of xact deleting the tuple */
2712 tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
2713 tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
2714 tp.t_data->t_infomask |= new_infomask;
2715 tp.t_data->t_infomask2 |= new_infomask2;
2716 HeapTupleHeaderClearHotUpdated(tp.t_data);
2717 HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
2718 HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2719 /* Make sure there is no forward chain link in t_ctid */
2720 tp.t_data->t_ctid = tp.t_self;
2722 /* Signal that this is actually a move into another partition */
2724 HeapTupleHeaderSetMovedPartitions(tp.t_data);
2726 MarkBufferDirty(buffer);
2731 * NB: heap_abort_speculative() uses the same xlog record and replay
2734 if (RelationNeedsWAL(relation))
2736 xl_heap_delete xlrec;
2737 xl_heap_header xlhdr;
2740 /* For logical decoding we need combocids to properly decode the catalog */
2741 if (RelationIsAccessibleInLogicalDecoding(relation))
2742 log_heap_new_cid(relation, &tp);
2745 if (all_visible_cleared)
2746 xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
2748 xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
2749 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
2750 tp.t_data->t_infomask2);
2751 xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
2752 xlrec.xmax = new_xmax;
2754 if (old_key_tuple != NULL)
2756 if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
2757 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
2759 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
2763 XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
2765 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2768 * Log replica identity of the deleted tuple if there is one
2770 if (old_key_tuple != NULL)
2772 xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
2773 xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
2774 xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
2776 XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
2777 XLogRegisterData((char *) old_key_tuple->t_data
2778 + SizeofHeapTupleHeader,
2779 old_key_tuple->t_len
2780 - SizeofHeapTupleHeader);
2783 /* filtering by origin on a row level is much more efficient */
2784 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2786 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
2788 PageSetLSN(page, recptr);
2793 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2795 if (vmbuffer != InvalidBuffer)
2796 ReleaseBuffer(vmbuffer);
2799 * If the tuple has toasted out-of-line attributes, we need to delete
2800 * those items too. We have to do this before releasing the buffer
2801 * because we need to look at the contents of the tuple, but it's OK to
2802 * release the content lock on the buffer first.
2804 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2805 relation->rd_rel->relkind != RELKIND_MATVIEW)
2807 /* toast table entries should never be recursively toasted */
2808 Assert(!HeapTupleHasExternal(&tp));
2810 else if (HeapTupleHasExternal(&tp))
2811 toast_delete(relation, &tp, false);
2814 * Mark tuple for invalidation from system caches at next command
2815 * boundary. We have to do this before releasing the buffer because we
2816 * need to look at the contents of the tuple.
2818 CacheInvalidateHeapTuple(relation, &tp, NULL);
2820 /* Now we can release the buffer */
2821 ReleaseBuffer(buffer);
2824 * Release the lmgr tuple lock, if we had it.
2826 if (have_tuple_lock)
2827 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2829 pgstat_count_heap_delete(relation);
2831 if (old_key_tuple != NULL && old_key_copied)
2832 heap_freetuple(old_key_tuple);
2838 * simple_heap_delete - delete a tuple
2840 * This routine may be used to delete a tuple when concurrent updates of
2841 * the target tuple are not expected (for example, because we have a lock
2842 * on the relation associated with the tuple). Any failure is reported
2846 simple_heap_delete(Relation relation, ItemPointer tid)
2849 TM_FailureData tmfd;
2851 result = heap_delete(relation, tid,
2852 GetCurrentCommandId(true), InvalidSnapshot,
2853 true /* wait for commit */ ,
2854 &tmfd, false /* changingPart */ );
2857 case TM_SelfModified:
2858 /* Tuple was already updated in current command? */
2859 elog(ERROR, "tuple already updated by self");
2863 /* done successfully */
2867 elog(ERROR, "tuple concurrently updated");
2871 elog(ERROR, "tuple concurrently deleted");
2875 elog(ERROR, "unrecognized heap_delete status: %u", result);
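/*
 * Minimal illustration of a simple_heap_delete() caller that has already
 * located the target tuple, so only its TID is needed.  Per the comment
 * above, this is appropriate only when no concurrent updates of the row are
 * expected; otherwise heap_delete() must be called directly and its
 * TM_Result handled.  Catalog code would normally use CatalogTupleDelete()
 * instead.  The function name is hypothetical.
 */
static void
simple_heap_delete_example(Relation rel, HeapTuple tup)
{
	/* tup->t_self must point at the on-disk tuple version to remove */
	simple_heap_delete(rel, &tup->t_self);
}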
2881 * heap_update - replace a tuple
2883 * See table_update() for an explanation of the parameters, except that this
2884 * routine directly takes a tuple rather than a slot.
2886 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2887 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
2888 * only for TM_SelfModified, since we cannot obtain cmax from a combocid
2889 * generated by another transaction).
2892 heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
2893 CommandId cid, Snapshot crosscheck, bool wait,
2894 TM_FailureData *tmfd, LockTupleMode *lockmode)
2897 TransactionId xid = GetCurrentTransactionId();
2898 Bitmapset *hot_attrs;
2899 Bitmapset *key_attrs;
2900 Bitmapset *id_attrs;
2901 Bitmapset *interesting_attrs;
2902 Bitmapset *modified_attrs;
2904 HeapTupleData oldtup;
2906 HeapTuple old_key_tuple = NULL;
2907 bool old_key_copied = false;
2910 MultiXactStatus mxact_status;
2913 vmbuffer = InvalidBuffer,
2914 vmbuffer_new = InvalidBuffer;
2918 bool have_tuple_lock = false;
2920 bool use_hot_update = false;
2921 bool hot_attrs_checked = false;
2923 bool all_visible_cleared = false;
2924 bool all_visible_cleared_new = false;
2925 bool checked_lockers;
2926 bool locker_remains;
2927 TransactionId xmax_new_tuple,
2929 uint16 infomask_old_tuple,
2930 infomask2_old_tuple,
2932 infomask2_new_tuple;
2934 Assert(ItemPointerIsValid(otid));
2937 * Forbid this during a parallel operation, lest it allocate a combocid.
2938 * Other workers might need that combocid for visibility checks, and we
2939 * have no provision for broadcasting it to them.
2941 if (IsInParallelMode())
2943 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2944 errmsg("cannot update tuples during a parallel operation")));
2947 * Fetch the list of attributes to be checked for various operations.
2949 * For HOT considerations, this is wasted effort if we fail to update or
2950 * have to put the new tuple on a different page. But we must compute the
2951 * list before obtaining buffer lock --- in the worst case, if we are
2952 * doing an update on one of the relevant system catalogs, we could
2953 * deadlock if we try to fetch the list later. In any case, the relcache
2954 * caches the data so this is usually pretty cheap.
2956 * We also need columns used by the replica identity and columns that are
2957 * considered the "key" of rows in the table.
2959 * Note that we get copies of each bitmap, so we need not worry about
2960 * relcache flush happening midway through.
2962 hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL);
2963 key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
2964 id_attrs = RelationGetIndexAttrBitmap(relation,
2965 INDEX_ATTR_BITMAP_IDENTITY_KEY);
2968 block = ItemPointerGetBlockNumber(otid);
2969 buffer = ReadBuffer(relation, block);
2970 page = BufferGetPage(buffer);
2972 interesting_attrs = NULL;
2975 * If the page is already full, there is hardly any chance of doing a HOT
2976 * update on this page. It might be wasteful effort to look for index
2977 * column updates only to later reject HOT updates for lack of space in
2978 * the same page. So we are conservative and only fetch hot_attrs if the
2979 * page is not already full. Since we are already holding a pin on the
2980 * buffer, there is no chance that the buffer can get cleaned up
2981 * concurrently and even if that was possible, in the worst case we lose a
2982 * chance to do a HOT update.
2984 if (!PageIsFull(page))
2986 interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
2987 hot_attrs_checked = true;
2989 interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
2990 interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
2993 * Before locking the buffer, pin the visibility map page if it appears to
2994 * be necessary. Since we haven't got the lock yet, someone else might be
2995 * in the middle of changing this, so we'll need to recheck after we have
2998 if (PageIsAllVisible(page))
2999 visibilitymap_pin(relation, block, &vmbuffer);
3001 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3003 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3004 Assert(ItemIdIsNormal(lp));
3007 * Fill in enough data in oldtup for HeapDetermineModifiedColumns to work
3010 oldtup.t_tableOid = RelationGetRelid(relation);
3011 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3012 oldtup.t_len = ItemIdGetLength(lp);
3013 oldtup.t_self = *otid;
3015 /* the new tuple is ready, except for this: */
3016 newtup->t_tableOid = RelationGetRelid(relation);
3018 /* Determine columns modified by the update. */
3019 modified_attrs = HeapDetermineModifiedColumns(relation, interesting_attrs,
3023 * If we're not updating any "key" column, we can grab a weaker lock type.
3024 * This allows for more concurrency when we are running simultaneously
3025 * with foreign key checks.
3027 * Note that if a column gets detoasted while executing the update, but
3028 * the value ends up being the same, this test will fail and we will use
3029 * the stronger lock. This is acceptable; the important case to optimize
3030 * is updates that don't manipulate key columns, not those that
3031 * serendipitously arrive at the same key values.
3033 if (!bms_overlap(modified_attrs, key_attrs))
3035 *lockmode = LockTupleNoKeyExclusive;
3036 mxact_status = MultiXactStatusNoKeyUpdate;
3040 * If this is the first possibly-multixact-able operation in the
3041 * current transaction, set my per-backend OldestMemberMXactId
3042 * setting. We can be certain that the transaction will never become a
3043 * member of any older MultiXactIds than that. (We have to do this
3044 * even if we end up just using our own TransactionId below, since
3045 * some other backend could incorporate our XID into a MultiXact
3046 * immediately afterwards.)
3048 MultiXactIdSetOldestMember();
3052 *lockmode = LockTupleExclusive;
3053 mxact_status = MultiXactStatusUpdate;
3058 * Note: beyond this point, use oldtup not otid to refer to old tuple.
3059 * otid may very well point at newtup->t_self, which we will overwrite
3060 * with the new tuple's location, so there's great risk of confusion if we
3065 checked_lockers = false;
3066 locker_remains = false;
3067 result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3069 /* see below about the "no wait" case */
3070 Assert(result != TM_BeingModified || wait);
3072 if (result == TM_Invisible)
3074 UnlockReleaseBuffer(buffer);
3076 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3077 errmsg("attempted to update invisible tuple")));
3079 else if (result == TM_BeingModified && wait)
3081 TransactionId xwait;
3083 bool can_continue = false;
3086 * XXX note that we don't consider the "no wait" case here. This
3087 * isn't a problem currently because no caller uses that case, but it
3088 * should be fixed if such a caller is introduced. It wasn't a
3089 * problem previously because this code would always wait, but now
3090 * that some tuple locks do not conflict with one of the lock modes we
3091 * use, it is possible that this case is interesting to handle
3094 * This may cause failures with third-party code that calls
3095 * heap_update directly.
3098 /* must copy state data before unlocking buffer */
3099 xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3100 infomask = oldtup.t_data->t_infomask;
3103 * Now we have to do something about the existing locker. If it's a
3104 * multi, sleep on it; we might be awakened before it is completely
3105 * gone (or even not sleep at all in some cases); we need to preserve
3106 * it as locker, unless it is gone completely.
3108 * If it's not a multi, we need to check for sleeping conditions
3109 * before actually going to sleep. If the update doesn't conflict
3110 * with the locks, we just continue without sleeping (but making sure
3113 * Before sleeping, we need to acquire tuple lock to establish our
3114 * priority for the tuple (see heap_lock_tuple). LockTuple will
3115 * release us when we are next-in-line for the tuple. Note we must
3116 * not acquire the tuple lock until we're sure we're going to sleep;
3117 * otherwise we're open for race conditions with other transactions
3118 * holding the tuple lock which sleep on us.
3120 * If we are forced to "start over" below, we keep the tuple lock;
3121 * this arranges that we stay at the head of the line while rechecking
3124 if (infomask & HEAP_XMAX_IS_MULTI)
3126 TransactionId update_xact;
3129 if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3132 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3134 /* acquire tuple lock, if necessary */
3135 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3136 LockWaitBlock, &have_tuple_lock);
3138 /* wait for multixact */
3139 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3140 relation, &oldtup.t_self, XLTW_Update,
3142 checked_lockers = true;
3143 locker_remains = remain != 0;
3144 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3147 * If xwait had just locked the tuple then some other xact
3148 * could update this tuple before we get to this point. Check
3149 * for xmax change, and start over if so.
3151 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3153 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3159 * Note that the multixact may not be done by now. It could have
3160 * surviving members; our own xact or other subxacts of this
3161 * backend, and also any other concurrent transaction that locked
3162 * the tuple with KeyShare if we only got TupleLockUpdate. If
3163 * this is the case, we have to be careful to mark the updated
3164 * tuple with the surviving members in Xmax.
3166 * Note that there could have been another update in the
3167 * MultiXact. In that case, we need to check whether it committed
3168 * or aborted. If it aborted we are safe to update it again;
3169 * otherwise there is an update conflict, and we have to return
3170 * TableTuple{Deleted, Updated} below.
3172 * In the LockTupleExclusive case, we still need to preserve the
3173 * surviving members: those would include the tuple locks we had
3174 * before this one, which are important to keep in case this
3177 if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3178 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3180 update_xact = InvalidTransactionId;
3183 * There was no UPDATE in the MultiXact; or it aborted. No
3184 * TransactionIdIsInProgress() call needed here, since we called
3185 * MultiXactIdWait() above.
3187 if (!TransactionIdIsValid(update_xact) ||
3188 TransactionIdDidAbort(update_xact))
3189 can_continue = true;
3191 else if (TransactionIdIsCurrentTransactionId(xwait))
3194 * The only locker is ourselves; we can avoid grabbing the tuple
3195 * lock here, but must preserve our locking information.
3197 checked_lockers = true;
3198 locker_remains = true;
3199 can_continue = true;
3201 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3204 * If it's just a key-share locker, and we're not changing the key
3205 * columns, we don't need to wait for it to end; but we need to
3206 * preserve it as locker.
3208 checked_lockers = true;
3209 locker_remains = true;
3210 can_continue = true;
3215 * Wait for regular transaction to end; but first, acquire tuple
3218 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3219 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3220 LockWaitBlock, &have_tuple_lock);
3221 XactLockTableWait(xwait, relation, &oldtup.t_self,
3223 checked_lockers = true;
3224 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3227 * xwait is done, but if xwait had just locked the tuple then some
3228 * other xact could update this tuple before we get to this point.
3229 * Check for xmax change, and start over if so.
3231 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3232 !TransactionIdEquals(xwait,
3233 HeapTupleHeaderGetRawXmax(oldtup.t_data)))
3236 /* Otherwise check if it committed or aborted */
3237 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3238 if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3239 can_continue = true;
3244 else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid) ||
3245 HeapTupleHeaderIndicatesMovedPartitions(oldtup.t_data))
3246 result = TM_Updated;
3248 result = TM_Deleted;
3251 if (crosscheck != InvalidSnapshot && result == TM_Ok)
3253 /* Perform additional check for transaction-snapshot mode RI updates */
3254 if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3256 result = TM_Updated;
3257 Assert(!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3261 if (result != TM_Ok)
3263 Assert(result == TM_SelfModified ||
3264 result == TM_Updated ||
3265 result == TM_Deleted ||
3266 result == TM_BeingModified);
3267 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3268 Assert(result != TM_Updated ||
3269 !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3270 tmfd->ctid = oldtup.t_data->t_ctid;
3271 tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3272 if (result == TM_SelfModified)
3273 tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3275 tmfd->cmax = InvalidCommandId;
3276 UnlockReleaseBuffer(buffer);
3277 if (have_tuple_lock)
3278 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3279 if (vmbuffer != InvalidBuffer)
3280 ReleaseBuffer(vmbuffer);
3281 bms_free(hot_attrs);
3282 bms_free(key_attrs);
3284 bms_free(modified_attrs);
3285 bms_free(interesting_attrs);
3290 * If we didn't pin the visibility map page and the page has become all
3291 * visible while we were busy locking the buffer, or during some
3292 * subsequent window during which we had it unlocked, we'll have to unlock
3293 * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3294 * bit unfortunate, especially since we'll now have to recheck whether the
3295 * tuple has been locked or updated under us, but hopefully it won't
3296 * happen very often.
3298 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3300 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3301 visibilitymap_pin(relation, block, &vmbuffer);
3302 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3306 /* Fill in transaction status data */
3309 * If the tuple we're updating is locked, we need to preserve the locking
3310 * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3312 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3313 oldtup.t_data->t_infomask,
3314 oldtup.t_data->t_infomask2,
3315 xid, *lockmode, true,
3316 &xmax_old_tuple, &infomask_old_tuple,
3317 &infomask2_old_tuple);
3320 * And also prepare an Xmax value for the new copy of the tuple. If there
3321 * was no xmax previously, or there was one but all lockers are now gone,
3322 * then use InvalidXid; otherwise, get the xmax from the old tuple. (In
3323 * rare cases that might also be InvalidXid and yet not have the
3324 * HEAP_XMAX_INVALID bit set; that's fine.)
3326 if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3327 HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
3328 (checked_lockers && !locker_remains))
3329 xmax_new_tuple = InvalidTransactionId;
3331 xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3333 if (!TransactionIdIsValid(xmax_new_tuple))
3335 infomask_new_tuple = HEAP_XMAX_INVALID;
3336 infomask2_new_tuple = 0;
3341 * If we found a valid Xmax for the new tuple, then the infomask bits
3342 * to use on the new tuple depend on what was there on the old one.
3343 * Note that since we're doing an update, the only possibility is that
3344 * the lockers had FOR KEY SHARE lock.
3346 if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3348 GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3349 &infomask2_new_tuple);
3353 infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3354 infomask2_new_tuple = 0;
3359 * Prepare the new tuple with the appropriate initial values of Xmin and
3360 * Xmax, as well as initial infomask bits as computed above.
3362 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3363 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3364 HeapTupleHeaderSetXmin(newtup->t_data, xid);
3365 HeapTupleHeaderSetCmin(newtup->t_data, cid);
3366 newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3367 newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3368 HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3371 * Replace cid with a combo cid if necessary. Note that we already put
3372 * the plain cid into the new tuple.
3374 HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3377 * If the toaster needs to be activated, OR if the new tuple will not fit
3378 * on the same page as the old, then we need to release the content lock
3379 * (but not the pin!) on the old tuple's buffer while we are off doing
3380 * TOAST and/or table-file-extension work. We must mark the old tuple to
3381 * show that it's locked, else other processes may try to update it
3384 * We need to invoke the toaster if there are already any out-of-line
3385 * toasted values present, or if the new tuple is over-threshold.
3387 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3388 relation->rd_rel->relkind != RELKIND_MATVIEW)
3390 /* toast table entries should never be recursively toasted */
3391 Assert(!HeapTupleHasExternal(&oldtup));
3392 Assert(!HeapTupleHasExternal(newtup));
3396 need_toast = (HeapTupleHasExternal(&oldtup) ||
3397 HeapTupleHasExternal(newtup) ||
3398 newtup->t_len > TOAST_TUPLE_THRESHOLD);
3400 pagefree = PageGetHeapFreeSpace(page);
3402 newtupsize = MAXALIGN(newtup->t_len);
3404 if (need_toast || newtupsize > pagefree)
3406 TransactionId xmax_lock_old_tuple;
3407 uint16 infomask_lock_old_tuple,
3408 infomask2_lock_old_tuple;
3409 bool cleared_all_frozen = false;
3412 * To prevent concurrent sessions from updating the tuple, we have to
3413 * temporarily mark it locked, while we release the page-level lock.
3415 * To satisfy the rule that any xid potentially appearing in a buffer
3416 * written out to disk must first be WAL-logged, we unfortunately have to WAL-log this
3417 * temporary modification. We can reuse xl_heap_lock for this
3418 * purpose. If we crash/error before following through with the
3419 * actual update, xmax will be of an aborted transaction, allowing
3420 * other sessions to proceed.
3424 * Compute xmax / infomask appropriate for locking the tuple. This has
3425 * to be done separately from the combo that's going to be used for
3426 * updating, because the potentially created multixact would otherwise
3429 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3430 oldtup.t_data->t_infomask,
3431 oldtup.t_data->t_infomask2,
3432 xid, *lockmode, false,
3433 &xmax_lock_old_tuple, &infomask_lock_old_tuple,
3434 &infomask2_lock_old_tuple);
3436 Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
3438 START_CRIT_SECTION();
3440 /* Clear obsolete visibility flags ... */
3441 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3442 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3443 HeapTupleClearHotUpdated(&oldtup);
3444 /* ... and store info about transaction updating this tuple */
3445 Assert(TransactionIdIsValid(xmax_lock_old_tuple));
3446 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
3447 oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
3448 oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
3449 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3451 /* temporarily make it look not-updated, but locked */
3452 oldtup.t_data->t_ctid = oldtup.t_self;
3455 * Clear all-frozen bit on visibility map if needed. We could
3456 * immediately reset ALL_VISIBLE, but given that the WAL logging
3457 * overhead would be unchanged, that doesn't necessarily seem worthwhile.
3460 if (PageIsAllVisible(BufferGetPage(buffer)) &&
3461 visibilitymap_clear(relation, block, vmbuffer,
3462 VISIBILITYMAP_ALL_FROZEN))
3463 cleared_all_frozen = true;
3465 MarkBufferDirty(buffer);
3467 if (RelationNeedsWAL(relation))
3473 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3475 xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
3476 xlrec.locking_xid = xmax_lock_old_tuple;
3477 xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
3478 oldtup.t_data->t_infomask2);
3480 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
3481 XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
3482 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
3483 PageSetLSN(page, recptr);
3488 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3491 * Let the toaster do its thing, if needed.
3493 * Note: below this point, heaptup is the data we actually intend to
3494 * store into the relation; newtup is the caller's original untoasted
3499 /* Note we always use WAL and FSM during updates */
3500 heaptup = toast_insert_or_update(relation, newtup, &oldtup, 0);
3501 newtupsize = MAXALIGN(heaptup->t_len);
3507 * Now, do we need a new page for the tuple, or not? This is a bit
3508 * tricky since someone else could have added tuples to the page while
3509 * we weren't looking. We have to recheck the available space after
3510 * reacquiring the buffer lock. But don't bother to do that if the
3511 * former amount of free space is still not enough; it's unlikely
3512 * there's more free now than before.
3514 * What's more, if we need to get a new page, we will need to acquire
3515 * buffer locks on both old and new pages. To avoid deadlock against
3516 * some other backend trying to get the same two locks in the other
3517 * order, we must be consistent about the order we get the locks in.
3518 * We use the rule "lock the lower-numbered page of the relation
3519 * first". To implement this, we must do RelationGetBufferForTuple
3520 * while not holding the lock on the old page, and we must rely on it
3521 * to get the locks on both pages in the correct order.
3523 if (newtupsize > pagefree)
3525 /* Assume there's no chance to put heaptup on same page. */
3526 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3528 &vmbuffer_new, &vmbuffer);
3532 /* Re-acquire the lock on the old tuple's page. */
3533 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3534 /* Re-check using the up-to-date free space */
3535 pagefree = PageGetHeapFreeSpace(page);
3536 if (newtupsize > pagefree)
3539 * Rats, it doesn't fit anymore. We must now unlock and
3540 * relock to avoid deadlock. Fortunately, this path should
3543 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3544 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3546 &vmbuffer_new, &vmbuffer);
3550 /* OK, it fits here, so we're done. */
3557 /* No TOAST work needed, and it'll fit on same page */
3563 * We're about to do the actual update -- check for conflict first, to
3564 * avoid possibly having to roll back work we've just done.
3566 * This is safe without a recheck as long as there is no possibility of
3567 * another process scanning the pages between this check and the update
3568 * being visible to the scan (i.e., exclusive buffer content lock(s) are
3569 * continuously held from this point until the tuple update is visible).
3571 * For the new tuple the only check needed is at the relation level, but
3572 * since both tuples are in the same relation and the check for oldtup
3573 * will include checking the relation level, there is no benefit to a
3574 * separate check for the new tuple.
3576 CheckForSerializableConflictIn(relation, &oldtup, buffer);
3579 * At this point newbuf and buffer are both pinned and locked, and newbuf
3580 * has enough space for the new tuple. If they are the same buffer, only
3584 if (newbuf == buffer)
3587 * Since the new tuple is going into the same page, we might be able
3588 * to do a HOT update. Check if any of the index columns have been
3589 * changed. If the page was already full, we may have skipped checking
3590 * for index columns, and also can't do a HOT update.
3592 if (hot_attrs_checked && !bms_overlap(modified_attrs, hot_attrs))
3593 use_hot_update = true;
3597 /* Set a hint that the old page could use prune/defrag */
3602 * Compute replica identity tuple before entering the critical section so
3603 * we don't PANIC upon a memory allocation failure.
3604 * ExtractReplicaIdentity() will return NULL if nothing needs to be
3607 old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
3608 bms_overlap(modified_attrs, id_attrs),
3611 /* NO EREPORT(ERROR) from here till changes are logged */
3612 START_CRIT_SECTION();
3615 * If this transaction commits, the old tuple will become DEAD sooner or
3616 * later. Set flag that this page is a candidate for pruning once our xid
3617 * falls below the OldestXmin horizon. If the transaction finally aborts,
3618 * the subsequent page pruning will be a no-op and the hint will be
3621 * XXX Should we set hint on newbuf as well? If the transaction aborts,
3622 * there would be a prunable tuple in the newbuf; but for now we choose
3623 * not to optimize for aborts. Note that heap_xlog_update must be kept in
3624 * sync if this decision changes.
3626 PageSetPrunable(page, xid);
3630 /* Mark the old tuple as HOT-updated */
3631 HeapTupleSetHotUpdated(&oldtup);
3632 /* And mark the new tuple as heap-only */
3633 HeapTupleSetHeapOnly(heaptup);
3634 /* Mark the caller's copy too, in case different from heaptup */
3635 HeapTupleSetHeapOnly(newtup);
3639 /* Make sure tuples are correctly marked as not-HOT */
3640 HeapTupleClearHotUpdated(&oldtup);
3641 HeapTupleClearHeapOnly(heaptup);
3642 HeapTupleClearHeapOnly(newtup);
3645 RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
3648 /* Clear obsolete visibility flags, possibly set by ourselves above... */
3649 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3650 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3651 /* ... and store info about transaction updating this tuple */
3652 Assert(TransactionIdIsValid(xmax_old_tuple));
3653 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
3654 oldtup.t_data->t_infomask |= infomask_old_tuple;
3655 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
3656 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3658 /* record address of new tuple in t_ctid of old one */
3659 oldtup.t_data->t_ctid = heaptup->t_self;
3661 /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
3662 if (PageIsAllVisible(BufferGetPage(buffer)))
3664 all_visible_cleared = true;
3665 PageClearAllVisible(BufferGetPage(buffer));
3666 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3667 vmbuffer, VISIBILITYMAP_VALID_BITS);
3669 if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
3671 all_visible_cleared_new = true;
3672 PageClearAllVisible(BufferGetPage(newbuf));
3673 visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
3674 vmbuffer_new, VISIBILITYMAP_VALID_BITS);
3677 if (newbuf != buffer)
3678 MarkBufferDirty(newbuf);
3679 MarkBufferDirty(buffer);
3682 if (RelationNeedsWAL(relation))
3687 * For logical decoding we need combocids to properly decode the
3690 if (RelationIsAccessibleInLogicalDecoding(relation))
3692 log_heap_new_cid(relation, &oldtup);
3693 log_heap_new_cid(relation, heaptup);
3696 recptr = log_heap_update(relation, buffer,
3697 newbuf, &oldtup, heaptup,
3699 all_visible_cleared,
3700 all_visible_cleared_new);
3701 if (newbuf != buffer)
3703 PageSetLSN(BufferGetPage(newbuf), recptr);
3705 PageSetLSN(BufferGetPage(buffer), recptr);
3710 if (newbuf != buffer)
3711 LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
3712 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3715 * Mark old tuple for invalidation from system caches at next command
3716 * boundary, and mark the new tuple for invalidation in case we abort. We
3717 * have to do this before releasing the buffer because oldtup is in the
3718 * buffer. (heaptup is all in local memory, but it's necessary to process
3719 * both tuple versions in one call to inval.c so we can avoid redundant
3722 CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
3724 /* Now we can release the buffer(s) */
3725 if (newbuf != buffer)
3726 ReleaseBuffer(newbuf);
3727 ReleaseBuffer(buffer);
3728 if (BufferIsValid(vmbuffer_new))
3729 ReleaseBuffer(vmbuffer_new);
3730 if (BufferIsValid(vmbuffer))
3731 ReleaseBuffer(vmbuffer);
3734 * Release the lmgr tuple lock, if we had it.
3736 if (have_tuple_lock)
3737 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3739 pgstat_count_heap_update(relation, use_hot_update);
3742 * If heaptup is a private copy, release it. Don't forget to copy t_self
3743 * back to the caller's image, too.
3745 if (heaptup != newtup)
3747 newtup->t_self = heaptup->t_self;
3748 heap_freetuple(heaptup);
3751 if (old_key_tuple != NULL && old_key_copied)
3752 heap_freetuple(old_key_tuple);
3754 bms_free(hot_attrs);
3755 bms_free(key_attrs);
3757 bms_free(modified_attrs);
3758 bms_free(interesting_attrs);
3764 * Check whether the specified attribute's value is the same in both given tuples.
3765 * Subroutine for HeapDetermineModifiedColumns.
3768 heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
3769 HeapTuple tup1, HeapTuple tup2)
3775 Form_pg_attribute att;
3778 * If it's a whole-tuple reference, say "not equal". It's not really
3779 * worth supporting this case, since it could only succeed after a no-op
3780 * update, which is hardly a case worth optimizing for.
3786 * Likewise, automatically say "not equal" for any system attribute other
3787 * than tableOID; we cannot expect these to be consistent in a HOT chain,
3788 * or even to be set correctly yet in the new tuple.
3792 if (attrnum != TableOidAttributeNumber)
3797 * Extract the corresponding values. XXX this is pretty inefficient if
3798 * there are many indexed columns. Should HeapDetermineModifiedColumns do
3799 * a single heap_deform_tuple call on each tuple, instead? But that
3800 * doesn't work for system columns ...
3802 value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
3803 value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
3806 * If one value is NULL and the other is not, then they are certainly not equal.
3809 if (isnull1 != isnull2)
3813 * If both are NULL, they can be considered equal.
3819 * We do simple binary comparison of the two datums. This may be overly
3820 * strict because there can be multiple binary representations for the
3821 * same logical value. But we should be OK as long as there are no false
3822 * positives. Using a type-specific equality operator is messy because
3823 * there could be multiple notions of equality in different operator
3824 * classes; furthermore, we cannot safely invoke user-defined functions
3825 * while holding exclusive buffer lock.
3829 /* The only allowed system columns are OIDs, so do this */
3830 return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
3834 Assert(attrnum <= tupdesc->natts);
3835 att = TupleDescAttr(tupdesc, attrnum - 1);
3836 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
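/*
 * Note on the binary comparison above: datumIsEqual() can report "not equal"
 * for values that are logically equal but stored differently (for example,
 * one copy compressed or short-headered and the other not).  That only makes
 * the caller treat an unchanged column as modified, which merely loses an
 * optimization; it never reports "equal" for genuinely different values.
 */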
3841 * Check which columns are being updated.
3843 * Given the old and new versions of a tuple, determine which of the columns
3844 * listed as interesting actually changed, and return them as a bitmapset.
3846 * The input bitmapset is destructively modified; that is OK since this is
3847 * invoked at most once in heap_update.
3850 HeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols,
3851 HeapTuple oldtup, HeapTuple newtup)
3854 Bitmapset *modified = NULL;
3856 while ((attnum = bms_first_member(interesting_cols)) >= 0)
3858 attnum += FirstLowInvalidHeapAttributeNumber;
3860 if (!heap_tuple_attr_equals(RelationGetDescr(relation),
3861 attnum, oldtup, newtup))
3862 modified = bms_add_member(modified,
3863 attnum - FirstLowInvalidHeapAttributeNumber);
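/*
 * Illustrative sketch of the attribute-number convention used above: both
 * the input and result bitmapsets store
 * attnum - FirstLowInvalidHeapAttributeNumber, the same offset convention
 * RelationGetIndexAttrBitmap() uses.  The column number 2 and the function
 * name are hypothetical.
 */
static bool
column_changed_example(Relation rel, HeapTuple oldtup, HeapTuple newtup)
{
	AttrNumber	attno = 2;
	Bitmapset  *interesting = NULL;
	Bitmapset  *modified;
	bool		changed;

	interesting = bms_add_member(interesting,
								 attno - FirstLowInvalidHeapAttributeNumber);
	modified = HeapDetermineModifiedColumns(rel, interesting, oldtup, newtup);
	changed = bms_is_member(attno - FirstLowInvalidHeapAttributeNumber,
							modified);

	bms_free(interesting);
	bms_free(modified);
	return changed;
}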
3870 * simple_heap_update - replace a tuple
3872 * This routine may be used to update a tuple when concurrent updates of
3873 * the target tuple are not expected (for example, because we have a lock
3874 * on the relation associated with the tuple). Any failure is reported via ereport().
3878 simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
3881 TM_FailureData tmfd;
3882 LockTupleMode lockmode;
3884 result = heap_update(relation, otid, tup,
3885 GetCurrentCommandId(true), InvalidSnapshot,
3886 true /* wait for commit */ ,
3890 case TM_SelfModified:
3891 /* Tuple was already updated in current command? */
3892 elog(ERROR, "tuple already updated by self");
3896 /* done successfully */
3900 elog(ERROR, "tuple concurrently updated");
3904 elog(ERROR, "tuple concurrently deleted");
3908 elog(ERROR, "unrecognized heap_update status: %u", result);
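/*
 * Usage sketch (hypothetical caller; assumes the relation is already open
 * with a lock that excludes concurrent updates of the target tuple):
 *
 *		HeapTuple	newtup = heap_modify_tuple(oldtup, RelationGetDescr(rel),
 *											   values, nulls, replaces);
 *
 *		simple_heap_update(rel, &oldtup->t_self, newtup);
 *
 * Index entries still have to be maintained by the caller; catalog code
 * normally goes through CatalogTupleUpdate(), which wraps this routine and
 * inserts the index entries itself.  Any concurrency failure is raised as an
 * error inside the call, so no retry logic is needed here.
 */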
3915 * Return the MultiXactStatus corresponding to the given tuple lock mode.
3917 static MultiXactStatus
3918 get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
3923 retval = tupleLockExtraInfo[mode].updstatus;
3925 retval = tupleLockExtraInfo[mode].lockstatus;
3928 elog(ERROR, "invalid lock tuple mode %d/%s", mode,
3929 is_update ? "true" : "false");
3931 return (MultiXactStatus) retval;
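/*
 * For illustration, the lockstatus/updstatus mapping encoded in
 * tupleLockExtraInfo (defined earlier in this file) works out to
 * (lock status / update status):
 *
 *		LockTupleKeyShare		-> MultiXactStatusForKeyShare    / (none)
 *		LockTupleShare			-> MultiXactStatusForShare       / (none)
 *		LockTupleNoKeyExclusive	-> MultiXactStatusForNoKeyUpdate / MultiXactStatusNoKeyUpdate
 *		LockTupleExclusive		-> MultiXactStatusForUpdate      / MultiXactStatusUpdate
 *
 * The two share modes have no update variant, which is why requesting
 * is_update with them ends in the elog(ERROR) above.
 */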
3935 * heap_lock_tuple - lock a tuple in shared or exclusive mode
3937 * Note that this acquires a buffer pin, which the caller must release.
3940 * relation: relation containing tuple (caller must hold suitable lock)
3941 * tid: TID of tuple to lock
3942 * cid: current command ID (used for visibility test, and stored into
3943 * tuple's cmax if lock is successful)
3944 * mode: indicates if shared or exclusive tuple lock is desired
3945 * wait_policy: what to do if tuple lock is not available
3946 * follow_updates: if true, follow the update chain to also lock descendant tuples
3949 * Output parameters:
3950 * *tuple: all fields filled in
3951 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
3952 * *tmfd: filled in failure cases (see below)
3954 * Function results are the same as the ones for table_lock_tuple().
3956 * In the failure cases other than TM_Invisible, the routine fills
3957 * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
3958 * if necessary), and t_cmax (the last only for TM_SelfModified,
3959 * since we cannot obtain cmax from a combocid generated by another transaction).
3961 * See comments for struct TM_FailureData for additional info.
3963 * See README.tuplock for a thorough explanation of this mechanism.
3966 heap_lock_tuple(Relation relation, HeapTuple tuple,
3967 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
3968 bool follow_updates,
3969 Buffer *buffer, TM_FailureData *tmfd)
3972 ItemPointer tid = &(tuple->t_self);
3975 Buffer vmbuffer = InvalidBuffer;
3979 uint16 old_infomask,
3982 bool first_time = true;
3983 bool have_tuple_lock = false;
3984 bool cleared_all_frozen = false;
3986 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3987 block = ItemPointerGetBlockNumber(tid);
3990 * Before locking the buffer, pin the visibility map page if it appears to
3991 * be necessary. Since we haven't got the lock yet, someone else might be
3992 * in the middle of changing this, so we'll need to recheck after we have the lock.
3995 if (PageIsAllVisible(BufferGetPage(*buffer)))
3996 visibilitymap_pin(relation, block, &vmbuffer);
3998 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4000 page = BufferGetPage(*buffer);
4001 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4002 Assert(ItemIdIsNormal(lp));
4004 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4005 tuple->t_len = ItemIdGetLength(lp);
4006 tuple->t_tableOid = RelationGetRelid(relation);
4009 result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4011 if (result == TM_Invisible)
4014 * This is possible, but only when locking a tuple for ON CONFLICT
4015 * UPDATE. We return this value here rather than throwing an error in
4016 * order to give that case the opportunity to throw a more specific error.
4019 result = TM_Invisible;
4022 else if (result == TM_BeingModified ||
4023 result == TM_Updated ||
4024 result == TM_Deleted)
4026 TransactionId xwait;
4030 ItemPointerData t_ctid;
4032 /* must copy state data before unlocking buffer */
4033 xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4034 infomask = tuple->t_data->t_infomask;
4035 infomask2 = tuple->t_data->t_infomask2;
4036 ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4038 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4041 * If any subtransaction of the current top transaction already holds
4042 * a lock as strong as or stronger than what we're requesting, we
4043 * effectively hold the desired lock already. We *must* succeed
4044 * without trying to take the tuple lock, else we will deadlock
4045 * against anyone wanting to acquire a stronger lock.
4047 * Note we only do this the first time we loop on the HTSU result;
4048 * there is no point in testing in subsequent passes, because
4049 * evidently our own transaction cannot have acquired a new lock after
4050 * the first time we checked.
4056 if (infomask & HEAP_XMAX_IS_MULTI)
4060 MultiXactMember *members;
4063 * We don't need to allow old multixacts here; if that had
4064 * been the case, HeapTupleSatisfiesUpdate would have returned
4065 * TM_Ok and we wouldn't be here.
4068 GetMultiXactIdMembers(xwait, &members, false,
4069 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4071 for (i = 0; i < nmembers; i++)
4073 /* only consider members of our own transaction */
4074 if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4077 if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4088 else if (TransactionIdIsCurrentTransactionId(xwait))
4092 case LockTupleKeyShare:
4093 Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4094 HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4095 HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4098 case LockTupleShare:
4099 if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4100 HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4106 case LockTupleNoKeyExclusive:
4107 if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4113 case LockTupleExclusive:
4114 if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4115 infomask2 & HEAP_KEYS_UPDATED)
4126 * Initially assume that we will have to wait for the locking
4127 * transaction(s) to finish. We check various cases below in which
4128 * this can be turned off.
4130 require_sleep = true;
4131 if (mode == LockTupleKeyShare)
4134 * If we're requesting KeyShare, and there's no update present, we
4135 * don't need to wait. Even if there is an update, we can still
4136 * continue if the key hasn't been modified.
4138 * However, if there are updates, we need to walk the update chain
4139 * to mark future versions of the row as locked, too. That way,
4140 * if somebody deletes that future version, we're protected
4141 * against the key going away. This locking of future versions
4142 * could block momentarily, if a concurrent transaction is
4143 * deleting a key; or it could return a value to the effect that
4144 * the transaction deleting the key has already committed. So we
4145 * do this before re-locking the buffer; otherwise this would be
4146 * prone to deadlocks.
4148 * Note that the TID we're locking was grabbed before we unlocked
4149 * the buffer. For it to change while we're not looking, the
4150 * other properties we're testing for below after re-locking the
4151 * buffer would also change, in which case we would restart this loop.
4154 if (!(infomask2 & HEAP_KEYS_UPDATED))
4158 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4161 * If there are updates, follow the update chain; bail out if
4162 * that cannot be done.
4164 if (follow_updates && updated)
4168 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4169 GetCurrentTransactionId(),
4174 /* recovery code expects to have buffer lock held */
4175 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4180 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4183 * Make sure it's still an appropriate lock, else start over.
4184 * Also, if it wasn't updated before we released the lock, but
4185 * is updated now, we start over too; the reason is that we
4186 * now need to follow the update chain to lock the new versions.
4189 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4190 ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4194 /* Things look okay, so we can skip sleeping */
4195 require_sleep = false;
4198 * Note we allow Xmax to change here; other updaters/lockers
4199 * could have modified it before we grabbed the buffer lock.
4200 * However, this is not a problem, because with the recheck we
4201 * just did we ensure that they still don't conflict with the lock we want.
4206 else if (mode == LockTupleShare)
4209 * If we're requesting Share, we can similarly avoid sleeping if
4210 * there's no update and no exclusive lock present.
4212 if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4213 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4215 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4218 * Make sure it's still an appropriate lock, else start over.
4219 * See above about allowing xmax to change.
4221 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4222 HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4224 require_sleep = false;
4227 else if (mode == LockTupleNoKeyExclusive)
4230 * If we're requesting NoKeyExclusive, we might also be able to
4231 * avoid sleeping; just ensure that there is no conflicting lock already acquired.
4234 if (infomask & HEAP_XMAX_IS_MULTI)
4236 if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4240 * No conflict, but if the xmax changed under us in the
4241 * meantime, start over.
4243 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4244 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4245 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4249 /* otherwise, we're good */
4250 require_sleep = false;
4253 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4255 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4257 /* if the xmax changed in the meantime, start over */
4258 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4259 !TransactionIdEquals(
4260 HeapTupleHeaderGetRawXmax(tuple->t_data),
4263 /* otherwise, we're good */
4264 require_sleep = false;
4269 * As a check independent from those above, we can also avoid sleeping
4270 * if the current transaction is the sole locker of the tuple. Note
4271 * that the strength of the lock already held is irrelevant; this is
4272 * not about recording the lock in Xmax (which will be done regardless
4273 * of this optimization, below). Also, note that the cases where we
4274 * hold a lock stronger than we are requesting are already handled
4275 * above by not doing anything.
4277 * Note we only deal with the non-multixact case here; MultiXactIdWait
4278 * is well equipped to deal with this situation on its own.
4280 if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4281 TransactionIdIsCurrentTransactionId(xwait))
4283 /* ... but if the xmax changed in the meantime, start over */
4284 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4285 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4286 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4289 Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
4290 require_sleep = false;
4294 * Time to sleep on the other transaction/multixact, if necessary.
4296 * If the other transaction is an update/delete that's already
4297 * committed, then sleeping cannot possibly do any good: if we're
4298 * required to sleep, get out to raise an error instead.
4300 * By here, we either have already acquired the buffer exclusive lock,
4301 * or we must wait for the locking transaction or multixact; so below
4302 * we ensure that we grab buffer lock after the sleep.
4304 if (require_sleep && (result == TM_Updated || result == TM_Deleted))
4306 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4309 else if (require_sleep)
4312 * Acquire tuple lock to establish our priority for the tuple, or
4313 * die trying. LockTuple will release us when we are next-in-line
4314 * for the tuple. We must do this even if we are share-locking.
4316 * If we are forced to "start over" below, we keep the tuple lock;
4317 * this arranges that we stay at the head of the line while
4318 * rechecking tuple state.
4320 if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
4324 * This can only happen if wait_policy is Skip and the lock
4325 * couldn't be obtained.
4327 result = TM_WouldBlock;
4328 /* recovery code expects to have buffer lock held */
4329 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4333 if (infomask & HEAP_XMAX_IS_MULTI)
4335 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4337 /* We only ever lock tuples, never update them */
4338 if (status >= MultiXactStatusNoKeyUpdate)
4339 elog(ERROR, "invalid lock mode in heap_lock_tuple");
4341 /* wait for multixact to end, or die trying */
4342 switch (wait_policy)
4345 MultiXactIdWait((MultiXactId) xwait, status, infomask,
4346 relation, &tuple->t_self, XLTW_Lock, NULL);
4349 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4350 status, infomask, relation,
4353 result = TM_WouldBlock;
4354 /* recovery code expects to have buffer lock held */
4355 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4360 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4361 status, infomask, relation,
4364 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4365 errmsg("could not obtain lock on row in relation \"%s\"",
4366 RelationGetRelationName(relation))));
4372 * Of course, the multixact might not be done here: if we're
4373 * requesting a light lock mode, other transactions with light
4374 * locks could still be alive, as well as locks owned by our
4375 * own xact or other subxacts of this backend. We need to
4376 * preserve the surviving MultiXact members. Note that it
4377 * isn't absolutely necessary in the latter case, but doing so is simpler.
4383 /* wait for regular transaction to end, or die trying */
4384 switch (wait_policy)
4387 XactLockTableWait(xwait, relation, &tuple->t_self,
4391 if (!ConditionalXactLockTableWait(xwait))
4393 result = TM_WouldBlock;
4394 /* recovery code expects to have buffer lock held */
4395 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4400 if (!ConditionalXactLockTableWait(xwait))
4402 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4403 errmsg("could not obtain lock on row in relation \"%s\"",
4404 RelationGetRelationName(relation))));
4409 /* if there are updates, follow the update chain */
4410 if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4414 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4415 GetCurrentTransactionId(),
4420 /* recovery code expects to have buffer lock held */
4421 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4426 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4429 * xwait is done, but if xwait had just locked the tuple then some
4430 * other xact could update this tuple before we get to this point.
4431 * Check for xmax change, and start over if so.
4433 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4434 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4438 if (!(infomask & HEAP_XMAX_IS_MULTI))
4441 * Otherwise check if it committed or aborted. Note we cannot
4442 * be here if the tuple was only locked by somebody who didn't
4443 * conflict with us; that would have been handled above. So
4444 * that transaction must necessarily be gone by now. But
4445 * don't check for this in the multixact case, because some
4446 * locker transactions might still be running.
4448 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
4452 /* By here, we're certain that we hold buffer exclusive lock again */
4455 * We may lock if previous xmax aborted, or if it committed but only
4456 * locked the tuple without updating it; or if we didn't have to wait
4457 * at all for whatever reason.
4459 if (!require_sleep ||
4460 (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
4461 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4462 HeapTupleHeaderIsOnlyLocked(tuple->t_data))
4464 else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid) ||
4465 HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data))
4466 result = TM_Updated;
4468 result = TM_Deleted;
4472 if (result != TM_Ok)
4474 Assert(result == TM_SelfModified || result == TM_Updated ||
4475 result == TM_Deleted || result == TM_WouldBlock);
4476 Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
4477 Assert(result != TM_Updated ||
4478 !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
4479 tmfd->ctid = tuple->t_data->t_ctid;
4480 tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
4481 if (result == TM_SelfModified)
4482 tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
4484 tmfd->cmax = InvalidCommandId;
4489 * If we didn't pin the visibility map page and the page has become all
4490 * visible while we were busy locking the buffer, or during some
4491 * subsequent window during which we had it unlocked, we'll have to unlock
4492 * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
4493 * unfortunate, especially since we'll now have to recheck whether the
4494 * tuple has been locked or updated under us, but hopefully it won't
4495 * happen very often.
4497 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
4499 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4500 visibilitymap_pin(relation, block, &vmbuffer);
4501 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4505 xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
4506 old_infomask = tuple->t_data->t_infomask;
4509 * If this is the first possibly-multixact-able operation in the current
4510 * transaction, set my per-backend OldestMemberMXactId setting. We can be
4511 * certain that the transaction will never become a member of any older
4512 * MultiXactIds than that. (We have to do this even if we end up just
4513 * using our own TransactionId below, since some other backend could
4514 * incorporate our XID into a MultiXact immediately afterwards.)
4516 MultiXactIdSetOldestMember();
4519 * Compute the new xmax and infomask to store into the tuple. Note we do
4520 * not modify the tuple just yet, because that would leave it in the wrong
4521 * state if multixact.c elogs.
4523 compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
4524 GetCurrentTransactionId(), mode, false,
4525 &xid, &new_infomask, &new_infomask2);
4527 START_CRIT_SECTION();
4530 * Store transaction information of xact locking the tuple.
4532 * Note: Cmax is meaningless in this context, so don't set it; this avoids
4533 * possibly generating a useless combo CID. Moreover, if we're locking a
4534 * previously updated tuple, it's important to preserve the Cmax.
4536 * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
4537 * we would break the HOT chain.
4539 tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
4540 tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4541 tuple->t_data->t_infomask |= new_infomask;
4542 tuple->t_data->t_infomask2 |= new_infomask2;
4543 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
4544 HeapTupleHeaderClearHotUpdated(tuple->t_data);
4545 HeapTupleHeaderSetXmax(tuple->t_data, xid);
4548 * Make sure there is no forward chain link in t_ctid. Note that in the
4549 * cases where the tuple has been updated, we must not overwrite t_ctid,
4550 * because it was set by the updater. Moreover, if the tuple has been
4551 * updated, we need to follow the update chain to lock the new versions of
4552 * the tuple as well.
4554 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
4555 tuple->t_data->t_ctid = *tid;
4557 /* Clear only the all-frozen bit on visibility map if needed */
4558 if (PageIsAllVisible(page) &&
4559 visibilitymap_clear(relation, block, vmbuffer,
4560 VISIBILITYMAP_ALL_FROZEN))
4561 cleared_all_frozen = true;
4564 MarkBufferDirty(*buffer);
4567 * XLOG stuff. You might think that we don't need an XLOG record because
4568 * there is no state change worth restoring after a crash. You would be
4569 * wrong however: we have just written either a TransactionId or a
4570 * MultiXactId that may never have been seen on disk before, and we need
4571 * to make sure that there are XLOG entries covering those ID numbers.
4572 * Else the same IDs might be re-used after a crash, which would be
4573 * disastrous if this page made it to disk before the crash. Essentially
4574 * we have to enforce the WAL log-before-data rule even in this case.
4575 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
4576 * entries for everything anyway.)
4578 if (RelationNeedsWAL(relation))
4584 XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
4586 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
4587 xlrec.locking_xid = xid;
4588 xlrec.infobits_set = compute_infobits(new_infomask,
4589 tuple->t_data->t_infomask2);
4590 xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
4591 XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
4593 /* we don't decode row locks atm, so no need to log the origin */
4595 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
4597 PageSetLSN(page, recptr);
4605 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4608 if (BufferIsValid(vmbuffer))
4609 ReleaseBuffer(vmbuffer);
4612 * Don't update the visibility map here. Locking a tuple doesn't change visibility info.
4617 * Now that we have successfully marked the tuple as locked, we can
4618 * release the lmgr tuple lock, if we had it.
4620 if (have_tuple_lock)
4621 UnlockTupleTuplock(relation, tid, mode);
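/*
 * Caller-side sketch (hypothetical, mirroring what row-mark processing does
 * through the table AM): lock one tuple for a SELECT ... FOR UPDATE style
 * operation and handle the common outcomes.
 *
 *		HeapTupleData	tup;
 *		TM_FailureData	tmfd;
 *		Buffer			buf;
 *		TM_Result		res;
 *
 *		tup.t_self = *tid;
 *		res = heap_lock_tuple(rel, &tup, GetCurrentCommandId(true),
 *							  LockTupleExclusive, LockWaitBlock,
 *							  true, &buf, &tmfd);
 *		ReleaseBuffer(buf);		-- the pin is the caller's to drop in all cases
 *
 *		if (res == TM_Updated)
 *			-- chase tmfd.ctid to the newer version, or report a
 *			-- serialization failure, depending on isolation level
 */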
4627 * Acquire heavyweight lock on the given tuple, in preparation for acquiring
4628 * its normal, Xmax-based tuple lock.
4630 * have_tuple_lock is an input and output parameter: on input, it indicates
4631 * whether the lock has previously been acquired (and this function does
4632 * nothing in that case). If this function returns success, have_tuple_lock
4633 * has been flipped to true.
4635 * Returns false if it was unable to obtain the lock; this can only happen if
4636 * wait_policy is Skip.
4639 heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
4640 LockWaitPolicy wait_policy, bool *have_tuple_lock)
4642 if (*have_tuple_lock)
4645 switch (wait_policy)
4648 LockTupleTuplock(relation, tid, mode);
4652 if (!ConditionalLockTupleTuplock(relation, tid, mode))
4657 if (!ConditionalLockTupleTuplock(relation, tid, mode))
4659 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4660 errmsg("could not obtain lock on row in relation \"%s\"",
4661 RelationGetRelationName(relation))));
4664 *have_tuple_lock = true;
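/*
 * For illustration, the three wait policies map onto the branches above as
 * their SQL-level users would expect:
 *
 *		LockWaitBlock	- plain FOR UPDATE: block until the tuple lock is free.
 *		LockWaitSkip	- FOR UPDATE SKIP LOCKED: return false; the caller
 *						  reports TM_WouldBlock and moves on.
 *		LockWaitError	- FOR UPDATE NOWAIT: fail immediately with the
 *						  "could not obtain lock on row" error.
 */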
4670 * Given an original set of Xmax and infomask, and a transaction (identified by
4671 * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
4672 * corresponding infomasks to use on the tuple.
4674 * Note that this might have side effects such as creating a new MultiXactId.
4676 * Most callers will have called HeapTupleSatisfiesUpdate before this function;
4677 * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
4678 * but it was not running anymore. There is a race condition, which is that the
4679 * MultiXactId may have finished since then, but that uncommon case is handled
4680 * either here, or within MultiXactIdExpand.
4682 * There is a similar race condition possible when the old xmax was a regular
4683 * TransactionId. We test TransactionIdIsInProgress again just to narrow the
4684 * window, but it's still possible to end up creating an unnecessary
4685 * MultiXactId. Fortunately this is harmless.
4688 compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
4689 uint16 old_infomask2, TransactionId add_to_xmax,
4690 LockTupleMode mode, bool is_update,
4691 TransactionId *result_xmax, uint16 *result_infomask,
4692 uint16 *result_infomask2)
4694 TransactionId new_xmax;
4695 uint16 new_infomask,
4698 Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
4703 if (old_infomask & HEAP_XMAX_INVALID)
4706 * No previous locker; we just insert our own TransactionId.
4708 * Note that it's critical that this case be the first one checked,
4709 * because there are several blocks below that come back to this one
4710 * to implement certain optimizations; old_infomask might contain
4711 * other dirty bits in those cases, but we don't really care.
4715 new_xmax = add_to_xmax;
4716 if (mode == LockTupleExclusive)
4717 new_infomask2 |= HEAP_KEYS_UPDATED;
4721 new_infomask |= HEAP_XMAX_LOCK_ONLY;
4724 case LockTupleKeyShare:
4725 new_xmax = add_to_xmax;
4726 new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
4728 case LockTupleShare:
4729 new_xmax = add_to_xmax;
4730 new_infomask |= HEAP_XMAX_SHR_LOCK;
4732 case LockTupleNoKeyExclusive:
4733 new_xmax = add_to_xmax;
4734 new_infomask |= HEAP_XMAX_EXCL_LOCK;
4736 case LockTupleExclusive:
4737 new_xmax = add_to_xmax;
4738 new_infomask |= HEAP_XMAX_EXCL_LOCK;
4739 new_infomask2 |= HEAP_KEYS_UPDATED;
4742 new_xmax = InvalidTransactionId; /* silence compiler */
4743 elog(ERROR, "invalid lock mode");
4747 else if (old_infomask & HEAP_XMAX_IS_MULTI)
4749 MultiXactStatus new_status;
4752 * Currently we don't allow XMAX_COMMITTED to be set for multis, so cross-check.
4755 Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
4758 * A multixact together with LOCK_ONLY set but neither lock bit set
4759 * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
4760 * anymore. This check is critical for databases upgraded by
4761 * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
4762 * that such multis are never passed.
4764 if (HEAP_LOCKED_UPGRADED(old_infomask))
4766 old_infomask &= ~HEAP_XMAX_IS_MULTI;
4767 old_infomask |= HEAP_XMAX_INVALID;
4772 * If the XMAX is already a MultiXactId, then we need to expand it to
4773 * include add_to_xmax; but if all the members were lockers and are
4774 * all gone, we can do away with the IS_MULTI bit and just set
4775 * add_to_xmax as the only locker/updater. If all lockers are gone
4776 * and we have an updater that aborted, we can also do without a multi.
4779 * The cost of doing GetMultiXactIdMembers would be paid by
4780 * MultiXactIdExpand if we weren't to do this, so this check is not
4781 * incurring extra work anyhow.
4783 if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
4785 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
4786 !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
4790 * Reset these bits and restart; otherwise fall through to
4791 * create a new multi below.
4793 old_infomask &= ~HEAP_XMAX_IS_MULTI;
4794 old_infomask |= HEAP_XMAX_INVALID;
4799 new_status = get_mxact_status_for_lock(mode, is_update);
4801 new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
4803 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4805 else if (old_infomask & HEAP_XMAX_COMMITTED)
4808 * It's a committed update, so we need to preserve it as the updater of the tuple.
4811 MultiXactStatus status;
4812 MultiXactStatus new_status;
4814 if (old_infomask2 & HEAP_KEYS_UPDATED)
4815 status = MultiXactStatusUpdate;
4817 status = MultiXactStatusNoKeyUpdate;
4819 new_status = get_mxact_status_for_lock(mode, is_update);
4822 * since it's not running, it's obviously impossible for the old
4823 * updater to be identical to the current one, so we need not check
4824 * for that case as we do in the block above.
4826 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4827 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4829 else if (TransactionIdIsInProgress(xmax))
4832 * If the XMAX is a valid, in-progress TransactionId, then we need to
4833 * create a new MultiXactId that includes both the old locker or
4834 * updater and our own TransactionId.
4836 MultiXactStatus new_status;
4837 MultiXactStatus old_status;
4838 LockTupleMode old_mode;
4840 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
4842 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
4843 old_status = MultiXactStatusForKeyShare;
4844 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
4845 old_status = MultiXactStatusForShare;
4846 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
4848 if (old_infomask2 & HEAP_KEYS_UPDATED)
4849 old_status = MultiXactStatusForUpdate;
4851 old_status = MultiXactStatusForNoKeyUpdate;
4856 * LOCK_ONLY can be present alone only when a page has been
4857 * upgraded by pg_upgrade. But in that case,
4858 * TransactionIdIsInProgress() should have returned false. We
4859 * assume it's no longer locked in this case.
4861 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
4862 old_infomask |= HEAP_XMAX_INVALID;
4863 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
4869 /* it's an update, but which kind? */
4870 if (old_infomask2 & HEAP_KEYS_UPDATED)
4871 old_status = MultiXactStatusUpdate;
4873 old_status = MultiXactStatusNoKeyUpdate;
4876 old_mode = TUPLOCK_from_mxstatus(old_status);
4879 * If the lock to be acquired is for the same TransactionId as the
4880 * existing lock, there's an optimization possible: consider only the
4881 * strongest of both locks as the only one present, and restart.
4883 if (xmax == add_to_xmax)
4886 * Note that it's not possible for the original tuple to be
4887 * updated: we wouldn't be here because the tuple would have been
4888 * invisible and we wouldn't try to update it. As a subtlety,
4889 * this code can also run when traversing an update chain to lock
4890 * future versions of a tuple. But we wouldn't be here either,
4891 * because the add_to_xmax would be different from the original updater.
4894 Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
4896 /* acquire the strongest of both */
4897 if (mode < old_mode)
4899 /* mustn't touch is_update */
4901 old_infomask |= HEAP_XMAX_INVALID;
4905 /* otherwise, just fall back to creating a new multixact */
4906 new_status = get_mxact_status_for_lock(mode, is_update);
4907 new_xmax = MultiXactIdCreate(xmax, old_status,
4908 add_to_xmax, new_status);
4909 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4911 else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
4912 TransactionIdDidCommit(xmax))
4915 * It's a committed update, so we have to preserve it as the updater of the tuple.
4918 MultiXactStatus status;
4919 MultiXactStatus new_status;
4921 if (old_infomask2 & HEAP_KEYS_UPDATED)
4922 status = MultiXactStatusUpdate;
4924 status = MultiXactStatusNoKeyUpdate;
4926 new_status = get_mxact_status_for_lock(mode, is_update);
4929 * since it's not running, it's obviously impossible for the old
4930 * updater to be identical to the current one, so we need not check
4931 * for that case as we do in the block above.
4933 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4934 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4939 * Can get here iff the locking/updating transaction was running when
4940 * the infomask was extracted from the tuple, but finished before
4941 * TransactionIdIsInProgress got to run. Deal with it as if there was
4942 * no locker at all in the first place.
4944 old_infomask |= HEAP_XMAX_INVALID;
4948 *result_infomask = new_infomask;
4949 *result_infomask2 = new_infomask2;
4950 *result_xmax = new_xmax;
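/*
 * Worked example (a sketch of the simplest path above): a tuple with
 * HEAP_XMAX_INVALID set is being share-locked by transaction 1234.  The
 * "no previous locker" branch runs, so afterwards:
 *
 *		new_xmax      = 1234
 *		new_infomask  = HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_SHR_LOCK
 *		new_infomask2 = 0
 *
 * Only when a second, different transaction needs to lock or update the same
 * tuple does a MultiXactId get created, via the MultiXactIdExpand /
 * MultiXactIdCreate branches.
 */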
4954 * Subroutine for heap_lock_updated_tuple_rec.
4956 * Given a hypothetical multixact status held by the transaction identified
4957 * with the given xid, does the current transaction need to wait, fail, or can
4958 * it continue if it wanted to acquire a lock of the given mode? "needwait"
4959 * is set to true if waiting is necessary; if it can continue, then TM_Ok is
4960 * returned. If the lock is already held by the current transaction, return
4961 * TM_SelfModified. In case of a conflict with another transaction, a
4962 * different HeapTupleSatisfiesUpdate return code is returned.
4964 * The held status is said to be hypothetical because it might correspond to a
4965 * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
4966 * way for simplicity of API.
4969 test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
4970 LockTupleMode mode, HeapTuple tup,
4973 MultiXactStatus wantedstatus;
4976 wantedstatus = get_mxact_status_for_lock(mode, false);
4979 * Note: we *must* check TransactionIdIsInProgress before
4980 * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
4981 * for an explanation.
4983 if (TransactionIdIsCurrentTransactionId(xid))
4986 * The tuple has already been locked by our own transaction. This is
4987 * very rare but can happen if multiple transactions are trying to
4988 * lock an ancient version of the same tuple.
4990 return TM_SelfModified;
4992 else if (TransactionIdIsInProgress(xid))
4995 * If the locking transaction is running, what we do depends on
4996 * whether the lock modes conflict: if they do, then we must wait for
4997 * it to finish; otherwise we can fall through to lock this tuple
4998 * version without waiting.
5000 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5001 LOCKMODE_from_mxstatus(wantedstatus)))
5007 * If we set needwait above, then this value doesn't matter;
5008 * otherwise, this value signals to caller that it's okay to proceed.
5012 else if (TransactionIdDidAbort(xid))
5014 else if (TransactionIdDidCommit(xid))
5017 * The other transaction committed. If it was only a locker, then the
5018 * lock is completely gone now and we can return success; but if it
5019 * was an update, then what we do depends on whether the two lock
5020 * modes conflict. If they conflict, then we must report error to
5021 * caller. But if they don't, we can fall through to allow the current
5022 * transaction to lock the tuple.
5024 * Note: the reason we worry about ISUPDATE here is because as soon as
5025 * a transaction ends, all its locks are gone and meaningless, and
5026 * thus we can ignore them; whereas its updates persist. In the
5027 * TransactionIdIsInProgress case, above, we don't need to check
5028 * because we know the lock is still "alive" and thus a conflict needs
5029 * always be checked.
5031 if (!ISUPDATE_from_mxstatus(status))
5034 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5035 LOCKMODE_from_mxstatus(wantedstatus)))
5038 if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid) ||
5039 HeapTupleHeaderIndicatesMovedPartitions(tup->t_data))
5048 /* Not in progress, not aborted, not committed -- must have crashed */
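/*
 * Example of the conflict test (a sketch): a holder whose status is
 * MultiXactStatusForKeyShare does not block a requester asking for
 * LockTupleKeyShare or LockTupleNoKeyExclusive, but it does block
 * LockTupleExclusive, because only the latter's lock mode conflicts with
 * key-share under DoLockModesConflict().  Update statuses (ISUPDATE_*)
 * additionally matter after commit, per the comments above.
 */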
5054 * Recursive part of heap_lock_updated_tuple
5056 * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5057 * xid with the given mode; if this tuple is updated, recurse to lock the new version too.
5061 heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
5065 ItemPointerData tupid;
5066 HeapTupleData mytup;
5068 uint16 new_infomask,
5074 TransactionId priorXmax = InvalidTransactionId;
5075 bool cleared_all_frozen = false;
5076 bool pinned_desired_page;
5077 Buffer vmbuffer = InvalidBuffer;
5080 ItemPointerCopy(tid, &tupid);
5085 new_xmax = InvalidTransactionId;
5086 block = ItemPointerGetBlockNumber(&tupid);
5087 ItemPointerCopy(&tupid, &(mytup.t_self));
5089 if (!heap_fetch(rel, SnapshotAny, &mytup, &buf))
5092 * if we fail to find the updated version of the tuple, it's
5093 * because it was vacuumed/pruned away after its creator
5094 * transaction aborted. So behave as if we got to the end of the
5095 * chain, and there's no further tuple to lock: return success to caller.
5103 CHECK_FOR_INTERRUPTS();
5106 * Before locking the buffer, pin the visibility map page if it
5107 * appears to be necessary. Since we haven't got the lock yet,
5108 * someone else might be in the middle of changing this, so we'll need
5109 * to recheck after we have the lock.
5111 if (PageIsAllVisible(BufferGetPage(buf)))
5113 visibilitymap_pin(rel, block, &vmbuffer);
5114 pinned_desired_page = true;
5117 pinned_desired_page = false;
5119 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5122 * If we didn't pin the visibility map page and the page has become
5123 * all visible while we were busy locking the buffer, we'll have to
5124 * unlock and re-lock, to avoid holding the buffer lock across I/O.
5125 * That's a bit unfortunate, but hopefully shouldn't happen often.
5127 * Note: in some paths through this function, we will reach here
5128 * holding a pin on a vm page that may or may not be the one matching
5129 * this page. If this page isn't all-visible, we won't use the vm
5130 * page, but we hold onto such a pin till the end of the function.
5132 if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
5134 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5135 visibilitymap_pin(rel, block, &vmbuffer);
5136 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5140 * Check the tuple XMIN against prior XMAX, if any. If we reached the
5141 * end of the chain, we're done, so return success.
5143 if (TransactionIdIsValid(priorXmax) &&
5144 !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5152 * Also check Xmin: if this tuple was created by an aborted
5153 * (sub)transaction, then we already locked the last live one in the
5154 * chain, thus we're done, so return success.
5156 if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
5162 old_infomask = mytup.t_data->t_infomask;
5163 old_infomask2 = mytup.t_data->t_infomask2;
5164 xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5167 * If this tuple version has been updated or locked by some concurrent
5168 * transaction(s), what we do depends on whether our lock mode
5169 * conflicts with what those other transactions hold, and also on the status of those transactions.
5172 if (!(old_infomask & HEAP_XMAX_INVALID))
5174 TransactionId rawxmax;
5177 rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5178 if (old_infomask & HEAP_XMAX_IS_MULTI)
5182 MultiXactMember *members;
5185 * We don't need a test for pg_upgrade'd tuples: this is only
5186 * applied to tuples after the first in an update chain. Said
5187 * first tuple in the chain may well be locked-in-9.2-and-
5188 * pg_upgraded, but that one was already locked by our caller,
5189 * not us; and any subsequent ones cannot be because our
5190 * caller must necessarily have obtained a snapshot later than
5191 * the pg_upgrade itself.
5193 Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
5195 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5196 HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5197 for (i = 0; i < nmembers; i++)
5199 result = test_lockmode_for_conflict(members[i].status,
5206 * If the tuple was already locked by ourselves in a
5207 * previous iteration of this (say heap_lock_tuple was
5208 * forced to restart the locking loop because of a change
5209 * in xmax), then we hold the lock already on this tuple
5210 * version and we don't need to do anything; and this is
5211 * not an error condition either. We just need to skip
5212 * this tuple and continue locking the next version in the update chain.
5215 if (result == TM_SelfModified)
5223 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5224 XactLockTableWait(members[i].xid, rel,
5230 if (result != TM_Ok)
5241 MultiXactStatus status;
5244 * For a non-multi Xmax, we first need to compute the
5245 * corresponding MultiXactStatus by using the infomask bits.
5247 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5249 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5250 status = MultiXactStatusForKeyShare;
5251 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5252 status = MultiXactStatusForShare;
5253 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5255 if (old_infomask2 & HEAP_KEYS_UPDATED)
5256 status = MultiXactStatusForUpdate;
5258 status = MultiXactStatusForNoKeyUpdate;
5263 * LOCK_ONLY present alone (a pg_upgraded tuple marked
5264 * as share-locked in the old cluster) shouldn't be
5265 * seen in the middle of an update chain.
5267 elog(ERROR, "invalid lock status in tuple");
5272 /* it's an update, but which kind? */
5273 if (old_infomask2 & HEAP_KEYS_UPDATED)
5274 status = MultiXactStatusUpdate;
5276 status = MultiXactStatusNoKeyUpdate;
5279 result = test_lockmode_for_conflict(status, rawxmax, mode,
5283 * If the tuple was already locked by ourselves in a previous
5284 * iteration of this (say heap_lock_tuple was forced to
5285 * restart the locking loop because of a change in xmax), then
5286 * we hold the lock already on this tuple version and we don't
5287 * need to do anything; and this is not an error condition
5288 * either. We just need to skip this tuple and continue
5289 * locking the next version in the update chain.
5291 if (result == TM_SelfModified)
5296 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5297 XactLockTableWait(rawxmax, rel, &mytup.t_self,
5301 if (result != TM_Ok)
5308 /* compute the new Xmax and infomask values for the tuple ... */
5309 compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
5311 &new_xmax, &new_infomask, &new_infomask2);
5313 if (PageIsAllVisible(BufferGetPage(buf)) &&
5314 visibilitymap_clear(rel, block, vmbuffer,
5315 VISIBILITYMAP_ALL_FROZEN))
5316 cleared_all_frozen = true;
5318 START_CRIT_SECTION();
5320 /* ... and set them */
5321 HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
5322 mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
5323 mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5324 mytup.t_data->t_infomask |= new_infomask;
5325 mytup.t_data->t_infomask2 |= new_infomask2;
5327 MarkBufferDirty(buf);
5330 if (RelationNeedsWAL(rel))
5332 xl_heap_lock_updated xlrec;
5334 Page page = BufferGetPage(buf);
5337 XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
5339 xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
5340 xlrec.xmax = new_xmax;
5341 xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
5343 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
5345 XLogRegisterData((char *) &xlrec, SizeOfHeapLockUpdated);
5347 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
5349 PageSetLSN(page, recptr);
5355 /* if we find the end of update chain, we're done. */
5356 if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
5357 HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
5358 ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
5359 HeapTupleHeaderIsOnlyLocked(mytup.t_data))
5365 /* tail recursion */
5366 priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
5367 ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
5368 UnlockReleaseBuffer(buf);
5374 UnlockReleaseBuffer(buf);
5377 if (vmbuffer != InvalidBuffer)
5378 ReleaseBuffer(vmbuffer);
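/*
 * Note on the structure above: despite the "tail recursion" comment, the
 * chain walk is an explicit loop over tupid, so an arbitrarily long update
 * chain consumes constant stack.  Each iteration pins, examines and (if
 * necessary) stamps exactly one tuple version before moving on to its t_ctid.
 */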
5384 * heap_lock_updated_tuple
5385 * Follow update chain when locking an updated tuple, acquiring locks (row
5386 * marks) on the updated versions.
5388 * The initial tuple is assumed to be already locked.
5390 * This function doesn't check visibility, it just unconditionally marks the
5391 * tuple(s) as locked. If any tuple in the updated chain is being deleted
5392 * concurrently (or updated with the key being modified), sleep until the
5393 * transaction doing it is finished.
5395 * Note that we don't acquire heavyweight tuple locks on the tuples we walk
5396 * when we have to wait for other transactions to release them, as opposed to
5397 * what heap_lock_tuple does. The reason is that having more than one
5398 * transaction walking the chain is probably uncommon enough that risk of
5399 * starvation is not likely: one of the preconditions for being here is that
5400 * the snapshot in use predates the update that created this tuple (because we
5401 * started at an earlier version of the tuple), but at the same time such a
5402 * transaction cannot be using repeatable read or serializable isolation
5403 * levels, because that would lead to a serializability failure.
5406 heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
5407 TransactionId xid, LockTupleMode mode)
5410 * If the tuple has not been updated, or has moved into another partition
5411 * (effectively a delete), stop here.
5413 if (!HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data) &&
5414 !ItemPointerEquals(&tuple->t_self, ctid))
5417 * If this is the first possibly-multixact-able operation in the
5418 * current transaction, set my per-backend OldestMemberMXactId
5419 * setting. We can be certain that the transaction will never become a
5420 * member of any older MultiXactIds than that. (We have to do this
5421 * even if we end up just using our own TransactionId below, since
5422 * some other backend could incorporate our XID into a MultiXact
5423 * immediately afterwards.)
5425 MultiXactIdSetOldestMember();
5427 return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
5430 /* nothing to lock */
5435 * heap_finish_speculative - mark speculative insertion as successful
5437 * To successfully finish a speculative insertion we have to clear the speculative
5438 * token from the tuple. To do so, the t_ctid field, which will contain a
5439 * speculative token value, is modified in place to point to the tuple itself,
5440 * which is characteristic of a newly inserted ordinary tuple.
5442 * NB: It is not ok to commit without either finishing or aborting a
5443 * speculative insertion. We could treat speculative tuples of committed
5444 * transactions implicitly as completed, but then we would have to be prepared
5445 * to deal with speculative tokens on committed tuples. That wouldn't be
5446 * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
5447 * but clearing the token at completion isn't very expensive either.
5448 * An explicit confirmation WAL record also makes logical decoding simpler.
5451 heap_finish_speculative(Relation relation, ItemPointer tid)
5455 OffsetNumber offnum;
5457 HeapTupleHeader htup;
5459 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
5460 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5461 page = (Page) BufferGetPage(buffer);
5463 offnum = ItemPointerGetOffsetNumber(tid);
5464 if (PageGetMaxOffsetNumber(page) >= offnum)
5465 lp = PageGetItemId(page, offnum);
5467 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5468 elog(ERROR, "invalid lp");
5470 htup = (HeapTupleHeader) PageGetItem(page, lp);
5472 /* SpecTokenOffsetNumber should be distinguishable from any real offset */
5473 StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
5474 "invalid speculative token constant");
5476 /* NO EREPORT(ERROR) from here till changes are logged */
5477 START_CRIT_SECTION();
5479 Assert(HeapTupleHeaderIsSpeculative(htup));
5481 MarkBufferDirty(buffer);
5484 * Replace the speculative insertion token with a real t_ctid, pointing to
5485 * itself like it does on regular tuples.
5487 htup->t_ctid = *tid;
5490 if (RelationNeedsWAL(relation))
5492 xl_heap_confirm xlrec;
5495 xlrec.offnum = ItemPointerGetOffsetNumber(tid);
5499 /* We want the same filtering on this as on a plain insert */
5500 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
5502 XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
5503 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5505 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
5507 PageSetLSN(page, recptr);
5512 UnlockReleaseBuffer(buffer);
5516 * heap_abort_speculative - kill a speculatively inserted tuple
5518 * Marks a tuple that was speculatively inserted in the same command as dead,
5519 * by setting its xmin as invalid. That makes it immediately appear as dead
5520 * to all transactions, including our own. In particular, it makes
5521 * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
5522 * inserting a duplicate key value won't unnecessarily wait for our whole
5523 * transaction to finish (it'll just wait for our speculative insertion to complete).
5526 * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
5527 * that arise due to a mutual dependency that is not user visible. By
5528 * definition, unprincipled deadlocks cannot be prevented by the user
5529 * reordering lock acquisition in client code, because the implementation level
5530 * lock acquisitions are not under the user's direct control. If speculative
5531 * inserters did not take this precaution, then under high concurrency they
5532 * could deadlock with each other, which would not be acceptable.
5534 * This is somewhat redundant with heap_delete, but we prefer to have a
5535 * dedicated routine with stripped down requirements. Note that this is also
5536 * used to delete the TOAST tuples created during speculative insertion.
5538 * This routine does not affect logical decoding as it only looks at
5539 * confirmation records.
5542 heap_abort_speculative(Relation relation, ItemPointer tid)
5544 TransactionId xid = GetCurrentTransactionId();
5551 Assert(ItemPointerIsValid(tid));
5553 block = ItemPointerGetBlockNumber(tid);
5554 buffer = ReadBuffer(relation, block);
5555 page = BufferGetPage(buffer);
5557 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5560 * Page can't be all visible, we just inserted into it, and are still inserting.
5563 Assert(!PageIsAllVisible(page));
5565 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
5566 Assert(ItemIdIsNormal(lp));
5568 tp.t_tableOid = RelationGetRelid(relation);
5569 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
5570 tp.t_len = ItemIdGetLength(lp);
5574 * Sanity check that the tuple really is a speculatively inserted tuple, inserted by us.
5577 if (tp.t_data->t_choice.t_heap.t_xmin != xid)
5578 elog(ERROR, "attempted to kill a tuple inserted by another transaction");
5579 if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
5580 elog(ERROR, "attempted to kill a non-speculative tuple");
5581 Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
5584 * No need to check for serializable conflicts here. There is never a
5585 * need for a combocid, either. No need to extract replica identity, or
5586 * do anything special with infomask bits.
5589 START_CRIT_SECTION();
5592 * The tuple will become DEAD immediately. Flag that this page
5593 * immediately is a candidate for pruning by setting xmin to
5594 * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
5595 * inventing a nicer API for this.
5597 Assert(TransactionIdIsValid(RecentGlobalXmin));
5598 PageSetPrunable(page, RecentGlobalXmin);
5600 /* store transaction information of xact deleting the tuple */
5601 tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
5602 tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5605 * Set the tuple header xmin to InvalidTransactionId. This makes the
5606 * tuple immediately invisible to everyone. (In particular, to any
5607 * transactions waiting on the speculative token, woken up later.)
5609 HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
5611 /* Clear the speculative insertion token too */
5612 tp.t_data->t_ctid = tp.t_self;
5614 MarkBufferDirty(buffer);
5619 * The WAL records generated here match heap_delete(). The same recovery
5620 * routines are used.
5622 if (RelationNeedsWAL(relation))
5624 xl_heap_delete xlrec;
5627 xlrec.flags = XLH_DELETE_IS_SUPER;
5628 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
5629 tp.t_data->t_infomask2);
5630 xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
5634 XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
5635 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5637 /* No replica identity & replication origin logged */
5639 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
5641 PageSetLSN(page, recptr);
5646 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5648 if (HeapTupleHasExternal(&tp))
5650 Assert(!IsToastRelation(relation));
5651 toast_delete(relation, &tp, true);
5655 * Never need to mark tuple for invalidation, since catalogs don't support
5656 * speculative insertion
5659 /* Now we can release the buffer */
5660 ReleaseBuffer(buffer);
5662 /* count deletion, as we counted the insertion too */
5663 pgstat_count_heap_delete(relation);
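/*
 * Lifecycle sketch (illustrative): speculative insertion as used by
 * INSERT ... ON CONFLICT proceeds roughly as
 *
 *		heap_insert(rel, tup, GetCurrentCommandId(true),
 *					HEAP_INSERT_SPECULATIVE, NULL);
 *		-- insert index entries, checking for conflicting rows
 *		if (no conflict was found)
 *			heap_finish_speculative(rel, &tup->t_self);
 *		else
 *			heap_abort_speculative(rel, &tup->t_self);
 *
 * so every speculatively inserted tuple is either confirmed or super-deleted
 * before the command completes.
 */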
5667 * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
5669 * Overwriting violates both MVCC and transactional safety, so the uses
5670 * of this function in Postgres are extremely limited. Nonetheless we
5671 * find some places to use it.
5673 * The tuple cannot change size, and therefore it's reasonable to assume
5674 * that its null bitmap (if any) doesn't change either. So we just
5675 * overwrite the data portion of the tuple without touching the null
5676 * bitmap or any of the header fields.
5678 * tuple is an in-memory tuple structure containing the data to be written
5679 * over the target tuple. Also, tuple->t_self identifies the target tuple.
5682 heap_inplace_update(Relation relation, HeapTuple tuple)
5686 OffsetNumber offnum;
5688 HeapTupleHeader htup;
5693 * For now, parallel operations are required to be strictly read-only.
5694 * Unlike a regular update, this should never create a combo CID, so it
5695 * might be possible to relax this restriction, but not without more
5696 * thought and testing. It's not clear that it would be useful, anyway.
5698 if (IsInParallelMode())
5700 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
5701 errmsg("cannot update tuples during a parallel operation")));
5703 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
5704 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5705 page = (Page) BufferGetPage(buffer);
5707 offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
5708 if (PageGetMaxOffsetNumber(page) >= offnum)
5709 lp = PageGetItemId(page, offnum);
5711 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5712 elog(ERROR, "invalid lp");
5714 htup = (HeapTupleHeader) PageGetItem(page, lp);
5716 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
5717 newlen = tuple->t_len - tuple->t_data->t_hoff;
5718 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
5719 elog(ERROR, "wrong tuple length");
5721 /* NO EREPORT(ERROR) from here till changes are logged */
5722 START_CRIT_SECTION();
5724 memcpy((char *) htup + htup->t_hoff,
5725 (char *) tuple->t_data + tuple->t_data->t_hoff,
5728 MarkBufferDirty(buffer);
5731 if (RelationNeedsWAL(relation))
5733 xl_heap_inplace xlrec;
5736 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5739 XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
5741 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5742 XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
5744 /* inplace updates aren't decoded atm, don't log the origin */
5746 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
5748 PageSetLSN(page, recptr);
5753 UnlockReleaseBuffer(buffer);
5756 * Send out shared cache inval if necessary. Note that because we only
5757 * pass the new version of the tuple, this mustn't be used for any
5758 * operations that could change catcache lookup keys. But we aren't
5759 * bothering with index updates either, so that's true a fortiori.
5761 if (!IsBootstrapProcessingMode())
5762 CacheInvalidateHeapTuple(relation, tuple, NULL);
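/*
 * Usage sketch (hypothetical, but modeled on how VACUUM refreshes pg_class
 * statistics): build a modified copy of the row whose length is unchanged,
 * then overwrite it in place.
 *
 *		HeapTuple	ctup = SearchSysCacheCopy1(RELOID,
 *											   ObjectIdGetDatum(relid));
 *		Form_pg_class pgcform = (Form_pg_class) GETSTRUCT(ctup);
 *
 *		pgcform->relpages = num_pages;	-- fixed-width field, same tuple length
 *		heap_inplace_update(pg_class_rel, ctup);
 *
 * Because no new tuple version is created, the change is neither rolled back
 * on abort nor visible through MVCC snapshots in the usual way.
 */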
5765 #define FRM_NOOP 0x0001
5766 #define FRM_INVALIDATE_XMAX 0x0002
5767 #define FRM_RETURN_IS_XID 0x0004
5768 #define FRM_RETURN_IS_MULTI 0x0008
5769 #define FRM_MARK_COMMITTED 0x0010
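/*
 * These flags are OR'ed together in the "flags" output of FreezeMultiXactId
 * below; for example, a multi whose only surviving member is a committed
 * updater can come back as FRM_RETURN_IS_XID | FRM_MARK_COMMITTED, telling
 * the caller to store the plain update Xid and set HEAP_XMAX_COMMITTED.
 */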
5773 * Determine what to do during freezing when a tuple is marked by a MultiXactId.
5776 * NB -- this might have the side-effect of creating a new MultiXactId!
5778 * "flags" is an output value; it's used to tell caller what to do on return.
5779 * Possible flags are:
5780 * FRM_NOOP
5781 * don't do anything -- keep existing Xmax
5782 * FRM_INVALIDATE_XMAX
5783 * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
5784 * FRM_RETURN_IS_XID
5785 * The Xid return value is a single update Xid to set as xmax.
5786 * FRM_MARK_COMMITTED
5787 * Xmax can be marked as HEAP_XMAX_COMMITTED
5788 * FRM_RETURN_IS_MULTI
5789 * The return value is a new MultiXactId to set as new Xmax.
5790 * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
5792 static TransactionId
5793 FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
5794 TransactionId relfrozenxid, TransactionId relminmxid,
5795 TransactionId cutoff_xid, MultiXactId cutoff_multi,
5798 TransactionId xid = InvalidTransactionId;
5800 MultiXactMember *members;
5804 MultiXactMember *newmembers;
5806 TransactionId update_xid;
5807 bool update_committed;
5811 /* We should only be called in Multis */
5812 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
5814 if (!MultiXactIdIsValid(multi) ||
5815 HEAP_LOCKED_UPGRADED(t_infomask))
5817 /* Ensure infomask bits are appropriately set/reset */
5818 *flags |= FRM_INVALIDATE_XMAX;
5819 return InvalidTransactionId;
5821 else if (MultiXactIdPrecedes(multi, relminmxid))
5823 (errcode(ERRCODE_DATA_CORRUPTED),
5824 errmsg_internal("found multixact %u from before relminmxid %u",
5825 multi, relminmxid)));
5826 else if (MultiXactIdPrecedes(multi, cutoff_multi))
5829 * This old multi cannot possibly have members still running, but
5830 * verify just in case. If it was a locker only, it can be removed
5831 * without any further consideration; but if it contained an update,
5832 * we might need to preserve it.
5834 if (MultiXactIdIsRunning(multi,
5835 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
5837 (errcode(ERRCODE_DATA_CORRUPTED),
5838 errmsg_internal("multixact %u from before cutoff %u found to be still running",
5839 multi, cutoff_multi)));
5841 if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
5843 *flags |= FRM_INVALIDATE_XMAX;
5844 xid = InvalidTransactionId; /* not strictly necessary */
5848 /* replace multi by update xid */
5849 xid = MultiXactIdGetUpdateXid(multi, t_infomask);
5851 /* wasn't only a lock, xid needs to be valid */
5852 Assert(TransactionIdIsValid(xid));
5854 if (TransactionIdPrecedes(xid, relfrozenxid))
5856 (errcode(ERRCODE_DATA_CORRUPTED),
5857 errmsg_internal("found update xid %u from before relfrozenxid %u",
5858 xid, relfrozenxid)));
5861 * If the xid is older than the cutoff, it has to have aborted,
5862 * otherwise the tuple would have gotten pruned away.
5864 if (TransactionIdPrecedes(xid, cutoff_xid))
5866 if (TransactionIdDidCommit(xid))
5868 (errcode(ERRCODE_DATA_CORRUPTED),
5869 errmsg_internal("cannot freeze committed update xid %u", xid)));
5870 *flags |= FRM_INVALIDATE_XMAX;
5871 xid = InvalidTransactionId; /* not strictly necessary */
5875 *flags |= FRM_RETURN_IS_XID;
5883 * This multixact might or might not have members still running, but
5884 * we know it's valid and is newer than the cutoff point for multis.
5885 * However, some member(s) of it may be below the cutoff for Xids, so we
5886 * need to walk the whole members array to figure out what to do, if anything.
5891 GetMultiXactIdMembers(multi, &members, false,
5892 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
5895 /* Nothing worth keeping */
5896 *flags |= FRM_INVALIDATE_XMAX;
5897 return InvalidTransactionId;
5900 /* is there anything older than the cutoff? */
5901 need_replace = false;
5902 for (i = 0; i < nmembers; i++)
5904 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
5906 need_replace = true;
5912 * In the simplest case, there is no member older than the cutoff; we can
5913 * keep the existing MultiXactId as is.
5919 return InvalidTransactionId;
5923 * If the multi needs to be updated, figure out which members we need to keep.
5927 newmembers = palloc(sizeof(MultiXactMember) * nmembers);
5928 has_lockers = false;
5929 update_xid = InvalidTransactionId;
5930 update_committed = false;
5932 for (i = 0; i < nmembers; i++)
5935 * Determine whether to keep this member or ignore it.
5937 if (ISUPDATE_from_mxstatus(members[i].status))
5939 TransactionId xid = members[i].xid;
5941 Assert(TransactionIdIsValid(xid));
5942 if (TransactionIdPrecedes(xid, relfrozenxid))
5944 (errcode(ERRCODE_DATA_CORRUPTED),
5945 errmsg_internal("found update xid %u from before relfrozenxid %u",
5946 xid, relfrozenxid)));
5949 * It's an update; should we keep it? If the transaction is known
5950 * aborted or crashed then it's okay to ignore it, otherwise not.
5951 * Note that an updater older than cutoff_xid cannot possibly be
5952 * committed, because HeapTupleSatisfiesVacuum would have returned
5953 * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
5955 * As with all tuple visibility routines, it's critical to test
5956 * TransactionIdIsInProgress before TransactionIdDidCommit,
5957 * because of race conditions explained in detail in
5958 * heapam_visibility.c.
5960 if (TransactionIdIsCurrentTransactionId(xid) ||
5961 TransactionIdIsInProgress(xid))
5963 Assert(!TransactionIdIsValid(update_xid));
5966 else if (TransactionIdDidCommit(xid))
5969 * The transaction committed, so we can tell caller to set
5970 * HEAP_XMAX_COMMITTED. (We can only do this because we know
5971 * the transaction is not running.)
5973 Assert(!TransactionIdIsValid(update_xid));
5974 update_committed = true;
5980 * Not in progress, not committed -- must be aborted or
5981 * crashed; we can ignore it.
5986 * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
5987 * update Xid cannot possibly be older than the xid cutoff. The
5988 * presence of such a tuple would cause corruption, so be paranoid and check.
5991 if (TransactionIdIsValid(update_xid) &&
5992 TransactionIdPrecedes(update_xid, cutoff_xid))
5994 (errcode(ERRCODE_DATA_CORRUPTED),
5995 errmsg_internal("found update xid %u from before xid cutoff %u",
5996 update_xid, cutoff_xid)));
5999 * If we determined that it's an Xid corresponding to an update
6000 * that must be retained, additionally add it to the list of
6001 * members of the new Multi, in case we end up using that. (We
6002 * might still decide to use only an update Xid and not a multi,
6003 * but it's easier to maintain the list as we walk the old members list.)
6006 if (TransactionIdIsValid(update_xid))
6007 newmembers[nnewmembers++] = members[i];
6011 /* We only keep lockers if they are still running */
6012 if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6013 TransactionIdIsInProgress(members[i].xid))
6015 /* running locker cannot possibly be older than the cutoff */
6016 Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6017 newmembers[nnewmembers++] = members[i];
6025 if (nnewmembers == 0)
6027 /* nothing worth keeping!? Tell caller to remove the whole thing */
6028 *flags |= FRM_INVALIDATE_XMAX;
6029 xid = InvalidTransactionId;
6031 else if (TransactionIdIsValid(update_xid) && !has_lockers)
6034 * If there's a single member and it's an update, pass it back alone
6035 * without creating a new Multi. (XXX we could do this when there's a
6036 * single remaining locker, too, but that would complicate the API too
6037 * much; moreover, the case with the single updater is more
6038 * interesting, because those are longer-lived.)
6040 Assert(nnewmembers == 1);
6041 *flags |= FRM_RETURN_IS_XID;
6042 if (update_committed)
6043 *flags |= FRM_MARK_COMMITTED;
6049 * Create a new multixact with the surviving members of the previous
6050 * one, to set as new Xmax in the tuple.
6052 xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6053 *flags |= FRM_RETURN_IS_MULTI;
6062 * heap_prepare_freeze_tuple
6064 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
6065 * are older than the specified cutoff XID and cutoff MultiXactId. If so,
6066 * set up enough state (in the *frz output argument) to later execute and
6067 * WAL-log what we would need to do, and return true. Return false if nothing
6068 * is to be changed. In addition, set *totally_frozen_p to true if the tuple
6069 * will be totally frozen after these operations are performed and false if
6070 * more freezing will eventually be required.
6072 * Caller is responsible for setting the offset field, if appropriate.
6074 * It is assumed that the caller has checked the tuple with
6075 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
6076 * (else we should be removing the tuple, not freezing it).
6078 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
6079 * XID older than it could neither be running nor seen as running by any
6080 * open transaction. This ensures that the replacement will not change
6081 * anyone's idea of the tuple state.
6082 * Similarly, cutoff_multi must be less than or equal to the smallest
6083 * MultiXactId used by any transaction currently open.
6085 * If the tuple is in a shared buffer, caller must hold an exclusive lock on that buffer.
6088 * NB: It is not enough to set hint bits to indicate something is
6089 * committed/invalid -- they might not be set on a standby, or after crash
6090 * recovery. We really need to remove old xids.
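/*
 * Usage sketch (an assumption paraphrasing lazy VACUUM, not code taken from
 * this file): freeze plans are collected with heap_prepare_freeze_tuple and
 * then applied and WAL-logged together for the whole page:
 *
 *		if (heap_prepare_freeze_tuple(tuple.t_data,
 *									  relfrozenxid, relminmxid,
 *									  FreezeLimit, MultiXactCutoff,
 *									  &frozen[nfrozen], &tuple_totally_frozen))
 *			frozen[nfrozen++].offset = offnum;
 *		...
 *		START_CRIT_SECTION();
 *		MarkBufferDirty(buf);
 *		for (i = 0; i < nfrozen; i++)
 *		{
 *			htup = (HeapTupleHeader) PageGetItem(page,
 *								PageGetItemId(page, frozen[i].offset));
 *			heap_execute_freeze_tuple(htup, &frozen[i]);
 *		}
 *		if (RelationNeedsWAL(rel))
 *			PageSetLSN(page, log_heap_freeze(rel, buf, FreezeLimit,
 *											 frozen, nfrozen));
 *		END_CRIT_SECTION();
 */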
6093 heap_prepare_freeze_tuple(HeapTupleHeader tuple,
6094 TransactionId relfrozenxid, TransactionId relminmxid,
6095 TransactionId cutoff_xid, TransactionId cutoff_multi,
6096 xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
6098 bool changed = false;
6099 bool xmax_already_frozen = false;
6105 frz->t_infomask2 = tuple->t_infomask2;
6106 frz->t_infomask = tuple->t_infomask;
6107 frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
6110 * Process xmin. xmin_frozen has two slightly different meanings: in the
6111 * !XidIsNormal case, it means "the xmin doesn't need any freezing" (it's
6112 * already a permanent value), while in the block below it is set true to
6113 * mean "xmin won't need freezing after what we do to it here" (false
6114 * otherwise). In both cases we're allowed to set totally_frozen, as far
6115 * as xmin is concerned.
6117 xid = HeapTupleHeaderGetXmin(tuple);
6118 if (!TransactionIdIsNormal(xid))
6122 if (TransactionIdPrecedes(xid, relfrozenxid))
6124 (errcode(ERRCODE_DATA_CORRUPTED),
6125 errmsg_internal("found xmin %u from before relfrozenxid %u",
6126 xid, relfrozenxid)));
6128 xmin_frozen = TransactionIdPrecedes(xid, cutoff_xid);
6131 if (!TransactionIdDidCommit(xid))
6133 (errcode(ERRCODE_DATA_CORRUPTED),
6134 errmsg_internal("uncommitted xmin %u from before xid cutoff %u needs to be frozen",
6137 frz->t_infomask |= HEAP_XMIN_FROZEN;
6143 * Process xmax. To thoroughly examine the current Xmax value we need to
6144 * resolve a MultiXactId to its member Xids, in case some of them are
6145 * below the given cutoff for Xids. In that case, those values might need
6146 * freezing, too. Also, if a multi needs freezing, we cannot simply take
6147 * it out --- if there's a live updater Xid, it needs to be kept.
6149 * Make sure to keep heap_tuple_needs_freeze in sync with this.
6151 xid = HeapTupleHeaderGetRawXmax(tuple);
6153 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
6155 TransactionId newxmax;
6158 newxmax = FreezeMultiXactId(xid, tuple->t_infomask,
6159 relfrozenxid, relminmxid,
6160 cutoff_xid, cutoff_multi, &flags);
6162 freeze_xmax = (flags & FRM_INVALIDATE_XMAX);
6164 if (flags & FRM_RETURN_IS_XID)
6167 * NB -- some of these transformations are only valid because we
6168 * know the return Xid is a tuple updater (i.e. not merely a
6169 * locker.) Also note that the only reason we don't explicitly
6170 * worry about HEAP_KEYS_UPDATED is because it lives in
6171 * t_infomask2 rather than t_infomask.
6173 frz->t_infomask &= ~HEAP_XMAX_BITS;
6174 frz->xmax = newxmax;
6175 if (flags & FRM_MARK_COMMITTED)
6176 frz->t_infomask |= HEAP_XMAX_COMMITTED;
6179 else if (flags & FRM_RETURN_IS_MULTI)
6185 * We can't use GetMultiXactIdHintBits directly on the new multi
6186 * here; that routine initializes the masks to all zeroes, which
6187 * would lose other bits we need. Doing it this way ensures all
6188 * unrelated bits remain untouched.
6190 frz->t_infomask &= ~HEAP_XMAX_BITS;
6191 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6192 GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
6193 frz->t_infomask |= newbits;
6194 frz->t_infomask2 |= newbits2;
6196 frz->xmax = newxmax;
6201 else if (TransactionIdIsNormal(xid))
6203 if (TransactionIdPrecedes(xid, relfrozenxid))
6205 (errcode(ERRCODE_DATA_CORRUPTED),
6206 errmsg_internal("found xmax %u from before relfrozenxid %u",
6207 xid, relfrozenxid)));
6209 if (TransactionIdPrecedes(xid, cutoff_xid))
6212 * If we freeze xmax, make absolutely sure that it's not an XID
6213 * that is important. (Note, a lock-only xmax can be removed
6214 * independent of committedness, since a committed lock holder has
6215 * released the lock).
6217 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
6218 TransactionIdDidCommit(xid))
6220 (errcode(ERRCODE_DATA_CORRUPTED),
6221 errmsg_internal("cannot freeze committed xmax %u",
6226 freeze_xmax = false;
6228 else if ((tuple->t_infomask & HEAP_XMAX_INVALID) ||
6229 !TransactionIdIsValid(HeapTupleHeaderGetRawXmax(tuple)))
6231 freeze_xmax = false;
6232 xmax_already_frozen = true;
6236 (errcode(ERRCODE_DATA_CORRUPTED),
6237 errmsg_internal("found xmax %u (infomask 0x%04x) not frozen, not multi, not normal",
6238 xid, tuple->t_infomask)));
6242 Assert(!xmax_already_frozen);
6244 frz->xmax = InvalidTransactionId;
6247 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
6248 * LOCKED. Normalize to INVALID just to be sure no one gets confused.
6249 * Also get rid of the HEAP_KEYS_UPDATED bit.
6251 frz->t_infomask &= ~HEAP_XMAX_BITS;
6252 frz->t_infomask |= HEAP_XMAX_INVALID;
6253 frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
6254 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6259 * Old-style VACUUM FULL is gone, but we have to keep this code as long as
6260 * we support having MOVED_OFF/MOVED_IN tuples in the database.
6262 if (tuple->t_infomask & HEAP_MOVED)
6264 xid = HeapTupleHeaderGetXvac(tuple);
6267 * For Xvac, we ignore the cutoff_xid and just always perform the
6268 * freeze operation. The oldest release in which such a value can
6269 * actually be set is PostgreSQL 8.4, because old-style VACUUM FULL
6270 * was removed in PostgreSQL 9.0. Note that if we were to respect
6271 * cutoff_xid here, we'd need to make sure to clear totally_frozen
6272 * when we skipped freezing on that basis.
6274 if (TransactionIdIsNormal(xid))
6277 * If a MOVED_OFF tuple is not dead, the xvac transaction must
6278 * have failed; whereas a non-dead MOVED_IN tuple must mean the
6279 * xvac transaction succeeded.
6281 if (tuple->t_infomask & HEAP_MOVED_OFF)
6282 frz->frzflags |= XLH_INVALID_XVAC;
6284 frz->frzflags |= XLH_FREEZE_XVAC;
6287 * Might as well fix the hint bits too; usually XMIN_COMMITTED
6288 * will already be set here, but there's a small chance not.
6290 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
6291 frz->t_infomask |= HEAP_XMIN_COMMITTED;
6296 *totally_frozen_p = (xmin_frozen &&
6297 (freeze_xmax || xmax_already_frozen));
6302 * heap_execute_freeze_tuple
6303 * Execute the prepared freezing of a tuple.
6305 * Caller is responsible for ensuring that no other backend can access the
6306 * storage underlying this tuple, either by holding an exclusive lock on the
6307 * buffer containing it (which is what lazy VACUUM does), or by having it be
6308 * in private storage (which is what CLUSTER and friends do).
6310 * Note: it might seem we could make the changes without exclusive lock, since
6311 * TransactionId read/write is assumed atomic anyway. However there is a race
6312 * condition: someone who just fetched an old XID that we overwrite here could
6313 * conceivably not finish checking the XID against pg_xact before we finish
6314 * the VACUUM and perhaps truncate off the part of pg_xact he needs. Getting
6315 * exclusive lock ensures no other backend is in process of checking the
6316 * tuple status. Also, getting exclusive lock makes it safe to adjust the infomask bits.
6319 * NB: All code in here must be safe to execute during crash recovery!
6322 heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
6324 HeapTupleHeaderSetXmax(tuple, frz->xmax);
6326 if (frz->frzflags & XLH_FREEZE_XVAC)
6327 HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6329 if (frz->frzflags & XLH_INVALID_XVAC)
6330 HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6332 tuple->t_infomask = frz->t_infomask;
6333 tuple->t_infomask2 = frz->t_infomask2;
6338 * Freeze tuple in place, without WAL logging.
6340 * Useful for callers like CLUSTER that perform their own WAL logging.
6343 heap_freeze_tuple(HeapTupleHeader tuple,
6344 TransactionId relfrozenxid, TransactionId relminmxid,
6345 TransactionId cutoff_xid, TransactionId cutoff_multi)
6347 xl_heap_freeze_tuple frz;
6349 bool tuple_totally_frozen;
6351 do_freeze = heap_prepare_freeze_tuple(tuple,
6352 relfrozenxid, relminmxid,
6353 cutoff_xid, cutoff_multi,
6354 &frz, &tuple_totally_frozen);
6357 * Note that because this is not a WAL-logged operation, we don't need to
6358 * fill in the offset in the freeze record.
6362 heap_execute_freeze_tuple(tuple, &frz);
6367 * For a given MultiXactId, return the hint bits that should be set in the tuple's infomask.
6370 * Normally this should be called for a multixact that was just created, and
6371 * so is on our local cache, so the GetMembers call is fast.
6374 GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
6375 uint16 *new_infomask2)
6378 MultiXactMember *members;
6380 uint16 bits = HEAP_XMAX_IS_MULTI;
6382 bool has_update = false;
6383 LockTupleMode strongest = LockTupleKeyShare;
6386 * We only use this in multis we just created, so they cannot be values pre-dating pg_upgrade.
6389 nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6391 for (i = 0; i < nmembers; i++)
6396 * Remember the strongest lock mode held by any member of the multixact.
6399 mode = TUPLOCK_from_mxstatus(members[i].status);
6400 if (mode > strongest)
6403 /* See what other bits we need */
6404 switch (members[i].status)
6406 case MultiXactStatusForKeyShare:
6407 case MultiXactStatusForShare:
6408 case MultiXactStatusForNoKeyUpdate:
6411 case MultiXactStatusForUpdate:
6412 bits2 |= HEAP_KEYS_UPDATED;
6415 case MultiXactStatusNoKeyUpdate:
6419 case MultiXactStatusUpdate:
6420 bits2 |= HEAP_KEYS_UPDATED;
6426 if (strongest == LockTupleExclusive ||
6427 strongest == LockTupleNoKeyExclusive)
6428 bits |= HEAP_XMAX_EXCL_LOCK;
6429 else if (strongest == LockTupleShare)
6430 bits |= HEAP_XMAX_SHR_LOCK;
6431 else if (strongest == LockTupleKeyShare)
6432 bits |= HEAP_XMAX_KEYSHR_LOCK;
6435 bits |= HEAP_XMAX_LOCK_ONLY;
6440 *new_infomask = bits;
6441 *new_infomask2 = bits2;
6445 * MultiXactIdGetUpdateXid
6447 * Given a multixact Xmax and corresponding infomask, which does not have the
6448 * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating transaction.
6451 * Caller is expected to check the status of the updating transaction, if necessary.
6454 static TransactionId
6455 MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
6457 TransactionId update_xact = InvalidTransactionId;
6458 MultiXactMember *members;
6461 Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
6462 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6465 * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from pre-pg_upgrade.
6468 nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
6474 for (i = 0; i < nmembers; i++)
6476 /* Ignore lockers */
6477 if (!ISUPDATE_from_mxstatus(members[i].status))
6480 /* there can be at most one updater */
6481 Assert(update_xact == InvalidTransactionId);
6482 update_xact = members[i].xid;
6483 #ifndef USE_ASSERT_CHECKING
6486 * in an assert-enabled build, walk the whole array to ensure
6487 * there's no other updater.
6500 * HeapTupleGetUpdateXid
6501 * As above, but use a HeapTupleHeader
6503 * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
6504 * checking the hint bits.
6507 HeapTupleGetUpdateXid(HeapTupleHeader tuple)
6509 return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tuple), tuple->t_infomask);
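/*
 * Usage sketch (assumption): callers typically test the infomask first, so
 * that this is only reached when the multi actually contains an update:
 *
 *		if ((tup->t_infomask & HEAP_XMAX_IS_MULTI) &&
 *			!HEAP_XMAX_IS_LOCKED_ONLY(tup->t_infomask))
 *			xmax = HeapTupleGetUpdateXid(tup);
 *		else
 *			xmax = HeapTupleHeaderGetRawXmax(tup);
 */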
6514 * Does the given multixact conflict with the current transaction grabbing a
6515 * tuple lock of the given strength?
6517 * The passed infomask pairs up with the given multixact in the tuple header.
6520 DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
6521 LockTupleMode lockmode)
6524 MultiXactMember *members;
6525 bool result = false;
6526 LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
6528 if (HEAP_LOCKED_UPGRADED(infomask))
6531 nmembers = GetMultiXactIdMembers(multi, &members, false,
6532 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6537 for (i = 0; i < nmembers; i++)
6539 TransactionId memxid;
6540 LOCKMODE memlockmode;
6542 memlockmode = LOCKMODE_from_mxstatus(members[i].status);
6544 /* ignore members that don't conflict with the lock we want */
6545 if (!DoLockModesConflict(memlockmode, wanted))
6548 /* ignore members from current xact */
6549 memxid = members[i].xid;
6550 if (TransactionIdIsCurrentTransactionId(memxid))
6553 if (ISUPDATE_from_mxstatus(members[i].status))
6555 /* ignore aborted updaters */
6556 if (TransactionIdDidAbort(memxid))
6561 /* ignore lockers-only that are no longer in progress */
6562 if (!TransactionIdIsInProgress(memxid))
6567 * Whatever remains is either a live locker that conflicts with our
6568 * wanted lock, or an updater that is not aborted. Either way it
6569 * conflicts with what we want, so return true.
6581 * Do_MultiXactIdWait
6582 * Actual implementation for the two functions below.
6584 * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
6585 * needed to ensure we only sleep on conflicting members, and the infomask is
6586 * used to optimize multixact access in case it's a lock-only multi); 'nowait'
6587 * indicates whether to use conditional lock acquisition, to allow callers to
6588 * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
6589 * context information for error messages. 'remaining', if not NULL, receives
6590 * the number of members that are still running, including any (non-aborted)
6591 * subtransactions of our own transaction.
6593 * We do this by sleeping on each member using XactLockTableWait. Any
6594 * members that belong to the current backend are *not* waited for, however;
6595 * this would not merely be useless but would lead to Assert failure inside
6596 * XactLockTableWait. By the time this returns, it is certain that all
6597 * transactions *of other backends* that were members of the MultiXactId
6598 * that conflict with the requested status are dead (and no new ones can have
6599 * been added, since it is not legal to add members to an existing MultiXactId.)
6602 * But by the time we finish sleeping, someone else may have changed the Xmax
6603 * of the containing tuple, so the caller needs to iterate on us somehow.
6605 * Note that in case we return false, the number of remaining members is
6606 * not to be trusted.
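/*
 * Caller-side sketch (assumption, condensed from heap_delete-style callers):
 * the buffer lock is dropped while sleeping, and xmax must be re-examined
 * afterwards, since it may have changed while we slept:
 *
 *		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 *		MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
 *						relation, &tp.t_self, XLTW_Delete, NULL);
 *		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *		(then recheck whether xmax still equals xwait, and retry if not)
 */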
6609 Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
6610 uint16 infomask, bool nowait,
6611 Relation rel, ItemPointer ctid, XLTW_Oper oper,
6615 MultiXactMember *members;
6619 /* for pre-pg_upgrade tuples, no need to sleep at all */
6620 nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
6621 GetMultiXactIdMembers(multi, &members, false,
6622 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6628 for (i = 0; i < nmembers; i++)
6630 TransactionId memxid = members[i].xid;
6631 MultiXactStatus memstatus = members[i].status;
6633 if (TransactionIdIsCurrentTransactionId(memxid))
6639 if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
6640 LOCKMODE_from_mxstatus(status)))
6642 if (remaining && TransactionIdIsInProgress(memxid))
6648 * This member conflicts with our multi, so we have to sleep (or
6649 * return failure, if asked to avoid waiting.)
6651 * Note that we don't set up an error context callback ourselves,
6652 * but instead we pass the info down to XactLockTableWait. This
6653 * might seem a bit wasteful because the context is set up and
6654 * tore down for each member of the multixact, but in reality it
6655 * should be barely noticeable, and it avoids duplicate code.
6659 result = ConditionalXactLockTableWait(memxid);
6664 XactLockTableWait(memxid, rel, ctid, oper);
6671 *remaining = remain;
6678 * Sleep on a MultiXactId.
6680 * By the time we finish sleeping, someone else may have changed the Xmax
6681 * of the containing tuple, so the caller needs to iterate on us somehow.
6683 * We return (in *remaining, if not NULL) the number of members that are still
6684 * running, including any (non-aborted) subtransactions of our own transaction.
6687 MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
6688 Relation rel, ItemPointer ctid, XLTW_Oper oper,
6691 (void) Do_MultiXactIdWait(multi, status, infomask, false,
6692 rel, ctid, oper, remaining);
6696 * ConditionalMultiXactIdWait
6697 * As above, but only lock if we can get the lock without blocking.
6699 * By the time we finish sleeping, someone else may have changed the Xmax
6700 * of the containing tuple, so the caller needs to iterate on us somehow.
6702 * If the multixact is now all gone, return true; return false if some
6703 * transactions might still be running.
6705 * We return (in *remaining, if not NULL) the number of members that are still
6706 * running, including any (non-aborted) subtransactions of our own transaction.
6709 ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
6710 uint16 infomask, Relation rel, int *remaining)
6712 return Do_MultiXactIdWait(multi, status, infomask, true,
6713 rel, NULL, XLTW_None, remaining);
6717 * heap_tuple_needs_eventual_freeze
6719 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
6720 * will eventually require freezing. Similar to heap_tuple_needs_freeze,
6721 * but there's no cutoff, since we're trying to figure out whether freezing
6722 * will ever be needed, not whether it's needed now.
6725 heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
6730 * If xmin is a normal transaction ID, this tuple is definitely not frozen.
6733 xid = HeapTupleHeaderGetXmin(tuple);
6734 if (TransactionIdIsNormal(xid))
6738 * If xmax is a valid xact or multixact, this tuple is also not frozen.
6740 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
6744 multi = HeapTupleHeaderGetRawXmax(tuple);
6745 if (MultiXactIdIsValid(multi))
6750 xid = HeapTupleHeaderGetRawXmax(tuple);
6751 if (TransactionIdIsNormal(xid))
6755 if (tuple->t_infomask & HEAP_MOVED)
6757 xid = HeapTupleHeaderGetXvac(tuple);
6758 if (TransactionIdIsNormal(xid))
6766 * heap_tuple_needs_freeze
6768 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
6769 * are older than the specified cutoff XID or MultiXactId. If so, return true.
6771 * It doesn't matter whether the tuple is alive or dead; we are checking
6772 * to see if a tuple needs to be removed or frozen to avoid wraparound.
6774 * NB: Cannot rely on hint bits here, they might not be set after a crash or on a standby.
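/*
 * Usage sketch (assumption, paraphrasing VACUUM): when a cleanup lock on a
 * page cannot be obtained, the page may still have to be processed if some
 * tuple on it needs freezing to avert wraparound:
 *
 *		if (!ConditionalLockBufferForCleanup(buf))
 *		{
 *			LockBuffer(buf, BUFFER_LOCK_SHARE);
 *			if (!heap_tuple_needs_freeze(tupleheader, FreezeLimit,
 *										 MultiXactCutoff, buf))
 *			{
 *				UnlockReleaseBuffer(buf);
 *				continue;			(safe to skip this page for now)
 *			}
 *			...
 *		}
 */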
6778 heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
6779 MultiXactId cutoff_multi, Buffer buf)
6783 xid = HeapTupleHeaderGetXmin(tuple);
6784 if (TransactionIdIsNormal(xid) &&
6785 TransactionIdPrecedes(xid, cutoff_xid))
6789 * The considerations for multixacts are complicated; look at
6790 * heap_prepare_freeze_tuple for justifications. This routine had better
6791 * be in sync with that one!
6793 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
6797 multi = HeapTupleHeaderGetRawXmax(tuple);
6798 if (!MultiXactIdIsValid(multi))
6800 /* no xmax set, ignore */
6803 else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
6805 else if (MultiXactIdPrecedes(multi, cutoff_multi))
6809 MultiXactMember *members;
6813 /* need to check whether any member of the mxact is too old */
6815 nmembers = GetMultiXactIdMembers(multi, &members, false,
6816 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
6818 for (i = 0; i < nmembers; i++)
6820 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6832 xid = HeapTupleHeaderGetRawXmax(tuple);
6833 if (TransactionIdIsNormal(xid) &&
6834 TransactionIdPrecedes(xid, cutoff_xid))
6838 if (tuple->t_infomask & HEAP_MOVED)
6840 xid = HeapTupleHeaderGetXvac(tuple);
6841 if (TransactionIdIsNormal(xid) &&
6842 TransactionIdPrecedes(xid, cutoff_xid))
6850 * If 'tuple' contains any visible XID greater than latestRemovedXid,
6851 * ratchet forwards latestRemovedXid to the greatest one found.
6852 * This is used as the basis for generating Hot Standby conflicts, so
6853 * if a tuple was never visible then removing it should not conflict with queries.
6857 HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
6858 TransactionId *latestRemovedXid)
6860 TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
6861 TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
6862 TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
6864 if (tuple->t_infomask & HEAP_MOVED)
6866 if (TransactionIdPrecedes(*latestRemovedXid, xvac))
6867 *latestRemovedXid = xvac;
6871 * Ignore tuples inserted by an aborted transaction or if the tuple was
6872 * updated/deleted by the inserting transaction.
6874 * Look for a committed hint bit, or if no xmin bit is set, check clog.
6875 * This needs to work on both master and standby, where it is used to
6876 * assess btree delete records.
6878 if (HeapTupleHeaderXminCommitted(tuple) ||
6879 (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
6882 TransactionIdFollows(xmax, *latestRemovedXid))
6883 *latestRemovedXid = xmax;
6886 /* *latestRemovedXid may still be invalid at end */
6891 * Helper function for heap_compute_xid_horizon_for_tuples. Issue prefetch
6892 * requests for the number of buffers indicated by prefetch_count. The
6893 * prefetch_state keeps track of all the buffers that we can prefetch and
6894 * which ones have already been prefetched; each call to this function picks
6895 * up where the previous call left off.
6898 xid_horizon_prefetch_buffer(Relation rel,
6899 XidHorizonPrefetchState *prefetch_state,
6902 BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
6905 int nitems = prefetch_state->nitems;
6906 ItemPointerData *tids = prefetch_state->tids;
6908 for (i = prefetch_state->next_item;
6909 i < nitems && count < prefetch_count;
6912 ItemPointer htid = &tids[i];
6914 if (cur_hblkno == InvalidBlockNumber ||
6915 ItemPointerGetBlockNumber(htid) != cur_hblkno)
6917 cur_hblkno = ItemPointerGetBlockNumber(htid);
6918 PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
6924 * Save the prefetch position so that next time we can continue from that
6927 prefetch_state->next_item = i;
6928 prefetch_state->cur_hblkno = cur_hblkno;
6933 * Get the latestRemovedXid from the heap pages pointed at by the index
6934 * tuples being deleted.
6936 * We used to do this during recovery rather than on the primary, but that
6937 * approach now appears inferior. It meant that the master could generate
6938 * a lot of work for the standby without any back-pressure to slow down the
6939 * master, and it required the standby to have reached consistency, whereas
6940 * we want to have correct information available even before that point.
6942 * It's possible for this to generate a fair amount of I/O, since we may be
6943 * deleting hundreds of tuples from a single index block. To amortize that
6944 * cost to some degree, this uses prefetching and combines repeat accesses to the same block.
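/*
 * Note (assumption): index AMs do not call this function directly; they go
 * through the table AM layer, roughly:
 *
 *		latestRemovedXid =
 *			table_compute_xid_horizon_for_tuples(heapRel, deltids, ndeltids);
 *
 * which dispatches to this routine for heap relations.
 */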
6948 heap_compute_xid_horizon_for_tuples(Relation rel,
6949 ItemPointerData *tids,
6952 TransactionId latestRemovedXid = InvalidTransactionId;
6954 Buffer buf = InvalidBuffer;
6957 XidHorizonPrefetchState prefetch_state;
6959 int prefetch_distance;
6963 * Sort to avoid repeated lookups for the same page, and to make it more
6964 * likely to access items in an efficient order. In particular, this
6965 * ensures that if there are multiple pointers to the same page, they all
6966 * get processed looking up and locking the page just once.
6968 qsort((void *) tids, nitems, sizeof(ItemPointerData),
6969 (int (*) (const void *, const void *)) ItemPointerCompare);
6972 /* Initialize prefetch state. */
6973 prefetch_state.cur_hblkno = InvalidBlockNumber;
6974 prefetch_state.next_item = 0;
6975 prefetch_state.nitems = nitems;
6976 prefetch_state.tids = tids;
6979 * Compute the prefetch distance that we will attempt to maintain.
6981 * We don't use the regular formula to determine how much to prefetch
6982 * here, but instead just add a constant to effective_io_concurrency.
6983 * That's because it seems best to do some prefetching here even when
6984 * effective_io_concurrency is set to 0, but if the DBA thinks it's OK to
6985 * do more prefetching for other operations, then it's probably OK to do
6986 * more prefetching in this case, too. It may be that this formula is too
6987 * simplistic, but at the moment there is no evidence of that or any idea
6988 * about what would work better.
6990 * Since the caller holds a buffer lock somewhere in rel, we'd better make
6991 * sure that isn't a catalog relation before we call code that does
6992 * syscache lookups, to avoid risk of deadlock.
6994 if (IsCatalogRelation(rel))
6995 io_concurrency = effective_io_concurrency;
6996 else
6997 io_concurrency = get_tablespace_io_concurrency(rel->rd_rel->reltablespace);
6998 prefetch_distance = Min((io_concurrency) + 10, MAX_IO_CONCURRENCY);
7000 /* Start prefetching. */
7001 xid_horizon_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
7004 /* Iterate over all tids, and check their horizon */
7005 hblkno = InvalidBlockNumber;
7007 for (int i = 0; i < nitems; i++)
7009 ItemPointer htid = &tids[i];
7011 OffsetNumber hoffnum;
7014 * Read heap buffer, but avoid refetching if it's the same block as
7015 * required for the last tid.
7017 if (hblkno == InvalidBlockNumber ||
7018 ItemPointerGetBlockNumber(htid) != hblkno)
7020 /* release old buffer */
7021 if (BufferIsValid(buf))
7023 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
7027 hblkno = ItemPointerGetBlockNumber(htid);
7029 buf = ReadBuffer(rel, hblkno);
7034 * To maintain the prefetch distance, prefetch one more page for
7035 * each page we read.
7037 xid_horizon_prefetch_buffer(rel, &prefetch_state, 1);
7040 hpage = BufferGetPage(buf);
7042 LockBuffer(buf, BUFFER_LOCK_SHARE);
7045 hoffnum = ItemPointerGetOffsetNumber(htid);
7046 hitemid = PageGetItemId(hpage, hoffnum);
7049 * Follow any redirections until we find something useful.
7051 while (ItemIdIsRedirected(hitemid))
7053 hoffnum = ItemIdGetRedirect(hitemid);
7054 hitemid = PageGetItemId(hpage, hoffnum);
7055 CHECK_FOR_INTERRUPTS();
7059 * If the heap item has storage, then read the header and use that to
7060 * set latestRemovedXid.
7062 * Some LP_DEAD items may not be accessible, so we ignore them.
7064 if (ItemIdHasStorage(hitemid))
7066 HeapTupleHeader htuphdr;
7068 htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
7070 HeapTupleHeaderAdvanceLatestRemovedXid(htuphdr, &latestRemovedXid);
7072 else if (ItemIdIsDead(hitemid))
7075 * Conjecture: if hitemid is dead then it had xids before the xids
7076 * marked on LP_NORMAL items. So we just ignore this item and move
7077 * on to the next, for the purposes of calculating
7078 * latestRemovedXid.
7082 Assert(!ItemIdIsUsed(hitemid));
7086 if (BufferIsValid(buf))
7088 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
7093 * If all heap tuples were LP_DEAD then we will be returning
7094 * InvalidTransactionId here, which avoids conflicts. This matches
7095 * existing logic which assumes that LP_DEAD tuples must already be older
7096 * than the latestRemovedXid on the cleanup record that set them as
7097 * LP_DEAD, hence must already have generated a conflict.
7100 return latestRemovedXid;
7104 * Perform XLogInsert to register a heap cleanup info message. These
7105 * messages are sent once per VACUUM and are required because
7106 * of the phasing of removal operations during a lazy VACUUM.
7107 * See comments for vacuum_log_cleanup_info().
7110 log_heap_cleanup_info(RelFileNode rnode, TransactionId latestRemovedXid)
7112 xl_heap_cleanup_info xlrec;
7116 xlrec.latestRemovedXid = latestRemovedXid;
7119 XLogRegisterData((char *) &xlrec, SizeOfHeapCleanupInfo);
7121 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEANUP_INFO);
7127 * Perform XLogInsert for a heap-clean operation. Caller must already
7128 * have modified the buffer and marked it dirty.
7130 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
7131 * zero-based tuple indexes. Now they are one-based like other uses of OffsetNumber.
7134 * We also include latestRemovedXid, which is the greatest XID present in
7135 * the removed tuples. That allows recovery processing to cancel or wait
7136 * for long standby queries that can still see these tuples.
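/*
 * Caller sketch (assumption, condensed from page pruning): the page is
 * modified inside a critical section, and the record is emitted right after
 * marking the buffer dirty:
 *
 *		START_CRIT_SECTION();
 *		heap_page_prune_execute(buffer,
 *								redirected, nredirected,
 *								nowdead, ndead,
 *								nowunused, nunused);
 *		MarkBufferDirty(buffer);
 *		if (RelationNeedsWAL(relation))
 *		{
 *			recptr = log_heap_clean(relation, buffer,
 *									redirected, nredirected,
 *									nowdead, ndead,
 *									nowunused, nunused,
 *									latestRemovedXid);
 *			PageSetLSN(BufferGetPage(buffer), recptr);
 *		}
 *		END_CRIT_SECTION();
 */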
7139 log_heap_clean(Relation reln, Buffer buffer,
7140 OffsetNumber *redirected, int nredirected,
7141 OffsetNumber *nowdead, int ndead,
7142 OffsetNumber *nowunused, int nunused,
7143 TransactionId latestRemovedXid)
7145 xl_heap_clean xlrec;
7148 /* Caller should not call me on a non-WAL-logged relation */
7149 Assert(RelationNeedsWAL(reln));
7151 xlrec.latestRemovedXid = latestRemovedXid;
7152 xlrec.nredirected = nredirected;
7153 xlrec.ndead = ndead;
7156 XLogRegisterData((char *) &xlrec, SizeOfHeapClean);
7158 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
7161 * The OffsetNumber arrays are not actually in the buffer, but we pretend
7162 * that they are. When XLogInsert stores the whole buffer, the offset
7163 * arrays need not be stored too. Note that even if all three arrays are
7164 * empty, we want to expose the buffer as a candidate for whole-page
7165 * storage, since this record type implies a defragmentation operation
7166 * even if no line pointers changed state.
7168 if (nredirected > 0)
7169 XLogRegisterBufData(0, (char *) redirected,
7170 nredirected * sizeof(OffsetNumber) * 2);
7172 if (ndead > 0)
7173 XLogRegisterBufData(0, (char *) nowdead,
7174 ndead * sizeof(OffsetNumber));
7176 if (nunused > 0)
7177 XLogRegisterBufData(0, (char *) nowunused,
7178 nunused * sizeof(OffsetNumber));
7180 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEAN);
7186 * Perform XLogInsert for a heap-freeze operation. Caller must have already
7187 * modified the buffer and marked it dirty.
7190 log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
7191 xl_heap_freeze_tuple *tuples, int ntuples)
7193 xl_heap_freeze_page xlrec;
7196 /* Caller should not call me on a non-WAL-logged relation */
7197 Assert(RelationNeedsWAL(reln));
7198 /* nor when there are no tuples to freeze */
7199 Assert(ntuples > 0);
7201 xlrec.cutoff_xid = cutoff_xid;
7202 xlrec.ntuples = ntuples;
7205 XLogRegisterData((char *) &xlrec, SizeOfHeapFreezePage);
7208 * The freeze plan array is not actually in the buffer, but pretend that
7209 * it is. When XLogInsert stores the whole buffer, the freeze plan need
7210 * not be stored too.
7212 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
7213 XLogRegisterBufData(0, (char *) tuples,
7214 ntuples * sizeof(xl_heap_freeze_tuple));
7216 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE);
7222 * Perform XLogInsert for a heap-visible operation. 'block' is the block
7223 * being marked all-visible, and vm_buffer is the buffer containing the
7224 * corresponding visibility map block. Both should have already been modified and dirtied.
7227 * If checksums are enabled, we also generate a full-page image of
7228 * heap_buffer, if necessary.
7231 log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
7232 TransactionId cutoff_xid, uint8 vmflags)
7234 xl_heap_visible xlrec;
7238 Assert(BufferIsValid(heap_buffer));
7239 Assert(BufferIsValid(vm_buffer));
7241 xlrec.cutoff_xid = cutoff_xid;
7242 xlrec.flags = vmflags;
7244 XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
7246 XLogRegisterBuffer(0, vm_buffer, 0);
7248 flags = REGBUF_STANDARD;
7249 if (!XLogHintBitIsNeeded())
7250 flags |= REGBUF_NO_IMAGE;
7251 XLogRegisterBuffer(1, heap_buffer, flags);
7253 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
7259 * Perform XLogInsert for a heap-update operation. Caller must already
7260 * have modified the buffer(s) and marked them dirty.
7263 log_heap_update(Relation reln, Buffer oldbuf,
7264 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
7265 HeapTuple old_key_tuple,
7266 bool all_visible_cleared, bool new_all_visible_cleared)
7268 xl_heap_update xlrec;
7269 xl_heap_header xlhdr;
7270 xl_heap_header xlhdr_idx;
7272 uint16 prefix_suffix[2];
7273 uint16 prefixlen = 0,
7276 Page page = BufferGetPage(newbuf);
7277 bool need_tuple_data = RelationIsLogicallyLogged(reln);
7281 /* Caller should not call me on a non-WAL-logged relation */
7282 Assert(RelationNeedsWAL(reln));
7286 if (HeapTupleIsHeapOnly(newtup))
7287 info = XLOG_HEAP_HOT_UPDATE;
7289 info = XLOG_HEAP_UPDATE;
7292 * If the old and new tuple are on the same page, we only need to log the
7293 * parts of the new tuple that were changed. That saves on the amount of
7294 * WAL we need to write. Currently, we just count any unchanged bytes in
7295 * the beginning and end of the tuple. That's quick to check, and
7296 * perfectly covers the common case that only one field is updated.
7298 * We could do this even if the old and new tuple are on different pages,
7299 * but only if we don't make a full-page image of the old page, which is
7300 * difficult to know in advance. Also, if the old tuple is corrupt for
7301 * some reason, it would allow the corruption to propagate to the new page,
7302 * so it seems best to avoid. Under the general assumption that most
7303 * updates tend to create the new tuple version on the same page, there
7304 * isn't much to be gained by doing this across pages anyway.
7306 * Skip this if we're taking a full-page image of the new page, as we
7307 * don't include the new tuple in the WAL record in that case. Also
7308 * disable if wal_level='logical', as logical decoding needs to be able to
7309 * read the new tuple in whole from the WAL record alone.
7311 if (oldbuf == newbuf && !need_tuple_data &&
7312 !XLogCheckBufferNeedsBackup(newbuf))
7314 char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
7315 char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
7316 int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
7317 int newlen = newtup->t_len - newtup->t_data->t_hoff;
7319 /* Check for common prefix between old and new tuple */
7320 for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
7322 if (newp[prefixlen] != oldp[prefixlen])
7327 * Storing the length of the prefix takes 2 bytes, so we need to save
7328 * at least 3 bytes or there's no point.
7333 /* Same for suffix */
7334 for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
7336 if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
7343 /* Prepare main WAL data chain */
7345 if (all_visible_cleared)
7346 xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
7347 if (new_all_visible_cleared)
7348 xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
7350 xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
7352 xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
7353 if (need_tuple_data)
7355 xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
7358 if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
7359 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
7361 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
7365 /* If new tuple is the single and first tuple on page... */
7366 if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
7367 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
7369 info |= XLOG_HEAP_INIT_PAGE;
7375 /* Prepare WAL data for the old page */
7376 xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
7377 xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
7378 xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
7379 oldtup->t_data->t_infomask2);
7381 /* Prepare WAL data for the new page */
7382 xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
7383 xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
7385 bufflags = REGBUF_STANDARD;
7387 bufflags |= REGBUF_WILL_INIT;
7388 if (need_tuple_data)
7389 bufflags |= REGBUF_KEEP_DATA;
7391 XLogRegisterBuffer(0, newbuf, bufflags);
7392 if (oldbuf != newbuf)
7393 XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
7395 XLogRegisterData((char *) &xlrec, SizeOfHeapUpdate);
7398 * Prepare WAL data for the new tuple.
7400 if (prefixlen > 0 || suffixlen > 0)
7402 if (prefixlen > 0 && suffixlen > 0)
7404 prefix_suffix[0] = prefixlen;
7405 prefix_suffix[1] = suffixlen;
7406 XLogRegisterBufData(0, (char *) &prefix_suffix, sizeof(uint16) * 2);
7408 else if (prefixlen > 0)
7410 XLogRegisterBufData(0, (char *) &prefixlen, sizeof(uint16));
7414 XLogRegisterBufData(0, (char *) &suffixlen, sizeof(uint16));
7418 xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
7419 xlhdr.t_infomask = newtup->t_data->t_infomask;
7420 xlhdr.t_hoff = newtup->t_data->t_hoff;
7421 Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
7424 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
7426 * The 'data' doesn't include the common prefix or suffix.
7428 XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
7431 XLogRegisterBufData(0,
7432 ((char *) newtup->t_data) + SizeofHeapTupleHeader,
7433 newtup->t_len - SizeofHeapTupleHeader - suffixlen);
7438 * Have to write the null bitmap and data after the common prefix as
7439 * two separate rdata entries.
7441 /* bitmap [+ padding] [+ oid] */
7442 if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
7444 XLogRegisterBufData(0,
7445 ((char *) newtup->t_data) + SizeofHeapTupleHeader,
7446 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
7449 /* data after common prefix */
7450 XLogRegisterBufData(0,
7451 ((char *) newtup->t_data) + newtup->t_data->t_hoff + prefixlen,
7452 newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
7455 /* We need to log a tuple identity */
7456 if (need_tuple_data && old_key_tuple)
7458 /* don't really need this, but it's more comfy to decode */
7459 xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
7460 xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
7461 xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
7463 XLogRegisterData((char *) &xlhdr_idx, SizeOfHeapHeader);
7465 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
7466 XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
7467 old_key_tuple->t_len - SizeofHeapTupleHeader);
7470 /* filtering by origin on a row level is much more efficient */
7471 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
7473 recptr = XLogInsert(RM_HEAP_ID, info);
7479 * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
7481 * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog tuples.
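/*
 * Caller sketch (assumption): the heap DML routines emit this record just
 * before their main WAL record whenever logical decoding may need to resolve
 * combo CIDs for a catalog tuple, e.g.:
 *
 *		if (RelationIsAccessibleInLogicalDecoding(relation))
 *			log_heap_new_cid(relation, heaptup);
 */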
7485 log_heap_new_cid(Relation relation, HeapTuple tup)
7487 xl_heap_new_cid xlrec;
7490 HeapTupleHeader hdr = tup->t_data;
7492 Assert(ItemPointerIsValid(&tup->t_self));
7493 Assert(tup->t_tableOid != InvalidOid);
7495 xlrec.top_xid = GetTopTransactionId();
7496 xlrec.target_node = relation->rd_node;
7497 xlrec.target_tid = tup->t_self;
7500 * If the tuple got inserted & deleted in the same TX we definitely have a
7501 * combocid; set both cmin and cmax.
7503 if (hdr->t_infomask & HEAP_COMBOCID)
7505 Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
7506 Assert(!HeapTupleHeaderXminInvalid(hdr));
7507 xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
7508 xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
7509 xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
7511 /* No combocid, so only cmin or cmax can be set by this TX */
7517 * We need to check for LOCK ONLY because multixacts might be
7518 * transferred to the new tuple in case of FOR KEY SHARE updates in
7519 * which case there will be an xmax, although the tuple just got inserted.
7522 if (hdr->t_infomask & HEAP_XMAX_INVALID ||
7523 HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
7525 xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
7526 xlrec.cmax = InvalidCommandId;
7528 /* Tuple from a different tx updated or deleted. */
7531 xlrec.cmin = InvalidCommandId;
7532 xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
7535 xlrec.combocid = InvalidCommandId;
7539 * Note that we don't need to register the buffer here, because this
7540 * operation does not modify the page. The insert/update/delete that
7541 * called us certainly did, but that's WAL-logged separately.
7544 XLogRegisterData((char *) &xlrec, SizeOfHeapNewCid);
7546 /* will be looked at irrespective of origin */
7548 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
7554 * Build a heap tuple representing the configured REPLICA IDENTITY to log as
7555 * the old tuple in an UPDATE or DELETE.
7557 * Returns NULL if there's no need to log an identity or if there's no suitable
7558 * key in the relation.
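/*
 * Caller sketch (assumption, condensed from heap_delete): the result, if any,
 * is logged as the old key tuple, and "copy" tells the caller whether it owns
 * the memory and must free it afterwards:
 *
 *		bool		old_key_copied = false;
 *		HeapTuple	old_key_tuple;
 *
 *		old_key_tuple = ExtractReplicaIdentity(relation, &tp, true,
 *											   &old_key_copied);
 *		...
 *		if (old_key_tuple != NULL && old_key_copied)
 *			heap_freetuple(old_key_tuple);
 */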
7561 ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *copy)
7563 TupleDesc desc = RelationGetDescr(relation);
7566 char replident = relation->rd_rel->relreplident;
7567 HeapTuple key_tuple = NULL;
7568 bool nulls[MaxHeapAttributeNumber];
7569 Datum values[MaxHeapAttributeNumber];
7574 if (!RelationIsLogicallyLogged(relation))
7577 if (replident == REPLICA_IDENTITY_NOTHING)
7580 if (replident == REPLICA_IDENTITY_FULL)
7583 * When logging the entire old tuple, it very well could contain
7584 * toasted columns. If so, force them to be inlined.
7586 if (HeapTupleHasExternal(tp))
7589 tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7594 /* if the key hasn't changed and we're only logging the key, we're done */
7598 /* find the replica identity index */
7599 replidindex = RelationGetReplicaIndex(relation);
7600 if (!OidIsValid(replidindex))
7602 elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7603 RelationGetRelationName(relation));
7607 idx_rel = RelationIdGetRelation(replidindex);
7609 Assert(CheckRelationLockedByMe(idx_rel, AccessShareLock, true));
7611 /* deform tuple, so we have fast access to columns */
7612 heap_deform_tuple(tp, desc, values, nulls);
7614 /* set all columns to NULL, regardless of whether they actually are */
7615 memset(nulls, 1, sizeof(nulls));
7618 * Now set all columns contained in the index to NOT NULL; they cannot
7619 * currently be NULL.
7621 for (natt = 0; natt < IndexRelationGetNumberOfKeyAttributes(idx_rel); natt++)
7623 int attno = idx_rel->rd_index->indkey.values[natt];
7626 elog(ERROR, "system column in index");
7627 nulls[attno - 1] = false;
7630 key_tuple = heap_form_tuple(desc, values, nulls);
7632 RelationClose(idx_rel);
7635 * If the tuple, which by here only contains indexed columns, still has
7636 * toasted columns, force them to be inlined. This is somewhat unlikely
7637 * since there are limits on the size of indexed columns, so we don't
7638 * duplicate toast_flatten_tuple()'s functionality in the above loop over
7639 * the indexed columns, even if it would be more efficient.
7641 if (HeapTupleHasExternal(key_tuple))
7643 HeapTuple oldtup = key_tuple;
7645 key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
7646 heap_freetuple(oldtup);
7653 * Handles CLEANUP_INFO
7656 heap_xlog_cleanup_info(XLogReaderState *record)
7658 xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) XLogRecGetData(record);
7661 ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
7664 * Actual operation is a no-op. Record type exists to provide a means for
7665 * conflict processing to occur before we begin index vacuum actions. See
7666 * vacuumlazy.c and also comments in btvacuumpage()
7669 /* Backup blocks are not used in cleanup_info records */
7670 Assert(!XLogRecHasAnyBlockRefs(record));
7674 * Handles XLOG_HEAP2_CLEAN record type
7677 heap_xlog_clean(XLogReaderState *record)
7679 XLogRecPtr lsn = record->EndRecPtr;
7680 xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
7684 XLogRedoAction action;
7686 XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
7689 * We're about to remove tuples. In Hot Standby mode, ensure that there are
7690 * no queries running for which the removed tuples are still visible.
7692 * Not all HEAP2_CLEAN records remove tuples with xids, so we only want to
7693 * conflict on the records that cause MVCC failures for user queries. If
7694 * latestRemovedXid is invalid, skip conflict processing.
7696 if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
7697 ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
7700 * If we have a full-page image, restore it (using a cleanup lock) and we're done.
7703 action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true,
7705 if (action == BLK_NEEDS_REDO)
7707 Page page = (Page) BufferGetPage(buffer);
7709 OffsetNumber *redirected;
7710 OffsetNumber *nowdead;
7711 OffsetNumber *nowunused;
7717 redirected = (OffsetNumber *) XLogRecGetBlockData(record, 0, &datalen);
7719 nredirected = xlrec->nredirected;
7720 ndead = xlrec->ndead;
7721 end = (OffsetNumber *) ((char *) redirected + datalen);
7722 nowdead = redirected + (nredirected * 2);
7723 nowunused = nowdead + ndead;
7724 nunused = (end - nowunused);
7725 Assert(nunused >= 0);
7727 /* Update all line pointers per the record, and repair fragmentation */
7728 heap_page_prune_execute(buffer,
7729 redirected, nredirected,
7731 nowunused, nunused);
7734 * Note: we don't worry about updating the page's prunability hints.
7735 * At worst this will cause an extra prune cycle to occur soon.
7738 PageSetLSN(page, lsn);
7739 MarkBufferDirty(buffer);
7742 if (BufferIsValid(buffer))
7744 Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
7746 UnlockReleaseBuffer(buffer);
7749 * After cleaning records from a page, it's useful to update the FSM
7750 * about it, as it may cause the page to become a target for insertions
7751 * later even if vacuum decides not to visit it (which is possible if
7752 * it gets marked all-visible).
7754 * Do this regardless of a full-page image being applied, since the
7755 * FSM data is not in the page anyway.
7757 XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
7762 * Replay XLOG_HEAP2_VISIBLE record.
7764 * The critical integrity requirement here is that we must never end up with
7765 * a situation where the visibility map bit is set, and the page-level
7766 * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
7767 * page modification would fail to clear the visibility map bit.
7770 heap_xlog_visible(XLogReaderState *record)
7772 XLogRecPtr lsn = record->EndRecPtr;
7773 xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
7774 Buffer vmbuffer = InvalidBuffer;
7779 XLogRedoAction action;
7781 XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
7784 * If there are any Hot Standby transactions running that have an xmin
7785 * horizon old enough that this page isn't all-visible for them, they
7786 * might incorrectly decide that an index-only scan can skip a heap fetch.
7788 * NB: It might be better to throw some kind of "soft" conflict here that
7789 * forces any index-only scan that is in flight to perform heap fetches,
7790 * rather than killing the transaction outright.
7793 ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
7796 * Read the heap page, if it still exists. If the heap file has been dropped or
7797 * truncated later in recovery, we don't need to update the page, but we'd
7798 * better still update the visibility map.
7800 action = XLogReadBufferForRedo(record, 1, &buffer);
7801 if (action == BLK_NEEDS_REDO)
7804 * We don't bump the LSN of the heap page when setting the visibility
7805 * map bit (unless checksums or wal_log_hints is enabled, in which
7806 * case we must), because that would generate an unworkable volume of
7807 * full-page writes. This exposes us to torn page hazards, but since
7808 * we're not inspecting the existing page contents in any way, we don't care.
7811 * However, all operations that clear the visibility map bit *do* bump
7812 * the LSN, and those operations will only be replayed if the XLOG LSN
7813 * follows the page LSN. Thus, if the page LSN has advanced past our
7814 * XLOG record's LSN, we mustn't mark the page all-visible, because
7815 * the subsequent update won't be replayed to clear the flag.
7817 page = BufferGetPage(buffer);
7819 PageSetAllVisible(page);
7821 MarkBufferDirty(buffer);
7823 else if (action == BLK_RESTORED)
7826 * If heap block was backed up, we already restored it and there's
7827 * nothing more to do. (This can only happen with checksums or
7828 * wal_log_hints enabled.)
7832 if (BufferIsValid(buffer))
7834 Size space = PageGetFreeSpace(BufferGetPage(buffer));
7836 UnlockReleaseBuffer(buffer);
7839 * Since FSM is not WAL-logged and only updated heuristically, it
7840 * easily becomes stale in standbys. If the standby is later promoted
7841 * and runs VACUUM, it will skip updating individual free space
7842 * figures for pages that became all-visible (or all-frozen, depending
7843 * on the vacuum mode), which is troublesome when FreeSpaceMapVacuum
7844 * propagates too optimistic free space values to upper FSM layers;
7845 * later inserters try to use such pages only to find out that they
7846 * are unusable. This can cause long stalls when there are many such pages.
7849 * Forestall those problems by updating FSM's idea about a page that
7850 * is becoming all-visible or all-frozen.
7852 * Do this regardless of a full-page image being applied, since the
7853 * FSM data is not in the page anyway.
7855 if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
7856 XLogRecordPageWithFreeSpace(rnode, blkno, space);
7860 * Even if we skipped the heap page update due to the LSN interlock, it's
7861 * still safe to update the visibility map. Any WAL record that clears
7862 * the visibility map bit does so before checking the page LSN, so any
7863 * bits that need to be cleared will still be cleared.
7865 if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
7866 &vmbuffer) == BLK_NEEDS_REDO)
7868 Page vmpage = BufferGetPage(vmbuffer);
7871 /* initialize the page if it was read as zeros */
7872 if (PageIsNew(vmpage))
7873 PageInit(vmpage, BLCKSZ, 0);
7876 * XLogReadBufferForRedoExtended locked the buffer. But
7877 * visibilitymap_set will handle locking itself.
7879 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
7881 reln = CreateFakeRelcacheEntry(rnode);
7882 visibilitymap_pin(reln, blkno, &vmbuffer);
7885 * Don't set the bit if replay has already passed this point.
7887 * It might be safe to do this unconditionally; if replay has passed
7888 * this point, we'll replay at least as far this time as we did
7889 * before, and if this bit needs to be cleared, the record responsible
7890 * for doing so should again be replayed, and will clear it. For right
7891 * now, out of an abundance of conservatism, we use the same test here
7892 * we did for the heap page. If this results in a dropped bit, no
7893 * real harm is done; and the next VACUUM will fix it.
7895 if (lsn > PageGetLSN(vmpage))
7896 visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
7897 xlrec->cutoff_xid, xlrec->flags);
7899 ReleaseBuffer(vmbuffer);
7900 FreeFakeRelcacheEntry(reln);
7902 else if (BufferIsValid(vmbuffer))
7903 UnlockReleaseBuffer(vmbuffer);
7907 * Replay XLOG_HEAP2_FREEZE_PAGE records
7910 heap_xlog_freeze_page(XLogReaderState *record)
7912 XLogRecPtr lsn = record->EndRecPtr;
7913 xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) XLogRecGetData(record);
7914 TransactionId cutoff_xid = xlrec->cutoff_xid;
7919 * In Hot Standby mode, ensure that there are no queries still running
7920 * that consider the frozen xids as running.
if (InHotStandby)
{
	TransactionId latestRemovedXid = cutoff_xid;
7927 TransactionIdRetreat(latestRemovedXid);
7929 XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
7930 ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rnode);
7933 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
7935 Page page = BufferGetPage(buffer);
7936 xl_heap_freeze_tuple *tuples;
7938 tuples = (xl_heap_freeze_tuple *) XLogRecGetBlockData(record, 0, NULL);
7940 /* now execute freeze plan for each frozen tuple */
7941 for (ntup = 0; ntup < xlrec->ntuples; ntup++)
7943 xl_heap_freeze_tuple *xlrec_tp;
7945 HeapTupleHeader tuple;
7947 xlrec_tp = &tuples[ntup];
7948 lp = PageGetItemId(page, xlrec_tp->offset); /* offsets are one-based */
7949 tuple = (HeapTupleHeader) PageGetItem(page, lp);
7951 heap_execute_freeze_tuple(tuple, xlrec_tp);
7954 PageSetLSN(page, lsn);
7955 MarkBufferDirty(buffer);
7957 if (BufferIsValid(buffer))
7958 UnlockReleaseBuffer(buffer);
7962 * Given an "infobits" field from an XLog record, set the correct bits in the
7963 * given infomask and infomask2 for the tuple touched by the record.
7965 * (This is the reverse of compute_infobits).
7968 fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
7970 *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
7971 HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
7972 *infomask2 &= ~HEAP_KEYS_UPDATED;
7974 if (infobits & XLHL_XMAX_IS_MULTI)
7975 *infomask |= HEAP_XMAX_IS_MULTI;
7976 if (infobits & XLHL_XMAX_LOCK_ONLY)
7977 *infomask |= HEAP_XMAX_LOCK_ONLY;
7978 if (infobits & XLHL_XMAX_EXCL_LOCK)
7979 *infomask |= HEAP_XMAX_EXCL_LOCK;
7980 /* note HEAP_XMAX_SHR_LOCK isn't considered here */
7981 if (infobits & XLHL_XMAX_KEYSHR_LOCK)
7982 *infomask |= HEAP_XMAX_KEYSHR_LOCK;
7984 if (infobits & XLHL_KEYS_UPDATED)
7985 *infomask2 |= HEAP_KEYS_UPDATED;
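
/*
 * A minimal illustration (not part of any redo path in this file): applying
 * the infobits computed for an exclusive, lock-only xmax should reproduce
 * the corresponding infomask bits, e.g.
 *
 *		uint16	infomask = 0, infomask2 = 0;
 *
 *		fix_infomask_from_infobits(XLHL_XMAX_LOCK_ONLY | XLHL_XMAX_EXCL_LOCK,
 *								   &infomask, &infomask2);
 *		Assert((infomask & (HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_EXCL_LOCK)) ==
 *			   (HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_EXCL_LOCK));
 */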
7989 heap_xlog_delete(XLogReaderState *record)
7991 XLogRecPtr lsn = record->EndRecPtr;
7992 xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
7996 HeapTupleHeader htup;
7998 RelFileNode target_node;
7999 ItemPointerData target_tid;
8001 XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
8002 ItemPointerSetBlockNumber(&target_tid, blkno);
8003 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
8006 * The visibility map may need to be fixed even if the heap page is
8007 * already up-to-date.
8009 if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
8011 Relation reln = CreateFakeRelcacheEntry(target_node);
8012 Buffer vmbuffer = InvalidBuffer;
8014 visibilitymap_pin(reln, blkno, &vmbuffer);
8015 visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
8016 ReleaseBuffer(vmbuffer);
8017 FreeFakeRelcacheEntry(reln);
8020 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8022 page = BufferGetPage(buffer);
8024 if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
8025 lp = PageGetItemId(page, xlrec->offnum);
8027 if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
8028 elog(PANIC, "invalid lp");
8030 htup = (HeapTupleHeader) PageGetItem(page, lp);
8032 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
8033 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
8034 HeapTupleHeaderClearHotUpdated(htup);
8035 fix_infomask_from_infobits(xlrec->infobits_set,
8036 &htup->t_infomask, &htup->t_infomask2);
if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
	HeapTupleHeaderSetXmax(htup, xlrec->xmax);
else
	HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
8041 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
8043 /* Mark the page as a candidate for pruning */
8044 PageSetPrunable(page, XLogRecGetXid(record));
8046 if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
8047 PageClearAllVisible(page);
8049 /* Make sure t_ctid is set correctly */
if (xlrec->flags & XLH_DELETE_IS_PARTITION_MOVE)
	HeapTupleHeaderSetMovedPartitions(htup);
else
	htup->t_ctid = target_tid;
8054 PageSetLSN(page, lsn);
8055 MarkBufferDirty(buffer);
8057 if (BufferIsValid(buffer))
8058 UnlockReleaseBuffer(buffer);
8062 heap_xlog_insert(XLogReaderState *record)
8064 XLogRecPtr lsn = record->EndRecPtr;
8065 xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
8070 HeapTupleHeaderData hdr;
8071 char data[MaxHeapTupleSize];
8073 HeapTupleHeader htup;
8074 xl_heap_header xlhdr;
8077 RelFileNode target_node;
8079 ItemPointerData target_tid;
8080 XLogRedoAction action;
8082 XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
8083 ItemPointerSetBlockNumber(&target_tid, blkno);
8084 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
8087 * The visibility map may need to be fixed even if the heap page is
8088 * already up-to-date.
8090 if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
8092 Relation reln = CreateFakeRelcacheEntry(target_node);
8093 Buffer vmbuffer = InvalidBuffer;
8095 visibilitymap_pin(reln, blkno, &vmbuffer);
8096 visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
8097 ReleaseBuffer(vmbuffer);
8098 FreeFakeRelcacheEntry(reln);
8102 * If we inserted the first and only tuple on the page, re-initialize the
8103 * page from scratch.
if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
{
	buffer = XLogInitBufferForRedo(record, 0);
	page = BufferGetPage(buffer);
	PageInit(page, BufferGetPageSize(buffer), 0);
	action = BLK_NEEDS_REDO;
}
else
	action = XLogReadBufferForRedo(record, 0, &buffer);
8114 if (action == BLK_NEEDS_REDO)
8119 page = BufferGetPage(buffer);
8121 if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
8122 elog(PANIC, "invalid max offset number");
8124 data = XLogRecGetBlockData(record, 0, &datalen);
8126 newlen = datalen - SizeOfHeapHeader;
8127 Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
8128 memcpy((char *) &xlhdr, data, SizeOfHeapHeader);
8129 data += SizeOfHeapHeader;
8132 MemSet((char *) htup, 0, SizeofHeapTupleHeader);
8133 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
memcpy((char *) htup + SizeofHeapTupleHeader, data, newlen);
newlen += SizeofHeapTupleHeader;
8138 htup->t_infomask2 = xlhdr.t_infomask2;
8139 htup->t_infomask = xlhdr.t_infomask;
8140 htup->t_hoff = xlhdr.t_hoff;
8141 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
8142 HeapTupleHeaderSetCmin(htup, FirstCommandId);
8143 htup->t_ctid = target_tid;
8145 if (PageAddItem(page, (Item) htup, newlen, xlrec->offnum,
8146 true, true) == InvalidOffsetNumber)
8147 elog(PANIC, "failed to add tuple");
8149 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
8151 PageSetLSN(page, lsn);
8153 if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
8154 PageClearAllVisible(page);
8156 MarkBufferDirty(buffer);
8158 if (BufferIsValid(buffer))
8159 UnlockReleaseBuffer(buffer);
8162 * If the page is running low on free space, update the FSM as well.
8163 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
8164 * better than that without knowing the fill-factor for the table.
8166 * XXX: Don't do this if the page was restored from a full-page image. We
8167 * don't bother to update the FSM in that case; it doesn't need to be
8168 * totally accurate anyway.
8170 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
8171 XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
8175 * Handles MULTI_INSERT record type.
8178 heap_xlog_multi_insert(XLogReaderState *record)
8180 XLogRecPtr lsn = record->EndRecPtr;
8181 xl_heap_multi_insert *xlrec;
8188 HeapTupleHeaderData hdr;
8189 char data[MaxHeapTupleSize];
8191 HeapTupleHeader htup;
8195 bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
8196 XLogRedoAction action;
8199 * Insertion doesn't overwrite MVCC data, so no conflict processing is
 * required.
8202 xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
8204 XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
8207 * The visibility map may need to be fixed even if the heap page is
8208 * already up-to-date.
8210 if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
8212 Relation reln = CreateFakeRelcacheEntry(rnode);
8213 Buffer vmbuffer = InvalidBuffer;
8215 visibilitymap_pin(reln, blkno, &vmbuffer);
8216 visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
8217 ReleaseBuffer(vmbuffer);
8218 FreeFakeRelcacheEntry(reln);
if (isinit)
{
	buffer = XLogInitBufferForRedo(record, 0);
	page = BufferGetPage(buffer);
	PageInit(page, BufferGetPageSize(buffer), 0);
	action = BLK_NEEDS_REDO;
}
else
	action = XLogReadBufferForRedo(record, 0, &buffer);
8230 if (action == BLK_NEEDS_REDO)
8236 /* Tuples are stored as block data */
8237 tupdata = XLogRecGetBlockData(record, 0, &len);
8238 endptr = tupdata + len;
8240 page = (Page) BufferGetPage(buffer);
8242 for (i = 0; i < xlrec->ntuples; i++)
8244 OffsetNumber offnum;
8245 xl_multi_insert_tuple *xlhdr;
8248 * If we're reinitializing the page, the tuples are stored in
8249 * order from FirstOffsetNumber. Otherwise there's an array of
8250 * offsets in the WAL record, and the tuples come after that.
if (isinit)
	offnum = FirstOffsetNumber + i;
else
	offnum = xlrec->offsets[i];
8256 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
8257 elog(PANIC, "invalid max offset number");
8259 xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
8260 tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
8262 newlen = xlhdr->datalen;
8263 Assert(newlen <= MaxHeapTupleSize);
8265 MemSet((char *) htup, 0, SizeofHeapTupleHeader);
8266 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
memcpy((char *) htup + SizeofHeapTupleHeader, (char *) tupdata, newlen);
tupdata += newlen;
newlen += SizeofHeapTupleHeader;
8273 htup->t_infomask2 = xlhdr->t_infomask2;
8274 htup->t_infomask = xlhdr->t_infomask;
8275 htup->t_hoff = xlhdr->t_hoff;
8276 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
8277 HeapTupleHeaderSetCmin(htup, FirstCommandId);
8278 ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
8279 ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
8281 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
8282 if (offnum == InvalidOffsetNumber)
8283 elog(PANIC, "failed to add tuple");
8285 if (tupdata != endptr)
8286 elog(PANIC, "total tuple length mismatch");
8288 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
8290 PageSetLSN(page, lsn);
8292 if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
8293 PageClearAllVisible(page);
8295 MarkBufferDirty(buffer);
8297 if (BufferIsValid(buffer))
8298 UnlockReleaseBuffer(buffer);
8301 * If the page is running low on free space, update the FSM as well.
8302 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
8303 * better than that without knowing the fill-factor for the table.
8305 * XXX: Don't do this if the page was restored from a full-page image. We
8306 * don't bother to update the FSM in that case; it doesn't need to be
8307 * totally accurate anyway.
8309 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
8310 XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
8314 * Handles UPDATE and HOT_UPDATE
8317 heap_xlog_update(XLogReaderState *record, bool hot_update)
8319 XLogRecPtr lsn = record->EndRecPtr;
8320 xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
8324 ItemPointerData newtid;
8328 OffsetNumber offnum;
8330 HeapTupleData oldtup;
8331 HeapTupleHeader htup;
8332 uint16 prefixlen = 0,
8337 HeapTupleHeaderData hdr;
8338 char data[MaxHeapTupleSize];
8340 xl_heap_header xlhdr;
8343 XLogRedoAction oldaction;
8344 XLogRedoAction newaction;
8346 /* initialize to keep the compiler quiet */
8347 oldtup.t_data = NULL;
8350 XLogRecGetBlockTag(record, 0, &rnode, NULL, &newblk);
if (XLogRecGetBlockTag(record, 1, NULL, NULL, &oldblk))
{
	/* HOT updates are never done across pages */
	Assert(!hot_update);
}
else
	oldblk = newblk;
ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
8362 * The visibility map may need to be fixed even if the heap page is
8363 * already up-to-date.
8365 if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
8367 Relation reln = CreateFakeRelcacheEntry(rnode);
8368 Buffer vmbuffer = InvalidBuffer;
8370 visibilitymap_pin(reln, oldblk, &vmbuffer);
8371 visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
8372 ReleaseBuffer(vmbuffer);
8373 FreeFakeRelcacheEntry(reln);
8377 * In normal operation, it is important to lock the two pages in
8378 * page-number order, to avoid possible deadlocks against other update
8379 * operations going the other way. However, during WAL replay there can
8380 * be no other update happening, so we don't need to worry about that. But
8381 * we *do* need to worry that we don't expose an inconsistent state to Hot
8382 * Standby queries --- so the original page can't be unlocked before we've
8383 * added the new tuple to the new page.
8386 /* Deal with old tuple version */
oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
								  &obuffer);
8389 if (oldaction == BLK_NEEDS_REDO)
8391 page = BufferGetPage(obuffer);
8392 offnum = xlrec->old_offnum;
8393 if (PageGetMaxOffsetNumber(page) >= offnum)
8394 lp = PageGetItemId(page, offnum);
8396 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8397 elog(PANIC, "invalid lp");
8399 htup = (HeapTupleHeader) PageGetItem(page, lp);
8401 oldtup.t_data = htup;
8402 oldtup.t_len = ItemIdGetLength(lp);
8404 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
8405 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
if (hot_update)
	HeapTupleHeaderSetHotUpdated(htup);
else
	HeapTupleHeaderClearHotUpdated(htup);
8410 fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
8411 &htup->t_infomask2);
8412 HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
8413 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
8414 /* Set forward chain link in t_ctid */
8415 htup->t_ctid = newtid;
8417 /* Mark the page as a candidate for pruning */
8418 PageSetPrunable(page, XLogRecGetXid(record));
8420 if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
8421 PageClearAllVisible(page);
8423 PageSetLSN(page, lsn);
8424 MarkBufferDirty(obuffer);
8428 * Read the page the new tuple goes into, if different from old.
if (oldblk == newblk)
{
	nbuffer = obuffer;
	newaction = oldaction;
}
else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
{
	nbuffer = XLogInitBufferForRedo(record, 0);
	page = (Page) BufferGetPage(nbuffer);
	PageInit(page, BufferGetPageSize(nbuffer), 0);
	newaction = BLK_NEEDS_REDO;
}
else
	newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
8446 * The visibility map may need to be fixed even if the heap page is
8447 * already up-to-date.
8449 if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
8451 Relation reln = CreateFakeRelcacheEntry(rnode);
8452 Buffer vmbuffer = InvalidBuffer;
8454 visibilitymap_pin(reln, newblk, &vmbuffer);
8455 visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
8456 ReleaseBuffer(vmbuffer);
8457 FreeFakeRelcacheEntry(reln);
8460 /* Deal with new tuple */
8461 if (newaction == BLK_NEEDS_REDO)
8468 recdata = XLogRecGetBlockData(record, 0, &datalen);
8469 recdata_end = recdata + datalen;
8471 page = BufferGetPage(nbuffer);
8473 offnum = xlrec->new_offnum;
8474 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
8475 elog(PANIC, "invalid max offset number");
8477 if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
8479 Assert(newblk == oldblk);
8480 memcpy(&prefixlen, recdata, sizeof(uint16));
8481 recdata += sizeof(uint16);
8483 if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
8485 Assert(newblk == oldblk);
8486 memcpy(&suffixlen, recdata, sizeof(uint16));
8487 recdata += sizeof(uint16);
8490 memcpy((char *) &xlhdr, recdata, SizeOfHeapHeader);
8491 recdata += SizeOfHeapHeader;
8493 tuplen = recdata_end - recdata;
8494 Assert(tuplen <= MaxHeapTupleSize);
8497 MemSet((char *) htup, 0, SizeofHeapTupleHeader);
8500 * Reconstruct the new tuple using the prefix and/or suffix from the
8501 * old tuple, and the data stored in the WAL record.
8503 newp = (char *) htup + SizeofHeapTupleHeader;
if (prefixlen > 0)
{
	int		len;
	/* copy bitmap [+ padding] [+ oid] from WAL record */
	len = xlhdr.t_hoff - SizeofHeapTupleHeader;
	memcpy(newp, recdata, len);
	recdata += len; newp += len;
	/* copy prefix from old tuple */
	memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
	newp += prefixlen;
	/* copy new tuple data from WAL record */
	len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
	memcpy(newp, recdata, len);
	recdata += len; newp += len;
}
else
{
	/* copy bitmap [+ padding] [+ oid] + data from record, all in one go */
	memcpy(newp, recdata, tuplen);
	recdata += tuplen; newp += tuplen;
}
Assert(recdata == recdata_end);
/* copy suffix from old tuple */
if (suffixlen > 0)
	memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
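
/*
 * At this point the reconstructed tuple data consists of, in order: the
 * null bitmap [+ padding] [+ oid] taken from the WAL record, the prefix
 * copied from the old tuple (if any), the new data from the WAL record,
 * and the suffix copied from the old tuple (if any).
 */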
8540 newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
8541 htup->t_infomask2 = xlhdr.t_infomask2;
8542 htup->t_infomask = xlhdr.t_infomask;
8543 htup->t_hoff = xlhdr.t_hoff;
8545 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
8546 HeapTupleHeaderSetCmin(htup, FirstCommandId);
8547 HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
8548 /* Make sure there is no forward chain link in t_ctid */
8549 htup->t_ctid = newtid;
8551 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
8552 if (offnum == InvalidOffsetNumber)
8553 elog(PANIC, "failed to add tuple");
8555 if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
8556 PageClearAllVisible(page);
8558 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
8560 PageSetLSN(page, lsn);
8561 MarkBufferDirty(nbuffer);
8564 if (BufferIsValid(nbuffer) && nbuffer != obuffer)
8565 UnlockReleaseBuffer(nbuffer);
8566 if (BufferIsValid(obuffer))
8567 UnlockReleaseBuffer(obuffer);
8570 * If the new page is running low on free space, update the FSM as well.
8571 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
8572 * better than that without knowing the fill-factor for the table.
8574 * However, don't update the FSM on HOT updates, because after crash
8575 * recovery, either the old or the new tuple will certainly be dead and
8576 * prunable. After pruning, the page will have roughly as much free space
8577 * as it did before the update, assuming the new tuple is about the same
8578 * size as the old one.
8580 * XXX: Don't do this if the page was restored from a full-page image. We
8581 * don't bother to update the FSM in that case; it doesn't need to be
8582 * totally accurate anyway.
8584 if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
8585 XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
8589 heap_xlog_confirm(XLogReaderState *record)
8591 XLogRecPtr lsn = record->EndRecPtr;
8592 xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
8595 OffsetNumber offnum;
8597 HeapTupleHeader htup;
8599 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8601 page = BufferGetPage(buffer);
8603 offnum = xlrec->offnum;
8604 if (PageGetMaxOffsetNumber(page) >= offnum)
8605 lp = PageGetItemId(page, offnum);
8607 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8608 elog(PANIC, "invalid lp");
8610 htup = (HeapTupleHeader) PageGetItem(page, lp);
8613 * Confirm tuple as actually inserted
8615 ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
8617 PageSetLSN(page, lsn);
8618 MarkBufferDirty(buffer);
8620 if (BufferIsValid(buffer))
8621 UnlockReleaseBuffer(buffer);
8625 heap_xlog_lock(XLogReaderState *record)
8627 XLogRecPtr lsn = record->EndRecPtr;
8628 xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
8631 OffsetNumber offnum;
8633 HeapTupleHeader htup;
8636 * The visibility map may need to be fixed even if the heap page is
8637 * already up-to-date.
8639 if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
8642 Buffer vmbuffer = InvalidBuffer;
8646 XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
8647 reln = CreateFakeRelcacheEntry(rnode);
8649 visibilitymap_pin(reln, block, &vmbuffer);
8650 visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
8652 ReleaseBuffer(vmbuffer);
8653 FreeFakeRelcacheEntry(reln);
8656 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8658 page = (Page) BufferGetPage(buffer);
8660 offnum = xlrec->offnum;
8661 if (PageGetMaxOffsetNumber(page) >= offnum)
8662 lp = PageGetItemId(page, offnum);
8664 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8665 elog(PANIC, "invalid lp");
8667 htup = (HeapTupleHeader) PageGetItem(page, lp);
8669 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
8670 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
8671 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
8672 &htup->t_infomask2);
8675 * Clear relevant update flags, but only if the modified infomask says
8676 * there's no update.
if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
{
	HeapTupleHeaderClearHotUpdated(htup);
	/* Make sure there is no forward chain link in t_ctid */
	ItemPointerSet(&htup->t_ctid,
				   BufferGetBlockNumber(buffer),
				   offnum);
}
HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
8687 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
8688 PageSetLSN(page, lsn);
8689 MarkBufferDirty(buffer);
8691 if (BufferIsValid(buffer))
8692 UnlockReleaseBuffer(buffer);
8696 heap_xlog_lock_updated(XLogReaderState *record)
8698 XLogRecPtr lsn = record->EndRecPtr;
8699 xl_heap_lock_updated *xlrec;
8702 OffsetNumber offnum;
8704 HeapTupleHeader htup;
8706 xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);
8709 * The visibility map may need to be fixed even if the heap page is
8710 * already up-to-date.
8712 if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
8715 Buffer vmbuffer = InvalidBuffer;
8719 XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
8720 reln = CreateFakeRelcacheEntry(rnode);
8722 visibilitymap_pin(reln, block, &vmbuffer);
8723 visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
8725 ReleaseBuffer(vmbuffer);
8726 FreeFakeRelcacheEntry(reln);
8729 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8731 page = BufferGetPage(buffer);
8733 offnum = xlrec->offnum;
8734 if (PageGetMaxOffsetNumber(page) >= offnum)
8735 lp = PageGetItemId(page, offnum);
8737 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8738 elog(PANIC, "invalid lp");
8740 htup = (HeapTupleHeader) PageGetItem(page, lp);
8742 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
8743 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
8744 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
8745 &htup->t_infomask2);
8746 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
8748 PageSetLSN(page, lsn);
8749 MarkBufferDirty(buffer);
8751 if (BufferIsValid(buffer))
8752 UnlockReleaseBuffer(buffer);
8756 heap_xlog_inplace(XLogReaderState *record)
8758 XLogRecPtr lsn = record->EndRecPtr;
8759 xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
8762 OffsetNumber offnum;
8764 HeapTupleHeader htup;
8768 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8770 char *newtup = XLogRecGetBlockData(record, 0, &newlen);
8772 page = BufferGetPage(buffer);
8774 offnum = xlrec->offnum;
8775 if (PageGetMaxOffsetNumber(page) >= offnum)
8776 lp = PageGetItemId(page, offnum);
8778 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8779 elog(PANIC, "invalid lp");
8781 htup = (HeapTupleHeader) PageGetItem(page, lp);
8783 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
8784 if (oldlen != newlen)
8785 elog(PANIC, "wrong tuple length");
8787 memcpy((char *) htup + htup->t_hoff, newtup, newlen);
8789 PageSetLSN(page, lsn);
8790 MarkBufferDirty(buffer);
8792 if (BufferIsValid(buffer))
8793 UnlockReleaseBuffer(buffer);
8797 heap_redo(XLogReaderState *record)
8799 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8802 * These operations don't overwrite MVCC data so no conflict processing is
8803 * required. The ones in heap2 rmgr do.
8806 switch (info & XLOG_HEAP_OPMASK)
8808 case XLOG_HEAP_INSERT:
8809 heap_xlog_insert(record);
8811 case XLOG_HEAP_DELETE:
8812 heap_xlog_delete(record);
8814 case XLOG_HEAP_UPDATE:
8815 heap_xlog_update(record, false);
8817 case XLOG_HEAP_TRUNCATE:
8820 * TRUNCATE is a no-op because the actions are already logged as
8821 * SMGR WAL records. The TRUNCATE WAL record only exists for logical
 * decoding.
8825 case XLOG_HEAP_HOT_UPDATE:
8826 heap_xlog_update(record, true);
8828 case XLOG_HEAP_CONFIRM:
8829 heap_xlog_confirm(record);
8831 case XLOG_HEAP_LOCK:
8832 heap_xlog_lock(record);
8834 case XLOG_HEAP_INPLACE:
8835 heap_xlog_inplace(record);
8838 elog(PANIC, "heap_redo: unknown op code %u", info);
8843 heap2_redo(XLogReaderState *record)
8845 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8847 switch (info & XLOG_HEAP_OPMASK)
8849 case XLOG_HEAP2_CLEAN:
8850 heap_xlog_clean(record);
8852 case XLOG_HEAP2_FREEZE_PAGE:
8853 heap_xlog_freeze_page(record);
8855 case XLOG_HEAP2_CLEANUP_INFO:
8856 heap_xlog_cleanup_info(record);
8858 case XLOG_HEAP2_VISIBLE:
8859 heap_xlog_visible(record);
8861 case XLOG_HEAP2_MULTI_INSERT:
8862 heap_xlog_multi_insert(record);
8864 case XLOG_HEAP2_LOCK_UPDATED:
8865 heap_xlog_lock_updated(record);
8867 case XLOG_HEAP2_NEW_CID:
8870 * Nothing to do on a real replay, only used during logical
 * decoding.
8874 case XLOG_HEAP2_REWRITE:
8875 heap_xlog_logical_rewrite(record);
8878 elog(PANIC, "heap2_redo: unknown op code %u", info);
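
/*
 * Note: heap_redo() and heap2_redo() are not called directly; they are
 * reached through the resource manager table (src/include/access/rmgrlist.h),
 * which maps RM_HEAP_ID and RM_HEAP2_ID WAL records to these entry points
 * during recovery.
 */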
8883 * heap_sync - sync a heap, for use when no WAL has been written
8885 * This forces the heap contents (including TOAST heap if any) down to disk.
8886 * If we skipped using WAL, and WAL is otherwise needed, we must force the
8887 * relation down to disk before it's safe to commit the transaction. This
8888 * requires writing out any dirty buffers and then doing a forced fsync.
8890 * Indexes are not touched. (Currently, index operations associated with
8891 * the commands that use this are WAL-logged and so do not need fsync.
8892 * That behavior might change someday, but in any case it's likely that
8893 * any fsync decisions required would be per-index and hence not appropriate
 * to be done here.)
8897 heap_sync(Relation rel)
8899 /* non-WAL-logged tables never need fsync */
if (!RelationNeedsWAL(rel))
	return;

FlushRelationBuffers(rel);
8905 /* FlushRelationBuffers will have opened rd_smgr */
8906 smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);
8908 /* FSM is not critical, don't bother syncing it */
8910 /* toast heap, if any */
8911 if (OidIsValid(rel->rd_rel->reltoastrelid))
8915 toastrel = table_open(rel->rd_rel->reltoastrelid, AccessShareLock);
8916 FlushRelationBuffers(toastrel);
8917 smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
8918 table_close(toastrel, AccessShareLock);
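
/*
 * Illustrative caller sketch (hypothetical code, not a call site in this
 * file): a bulk loader that inserted with HEAP_INSERT_SKIP_WAL into a
 * relation created in the same transaction would be expected to do
 * something like
 *
 *		if (options & HEAP_INSERT_SKIP_WAL)
 *			heap_sync(rel);
 *
 * before committing, so that the data reaches disk even though no WAL was
 * written for it. Here "options" is a hypothetical flag variable held by
 * the caller.
 */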
8923 * Mask a heap page before performing consistency checks on it.
8926 heap_mask(char *pagedata, BlockNumber blkno)
8928 Page page = (Page) pagedata;
8931 mask_page_lsn_and_checksum(page);
8933 mask_page_hint_bits(page);
8934 mask_unused_space(page);
8936 for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
8938 ItemId iid = PageGetItemId(page, off);
8941 page_item = (char *) (page + ItemIdGetOffset(iid));
8943 if (ItemIdIsNormal(iid))
8945 HeapTupleHeader page_htup = (HeapTupleHeader) page_item;
 * If xmin of a tuple is not yet frozen, we should ignore
 * differences in hint bits, since they can be set without
 * emitting WAL.
if (!HeapTupleHeaderXminFrozen(page_htup))
	page_htup->t_infomask &= ~HEAP_XACT_MASK;
else
{
	/* Still we need to mask xmax hint bits. */
	page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
8958 page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
8962 * During replay, we set Command Id to FirstCommandId. Hence, mask
8963 * it. See heap_xlog_insert() for details.
8965 page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;
8968 * For a speculative tuple, heap_insert() does not set ctid in the
8969 * caller-passed heap tuple itself, leaving the ctid field to
8970 * contain a speculative token value - a per-backend monotonically
8971 * increasing identifier. Besides, it does not WAL-log ctid under
8972 * any circumstances.
8974 * During redo, heap_xlog_insert() sets t_ctid to current block
8975 * number and self offset number. It doesn't care about any
8976 * speculative insertions on the primary. Hence, we set t_ctid to
8977 * current block number and self offset number to ignore any
 * inconsistency.
8980 if (HeapTupleHeaderIsSpeculative(page_htup))
8981 ItemPointerSet(&page_htup->t_ctid, blkno, off);
8984 * NB: Not ignoring ctid changes due to the tuple having moved
8985 * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
8986 * important information that needs to be in-sync between primary
8987 * and standby, and thus is WAL logged.
8992 * Ignore any padding bytes after the tuple, when the length of the
8993 * item is not MAXALIGNed.
8995 if (ItemIdHasStorage(iid))
8997 int len = ItemIdGetLength(iid);
8998 int padlen = MAXALIGN(len) - len;
if (padlen > 0)
	memset(page_item + len, MASK_MARKER, padlen);