1 /*-------------------------------------------------------------------------
4 * heap access method code
6 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/access/heap/heapam.c
15 * relation_open - open any relation by relation OID
16 * relation_openrv - open any relation specified by a RangeVar
17 * relation_close - close any relation
18 * heap_open - open a heap relation by relation OID
19 * heap_openrv - open a heap relation specified by a RangeVar
20 * heap_close - (now just a macro for relation_close)
21 * heap_beginscan - begin relation scan
22 * heap_rescan - restart a relation scan
23 * heap_endscan - end relation scan
24 * heap_getnext - retrieve next tuple in scan
25 * heap_fetch - retrieve tuple with given tid
26 * heap_insert - insert tuple into a relation
27 * heap_multi_insert - insert multiple tuples into a relation
28 * heap_delete - delete a tuple from a relation
29 * heap_update - replace a tuple in a relation with another tuple
30 * heap_sync - sync heap, for when no WAL has been written
33 * This file contains the heap_ routines which implement
34 * the POSTGRES heap access method used for all POSTGRES relations.
37 *-------------------------------------------------------------------------
41 #include "access/heapam.h"
42 #include "access/heapam_xlog.h"
43 #include "access/hio.h"
44 #include "access/multixact.h"
45 #include "access/parallel.h"
46 #include "access/relscan.h"
47 #include "access/sysattr.h"
48 #include "access/transam.h"
49 #include "access/tuptoaster.h"
50 #include "access/valid.h"
51 #include "access/visibilitymap.h"
52 #include "access/xact.h"
53 #include "access/xlog.h"
54 #include "access/xloginsert.h"
55 #include "access/xlogutils.h"
56 #include "catalog/catalog.h"
57 #include "catalog/namespace.h"
58 #include "miscadmin.h"
60 #include "storage/bufmgr.h"
61 #include "storage/freespace.h"
62 #include "storage/lmgr.h"
63 #include "storage/predicate.h"
64 #include "storage/procarray.h"
65 #include "storage/smgr.h"
66 #include "storage/spin.h"
67 #include "storage/standby.h"
68 #include "utils/datum.h"
69 #include "utils/inval.h"
70 #include "utils/lsyscache.h"
71 #include "utils/relcache.h"
72 #include "utils/snapmgr.h"
73 #include "utils/syscache.h"
74 #include "utils/tqual.h"
78 bool synchronize_seqscans = true;
81 static HeapScanDesc heap_beginscan_internal(Relation relation,
83 int nkeys, ScanKey key,
84 ParallelHeapScanDesc parallel_scan,
91 static BlockNumber heap_parallelscan_nextpage(HeapScanDesc scan);
92 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
93 TransactionId xid, CommandId cid, int options);
94 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
95 Buffer newbuf, HeapTuple oldtup,
96 HeapTuple newtup, HeapTuple old_key_tup,
97 bool all_visible_cleared, bool new_all_visible_cleared);
98 static void HeapSatisfiesHOTandKeyUpdate(Relation relation,
100 Bitmapset *key_attrs, Bitmapset *id_attrs,
101 bool *satisfies_hot, bool *satisfies_key,
103 HeapTuple oldtup, HeapTuple newtup);
104 static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
105 LockTupleMode mode, LockWaitPolicy wait_policy,
106 bool *have_tuple_lock);
107 static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
108 uint16 old_infomask2, TransactionId add_to_xmax,
109 LockTupleMode mode, bool is_update,
110 TransactionId *result_xmax, uint16 *result_infomask,
111 uint16 *result_infomask2);
112 static HTSU_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
113 ItemPointer ctid, TransactionId xid,
115 static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
116 uint16 *new_infomask2);
117 static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
119 static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
120 LockTupleMode lockmode);
121 static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
122 Relation rel, ItemPointer ctid, XLTW_Oper oper,
124 static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
125 uint16 infomask, Relation rel, int *remaining);
126 static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
127 static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
132 * Each tuple lock mode has a corresponding heavyweight lock, and one or two
133 * corresponding MultiXactStatuses (one to merely lock tuples, another one to
134 * update them). This table (and the macros below) helps us determine the
135 * heavyweight lock mode and MultiXactStatus values to use for any particular
136 * tuple lock strength.
138 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
148 tupleLockExtraInfo[MaxLockTupleMode + 1] =
150 { /* LockTupleKeyShare */
152 MultiXactStatusForKeyShare,
153 -1 /* KeyShare does not allow updating tuples */
155 { /* LockTupleShare */
157 MultiXactStatusForShare,
158 -1 /* Share does not allow updating tuples */
160 { /* LockTupleNoKeyExclusive */
162 MultiXactStatusForNoKeyUpdate,
163 MultiXactStatusNoKeyUpdate
165 { /* LockTupleExclusive */
167 MultiXactStatusForUpdate,
168 MultiXactStatusUpdate
172 /* Get the LOCKMODE for a given MultiXactStatus */
173 #define LOCKMODE_from_mxstatus(status) \
174 (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
177 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
178 * This is more readable than having every caller translate it to lock.h's
181 #define LockTupleTuplock(rel, tup, mode) \
182 LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
183 #define UnlockTupleTuplock(rel, tup, mode) \
184 UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
185 #define ConditionalLockTupleTuplock(rel, tup, mode) \
186 ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
189 * This table maps each particular MultiXactStatus value to the
190 * corresponding tuple lock strength value.
192 static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
194 LockTupleKeyShare, /* ForKeyShare */
195 LockTupleShare, /* ForShare */
196 LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
197 LockTupleExclusive, /* ForUpdate */
198 LockTupleNoKeyExclusive, /* NoKeyUpdate */
199 LockTupleExclusive /* Update */
202 /* Get the LockTupleMode for a given MultiXactStatus */
203 #define TUPLOCK_from_mxstatus(status) \
204 (MultiXactStatusLock[(status)])
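/*
 * Illustrative sketch (not part of the original source): how the two lookup
 * tables above compose.  A MultiXactStatus is first mapped to a
 * LockTupleMode, which in turn selects the heavyweight lock mode stored in
 * tupleLockExtraInfo.  Assuming the enum orderings shown in the tables:
 *
 *		MultiXactStatus st = MultiXactStatusForShare;
 *		LockTupleMode   m  = TUPLOCK_from_mxstatus(st);		yields LockTupleShare
 *		LOCKMODE        hw = LOCKMODE_from_mxstatus(st);	equals tupleLockExtraInfo[m].hwlock
 */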
206 /* ----------------------------------------------------------------
207 * heap support routines
208 * ----------------------------------------------------------------
212 * initscan - scan code common to heap_beginscan and heap_rescan
216 initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
222 * Determine the number of blocks we have to scan.
224 * It is sufficient to do this once at scan start, since any tuples added
225 * while the scan is in progress will be invisible to my snapshot anyway.
226 * (That is not true when using a non-MVCC snapshot. However, we couldn't
227 * guarantee to return tuples added after scan start anyway, since they
228 * might go into pages we already scanned. To guarantee consistent
229 * results for a non-MVCC snapshot, the caller must hold some higher-level
230 * lock that ensures the interesting tuple(s) won't change.)
232 if (scan->rs_parallel != NULL)
233 scan->rs_nblocks = scan->rs_parallel->phs_nblocks;
235 scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
238 * If the table is large relative to NBuffers, use a bulk-read access
239 * strategy and enable synchronized scanning (see syncscan.c). Although
240 * the thresholds for these features could be different, we make them the
241 * same so that there are only two behaviors to tune rather than four.
242 * (However, some callers need to be able to disable one or both of these
243 * behaviors, independently of the size of the table; also there is a GUC
244 * variable that can disable synchronized scanning.)
246 * Note that heap_parallelscan_initialize has a very similar test; if you
247 * change this, consider changing that one, too.
249 if (!RelationUsesLocalBuffers(scan->rs_rd) &&
250 scan->rs_nblocks > NBuffers / 4)
252 allow_strat = scan->rs_allow_strat;
253 allow_sync = scan->rs_allow_sync;
256 allow_strat = allow_sync = false;
260 /* During a rescan, keep the previous strategy object. */
261 if (scan->rs_strategy == NULL)
262 scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
266 if (scan->rs_strategy != NULL)
267 FreeAccessStrategy(scan->rs_strategy);
268 scan->rs_strategy = NULL;
271 if (scan->rs_parallel != NULL)
273 /* For parallel scan, believe whatever ParallelHeapScanDesc says. */
274 scan->rs_syncscan = scan->rs_parallel->phs_syncscan;
276 else if (keep_startblock)
279 * When rescanning, we want to keep the previous startblock setting,
280 * so that rewinding a cursor doesn't generate surprising results.
281 * Reset the active syncscan setting, though.
283 scan->rs_syncscan = (allow_sync && synchronize_seqscans);
285 else if (allow_sync && synchronize_seqscans)
287 scan->rs_syncscan = true;
288 scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
292 scan->rs_syncscan = false;
293 scan->rs_startblock = 0;
296 scan->rs_numblocks = InvalidBlockNumber;
297 scan->rs_inited = false;
298 scan->rs_ctup.t_data = NULL;
299 ItemPointerSetInvalid(&scan->rs_ctup.t_self);
300 scan->rs_cbuf = InvalidBuffer;
301 scan->rs_cblock = InvalidBlockNumber;
303 /* page-at-a-time fields are always invalid when not rs_inited */
306 * copy the scan key, if appropriate
309 memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));
312 * Currently, we don't have a stats counter for bitmap heap scans (but the
313 * underlying bitmap index scans will be counted) or sample scans (we only
314 * update stats for tuple fetches there)
316 if (!scan->rs_bitmapscan && !scan->rs_samplescan)
317 pgstat_count_heap_scan(scan->rs_rd);
321 * heap_setscanlimits - restrict range of a heapscan
323 * startBlk is the page to start at
324 * numBlks is number of pages to scan (InvalidBlockNumber means "all")
327 heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
329 Assert(!scan->rs_inited); /* else too late to change */
330 Assert(!scan->rs_syncscan); /* else rs_startblock is significant */
332 /* Check startBlk is valid (but allow case of zero blocks...) */
333 Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
335 scan->rs_startblock = startBlk;
336 scan->rs_numblocks = numBlks;
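/*
 * Illustrative sketch (hypothetical "rel" and "snapshot"; not code from this
 * file).  Because of the Asserts above, a caller that wants to restrict the
 * scanned block range must start the scan with synchronized scanning
 * disabled and set the limits before the first heap_getnext call:
 *
 *		HeapScanDesc scan;
 *
 *		scan = heap_beginscan_strat(rel, snapshot, 0, NULL,
 *									true,		allow_strat
 *									false);		allow_sync off, so rs_syncscan stays false
 *		heap_setscanlimits(scan, 0, 4);			scan only blocks 0..3
 *		while (heap_getnext(scan, ForwardScanDirection) != NULL)
 *			;
 *		heap_endscan(scan);
 */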
340 * heapgetpage - subroutine for heapgettup()
342 * This routine reads and pins the specified page of the relation.
343 * In page-at-a-time mode it performs additional work, namely determining
344 * which tuples on the page are visible.
347 heapgetpage(HeapScanDesc scan, BlockNumber page)
354 OffsetNumber lineoff;
358 Assert(page < scan->rs_nblocks);
360 /* release previous scan buffer, if any */
361 if (BufferIsValid(scan->rs_cbuf))
363 ReleaseBuffer(scan->rs_cbuf);
364 scan->rs_cbuf = InvalidBuffer;
368 * Be sure to check for interrupts at least once per page. Checks at
369 * higher code levels won't be able to stop a seqscan that encounters many
370 * pages' worth of consecutive dead tuples.
372 CHECK_FOR_INTERRUPTS();
374 /* read page using selected strategy */
375 scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page,
376 RBM_NORMAL, scan->rs_strategy);
377 scan->rs_cblock = page;
379 if (!scan->rs_pageatatime)
382 buffer = scan->rs_cbuf;
383 snapshot = scan->rs_snapshot;
386 * Prune and repair fragmentation for the whole page, if possible.
388 heap_page_prune_opt(scan->rs_rd, buffer);
391 * We must hold share lock on the buffer content while examining tuple
392 * visibility. Afterwards, however, the tuples we have found to be
393 * visible are guaranteed good as long as we hold the buffer pin.
395 LockBuffer(buffer, BUFFER_LOCK_SHARE);
397 dp = (Page) BufferGetPage(buffer);
398 lines = PageGetMaxOffsetNumber(dp);
402 * If the all-visible flag indicates that all tuples on the page are
403 * visible to everyone, we can skip the per-tuple visibility tests.
405 * Note: In hot standby, a tuple that's already visible to all
406 * transactions in the master might still be invisible to a read-only
407 * transaction in the standby. We partly handle this problem by tracking
408 * the minimum xmin of visible tuples as the cut-off XID while marking a
409 * page all-visible on master and WAL log that along with the visibility
410 * map SET operation. In hot standby, we wait for (or abort) all
411 * transactions that might potentially not see one or more tuples on the
412 * page. That's how index-only scans work fine in hot standby. A crucial
413 * difference between index-only scans and heap scans is that the
414 * index-only scan completely relies on the visibility map whereas a heap
415 * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
416 * the page-level flag can be trusted in the same way, because it might
417 * get propagated somehow without being explicitly WAL-logged, e.g. via a
418 * full page write. Until we can prove that beyond doubt, let's check each
419 * tuple for visibility the hard way.
421 all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
423 for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
427 if (ItemIdIsNormal(lpp))
429 HeapTupleData loctup;
432 loctup.t_tableOid = RelationGetRelid(scan->rs_rd);
433 loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
434 loctup.t_len = ItemIdGetLength(lpp);
435 ItemPointerSet(&(loctup.t_self), page, lineoff);
440 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
442 CheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
446 scan->rs_vistuples[ntup++] = lineoff;
450 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
452 Assert(ntup <= MaxHeapTuplesPerPage);
453 scan->rs_ntuples = ntup;
457 * heapgettup - fetch next heap tuple
459 * Initialize the scan if not already done; then advance to the next
460 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
461 * or set scan->rs_ctup.t_data = NULL if no more tuples.
463 * dir == NoMovementScanDirection means "re-fetch the tuple indicated
466 * Note: the reason nkeys/key are passed separately, even though they are
467 * kept in the scan descriptor, is that the caller may not want us to check
470 * Note: when we fall off the end of the scan in either direction, we
471 * reset rs_inited. This means that a further request with the same
472 * scan direction will restart the scan, which is a bit odd, but a
473 * request with the opposite scan direction will start a fresh scan
474 * in the proper direction. The latter is required behavior for cursors,
475 * while the former case is generally undefined behavior in Postgres
476 * so we don't care too much.
480 heapgettup(HeapScanDesc scan,
485 HeapTuple tuple = &(scan->rs_ctup);
486 Snapshot snapshot = scan->rs_snapshot;
487 bool backward = ScanDirectionIsBackward(dir);
492 OffsetNumber lineoff;
497 * calculate next starting lineoff, given scan direction
499 if (ScanDirectionIsForward(dir))
501 if (!scan->rs_inited)
504 * return null immediately if relation is empty
506 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
508 Assert(!BufferIsValid(scan->rs_cbuf));
509 tuple->t_data = NULL;
512 if (scan->rs_parallel != NULL)
514 page = heap_parallelscan_nextpage(scan);
516 /* Other processes might have already finished the scan. */
517 if (page == InvalidBlockNumber)
519 Assert(!BufferIsValid(scan->rs_cbuf));
520 tuple->t_data = NULL;
525 page = scan->rs_startblock; /* first page */
526 heapgetpage(scan, page);
527 lineoff = FirstOffsetNumber; /* first offnum */
528 scan->rs_inited = true;
532 /* continue from previously returned page/tuple */
533 page = scan->rs_cblock; /* current page */
534 lineoff = /* next offnum */
535 OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
538 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
540 dp = (Page) BufferGetPage(scan->rs_cbuf);
541 lines = PageGetMaxOffsetNumber(dp);
542 /* page and lineoff now reference the physically next tid */
544 linesleft = lines - lineoff + 1;
548 /* backward parallel scan not supported */
549 Assert(scan->rs_parallel == NULL);
551 if (!scan->rs_inited)
554 * return null immediately if relation is empty
556 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
558 Assert(!BufferIsValid(scan->rs_cbuf));
559 tuple->t_data = NULL;
564 * Disable reporting to syncscan logic in a backwards scan; it's
565 * not very likely anyone else is doing the same thing at the same
566 * time, and much more likely that we'll just bollix things for
569 scan->rs_syncscan = false;
570 /* start from last page of the scan */
571 if (scan->rs_startblock > 0)
572 page = scan->rs_startblock - 1;
574 page = scan->rs_nblocks - 1;
575 heapgetpage(scan, page);
579 /* continue from previously returned page/tuple */
580 page = scan->rs_cblock; /* current page */
583 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
585 dp = (Page) BufferGetPage(scan->rs_cbuf);
586 lines = PageGetMaxOffsetNumber(dp);
588 if (!scan->rs_inited)
590 lineoff = lines; /* final offnum */
591 scan->rs_inited = true;
595 lineoff = /* previous offnum */
596 OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
598 /* page and lineoff now reference the physically previous tid */
605 * ``no movement'' scan direction: refetch prior tuple
607 if (!scan->rs_inited)
609 Assert(!BufferIsValid(scan->rs_cbuf));
610 tuple->t_data = NULL;
614 page = ItemPointerGetBlockNumber(&(tuple->t_self));
615 if (page != scan->rs_cblock)
616 heapgetpage(scan, page);
618 /* Since the tuple was previously fetched, needn't lock page here */
619 dp = (Page) BufferGetPage(scan->rs_cbuf);
620 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
621 lpp = PageGetItemId(dp, lineoff);
622 Assert(ItemIdIsNormal(lpp));
624 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
625 tuple->t_len = ItemIdGetLength(lpp);
631 * advance the scan until we find a qualifying tuple or run out of stuff
634 lpp = PageGetItemId(dp, lineoff);
637 while (linesleft > 0)
639 if (ItemIdIsNormal(lpp))
643 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
644 tuple->t_len = ItemIdGetLength(lpp);
645 ItemPointerSet(&(tuple->t_self), page, lineoff);
648 * if current tuple qualifies, return it.
650 valid = HeapTupleSatisfiesVisibility(tuple,
654 CheckForSerializableConflictOut(valid, scan->rs_rd, tuple,
655 scan->rs_cbuf, snapshot);
657 if (valid && key != NULL)
658 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
663 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
669 * otherwise move to the next item on the page
674 --lpp; /* move back in this page's ItemId array */
679 ++lpp; /* move forward in this page's ItemId array */
685 * if we get here, it means we've exhausted the items on this page and
686 * it's time to move to the next.
688 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
691 * advance to next/prior page and detect end of scan
695 finished = (page == scan->rs_startblock) ||
696 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
698 page = scan->rs_nblocks;
701 else if (scan->rs_parallel != NULL)
703 page = heap_parallelscan_nextpage(scan);
704 finished = (page == InvalidBlockNumber);
709 if (page >= scan->rs_nblocks)
711 finished = (page == scan->rs_startblock) ||
712 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
715 * Report our new scan position for synchronization purposes. We
716 * don't do that when moving backwards, however. That would just
717 * mess up any other forward-moving scanners.
719 * Note: we do this before checking for end of scan so that the
720 * final state of the position hint is back at the start of the
721 * rel. That's not strictly necessary, but otherwise when you run
722 * the same query multiple times the starting position would shift
723 * a little bit backwards on every invocation, which is confusing.
724 * We don't guarantee any specific ordering in general, though.
726 if (scan->rs_syncscan)
727 ss_report_location(scan->rs_rd, page);
731 * return NULL if we've exhausted all the pages
735 if (BufferIsValid(scan->rs_cbuf))
736 ReleaseBuffer(scan->rs_cbuf);
737 scan->rs_cbuf = InvalidBuffer;
738 scan->rs_cblock = InvalidBlockNumber;
739 tuple->t_data = NULL;
740 scan->rs_inited = false;
744 heapgetpage(scan, page);
746 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
748 dp = (Page) BufferGetPage(scan->rs_cbuf);
749 lines = PageGetMaxOffsetNumber((Page) dp);
754 lpp = PageGetItemId(dp, lines);
758 lineoff = FirstOffsetNumber;
759 lpp = PageGetItemId(dp, FirstOffsetNumber);
765 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
767 * Same API as heapgettup, but used in page-at-a-time mode
769 * The internal logic is much the same as heapgettup's too, but there are some
770 * differences: we do not take the buffer content lock (that only needs to
771 * happen inside heapgetpage), and we iterate through just the tuples listed
772 * in rs_vistuples[] rather than all tuples on the page. Notice that
773 * lineindex is 0-based, where the corresponding loop variable lineoff in
774 * heapgettup is 1-based.
778 heapgettup_pagemode(HeapScanDesc scan,
783 HeapTuple tuple = &(scan->rs_ctup);
784 bool backward = ScanDirectionIsBackward(dir);
790 OffsetNumber lineoff;
795 * calculate next starting lineindex, given scan direction
797 if (ScanDirectionIsForward(dir))
799 if (!scan->rs_inited)
802 * return null immediately if relation is empty
804 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
806 Assert(!BufferIsValid(scan->rs_cbuf));
807 tuple->t_data = NULL;
810 if (scan->rs_parallel != NULL)
812 page = heap_parallelscan_nextpage(scan);
814 /* Other processes might have already finished the scan. */
815 if (page == InvalidBlockNumber)
817 Assert(!BufferIsValid(scan->rs_cbuf));
818 tuple->t_data = NULL;
823 page = scan->rs_startblock; /* first page */
824 heapgetpage(scan, page);
826 scan->rs_inited = true;
830 /* continue from previously returned page/tuple */
831 page = scan->rs_cblock; /* current page */
832 lineindex = scan->rs_cindex + 1;
835 dp = (Page) BufferGetPage(scan->rs_cbuf);
836 lines = scan->rs_ntuples;
837 /* page and lineindex now reference the next visible tid */
839 linesleft = lines - lineindex;
843 /* backward parallel scan not supported */
844 Assert(scan->rs_parallel == NULL);
846 if (!scan->rs_inited)
849 * return null immediately if relation is empty
851 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
853 Assert(!BufferIsValid(scan->rs_cbuf));
854 tuple->t_data = NULL;
859 * Disable reporting to syncscan logic in a backwards scan; it's
860 * not very likely anyone else is doing the same thing at the same
861 * time, and much more likely that we'll just bollix things for
864 scan->rs_syncscan = false;
865 /* start from last page of the scan */
866 if (scan->rs_startblock > 0)
867 page = scan->rs_startblock - 1;
869 page = scan->rs_nblocks - 1;
870 heapgetpage(scan, page);
874 /* continue from previously returned page/tuple */
875 page = scan->rs_cblock; /* current page */
878 dp = (Page) BufferGetPage(scan->rs_cbuf);
879 lines = scan->rs_ntuples;
881 if (!scan->rs_inited)
883 lineindex = lines - 1;
884 scan->rs_inited = true;
888 lineindex = scan->rs_cindex - 1;
890 /* page and lineindex now reference the previous visible tid */
892 linesleft = lineindex + 1;
897 * ``no movement'' scan direction: refetch prior tuple
899 if (!scan->rs_inited)
901 Assert(!BufferIsValid(scan->rs_cbuf));
902 tuple->t_data = NULL;
906 page = ItemPointerGetBlockNumber(&(tuple->t_self));
907 if (page != scan->rs_cblock)
908 heapgetpage(scan, page);
910 /* Since the tuple was previously fetched, needn't lock page here */
911 dp = (Page) BufferGetPage(scan->rs_cbuf);
912 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
913 lpp = PageGetItemId(dp, lineoff);
914 Assert(ItemIdIsNormal(lpp));
916 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
917 tuple->t_len = ItemIdGetLength(lpp);
919 /* check that rs_cindex is in sync */
920 Assert(scan->rs_cindex < scan->rs_ntuples);
921 Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
927 * advance the scan until we find a qualifying tuple or run out of stuff
932 while (linesleft > 0)
934 lineoff = scan->rs_vistuples[lineindex];
935 lpp = PageGetItemId(dp, lineoff);
936 Assert(ItemIdIsNormal(lpp));
938 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
939 tuple->t_len = ItemIdGetLength(lpp);
940 ItemPointerSet(&(tuple->t_self), page, lineoff);
943 * if current tuple qualifies, return it.
949 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
953 scan->rs_cindex = lineindex;
959 scan->rs_cindex = lineindex;
964 * otherwise move to the next item on the page
974 * if we get here, it means we've exhausted the items on this page and
975 * it's time to move to the next.
979 finished = (page == scan->rs_startblock) ||
980 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
982 page = scan->rs_nblocks;
985 else if (scan->rs_parallel != NULL)
987 page = heap_parallelscan_nextpage(scan);
988 finished = (page == InvalidBlockNumber);
993 if (page >= scan->rs_nblocks)
995 finished = (page == scan->rs_startblock) ||
996 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
999 * Report our new scan position for synchronization purposes. We
1000 * don't do that when moving backwards, however. That would just
1001 * mess up any other forward-moving scanners.
1003 * Note: we do this before checking for end of scan so that the
1004 * final state of the position hint is back at the start of the
1005 * rel. That's not strictly necessary, but otherwise when you run
1006 * the same query multiple times the starting position would shift
1007 * a little bit backwards on every invocation, which is confusing.
1008 * We don't guarantee any specific ordering in general, though.
1010 if (scan->rs_syncscan)
1011 ss_report_location(scan->rs_rd, page);
1015 * return NULL if we've exhausted all the pages
1019 if (BufferIsValid(scan->rs_cbuf))
1020 ReleaseBuffer(scan->rs_cbuf);
1021 scan->rs_cbuf = InvalidBuffer;
1022 scan->rs_cblock = InvalidBlockNumber;
1023 tuple->t_data = NULL;
1024 scan->rs_inited = false;
1028 heapgetpage(scan, page);
1030 dp = (Page) BufferGetPage(scan->rs_cbuf);
1031 lines = scan->rs_ntuples;
1034 lineindex = lines - 1;
1041 #if defined(DISABLE_COMPLEX_MACRO)
1043 * This is formatted oddly so that the correspondence to the macro
1044 * definition in access/htup_details.h is maintained.
1047 fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
1053 (*(isnull) = false),
1054 HeapTupleNoNulls(tup) ?
1056 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
1058 fetchatt((tupleDesc)->attrs[(attnum) - 1],
1059 (char *) (tup)->t_data + (tup)->t_data->t_hoff +
1060 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
1063 nocachegetattr((tup), (attnum), (tupleDesc))
1067 att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
1074 nocachegetattr((tup), (attnum), (tupleDesc))
1084 #endif /* defined(DISABLE_COMPLEX_MACRO) */
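/*
 * Illustrative sketch (hypothetical "tuple" and "rel"; not code from this
 * file): fetching an attribute with fastgetattr.  Attribute numbers are
 * 1-based, and *isnull reports whether the stored value is NULL.
 *
 *		bool	isnull;
 *		Datum	d;
 *
 *		d = fastgetattr(tuple, 1, RelationGetDescr(rel), &isnull);
 *		if (!isnull)
 *			... convert d with the DatumGetXXX macro matching the column type ...
 */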
1087 /* ----------------------------------------------------------------
1088 * heap access method interface
1089 * ----------------------------------------------------------------
1093 * relation_open - open any relation by relation OID
1095 * If lockmode is not "NoLock", the specified kind of lock is
1096 * obtained on the relation. (Generally, NoLock should only be
1097 * used if the caller knows it has some appropriate lock on the
1098 * relation already.)
1100 * An error is raised if the relation does not exist.
1102 * NB: a "relation" is anything with a pg_class entry. The caller is
1103 * expected to check whether the relkind is something it can handle.
1107 relation_open(Oid relationId, LOCKMODE lockmode)
1111 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1113 /* Get the lock before trying to open the relcache entry */
1114 if (lockmode != NoLock)
1115 LockRelationOid(relationId, lockmode);
1117 /* The relcache does all the real work... */
1118 r = RelationIdGetRelation(relationId);
1120 if (!RelationIsValid(r))
1121 elog(ERROR, "could not open relation with OID %u", relationId);
1123 /* Make note that we've accessed a temporary relation */
1124 if (RelationUsesLocalBuffers(r))
1126 if (IsParallelWorker())
1128 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
1129 errmsg("cannot access temporary tables during a parallel operation")));
1130 MyXactAccessedTempRel = true;
1133 pgstat_initstats(r);
1139 * try_relation_open - open any relation by relation OID
1141 * Same as relation_open, except return NULL instead of failing
1142 * if the relation does not exist.
1146 try_relation_open(Oid relationId, LOCKMODE lockmode)
1150 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1152 /* Get the lock first */
1153 if (lockmode != NoLock)
1154 LockRelationOid(relationId, lockmode);
1157 * Now that we have the lock, probe to see if the relation really exists
1160 if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relationId)))
1162 /* Release useless lock */
1163 if (lockmode != NoLock)
1164 UnlockRelationOid(relationId, lockmode);
1169 /* Should be safe to do a relcache load */
1170 r = RelationIdGetRelation(relationId);
1172 if (!RelationIsValid(r))
1173 elog(ERROR, "could not open relation with OID %u", relationId);
1175 /* Make note that we've accessed a temporary relation */
1176 if (RelationUsesLocalBuffers(r))
1178 if (IsParallelWorker())
1180 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
1181 errmsg("cannot access temporary tables during a parallel operation")));
1182 MyXactAccessedTempRel = true;
1185 pgstat_initstats(r);
1191 * relation_openrv - open any relation specified by a RangeVar
1193 * Same as relation_open, but the relation is specified by a RangeVar.
1197 relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
1202 * Check for shared-cache-inval messages before trying to open the
1203 * relation. This is needed even if we already hold a lock on the
1204 * relation, because GRANT/REVOKE are executed without taking any lock on
1205 * the target relation, and we want to be sure we see current ACL
1206 * information. We can skip this if asked for NoLock, on the assumption
1207 * that such a call is not the first one in the current command, and so we
1208 * should be reasonably up-to-date already. (XXX this all could stand to
1209 * be redesigned, but for the moment we'll keep doing this like it's been
1210 * done historically.)
1212 if (lockmode != NoLock)
1213 AcceptInvalidationMessages();
1215 /* Look up and lock the appropriate relation using namespace search */
1216 relOid = RangeVarGetRelid(relation, lockmode, false);
1218 /* Let relation_open do the rest */
1219 return relation_open(relOid, NoLock);
1223 * relation_openrv_extended - open any relation specified by a RangeVar
1225 * Same as relation_openrv, but with an additional missing_ok argument
1226 * allowing a NULL return rather than an error if the relation is not
1227 * found. (Note that some other causes, such as permissions problems,
1228 * will still result in an ereport.)
1232 relation_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
1238 * Check for shared-cache-inval messages before trying to open the
1239 * relation. See comments in relation_openrv().
1241 if (lockmode != NoLock)
1242 AcceptInvalidationMessages();
1244 /* Look up and lock the appropriate relation using namespace search */
1245 relOid = RangeVarGetRelid(relation, lockmode, missing_ok);
1247 /* Return NULL on not-found */
1248 if (!OidIsValid(relOid))
1251 /* Let relation_open do the rest */
1252 return relation_open(relOid, NoLock);
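/*
 * Illustrative sketch (hypothetical RangeVar "rv"; not code from this file).
 * With missing_ok = true the caller must be prepared for a NULL result
 * instead of an error when the relation does not exist:
 *
 *		Relation	rel;
 *
 *		rel = relation_openrv_extended(rv, AccessShareLock, true);
 *		if (rel == NULL)
 *			... handle the missing relation gracefully ...
 *		else
 *			relation_close(rel, AccessShareLock);
 */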
1256 * relation_close - close any relation
1258 * If lockmode is not "NoLock", we then release the specified lock.
1260 * Note that it is often sensible to hold a lock beyond relation_close;
1261 * in that case, the lock is released automatically at xact end.
1265 relation_close(Relation relation, LOCKMODE lockmode)
1267 LockRelId relid = relation->rd_lockInfo.lockRelId;
1269 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1271 /* The relcache does the real work... */
1272 RelationClose(relation);
1274 if (lockmode != NoLock)
1275 UnlockRelationId(&relid, lockmode);
1280 * heap_open - open a heap relation by relation OID
1282 * This is essentially relation_open plus check that the relation
1283 * is not an index nor a composite type. (The caller should also
1284 * check that it's not a view or foreign table before assuming it has storage.)
1289 heap_open(Oid relationId, LOCKMODE lockmode)
1293 r = relation_open(relationId, lockmode);
1295 if (r->rd_rel->relkind == RELKIND_INDEX)
1297 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1298 errmsg("\"%s\" is an index",
1299 RelationGetRelationName(r))));
1300 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1302 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1303 errmsg("\"%s\" is a composite type",
1304 RelationGetRelationName(r))));
1310 * heap_openrv - open a heap relation specified
1311 * by a RangeVar node
1313 * As above, but relation is specified by a RangeVar.
1317 heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
1321 r = relation_openrv(relation, lockmode);
1323 if (r->rd_rel->relkind == RELKIND_INDEX)
1325 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1326 errmsg("\"%s\" is an index",
1327 RelationGetRelationName(r))));
1328 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1330 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1331 errmsg("\"%s\" is a composite type",
1332 RelationGetRelationName(r))));
1338 * heap_openrv_extended - open a heap relation specified
1339 * by a RangeVar node
1341 * As above, but optionally return NULL instead of failing for
1342 * relation-not-found.
1346 heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
1351 r = relation_openrv_extended(relation, lockmode, missing_ok);
1355 if (r->rd_rel->relkind == RELKIND_INDEX)
1357 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1358 errmsg("\"%s\" is an index",
1359 RelationGetRelationName(r))));
1360 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1362 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1363 errmsg("\"%s\" is a composite type",
1364 RelationGetRelationName(r))));
1372 * heap_beginscan - begin relation scan
1374 * heap_beginscan is the "standard" case.
1376 * heap_beginscan_catalog differs in setting up its own temporary snapshot.
1378 * heap_beginscan_strat offers an extended API that lets the caller control
1379 * whether a nondefault buffer access strategy can be used, and whether
1380 * syncscan can be chosen (possibly resulting in the scan not starting from
1381 * block zero). Both of these default to TRUE with plain heap_beginscan.
1383 * heap_beginscan_bm is an alternative entry point for setting up a
1384 * HeapScanDesc for a bitmap heap scan. Although that scan technology is
1385 * really quite unlike a standard seqscan, there is just enough commonality
1386 * to make it worth using the same data structure.
1388 * heap_beginscan_sampling is an alternative entry point for setting up a
1389 * HeapScanDesc for a TABLESAMPLE scan. As with bitmap scans, it's worth
1390 * using the same data structure although the behavior is rather different.
1391 * In addition to the options offered by heap_beginscan_strat, this call
1392 * also allows control of whether page-mode visibility checking is used.
1396 heap_beginscan(Relation relation, Snapshot snapshot,
1397 int nkeys, ScanKey key)
1399 return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1400 true, true, true, false, false, false);
1404 heap_beginscan_catalog(Relation relation, int nkeys, ScanKey key)
1406 Oid relid = RelationGetRelid(relation);
1407 Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1409 return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1410 true, true, true, false, false, true);
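/*
 * Illustrative sketch of a catalog scan (assumes pg_class is already open as
 * "catalogRel"; the key shown is an example, not code from this file).
 * heap_beginscan_catalog registers its own catalog snapshot, which
 * heap_endscan later unregisters via rs_temp_snap:
 *
 *		ScanKeyData	key;
 *		HeapScanDesc scan;
 *		HeapTuple	tup;
 *
 *		ScanKeyInit(&key, Anum_pg_class_relname, BTEqualStrategyNumber,
 *					F_NAMEEQ, CStringGetDatum("my_table"));
 *		scan = heap_beginscan_catalog(catalogRel, 1, &key);
 *		while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *			... process the matching pg_class row ...
 *		heap_endscan(scan);
 */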
1414 heap_beginscan_strat(Relation relation, Snapshot snapshot,
1415 int nkeys, ScanKey key,
1416 bool allow_strat, bool allow_sync)
1418 return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1419 allow_strat, allow_sync, true,
1420 false, false, false);
1424 heap_beginscan_bm(Relation relation, Snapshot snapshot,
1425 int nkeys, ScanKey key)
1427 return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1428 false, false, true, true, false, false);
1432 heap_beginscan_sampling(Relation relation, Snapshot snapshot,
1433 int nkeys, ScanKey key,
1434 bool allow_strat, bool allow_sync, bool allow_pagemode)
1436 return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1437 allow_strat, allow_sync, allow_pagemode,
1438 false, true, false);
1442 heap_beginscan_internal(Relation relation, Snapshot snapshot,
1443 int nkeys, ScanKey key,
1444 ParallelHeapScanDesc parallel_scan,
1447 bool allow_pagemode,
1455 * increment relation ref count while scanning relation
1457 * This is just to make really sure the relcache entry won't go away while
1458 * the scan has a pointer to it. Caller should be holding the rel open
1459 * anyway, so this is redundant in all normal scenarios...
1461 RelationIncrementReferenceCount(relation);
1464 * allocate and initialize scan descriptor
1466 scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1468 scan->rs_rd = relation;
1469 scan->rs_snapshot = snapshot;
1470 scan->rs_nkeys = nkeys;
1471 scan->rs_bitmapscan = is_bitmapscan;
1472 scan->rs_samplescan = is_samplescan;
1473 scan->rs_strategy = NULL; /* set in initscan */
1474 scan->rs_allow_strat = allow_strat;
1475 scan->rs_allow_sync = allow_sync;
1476 scan->rs_temp_snap = temp_snap;
1477 scan->rs_parallel = parallel_scan;
1480 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1482 scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot);
1485 * For a seqscan in a serializable transaction, acquire a predicate lock
1486 * on the entire relation. This is required not only to lock all the
1487 * matching tuples, but also to conflict with new insertions into the
1488 * table. In an indexscan, we take page locks on the index pages covering
1489 * the range specified in the scan qual, but in a heap scan there is
1490 * nothing more fine-grained to lock. A bitmap scan is a different story,
1491 * there we have already scanned the index and locked the index pages
1492 * covering the predicate. But in that case we still have to lock any
1493 * matching heap tuples.
1496 PredicateLockRelation(relation, snapshot);
1498 /* we only need to set this up once */
1499 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1502 * we do this here instead of in initscan() because heap_rescan also calls
1503 * initscan() and we don't want to allocate memory again
1506 scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1508 scan->rs_key = NULL;
1510 initscan(scan, key, false);
1516 * heap_rescan - restart a relation scan
1520 heap_rescan(HeapScanDesc scan,
1524 * unpin scan buffers
1526 if (BufferIsValid(scan->rs_cbuf))
1527 ReleaseBuffer(scan->rs_cbuf);
1530 * reinitialize scan descriptor
1532 initscan(scan, key, true);
1535 * reset parallel scan, if present
1537 if (scan->rs_parallel != NULL)
1539 ParallelHeapScanDesc parallel_scan;
1542 * Caller is responsible for making sure that all workers have
1543 * finished the scan before calling this, so it really shouldn't be
1544 * necessary to acquire the mutex at all. We acquire it anyway, just
1547 parallel_scan = scan->rs_parallel;
1548 SpinLockAcquire(&parallel_scan->phs_mutex);
1549 parallel_scan->phs_cblock = parallel_scan->phs_startblock;
1550 SpinLockRelease(&parallel_scan->phs_mutex);
1555 * heap_rescan_set_params - restart a relation scan after changing params
1557 * This call allows changing the buffer strategy, syncscan, and pagemode
1558 * options before starting a fresh scan. Note that although the actual use
1559 * of syncscan might change (effectively, enabling or disabling reporting),
1560 * the previously selected startblock will be kept.
1564 heap_rescan_set_params(HeapScanDesc scan, ScanKey key,
1565 bool allow_strat, bool allow_sync, bool allow_pagemode)
1567 /* adjust parameters */
1568 scan->rs_allow_strat = allow_strat;
1569 scan->rs_allow_sync = allow_sync;
1570 scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(scan->rs_snapshot);
1571 /* ... and rescan */
1572 heap_rescan(scan, key);
1576 * heap_endscan - end relation scan
1578 * See how to integrate with index scans.
1579 * Check handling of reldesc caching.
1583 heap_endscan(HeapScanDesc scan)
1585 /* Note: no locking manipulations needed */
1588 * unpin scan buffers
1590 if (BufferIsValid(scan->rs_cbuf))
1591 ReleaseBuffer(scan->rs_cbuf);
1594 * decrement relation reference count and free scan descriptor storage
1596 RelationDecrementReferenceCount(scan->rs_rd);
1599 pfree(scan->rs_key);
1601 if (scan->rs_strategy != NULL)
1602 FreeAccessStrategy(scan->rs_strategy);
1604 if (scan->rs_temp_snap)
1605 UnregisterSnapshot(scan->rs_snapshot);
1611 * heap_parallelscan_estimate - estimate storage for ParallelHeapScanDesc
1613 * Sadly, this doesn't reduce to a constant, because the size required
1614 * to serialize the snapshot can vary.
1618 heap_parallelscan_estimate(Snapshot snapshot)
1620 return add_size(offsetof(ParallelHeapScanDescData, phs_snapshot_data),
1621 EstimateSnapshotSpace(snapshot));
1625 * heap_parallelscan_initialize - initialize ParallelHeapScanDesc
1627 * Must allow as many bytes of shared memory as returned by
1628 * heap_parallelscan_estimate. Call this just once in the leader
1629 * process; then, individual workers attach via heap_beginscan_parallel.
1633 heap_parallelscan_initialize(ParallelHeapScanDesc target, Relation relation,
1636 target->phs_relid = RelationGetRelid(relation);
1637 target->phs_nblocks = RelationGetNumberOfBlocks(relation);
1638 /* compare phs_syncscan initialization to similar logic in initscan */
1639 target->phs_syncscan = synchronize_seqscans &&
1640 !RelationUsesLocalBuffers(relation) &&
1641 target->phs_nblocks > NBuffers / 4;
1642 SpinLockInit(&target->phs_mutex);
1643 target->phs_cblock = InvalidBlockNumber;
1644 target->phs_startblock = InvalidBlockNumber;
1645 SerializeSnapshot(snapshot, target->phs_snapshot_data);
1649 * heap_beginscan_parallel - join a parallel scan
1651 * Caller must hold a suitable lock on the correct relation.
1655 heap_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan)
1659 Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
1660 snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data);
1661 RegisterSnapshot(snapshot);
1663 return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan,
1664 true, true, true, false, false, true);
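/*
 * Illustrative sketch of the expected parallel-scan call sequence (where the
 * ParallelHeapScanDesc lives in shared memory is the caller's business;
 * "shm_alloc" below is a stand-in name, not a real function):
 *
 *	In the leader:
 *		Size	sz = heap_parallelscan_estimate(snapshot);
 *		ParallelHeapScanDesc pscan = (ParallelHeapScanDesc) shm_alloc(sz);
 *		heap_parallelscan_initialize(pscan, rel, snapshot);
 *
 *	In the leader and in each worker:
 *		HeapScanDesc scan = heap_beginscan_parallel(rel, pscan);
 *		while (heap_getnext(scan, ForwardScanDirection) != NULL)
 *			... each backend sees a disjoint subset of the pages ...
 *		heap_endscan(scan);
 */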
1668 * heap_parallelscan_nextpage - get the next page to scan
1670 * Get the next page to scan. Even if there are no pages left to scan,
1671 * another backend could have grabbed a page to scan and not yet finished
1672 * looking at it, so it doesn't follow that the scan is done when the
1673 * first backend gets an InvalidBlockNumber return.
1677 heap_parallelscan_nextpage(HeapScanDesc scan)
1679 BlockNumber page = InvalidBlockNumber;
1680 BlockNumber sync_startpage = InvalidBlockNumber;
1681 BlockNumber report_page = InvalidBlockNumber;
1682 ParallelHeapScanDesc parallel_scan;
1684 Assert(scan->rs_parallel);
1685 parallel_scan = scan->rs_parallel;
1688 /* Grab the spinlock. */
1689 SpinLockAcquire(&parallel_scan->phs_mutex);
1692 * If the scan's startblock has not yet been initialized, we must do so
1693 * now. If this is not a synchronized scan, we just start at block 0, but
1694 * if it is a synchronized scan, we must get the starting position from
1695 * the synchronized scan machinery. We can't hold the spinlock while
1696 * doing that, though, so release the spinlock, get the information we
1697 * need, and retry. If nobody else has initialized the scan in the
1698 * meantime, we'll fill in the value we fetched on the second time
1701 if (parallel_scan->phs_startblock == InvalidBlockNumber)
1703 if (!parallel_scan->phs_syncscan)
1704 parallel_scan->phs_startblock = 0;
1705 else if (sync_startpage != InvalidBlockNumber)
1706 parallel_scan->phs_startblock = sync_startpage;
1709 SpinLockRelease(&parallel_scan->phs_mutex);
1710 sync_startpage = ss_get_location(scan->rs_rd, scan->rs_nblocks);
1713 parallel_scan->phs_cblock = parallel_scan->phs_startblock;
1717 * The current block number is the next one that needs to be scanned,
1718 * unless it's InvalidBlockNumber already, in which case there are no more
1719 * blocks to scan. After remembering the current value, we must advance
1720 * it so that the next call to this function returns the next block to be
1723 page = parallel_scan->phs_cblock;
1724 if (page != InvalidBlockNumber)
1726 parallel_scan->phs_cblock++;
1727 if (parallel_scan->phs_cblock >= scan->rs_nblocks)
1728 parallel_scan->phs_cblock = 0;
1729 if (parallel_scan->phs_cblock == parallel_scan->phs_startblock)
1731 parallel_scan->phs_cblock = InvalidBlockNumber;
1732 report_page = parallel_scan->phs_startblock;
1736 /* Release the lock. */
1737 SpinLockRelease(&parallel_scan->phs_mutex);
1740 * Report scan location. Normally, we report the current page number.
1741 * When we reach the end of the scan, though, we report the starting page,
1742 * not the ending page, just so the starting positions for later scans
1743 * don't slew backwards. We only report the position at the end of the
1744 * scan once, though: subsequent callers will report nothing, since
1745 * they will have page == InvalidBlockNumber.
1747 if (scan->rs_syncscan)
1749 if (report_page == InvalidBlockNumber)
1751 if (report_page != InvalidBlockNumber)
1752 ss_report_location(scan->rs_rd, report_page);
1759 * heap_getnext - retrieve next tuple in scan
1761 * Fix to work with index relations.
1762 * We don't return the buffer anymore, but you can get it from the
1763 * returned HeapTuple.
1768 #define HEAPDEBUG_1 \
1769 elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
1770 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
1771 #define HEAPDEBUG_2 \
1772 elog(DEBUG2, "heap_getnext returning EOS")
1773 #define HEAPDEBUG_3 \
1774 elog(DEBUG2, "heap_getnext returning tuple")
1779 #endif /* !defined(HEAPDEBUGALL) */
1783 heap_getnext(HeapScanDesc scan, ScanDirection direction)
1785 /* Note: no locking manipulations needed */
1787 HEAPDEBUG_1; /* heap_getnext( info ) */
1789 if (scan->rs_pageatatime)
1790 heapgettup_pagemode(scan, direction,
1791 scan->rs_nkeys, scan->rs_key);
1793 heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1795 if (scan->rs_ctup.t_data == NULL)
1797 HEAPDEBUG_2; /* heap_getnext returning EOS */
1802 * if we get here it means we have a new current scan tuple, so point to
1803 * the proper return buffer and return the tuple.
1805 HEAPDEBUG_3; /* heap_getnext returning tuple */
1807 pgstat_count_heap_getnext(scan->rs_rd);
1809 return &(scan->rs_ctup);
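/*
 * Illustrative sketch of the standard sequential-scan calling pattern
 * (hypothetical "relid" and processing step; not code from this file):
 *
 *		Relation	rel;
 *		HeapScanDesc scan;
 *		HeapTuple	tuple;
 *
 *		rel = heap_open(relid, AccessShareLock);
 *		scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
 *		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *			... examine tuple; it is only valid until the next call ...
 *		heap_endscan(scan);
 *		heap_close(rel, AccessShareLock);
 */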
1813 * heap_fetch - retrieve tuple with given tid
1815 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1816 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1817 * against the specified snapshot.
1819 * If successful (tuple found and passes snapshot time qual), then *userbuf
1820 * is set to the buffer holding the tuple and TRUE is returned. The caller
1821 * must unpin the buffer when done with the tuple.
1823 * If the tuple is not found (ie, item number references a deleted slot),
1824 * then tuple->t_data is set to NULL and FALSE is returned.
1826 * If the tuple is found but fails the time qual check, then FALSE is returned
1827 * but tuple->t_data is left pointing to the tuple.
1829 * keep_buf determines what is done with the buffer in the FALSE-result cases.
1830 * When the caller specifies keep_buf = true, we retain the pin on the buffer
1831 * and return it in *userbuf (so the caller must eventually unpin it); when
1832 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
1834 * stats_relation is the relation to charge the heap_fetch operation against
1835 * for statistical purposes. (This could be the heap rel itself, an
1836 * associated index, or NULL to not count the fetch at all.)
1838 * heap_fetch does not follow HOT chains: only the exact TID requested will
1841 * It is somewhat inconsistent that we ereport() on invalid block number but
1842 * return false on invalid item number. There are a couple of reasons though.
1843 * One is that the caller can relatively easily check the block number for
1844 * validity, but cannot check the item number without reading the page
1845 * himself. Another is that when we are following a t_ctid link, we can be
1846 * reasonably confident that the page number is valid (since VACUUM shouldn't
1847 * truncate off the destination page without having killed the referencing
1848 * tuple first), but the item number might well not be good.
1851 heap_fetch(Relation relation,
1856 Relation stats_relation)
1858 ItemPointer tid = &(tuple->t_self);
1862 OffsetNumber offnum;
1866 * Fetch and pin the appropriate page of the relation.
1868 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1871 * Need share lock on buffer to examine tuple commit status.
1873 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1874 page = BufferGetPage(buffer);
1877 * We'd better check for out-of-range offnum in case of VACUUM since the
1880 offnum = ItemPointerGetOffsetNumber(tid);
1881 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1883 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1888 ReleaseBuffer(buffer);
1889 *userbuf = InvalidBuffer;
1891 tuple->t_data = NULL;
1896 * get the item line pointer corresponding to the requested tid
1898 lp = PageGetItemId(page, offnum);
1901 * Must check for deleted tuple.
1903 if (!ItemIdIsNormal(lp))
1905 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1910 ReleaseBuffer(buffer);
1911 *userbuf = InvalidBuffer;
1913 tuple->t_data = NULL;
1918 * fill in *tuple fields
1920 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1921 tuple->t_len = ItemIdGetLength(lp);
1922 tuple->t_tableOid = RelationGetRelid(relation);
1925 * check time qualification of tuple, then release lock
1927 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1930 PredicateLockTuple(relation, tuple, snapshot);
1932 CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1934 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1939 * All checks passed, so return the tuple as valid. Caller is now
1940 * responsible for releasing the buffer.
1944 /* Count the successful fetch against appropriate rel, if any */
1945 if (stats_relation != NULL)
1946 pgstat_count_heap_fetch(stats_relation);
1951 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1956 ReleaseBuffer(buffer);
1957 *userbuf = InvalidBuffer;
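/*
 * Illustrative sketch (hypothetical "tid", "rel" and "snapshot"; the
 * argument order follows the declaration in access/heapam.h and is not shown
 * in full above).  The caller sets tuple.t_self before the call and must
 * release the returned buffer on success:
 *
 *		HeapTupleData tuple;
 *		Buffer		buffer;
 *
 *		tuple.t_self = tid;
 *		if (heap_fetch(rel, snapshot, &tuple, &buffer, false, NULL))
 *		{
 *			... tuple.t_data stays valid while the buffer pin is held ...
 *			ReleaseBuffer(buffer);
 *		}
 */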
1964 * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1966 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1967 * of a HOT chain), and buffer is the buffer holding this tuple. We search
1968 * for the first chain member satisfying the given snapshot. If one is
1969 * found, we update *tid to reference that tuple's offset number, and
1970 * return TRUE. If no match, return FALSE without modifying *tid.
1972 * heapTuple is a caller-supplied buffer. When a match is found, we return
1973 * the tuple here, in addition to updating *tid. If no match is found, the
1974 * contents of this buffer on return are undefined.
1976 * If all_dead is not NULL, we check non-visible tuples to see if they are
1977 * globally dead; *all_dead is set TRUE if all members of the HOT chain
1978 * are vacuumable, FALSE if not.
1980 * Unlike heap_fetch, the caller must already have pin and (at least) share
1981 * lock on the buffer; it is still pinned/locked at exit. Also unlike
1982 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
1985 heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1986 Snapshot snapshot, HeapTuple heapTuple,
1987 bool *all_dead, bool first_call)
1989 Page dp = (Page) BufferGetPage(buffer);
1990 TransactionId prev_xmax = InvalidTransactionId;
1991 OffsetNumber offnum;
1992 bool at_chain_start;
1996 /* If this is not the first call, previous call returned a (live!) tuple */
1998 *all_dead = first_call;
2000 Assert(TransactionIdIsValid(RecentGlobalXmin));
2002 Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
2003 offnum = ItemPointerGetOffsetNumber(tid);
2004 at_chain_start = first_call;
2007 heapTuple->t_self = *tid;
2009 /* Scan through possible multiple members of HOT-chain */
2014 /* check for bogus TID */
2015 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
2018 lp = PageGetItemId(dp, offnum);
2020 /* check for unused, dead, or redirected items */
2021 if (!ItemIdIsNormal(lp))
2023 /* We should only see a redirect at start of chain */
2024 if (ItemIdIsRedirected(lp) && at_chain_start)
2026 /* Follow the redirect */
2027 offnum = ItemIdGetRedirect(lp);
2028 at_chain_start = false;
2031 /* else must be end of chain */
2035 heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
2036 heapTuple->t_len = ItemIdGetLength(lp);
2037 heapTuple->t_tableOid = RelationGetRelid(relation);
2038 ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
2041 * Shouldn't see a HEAP_ONLY tuple at chain start.
2043 if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
2047 * The xmin should match the previous xmax value, else chain is
2050 if (TransactionIdIsValid(prev_xmax) &&
2051 !TransactionIdEquals(prev_xmax,
2052 HeapTupleHeaderGetXmin(heapTuple->t_data)))
2056 * When first_call is true (and thus, skip is initially false) we'll
2057 * return the first tuple we find. But on later passes, heapTuple
2058 * will initially be pointing to the tuple we returned last time.
2059 * Returning it again would be incorrect (and would loop forever), so
2060 * we skip it and return the next match we find.
2065 * For the benefit of logical decoding, have t_self point at the
2066 * element of the HOT chain we're currently investigating instead
2067 * of the root tuple of the HOT chain. This is important because
2068 * the *Satisfies routine for historical mvcc snapshots needs the
2069 * correct tid to decide about the visibility in some cases.
2071 ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
2073 /* If it's visible per the snapshot, we must return it */
2074 valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2075 CheckForSerializableConflictOut(valid, relation, heapTuple,
2077 /* reset to original, non-redirected, tid */
2078 heapTuple->t_self = *tid;
2082 ItemPointerSetOffsetNumber(tid, offnum);
2083 PredicateLockTuple(relation, heapTuple, snapshot);
2092 * If we can't see it, maybe no one else can either. At caller
2093 * request, check whether all chain members are dead to all
2096 if (all_dead && *all_dead &&
2097 !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
2101 * Check to see if HOT chain continues past this tuple; if so fetch
2102 * the next offnum and loop around.
2104 if (HeapTupleIsHotUpdated(heapTuple))
2106 Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
2107 ItemPointerGetBlockNumber(tid));
2108 offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
2109 at_chain_start = false;
2110 prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
2113 break; /* end of chain */
2120 * heap_hot_search - search HOT chain for tuple satisfying snapshot
2122 * This has the same API as heap_hot_search_buffer, except that the caller
2123 * does not provide the buffer containing the page, rather we access it
2127 heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
2132 HeapTupleData heapTuple;
2134 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2135 LockBuffer(buffer, BUFFER_LOCK_SHARE);
2136 result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
2137 &heapTuple, all_dead, true);
2138 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2139 ReleaseBuffer(buffer);
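/*
 * Illustrative sketch (hypothetical caller state, e.g. an index AM holding a
 * root TID; not code from this file):
 *
 *		bool	all_dead;
 *
 *		if (heap_hot_search(&tid, rel, snapshot, &all_dead))
 *			... *tid was updated to the visible chain member ...
 *		else if (all_dead)
 *			... the whole HOT chain is vacuumable; the caller may, for
 *			    instance, mark its index entry killed ...
 */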
2144 * heap_get_latest_tid - get the latest tid of a specified tuple
2146 * Actually, this gets the latest version that is visible according to
2147 * the passed snapshot. You can pass SnapshotDirty to get the very latest,
2148 * possibly uncommitted version.
2150 * *tid is both an input and an output parameter: it is updated to
2151 * show the latest version of the row. Note that it will not be changed
2152 * if no version of the row passes the snapshot test.
2155 heap_get_latest_tid(Relation relation,
2160 ItemPointerData ctid;
2161 TransactionId priorXmax;
2163 /* this is to avoid Assert failures on bad input */
2164 if (!ItemPointerIsValid(tid))
2168 * Since this can be called with user-supplied TID, don't trust the input
2169 * too much. (RelationGetNumberOfBlocks is an expensive check, so we
2170 * don't check t_ctid links again this way. Note that it would not do to
2171 * call it just once and save the result, either.)
2173 blk = ItemPointerGetBlockNumber(tid);
2174 if (blk >= RelationGetNumberOfBlocks(relation))
2175 elog(ERROR, "block number %u is out of range for relation \"%s\"",
2176 blk, RelationGetRelationName(relation));
2179 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
2180 * need to examine, and *tid is the TID we will return if ctid turns out to be bogus.
2183 * Note that we will loop until we reach the end of the t_ctid chain.
2184 * Depending on the snapshot passed, there might be at most one visible
2185 * version of the row, but we don't try to optimize for that.
2188 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
2193 OffsetNumber offnum;
2199 * Read, pin, and lock the page.
2201 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
2202 LockBuffer(buffer, BUFFER_LOCK_SHARE);
2203 page = BufferGetPage(buffer);
2206 * Check for bogus item number. This is not treated as an error
2207 * condition because it can happen while following a t_ctid link. We
2208 * just assume that the prior tid is OK and return it unchanged.
2210 offnum = ItemPointerGetOffsetNumber(&ctid);
2211 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
2213 UnlockReleaseBuffer(buffer);
2216 lp = PageGetItemId(page, offnum);
2217 if (!ItemIdIsNormal(lp))
2219 UnlockReleaseBuffer(buffer);
2223 /* OK to access the tuple */
2225 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2226 tp.t_len = ItemIdGetLength(lp);
2227 tp.t_tableOid = RelationGetRelid(relation);
2230 * After following a t_ctid link, we might arrive at an unrelated
2231 * tuple. Check for XMIN match.
2233 if (TransactionIdIsValid(priorXmax) &&
2234 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
2236 UnlockReleaseBuffer(buffer);
2241 * Check time qualification of tuple; if visible, set it as the new result candidate.
2244 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2245 CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2250 * If there's a valid t_ctid link, follow it, else we're done.
2252 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2253 HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
2254 ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2256 UnlockReleaseBuffer(buffer);
2260 ctid = tp.t_data->t_ctid;
2261 priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2262 UnlockReleaseBuffer(buffer);
2268 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
2270 * This is called after we have waited for the XMAX transaction to terminate.
2271 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
2272 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
2273 * hint bit if possible --- but beware that that may not yet be possible,
2274 * if the transaction committed asynchronously.
2276 * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
2277 * even if it commits.
2279 * Hence callers should look only at XMAX_INVALID.
2281 * Note this is not allowed for tuples whose xmax is a multixact.
2284 UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
2286 Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
2287 Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
2289 if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
2291 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
2292 TransactionIdDidCommit(xid))
2293 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
2296 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
2297 InvalidTransactionId);
2303 * GetBulkInsertState - prepare status object for a bulk insert
2306 GetBulkInsertState(void)
2308 BulkInsertState bistate;
2310 bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2311 bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2312 bistate->current_buf = InvalidBuffer;
2317 * FreeBulkInsertState - clean up after finishing a bulk insert
2320 FreeBulkInsertState(BulkInsertState bistate)
2322 if (bistate->current_buf != InvalidBuffer)
2323 ReleaseBuffer(bistate->current_buf);
2324 FreeAccessStrategy(bistate->strategy);
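/*
 * A minimal sketch of how the two routines above are meant to be paired,
 * assuming the caller already holds a suitable lock on the relation and has
 * formed the tuples with heap_form_tuple.  The helper name bulk_insert_sketch
 * is invented for this example.
 */
static void
bulk_insert_sketch(Relation rel, HeapTuple *tups, int ntups)
{
	BulkInsertState bistate = GetBulkInsertState();
	CommandId	cid = GetCurrentCommandId(true);
	int			i;

	for (i = 0; i < ntups; i++)
		heap_insert(rel, tups[i], cid, 0, bistate);

	FreeBulkInsertState(bistate);
}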
2330 * heap_insert - insert tuple into a heap
2332 * The new tuple is stamped with current transaction ID and the specified
2335 * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
2336 * logged in WAL, even for a non-temp relation. Safe usage of this behavior
2337 * requires that we arrange that all new tuples go into new pages not
2338 * containing any tuples from other transactions, and that the relation gets
2339 * fsync'd before commit. (See also heap_sync() comments)
2341 * The HEAP_INSERT_SKIP_FSM option is passed directly to
2342 * RelationGetBufferForTuple, which see for more info.
2344 * HEAP_INSERT_FROZEN should only be specified for inserts into
2345 * relfilenodes created during the current subtransaction and when
2346 * there are no prior snapshots or pre-existing portals open.
2347 * This causes rows to be frozen, which is an MVCC violation and
2348 * requires explicit options chosen by the user.
2350 * HEAP_INSERT_SPECULATIVE is used on so-called "speculative insertions",
2351 * which can be backed out afterwards without aborting the whole transaction.
2352 * Other sessions can wait for the speculative insertion to be confirmed,
2353 * turning it into a regular tuple, or aborted, as if it never existed.
2354 * Speculatively inserted tuples behave as "value locks" of short duration,
2355 * used to implement INSERT .. ON CONFLICT.
2357 * Note that most of these options will be applied when inserting into the
2358 * heap's TOAST table, too, if the tuple requires any out-of-line data. Only
2359 * HEAP_INSERT_SPECULATIVE is explicitly ignored, as the toast data does
2360 * not partake in speculative insertion.
2362 * The BulkInsertState object (if any; bistate can be NULL for default
2363 * behavior) is also just passed through to RelationGetBufferForTuple.
2365 * The return value is the OID assigned to the tuple (either here or by the
2366 * caller), or InvalidOid if no OID. The header fields of *tup are updated
2367 * to match the stored tuple; in particular tup->t_self receives the actual
2368 * TID where the tuple was stored. But note that any toasting of fields
2369 * within the tuple data is NOT reflected into *tup.
2372 heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2373 int options, BulkInsertState bistate)
2375 TransactionId xid = GetCurrentTransactionId();
2378 Buffer vmbuffer = InvalidBuffer;
2379 bool all_visible_cleared = false;
2382 * Fill in tuple header fields, assign an OID, and toast the tuple if necessary.
2385 * Note: below this point, heaptup is the data we actually intend to store
2386 * into the relation; tup is the caller's original untoasted data.
2388 heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2391 * Find buffer to insert this tuple into. If the page is all visible,
2392 * this will also pin the requisite visibility map page.
2394 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2395 InvalidBuffer, options, bistate,
2399 * We're about to do the actual insert -- but check for conflict first, to
2400 * avoid possibly having to roll back work we've just done.
2402 * This is safe without a recheck as long as there is no possibility of
2403 * another process scanning the page between this check and the insert
2404 * being visible to the scan (i.e., an exclusive buffer content lock is
2405 * continuously held from this point until the tuple insert is visible).
2407 * For a heap insert, we only need to check for table-level SSI locks. Our
2408 * new tuple can't possibly conflict with existing tuple locks, and heap
2409 * page locks are only consolidated versions of tuple locks; they do not
2410 * lock "gaps" as index page locks do. So we don't need to specify a
2411 * buffer when making the call, which makes for a faster check.
2413 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2415 /* NO EREPORT(ERROR) from here till changes are logged */
2416 START_CRIT_SECTION();
2418 RelationPutHeapTuple(relation, buffer, heaptup,
2419 (options & HEAP_INSERT_SPECULATIVE) != 0);
2421 if (PageIsAllVisible(BufferGetPage(buffer)))
2423 all_visible_cleared = true;
2424 PageClearAllVisible(BufferGetPage(buffer));
2425 visibilitymap_clear(relation,
2426 ItemPointerGetBlockNumber(&(heaptup->t_self)),
2431 * XXX Should we set PageSetPrunable on this page?
2433 * The inserting transaction may eventually abort thus making this tuple
2434 * DEAD and hence available for pruning. Though we don't want to optimize
2435 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2436 * aborted tuple will never be pruned until next vacuum is triggered.
2438 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2441 MarkBufferDirty(buffer);
2444 if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2446 xl_heap_insert xlrec;
2447 xl_heap_header xlhdr;
2449 Page page = BufferGetPage(buffer);
2450 uint8 info = XLOG_HEAP_INSERT;
2454 * If this is a catalog, we need to transmit combocids to properly
2455 * decode, so log that as well.
2457 if (RelationIsAccessibleInLogicalDecoding(relation))
2458 log_heap_new_cid(relation, heaptup);
2461 * If this is the first and only tuple on the page, we can reinit the
2462 * page instead of restoring the whole thing. Set flag, and hide
2463 * buffer references from XLogInsert.
2465 if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2466 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2468 info |= XLOG_HEAP_INIT_PAGE;
2469 bufflags |= REGBUF_WILL_INIT;
2472 xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2474 if (all_visible_cleared)
2475 xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2476 if (options & HEAP_INSERT_SPECULATIVE)
2477 xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2478 Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2481 * For logical decoding, we need the tuple even if we're doing a full
2482 * page write, so make sure it's included even if we take a full-page
2483 * image. (XXX We could alternatively store a pointer into the FPW).
2485 if (RelationIsLogicallyLogged(relation))
2487 xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2488 bufflags |= REGBUF_KEEP_DATA;
2492 XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2494 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2495 xlhdr.t_infomask = heaptup->t_data->t_infomask;
2496 xlhdr.t_hoff = heaptup->t_data->t_hoff;
2499 * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2500 * write the whole page to the xlog, we don't need to store
2501 * xl_heap_header in the xlog.
2503 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2504 XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2505 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2506 XLogRegisterBufData(0,
2507 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2508 heaptup->t_len - SizeofHeapTupleHeader);
2510 /* filtering by origin on a row level is much more efficient */
2511 XLogIncludeOrigin();
2513 recptr = XLogInsert(RM_HEAP_ID, info);
2515 PageSetLSN(page, recptr);
2520 UnlockReleaseBuffer(buffer);
2521 if (vmbuffer != InvalidBuffer)
2522 ReleaseBuffer(vmbuffer);
2525 * If tuple is cachable, mark it for invalidation from the caches in case
2526 * we abort. Note it is OK to do this after releasing the buffer, because
2527 * the heaptup data structure is all in local memory, not in the shared buffer.
2530 CacheInvalidateHeapTuple(relation, heaptup, NULL);
2532 /* Note: speculative insertions are counted too, even if aborted later */
2533 pgstat_count_heap_insert(relation, 1);
2536 * If heaptup is a private copy, release it. Don't forget to copy t_self
2537 * back to the caller's image, too.
2541 tup->t_self = heaptup->t_self;
2542 heap_freetuple(heaptup);
2545 return HeapTupleGetOid(tup);
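/*
 * A sketch of the HEAP_INSERT_SKIP_WAL contract described in the header
 * comment above, assuming the relation's relfilenode was created in the
 * current transaction so that skipping WAL is safe provided the data reaches
 * disk before commit.  The helper name wal_skipping_fill_sketch is invented
 * for this example; error handling is omitted.
 */
static void
wal_skipping_fill_sketch(Relation rel, HeapTuple *tups, int ntups)
{
	CommandId	cid = GetCurrentCommandId(true);
	int			options = HEAP_INSERT_SKIP_WAL | HEAP_INSERT_SKIP_FSM;
	int			i;

	for (i = 0; i < ntups; i++)
		heap_insert(rel, tups[i], cid, options, NULL);

	/* no WAL was emitted, so the heap must be fsync'd before commit */
	heap_sync(rel);
}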
2549 * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2550 * tuple header fields, assigns an OID, and toasts the tuple if necessary.
2551 * Returns a toasted version of the tuple if it was toasted, or the original
2552 * tuple if not. Note that in any case, the header fields are also set in
2553 * the original tuple.
2556 heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2557 CommandId cid, int options)
2560 * For now, parallel operations are required to be strictly read-only.
2561 * Unlike heap_update() and heap_delete(), an insert should never create a
2562 * combo CID, so it might be possible to relax this restriction, but not
2563 * without more thought and testing.
2565 if (IsInParallelMode())
2567 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2568 errmsg("cannot insert tuples during a parallel operation")));
2570 if (relation->rd_rel->relhasoids)
2573 /* this is redundant with an Assert in HeapTupleSetOid */
2574 Assert(tup->t_data->t_infomask & HEAP_HASOID);
2578 * If the object id of this tuple has already been assigned, trust the
2579 * caller. There are a couple of ways this can happen. At initial db
2580 * creation, the backend program sets oids for tuples. When we define
2581 * an index, we set the oid. Finally, in the future, we may allow
2582 * users to set their own object ids in order to support a persistent
2583 * object store (objects need to contain pointers to one another).
2585 if (!OidIsValid(HeapTupleGetOid(tup)))
2586 HeapTupleSetOid(tup, GetNewOid(relation));
2590 /* check there is no space for an OID */
2591 Assert(!(tup->t_data->t_infomask & HEAP_HASOID));
2594 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2595 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2596 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2597 HeapTupleHeaderSetXmin(tup->t_data, xid);
2598 if (options & HEAP_INSERT_FROZEN)
2599 HeapTupleHeaderSetXminFrozen(tup->t_data);
2601 HeapTupleHeaderSetCmin(tup->t_data, cid);
2602 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2603 tup->t_tableOid = RelationGetRelid(relation);
2606 * If the new tuple is too big for storage or contains already toasted
2607 * out-of-line attributes from some other relation, invoke the toaster.
2609 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2610 relation->rd_rel->relkind != RELKIND_MATVIEW)
2612 /* toast table entries should never be recursively toasted */
2613 Assert(!HeapTupleHasExternal(tup));
2616 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2617 return toast_insert_or_update(relation, tup, NULL, options);
2623 * heap_multi_insert - insert multiple tuples into a heap
2625 * This is like heap_insert(), but inserts multiple tuples in one operation.
2626 * That's faster than calling heap_insert() in a loop, because when multiple
2627 * tuples can be inserted on a single page, we can write just a single WAL
2628 * record covering all of them, and only need to lock/unlock the page once.
2630 * Note: this leaks memory into the current memory context. You can create a
2631 * temporary context before calling this, if that's a problem.
2634 heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
2635 CommandId cid, int options, BulkInsertState bistate)
2637 TransactionId xid = GetCurrentTransactionId();
2638 HeapTuple *heaptuples;
2641 char *scratch = NULL;
2645 bool need_tuple_data = RelationIsLogicallyLogged(relation);
2646 bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
2648 needwal = !(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation);
2649 saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2650 HEAP_DEFAULT_FILLFACTOR);
2652 /* Toast and set header data in all the tuples */
2653 heaptuples = palloc(ntuples * sizeof(HeapTuple));
2654 for (i = 0; i < ntuples; i++)
2655 heaptuples[i] = heap_prepare_insert(relation, tuples[i],
2659 * Allocate some memory to use for constructing the WAL record. Using
2660 * palloc() within a critical section is not safe, so we allocate this beforehand.
2664 scratch = palloc(BLCKSZ);
2667 * We're about to do the actual inserts -- but check for conflict first,
2668 * to minimize the possibility of having to roll back work we've just done.
2671 * A check here does not definitively prevent a serialization anomaly;
2672 * that check MUST be done at least past the point of acquiring an
2673 * exclusive buffer content lock on every buffer that will be affected,
2674 * and MAY be done after all inserts are reflected in the buffers and
2675 * those locks are released; otherwise there is a race condition. Since
2676 * multiple buffers can be locked and unlocked in the loop below, and it
2677 * would not be feasible to identify and lock all of those buffers before
2678 * the loop, we must do a final check at the end.
2680 * The check here could be omitted with no loss of correctness; it is
2681 * present strictly as an optimization.
2683 * For heap inserts, we only need to check for table-level SSI locks. Our
2684 * new tuples can't possibly conflict with existing tuple locks, and heap
2685 * page locks are only consolidated versions of tuple locks; they do not
2686 * lock "gaps" as index page locks do. So we don't need to specify a
2687 * buffer when making the call, which makes for a faster check.
2689 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2692 while (ndone < ntuples)
2695 Buffer vmbuffer = InvalidBuffer;
2696 bool all_visible_cleared = false;
2699 CHECK_FOR_INTERRUPTS();
2702 * Find buffer where at least the next tuple will fit. If the page is
2703 * all-visible, this will also pin the requisite visibility map page.
2705 buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2706 InvalidBuffer, options, bistate,
2708 page = BufferGetPage(buffer);
2710 /* NO EREPORT(ERROR) from here till changes are logged */
2711 START_CRIT_SECTION();
2714 * RelationGetBufferForTuple has ensured that the first tuple fits.
2715 * Put that on the page, and then as many other tuples as fit.
2717 RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2718 for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2720 HeapTuple heaptup = heaptuples[ndone + nthispage];
2722 if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2725 RelationPutHeapTuple(relation, buffer, heaptup, false);
2728 * We don't use heap_multi_insert for catalog tuples yet, but
2729 * better be prepared...
2731 if (needwal && need_cids)
2732 log_heap_new_cid(relation, heaptup);
2735 if (PageIsAllVisible(page))
2737 all_visible_cleared = true;
2738 PageClearAllVisible(page);
2739 visibilitymap_clear(relation,
2740 BufferGetBlockNumber(buffer),
2745 * XXX Should we set PageSetPrunable on this page? See heap_insert()
2748 MarkBufferDirty(buffer);
2754 xl_heap_multi_insert *xlrec;
2755 uint8 info = XLOG_HEAP2_MULTI_INSERT;
2758 char *scratchptr = scratch;
2763 * If the page was previously empty, we can reinit the page
2764 * instead of restoring the whole thing.
2766 init = (ItemPointerGetOffsetNumber(&(heaptuples[ndone]->t_self)) == FirstOffsetNumber &&
2767 PageGetMaxOffsetNumber(page) == FirstOffsetNumber + nthispage - 1);
2769 /* allocate xl_heap_multi_insert struct from the scratch area */
2770 xlrec = (xl_heap_multi_insert *) scratchptr;
2771 scratchptr += SizeOfHeapMultiInsert;
2774 * Allocate offsets array, unless we're reinitializing the page,
2775 * in which case the tuples are stored in order starting at
2776 * FirstOffsetNumber and we don't need to store the offsets explicitly.
2780 scratchptr += nthispage * sizeof(OffsetNumber);
2782 /* the rest of the scratch space is used for tuple data */
2783 tupledata = scratchptr;
2785 xlrec->flags = all_visible_cleared ? XLH_INSERT_ALL_VISIBLE_CLEARED : 0;
2786 xlrec->ntuples = nthispage;
2789 * Write out an xl_multi_insert_tuple and the tuple data itself for each tuple.
2792 for (i = 0; i < nthispage; i++)
2794 HeapTuple heaptup = heaptuples[ndone + i];
2795 xl_multi_insert_tuple *tuphdr;
2799 xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2800 /* xl_multi_insert_tuple needs two-byte alignment. */
2801 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2802 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2804 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2805 tuphdr->t_infomask = heaptup->t_data->t_infomask;
2806 tuphdr->t_hoff = heaptup->t_data->t_hoff;
2808 /* write bitmap [+ padding] [+ oid] + data */
2809 datalen = heaptup->t_len - SizeofHeapTupleHeader;
2811 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2813 tuphdr->datalen = datalen;
2814 scratchptr += datalen;
2816 totaldatalen = scratchptr - tupledata;
2817 Assert((scratchptr - scratch) < BLCKSZ);
2819 if (need_tuple_data)
2820 xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2823 * Signal that this is the last xl_heap_multi_insert record
2824 * emitted by this call to heap_multi_insert(). Needed for logical
2825 * decoding so it knows when to clean up temporary data.
2827 if (ndone + nthispage == ntuples)
2828 xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
2832 info |= XLOG_HEAP_INIT_PAGE;
2833 bufflags |= REGBUF_WILL_INIT;
2837 * If we're doing logical decoding, include the new tuple data
2838 * even if we take a full-page image of the page.
2840 if (need_tuple_data)
2841 bufflags |= REGBUF_KEEP_DATA;
2844 XLogRegisterData((char *) xlrec, tupledata - scratch);
2845 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2847 XLogRegisterBufData(0, tupledata, totaldatalen);
2849 /* filtering by origin on a row level is much more efficient */
2850 XLogIncludeOrigin();
2852 recptr = XLogInsert(RM_HEAP2_ID, info);
2854 PageSetLSN(page, recptr);
2859 UnlockReleaseBuffer(buffer);
2860 if (vmbuffer != InvalidBuffer)
2861 ReleaseBuffer(vmbuffer);
2867 * We're done with the actual inserts. Check for conflicts again, to
2868 * ensure that all rw-conflicts in to these inserts are detected. Without
2869 * this final check, a sequential scan of the heap may have locked the
2870 * table after the "before" check, missing one opportunity to detect the
2871 * conflict, and then scanned the table before the new tuples were there,
2872 * missing the other chance to detect the conflict.
2874 * For heap inserts, we only need to check for table-level SSI locks. Our
2875 * new tuples can't possibly conflict with existing tuple locks, and heap
2876 * page locks are only consolidated versions of tuple locks; they do not
2877 * lock "gaps" as index page locks do. So we don't need to specify a
2878 * buffer when making the call.
2880 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2883 * If tuples are cachable, mark them for invalidation from the caches in
2884 * case we abort. Note it is OK to do this after releasing the buffer,
2885 * because the heaptuples data structure is all in local memory, not in
2886 * the shared buffer.
2888 if (IsCatalogRelation(relation))
2890 for (i = 0; i < ntuples; i++)
2891 CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2895 * Copy t_self fields back to the caller's original tuples. This does
2896 * nothing for untoasted tuples (tuples[i] == heaptuples[i]), but it's
2897 * probably faster to always copy than check.
2899 for (i = 0; i < ntuples; i++)
2900 tuples[i]->t_self = heaptuples[i]->t_self;
2902 pgstat_count_heap_insert(relation, ntuples);
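/*
 * A minimal usage sketch for heap_multi_insert, assuming the tuples were
 * formed with heap_form_tuple and the caller holds a suitable lock on the
 * relation.  As the header comment notes, running this inside a short-lived
 * memory context bounds the memory the call leaks.  The helper name
 * multi_insert_sketch is invented for this example.
 */
static void
multi_insert_sketch(Relation rel, HeapTuple *tups, int ntups)
{
	heap_multi_insert(rel, tups, ntups, GetCurrentCommandId(true), 0, NULL);
}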
2906 * simple_heap_insert - insert a tuple
2908 * Currently, this routine differs from heap_insert only in supplying
2909 * a default command ID and not allowing access to the speedup options.
2911 * This should be used rather than using heap_insert directly in most places
2912 * where we are modifying system catalogs.
2915 simple_heap_insert(Relation relation, HeapTuple tup)
2917 return heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
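/*
 * A sketch of the usual catalog-insert pattern around simple_heap_insert,
 * assuming values/nulls match the catalog's tuple descriptor; updating the
 * catalog's indexes afterwards (e.g. CatalogUpdateIndexes) is left out.
 * The helper name catalog_insert_sketch is invented for this example.
 */
static Oid
catalog_insert_sketch(Relation catrel, Datum *values, bool *nulls)
{
	HeapTuple	tup = heap_form_tuple(RelationGetDescr(catrel), values, nulls);
	Oid			oid = simple_heap_insert(catrel, tup);

	heap_freetuple(tup);
	return oid;
}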
2921 * Given infomask/infomask2, compute the bits that must be saved in the
2922 * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2923 * xl_heap_lock_updated WAL records.
2925 * See fix_infomask_from_infobits.
2928 compute_infobits(uint16 infomask, uint16 infomask2)
2931 ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2932 ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2933 ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2934 /* note we ignore HEAP_XMAX_SHR_LOCK here */
2935 ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2936 ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2937 XLHL_KEYS_UPDATED : 0);
2941 * Given two versions of the same t_infomask for a tuple, compare them and
2942 * return whether the relevant status for a tuple Xmax has changed. This is
2943 * used after a buffer lock has been released and reacquired: we want to ensure
2944 * that the tuple state continues to be the same as it was when we previously examined it.
2947 * Note the Xmax field itself must be compared separately.
2950 xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2952 const uint16 interesting =
2953 HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2955 if ((new_infomask & interesting) != (old_infomask & interesting))
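/*
 * A sketch of the recheck idiom this helper supports, assuming the caller
 * saved the raw xmax and infomask before releasing the buffer lock (as
 * heap_delete and heap_update do below).  The helper name
 * xmax_state_changed_sketch is invented for this example.
 */
static bool
xmax_state_changed_sketch(HeapTupleHeader htup, uint16 saved_infomask,
						  TransactionId saved_xwait)
{
	/* true when the tuple's xmax situation moved under us while unlocked */
	return xmax_infomask_changed(htup->t_infomask, saved_infomask) ||
		!TransactionIdEquals(HeapTupleHeaderGetRawXmax(htup), saved_xwait);
}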
2962 * heap_delete - delete a tuple
2964 * NB: do not call this directly unless you are prepared to deal with
2965 * concurrent-update conditions. Use simple_heap_delete instead.
2967 * relation - table to be modified (caller must hold suitable lock)
2968 * tid - TID of tuple to be deleted
2969 * cid - delete command ID (used for visibility test, and stored into
2970 * cmax if successful)
2971 * crosscheck - if not InvalidSnapshot, also check tuple against this
2972 * wait - true if should wait for any conflicting update to commit/abort
2973 * hufd - output parameter, filled in failure cases (see below)
2975 * Normal, successful return value is HeapTupleMayBeUpdated, which
2976 * actually means we did delete it. Failure return codes are
2977 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2978 * (the last only possible if wait == false).
2980 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
2981 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
2982 * (the last only for HeapTupleSelfUpdated, since we
2983 * cannot obtain cmax from a combocid generated by another transaction).
2984 * See comments for struct HeapUpdateFailureData for additional info.
2987 heap_delete(Relation relation, ItemPointer tid,
2988 CommandId cid, Snapshot crosscheck, bool wait,
2989 HeapUpdateFailureData *hufd)
2992 TransactionId xid = GetCurrentTransactionId();
2998 Buffer vmbuffer = InvalidBuffer;
2999 TransactionId new_xmax;
3000 uint16 new_infomask,
3002 bool have_tuple_lock = false;
3004 bool all_visible_cleared = false;
3005 HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
3006 bool old_key_copied = false;
3008 Assert(ItemPointerIsValid(tid));
3011 * Forbid this during a parallel operation, lest it allocate a combocid.
3012 * Other workers might need that combocid for visibility checks, and we
3013 * have no provision for broadcasting it to them.
3015 if (IsInParallelMode())
3017 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3018 errmsg("cannot delete tuples during a parallel operation")));
3020 block = ItemPointerGetBlockNumber(tid);
3021 buffer = ReadBuffer(relation, block);
3022 page = BufferGetPage(buffer);
3025 * Before locking the buffer, pin the visibility map page if it appears to
3026 * be necessary. Since we haven't got the lock yet, someone else might be
3027 * in the middle of changing this, so we'll need to recheck after we have the lock.
3030 if (PageIsAllVisible(page))
3031 visibilitymap_pin(relation, block, &vmbuffer);
3033 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3036 * If we didn't pin the visibility map page and the page has become all
3037 * visible while we were busy locking the buffer, we'll have to unlock and
3038 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
3039 * unfortunate, but hopefully shouldn't happen often.
3041 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3043 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3044 visibilitymap_pin(relation, block, &vmbuffer);
3045 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3048 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3049 Assert(ItemIdIsNormal(lp));
3051 tp.t_tableOid = RelationGetRelid(relation);
3052 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3053 tp.t_len = ItemIdGetLength(lp);
3057 result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
3059 if (result == HeapTupleInvisible)
3061 UnlockReleaseBuffer(buffer);
3063 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3064 errmsg("attempted to delete invisible tuple")));
3066 else if (result == HeapTupleBeingUpdated && wait)
3068 TransactionId xwait;
3071 /* must copy state data before unlocking buffer */
3072 xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
3073 infomask = tp.t_data->t_infomask;
3076 * Sleep until concurrent transaction ends -- except when there's a
3077 * single locker and it's our own transaction. Note we don't care
3078 * which lock mode the locker has, because we need the strongest one.
3080 * Before sleeping, we need to acquire tuple lock to establish our
3081 * priority for the tuple (see heap_lock_tuple). LockTuple will
3082 * release us when we are next-in-line for the tuple.
3084 * If we are forced to "start over" below, we keep the tuple lock;
3085 * this arranges that we stay at the head of the line while rechecking tuple state.
3088 if (infomask & HEAP_XMAX_IS_MULTI)
3090 /* wait for multixact */
3091 if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3092 LockTupleExclusive))
3094 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3096 /* acquire tuple lock, if necessary */
3097 heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3098 LockWaitBlock, &have_tuple_lock);
3100 /* wait for multixact */
3101 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
3102 relation, &(tp.t_self), XLTW_Delete,
3104 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3107 * If xwait had just locked the tuple then some other xact
3108 * could update this tuple before we get to this point. Check
3109 * for xmax change, and start over if so.
3111 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3112 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3118 * You might think the multixact is necessarily done here, but not
3119 * so: it could have surviving members, namely our own xact or
3120 * other subxacts of this backend. It is legal for us to delete
3121 * the tuple in either case, however (the latter case is
3122 * essentially a situation of upgrading our former shared lock to
3123 * exclusive). We don't bother changing the on-disk hint bits
3124 * since we are about to overwrite the xmax altogether.
3127 else if (!TransactionIdIsCurrentTransactionId(xwait))
3130 * Wait for regular transaction to end; but first, acquire tuple lock.
3133 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3134 heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3135 LockWaitBlock, &have_tuple_lock);
3136 XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3137 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3140 * xwait is done, but if xwait had just locked the tuple then some
3141 * other xact could update this tuple before we get to this point.
3142 * Check for xmax change, and start over if so.
3144 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3145 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3149 /* Otherwise check if it committed or aborted */
3150 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3154 * We may overwrite if previous xmax aborted, or if it committed but
3155 * only locked the tuple without updating it.
3157 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3158 HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
3159 HeapTupleHeaderIsOnlyLocked(tp.t_data))
3160 result = HeapTupleMayBeUpdated;
3162 result = HeapTupleUpdated;
3165 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3167 /* Perform additional check for transaction-snapshot mode RI updates */
3168 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3169 result = HeapTupleUpdated;
3172 if (result != HeapTupleMayBeUpdated)
3174 Assert(result == HeapTupleSelfUpdated ||
3175 result == HeapTupleUpdated ||
3176 result == HeapTupleBeingUpdated);
3177 Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
3178 hufd->ctid = tp.t_data->t_ctid;
3179 hufd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3180 if (result == HeapTupleSelfUpdated)
3181 hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3183 hufd->cmax = InvalidCommandId;
3184 UnlockReleaseBuffer(buffer);
3185 if (have_tuple_lock)
3186 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3187 if (vmbuffer != InvalidBuffer)
3188 ReleaseBuffer(vmbuffer);
3193 * We're about to do the actual delete -- check for conflict first, to
3194 * avoid possibly having to roll back work we've just done.
3196 * This is safe without a recheck as long as there is no possibility of
3197 * another process scanning the page between this check and the delete
3198 * being visible to the scan (i.e., an exclusive buffer content lock is
3199 * continuously held from this point until the tuple delete is visible).
3201 CheckForSerializableConflictIn(relation, &tp, buffer);
3203 /* replace cid with a combo cid if necessary */
3204 HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3207 * Compute replica identity tuple before entering the critical section so
3208 * we don't PANIC upon a memory allocation failure.
3210 old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3213 * If this is the first possibly-multixact-able operation in the current
3214 * transaction, set my per-backend OldestMemberMXactId setting. We can be
3215 * certain that the transaction will never become a member of any older
3216 * MultiXactIds than that. (We have to do this even if we end up just
3217 * using our own TransactionId below, since some other backend could
3218 * incorporate our XID into a MultiXact immediately afterwards.)
3220 MultiXactIdSetOldestMember();
3222 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3223 tp.t_data->t_infomask, tp.t_data->t_infomask2,
3224 xid, LockTupleExclusive, true,
3225 &new_xmax, &new_infomask, &new_infomask2);
3227 START_CRIT_SECTION();
3230 * If this transaction commits, the tuple will become DEAD sooner or
3231 * later. Set flag that this page is a candidate for pruning once our xid
3232 * falls below the OldestXmin horizon. If the transaction finally aborts,
3233 * the subsequent page pruning will be a no-op and the hint will be cleared.
3236 PageSetPrunable(page, xid);
3238 if (PageIsAllVisible(page))
3240 all_visible_cleared = true;
3241 PageClearAllVisible(page);
3242 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3246 /* store transaction information of xact deleting the tuple */
3247 tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3248 tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3249 tp.t_data->t_infomask |= new_infomask;
3250 tp.t_data->t_infomask2 |= new_infomask2;
3251 HeapTupleHeaderClearHotUpdated(tp.t_data);
3252 HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3253 HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3254 /* Make sure there is no forward chain link in t_ctid */
3255 tp.t_data->t_ctid = tp.t_self;
3257 MarkBufferDirty(buffer);
3262 * NB: heap_abort_speculative() uses the same xlog record and replay routines.
3265 if (RelationNeedsWAL(relation))
3267 xl_heap_delete xlrec;
3270 /* For logical decode we need combocids to properly decode the catalog */
3271 if (RelationIsAccessibleInLogicalDecoding(relation))
3272 log_heap_new_cid(relation, &tp);
3274 xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
3275 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3276 tp.t_data->t_infomask2);
3277 xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3278 xlrec.xmax = new_xmax;
3280 if (old_key_tuple != NULL)
3282 if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3283 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3285 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3289 XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3291 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3294 * Log replica identity of the deleted tuple if there is one
3296 if (old_key_tuple != NULL)
3298 xl_heap_header xlhdr;
3300 xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3301 xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3302 xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3304 XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3305 XLogRegisterData((char *) old_key_tuple->t_data
3306 + SizeofHeapTupleHeader,
3307 old_key_tuple->t_len
3308 - SizeofHeapTupleHeader);
3311 /* filtering by origin on a row level is much more efficient */
3312 XLogIncludeOrigin();
3314 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3316 PageSetLSN(page, recptr);
3321 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3323 if (vmbuffer != InvalidBuffer)
3324 ReleaseBuffer(vmbuffer);
3327 * If the tuple has toasted out-of-line attributes, we need to delete
3328 * those items too. We have to do this before releasing the buffer
3329 * because we need to look at the contents of the tuple, but it's OK to
3330 * release the content lock on the buffer first.
3332 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3333 relation->rd_rel->relkind != RELKIND_MATVIEW)
3335 /* toast table entries should never be recursively toasted */
3336 Assert(!HeapTupleHasExternal(&tp));
3338 else if (HeapTupleHasExternal(&tp))
3339 toast_delete(relation, &tp);
3342 * Mark tuple for invalidation from system caches at next command
3343 * boundary. We have to do this before releasing the buffer because we
3344 * need to look at the contents of the tuple.
3346 CacheInvalidateHeapTuple(relation, &tp, NULL);
3348 /* Now we can release the buffer */
3349 ReleaseBuffer(buffer);
3352 * Release the lmgr tuple lock, if we had it.
3354 if (have_tuple_lock)
3355 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3357 pgstat_count_heap_delete(relation);
3359 if (old_key_tuple != NULL && old_key_copied)
3360 heap_freetuple(old_key_tuple);
3362 return HeapTupleMayBeUpdated;
3366 * simple_heap_delete - delete a tuple
3368 * This routine may be used to delete a tuple when concurrent updates of
3369 * the target tuple are not expected (for example, because we have a lock
3370 * on the relation associated with the tuple). Any failure is reported via elog().
3374 simple_heap_delete(Relation relation, ItemPointer tid)
3377 HeapUpdateFailureData hufd;
3379 result = heap_delete(relation, tid,
3380 GetCurrentCommandId(true), InvalidSnapshot,
3381 true /* wait for commit */ ,
3385 case HeapTupleSelfUpdated:
3386 /* Tuple was already updated in current command? */
3387 elog(ERROR, "tuple already updated by self");
3390 case HeapTupleMayBeUpdated:
3391 /* done successfully */
3394 case HeapTupleUpdated:
3395 elog(ERROR, "tuple concurrently updated");
3399 elog(ERROR, "unrecognized heap_delete status: %u", result);
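/*
 * A sketch of how simple_heap_delete is commonly driven for catalog rows,
 * assuming the row is located through a syscache lookup; RELOID is used here
 * only as an example cache.  The helper name catalog_delete_sketch is
 * invented for this example.
 */
static void
catalog_delete_sketch(Relation catrel, Oid reloid)
{
	HeapTuple	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(reloid));

	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", reloid);

	simple_heap_delete(catrel, &tup->t_self);
	ReleaseSysCache(tup);
}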
3405 * heap_update - replace a tuple
3407 * NB: do not call this directly unless you are prepared to deal with
3408 * concurrent-update conditions. Use simple_heap_update instead.
3410 * relation - table to be modified (caller must hold suitable lock)
3411 * otid - TID of old tuple to be replaced
3412 * newtup - newly constructed tuple data to store
3413 * cid - update command ID (used for visibility test, and stored into
3414 * cmax/cmin if successful)
3415 * crosscheck - if not InvalidSnapshot, also check old tuple against this
3416 * wait - true if should wait for any conflicting update to commit/abort
3417 * hufd - output parameter, filled in failure cases (see below)
3418 * lockmode - output parameter, filled with lock mode acquired on tuple
3420 * Normal, successful return value is HeapTupleMayBeUpdated, which
3421 * actually means we *did* update it. Failure return codes are
3422 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
3423 * (the last only possible if wait == false).
3425 * On success, the header fields of *newtup are updated to match the new
3426 * stored tuple; in particular, newtup->t_self is set to the TID where the
3427 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
3428 * update was done. However, any TOAST changes in the new tuple's
3429 * data are not reflected into *newtup.
3431 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
3432 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
3433 * (the last only for HeapTupleSelfUpdated, since we
3434 * cannot obtain cmax from a combocid generated by another transaction).
3435 * See comments for struct HeapUpdateFailureData for additional info.
3438 heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3439 CommandId cid, Snapshot crosscheck, bool wait,
3440 HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
3443 TransactionId xid = GetCurrentTransactionId();
3444 Bitmapset *hot_attrs;
3445 Bitmapset *key_attrs;
3446 Bitmapset *id_attrs;
3448 HeapTupleData oldtup;
3450 HeapTuple old_key_tuple = NULL;
3451 bool old_key_copied = false;
3454 MultiXactStatus mxact_status;
3457 vmbuffer = InvalidBuffer,
3458 vmbuffer_new = InvalidBuffer;
3463 bool have_tuple_lock = false;
3468 bool use_hot_update = false;
3470 bool all_visible_cleared = false;
3471 bool all_visible_cleared_new = false;
3472 bool checked_lockers;
3473 bool locker_remains;
3474 TransactionId xmax_new_tuple,
3476 uint16 infomask_old_tuple,
3477 infomask2_old_tuple,
3479 infomask2_new_tuple;
3481 Assert(ItemPointerIsValid(otid));
3484 * Forbid this during a parallel operation, lest it allocate a combocid.
3485 * Other workers might need that combocid for visibility checks, and we
3486 * have no provision for broadcasting it to them.
3488 if (IsInParallelMode())
3490 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3491 errmsg("cannot update tuples during a parallel operation")));
3494 * Fetch the list of attributes to be checked for HOT update. This is
3495 * wasted effort if we fail to update or have to put the new tuple on a
3496 * different page. But we must compute the list before obtaining buffer
3497 * lock --- in the worst case, if we are doing an update on one of the
3498 * relevant system catalogs, we could deadlock if we try to fetch the list
3499 * later. In any case, the relcache caches the data so this is usually
3502 * Note that we get a copy here, so we need not worry about relcache flush
3503 * happening midway through.
3505 hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL);
3506 key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
3507 id_attrs = RelationGetIndexAttrBitmap(relation,
3508 INDEX_ATTR_BITMAP_IDENTITY_KEY);
3510 block = ItemPointerGetBlockNumber(otid);
3511 buffer = ReadBuffer(relation, block);
3512 page = BufferGetPage(buffer);
3515 * Before locking the buffer, pin the visibility map page if it appears to
3516 * be necessary. Since we haven't got the lock yet, someone else might be
3517 * in the middle of changing this, so we'll need to recheck after we have the lock.
3520 if (PageIsAllVisible(page))
3521 visibilitymap_pin(relation, block, &vmbuffer);
3523 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3525 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3526 Assert(ItemIdIsNormal(lp));
3529 * Fill in enough data in oldtup for HeapSatisfiesHOTandKeyUpdate to work properly.
3532 oldtup.t_tableOid = RelationGetRelid(relation);
3533 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3534 oldtup.t_len = ItemIdGetLength(lp);
3535 oldtup.t_self = *otid;
3537 /* the new tuple is ready, except for this: */
3538 newtup->t_tableOid = RelationGetRelid(relation);
3540 /* Fill in OID for newtup */
3541 if (relation->rd_rel->relhasoids)
3544 /* this is redundant with an Assert in HeapTupleSetOid */
3545 Assert(newtup->t_data->t_infomask & HEAP_HASOID);
3547 HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
3551 /* check there is no space for an OID */
3552 Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
3556 * If we're not updating any "key" column, we can grab a weaker lock type.
3557 * This allows for more concurrency when we are running simultaneously
3558 * with foreign key checks.
3560 * Note that if a column gets detoasted while executing the update, but
3561 * the value ends up being the same, this test will fail and we will use
3562 * the stronger lock. This is acceptable; the important case to optimize
3563 * is updates that don't manipulate key columns, not those that
3564 * serendipitously arrive at the same key values.
3566 HeapSatisfiesHOTandKeyUpdate(relation, hot_attrs, key_attrs, id_attrs,
3567 &satisfies_hot, &satisfies_key,
3568 &satisfies_id, &oldtup, newtup);
3571 *lockmode = LockTupleNoKeyExclusive;
3572 mxact_status = MultiXactStatusNoKeyUpdate;
3576 * If this is the first possibly-multixact-able operation in the
3577 * current transaction, set my per-backend OldestMemberMXactId
3578 * setting. We can be certain that the transaction will never become a
3579 * member of any older MultiXactIds than that. (We have to do this
3580 * even if we end up just using our own TransactionId below, since
3581 * some other backend could incorporate our XID into a MultiXact
3582 * immediately afterwards.)
3584 MultiXactIdSetOldestMember();
3588 *lockmode = LockTupleExclusive;
3589 mxact_status = MultiXactStatusUpdate;
3594 * Note: beyond this point, use oldtup not otid to refer to old tuple.
3595 * otid may very well point at newtup->t_self, which we will overwrite
3596 * with the new tuple's location, so there's great risk of confusion if we use otid anymore.
3601 checked_lockers = false;
3602 locker_remains = false;
3603 result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3605 /* see below about the "no wait" case */
3606 Assert(result != HeapTupleBeingUpdated || wait);
3608 if (result == HeapTupleInvisible)
3610 UnlockReleaseBuffer(buffer);
3612 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3613 errmsg("attempted to update invisible tuple")));
3615 else if (result == HeapTupleBeingUpdated && wait)
3617 TransactionId xwait;
3619 bool can_continue = false;
3622 * XXX note that we don't consider the "no wait" case here. This
3623 * isn't a problem currently because no caller uses that case, but it
3624 * should be fixed if such a caller is introduced. It wasn't a
3625 * problem previously because this code would always wait, but now
3626 * that some tuple locks do not conflict with one of the lock modes we
3627 * use, it is possible that this case is interesting to handle specially.
3630 * This may cause failures with third-party code that calls
3631 * heap_update directly.
3634 /* must copy state data before unlocking buffer */
3635 xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3636 infomask = oldtup.t_data->t_infomask;
3639 * Now we have to do something about the existing locker. If it's a
3640 * multi, sleep on it; we might be awakened before it is completely
3641 * gone (or even not sleep at all in some cases); we need to preserve
3642 * it as locker, unless it is gone completely.
3644 * If it's not a multi, we need to check for sleeping conditions
3645 * before actually going to sleep. If the update doesn't conflict
3646 * with the locks, we just continue without sleeping (but making sure
3649 * Before sleeping, we need to acquire tuple lock to establish our
3650 * priority for the tuple (see heap_lock_tuple). LockTuple will
3651 * release us when we are next-in-line for the tuple. Note we must
3652 * not acquire the tuple lock until we're sure we're going to sleep;
3653 * otherwise we're open for race conditions with other transactions
3654 * holding the tuple lock which sleep on us.
3656 * If we are forced to "start over" below, we keep the tuple lock;
3657 * this arranges that we stay at the head of the line while rechecking tuple state.
3660 if (infomask & HEAP_XMAX_IS_MULTI)
3662 TransactionId update_xact;
3665 if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3668 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3670 /* acquire tuple lock, if necessary */
3671 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3672 LockWaitBlock, &have_tuple_lock);
3674 /* wait for multixact */
3675 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3676 relation, &oldtup.t_self, XLTW_Update,
3678 checked_lockers = true;
3679 locker_remains = remain != 0;
3680 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3683 * If xwait had just locked the tuple then some other xact
3684 * could update this tuple before we get to this point. Check
3685 * for xmax change, and start over if so.
3687 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3689 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3695 * Note that the multixact may not be done by now. It could have
3696 * surviving members; our own xact or other subxacts of this
3697 * backend, and also any other concurrent transaction that locked
3698 * the tuple with KeyShare if we only got TupleLockUpdate. If
3699 * this is the case, we have to be careful to mark the updated
3700 * tuple with the surviving members in Xmax.
3702 * Note that there could have been another update in the
3703 * MultiXact. In that case, we need to check whether it committed
3704 * or aborted. If it aborted we are safe to update it again;
3705 * otherwise there is an update conflict, and we have to return
3706 * HeapTupleUpdated below.
3708 * In the LockTupleExclusive case, we still need to preserve the
3709 * surviving members: those would include the tuple locks we had
3710 * before this one, which are important to keep in case this subxact aborts.
3713 if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3714 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3716 update_xact = InvalidTransactionId;
3719 * There was no UPDATE in the MultiXact; or it aborted. No
3720 * TransactionIdIsInProgress() call needed here, since we called
3721 * MultiXactIdWait() above.
3723 if (!TransactionIdIsValid(update_xact) ||
3724 TransactionIdDidAbort(update_xact))
3725 can_continue = true;
3727 else if (TransactionIdIsCurrentTransactionId(xwait))
3730 * The only locker is ourselves; we can avoid grabbing the tuple
3731 * lock here, but must preserve our locking information.
3733 checked_lockers = true;
3734 locker_remains = true;
3735 can_continue = true;
3737 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3740 * If it's just a key-share locker, and we're not changing the key
3741 * columns, we don't need to wait for it to end; but we need to
3742 * preserve it as locker.
3744 checked_lockers = true;
3745 locker_remains = true;
3746 can_continue = true;
3751 * Wait for regular transaction to end; but first, acquire tuple
3754 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3755 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3756 LockWaitBlock, &have_tuple_lock);
3757 XactLockTableWait(xwait, relation, &oldtup.t_self,
3759 checked_lockers = true;
3760 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3763 * xwait is done, but if xwait had just locked the tuple then some
3764 * other xact could update this tuple before we get to this point.
3765 * Check for xmax change, and start over if so.
3767 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3768 !TransactionIdEquals(xwait,
3769 HeapTupleHeaderGetRawXmax(oldtup.t_data)))
3772 /* Otherwise check if it committed or aborted */
3773 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3774 if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3775 can_continue = true;
3778 result = can_continue ? HeapTupleMayBeUpdated : HeapTupleUpdated;
3781 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3783 /* Perform additional check for transaction-snapshot mode RI updates */
3784 if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3785 result = HeapTupleUpdated;
3788 if (result != HeapTupleMayBeUpdated)
3790 Assert(result == HeapTupleSelfUpdated ||
3791 result == HeapTupleUpdated ||
3792 result == HeapTupleBeingUpdated);
3793 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3794 hufd->ctid = oldtup.t_data->t_ctid;
3795 hufd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3796 if (result == HeapTupleSelfUpdated)
3797 hufd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3799 hufd->cmax = InvalidCommandId;
3800 UnlockReleaseBuffer(buffer);
3801 if (have_tuple_lock)
3802 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3803 if (vmbuffer != InvalidBuffer)
3804 ReleaseBuffer(vmbuffer);
3805 bms_free(hot_attrs);
3806 bms_free(key_attrs);
3811 * If we didn't pin the visibility map page and the page has become all
3812 * visible while we were busy locking the buffer, or during some
3813 * subsequent window during which we had it unlocked, we'll have to unlock
3814 * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3815 * bit unfortunate, especially since we'll now have to recheck whether the
3816 * tuple has been locked or updated under us, but hopefully it won't
3817 * happen very often.
3819 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3821 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3822 visibilitymap_pin(relation, block, &vmbuffer);
3823 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3827 /* Fill in transaction status data */
3830 * If the tuple we're updating is locked, we need to preserve the locking
3831 * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3833 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3834 oldtup.t_data->t_infomask,
3835 oldtup.t_data->t_infomask2,
3836 xid, *lockmode, true,
3837 &xmax_old_tuple, &infomask_old_tuple,
3838 &infomask2_old_tuple);
3841 * And also prepare an Xmax value for the new copy of the tuple. If there
3842 * was no xmax previously, or there was one but all lockers are now gone,
3843 * then use InvalidXid; otherwise, get the xmax from the old tuple. (In
3844 * rare cases that might also be InvalidXid and yet not have the
3845 * HEAP_XMAX_INVALID bit set; that's fine.)
3847 if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3848 (checked_lockers && !locker_remains))
3849 xmax_new_tuple = InvalidTransactionId;
3851 xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3853 if (!TransactionIdIsValid(xmax_new_tuple))
3855 infomask_new_tuple = HEAP_XMAX_INVALID;
3856 infomask2_new_tuple = 0;
3861 * If we found a valid Xmax for the new tuple, then the infomask bits
3862 * to use on the new tuple depend on what was there on the old one.
3863 * Note that since we're doing an update, the only possibility is that
3864 * the lockers had FOR KEY SHARE lock.
3866 if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3868 GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3869 &infomask2_new_tuple);
3873 infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3874 infomask2_new_tuple = 0;
3879 * Prepare the new tuple with the appropriate initial values of Xmin and
3880 * Xmax, as well as initial infomask bits as computed above.
3882 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3883 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3884 HeapTupleHeaderSetXmin(newtup->t_data, xid);
3885 HeapTupleHeaderSetCmin(newtup->t_data, cid);
3886 newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3887 newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3888 HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3891 * Replace cid with a combo cid if necessary. Note that we already put
3892 * the plain cid into the new tuple.
3894 HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3897 * If the toaster needs to be activated, OR if the new tuple will not fit
3898 * on the same page as the old, then we need to release the content lock
3899 * (but not the pin!) on the old tuple's buffer while we are off doing
3900 * TOAST and/or table-file-extension work. We must mark the old tuple to
3901 * show that it's already being updated, else other processes may try to
3902 * update it themselves.
3904 * We need to invoke the toaster if there are already any out-of-line
3905 * toasted values present, or if the new tuple is over-threshold.
3907 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3908 relation->rd_rel->relkind != RELKIND_MATVIEW)
3910 /* toast table entries should never be recursively toasted */
3911 Assert(!HeapTupleHasExternal(&oldtup));
3912 Assert(!HeapTupleHasExternal(newtup));
3916 need_toast = (HeapTupleHasExternal(&oldtup) ||
3917 HeapTupleHasExternal(newtup) ||
3918 newtup->t_len > TOAST_TUPLE_THRESHOLD);
3920 pagefree = PageGetHeapFreeSpace(page);
3922 newtupsize = MAXALIGN(newtup->t_len);
3924 if (need_toast || newtupsize > pagefree)
3926 /* Clear obsolete visibility flags ... */
3927 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3928 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3929 HeapTupleClearHotUpdated(&oldtup);
3930 /* ... and store info about transaction updating this tuple */
3931 Assert(TransactionIdIsValid(xmax_old_tuple));
3932 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
3933 oldtup.t_data->t_infomask |= infomask_old_tuple;
3934 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
3935 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3936 /* temporarily make it look not-updated */
3937 oldtup.t_data->t_ctid = oldtup.t_self;
3938 already_marked = true;
3939 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3942 * Let the toaster do its thing, if needed.
3944 * Note: below this point, heaptup is the data we actually intend to
3945 * store into the relation; newtup is the caller's original untoasted data.
3950 /* Note we always use WAL and FSM during updates */
3951 heaptup = toast_insert_or_update(relation, newtup, &oldtup, 0);
3952 newtupsize = MAXALIGN(heaptup->t_len);
3958 * Now, do we need a new page for the tuple, or not? This is a bit
3959 * tricky since someone else could have added tuples to the page while
3960 * we weren't looking. We have to recheck the available space after
3961 * reacquiring the buffer lock. But don't bother to do that if the
3962 * former amount of free space is still not enough; it's unlikely
3963 * there's more free now than before.
3965 * What's more, if we need to get a new page, we will need to acquire
3966 * buffer locks on both old and new pages. To avoid deadlock against
3967 * some other backend trying to get the same two locks in the other
3968 * order, we must be consistent about the order we get the locks in.
3969 * We use the rule "lock the lower-numbered page of the relation
3970 * first". To implement this, we must do RelationGetBufferForTuple
3971 * while not holding the lock on the old page, and we must rely on it
3972 * to get the locks on both pages in the correct order.
3974 if (newtupsize > pagefree)
3976 /* Assume there's no chance to put heaptup on same page. */
3977 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3979 &vmbuffer_new, &vmbuffer);
3983 /* Re-acquire the lock on the old tuple's page. */
3984 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3985 /* Re-check using the up-to-date free space */
3986 pagefree = PageGetHeapFreeSpace(page);
3987 if (newtupsize > pagefree)
3990 * Rats, it doesn't fit anymore. We must now unlock and
3991 * relock to avoid deadlock. Fortunately, this path should seldom be taken.
3994 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3995 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3997 &vmbuffer_new, &vmbuffer);
4001 /* OK, it fits here, so we're done. */
4008 /* No TOAST work needed, and it'll fit on same page */
4009 already_marked = false;
4015 * We're about to do the actual update -- check for conflict first, to
4016 * avoid possibly having to roll back work we've just done.
4018 * This is safe without a recheck as long as there is no possibility of
4019 * another process scanning the pages between this check and the update
4020 * being visible to the scan (i.e., exclusive buffer content lock(s) are
4021 * continuously held from this point until the tuple update is visible).
4023 * For the new tuple the only check needed is at the relation level, but
4024 * since both tuples are in the same relation and the check for oldtup
4025 * will include checking the relation level, there is no benefit to a
4026 * separate check for the new tuple.
4028 CheckForSerializableConflictIn(relation, &oldtup, buffer);
4031 * At this point newbuf and buffer are both pinned and locked, and newbuf
4032 * has enough space for the new tuple. If they are the same buffer, only one pin is held.
4036 if (newbuf == buffer)
4039 * Since the new tuple is going into the same page, we might be able
4040 * to do a HOT update. Check if any of the index columns have been
4041 * changed. If not, then HOT update is possible.
4044 use_hot_update = true;
4048 /* Set a hint that the old page could use prune/defrag */
4053 * Compute replica identity tuple before entering the critical section so
4054 * we don't PANIC upon a memory allocation failure.
4055 * ExtractReplicaIdentity() will return NULL if nothing needs to be
4058 old_key_tuple = ExtractReplicaIdentity(relation, &oldtup, !satisfies_id, &old_key_copied);
4060 /* NO EREPORT(ERROR) from here till changes are logged */
4061 START_CRIT_SECTION();
4064 * If this transaction commits, the old tuple will become DEAD sooner or
4065 * later. Set flag that this page is a candidate for pruning once our xid
4066 * falls below the OldestXmin horizon. If the transaction finally aborts,
4067 * the subsequent page pruning will be a no-op and the hint will be cleared.
4070 * XXX Should we set hint on newbuf as well? If the transaction aborts,
4071 * there would be a prunable tuple in the newbuf; but for now we choose
4072 * not to optimize for aborts. Note that heap_xlog_update must be kept in
4073 * sync if this decision changes.
4075 PageSetPrunable(page, xid);
4079 /* Mark the old tuple as HOT-updated */
4080 HeapTupleSetHotUpdated(&oldtup);
4081 /* And mark the new tuple as heap-only */
4082 HeapTupleSetHeapOnly(heaptup);
4083 /* Mark the caller's copy too, in case different from heaptup */
4084 HeapTupleSetHeapOnly(newtup);
4088 /* Make sure tuples are correctly marked as not-HOT */
4089 HeapTupleClearHotUpdated(&oldtup);
4090 HeapTupleClearHeapOnly(heaptup);
4091 HeapTupleClearHeapOnly(newtup);
4094 RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
4096 if (!already_marked)
4098 /* Clear obsolete visibility flags ... */
4099 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
4100 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4101 /* ... and store info about transaction updating this tuple */
4102 Assert(TransactionIdIsValid(xmax_old_tuple));
4103 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
4104 oldtup.t_data->t_infomask |= infomask_old_tuple;
4105 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
4106 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
4109 /* record address of new tuple in t_ctid of old one */
4110 oldtup.t_data->t_ctid = heaptup->t_self;
4112 /* clear PD_ALL_VISIBLE flags */
4113 if (PageIsAllVisible(BufferGetPage(buffer)))
4115 all_visible_cleared = true;
4116 PageClearAllVisible(BufferGetPage(buffer));
4117 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
4120 if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
4122 all_visible_cleared_new = true;
4123 PageClearAllVisible(BufferGetPage(newbuf));
4124 visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
4128 if (newbuf != buffer)
4129 MarkBufferDirty(newbuf);
4130 MarkBufferDirty(buffer);
4133 if (RelationNeedsWAL(relation))
4138 * For logical decoding we need combocids to properly decode the
4141 if (RelationIsAccessibleInLogicalDecoding(relation))
4143 log_heap_new_cid(relation, &oldtup);
4144 log_heap_new_cid(relation, heaptup);
4147 recptr = log_heap_update(relation, buffer,
4148 newbuf, &oldtup, heaptup,
4150 all_visible_cleared,
4151 all_visible_cleared_new);
4152 if (newbuf != buffer)
4154 PageSetLSN(BufferGetPage(newbuf), recptr);
4156 PageSetLSN(BufferGetPage(buffer), recptr);
4161 if (newbuf != buffer)
4162 LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
4163 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4166 * Mark old tuple for invalidation from system caches at next command
4167 * boundary, and mark the new tuple for invalidation in case we abort. We
4168 * have to do this before releasing the buffer because oldtup is in the
4169 * buffer. (heaptup is all in local memory, but it's necessary to process
4170 * both tuple versions in one call to inval.c so we can avoid redundant
4173 CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
4175 /* Now we can release the buffer(s) */
4176 if (newbuf != buffer)
4177 ReleaseBuffer(newbuf);
4178 ReleaseBuffer(buffer);
4179 if (BufferIsValid(vmbuffer_new))
4180 ReleaseBuffer(vmbuffer_new);
4181 if (BufferIsValid(vmbuffer))
4182 ReleaseBuffer(vmbuffer);
4185 * Release the lmgr tuple lock, if we had it.
4187 if (have_tuple_lock)
4188 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
4190 pgstat_count_heap_update(relation, use_hot_update);
4193 * If heaptup is a private copy, release it. Don't forget to copy t_self
4194 * back to the caller's image, too.
4196 if (heaptup != newtup)
4198 newtup->t_self = heaptup->t_self;
4199 heap_freetuple(heaptup);
4202 if (old_key_tuple != NULL && old_key_copied)
4203 heap_freetuple(old_key_tuple);
4205 bms_free(hot_attrs);
4206 bms_free(key_attrs);
4208 return HeapTupleMayBeUpdated;
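/*
 * Illustrative sketch, not part of heapam.c: a minimal caller of
 * heap_update(), modeled on simple_heap_update() further below.  The
 * relation, target TID and replacement tuple are assumed to be prepared by
 * the caller; error handling is reduced to elog() for brevity.
 */
#if 0
{
	HTSU_Result result;
	HeapUpdateFailureData hufd;
	LockTupleMode lockmode;

	result = heap_update(relation, otid, newtup,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 true /* wait for commit */ ,
						 &hufd, &lockmode);
	if (result != HeapTupleMayBeUpdated)
		elog(ERROR, "unexpected heap_update status: %u", result);
}
#endif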
4212 * Check if the specified attribute's value is same in both given tuples.
4213 * Subroutine for HeapSatisfiesHOTandKeyUpdate.
4216 heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
4217 HeapTuple tup1, HeapTuple tup2)
4223 Form_pg_attribute att;
4226 * If it's a whole-tuple reference, say "not equal". It's not really
4227 * worth supporting this case, since it could only succeed after a no-op
4228 * update, which is hardly a case worth optimizing for.
4234 * Likewise, automatically say "not equal" for any system attribute other
4235 * than OID and tableOID; we cannot expect these to be consistent in a HOT
4236 * chain, or even to be set correctly yet in the new tuple.
4240 if (attrnum != ObjectIdAttributeNumber &&
4241 attrnum != TableOidAttributeNumber)
4246 * Extract the corresponding values. XXX this is pretty inefficient if
4247 * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
4248 * a single heap_deform_tuple call on each tuple, instead? But that
4249 * doesn't work for system columns ...
4251 value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
4252 value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
4255 * If one value is NULL and other is not, then they are certainly not
4258 if (isnull1 != isnull2)
4262 * If both are NULL, they can be considered equal.
4268 * We do simple binary comparison of the two datums. This may be overly
4269 * strict because there can be multiple binary representations for the
4270 * same logical value. But we should be OK as long as there are no false
4271 * positives. Using a type-specific equality operator is messy because
4272 * there could be multiple notions of equality in different operator
4273 * classes; furthermore, we cannot safely invoke user-defined functions
4274 * while holding exclusive buffer lock.
4278 /* The only allowed system columns are OIDs, so do this */
4279 return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
4283 Assert(attrnum <= tupdesc->natts);
4284 att = tupdesc->attrs[attrnum - 1];
4285 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
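/*
 * Illustrative sketch, not part of heapam.c: datumIsEqual() is a raw binary
 * comparison.  For pass-by-value types it simply compares the Datum words,
 * so two equal int4 values always match; for varlena types, logically equal
 * values with different physical representations may compare as "not
 * equal", which merely forfeits the HOT optimization and is therefore safe.
 */
#if 0
{
	bool		same;

	/* pass-by-value int4: typByVal = true, typLen = sizeof(int32) */
	same = datumIsEqual(Int32GetDatum(42), Int32GetDatum(42),
						true, sizeof(int32));
	Assert(same);
}
#endif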
4290 * Check which columns are being updated.
4292 * This simultaneously checks conditions for HOT updates, for FOR KEY
4293 * SHARE updates, and REPLICA IDENTITY concerns. Since much of the time they
4294 * will be checking very similar sets of columns, and doing the same tests on
4295 * them, it makes sense to optimize and do them together.
4297 * We receive three bitmapsets comprising the three sets of columns we're
4298 * interested in. Note these are destructively modified; that is OK since
4299 * this is invoked at most once in heap_update.
4301 * hot_result is set to TRUE if it's okay to do a HOT update (i.e. it does not
4302 * modify indexed columns); key_result is set to TRUE if the update does
4303 * modify columns used in the key; id_result is set to TRUE if the update does
4304 * not modify columns in any index marked as the REPLICA IDENTITY.
4307 HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs,
4308 Bitmapset *key_attrs, Bitmapset *id_attrs,
4309 bool *satisfies_hot, bool *satisfies_key,
4311 HeapTuple oldtup, HeapTuple newtup)
4313 int next_hot_attnum;
4314 int next_key_attnum;
4316 bool hot_result = true;
4317 bool key_result = true;
4318 bool id_result = true;
4320 /* If REPLICA IDENTITY is set to FULL, id_attrs will be empty. */
4321 Assert(bms_is_subset(id_attrs, key_attrs));
4322 Assert(bms_is_subset(key_attrs, hot_attrs));
4325 * If one of these sets contains no remaining bits, bms_first_member will
4326 * return -1, and after adding FirstLowInvalidHeapAttributeNumber (which
4327 * is negative!) we'll get an attribute number that can't possibly be
4328 * real, and thus won't match any actual attribute number.
4330 next_hot_attnum = bms_first_member(hot_attrs);
4331 next_hot_attnum += FirstLowInvalidHeapAttributeNumber;
4332 next_key_attnum = bms_first_member(key_attrs);
4333 next_key_attnum += FirstLowInvalidHeapAttributeNumber;
4334 next_id_attnum = bms_first_member(id_attrs);
4335 next_id_attnum += FirstLowInvalidHeapAttributeNumber;
4343 * Since the HOT attributes are a superset of the key attributes and
4344 * the key attributes are a superset of the id attributes, this logic
4345 * is guaranteed to identify the next column that needs to be checked.
4347 if (hot_result && next_hot_attnum > FirstLowInvalidHeapAttributeNumber)
4348 check_now = next_hot_attnum;
4349 else if (key_result && next_key_attnum > FirstLowInvalidHeapAttributeNumber)
4350 check_now = next_key_attnum;
4351 else if (id_result && next_id_attnum > FirstLowInvalidHeapAttributeNumber)
4352 check_now = next_id_attnum;
4356 /* See whether it changed. */
4357 changed = !heap_tuple_attr_equals(RelationGetDescr(relation),
4358 check_now, oldtup, newtup);
4361 if (check_now == next_hot_attnum)
4363 if (check_now == next_key_attnum)
4365 if (check_now == next_id_attnum)
4368 /* if all are false now, we can stop checking */
4369 if (!hot_result && !key_result && !id_result)
4374 * Advance the next attribute numbers for the sets that contain the
4375 * attribute we just checked. As we work our way through the columns,
4376 * the next_attnum values will rise; but when each set becomes empty,
4377 * bms_first_member() will return -1 and the attribute number will end
4378 * up with a value less than FirstLowInvalidHeapAttributeNumber.
4380 if (hot_result && check_now == next_hot_attnum)
4382 next_hot_attnum = bms_first_member(hot_attrs);
4383 next_hot_attnum += FirstLowInvalidHeapAttributeNumber;
4385 if (key_result && check_now == next_key_attnum)
4387 next_key_attnum = bms_first_member(key_attrs);
4388 next_key_attnum += FirstLowInvalidHeapAttributeNumber;
4390 if (id_result && check_now == next_id_attnum)
4392 next_id_attnum = bms_first_member(id_attrs);
4393 next_id_attnum += FirstLowInvalidHeapAttributeNumber;
4397 *satisfies_hot = hot_result;
4398 *satisfies_key = key_result;
4399 *satisfies_id = id_result;
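/*
 * Illustrative sketch, not part of heapam.c: the attribute bitmapsets used
 * above store attribute numbers offset by FirstLowInvalidHeapAttributeNumber
 * so that (negative) system attribute numbers map into the non-negative
 * range that bitmapsets require.  A hypothetical membership test for a user
 * column therefore looks like this:
 */
#if 0
{
	AttrNumber	attno = 3;		/* hypothetical indexed user column */

	if (bms_is_member(attno - FirstLowInvalidHeapAttributeNumber, hot_attrs))
		elog(DEBUG1, "column %d is indexed; changing it defeats HOT", attno);
}
#endif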
4403 * simple_heap_update - replace a tuple
4405 * This routine may be used to update a tuple when concurrent updates of
4406 * the target tuple are not expected (for example, because we have a lock
4407 * on the relation associated with the tuple). Any failure is reported
4411 simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
4414 HeapUpdateFailureData hufd;
4415 LockTupleMode lockmode;
4417 result = heap_update(relation, otid, tup,
4418 GetCurrentCommandId(true), InvalidSnapshot,
4419 true /* wait for commit */ ,
4423 case HeapTupleSelfUpdated:
4424 /* Tuple was already updated in current command? */
4425 elog(ERROR, "tuple already updated by self");
4428 case HeapTupleMayBeUpdated:
4429 /* done successfully */
4432 case HeapTupleUpdated:
4433 elog(ERROR, "tuple concurrently updated");
4437 elog(ERROR, "unrecognized heap_update status: %u", result);
4444 * Return the MultiXactStatus corresponding to the given tuple lock mode.
4446 static MultiXactStatus
4447 get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4452 retval = tupleLockExtraInfo[mode].updstatus;
4454 retval = tupleLockExtraInfo[mode].lockstatus;
4457 elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4458 is_update ? "true" : "false");
4460 return (MultiXactStatus) retval;
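/*
 * Illustrative sketch, not part of heapam.c: the mapping implemented by the
 * tupleLockExtraInfo table pairs each tuple lock mode with its locking and
 * updating MultiXactStatus; for example (assuming the table defined earlier
 * in this file):
 */
#if 0
	Assert(get_mxact_status_for_lock(LockTupleKeyShare, false) ==
		   MultiXactStatusForKeyShare);
	Assert(get_mxact_status_for_lock(LockTupleExclusive, true) ==
		   MultiXactStatusUpdate);
#endif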
4464 * heap_lock_tuple - lock a tuple in shared or exclusive mode
4466 * Note that this acquires a buffer pin, which the caller must release.
4469 * relation: relation containing tuple (caller must hold suitable lock)
4470 * tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
4471 * cid: current command ID (used for visibility test, and stored into
4472 * tuple's cmax if lock is successful)
4473 * mode: indicates if shared or exclusive tuple lock is desired
4474 * wait_policy: what to do if tuple lock is not available
4475 * follow_updates: if true, follow the update chain to also lock descendant versions.
4478 * Output parameters:
4479 * *tuple: all fields filled in
4480 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4481 * *hufd: filled in failure cases (see below)
4483 * Function result may be:
4484 * HeapTupleMayBeUpdated: lock was successfully acquired
4485 * HeapTupleInvisible: lock failed because tuple was never visible to us
4486 * HeapTupleSelfUpdated: lock failed because tuple updated by self
4487 * HeapTupleUpdated: lock failed because tuple updated by other xact
4488 * HeapTupleWouldBlock: lock couldn't be acquired and wait_policy is skip
4490 * In the failure cases other than HeapTupleInvisible, the routine fills
4491 * *hufd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
4492 * if necessary), and t_cmax (the last only for HeapTupleSelfUpdated,
4493 * since we cannot obtain cmax from a combocid generated by another transaction).
4495 * See comments for struct HeapUpdateFailureData for additional info.
4497 * See README.tuplock for a thorough explanation of this mechanism.
4500 heap_lock_tuple(Relation relation, HeapTuple tuple,
4501 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
4502 bool follow_updates,
4503 Buffer *buffer, HeapUpdateFailureData *hufd)
4506 ItemPointer tid = &(tuple->t_self);
4511 uint16 old_infomask,
4514 bool first_time = true;
4515 bool have_tuple_lock = false;
4517 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4518 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4520 page = BufferGetPage(*buffer);
4521 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4522 Assert(ItemIdIsNormal(lp));
4524 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4525 tuple->t_len = ItemIdGetLength(lp);
4526 tuple->t_tableOid = RelationGetRelid(relation);
4529 result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4531 if (result == HeapTupleInvisible)
4533 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4536 * This is possible, but only when locking a tuple for ON CONFLICT
4537 * UPDATE. We return this value here rather than throwing an error in
4538 * order to give that case the opportunity to throw a more specific error.
4541 return HeapTupleInvisible;
4543 else if (result == HeapTupleBeingUpdated)
4545 TransactionId xwait;
4549 ItemPointerData t_ctid;
4551 /* must copy state data before unlocking buffer */
4552 xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4553 infomask = tuple->t_data->t_infomask;
4554 infomask2 = tuple->t_data->t_infomask2;
4555 ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4557 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4560 * If any subtransaction of the current top transaction already holds
4561 * a lock as strong as or stronger than what we're requesting, we
4562 * effectively hold the desired lock already. We *must* succeed
4563 * without trying to take the tuple lock, else we will deadlock
4564 * against anyone wanting to acquire a stronger lock.
4566 * Note we only do this the first time we loop on the HTSU result;
4567 * there is no point in testing in subsequent passes, because
4568 * evidently our own transaction cannot have acquired a new lock after
4569 * the first time we checked.
4575 if (infomask & HEAP_XMAX_IS_MULTI)
4579 MultiXactMember *members;
4582 * We don't need to allow old multixacts here; if that had
4583 * been the case, HeapTupleSatisfiesUpdate would have returned
4584 * MayBeUpdated and we wouldn't be here.
4587 GetMultiXactIdMembers(xwait, &members, false,
4588 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4590 for (i = 0; i < nmembers; i++)
4592 /* only consider members of our own transaction */
4593 if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4596 if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4599 return HeapTupleMayBeUpdated;
4606 else if (TransactionIdIsCurrentTransactionId(xwait))
4610 case LockTupleKeyShare:
4611 Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4612 HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4613 HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4614 return HeapTupleMayBeUpdated;
4616 case LockTupleShare:
4617 if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4618 HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4619 return HeapTupleMayBeUpdated;
4621 case LockTupleNoKeyExclusive:
4622 if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4623 return HeapTupleMayBeUpdated;
4625 case LockTupleExclusive:
4626 if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4627 infomask2 & HEAP_KEYS_UPDATED)
4628 return HeapTupleMayBeUpdated;
4635 * Initially assume that we will have to wait for the locking
4636 * transaction(s) to finish. We check various cases below in which
4637 * this can be turned off.
4639 require_sleep = true;
4640 if (mode == LockTupleKeyShare)
4643 * If we're requesting KeyShare, and there's no update present, we
4644 * don't need to wait. Even if there is an update, we can still
4645 * continue if the key hasn't been modified.
4647 * However, if there are updates, we need to walk the update chain
4648 * to mark future versions of the row as locked, too. That way,
4649 * if somebody deletes that future version, we're protected
4650 * against the key going away. This locking of future versions
4651 * could block momentarily, if a concurrent transaction is
4652 * deleting a key; or it could return a value to the effect that
4653 * the transaction deleting the key has already committed. So we
4654 * do this before re-locking the buffer; otherwise this would be
4655 * prone to deadlocks.
4657 * Note that the TID we're locking was grabbed before we unlocked
4658 * the buffer. For it to change while we're not looking, the
4659 * other properties we're testing for below after re-locking the
4660 * buffer would also change, in which case we would restart this
4663 if (!(infomask2 & HEAP_KEYS_UPDATED))
4667 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4670 * If there are updates, follow the update chain; bail out if
4671 * that cannot be done.
4673 if (follow_updates && updated)
4677 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4678 GetCurrentTransactionId(),
4680 if (res != HeapTupleMayBeUpdated)
4683 /* recovery code expects to have buffer lock held */
4684 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4689 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4692 * Make sure it's still an appropriate lock, else start over.
4693 * Also, if it wasn't updated before we released the lock, but
4694 * is updated now, we start over too; the reason is that we
4695 * now need to follow the update chain to lock the new
4698 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4699 ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4703 /* Things look okay, so we can skip sleeping */
4704 require_sleep = false;
4707 * Note we allow Xmax to change here; other updaters/lockers
4708 * could have modified it before we grabbed the buffer lock.
4709 * However, this is not a problem, because with the recheck we
4710 * just did we ensure that they still don't conflict with the lock we want.
4715 else if (mode == LockTupleShare)
4718 * If we're requesting Share, we can similarly avoid sleeping if
4719 * there's no update and no exclusive lock present.
4721 if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4722 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4724 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4727 * Make sure it's still an appropriate lock, else start over.
4728 * See above about allowing xmax to change.
4730 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4731 HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4733 require_sleep = false;
4736 else if (mode == LockTupleNoKeyExclusive)
4739 * If we're requesting NoKeyExclusive, we might also be able to
4740 * avoid sleeping; just ensure that there is no conflicting lock already acquired.
4743 if (infomask & HEAP_XMAX_IS_MULTI)
4745 if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4749 * No conflict, but if the xmax changed under us in the
4750 * meantime, start over.
4752 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4753 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4754 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4758 /* otherwise, we're good */
4759 require_sleep = false;
4762 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4764 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4766 /* if the xmax changed in the meantime, start over */
4767 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4768 !TransactionIdEquals(
4769 HeapTupleHeaderGetRawXmax(tuple->t_data),
4772 /* otherwise, we're good */
4773 require_sleep = false;
4778 * As a check independent from those above, we can also avoid sleeping
4779 * if the current transaction is the sole locker of the tuple. Note
4780 * that the strength of the lock already held is irrelevant; this is
4781 * not about recording the lock in Xmax (which will be done regardless
4782 * of this optimization, below). Also, note that the cases where we
4783 * hold a lock stronger than we are requesting are already handled
4784 * above by not doing anything.
4786 * Note we only deal with the non-multixact case here; MultiXactIdWait
4787 * is well equipped to deal with this situation on its own.
4789 if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4790 TransactionIdIsCurrentTransactionId(xwait))
4792 /* ... but if the xmax changed in the meantime, start over */
4793 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4794 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4795 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4798 Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
4799 require_sleep = false;
4803 * By here, we either have already acquired the buffer exclusive lock,
4804 * or we must wait for the locking transaction or multixact; so below
4805 * we ensure that we grab buffer lock after the sleep.
4811 * Acquire tuple lock to establish our priority for the tuple, or
4812 * die trying. LockTuple will release us when we are next-in-line
4813 * for the tuple. We must do this even if we are share-locking.
4815 * If we are forced to "start over" below, we keep the tuple lock;
4816 * this arranges that we stay at the head of the line while
4817 * rechecking tuple state.
4819 if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
4823 * This can only happen if wait_policy is Skip and the lock
4824 * couldn't be obtained.
4826 result = HeapTupleWouldBlock;
4827 /* recovery code expects to have buffer lock held */
4828 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4832 if (infomask & HEAP_XMAX_IS_MULTI)
4834 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4836 /* We only ever lock tuples, never update them */
4837 if (status >= MultiXactStatusNoKeyUpdate)
4838 elog(ERROR, "invalid lock mode in heap_lock_tuple");
4840 /* wait for multixact to end, or die trying */
4841 switch (wait_policy)
4844 MultiXactIdWait((MultiXactId) xwait, status, infomask,
4845 relation, &tuple->t_self, XLTW_Lock, NULL);
4848 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4849 status, infomask, relation,
4852 result = HeapTupleWouldBlock;
4853 /* recovery code expects to have buffer lock held */
4854 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4859 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4860 status, infomask, relation,
4863 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4864 errmsg("could not obtain lock on row in relation \"%s\"",
4865 RelationGetRelationName(relation))));
4871 * Of course, the multixact might not be done here: if we're
4872 * requesting a light lock mode, other transactions with light
4873 * locks could still be alive, as well as locks owned by our
4874 * own xact or other subxacts of this backend. We need to
4875 * preserve the surviving MultiXact members. Note that it
4876 * isn't absolutely necessary in the latter case, but doing so is simpler.
4882 /* wait for regular transaction to end, or die trying */
4883 switch (wait_policy)
4886 XactLockTableWait(xwait, relation, &tuple->t_self,
4890 if (!ConditionalXactLockTableWait(xwait))
4892 result = HeapTupleWouldBlock;
4893 /* recovery code expects to have buffer lock held */
4894 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4899 if (!ConditionalXactLockTableWait(xwait))
4901 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4902 errmsg("could not obtain lock on row in relation \"%s\"",
4903 RelationGetRelationName(relation))));
4908 /* if there are updates, follow the update chain */
4909 if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4913 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4914 GetCurrentTransactionId(),
4916 if (res != HeapTupleMayBeUpdated)
4919 /* recovery code expects to have buffer lock held */
4920 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4925 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4928 * xwait is done, but if xwait had just locked the tuple then some
4929 * other xact could update this tuple before we get to this point.
4930 * Check for xmax change, and start over if so.
4932 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4933 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4937 if (!(infomask & HEAP_XMAX_IS_MULTI))
4940 * Otherwise check if it committed or aborted. Note we cannot
4941 * be here if the tuple was only locked by somebody who didn't
4942 * conflict with us; that would have been handled above. So
4943 * that transaction must necessarily be gone by now. But
4944 * don't check for this in the multixact case, because some
4945 * locker transactions might still be running.
4947 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
4951 /* By here, we're certain that we hold buffer exclusive lock again */
4954 * We may lock if previous xmax aborted, or if it committed but only
4955 * locked the tuple without updating it; or if we didn't have to wait
4956 * at all for whatever reason.
4958 if (!require_sleep ||
4959 (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
4960 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4961 HeapTupleHeaderIsOnlyLocked(tuple->t_data))
4962 result = HeapTupleMayBeUpdated;
4964 result = HeapTupleUpdated;
4968 if (result != HeapTupleMayBeUpdated)
4970 Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
4971 result == HeapTupleWouldBlock);
4972 Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
4973 hufd->ctid = tuple->t_data->t_ctid;
4974 hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
4975 if (result == HeapTupleSelfUpdated)
4976 hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
4978 hufd->cmax = InvalidCommandId;
4979 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4980 if (have_tuple_lock)
4981 UnlockTupleTuplock(relation, tid, mode);
4985 xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
4986 old_infomask = tuple->t_data->t_infomask;
4989 * If this is the first possibly-multixact-able operation in the current
4990 * transaction, set my per-backend OldestMemberMXactId setting. We can be
4991 * certain that the transaction will never become a member of any older
4992 * MultiXactIds than that. (We have to do this even if we end up just
4993 * using our own TransactionId below, since some other backend could
4994 * incorporate our XID into a MultiXact immediately afterwards.)
4996 MultiXactIdSetOldestMember();
4999 * Compute the new xmax and infomask to store into the tuple. Note we do
5000 * not modify the tuple just yet, because that would leave it in the wrong
5001 * state if multixact.c elogs.
5003 compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5004 GetCurrentTransactionId(), mode, false,
5005 &xid, &new_infomask, &new_infomask2);
5007 START_CRIT_SECTION();
5010 * Store transaction information of xact locking the tuple.
5012 * Note: Cmax is meaningless in this context, so don't set it; this avoids
5013 * possibly generating a useless combo CID. Moreover, if we're locking a
5014 * previously updated tuple, it's important to preserve the Cmax.
5016 * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5017 * we would break the HOT chain.
5019 tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5020 tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5021 tuple->t_data->t_infomask |= new_infomask;
5022 tuple->t_data->t_infomask2 |= new_infomask2;
5023 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5024 HeapTupleHeaderClearHotUpdated(tuple->t_data);
5025 HeapTupleHeaderSetXmax(tuple->t_data, xid);
5028 * Make sure there is no forward chain link in t_ctid. Note that in the
5029 * cases where the tuple has been updated, we must not overwrite t_ctid,
5030 * because it was set by the updater. Moreover, if the tuple has been
5031 * updated, we need to follow the update chain to lock the new versions of
5032 * the tuple as well.
5034 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5035 tuple->t_data->t_ctid = *tid;
5037 MarkBufferDirty(*buffer);
5040 * XLOG stuff. You might think that we don't need an XLOG record because
5041 * there is no state change worth restoring after a crash. You would be
5042 * wrong however: we have just written either a TransactionId or a
5043 * MultiXactId that may never have been seen on disk before, and we need
5044 * to make sure that there are XLOG entries covering those ID numbers.
5045 * Else the same IDs might be re-used after a crash, which would be
5046 * disastrous if this page made it to disk before the crash. Essentially
5047 * we have to enforce the WAL log-before-data rule even in this case.
5048 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5049 * entries for everything anyway.)
5051 if (RelationNeedsWAL(relation))
5057 XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
5059 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5060 xlrec.locking_xid = xid;
5061 xlrec.infobits_set = compute_infobits(new_infomask,
5062 tuple->t_data->t_infomask2);
5063 XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
5065 /* we don't decode row locks atm, so no need to log the origin */
5067 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
5069 PageSetLSN(page, recptr);
5074 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5077 * Don't update the visibility map here. Locking a tuple doesn't change visibility info.
5082 * Now that we have successfully marked the tuple as locked, we can
5083 * release the lmgr tuple lock, if we had it.
5085 if (have_tuple_lock)
5086 UnlockTupleTuplock(relation, tid, mode);
5088 return HeapTupleMayBeUpdated;
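/*
 * Illustrative sketch, not part of heapam.c: a minimal caller of
 * heap_lock_tuple().  Only t_self needs to be valid on entry; the buffer
 * returned in *buffer comes back pinned but unlocked and must be released
 * by the caller.  "rel" and "tid" are assumed to be supplied by the caller.
 */
#if 0
{
	HeapTupleData locktup;
	Buffer		lockbuf;
	HeapUpdateFailureData hufd;
	HTSU_Result res;

	ItemPointerCopy(tid, &locktup.t_self);

	res = heap_lock_tuple(rel, &locktup,
						  GetCurrentCommandId(true),
						  LockTupleExclusive, LockWaitBlock,
						  false /* don't follow updates */ ,
						  &lockbuf, &hufd);
	ReleaseBuffer(lockbuf);

	if (res != HeapTupleMayBeUpdated)
		elog(ERROR, "could not lock tuple: status %u", res);
}
#endif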
5092 * Acquire heavyweight lock on the given tuple, in preparation for acquiring
5093 * its normal, Xmax-based tuple lock.
5095 * have_tuple_lock is an input and output parameter: on input, it indicates
5096 * whether the lock has previously been acquired (and this function does
5097 * nothing in that case). If this function returns success, have_tuple_lock
5098 * has been flipped to true.
5100 * Returns false if it was unable to obtain the lock; this can only happen if
5101 * wait_policy is Skip.
5104 heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
5105 LockWaitPolicy wait_policy, bool *have_tuple_lock)
5107 if (*have_tuple_lock)
5110 switch (wait_policy)
5113 LockTupleTuplock(relation, tid, mode);
5117 if (!ConditionalLockTupleTuplock(relation, tid, mode))
5122 if (!ConditionalLockTupleTuplock(relation, tid, mode))
5124 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5125 errmsg("could not obtain lock on row in relation \"%s\"",
5126 RelationGetRelationName(relation))));
5129 *have_tuple_lock = true;
5135 * Given an original set of Xmax and infomask, and a transaction (identified by
5136 * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
5137 * corresponding infomasks to use on the tuple.
5139 * Note that this might have side effects such as creating a new MultiXactId.
5141 * Most callers will have called HeapTupleSatisfiesUpdate before this function;
5142 * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
5143 * but it was not running anymore. There is a race condition, which is that the
5144 * MultiXactId may have finished since then, but that uncommon case is handled
5145 * either here, or within MultiXactIdExpand.
5147 * There is a similar race condition possible when the old xmax was a regular
5148 * TransactionId. We test TransactionIdIsInProgress again just to narrow the
5149 * window, but it's still possible to end up creating an unnecessary
5150 * MultiXactId. Fortunately this is harmless.
5153 compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
5154 uint16 old_infomask2, TransactionId add_to_xmax,
5155 LockTupleMode mode, bool is_update,
5156 TransactionId *result_xmax, uint16 *result_infomask,
5157 uint16 *result_infomask2)
5159 TransactionId new_xmax;
5160 uint16 new_infomask,
5163 Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5168 if (old_infomask & HEAP_XMAX_INVALID)
5171 * No previous locker; we just insert our own TransactionId.
5173 * Note that it's critical that this case be the first one checked,
5174 * because there are several blocks below that come back to this one
5175 * to implement certain optimizations; old_infomask might contain
5176 * other dirty bits in those cases, but we don't really care.
5180 new_xmax = add_to_xmax;
5181 if (mode == LockTupleExclusive)
5182 new_infomask2 |= HEAP_KEYS_UPDATED;
5186 new_infomask |= HEAP_XMAX_LOCK_ONLY;
5189 case LockTupleKeyShare:
5190 new_xmax = add_to_xmax;
5191 new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5193 case LockTupleShare:
5194 new_xmax = add_to_xmax;
5195 new_infomask |= HEAP_XMAX_SHR_LOCK;
5197 case LockTupleNoKeyExclusive:
5198 new_xmax = add_to_xmax;
5199 new_infomask |= HEAP_XMAX_EXCL_LOCK;
5201 case LockTupleExclusive:
5202 new_xmax = add_to_xmax;
5203 new_infomask |= HEAP_XMAX_EXCL_LOCK;
5204 new_infomask2 |= HEAP_KEYS_UPDATED;
5207 new_xmax = InvalidTransactionId; /* silence compiler */
5208 elog(ERROR, "invalid lock mode");
5212 else if (old_infomask & HEAP_XMAX_IS_MULTI)
5214 MultiXactStatus new_status;
5217 * Currently we don't allow XMAX_COMMITTED to be set for multis, so cross-check.
5220 Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5223 * A multixact together with LOCK_ONLY set but neither lock bit set
5224 * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5225 * anymore. This check is critical for databases upgraded by
5226 * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5227 * that such multis are never passed.
5229 if (!(old_infomask & HEAP_LOCK_MASK) &&
5230 HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5232 old_infomask &= ~HEAP_XMAX_IS_MULTI;
5233 old_infomask |= HEAP_XMAX_INVALID;
5238 * If the XMAX is already a MultiXactId, then we need to expand it to
5239 * include add_to_xmax; but if all the members were lockers and are
5240 * all gone, we can do away with the IS_MULTI bit and just set
5241 * add_to_xmax as the only locker/updater. If all lockers are gone
5242 * and we have an updater that aborted, we can also do without a
5245 * The cost of doing GetMultiXactIdMembers would be paid by
5246 * MultiXactIdExpand if we weren't to do this, so this check is not
5247 * incurring extra work anyhow.
5249 if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5251 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5252 !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5256 * Reset these bits and restart; otherwise fall through to
5257 * create a new multi below.
5259 old_infomask &= ~HEAP_XMAX_IS_MULTI;
5260 old_infomask |= HEAP_XMAX_INVALID;
5265 new_status = get_mxact_status_for_lock(mode, is_update);
5267 new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5269 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5271 else if (old_infomask & HEAP_XMAX_COMMITTED)
5274 * It's a committed update, so we need to preserve it as the updater of the tuple.
5277 MultiXactStatus status;
5278 MultiXactStatus new_status;
5280 if (old_infomask2 & HEAP_KEYS_UPDATED)
5281 status = MultiXactStatusUpdate;
5283 status = MultiXactStatusNoKeyUpdate;
5285 new_status = get_mxact_status_for_lock(mode, is_update);
5288 * since it's not running, it's obviously impossible for the old
5289 * updater to be identical to the current one, so we need not check
5290 * for that case as we do in the block above.
5292 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5293 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5295 else if (TransactionIdIsInProgress(xmax))
5298 * If the XMAX is a valid, in-progress TransactionId, then we need to
5299 * create a new MultiXactId that includes both the old locker or
5300 * updater and our own TransactionId.
5302 MultiXactStatus new_status;
5303 MultiXactStatus old_status;
5304 LockTupleMode old_mode;
5306 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5308 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5309 old_status = MultiXactStatusForKeyShare;
5310 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5311 old_status = MultiXactStatusForShare;
5312 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5314 if (old_infomask2 & HEAP_KEYS_UPDATED)
5315 old_status = MultiXactStatusForUpdate;
5317 old_status = MultiXactStatusForNoKeyUpdate;
5322 * LOCK_ONLY can be present alone only when a page has been
5323 * upgraded by pg_upgrade. But in that case,
5324 * TransactionIdIsInProgress() should have returned false. We
5325 * assume it's no longer locked in this case.
5327 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5328 old_infomask |= HEAP_XMAX_INVALID;
5329 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5335 /* it's an update, but which kind? */
5336 if (old_infomask2 & HEAP_KEYS_UPDATED)
5337 old_status = MultiXactStatusUpdate;
5339 old_status = MultiXactStatusNoKeyUpdate;
5342 old_mode = TUPLOCK_from_mxstatus(old_status);
5345 * If the lock to be acquired is for the same TransactionId as the
5346 * existing lock, there's an optimization possible: consider only the
5347 * strongest of both locks as the only one present, and restart.
5349 if (xmax == add_to_xmax)
5352 * Note that it's not possible for the original tuple to be
5353 * updated: we wouldn't be here because the tuple would have been
5354 * invisible and we wouldn't try to update it. As a subtlety,
5355 * this code can also run when traversing an update chain to lock
5356 * future versions of a tuple. But we wouldn't be here either,
5357 * because the add_to_xmax would be different from the original
5360 Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5362 /* acquire the strongest of both */
5363 if (mode < old_mode)
5365 /* mustn't touch is_update */
5367 old_infomask |= HEAP_XMAX_INVALID;
5371 /* otherwise, just fall back to creating a new multixact */
5372 new_status = get_mxact_status_for_lock(mode, is_update);
5373 new_xmax = MultiXactIdCreate(xmax, old_status,
5374 add_to_xmax, new_status);
5375 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5377 else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5378 TransactionIdDidCommit(xmax))
5381 * It's a committed update, so we need to preserve it as the updater of the tuple.
5384 MultiXactStatus status;
5385 MultiXactStatus new_status;
5387 if (old_infomask2 & HEAP_KEYS_UPDATED)
5388 status = MultiXactStatusUpdate;
5390 status = MultiXactStatusNoKeyUpdate;
5392 new_status = get_mxact_status_for_lock(mode, is_update);
5395 * since it's not running, it's obviously impossible for the old
5396 * updater to be identical to the current one, so we need not check
5397 * for that case as we do in the block above.
5399 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5400 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5405 * Can get here iff the locking/updating transaction was running when
5406 * the infomask was extracted from the tuple, but finished before
5407 * TransactionIdIsInProgress got to run. Deal with it as if there was
5408 * no locker at all in the first place.
5410 old_infomask |= HEAP_XMAX_INVALID;
5414 *result_infomask = new_infomask;
5415 *result_infomask2 = new_infomask2;
5416 *result_xmax = new_xmax;
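/*
 * Illustrative sketch, not part of heapam.c: for a tuple with no prior
 * locker (HEAP_XMAX_INVALID set in the old infomask), the function above
 * simply hands back the caller's own xid plus the lock bits for the
 * requested mode; e.g. a key-share lock request yields HEAP_XMAX_LOCK_ONLY
 * and HEAP_XMAX_KEYSHR_LOCK.
 */
#if 0
{
	TransactionId out_xmax;
	uint16		out_infomask,
				out_infomask2;

	compute_new_xmax_infomask(InvalidTransactionId,
							  HEAP_XMAX_INVALID, 0,
							  GetCurrentTransactionId(),
							  LockTupleKeyShare, false /* not an update */ ,
							  &out_xmax, &out_infomask, &out_infomask2);
	Assert(out_infomask & HEAP_XMAX_LOCK_ONLY);
	Assert(out_infomask & HEAP_XMAX_KEYSHR_LOCK);
}
#endif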
5420 * Subroutine for heap_lock_updated_tuple_rec.
5422 * Given a hypothetical multixact status held by the transaction identified
5423 * with the given xid, does the current transaction need to wait, fail, or can
5424 * it continue if it wanted to acquire a lock of the given mode? "needwait"
5425 * is set to true if waiting is necessary; if it can continue, then
5426 * HeapTupleMayBeUpdated is returned. In case of a conflict, a different
5427 * HeapTupleSatisfiesUpdate return code is returned.
5429 * The held status is said to be hypothetical because it might correspond to a
5430 * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5431 * way for simplicity of API.
5434 test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
5435 LockTupleMode mode, bool *needwait)
5437 MultiXactStatus wantedstatus;
5440 wantedstatus = get_mxact_status_for_lock(mode, false);
5443 * Note: we *must* check TransactionIdIsInProgress before
5444 * TransactionIdDidAbort/Commit; see comment at top of tqual.c for an
5447 if (TransactionIdIsCurrentTransactionId(xid))
5450 * Updated by our own transaction? Just return failure. This
5451 * shouldn't normally happen.
5453 return HeapTupleSelfUpdated;
5455 else if (TransactionIdIsInProgress(xid))
5458 * If the locking transaction is running, what we do depends on
5459 * whether the lock modes conflict: if they do, then we must wait for
5460 * it to finish; otherwise we can fall through to lock this tuple
5461 * version without waiting.
5463 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5464 LOCKMODE_from_mxstatus(wantedstatus)))
5470 * If we set needwait above, then this value doesn't matter;
5471 * otherwise, this value signals to caller that it's okay to proceed.
5473 return HeapTupleMayBeUpdated;
5475 else if (TransactionIdDidAbort(xid))
5476 return HeapTupleMayBeUpdated;
5477 else if (TransactionIdDidCommit(xid))
5480 * The other transaction committed. If it was only a locker, then the
5481 * lock is completely gone now and we can return success; but if it
5482 * was an update, then what we do depends on whether the two lock
5483 * modes conflict. If they conflict, then we must report an error to the
5484 * caller. But if they don't, we can fall through to allow the current
5485 * transaction to lock the tuple.
5487 * Note: the reason we worry about ISUPDATE here is because as soon as
5488 * a transaction ends, all its locks are gone and meaningless, and
5489 * thus we can ignore them; whereas its updates persist. In the
5490 * TransactionIdIsInProgress case, above, we don't need to check
5491 * because we know the lock is still "alive" and thus a conflict needs
5492 * always be checked.
5494 if (!ISUPDATE_from_mxstatus(status))
5495 return HeapTupleMayBeUpdated;
5497 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5498 LOCKMODE_from_mxstatus(wantedstatus)))
5500 return HeapTupleUpdated;
5502 return HeapTupleMayBeUpdated;
5505 /* Not in progress, not aborted, not committed -- must have crashed */
5506 return HeapTupleMayBeUpdated;
5511 * Recursive part of heap_lock_updated_tuple
5513 * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5514 * xid with the given mode; if this tuple is updated, recurse to lock the new
5518 heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
5521 ItemPointerData tupid;
5522 HeapTupleData mytup;
5524 uint16 new_infomask,
5530 TransactionId priorXmax = InvalidTransactionId;
5532 ItemPointerCopy(tid, &tupid);
5537 new_xmax = InvalidTransactionId;
5538 ItemPointerCopy(&tupid, &(mytup.t_self));
5540 if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false, NULL))
5543 * if we fail to find the updated version of the tuple, it's
5544 * because it was vacuumed/pruned away after its creator
5545 * transaction aborted. So behave as if we got to the end of the
5546 * chain, and there's no further tuple to lock: return success to
5549 return HeapTupleMayBeUpdated;
5553 CHECK_FOR_INTERRUPTS();
5554 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5557 * Check the tuple XMIN against prior XMAX, if any. If we reached the
5558 * end of the chain, we're done, so return success.
5560 if (TransactionIdIsValid(priorXmax) &&
5561 !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5564 UnlockReleaseBuffer(buf);
5565 return HeapTupleMayBeUpdated;
5568 old_infomask = mytup.t_data->t_infomask;
5569 old_infomask2 = mytup.t_data->t_infomask2;
5570 xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5573 * If this tuple version has been updated or locked by some concurrent
5574 * transaction(s), what we do depends on whether our lock mode
5575 * conflicts with what those other transactions hold, and also on the
5578 if (!(old_infomask & HEAP_XMAX_INVALID))
5580 TransactionId rawxmax;
5583 rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5584 if (old_infomask & HEAP_XMAX_IS_MULTI)
5588 MultiXactMember *members;
5590 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5591 HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5592 for (i = 0; i < nmembers; i++)
5596 res = test_lockmode_for_conflict(members[i].status,
5602 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5603 XactLockTableWait(members[i].xid, rel,
5609 if (res != HeapTupleMayBeUpdated)
5611 UnlockReleaseBuffer(buf);
5622 MultiXactStatus status;
5625 * For a non-multi Xmax, we first need to compute the
5626 * corresponding MultiXactStatus by using the infomask bits.
5628 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5630 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5631 status = MultiXactStatusForKeyShare;
5632 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5633 status = MultiXactStatusForShare;
5634 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5636 if (old_infomask2 & HEAP_KEYS_UPDATED)
5637 status = MultiXactStatusForUpdate;
5639 status = MultiXactStatusForNoKeyUpdate;
5644 * LOCK_ONLY present alone (a pg_upgraded tuple marked
5645 * as share-locked in the old cluster) shouldn't be
5646 * seen in the middle of an update chain.
5648 elog(ERROR, "invalid lock status in tuple");
5653 /* it's an update, but which kind? */
5654 if (old_infomask2 & HEAP_KEYS_UPDATED)
5655 status = MultiXactStatusUpdate;
5657 status = MultiXactStatusNoKeyUpdate;
5660 res = test_lockmode_for_conflict(status, rawxmax, mode,
5664 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5665 XactLockTableWait(rawxmax, rel, &mytup.t_self,
5669 if (res != HeapTupleMayBeUpdated)
5671 UnlockReleaseBuffer(buf);
5677 /* compute the new Xmax and infomask values for the tuple ... */
5678 compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
5680 &new_xmax, &new_infomask, &new_infomask2);
5682 START_CRIT_SECTION();
5684 /* ... and set them */
5685 HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
5686 mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
5687 mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5688 mytup.t_data->t_infomask |= new_infomask;
5689 mytup.t_data->t_infomask2 |= new_infomask2;
5691 MarkBufferDirty(buf);
5694 if (RelationNeedsWAL(rel))
5696 xl_heap_lock_updated xlrec;
5698 Page page = BufferGetPage(buf);
5701 XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
5703 xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
5704 xlrec.xmax = new_xmax;
5705 xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
5707 XLogRegisterData((char *) &xlrec, SizeOfHeapLockUpdated);
5709 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
5711 PageSetLSN(page, recptr);
5716 /* if we find the end of update chain, we're done. */
5717 if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
5718 ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
5719 HeapTupleHeaderIsOnlyLocked(mytup.t_data))
5721 UnlockReleaseBuffer(buf);
5722 return HeapTupleMayBeUpdated;
5725 /* tail recursion */
5726 priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
5727 ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
5728 UnlockReleaseBuffer(buf);
5733 * heap_lock_updated_tuple
5734 * Follow update chain when locking an updated tuple, acquiring locks (row
5735 * marks) on the updated versions.
5737 * The initial tuple is assumed to be already locked.
5739 * This function doesn't check visibility, it just unconditionally marks the
5740 * tuple(s) as locked. If any tuple in the updated chain is being deleted
5741 * concurrently (or updated with the key being modified), sleep until the
5742 * transaction doing it is finished.
5744 * Note that we don't acquire heavyweight tuple locks on the tuples we walk
5745 * when we have to wait for other transactions to release them, as opposed to
5746 * what heap_lock_tuple does. The reason is that having more than one
5747 * transaction walking the chain is probably uncommon enough that risk of
5748 * starvation is not likely: one of the preconditions for being here is that
5749 * the snapshot in use predates the update that created this tuple (because we
5750 * started at an earlier version of the tuple), but at the same time such a
5751 * transaction cannot be using repeatable read or serializable isolation
5752 * levels, because that would lead to a serializability failure.
5755 heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
5756 TransactionId xid, LockTupleMode mode)
5758 if (!ItemPointerEquals(&tuple->t_self, ctid))
5761 * If this is the first possibly-multixact-able operation in the
5762 * current transaction, set my per-backend OldestMemberMXactId
5763 * setting. We can be certain that the transaction will never become a
5764 * member of any older MultiXactIds than that. (We have to do this
5765 * even if we end up just using our own TransactionId below, since
5766 * some other backend could incorporate our XID into a MultiXact
5767 * immediately afterwards.)
5769 MultiXactIdSetOldestMember();
5771 return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
5774 /* nothing to lock */
5775 return HeapTupleMayBeUpdated;
5779 * heap_finish_speculative - mark speculative insertion as successful
5781 * To successfully finish a speculative insertion we have to clear the speculative
5782 * token from the tuple. To do so, the t_ctid field, which will contain a
5783 * speculative token value, is modified in place to point to the tuple itself,
5784 * which is characteristic of a newly inserted ordinary tuple.
5786 * NB: It is not ok to commit without either finishing or aborting a
5787 * speculative insertion. We could treat speculative tuples of committed
5788 * transactions implicitly as completed, but then we would have to be prepared
5789 * to deal with speculative tokens on committed tuples. That wouldn't be
5790 * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
5791 * but clearing the token at completion isn't very expensive either.
5792 * An explicit confirmation WAL record also makes logical decoding simpler.
5795 heap_finish_speculative(Relation relation, HeapTuple tuple)
5799 OffsetNumber offnum;
5801 HeapTupleHeader htup;
5803 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
5804 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5805 page = (Page) BufferGetPage(buffer);
5807 offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
5808 if (PageGetMaxOffsetNumber(page) >= offnum)
5809 lp = PageGetItemId(page, offnum);
5811 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5812 elog(ERROR, "invalid lp");
5814 htup = (HeapTupleHeader) PageGetItem(page, lp);
5816 /* SpecTokenOffsetNumber should be distinguishable from any real offset */
5817 StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
5818 "invalid speculative token constant");
5820 /* NO EREPORT(ERROR) from here till changes are logged */
5821 START_CRIT_SECTION();
5823 Assert(HeapTupleHeaderIsSpeculative(tuple->t_data));
5825 MarkBufferDirty(buffer);
5828 * Replace the speculative insertion token with a real t_ctid, pointing to
5829 * itself like it does on regular tuples.
5831 htup->t_ctid = tuple->t_self;
5834 if (RelationNeedsWAL(relation))
5836 xl_heap_confirm xlrec;
5839 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5843 /* We want the same filtering on this as on a plain insert */
5844 XLogIncludeOrigin();
5846 XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
5847 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5849 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
5851 PageSetLSN(page, recptr);
5856 UnlockReleaseBuffer(buffer);
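/*
 * Illustrative sketch (not part of this file; an assumption about how the
 * executor's INSERT ... ON CONFLICT path drives speculative insertion; the
 * names rel, tuple, cid, bistate and conflict_detected are placeholders):
 *
 *		token = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
 *		HeapTupleHeaderSetSpeculativeToken(tuple->t_data, token);
 *		heap_insert(rel, tuple, cid, HEAP_INSERT_SPECULATIVE, bistate);
 *		... insert index entries, checking for conflicting rows ...
 *		if (conflict_detected)
 *			heap_abort_speculative(rel, tuple);
 *		else
 *			heap_finish_speculative(rel, tuple);
 *		SpeculativeInsertionLockRelease(GetCurrentTransactionId());
 */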
5860 * heap_abort_speculative - kill a speculatively inserted tuple
5862 * Marks a tuple that was speculatively inserted in the same command as dead,
5863 * by setting its xmin as invalid. That makes it immediately appear as dead
5864 * to all transactions, including our own. In particular, it makes
5865 * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
5866 * inserting a duplicate key value won't unnecessarily wait for our whole
5867 * transaction to finish (it'll just wait for our speculative insertion to
5870 * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
5871 * that arise due to a mutual dependency that is not user visible. By
5872 * definition, unprincipled deadlocks cannot be prevented by the user
5873 * reordering lock acquisition in client code, because the implementation level
5874 * lock acquisitions are not under the user's direct control. If speculative
5875 * inserters did not take this precaution, then under high concurrency they
5876 * could deadlock with each other, which would not be acceptable.
5878 * This is somewhat redundant with heap_delete, but we prefer to have a
5879 * dedicated routine with stripped down requirements.
5881 * This routine does not affect logical decoding as it only looks at
5882 * confirmation records.
5885 heap_abort_speculative(Relation relation, HeapTuple tuple)
5887 TransactionId xid = GetCurrentTransactionId();
5888 ItemPointer tid = &(tuple->t_self);
5895 Assert(ItemPointerIsValid(tid));
5897 block = ItemPointerGetBlockNumber(tid);
5898 buffer = ReadBuffer(relation, block);
5899 page = BufferGetPage(buffer);
5901 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5904 * Page can't be all visible, we just inserted into it, and are still inserting.
5907 Assert(!PageIsAllVisible(page));
5909 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
5910 Assert(ItemIdIsNormal(lp));
5912 tp.t_tableOid = RelationGetRelid(relation);
5913 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
5914 tp.t_len = ItemIdGetLength(lp);
5918 * Sanity check that the tuple really is a speculatively inserted tuple, inserted by us.
5921 if (tp.t_data->t_choice.t_heap.t_xmin != xid)
5922 elog(ERROR, "attempted to kill a tuple inserted by another transaction");
5923 if (!HeapTupleHeaderIsSpeculative(tp.t_data))
5924 elog(ERROR, "attempted to kill a non-speculative tuple");
5925 Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
5928 * No need to check for serializable conflicts here. There is never a
5929 * need for a combocid, either. No need to extract replica identity, or
5930 * do anything special with infomask bits.
5933 START_CRIT_SECTION();
5936 * The tuple will become DEAD immediately. Flag that this page
5937 * immediately is a candidate for pruning by setting xmin to
5938 * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
5939 * inventing a nicer API for this.
5941 Assert(TransactionIdIsValid(RecentGlobalXmin));
5942 PageSetPrunable(page, RecentGlobalXmin);
5944 /* store transaction information of xact deleting the tuple */
5945 tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
5946 tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5949 * Set the tuple header xmin to InvalidTransactionId. This makes the
5950 * tuple immediately invisible to everyone. (In particular, to any
5951 * transactions waiting on the speculative token, woken up later.)
5953 HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
5955 /* Clear the speculative insertion token too */
5956 tp.t_data->t_ctid = tp.t_self;
5958 MarkBufferDirty(buffer);
5963 * The WAL records generated here match heap_delete(). The same recovery
5964 * routines are used.
5966 if (RelationNeedsWAL(relation))
5968 xl_heap_delete xlrec;
5971 xlrec.flags = XLH_DELETE_IS_SUPER;
5972 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
5973 tp.t_data->t_infomask2);
5974 xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
5978 XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
5979 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5981 /* No replica identity & replication origin logged */
5983 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
5985 PageSetLSN(page, recptr);
5990 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
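/*
 * If the killed tuple has externally stored (toasted) attributes, delete
 * the associated toast tuples too, just as a regular heap_delete would.
 */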
5992 if (HeapTupleHasExternal(&tp))
5993 toast_delete(relation, &tp);
5996 * Never need to mark tuple for invalidation, since catalogs don't support
5997 * speculative insertion
6000 /* Now we can release the buffer */
6001 ReleaseBuffer(buffer);
6003 /* count deletion, as we counted the insertion too */
6004 pgstat_count_heap_delete(relation);
6008 * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
6010 * Overwriting violates both MVCC and transactional safety, so the uses
6011 * of this function in Postgres are extremely limited. Nonetheless we
6012 * find some places to use it.
6014 * The tuple cannot change size, and therefore it's reasonable to assume
6015 * that its null bitmap (if any) doesn't change either. So we just
6016 * overwrite the data portion of the tuple without touching the null
6017 * bitmap or any of the header fields.
6019 * tuple is an in-memory tuple structure containing the data to be written
6020 * over the target tuple. Also, tuple->t_self identifies the target tuple.
6023 heap_inplace_update(Relation relation, HeapTuple tuple)
6027 OffsetNumber offnum;
6029 HeapTupleHeader htup;
6034 * For now, parallel operations are required to be strictly read-only.
6035 * Unlike a regular update, this should never create a combo CID, so it
6036 * might be possible to relax this restriction, but not without more
6037 * thought and testing. It's not clear that it would be useful, anyway.
6039 if (IsInParallelMode())
6041 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6042 errmsg("cannot update tuples during a parallel operation")));
6044 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6045 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6046 page = (Page) BufferGetPage(buffer);
6048 offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6049 if (PageGetMaxOffsetNumber(page) >= offnum)
6050 lp = PageGetItemId(page, offnum);
6052 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6053 elog(ERROR, "invalid lp");
6055 htup = (HeapTupleHeader) PageGetItem(page, lp);
6057 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6058 newlen = tuple->t_len - tuple->t_data->t_hoff;
6059 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6060 elog(ERROR, "wrong tuple length");
6062 /* NO EREPORT(ERROR) from here till changes are logged */
6063 START_CRIT_SECTION();
6065 memcpy((char *) htup + htup->t_hoff,
6066 (char *) tuple->t_data + tuple->t_data->t_hoff,
6069 MarkBufferDirty(buffer);
6072 if (RelationNeedsWAL(relation))
6074 xl_heap_inplace xlrec;
6077 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6080 XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6082 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6083 XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6085 /* inplace updates aren't decoded atm, don't log the origin */
6087 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6089 PageSetLSN(page, recptr);
6094 UnlockReleaseBuffer(buffer);
6097 * Send out shared cache inval if necessary. Note that because we only
6098 * pass the new version of the tuple, this mustn't be used for any
6099 * operations that could change catcache lookup keys. But we aren't
6100 * bothering with index updates either, so that's true a fortiori.
6102 if (!IsBootstrapProcessingMode())
6103 CacheInvalidateHeapTuple(relation, tuple, NULL);
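/*
 * Illustrative note (an assumption about callers, not part of this file): a
 * typical in-place updater such as vac_update_relstats() fetches a copy of a
 * pg_class tuple, modifies only fixed-width fields so the tuple size cannot
 * change, and then calls this function; pg_class_rel and ctup below are
 * placeholder names.
 *
 *		pgcform->relpages = (int32) num_pages;
 *		pgcform->reltuples = num_tuples;
 *		heap_inplace_update(pg_class_rel, ctup);
 */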
6106 #define FRM_NOOP 0x0001
6107 #define FRM_INVALIDATE_XMAX 0x0002
6108 #define FRM_RETURN_IS_XID 0x0004
6109 #define FRM_RETURN_IS_MULTI 0x0008
6110 #define FRM_MARK_COMMITTED 0x0010
6114 * Determine what to do during freezing when a tuple is marked by a MultiXactId.
6117 * NB -- this might have the side-effect of creating a new MultiXactId!
6119 * "flags" is an output value; it's used to tell caller what to do on return.
6120 * Possible flags are:
6121 * FRM_NOOP
6122 * don't do anything -- keep existing Xmax
6123 * FRM_INVALIDATE_XMAX
6124 * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
6125 * FRM_RETURN_IS_XID
6126 * The Xid return value is a single update Xid to set as xmax.
6127 * FRM_MARK_COMMITTED
6128 * Xmax can be marked as HEAP_XMAX_COMMITTED
6129 * FRM_RETURN_IS_MULTI
6130 * The return value is a new MultiXactId to set as new Xmax.
6131 * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
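 *
 * For example (illustrative only): a multi at or above cutoff_multi whose
 * only surviving member is an updater that has already committed is reported
 * back as that updater's Xid, with FRM_RETURN_IS_XID | FRM_MARK_COMMITTED set.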
6133 static TransactionId
6134 FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
6135 TransactionId cutoff_xid, MultiXactId cutoff_multi,
6138 TransactionId xid = InvalidTransactionId;
6140 MultiXactMember *members;
6144 MultiXactMember *newmembers;
6146 TransactionId update_xid;
6147 bool update_committed;
6152 /* We should only be called in Multis */
6153 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6155 if (!MultiXactIdIsValid(multi))
6157 /* Ensure infomask bits are appropriately set/reset */
6158 *flags |= FRM_INVALIDATE_XMAX;
6159 return InvalidTransactionId;
6161 else if (MultiXactIdPrecedes(multi, cutoff_multi))
6164 * This old multi cannot possibly have members still running. If it
6165 * was a locker only, it can be removed without any further
6166 * consideration; but if it contained an update, we might need to preserve it.
6169 * Don't assert MultiXactIdIsRunning if the multi came from a
6170 * pg_upgrade'd share-locked tuple, though, as doing that causes an
6171 * error to be raised unnecessarily.
6173 Assert((!(t_infomask & HEAP_LOCK_MASK) &&
6174 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)) ||
6175 !MultiXactIdIsRunning(multi,
6176 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)));
6177 if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6179 *flags |= FRM_INVALIDATE_XMAX;
6180 xid = InvalidTransactionId; /* not strictly necessary */
6184 /* replace multi by update xid */
6185 xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6187 /* wasn't only a lock, xid needs to be valid */
6188 Assert(TransactionIdIsValid(xid));
6191 * If the xid is older than the cutoff, it has to have aborted,
6192 * otherwise the tuple would have gotten pruned away.
6194 if (TransactionIdPrecedes(xid, cutoff_xid))
6196 Assert(!TransactionIdDidCommit(xid));
6197 *flags |= FRM_INVALIDATE_XMAX;
6198 xid = InvalidTransactionId; /* not strictly necessary */
6202 *flags |= FRM_RETURN_IS_XID;
6210 * This multixact might or might not have members still running, but
6211 * we know it's valid and is newer than the cutoff point for multis.
6212 * However, some member(s) of it may be below the cutoff for Xids, so we
6213 * need to walk the whole members array to figure out what to do, if anything.
6217 allow_old = !(t_infomask & HEAP_LOCK_MASK) &&
6218 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask);
6220 GetMultiXactIdMembers(multi, &members, allow_old,
6221 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6224 /* Nothing worth keeping */
6225 *flags |= FRM_INVALIDATE_XMAX;
6226 return InvalidTransactionId;
6229 /* is there anything older than the cutoff? */
6230 need_replace = false;
6231 for (i = 0; i < nmembers; i++)
6233 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6235 need_replace = true;
6241 * In the simplest case, there is no member older than the cutoff; we can
6242 * keep the existing MultiXactId as is.
6248 return InvalidTransactionId;
6252 * If the multi needs to be updated, figure out which members we need to keep.
6256 newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6257 has_lockers = false;
6258 update_xid = InvalidTransactionId;
6259 update_committed = false;
6261 for (i = 0; i < nmembers; i++)
6264 * Determine whether to keep this member or ignore it.
6266 if (ISUPDATE_from_mxstatus(members[i].status))
6268 TransactionId xid = members[i].xid;
6271 * It's an update; should we keep it? If the transaction is known
6272 * aborted or crashed then it's okay to ignore it, otherwise not.
6273 * Note that an updater older than cutoff_xid cannot possibly be
6274 * committed, because HeapTupleSatisfiesVacuum would have returned
6275 * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6277 * As with all tuple visibility routines, it's critical to test
6278 * TransactionIdIsInProgress before TransactionIdDidCommit,
6279 * because of race conditions explained in detail in tqual.c.
6281 if (TransactionIdIsCurrentTransactionId(xid) ||
6282 TransactionIdIsInProgress(xid))
6284 Assert(!TransactionIdIsValid(update_xid));
6287 else if (TransactionIdDidCommit(xid))
6290 * The transaction committed, so we can tell caller to set
6291 * HEAP_XMAX_COMMITTED. (We can only do this because we know
6292 * the transaction is not running.)
6294 Assert(!TransactionIdIsValid(update_xid));
6295 update_committed = true;
6300 * Not in progress, not committed -- must be aborted or crashed; ignore it.
6305 * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6306 * update Xid cannot possibly be older than the xid cutoff.
6308 Assert(!TransactionIdIsValid(update_xid) ||
6309 !TransactionIdPrecedes(update_xid, cutoff_xid));
6312 * If we determined that it's an Xid corresponding to an update
6313 * that must be retained, additionally add it to the list of
6314 * members of the new Multi, in case we end up using that. (We
6315 * might still decide to use only an update Xid and not a multi,
6316 * but it's easier to maintain the list as we walk the old members list.)
6319 if (TransactionIdIsValid(update_xid))
6320 newmembers[nnewmembers++] = members[i];
6324 /* We only keep lockers if they are still running */
6325 if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6326 TransactionIdIsInProgress(members[i].xid))
6328 /* running locker cannot possibly be older than the cutoff */
6329 Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6330 newmembers[nnewmembers++] = members[i];
6338 if (nnewmembers == 0)
6340 /* nothing worth keeping!? Tell caller to remove the whole thing */
6341 *flags |= FRM_INVALIDATE_XMAX;
6342 xid = InvalidTransactionId;
6344 else if (TransactionIdIsValid(update_xid) && !has_lockers)
6347 * If there's a single member and it's an update, pass it back alone
6348 * without creating a new Multi. (XXX we could do this when there's a
6349 * single remaining locker, too, but that would complicate the API too
6350 * much; moreover, the case with the single updater is more
6351 * interesting, because those are longer-lived.)
6353 Assert(nnewmembers == 1);
6354 *flags |= FRM_RETURN_IS_XID;
6355 if (update_committed)
6356 *flags |= FRM_MARK_COMMITTED;
6362 * Create a new multixact with the surviving members of the previous
6363 * one, to set as new Xmax in the tuple.
6365 xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6366 *flags |= FRM_RETURN_IS_MULTI;
6375 * heap_prepare_freeze_tuple
6377 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
6378 * are older than the specified cutoff XID and cutoff MultiXactId. If so,
6379 * set up enough state (in the *frz output argument) to later execute and
6380 * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
6381 * is to be changed.
6383 * Caller is responsible for setting the offset field, if appropriate.
6385 * It is assumed that the caller has checked the tuple with
6386 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
6387 * (else we should be removing the tuple, not freezing it).
6389 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
6390 * XID older than it could neither be running nor seen as running by any
6391 * open transaction. This ensures that the replacement will not change
6392 * anyone's idea of the tuple state.
6393 * Similarly, cutoff_multi must be less than or equal to the smallest
6394 * MultiXactId used by any transaction currently open.
6396 * If the tuple is in a shared buffer, caller must hold an exclusive lock on that buffer.
6399 * NB: It is not enough to set hint bits to indicate something is
6400 * committed/invalid -- they might not be set on a standby, or after crash
6401 * recovery. We really need to remove old xids.
6404 heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
6405 TransactionId cutoff_multi,
6406 xl_heap_freeze_tuple *frz)
6409 bool changed = false;
6410 bool freeze_xmax = false;
6414 frz->t_infomask2 = tuple->t_infomask2;
6415 frz->t_infomask = tuple->t_infomask;
6416 frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
6419 xid = HeapTupleHeaderGetXmin(tuple);
6420 if (TransactionIdIsNormal(xid) &&
6421 TransactionIdPrecedes(xid, cutoff_xid))
6423 frz->t_infomask |= HEAP_XMIN_FROZEN;
6428 * Process xmax. To thoroughly examine the current Xmax value we need to
6429 * resolve a MultiXactId to its member Xids, in case some of them are
6430 * below the given cutoff for Xids. In that case, those values might need
6431 * freezing, too. Also, if a multi needs freezing, we cannot simply take
6432 * it out --- if there's a live updater Xid, it needs to be kept.
6434 * Make sure to keep heap_tuple_needs_freeze in sync with this.
6436 xid = HeapTupleHeaderGetRawXmax(tuple);
6438 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
6440 TransactionId newxmax;
6443 newxmax = FreezeMultiXactId(xid, tuple->t_infomask,
6444 cutoff_xid, cutoff_multi, &flags);
6446 if (flags & FRM_INVALIDATE_XMAX)
6448 else if (flags & FRM_RETURN_IS_XID)
6451 * NB -- some of these transformations are only valid because we
6452 * know the return Xid is a tuple updater (i.e. not merely a
6453 * locker.) Also note that the only reason we don't explicitly
6454 * worry about HEAP_KEYS_UPDATED is because it lives in
6455 * t_infomask2 rather than t_infomask.
6457 frz->t_infomask &= ~HEAP_XMAX_BITS;
6458 frz->xmax = newxmax;
6459 if (flags & FRM_MARK_COMMITTED)
6460 frz->t_infomask |= HEAP_XMAX_COMMITTED;
6463 else if (flags & FRM_RETURN_IS_MULTI)
6469 * We can't use GetMultiXactIdHintBits directly on the new multi
6470 * here; that routine initializes the masks to all zeroes, which
6471 * would lose other bits we need. Doing it this way ensures all
6472 * unrelated bits remain untouched.
6474 frz->t_infomask &= ~HEAP_XMAX_BITS;
6475 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6476 GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
6477 frz->t_infomask |= newbits;
6478 frz->t_infomask2 |= newbits2;
6480 frz->xmax = newxmax;
6486 Assert(flags & FRM_NOOP);
6489 else if (TransactionIdIsNormal(xid) &&
6490 TransactionIdPrecedes(xid, cutoff_xid))
6497 frz->xmax = InvalidTransactionId;
6500 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
6501 * LOCKED. Normalize to INVALID just to be sure no one gets confused.
6502 * Also get rid of the HEAP_KEYS_UPDATED bit.
6504 frz->t_infomask &= ~HEAP_XMAX_BITS;
6505 frz->t_infomask |= HEAP_XMAX_INVALID;
6506 frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
6507 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6512 * Old-style VACUUM FULL is gone, but we have to keep this code as long as
6513 * we support having MOVED_OFF/MOVED_IN tuples in the database.
6515 if (tuple->t_infomask & HEAP_MOVED)
6517 xid = HeapTupleHeaderGetXvac(tuple);
6518 if (TransactionIdIsNormal(xid) &&
6519 TransactionIdPrecedes(xid, cutoff_xid))
6522 * If a MOVED_OFF tuple is not dead, the xvac transaction must
6523 * have failed; whereas a non-dead MOVED_IN tuple must mean the
6524 * xvac transaction succeeded.
6526 if (tuple->t_infomask & HEAP_MOVED_OFF)
6527 frz->frzflags |= XLH_INVALID_XVAC;
6529 frz->frzflags |= XLH_FREEZE_XVAC;
6532 * Might as well fix the hint bits too; usually XMIN_COMMITTED
6533 * will already be set here, but there's a small chance not.
6535 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
6536 frz->t_infomask |= HEAP_XMIN_COMMITTED;
6545 * heap_execute_freeze_tuple
6546 * Execute the prepared freezing of a tuple.
6548 * Caller is responsible for ensuring that no other backend can access the
6549 * storage underlying this tuple, either by holding an exclusive lock on the
6550 * buffer containing it (which is what lazy VACUUM does), or by having it be
6551 * in private storage (which is what CLUSTER and friends do).
6553 * Note: it might seem we could make the changes without exclusive lock, since
6554 * TransactionId read/write is assumed atomic anyway. However there is a race
6555 * condition: someone who just fetched an old XID that we overwrite here could
6556 * conceivably not finish checking the XID against pg_clog before we finish
6557 * the VACUUM and perhaps truncate off the part of pg_clog he needs. Getting
6558 * exclusive lock ensures no other backend is in process of checking the
6559 * tuple status. Also, getting exclusive lock makes it safe to adjust the infomask bits.
6562 * NB: All code in here must be safe to execute during crash recovery!
6565 heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
6567 HeapTupleHeaderSetXmax(tuple, frz->xmax);
6569 if (frz->frzflags & XLH_FREEZE_XVAC)
6570 HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6572 if (frz->frzflags & XLH_INVALID_XVAC)
6573 HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6575 tuple->t_infomask = frz->t_infomask;
6576 tuple->t_infomask2 = frz->t_infomask2;
6581 * Freeze tuple in place, without WAL logging.
6583 * Useful for callers like CLUSTER that perform their own WAL logging.
6586 heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
6587 TransactionId cutoff_multi)
6589 xl_heap_freeze_tuple frz;
6592 do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
6596 * Note that because this is not a WAL-logged operation, we don't need to
6597 * fill in the offset in the freeze record.
6601 heap_execute_freeze_tuple(tuple, &frz);
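/*
 * Illustrative sketch (not part of this file; based on how lazy VACUUM uses
 * the prepare/execute split): a WAL-logging caller collects freeze plans for
 * a page and emits one XLOG_HEAP2_FREEZE_PAGE record covering all of them.
 * The names frozen, nfrozen and the htup_at() lookup are placeholders.
 *
 *		if (heap_prepare_freeze_tuple(htup, FreezeLimit, MultiXactCutoff,
 *									  &frozen[nfrozen]))
 *			frozen[nfrozen++].offset = offnum;
 *		...
 *		START_CRIT_SECTION();
 *		MarkBufferDirty(buf);
 *		for (i = 0; i < nfrozen; i++)
 *			heap_execute_freeze_tuple(htup_at(page, frozen[i].offset),
 *									  &frozen[i]);
 *		if (RelationNeedsWAL(rel))
 *			PageSetLSN(page, log_heap_freeze(rel, buf, FreezeLimit,
 *											 frozen, nfrozen));
 *		END_CRIT_SECTION();
 */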
6606 * For a given MultiXactId, return the hint bits that should be set in the tuple's infomask.
6609 * Normally this should be called for a multixact that was just created, and
6610 * so is on our local cache, so the GetMembers call is fast.
6613 GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
6614 uint16 *new_infomask2)
6617 MultiXactMember *members;
6619 uint16 bits = HEAP_XMAX_IS_MULTI;
6621 bool has_update = false;
6622 LockTupleMode strongest = LockTupleKeyShare;
6625 * We only use this in multis we just created, so they cannot be values pre-dating pg_upgrade.
6628 nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6630 for (i = 0; i < nmembers; i++)
6635 * Remember the strongest lock mode held by any member of the multixact.
6638 mode = TUPLOCK_from_mxstatus(members[i].status);
6639 if (mode > strongest)
6642 /* See what other bits we need */
6643 switch (members[i].status)
6645 case MultiXactStatusForKeyShare:
6646 case MultiXactStatusForShare:
6647 case MultiXactStatusForNoKeyUpdate:
6650 case MultiXactStatusForUpdate:
6651 bits2 |= HEAP_KEYS_UPDATED;
6654 case MultiXactStatusNoKeyUpdate:
6658 case MultiXactStatusUpdate:
6659 bits2 |= HEAP_KEYS_UPDATED;
6665 if (strongest == LockTupleExclusive ||
6666 strongest == LockTupleNoKeyExclusive)
6667 bits |= HEAP_XMAX_EXCL_LOCK;
6668 else if (strongest == LockTupleShare)
6669 bits |= HEAP_XMAX_SHR_LOCK;
6670 else if (strongest == LockTupleKeyShare)
6671 bits |= HEAP_XMAX_KEYSHR_LOCK;
6674 bits |= HEAP_XMAX_LOCK_ONLY;
6679 *new_infomask = bits;
6680 *new_infomask2 = bits2;
6684 * MultiXactIdGetUpdateXid
6686 * Given a multixact Xmax and corresponding infomask, which does not have the
6687 * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
6688 * transaction.
6690 * Caller is expected to check the status of the updating transaction, if
6691 * necessary.
6693 static TransactionId
6694 MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
6696 TransactionId update_xact = InvalidTransactionId;
6697 MultiXactMember *members;
6700 Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
6701 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6704 * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from pre-pg_upgrade.
6707 nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
6713 for (i = 0; i < nmembers; i++)
6715 /* Ignore lockers */
6716 if (!ISUPDATE_from_mxstatus(members[i].status))
6719 /* there can be at most one updater */
6720 Assert(update_xact == InvalidTransactionId);
6721 update_xact = members[i].xid;
6722 #ifndef USE_ASSERT_CHECKING
6725 * in an assert-enabled build, walk the whole array to ensure
6726 * there's no other updater.
6739 * HeapTupleGetUpdateXid
6740 * As above, but use a HeapTupleHeader
6742 * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
6743 * checking the hint bits.
6746 HeapTupleGetUpdateXid(HeapTupleHeader tuple)
6748 return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tuple),
6753 * Does the given multixact conflict with the current transaction grabbing a
6754 * tuple lock of the given strength?
6756 * The passed infomask pairs up with the given multixact in the tuple header.
6759 DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
6760 LockTupleMode lockmode)
6764 MultiXactMember *members;
6765 bool result = false;
6766 LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
6768 allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
6769 nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
6770 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6775 for (i = 0; i < nmembers; i++)
6777 TransactionId memxid;
6778 LOCKMODE memlockmode;
6780 memlockmode = LOCKMODE_from_mxstatus(members[i].status);
6782 /* ignore members that don't conflict with the lock we want */
6783 if (!DoLockModesConflict(memlockmode, wanted))
6786 /* ignore members from current xact */
6787 memxid = members[i].xid;
6788 if (TransactionIdIsCurrentTransactionId(memxid))
6791 if (ISUPDATE_from_mxstatus(members[i].status))
6793 /* ignore aborted updaters */
6794 if (TransactionIdDidAbort(memxid))
6799 /* ignore lockers-only that are no longer in progress */
6800 if (!TransactionIdIsInProgress(memxid))
6805 * Whatever remains is either a live locker that conflicts with the
6806 * lock we want, or an updater that is not aborted; either way it
6807 * conflicts with what we want, so return true.
6819 * Do_MultiXactIdWait
6820 * Actual implementation for the two functions below.
6822 * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
6823 * needed to ensure we only sleep on conflicting members, and the infomask is
6824 * used to optimize multixact access in case it's a lock-only multi); 'nowait'
6825 * indicates whether to use conditional lock acquisition, to allow callers to
6826 * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
6827 * context information for error messages. 'remaining', if not NULL, receives
6828 * the number of members that are still running, including any (non-aborted)
6829 * subtransactions of our own transaction.
6831 * We do this by sleeping on each member using XactLockTableWait. Any
6832 * members that belong to the current backend are *not* waited for, however;
6833 * this would not merely be useless but would lead to Assert failure inside
6834 * XactLockTableWait. By the time this returns, it is certain that all
6835 * transactions *of other backends* that were members of the MultiXactId
6836 * that conflict with the requested status are dead (and no new ones can have
6837 * been added, since it is not legal to add members to an existing MultiXactId).
6840 * But by the time we finish sleeping, someone else may have changed the Xmax
6841 * of the containing tuple, so the caller needs to iterate on us somehow.
6843 * Note that in case we return false, the number of remaining members is
6844 * not to be trusted.
6847 Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
6848 uint16 infomask, bool nowait,
6849 Relation rel, ItemPointer ctid, XLTW_Oper oper,
6854 MultiXactMember *members;
6858 allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
6859 nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
6860 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6866 for (i = 0; i < nmembers; i++)
6868 TransactionId memxid = members[i].xid;
6869 MultiXactStatus memstatus = members[i].status;
6871 if (TransactionIdIsCurrentTransactionId(memxid))
6877 if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
6878 LOCKMODE_from_mxstatus(status)))
6880 if (remaining && TransactionIdIsInProgress(memxid))
6886 * This member conflicts with our multi, so we have to sleep (or
6887 * return failure, if asked to avoid waiting.)
6889 * Note that we don't set up an error context callback ourselves,
6890 * but instead we pass the info down to XactLockTableWait. This
6891 * might seem a bit wasteful because the context is set up and
6892 * torn down for each member of the multixact, but in reality it
6893 * should be barely noticeable, and it avoids duplicate code.
6897 result = ConditionalXactLockTableWait(memxid);
6902 XactLockTableWait(memxid, rel, ctid, oper);
6909 *remaining = remain;
6916 * Sleep on a MultiXactId.
6918 * By the time we finish sleeping, someone else may have changed the Xmax
6919 * of the containing tuple, so the caller needs to iterate on us somehow.
6921 * We return (in *remaining, if not NULL) the number of members that are still
6922 * running, including any (non-aborted) subtransactions of our own transaction.
6925 MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
6926 Relation rel, ItemPointer ctid, XLTW_Oper oper,
6929 (void) Do_MultiXactIdWait(multi, status, infomask, false,
6930 rel, ctid, oper, remaining);
6934 * ConditionalMultiXactIdWait
6935 * As above, but only lock if we can get the lock without blocking.
6937 * By the time we finish sleeping, someone else may have changed the Xmax
6938 * of the containing tuple, so the caller needs to iterate on us somehow.
6940 * If the multixact is now all gone, return true. Returns false if some
6941 * transactions might still be running.
6943 * We return (in *remaining, if not NULL) the number of members that are still
6944 * running, including any (non-aborted) subtransactions of our own transaction.
6947 ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
6948 uint16 infomask, Relation rel, int *remaining)
6950 return Do_MultiXactIdWait(multi, status, infomask, true,
6951 rel, NULL, XLTW_None, remaining);
6955 * heap_tuple_needs_eventual_freeze
6957 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
6958 * will eventually require freezing. Similar to heap_tuple_needs_freeze,
6959 * but there's no cutoff, since we're trying to figure out whether freezing
6960 * will ever be needed, not whether it's needed now.
6963 heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
6968 * If xmin is a normal transaction ID, this tuple is definitely not frozen.
6971 xid = HeapTupleHeaderGetXmin(tuple);
6972 if (TransactionIdIsNormal(xid))
6976 * If xmax is a valid xact or multixact, this tuple is also not frozen.
6978 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
6982 multi = HeapTupleHeaderGetRawXmax(tuple);
6983 if (MultiXactIdIsValid(multi))
6988 xid = HeapTupleHeaderGetRawXmax(tuple);
6989 if (TransactionIdIsNormal(xid))
6993 if (tuple->t_infomask & HEAP_MOVED)
6995 xid = HeapTupleHeaderGetXvac(tuple);
6996 if (TransactionIdIsNormal(xid))
7004 * heap_tuple_needs_freeze
7006 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7007 * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
7009 * It doesn't matter whether the tuple is alive or dead, we are checking
7010 * to see if a tuple needs to be removed or frozen to avoid wraparound.
7012 * NB: Cannot rely on hint bits here, they might not be set after a crash or on a standby.
7016 heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
7017 MultiXactId cutoff_multi, Buffer buf)
7021 xid = HeapTupleHeaderGetXmin(tuple);
7022 if (TransactionIdIsNormal(xid) &&
7023 TransactionIdPrecedes(xid, cutoff_xid))
7027 * The considerations for multixacts are complicated; look at
7028 * heap_freeze_tuple for justifications. This routine had better be in
7029 * sync with that one!
7031 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7035 multi = HeapTupleHeaderGetRawXmax(tuple);
7036 if (!MultiXactIdIsValid(multi))
7038 /* no xmax set, ignore */
7041 else if (MultiXactIdPrecedes(multi, cutoff_multi))
7045 MultiXactMember *members;
7050 /* need to check whether any member of the mxact is too old */
7052 allow_old = !(tuple->t_infomask & HEAP_LOCK_MASK) &&
7053 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask);
7054 nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
7055 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
7057 for (i = 0; i < nmembers; i++)
7059 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
7071 xid = HeapTupleHeaderGetRawXmax(tuple);
7072 if (TransactionIdIsNormal(xid) &&
7073 TransactionIdPrecedes(xid, cutoff_xid))
7077 if (tuple->t_infomask & HEAP_MOVED)
7079 xid = HeapTupleHeaderGetXvac(tuple);
7080 if (TransactionIdIsNormal(xid) &&
7081 TransactionIdPrecedes(xid, cutoff_xid))
7089 * If 'tuple' contains any visible XID greater than latestRemovedXid,
7090 * ratchet forwards latestRemovedXid to the greatest one found.
7091 * This is used as the basis for generating Hot Standby conflicts, so
7092 * if a tuple was never visible then removing it should not conflict with queries.
7096 HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
7097 TransactionId *latestRemovedXid)
7099 TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
7100 TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
7101 TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
7103 if (tuple->t_infomask & HEAP_MOVED)
7105 if (TransactionIdPrecedes(*latestRemovedXid, xvac))
7106 *latestRemovedXid = xvac;
7110 * Ignore tuples inserted by an aborted transaction or if the tuple was
7111 * updated/deleted by the inserting transaction.
7113 * Look for a committed hint bit, or if no xmin bit is set, check clog.
7114 * This needs to work on both master and standby, where it is used to
7115 * assess btree delete records.
7117 if (HeapTupleHeaderXminCommitted(tuple) ||
7118 (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
7121 TransactionIdFollows(xmax, *latestRemovedXid))
7122 *latestRemovedXid = xmax;
7125 /* *latestRemovedXid may still be invalid at end */
7129 * Perform XLogInsert to register a heap cleanup info message. These
7130 * messages are sent once per VACUUM and are required because
7131 * of the phasing of removal operations during a lazy VACUUM.
7132 * See comments for vacuum_log_cleanup_info().
7135 log_heap_cleanup_info(RelFileNode rnode, TransactionId latestRemovedXid)
7137 xl_heap_cleanup_info xlrec;
7141 xlrec.latestRemovedXid = latestRemovedXid;
7144 XLogRegisterData((char *) &xlrec, SizeOfHeapCleanupInfo);
7146 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEANUP_INFO);
7152 * Perform XLogInsert for a heap-clean operation. Caller must already
7153 * have modified the buffer and marked it dirty.
7155 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
7156 * zero-based tuple indexes. Now they are one-based like other uses of OffsetNumber.
7159 * We also include latestRemovedXid, which is the greatest XID present in
7160 * the removed tuples. That allows recovery processing to cancel or wait
7161 * for long standby queries that can still see these tuples.
7164 log_heap_clean(Relation reln, Buffer buffer,
7165 OffsetNumber *redirected, int nredirected,
7166 OffsetNumber *nowdead, int ndead,
7167 OffsetNumber *nowunused, int nunused,
7168 TransactionId latestRemovedXid)
7170 xl_heap_clean xlrec;
7173 /* Caller should not call me on a non-WAL-logged relation */
7174 Assert(RelationNeedsWAL(reln));
7176 xlrec.latestRemovedXid = latestRemovedXid;
7177 xlrec.nredirected = nredirected;
7178 xlrec.ndead = ndead;
7181 XLogRegisterData((char *) &xlrec, SizeOfHeapClean);
7183 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
7186 * The OffsetNumber arrays are not actually in the buffer, but we pretend
7187 * that they are. When XLogInsert stores the whole buffer, the offset
7188 * arrays need not be stored too. Note that even if all three arrays are
7189 * empty, we want to expose the buffer as a candidate for whole-page
7190 * storage, since this record type implies a defragmentation operation
7191 * even if no item pointers changed state.
7193 if (nredirected > 0)
7194 XLogRegisterBufData(0, (char *) redirected,
7195 nredirected * sizeof(OffsetNumber) * 2);
7198 XLogRegisterBufData(0, (char *) nowdead,
7199 ndead * sizeof(OffsetNumber));
7202 XLogRegisterBufData(0, (char *) nowunused,
7203 nunused * sizeof(OffsetNumber));
7205 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEAN);
7211 * Perform XLogInsert for a heap-freeze operation. Caller must have already
7212 * modified the buffer and marked it dirty.
7215 log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
7216 xl_heap_freeze_tuple *tuples, int ntuples)
7218 xl_heap_freeze_page xlrec;
7221 /* Caller should not call me on a non-WAL-logged relation */
7222 Assert(RelationNeedsWAL(reln));
7223 /* nor when there are no tuples to freeze */
7224 Assert(ntuples > 0);
7226 xlrec.cutoff_xid = cutoff_xid;
7227 xlrec.ntuples = ntuples;
7230 XLogRegisterData((char *) &xlrec, SizeOfHeapFreezePage);
7233 * The freeze plan array is not actually in the buffer, but pretend that
7234 * it is. When XLogInsert stores the whole buffer, the freeze plan need
7235 * not be stored too.
7237 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
7238 XLogRegisterBufData(0, (char *) tuples,
7239 ntuples * sizeof(xl_heap_freeze_tuple));
7241 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE);
7247 * Perform XLogInsert for a heap-visible operation. 'block' is the block
7248 * being marked all-visible, and vm_buffer is the buffer containing the
7249 * corresponding visibility map block. Both should have already been modified and dirtied by the caller.
7252 * If checksums are enabled, we also generate a full-page image of
7253 * heap_buffer, if necessary.
7256 log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
7257 TransactionId cutoff_xid, uint8 vmflags)
7259 xl_heap_visible xlrec;
7263 Assert(BufferIsValid(heap_buffer));
7264 Assert(BufferIsValid(vm_buffer));
7266 xlrec.cutoff_xid = cutoff_xid;
7267 xlrec.flags = vmflags;
7269 XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
7271 XLogRegisterBuffer(0, vm_buffer, 0);
7273 flags = REGBUF_STANDARD;
7274 if (!XLogHintBitIsNeeded())
7275 flags |= REGBUF_NO_IMAGE;
7276 XLogRegisterBuffer(1, heap_buffer, flags);
7278 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
7284 * Perform XLogInsert for a heap-update operation. Caller must already
7285 * have modified the buffer(s) and marked them dirty.
7288 log_heap_update(Relation reln, Buffer oldbuf,
7289 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
7290 HeapTuple old_key_tuple,
7291 bool all_visible_cleared, bool new_all_visible_cleared)
7293 xl_heap_update xlrec;
7294 xl_heap_header xlhdr;
7295 xl_heap_header xlhdr_idx;
7297 uint16 prefix_suffix[2];
7298 uint16 prefixlen = 0,
7301 Page page = BufferGetPage(newbuf);
7302 bool need_tuple_data = RelationIsLogicallyLogged(reln);
7306 /* Caller should not call me on a non-WAL-logged relation */
7307 Assert(RelationNeedsWAL(reln));
7311 if (HeapTupleIsHeapOnly(newtup))
7312 info = XLOG_HEAP_HOT_UPDATE;
7314 info = XLOG_HEAP_UPDATE;
7317 * If the old and new tuple are on the same page, we only need to log the
7318 * parts of the new tuple that were changed. That saves on the amount of
7319 * WAL we need to write. Currently, we just count any unchanged bytes in
7320 * the beginning and end of the tuple. That's quick to check, and
7321 * perfectly covers the common case that only one field is updated.
7323 * We could do this even if the old and new tuple are on different pages,
7324 * but only if we don't make a full-page image of the old page, which is
7325 * difficult to know in advance. Also, if the old tuple is corrupt for
7326 * some reason, it would allow the corruption to propagate to the new page,
7327 * so it seems best to avoid. Under the general assumption that most
7328 * updates tend to create the new tuple version on the same page, there
7329 * isn't much to be gained by doing this across pages anyway.
7331 * Skip this if we're taking a full-page image of the new page, as we
7332 * don't include the new tuple in the WAL record in that case. Also
7333 * disable if wal_level='logical', as logical decoding needs to be able to
7334 * read the new tuple in whole from the WAL record alone.
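 *
 * For example (illustrative only): if the old tuple data is "AAAABBBBCCCC"
 * and the new tuple data is "AAAAXXXXCCCC", we record prefixlen = 4 and
 * suffixlen = 4, and only the middle "XXXX" bytes of the new tuple need to be
 * stored in the WAL record.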
7336 if (oldbuf == newbuf && !need_tuple_data &&
7337 !XLogCheckBufferNeedsBackup(newbuf))
7339 char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
7340 char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
7341 int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
7342 int newlen = newtup->t_len - newtup->t_data->t_hoff;
7344 /* Check for common prefix between old and new tuple */
7345 for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
7347 if (newp[prefixlen] != oldp[prefixlen])
7352 * Storing the length of the prefix takes 2 bytes, so we need to save
7353 * at least 3 bytes or there's no point.
7358 /* Same for suffix */
7359 for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
7361 if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
7368 /* Prepare main WAL data chain */
7370 if (all_visible_cleared)
7371 xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
7372 if (new_all_visible_cleared)
7373 xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
7375 xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
7377 xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
7378 if (need_tuple_data)
7380 xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
7383 if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
7384 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
7386 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
7390 /* If new tuple is the single and first tuple on page... */
7391 if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
7392 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
7394 info |= XLOG_HEAP_INIT_PAGE;
7400 /* Prepare WAL data for the old page */
7401 xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
7402 xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
7403 xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
7404 oldtup->t_data->t_infomask2);
7406 /* Prepare WAL data for the new page */
7407 xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
7408 xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
7410 bufflags = REGBUF_STANDARD;
7412 bufflags |= REGBUF_WILL_INIT;
7413 if (need_tuple_data)
7414 bufflags |= REGBUF_KEEP_DATA;
7416 XLogRegisterBuffer(0, newbuf, bufflags);
7417 if (oldbuf != newbuf)
7418 XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
7420 XLogRegisterData((char *) &xlrec, SizeOfHeapUpdate);
7423 * Prepare WAL data for the new tuple.
7425 if (prefixlen > 0 || suffixlen > 0)
7427 if (prefixlen > 0 && suffixlen > 0)
7429 prefix_suffix[0] = prefixlen;
7430 prefix_suffix[1] = suffixlen;
7431 XLogRegisterBufData(0, (char *) &prefix_suffix, sizeof(uint16) * 2);
7433 else if (prefixlen > 0)
7435 XLogRegisterBufData(0, (char *) &prefixlen, sizeof(uint16));
7439 XLogRegisterBufData(0, (char *) &suffixlen, sizeof(uint16));
7443 xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
7444 xlhdr.t_infomask = newtup->t_data->t_infomask;
7445 xlhdr.t_hoff = newtup->t_data->t_hoff;
7446 Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
7449 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
7451 * The 'data' doesn't include the common prefix or suffix.
7453 XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
7456 XLogRegisterBufData(0,
7457 ((char *) newtup->t_data) + SizeofHeapTupleHeader,
7458 newtup->t_len - SizeofHeapTupleHeader - suffixlen);
7463 * Have to write the null bitmap and data after the common prefix as
7464 * two separate rdata entries.
7466 /* bitmap [+ padding] [+ oid] */
7467 if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
7469 XLogRegisterBufData(0,
7470 ((char *) newtup->t_data) + SizeofHeapTupleHeader,
7471 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
7474 /* data after common prefix */
7475 XLogRegisterBufData(0,
7476 ((char *) newtup->t_data) + newtup->t_data->t_hoff + prefixlen,
7477 newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
7480 /* We need to log a tuple identity */
7481 if (need_tuple_data && old_key_tuple)
7483 /* don't really need this, but it's more convenient to decode */
7484 xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
7485 xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
7486 xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
7488 XLogRegisterData((char *) &xlhdr_idx, SizeOfHeapHeader);
7490 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
7491 XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
7492 old_key_tuple->t_len - SizeofHeapTupleHeader);
7495 /* filtering by origin on a row level is much more efficient */
7496 XLogIncludeOrigin();
7498 recptr = XLogInsert(RM_HEAP_ID, info);
7504 * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
7506 * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog tuples.
7510 log_heap_new_cid(Relation relation, HeapTuple tup)
7512 xl_heap_new_cid xlrec;
7515 HeapTupleHeader hdr = tup->t_data;
7517 Assert(ItemPointerIsValid(&tup->t_self));
7518 Assert(tup->t_tableOid != InvalidOid);
7520 xlrec.top_xid = GetTopTransactionId();
7521 xlrec.target_node = relation->rd_node;
7522 xlrec.target_tid = tup->t_self;
7525 * If the tuple got inserted & deleted in the same TX we definitely have a
7526 * combocid, set cmin and cmax.
7528 if (hdr->t_infomask & HEAP_COMBOCID)
7530 Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
7531 Assert(!HeapTupleHeaderXminInvalid(hdr));
7532 xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
7533 xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
7534 xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
7536 /* No combocid, so only cmin or cmax can be set by this TX */
7542 * We need to check for LOCK ONLY because multixacts might be
7543 * transferred to the new tuple in case of FOR KEY SHARE updates in
7544 * which case there will be an xmax, although the tuple just got inserted.
7547 if (hdr->t_infomask & HEAP_XMAX_INVALID ||
7548 HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
7550 xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
7551 xlrec.cmax = InvalidCommandId;
7553 /* Tuple from a different tx updated or deleted. */
7556 xlrec.cmin = InvalidCommandId;
7557 xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
7560 xlrec.combocid = InvalidCommandId;
7564 * Note that we don't need to register the buffer here, because this
7565 * operation does not modify the page. The insert/update/delete that
7566 * called us certainly did, but that's WAL-logged separately.
7569 XLogRegisterData((char *) &xlrec, SizeOfHeapNewCid);
7571 /* will be looked at irrespective of origin */
7573 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
7579 * Build a heap tuple representing the configured REPLICA IDENTITY to represent
7580 * the old tuple in an UPDATE or DELETE.
7582 * Returns NULL if there's no need to log an identity or if there's no suitable
7583 * key defined on the relation.
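 *
 * For example (illustrative only): for a table whose replica identity is a
 * unique index on (id), the old tuple of a DELETE is reduced here to a tuple
 * in which only the id column (plus the OID, if the table has OIDs) is
 * non-null.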
7586 ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *copy)
7588 TupleDesc desc = RelationGetDescr(relation);
7592 char replident = relation->rd_rel->relreplident;
7593 HeapTuple key_tuple = NULL;
7594 bool nulls[MaxHeapAttributeNumber];
7595 Datum values[MaxHeapAttributeNumber];
7600 if (!RelationIsLogicallyLogged(relation))
7603 if (replident == REPLICA_IDENTITY_NOTHING)
7606 if (replident == REPLICA_IDENTITY_FULL)
7609 * When logging the entire old tuple, it very well could contain
7610 * toasted columns. If so, force them to be inlined.
7612 if (HeapTupleHasExternal(tp))
7615 tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7620 /* if the key hasn't changed and we're only logging the key, we're done */
7624 /* find the replica identity index */
7625 replidindex = RelationGetReplicaIndex(relation);
7626 if (!OidIsValid(replidindex))
7628 elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7629 RelationGetRelationName(relation));
7633 idx_rel = RelationIdGetRelation(replidindex);
7634 idx_desc = RelationGetDescr(idx_rel);
7636 /* deform tuple, so we have fast access to columns */
7637 heap_deform_tuple(tp, desc, values, nulls);
7639 /* set all columns to NULL, regardless of whether they actually are */
7640 memset(nulls, 1, sizeof(nulls));
7643 * Now set all columns contained in the index to NOT NULL, they cannot
7644 * currently be NULL.
7646 for (natt = 0; natt < idx_desc->natts; natt++)
7648 int attno = idx_rel->rd_index->indkey.values[natt];
7653 * The OID column can appear in an index definition, but that's
7654 * OK, because we always copy the OID if present (see below).
7655 * Other system columns may not.
7657 if (attno == ObjectIdAttributeNumber)
7659 elog(ERROR, "system column in index");
7661 nulls[attno - 1] = false;
7664 key_tuple = heap_form_tuple(desc, values, nulls);
7666 RelationClose(idx_rel);
7669 * Always copy oids if the table has them, even if not included in the
7670 * index. The space in the logged tuple is used anyway, so there's little
7671 * point in not including the information.
7673 if (relation->rd_rel->relhasoids)
7674 HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
7677 * If the tuple, which by here only contains indexed columns, still has
7678 * toasted columns, force them to be inlined. This is somewhat unlikely
7679 * since there are limits on the size of indexed columns, so we don't
7680 * duplicate toast_flatten_tuple()'s functionality in the above loop over
7681 * the indexed columns, even if it would be more efficient.
7683 if (HeapTupleHasExternal(key_tuple))
7685 HeapTuple oldtup = key_tuple;
7687 key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
7688 heap_freetuple(oldtup);
7695 * Handles CLEANUP_INFO
7698 heap_xlog_cleanup_info(XLogReaderState *record)
7700 xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) XLogRecGetData(record);
7703 ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
7706 * Actual operation is a no-op. Record type exists to provide a means for
7707 * conflict processing to occur before we begin index vacuum actions. See
7708 * vacuumlazy.c and also comments in btvacuumpage()
7711 /* Backup blocks are not used in cleanup_info records */
7712 Assert(!XLogRecHasAnyBlockRefs(record));
7716 * Handles HEAP2_CLEAN record type
7719 heap_xlog_clean(XLogReaderState *record)
7721 XLogRecPtr lsn = record->EndRecPtr;
7722 xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
7727 XLogRedoAction action;
7729 XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
7732 * We're about to remove tuples. In Hot Standby mode, ensure that there are
7733 * no queries running for which the removed tuples are still visible.
7735 * Not all HEAP2_CLEAN records remove tuples with xids, so we only want to
7736 * conflict on the records that cause MVCC failures for user queries. If
7737 * latestRemovedXid is invalid, skip conflict processing.
7739 if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
7740 ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
7743 * If we have a full-page image, restore it (using a cleanup lock) and we're done.
7746 action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true,
7748 if (action == BLK_NEEDS_REDO)
7750 Page page = (Page) BufferGetPage(buffer);
7752 OffsetNumber *redirected;
7753 OffsetNumber *nowdead;
7754 OffsetNumber *nowunused;
7760 redirected = (OffsetNumber *) XLogRecGetBlockData(record, 0, &datalen);
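/*
 * The registered block data packs the three offset arrays back to back:
 * nredirected pairs of item offsets (each redirected item followed by its
 * target), then ndead offsets, with the now-unused offsets filling whatever
 * remains of the data.
 */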
7762 nredirected = xlrec->nredirected;
7763 ndead = xlrec->ndead;
7764 end = (OffsetNumber *) ((char *) redirected + datalen);
7765 nowdead = redirected + (nredirected * 2);
7766 nowunused = nowdead + ndead;
7767 nunused = (end - nowunused);
7768 Assert(nunused >= 0);
7770 /* Update all item pointers per the record, and repair fragmentation */
7771 heap_page_prune_execute(buffer,
7772 redirected, nredirected,
7774 nowunused, nunused);
7776 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
7779 * Note: we don't worry about updating the page's prunability hints.
7780 * At worst this will cause an extra prune cycle to occur soon.
7783 PageSetLSN(page, lsn);
7784 MarkBufferDirty(buffer);
7786 if (BufferIsValid(buffer))
7787 UnlockReleaseBuffer(buffer);
7790 * Update the FSM as well.
7792 * XXX: Don't do this if the page was restored from full page image. We
7793 * don't bother to update the FSM in that case, it doesn't need to be
7794 * totally accurate anyway.
7796 if (action == BLK_NEEDS_REDO)
7797 XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
7801 * Replay XLOG_HEAP2_VISIBLE record.
7803 * The critical integrity requirement here is that we must never end up with
7804 * a situation where the visibility map bit is set, and the page-level
7805 * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
7806 * page modification would fail to clear the visibility map bit.
7809 heap_xlog_visible(XLogReaderState *record)
7811 XLogRecPtr lsn = record->EndRecPtr;
7812 xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
7813 Buffer vmbuffer = InvalidBuffer;
7818 XLogRedoAction action;
7820 XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
7823 * If there are any Hot Standby transactions running that have an xmin
7824 * horizon old enough that this page isn't all-visible for them, they
7825 * might incorrectly decide that an index-only scan can skip a heap fetch.
7827 * NB: It might be better to throw some kind of "soft" conflict here that
7828 * forces any index-only scan that is in flight to perform heap fetches,
7829 * rather than killing the transaction outright.
7832 ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
7835 * Read the heap page, if it still exists. If the heap file has been dropped
7836 * or truncated later in recovery, we don't need to update the page, but we'd
7837 * better still update the visibility map.
7839 action = XLogReadBufferForRedo(record, 1, &buffer);
7840 if (action == BLK_NEEDS_REDO)
7843 * We don't bump the LSN of the heap page when setting the visibility
7844 * map bit (unless checksums or wal_log_hints is enabled, in which
7845 * case we must), because that would generate an unworkable volume of
7846 * full-page writes. This exposes us to torn page hazards, but since
7847 * we're not inspecting the existing page contents in any way, we don't care.
7850 * However, all operations that clear the visibility map bit *do* bump
7851 * the LSN, and those operations will only be replayed if the XLOG LSN
7852 * follows the page LSN. Thus, if the page LSN has advanced past our
7853 * XLOG record's LSN, we mustn't mark the page all-visible, because
7854 * the subsequent update won't be replayed to clear the flag.
7856 page = BufferGetPage(buffer);
7858 if (xlrec->flags & VISIBILITYMAP_ALL_VISIBLE)
7859 PageSetAllVisible(page);
7860 if (xlrec->flags & VISIBILITYMAP_ALL_FROZEN)
7861 PageSetAllFrozen(page);
7863 MarkBufferDirty(buffer);
7865 else if (action == BLK_RESTORED)
7868 * If heap block was backed up, we already restored it and there's
7869 * nothing more to do. (This can only happen with checksums or
7870 * wal_log_hints enabled.)
7873 if (BufferIsValid(buffer))
7874 UnlockReleaseBuffer(buffer);
7877 * Even if we skipped the heap page update due to the LSN interlock, it's
7878 * still safe to update the visibility map. Any WAL record that clears
7879 * the visibility map bit does so before checking the page LSN, so any
7880 * bits that need to be cleared will still be cleared.
7882 if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
7883 &vmbuffer) == BLK_NEEDS_REDO)
7885 Page vmpage = BufferGetPage(vmbuffer);
7888 /* initialize the page if it was read as zeros */
7889 if (PageIsNew(vmpage))
7890 PageInit(vmpage, BLCKSZ, 0);
7893 * XLogReplayBufferExtended locked the buffer. But visibilitymap_set
7894 * will handle locking itself.
7896 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
7898 reln = CreateFakeRelcacheEntry(rnode);
7899 visibilitymap_pin(reln, blkno, &vmbuffer);
7902 * Don't set the bit if replay has already passed this point.
7904 * It might be safe to do this unconditionally; if replay has passed
7905 * this point, we'll replay at least as far this time as we did
7906 * before, and if this bit needs to be cleared, the record responsible
7907 * for doing so would be replayed again and would clear it. For right
7908 * now, out of an abundance of conservatism, we use the same test here
7909 * we did for the heap page. If this results in a dropped bit, no
7910 * real harm is done; and the next VACUUM will fix it.
7912 if (lsn > PageGetLSN(vmpage))
7913 visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
7914 xlrec->cutoff_xid, xlrec->flags);
7916 ReleaseBuffer(vmbuffer);
7917 FreeFakeRelcacheEntry(reln);
7919 else if (BufferIsValid(vmbuffer))
7920 UnlockReleaseBuffer(vmbuffer);
7924 * Replay XLOG_HEAP2_FREEZE_PAGE records
7927 heap_xlog_freeze_page(XLogReaderState *record)
7929 XLogRecPtr lsn = record->EndRecPtr;
7930 xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) XLogRecGetData(record);
7931 TransactionId cutoff_xid = xlrec->cutoff_xid;
7936 * In Hot Standby mode, ensure that there are no queries running which still
7937 * consider the frozen xids as running.
7942 TransactionId latestRemovedXid = cutoff_xid;
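/*
 * Freezing removes visibility information only for xids older than
 * cutoff_xid, so the latest such xid is cutoff_xid - 1; retreat to get the
 * conflict horizon.
 */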
7944 TransactionIdRetreat(latestRemovedXid);
7946 XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
7947 ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rnode);
7950 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
7952 Page page = BufferGetPage(buffer);
7953 xl_heap_freeze_tuple *tuples;
7955 tuples = (xl_heap_freeze_tuple *) XLogRecGetBlockData(record, 0, NULL);
7957 /* now execute freeze plan for each frozen tuple */
7958 for (ntup = 0; ntup < xlrec->ntuples; ntup++)
7960 xl_heap_freeze_tuple *xlrec_tp;
7962 HeapTupleHeader tuple;
7964 xlrec_tp = &tuples[ntup];
7965 lp = PageGetItemId(page, xlrec_tp->offset); /* offsets are one-based */
7966 tuple = (HeapTupleHeader) PageGetItem(page, lp);
7968 heap_execute_freeze_tuple(tuple, xlrec_tp);
7971 PageSetLSN(page, lsn);
7972 MarkBufferDirty(buffer);
7974 if (BufferIsValid(buffer))
7975 UnlockReleaseBuffer(buffer);
7979 * Given an "infobits" field from an XLog record, set the correct bits in the
7980 * given infomask and infomask2 for the tuple touched by the record.
7982 * (This is the reverse of compute_infobits).
7985 fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
7987 *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
7988 HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
7989 *infomask2 &= ~HEAP_KEYS_UPDATED;
7991 if (infobits & XLHL_XMAX_IS_MULTI)
7992 *infomask |= HEAP_XMAX_IS_MULTI;
7993 if (infobits & XLHL_XMAX_LOCK_ONLY)
7994 *infomask |= HEAP_XMAX_LOCK_ONLY;
7995 if (infobits & XLHL_XMAX_EXCL_LOCK)
7996 *infomask |= HEAP_XMAX_EXCL_LOCK;
7997 /* note HEAP_XMAX_SHR_LOCK isn't considered here */
7998 if (infobits & XLHL_XMAX_KEYSHR_LOCK)
7999 *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8001 if (infobits & XLHL_KEYS_UPDATED)
8002 *infomask2 |= HEAP_KEYS_UPDATED;
8006 heap_xlog_delete(XLogReaderState *record)
8008 XLogRecPtr lsn = record->EndRecPtr;
8009 xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
8013 HeapTupleHeader htup;
8015 RelFileNode target_node;
8016 ItemPointerData target_tid;
8018 XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
8019 ItemPointerSetBlockNumber(&target_tid, blkno);
8020 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
8023 * The visibility map may need to be fixed even if the heap page is
8024 * already up-to-date.
8026 if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
8028 Relation reln = CreateFakeRelcacheEntry(target_node);
8029 Buffer vmbuffer = InvalidBuffer;
8031 visibilitymap_pin(reln, blkno, &vmbuffer);
8032 visibilitymap_clear(reln, blkno, vmbuffer);
8033 ReleaseBuffer(vmbuffer);
8034 FreeFakeRelcacheEntry(reln);
8037 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8039 page = BufferGetPage(buffer);
8041 if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
8042 lp = PageGetItemId(page, xlrec->offnum);
8044 if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
8045 elog(PANIC, "invalid lp");
8047 htup = (HeapTupleHeader) PageGetItem(page, lp);
8049 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
8050 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
8051 HeapTupleHeaderClearHotUpdated(htup);
8052 fix_infomask_from_infobits(xlrec->infobits_set,
8053 &htup->t_infomask, &htup->t_infomask2);
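/*
 * A super-delete (XLH_DELETE_IS_SUPER) removes a speculatively inserted
 * tuple whose insertion was aborted; the tuple is killed by invalidating
 * xmin rather than by setting xmax.
 */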
8054 if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
8055 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
8057 HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
8058 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
8060 /* Mark the page as a candidate for pruning */
8061 PageSetPrunable(page, XLogRecGetXid(record));
8063 if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
8064 PageClearAllVisible(page);
8066 /* Make sure there is no forward chain link in t_ctid */
8067 htup->t_ctid = target_tid;
8068 PageSetLSN(page, lsn);
8069 MarkBufferDirty(buffer);
8071 if (BufferIsValid(buffer))
8072 UnlockReleaseBuffer(buffer);
8076 heap_xlog_insert(XLogReaderState *record)
8078 XLogRecPtr lsn = record->EndRecPtr;
8079 xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
8084 HeapTupleHeaderData hdr;
8085 char data[MaxHeapTupleSize];
8087 HeapTupleHeader htup;
8088 xl_heap_header xlhdr;
8091 RelFileNode target_node;
8093 ItemPointerData target_tid;
8094 XLogRedoAction action;
8096 XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
8097 ItemPointerSetBlockNumber(&target_tid, blkno);
8098 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
8101 * The visibility map may need to be fixed even if the heap page is
8102 * already up-to-date.
8104 if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
8106 Relation reln = CreateFakeRelcacheEntry(target_node);
8107 Buffer vmbuffer = InvalidBuffer;
8109 visibilitymap_pin(reln, blkno, &vmbuffer);
8110 visibilitymap_clear(reln, blkno, vmbuffer);
8111 ReleaseBuffer(vmbuffer);
8112 FreeFakeRelcacheEntry(reln);
8116 * If we inserted the first and only tuple on the page, re-initialize the
8117 * page from scratch.
8119 if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
8121 buffer = XLogInitBufferForRedo(record, 0);
8122 page = BufferGetPage(buffer);
8123 PageInit(page, BufferGetPageSize(buffer), 0);
8124 action = BLK_NEEDS_REDO;
8127 action = XLogReadBufferForRedo(record, 0, &buffer);
8128 if (action == BLK_NEEDS_REDO)
8133 page = BufferGetPage(buffer);
8135 if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
8136 elog(PANIC, "invalid max offset number");
8138 data = XLogRecGetBlockData(record, 0, &datalen);
8140 newlen = datalen - SizeOfHeapHeader;
8141 Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
8142 memcpy((char *) &xlhdr, data, SizeOfHeapHeader);
8143 data += SizeOfHeapHeader;
8146 MemSet((char *) htup, 0, SizeofHeapTupleHeader);
8147 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
8148 memcpy((char *) htup + SizeofHeapTupleHeader,
8151 newlen += SizeofHeapTupleHeader;
8152 htup->t_infomask2 = xlhdr.t_infomask2;
8153 htup->t_infomask = xlhdr.t_infomask;
8154 htup->t_hoff = xlhdr.t_hoff;
8155 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
8156 HeapTupleHeaderSetCmin(htup, FirstCommandId);
8157 htup->t_ctid = target_tid;
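/*
 * Place the reconstructed tuple at exactly the offset recorded in the WAL
 * record, so the replayed page matches the page as it was at WAL-write time.
 */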
8159 if (PageAddItem(page, (Item) htup, newlen, xlrec->offnum,
8160 true, true) == InvalidOffsetNumber)
8161 elog(PANIC, "failed to add tuple");
8163 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
8165 PageSetLSN(page, lsn);
8167 if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
8168 PageClearAllVisible(page);
8170 MarkBufferDirty(buffer);
8172 if (BufferIsValid(buffer))
8173 UnlockReleaseBuffer(buffer);
8176 * If the page is running low on free space, update the FSM as well.
8177 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
8178 * better than that without knowing the fill-factor for the table.
8180 * XXX: Don't do this if the page was restored from a full-page image. We
8181 * don't bother to update the FSM in that case; it doesn't need to be
8182 * totally accurate anyway.
8184 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
8185 XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
8189 * Handles MULTI_INSERT record type.
8192 heap_xlog_multi_insert(XLogReaderState *record)
8194 XLogRecPtr lsn = record->EndRecPtr;
8195 xl_heap_multi_insert *xlrec;
8202 HeapTupleHeaderData hdr;
8203 char data[MaxHeapTupleSize];
8205 HeapTupleHeader htup;
8209 bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
8210 XLogRedoAction action;
8213 * Insertion doesn't overwrite MVCC data, so no conflict processing is required.
8216 xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
8218 XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
8221 * The visibility map may need to be fixed even if the heap page is
8222 * already up-to-date.
8224 if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
8226 Relation reln = CreateFakeRelcacheEntry(rnode);
8227 Buffer vmbuffer = InvalidBuffer;
8229 visibilitymap_pin(reln, blkno, &vmbuffer);
8230 visibilitymap_clear(reln, blkno, vmbuffer);
8231 ReleaseBuffer(vmbuffer);
8232 FreeFakeRelcacheEntry(reln);
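/*
 * As in heap_xlog_insert, an XLOG_HEAP_INIT_PAGE record means the target
 * page is reinitialized from scratch rather than read from disk.
 */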
8237 buffer = XLogInitBufferForRedo(record, 0);
8238 page = BufferGetPage(buffer);
8239 PageInit(page, BufferGetPageSize(buffer), 0);
8240 action = BLK_NEEDS_REDO;
8243 action = XLogReadBufferForRedo(record, 0, &buffer);
8244 if (action == BLK_NEEDS_REDO)
8250 /* Tuples are stored as block data */
8251 tupdata = XLogRecGetBlockData(record, 0, &len);
8252 endptr = tupdata + len;
8254 page = (Page) BufferGetPage(buffer);
8256 for (i = 0; i < xlrec->ntuples; i++)
8258 OffsetNumber offnum;
8259 xl_multi_insert_tuple *xlhdr;
8262 * If we're reinitializing the page, the tuples are stored in
8263 * order from FirstOffsetNumber. Otherwise there's an array of
8264 * offsets in the WAL record, and the tuples come after that.
8267 offnum = FirstOffsetNumber + i;
8269 offnum = xlrec->offsets[i];
8270 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
8271 elog(PANIC, "invalid max offset number");
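/*
 * Each tuple in the block data begins with a short-aligned
 * xl_multi_insert_tuple header, immediately followed by the tuple data.
 */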
8273 xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
8274 tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
8276 newlen = xlhdr->datalen;
8277 Assert(newlen <= MaxHeapTupleSize);
8279 MemSet((char *) htup, 0, SizeofHeapTupleHeader);
8280 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
8281 memcpy((char *) htup + SizeofHeapTupleHeader,
8286 newlen += SizeofHeapTupleHeader;
8287 htup->t_infomask2 = xlhdr->t_infomask2;
8288 htup->t_infomask = xlhdr->t_infomask;
8289 htup->t_hoff = xlhdr->t_hoff;
8290 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
8291 HeapTupleHeaderSetCmin(htup, FirstCommandId);
8292 ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
8293 ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
8295 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
8296 if (offnum == InvalidOffsetNumber)
8297 elog(PANIC, "failed to add tuple");
8299 if (tupdata != endptr)
8300 elog(PANIC, "total tuple length mismatch");
8302 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
8304 PageSetLSN(page, lsn);
8306 if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
8307 PageClearAllVisible(page);
8309 MarkBufferDirty(buffer);
8311 if (BufferIsValid(buffer))
8312 UnlockReleaseBuffer(buffer);
8315 * If the page is running low on free space, update the FSM as well.
8316 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
8317 * better than that without knowing the fill-factor for the table.
8319 * XXX: Don't do this if the page was restored from a full-page image. We
8320 * don't bother to update the FSM in that case; it doesn't need to be
8321 * totally accurate anyway.
8323 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
8324 XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
8328 * Handles UPDATE and HOT_UPDATE
8331 heap_xlog_update(XLogReaderState *record, bool hot_update)
8333 XLogRecPtr lsn = record->EndRecPtr;
8334 xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
8338 ItemPointerData newtid;
8342 OffsetNumber offnum;
8344 HeapTupleData oldtup;
8345 HeapTupleHeader htup;
8346 uint16 prefixlen = 0,
8351 HeapTupleHeaderData hdr;
8352 char data[MaxHeapTupleSize];
8354 xl_heap_header xlhdr;
8357 XLogRedoAction oldaction;
8358 XLogRedoAction newaction;
8360 /* initialize to keep the compiler quiet */
8361 oldtup.t_data = NULL;
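/*
 * Block 0 of the record is the page receiving the new tuple version; block
 * 1, when present, holds the old version. Block 1 is omitted when both
 * versions are on the same page.
 */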
8364 XLogRecGetBlockTag(record, 0, &rnode, NULL, &newblk);
8365 if (XLogRecGetBlockTag(record, 1, NULL, NULL, &oldblk))
8367 /* HOT updates are never done across pages */
8368 Assert(!hot_update);
8373 ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
8376 * The visibility map may need to be fixed even if the heap page is
8377 * already up-to-date.
8379 if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
8381 Relation reln = CreateFakeRelcacheEntry(rnode);
8382 Buffer vmbuffer = InvalidBuffer;
8384 visibilitymap_pin(reln, oldblk, &vmbuffer);
8385 visibilitymap_clear(reln, oldblk, vmbuffer);
8386 ReleaseBuffer(vmbuffer);
8387 FreeFakeRelcacheEntry(reln);
8391 * In normal operation, it is important to lock the two pages in
8392 * page-number order, to avoid possible deadlocks against other update
8393 * operations going the other way. However, during WAL replay there can
8394 * be no other update happening, so we don't need to worry about that. But
8395 * we *do* need to worry that we don't expose an inconsistent state to Hot
8396 * Standby queries --- so the original page can't be unlocked before we've
8397 * added the new tuple to the new page.
8400 /* Deal with old tuple version */
8401 oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
8403 if (oldaction == BLK_NEEDS_REDO)
8405 page = BufferGetPage(obuffer);
8406 offnum = xlrec->old_offnum;
8407 if (PageGetMaxOffsetNumber(page) >= offnum)
8408 lp = PageGetItemId(page, offnum);
8410 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8411 elog(PANIC, "invalid lp");
8413 htup = (HeapTupleHeader) PageGetItem(page, lp);
8415 oldtup.t_data = htup;
8416 oldtup.t_len = ItemIdGetLength(lp);
8418 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
8419 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
8421 HeapTupleHeaderSetHotUpdated(htup);
8423 HeapTupleHeaderClearHotUpdated(htup);
8424 fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
8425 &htup->t_infomask2);
8426 HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
8427 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
8428 /* Set forward chain link in t_ctid */
8429 htup->t_ctid = newtid;
8431 /* Mark the page as a candidate for pruning */
8432 PageSetPrunable(page, XLogRecGetXid(record));
8434 if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
8435 PageClearAllVisible(page);
8437 PageSetLSN(page, lsn);
8438 MarkBufferDirty(obuffer);
8442 * Read the page the new tuple goes into, if different from old.
8444 if (oldblk == newblk)
8447 newaction = oldaction;
8449 else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
8451 nbuffer = XLogInitBufferForRedo(record, 0);
8452 page = (Page) BufferGetPage(nbuffer);
8453 PageInit(page, BufferGetPageSize(nbuffer), 0);
8454 newaction = BLK_NEEDS_REDO;
8457 newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
8460 * The visibility map may need to be fixed even if the heap page is
8461 * already up-to-date.
8463 if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
8465 Relation reln = CreateFakeRelcacheEntry(rnode);
8466 Buffer vmbuffer = InvalidBuffer;
8468 visibilitymap_pin(reln, newblk, &vmbuffer);
8469 visibilitymap_clear(reln, newblk, vmbuffer);
8470 ReleaseBuffer(vmbuffer);
8471 FreeFakeRelcacheEntry(reln);
8474 /* Deal with new tuple */
8475 if (newaction == BLK_NEEDS_REDO)
8482 recdata = XLogRecGetBlockData(record, 0, &datalen);
8483 recdata_end = recdata + datalen;
8485 page = BufferGetPage(nbuffer);
8487 offnum = xlrec->new_offnum;
8488 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
8489 elog(PANIC, "invalid max offset number");
8491 if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
8493 Assert(newblk == oldblk);
8494 memcpy(&prefixlen, recdata, sizeof(uint16));
8495 recdata += sizeof(uint16);
8497 if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
8499 Assert(newblk == oldblk);
8500 memcpy(&suffixlen, recdata, sizeof(uint16));
8501 recdata += sizeof(uint16);
8504 memcpy((char *) &xlhdr, recdata, SizeOfHeapHeader);
8505 recdata += SizeOfHeapHeader;
8507 tuplen = recdata_end - recdata;
8508 Assert(tuplen <= MaxHeapTupleSize);
8511 MemSet((char *) htup, 0, SizeofHeapTupleHeader);
8514 * Reconstruct the new tuple using the prefix and/or suffix from the
8515 * old tuple, and the data stored in the WAL record.
8517 newp = (char *) htup + SizeofHeapTupleHeader;
8522 /* copy bitmap [+ padding] [+ oid] from WAL record */
8523 len = xlhdr.t_hoff - SizeofHeapTupleHeader;
8524 memcpy(newp, recdata, len);
8528 /* copy prefix from old tuple */
8529 memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
8532 /* copy new tuple data from WAL record */
8533 len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
8534 memcpy(newp, recdata, len);
8541 * copy bitmap [+ padding] [+ oid] + data from record, all in one go
8544 memcpy(newp, recdata, tuplen);
8548 Assert(recdata == recdata_end);
8550 /* copy suffix from old tuple */
8552 memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
8554 newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
8555 htup->t_infomask2 = xlhdr.t_infomask2;
8556 htup->t_infomask = xlhdr.t_infomask;
8557 htup->t_hoff = xlhdr.t_hoff;
8559 HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
8560 HeapTupleHeaderSetCmin(htup, FirstCommandId);
8561 HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
8562 /* Make sure there is no forward chain link in t_ctid */
8563 htup->t_ctid = newtid;
8565 offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
8566 if (offnum == InvalidOffsetNumber)
8567 elog(PANIC, "failed to add tuple");
8569 if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
8570 PageClearAllVisible(page);
8572 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
8574 PageSetLSN(page, lsn);
8575 MarkBufferDirty(nbuffer);
8578 if (BufferIsValid(nbuffer) && nbuffer != obuffer)
8579 UnlockReleaseBuffer(nbuffer);
8580 if (BufferIsValid(obuffer))
8581 UnlockReleaseBuffer(obuffer);
8584 * If the new page is running low on free space, update the FSM as well.
8585 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
8586 * better than that without knowing the fill-factor for the table.
8588 * However, don't update the FSM on HOT updates, because after crash
8589 * recovery, either the old or the new tuple will certainly be dead and
8590 * prunable. After pruning, the page will have roughly as much free space
8591 * as it did before the update, assuming the new tuple is about the same
8592 * size as the old one.
8594 * XXX: Don't do this if the page was restored from a full-page image. We
8595 * don't bother to update the FSM in that case; it doesn't need to be
8596 * totally accurate anyway.
8598 if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
8599 XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
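/*
 * Replay XLOG_HEAP_CONFIRM: a speculative insertion (INSERT ... ON CONFLICT)
 * has been confirmed, so restore a normal self-pointing t_ctid in place of
 * the speculative insertion token.
 */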
8603 heap_xlog_confirm(XLogReaderState *record)
8605 XLogRecPtr lsn = record->EndRecPtr;
8606 xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
8609 OffsetNumber offnum;
8611 HeapTupleHeader htup;
8613 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8615 page = BufferGetPage(buffer);
8617 offnum = xlrec->offnum;
8618 if (PageGetMaxOffsetNumber(page) >= offnum)
8619 lp = PageGetItemId(page, offnum);
8621 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8622 elog(PANIC, "invalid lp");
8624 htup = (HeapTupleHeader) PageGetItem(page, lp);
8627 * Confirm tuple as actually inserted
8629 ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
8631 PageSetLSN(page, lsn);
8632 MarkBufferDirty(buffer);
8634 if (BufferIsValid(buffer))
8635 UnlockReleaseBuffer(buffer);
8639 heap_xlog_lock(XLogReaderState *record)
8641 XLogRecPtr lsn = record->EndRecPtr;
8642 xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
8645 OffsetNumber offnum;
8647 HeapTupleHeader htup;
8649 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8651 page = (Page) BufferGetPage(buffer);
8653 offnum = xlrec->offnum;
8654 if (PageGetMaxOffsetNumber(page) >= offnum)
8655 lp = PageGetItemId(page, offnum);
8657 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8658 elog(PANIC, "invalid lp");
8660 htup = (HeapTupleHeader) PageGetItem(page, lp);
8662 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
8663 &htup->t_infomask2);
8666 * Clear relevant update flags, but only if the modified infomask says
8667 * there's no update.
8669 if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
8671 HeapTupleHeaderClearHotUpdated(htup);
8672 /* Make sure there is no forward chain link in t_ctid */
8673 ItemPointerSet(&htup->t_ctid,
8674 BufferGetBlockNumber(buffer),
8677 HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
8678 HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
8679 PageSetLSN(page, lsn);
8680 MarkBufferDirty(buffer);
8682 if (BufferIsValid(buffer))
8683 UnlockReleaseBuffer(buffer);
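/*
 * Replay XLOG_HEAP2_LOCK_UPDATED: a lock acquired on an updated version of a
 * tuple while following its update chain.
 */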
8687 heap_xlog_lock_updated(XLogReaderState *record)
8689 XLogRecPtr lsn = record->EndRecPtr;
8690 xl_heap_lock_updated *xlrec;
8693 OffsetNumber offnum;
8695 HeapTupleHeader htup;
8697 xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);
8699 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8701 page = BufferGetPage(buffer);
8703 offnum = xlrec->offnum;
8704 if (PageGetMaxOffsetNumber(page) >= offnum)
8705 lp = PageGetItemId(page, offnum);
8707 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8708 elog(PANIC, "invalid lp");
8710 htup = (HeapTupleHeader) PageGetItem(page, lp);
8712 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
8713 &htup->t_infomask2);
8714 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
8716 PageSetLSN(page, lsn);
8717 MarkBufferDirty(buffer);
8719 if (BufferIsValid(buffer))
8720 UnlockReleaseBuffer(buffer);
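/*
 * Replay XLOG_HEAP_INPLACE: a non-transactional, in-place overwrite of an
 * existing tuple's data, as performed by heap_inplace_update for certain
 * system catalog updates.
 */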
8724 heap_xlog_inplace(XLogReaderState *record)
8726 XLogRecPtr lsn = record->EndRecPtr;
8727 xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
8730 OffsetNumber offnum;
8732 HeapTupleHeader htup;
8736 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
8738 char *newtup = XLogRecGetBlockData(record, 0, &newlen);
8740 page = BufferGetPage(buffer);
8742 offnum = xlrec->offnum;
8743 if (PageGetMaxOffsetNumber(page) >= offnum)
8744 lp = PageGetItemId(page, offnum);
8746 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
8747 elog(PANIC, "invalid lp");
8749 htup = (HeapTupleHeader) PageGetItem(page, lp);
8751 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
8752 if (oldlen != newlen)
8753 elog(PANIC, "wrong tuple length");
8755 memcpy((char *) htup + htup->t_hoff, newtup, newlen);
8757 PageSetLSN(page, lsn);
8758 MarkBufferDirty(buffer);
8760 if (BufferIsValid(buffer))
8761 UnlockReleaseBuffer(buffer);
8765 heap_redo(XLogReaderState *record)
8767 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8770 * These operations don't overwrite MVCC data so no conflict processing is
8771 * required. The ones in heap2 rmgr do.
8774 switch (info & XLOG_HEAP_OPMASK)
8776 case XLOG_HEAP_INSERT:
8777 heap_xlog_insert(record);
8779 case XLOG_HEAP_DELETE:
8780 heap_xlog_delete(record);
8782 case XLOG_HEAP_UPDATE:
8783 heap_xlog_update(record, false);
8785 case XLOG_HEAP_HOT_UPDATE:
8786 heap_xlog_update(record, true);
8788 case XLOG_HEAP_CONFIRM:
8789 heap_xlog_confirm(record);
8791 case XLOG_HEAP_LOCK:
8792 heap_xlog_lock(record);
8794 case XLOG_HEAP_INPLACE:
8795 heap_xlog_inplace(record);
8798 elog(PANIC, "heap_redo: unknown op code %u", info);
8803 heap2_redo(XLogReaderState *record)
8805 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8807 switch (info & XLOG_HEAP_OPMASK)
8809 case XLOG_HEAP2_CLEAN:
8810 heap_xlog_clean(record);
8812 case XLOG_HEAP2_FREEZE_PAGE:
8813 heap_xlog_freeze_page(record);
8815 case XLOG_HEAP2_CLEANUP_INFO:
8816 heap_xlog_cleanup_info(record);
8818 case XLOG_HEAP2_VISIBLE:
8819 heap_xlog_visible(record);
8821 case XLOG_HEAP2_MULTI_INSERT:
8822 heap_xlog_multi_insert(record);
8824 case XLOG_HEAP2_LOCK_UPDATED:
8825 heap_xlog_lock_updated(record);
8827 case XLOG_HEAP2_NEW_CID:
8830 * Nothing to do on a real replay; only used during logical decoding.
8834 case XLOG_HEAP2_REWRITE:
8835 heap_xlog_logical_rewrite(record);
8838 elog(PANIC, "heap2_redo: unknown op code %u", info);
8843 * heap_sync - sync a heap, for use when no WAL has been written
8845 * This forces the heap contents (including TOAST heap if any) down to disk.
8846 * If we skipped using WAL, and WAL is otherwise needed, we must force the
8847 * relation down to disk before it's safe to commit the transaction. This
8848 * requires writing out any dirty buffers and then doing a forced fsync.
8850 * Indexes are not touched. (Currently, index operations associated with
8851 * the commands that use this are WAL-logged and so do not need fsync.
8852 * That behavior might change someday, but in any case it's likely that
8853 * any fsync decisions required would be per-index and hence not appropriate to be done here.)
8857 heap_sync(Relation rel)
8859 /* non-WAL-logged tables never need fsync */
8860 if (!RelationNeedsWAL(rel))
8864 FlushRelationBuffers(rel);
8865 /* FlushRelationBuffers will have opened rd_smgr */
8866 smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);
8868 /* FSM is not critical, don't bother syncing it */
8870 /* toast heap, if any */
8871 if (OidIsValid(rel->rd_rel->reltoastrelid))
8875 toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
8876 FlushRelationBuffers(toastrel);
8877 smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
8878 heap_close(toastrel, AccessShareLock);