1 /*-------------------------------------------------------------------------
4 * heap access method code
6 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.273 2009/01/01 17:23:35 momjian Exp $
15 * relation_open - open any relation by relation OID
16 * relation_openrv - open any relation specified by a RangeVar
17 * relation_close - close any relation
18 * heap_open - open a heap relation by relation OID
19 * heap_openrv - open a heap relation specified by a RangeVar
20 * heap_close - (now just a macro for relation_close)
21 * heap_beginscan - begin relation scan
22 * heap_rescan - restart a relation scan
23 * heap_endscan - end relation scan
24 * heap_getnext - retrieve next tuple in scan
25 * heap_fetch - retrieve tuple with given tid
26 * heap_insert - insert tuple into a relation
27 * heap_delete - delete a tuple from a relation
28 * heap_update - replace a tuple in a relation with another tuple
29 * heap_markpos - mark scan position
30 * heap_restrpos - restore position to marked location
31 * heap_sync - sync heap, for when no WAL has been written
34 * This file contains the heap_ routines which implement
35 * the POSTGRES heap access method used for all POSTGRES relations.
38 *-------------------------------------------------------------------------
42 #include "access/heapam.h"
43 #include "access/hio.h"
44 #include "access/multixact.h"
45 #include "access/relscan.h"
46 #include "access/sysattr.h"
47 #include "access/transam.h"
48 #include "access/tuptoaster.h"
49 #include "access/valid.h"
50 #include "access/visibilitymap.h"
51 #include "access/xact.h"
52 #include "access/xlogutils.h"
53 #include "catalog/catalog.h"
54 #include "catalog/namespace.h"
55 #include "miscadmin.h"
57 #include "storage/bufmgr.h"
58 #include "storage/freespace.h"
59 #include "storage/lmgr.h"
60 #include "storage/procarray.h"
61 #include "storage/smgr.h"
62 #include "utils/datum.h"
63 #include "utils/inval.h"
64 #include "utils/lsyscache.h"
65 #include "utils/relcache.h"
66 #include "utils/snapmgr.h"
67 #include "utils/syscache.h"
68 #include "utils/tqual.h"
72 bool synchronize_seqscans = true;
75 static HeapScanDesc heap_beginscan_internal(Relation relation,
77 int nkeys, ScanKey key,
78 bool allow_strat, bool allow_sync,
80 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
81 ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
82 static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
83 HeapTuple oldtup, HeapTuple newtup);
86 /* ----------------------------------------------------------------
87 * heap support routines
88 * ----------------------------------------------------------------
92 * initscan - scan code common to heap_beginscan and heap_rescan
96 initscan(HeapScanDesc scan, ScanKey key)
102 * Determine the number of blocks we have to scan.
104 * It is sufficient to do this once at scan start, since any tuples added
105 * while the scan is in progress will be invisible to my snapshot anyway.
106 * (That is not true when using a non-MVCC snapshot. However, we couldn't
107 * guarantee to return tuples added after scan start anyway, since they
108 * might go into pages we already scanned. To guarantee consistent
109 * results for a non-MVCC snapshot, the caller must hold some higher-level
110 * lock that ensures the interesting tuple(s) won't change.)
112 scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
115 * If the table is large relative to NBuffers, use a bulk-read access
116 * strategy and enable synchronized scanning (see syncscan.c). Although
117 * the thresholds for these features could be different, we make them the
118 * same so that there are only two behaviors to tune rather than four.
119 * (However, some callers need to be able to disable one or both of
120 * these behaviors, independently of the size of the table; also there
121 * is a GUC variable that can disable synchronized scanning.)
123 * During a rescan, don't make a new strategy object if we don't have to.
125 if (!scan->rs_rd->rd_istemp &&
126 scan->rs_nblocks > NBuffers / 4)
128 allow_strat = scan->rs_allow_strat;
129 allow_sync = scan->rs_allow_sync;
132 allow_strat = allow_sync = false;
136 if (scan->rs_strategy == NULL)
137 scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
141 if (scan->rs_strategy != NULL)
142 FreeAccessStrategy(scan->rs_strategy);
143 scan->rs_strategy = NULL;
146 if (allow_sync && synchronize_seqscans)
148 scan->rs_syncscan = true;
149 scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
153 scan->rs_syncscan = false;
154 scan->rs_startblock = 0;
157 scan->rs_inited = false;
158 scan->rs_ctup.t_data = NULL;
159 ItemPointerSetInvalid(&scan->rs_ctup.t_self);
160 scan->rs_cbuf = InvalidBuffer;
161 scan->rs_cblock = InvalidBlockNumber;
163 /* we don't have a marked position... */
164 ItemPointerSetInvalid(&(scan->rs_mctid));
166 /* page-at-a-time fields are always invalid when not rs_inited */
169 * copy the scan key, if appropriate
172 memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));
175 * Currently, we don't have a stats counter for bitmap heap scans (but the
176 * underlying bitmap index scans will be counted).
178 if (!scan->rs_bitmapscan)
179 pgstat_count_heap_scan(scan->rs_rd);
183 * heapgetpage - subroutine for heapgettup()
185 * This routine reads and pins the specified page of the relation.
186 * In page-at-a-time mode it performs additional work, namely determining
187 * which tuples on the page are visible.
190 heapgetpage(HeapScanDesc scan, BlockNumber page)
197 OffsetNumber lineoff;
201 Assert(page < scan->rs_nblocks);
203 /* release previous scan buffer, if any */
204 if (BufferIsValid(scan->rs_cbuf))
206 ReleaseBuffer(scan->rs_cbuf);
207 scan->rs_cbuf = InvalidBuffer;
210 /* read page using selected strategy */
211 scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page,
212 RBM_NORMAL, scan->rs_strategy);
213 scan->rs_cblock = page;
215 if (!scan->rs_pageatatime)
218 buffer = scan->rs_cbuf;
219 snapshot = scan->rs_snapshot;
222 * Prune and repair fragmentation for the whole page, if possible.
224 Assert(TransactionIdIsValid(RecentGlobalXmin));
225 heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);
228 * We must hold share lock on the buffer content while examining tuple
229 * visibility. Afterwards, however, the tuples we have found to be
230 * visible are guaranteed good as long as we hold the buffer pin.
232 LockBuffer(buffer, BUFFER_LOCK_SHARE);
234 dp = (Page) BufferGetPage(buffer);
235 lines = PageGetMaxOffsetNumber(dp);
239 * If the all-visible flag indicates that all tuples on the page are
240 * visible to everyone, we can skip the per-tuple visibility tests.
242 all_visible = PageIsAllVisible(dp);
244 for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
248 if (ItemIdIsNormal(lpp))
256 HeapTupleData loctup;
258 loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
259 loctup.t_len = ItemIdGetLength(lpp);
260 ItemPointerSet(&(loctup.t_self), page, lineoff);
262 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
265 scan->rs_vistuples[ntup++] = lineoff;
269 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
271 Assert(ntup <= MaxHeapTuplesPerPage);
272 scan->rs_ntuples = ntup;
276 * heapgettup - fetch next heap tuple
278 * Initialize the scan if not already done; then advance to the next
279 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
280 * or set scan->rs_ctup.t_data = NULL if no more tuples.
282 * dir == NoMovementScanDirection means "re-fetch the tuple indicated by scan->rs_ctup".
285 * Note: the reason nkeys/key are passed separately, even though they are
286 * kept in the scan descriptor, is that the caller may not want us to check the scankeys.
289 * Note: when we fall off the end of the scan in either direction, we
290 * reset rs_inited. This means that a further request with the same
291 * scan direction will restart the scan, which is a bit odd, but a
292 * request with the opposite scan direction will start a fresh scan
293 * in the proper direction. The latter is required behavior for cursors,
294 * while the former case is generally undefined behavior in Postgres
295 * so we don't care too much.
299 heapgettup(HeapScanDesc scan,
304 HeapTuple tuple = &(scan->rs_ctup);
305 Snapshot snapshot = scan->rs_snapshot;
306 bool backward = ScanDirectionIsBackward(dir);
311 OffsetNumber lineoff;
316 * calculate next starting lineoff, given scan direction
318 if (ScanDirectionIsForward(dir))
320 if (!scan->rs_inited)
323 * return null immediately if relation is empty
325 if (scan->rs_nblocks == 0)
327 Assert(!BufferIsValid(scan->rs_cbuf));
328 tuple->t_data = NULL;
331 page = scan->rs_startblock; /* first page */
332 heapgetpage(scan, page);
333 lineoff = FirstOffsetNumber; /* first offnum */
334 scan->rs_inited = true;
338 /* continue from previously returned page/tuple */
339 page = scan->rs_cblock; /* current page */
340 lineoff = /* next offnum */
341 OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
344 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
346 dp = (Page) BufferGetPage(scan->rs_cbuf);
347 lines = PageGetMaxOffsetNumber(dp);
348 /* page and lineoff now reference the physically next tid */
350 linesleft = lines - lineoff + 1;
354 if (!scan->rs_inited)
357 * return null immediately if relation is empty
359 if (scan->rs_nblocks == 0)
361 Assert(!BufferIsValid(scan->rs_cbuf));
362 tuple->t_data = NULL;
367 * Disable reporting to syncscan logic in a backwards scan; it's
368 * not very likely anyone else is doing the same thing at the same
369 * time, and much more likely that we'll just bollix things for forward scanners.
372 scan->rs_syncscan = false;
373 /* start from last page of the scan */
374 if (scan->rs_startblock > 0)
375 page = scan->rs_startblock - 1;
377 page = scan->rs_nblocks - 1;
378 heapgetpage(scan, page);
382 /* continue from previously returned page/tuple */
383 page = scan->rs_cblock; /* current page */
386 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
388 dp = (Page) BufferGetPage(scan->rs_cbuf);
389 lines = PageGetMaxOffsetNumber(dp);
391 if (!scan->rs_inited)
393 lineoff = lines; /* final offnum */
394 scan->rs_inited = true;
398 lineoff = /* previous offnum */
399 OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
401 /* page and lineoff now reference the physically previous tid */
408 * ``no movement'' scan direction: refetch prior tuple
410 if (!scan->rs_inited)
412 Assert(!BufferIsValid(scan->rs_cbuf));
413 tuple->t_data = NULL;
417 page = ItemPointerGetBlockNumber(&(tuple->t_self));
418 if (page != scan->rs_cblock)
419 heapgetpage(scan, page);
421 /* Since the tuple was previously fetched, needn't lock page here */
422 dp = (Page) BufferGetPage(scan->rs_cbuf);
423 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
424 lpp = PageGetItemId(dp, lineoff);
425 Assert(ItemIdIsNormal(lpp));
427 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
428 tuple->t_len = ItemIdGetLength(lpp);
434 * advance the scan until we find a qualifying tuple or run out of stuff
437 lpp = PageGetItemId(dp, lineoff);
440 while (linesleft > 0)
442 if (ItemIdIsNormal(lpp))
446 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
447 tuple->t_len = ItemIdGetLength(lpp);
448 ItemPointerSet(&(tuple->t_self), page, lineoff);
451 * if current tuple qualifies, return it.
453 valid = HeapTupleSatisfiesVisibility(tuple,
457 if (valid && key != NULL)
458 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
463 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
469 * otherwise move to the next item on the page
474 --lpp; /* move back in this page's ItemId array */
479 ++lpp; /* move forward in this page's ItemId array */
485 * if we get here, it means we've exhausted the items on this page and
486 * it's time to move to the next.
488 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
491 * advance to next/prior page and detect end of scan
495 finished = (page == scan->rs_startblock);
497 page = scan->rs_nblocks;
503 if (page >= scan->rs_nblocks)
505 finished = (page == scan->rs_startblock);
508 * Report our new scan position for synchronization purposes. We
509 * don't do that when moving backwards, however. That would just
510 * mess up any other forward-moving scanners.
512 * Note: we do this before checking for end of scan so that the
513 * final state of the position hint is back at the start of the
514 * rel. That's not strictly necessary, but otherwise when you run
515 * the same query multiple times the starting position would shift
516 * a little bit backwards on every invocation, which is confusing.
517 * We don't guarantee any specific ordering in general, though.
519 if (scan->rs_syncscan)
520 ss_report_location(scan->rs_rd, page);
524 * return NULL if we've exhausted all the pages
528 if (BufferIsValid(scan->rs_cbuf))
529 ReleaseBuffer(scan->rs_cbuf);
530 scan->rs_cbuf = InvalidBuffer;
531 scan->rs_cblock = InvalidBlockNumber;
532 tuple->t_data = NULL;
533 scan->rs_inited = false;
537 heapgetpage(scan, page);
539 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
541 dp = (Page) BufferGetPage(scan->rs_cbuf);
542 lines = PageGetMaxOffsetNumber((Page) dp);
547 lpp = PageGetItemId(dp, lines);
551 lineoff = FirstOffsetNumber;
552 lpp = PageGetItemId(dp, FirstOffsetNumber);
558 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
560 * Same API as heapgettup, but used in page-at-a-time mode
562 * The internal logic is much the same as heapgettup's too, but there are some
563 * differences: we do not take the buffer content lock (that only needs to
564 * happen inside heapgetpage), and we iterate through just the tuples listed
565 * in rs_vistuples[] rather than all tuples on the page. Notice that
566 * lineindex is 0-based, where the corresponding loop variable lineoff in
567 * heapgettup is 1-based.
571 heapgettup_pagemode(HeapScanDesc scan,
576 HeapTuple tuple = &(scan->rs_ctup);
577 bool backward = ScanDirectionIsBackward(dir);
583 OffsetNumber lineoff;
588 * calculate next starting lineindex, given scan direction
590 if (ScanDirectionIsForward(dir))
592 if (!scan->rs_inited)
595 * return null immediately if relation is empty
597 if (scan->rs_nblocks == 0)
599 Assert(!BufferIsValid(scan->rs_cbuf));
600 tuple->t_data = NULL;
603 page = scan->rs_startblock; /* first page */
604 heapgetpage(scan, page);
606 scan->rs_inited = true;
610 /* continue from previously returned page/tuple */
611 page = scan->rs_cblock; /* current page */
612 lineindex = scan->rs_cindex + 1;
615 dp = (Page) BufferGetPage(scan->rs_cbuf);
616 lines = scan->rs_ntuples;
617 /* page and lineindex now reference the next visible tid */
619 linesleft = lines - lineindex;
623 if (!scan->rs_inited)
626 * return null immediately if relation is empty
628 if (scan->rs_nblocks == 0)
630 Assert(!BufferIsValid(scan->rs_cbuf));
631 tuple->t_data = NULL;
636 * Disable reporting to syncscan logic in a backwards scan; it's
637 * not very likely anyone else is doing the same thing at the same
638 * time, and much more likely that we'll just bollix things for forward scanners.
641 scan->rs_syncscan = false;
642 /* start from last page of the scan */
643 if (scan->rs_startblock > 0)
644 page = scan->rs_startblock - 1;
646 page = scan->rs_nblocks - 1;
647 heapgetpage(scan, page);
651 /* continue from previously returned page/tuple */
652 page = scan->rs_cblock; /* current page */
655 dp = (Page) BufferGetPage(scan->rs_cbuf);
656 lines = scan->rs_ntuples;
658 if (!scan->rs_inited)
660 lineindex = lines - 1;
661 scan->rs_inited = true;
665 lineindex = scan->rs_cindex - 1;
667 /* page and lineindex now reference the previous visible tid */
669 linesleft = lineindex + 1;
674 * ``no movement'' scan direction: refetch prior tuple
676 if (!scan->rs_inited)
678 Assert(!BufferIsValid(scan->rs_cbuf));
679 tuple->t_data = NULL;
683 page = ItemPointerGetBlockNumber(&(tuple->t_self));
684 if (page != scan->rs_cblock)
685 heapgetpage(scan, page);
687 /* Since the tuple was previously fetched, needn't lock page here */
688 dp = (Page) BufferGetPage(scan->rs_cbuf);
689 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
690 lpp = PageGetItemId(dp, lineoff);
691 Assert(ItemIdIsNormal(lpp));
693 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
694 tuple->t_len = ItemIdGetLength(lpp);
696 /* check that rs_cindex is in sync */
697 Assert(scan->rs_cindex < scan->rs_ntuples);
698 Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
704 * advance the scan until we find a qualifying tuple or run out of stuff
709 while (linesleft > 0)
711 lineoff = scan->rs_vistuples[lineindex];
712 lpp = PageGetItemId(dp, lineoff);
713 Assert(ItemIdIsNormal(lpp));
715 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
716 tuple->t_len = ItemIdGetLength(lpp);
717 ItemPointerSet(&(tuple->t_self), page, lineoff);
720 * if current tuple qualifies, return it.
726 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
730 scan->rs_cindex = lineindex;
736 scan->rs_cindex = lineindex;
741 * otherwise move to the next item on the page
751 * if we get here, it means we've exhausted the items on this page and
752 * it's time to move to the next.
756 finished = (page == scan->rs_startblock);
758 page = scan->rs_nblocks;
764 if (page >= scan->rs_nblocks)
766 finished = (page == scan->rs_startblock);
769 * Report our new scan position for synchronization purposes. We
770 * don't do that when moving backwards, however. That would just
771 * mess up any other forward-moving scanners.
773 * Note: we do this before checking for end of scan so that the
774 * final state of the position hint is back at the start of the
775 * rel. That's not strictly necessary, but otherwise when you run
776 * the same query multiple times the starting position would shift
777 * a little bit backwards on every invocation, which is confusing.
778 * We don't guarantee any specific ordering in general, though.
780 if (scan->rs_syncscan)
781 ss_report_location(scan->rs_rd, page);
785 * return NULL if we've exhausted all the pages
789 if (BufferIsValid(scan->rs_cbuf))
790 ReleaseBuffer(scan->rs_cbuf);
791 scan->rs_cbuf = InvalidBuffer;
792 scan->rs_cblock = InvalidBlockNumber;
793 tuple->t_data = NULL;
794 scan->rs_inited = false;
798 heapgetpage(scan, page);
800 dp = (Page) BufferGetPage(scan->rs_cbuf);
801 lines = scan->rs_ntuples;
804 lineindex = lines - 1;
811 #if defined(DISABLE_COMPLEX_MACRO)
813 * This is formatted so oddly so that the correspondence to the macro
814 * definition in access/heapam.h is maintained.
817 fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
823 ((isnull) ? (*(isnull) = false) : (dummyret) NULL),
824 HeapTupleNoNulls(tup) ?
826 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
828 fetchatt((tupleDesc)->attrs[(attnum) - 1],
829 (char *) (tup)->t_data + (tup)->t_data->t_hoff +
830 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
833 nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
837 att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
839 ((isnull) ? (*(isnull) = true) : (dummyret) NULL),
844 nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
854 #endif /* defined(DISABLE_COMPLEX_MACRO) */
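
/*
 * Illustrative sketch (not part of heapam proper): how a caller might pull a
 * column value out of a heap tuple with fastgetattr().  The function name and
 * the choice of attribute number 1 are hypothetical; attnum is 1-based and
 * must name a user column.
 */
static Datum
example_fetch_first_column(HeapTuple tup, TupleDesc tupleDesc, bool *isnull)
{
	/* fastgetattr handles the null bitmap and the cached-offset fast path */
	return fastgetattr(tup, 1, tupleDesc, isnull);
}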
857 /* ----------------------------------------------------------------
858 * heap access method interface
859 * ----------------------------------------------------------------
863 * relation_open - open any relation by relation OID
865 * If lockmode is not "NoLock", the specified kind of lock is
866 * obtained on the relation. (Generally, NoLock should only be
867 * used if the caller knows it has some appropriate lock on the relation already.)
870 * An error is raised if the relation does not exist.
872 * NB: a "relation" is anything with a pg_class entry. The caller is
873 * expected to check whether the relkind is something it can handle.
877 relation_open(Oid relationId, LOCKMODE lockmode)
881 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
883 /* Get the lock before trying to open the relcache entry */
884 if (lockmode != NoLock)
885 LockRelationOid(relationId, lockmode);
887 /* The relcache does all the real work... */
888 r = RelationIdGetRelation(relationId);
890 if (!RelationIsValid(r))
891 elog(ERROR, "could not open relation with OID %u", relationId);
893 /* Make note that we've accessed a temporary relation */
895 MyXactAccessedTempRel = true;
903 * try_relation_open - open any relation by relation OID
905 * Same as relation_open, except return NULL instead of failing
906 * if the relation does not exist.
910 try_relation_open(Oid relationId, LOCKMODE lockmode)
914 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
916 /* Get the lock first */
917 if (lockmode != NoLock)
918 LockRelationOid(relationId, lockmode);
921 * Now that we have the lock, probe to see if the relation really exists
924 if (!SearchSysCacheExists(RELOID,
925 ObjectIdGetDatum(relationId),
928 /* Release useless lock */
929 if (lockmode != NoLock)
930 UnlockRelationOid(relationId, lockmode);
935 /* Should be safe to do a relcache load */
936 r = RelationIdGetRelation(relationId);
938 if (!RelationIsValid(r))
939 elog(ERROR, "could not open relation with OID %u", relationId);
941 /* Make note that we've accessed a temporary relation */
943 MyXactAccessedTempRel = true;
951 * relation_open_nowait - open but don't wait for lock
953 * Same as relation_open, except throw an error instead of waiting
954 * when the requested lock is not immediately obtainable.
958 relation_open_nowait(Oid relationId, LOCKMODE lockmode)
962 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
964 /* Get the lock before trying to open the relcache entry */
965 if (lockmode != NoLock)
967 if (!ConditionalLockRelationOid(relationId, lockmode))
969 /* try to throw error by name; relation could be deleted... */
970 char *relname = get_rel_name(relationId);
974 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
975 errmsg("could not obtain lock on relation \"%s\"",
979 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
980 errmsg("could not obtain lock on relation with OID %u",
985 /* The relcache does all the real work... */
986 r = RelationIdGetRelation(relationId);
988 if (!RelationIsValid(r))
989 elog(ERROR, "could not open relation with OID %u", relationId);
991 /* Make note that we've accessed a temporary relation */
993 MyXactAccessedTempRel = true;
1001 * relation_openrv - open any relation specified by a RangeVar
1003 * Same as relation_open, but the relation is specified by a RangeVar.
1007 relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
1012 * Check for shared-cache-inval messages before trying to open the
1013 * relation. This is needed to cover the case where the name identifies a
1014 * rel that has been dropped and recreated since the start of our
1015 * transaction: if we don't flush the old syscache entry then we'll latch
1016 * onto that entry and suffer an error when we do RelationIdGetRelation.
1017 * Note that relation_open does not need to do this, since a relation's
1018 * OID never changes.
1020 * We skip this if asked for NoLock, on the assumption that the caller has
1021 * already ensured some appropriate lock is held.
1023 if (lockmode != NoLock)
1024 AcceptInvalidationMessages();
1026 /* Look up the appropriate relation using namespace search */
1027 relOid = RangeVarGetRelid(relation, false);
1029 /* Let relation_open do the rest */
1030 return relation_open(relOid, lockmode);
1034 * try_relation_openrv - open any relation specified by a RangeVar
1036 * Same as relation_openrv, but return NULL instead of failing for
1037 * relation-not-found. (Note that some other causes, such as
1038 * permissions problems, will still result in an ereport.)
1042 try_relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
1047 * Check for shared-cache-inval messages before trying to open the
1048 * relation. This is needed to cover the case where the name identifies a
1049 * rel that has been dropped and recreated since the start of our
1050 * transaction: if we don't flush the old syscache entry then we'll latch
1051 * onto that entry and suffer an error when we do RelationIdGetRelation.
1052 * Note that relation_open does not need to do this, since a relation's
1053 * OID never changes.
1055 * We skip this if asked for NoLock, on the assumption that the caller has
1056 * already ensured some appropriate lock is held.
1058 if (lockmode != NoLock)
1059 AcceptInvalidationMessages();
1061 /* Look up the appropriate relation using namespace search */
1062 relOid = RangeVarGetRelid(relation, true);
1064 /* Return NULL on not-found */
1065 if (!OidIsValid(relOid))
1068 /* Let relation_open do the rest */
1069 return relation_open(relOid, lockmode);
1073 * relation_close - close any relation
1075 * If lockmode is not "NoLock", we then release the specified lock.
1077 * Note that it is often sensible to hold a lock beyond relation_close;
1078 * in that case, the lock is released automatically at xact end.
1082 relation_close(Relation relation, LOCKMODE lockmode)
1084 LockRelId relid = relation->rd_lockInfo.lockRelId;
1086 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1088 /* The relcache does the real work... */
1089 RelationClose(relation);
1091 if (lockmode != NoLock)
1092 UnlockRelationId(&relid, lockmode);
1097 * heap_open - open a heap relation by relation OID
1099 * This is essentially relation_open plus check that the relation
1100 * is not an index nor a composite type. (The caller should also
1101 * check that it's not a view before assuming it has storage.)
1105 heap_open(Oid relationId, LOCKMODE lockmode)
1109 r = relation_open(relationId, lockmode);
1111 if (r->rd_rel->relkind == RELKIND_INDEX)
1113 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1114 errmsg("\"%s\" is an index",
1115 RelationGetRelationName(r))));
1116 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1118 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1119 errmsg("\"%s\" is a composite type",
1120 RelationGetRelationName(r))));
1126 * heap_openrv - open a heap relation specified
1127 * by a RangeVar node
1129 * As above, but relation is specified by a RangeVar.
1133 heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
1137 r = relation_openrv(relation, lockmode);
1139 if (r->rd_rel->relkind == RELKIND_INDEX)
1141 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1142 errmsg("\"%s\" is an index",
1143 RelationGetRelationName(r))));
1144 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1146 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1147 errmsg("\"%s\" is a composite type",
1148 RelationGetRelationName(r))));
1154 * try_heap_openrv - open a heap relation specified
1155 * by a RangeVar node
1157 * As above, but return NULL instead of failing for relation-not-found.
1161 try_heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
1165 r = try_relation_openrv(relation, lockmode);
1169 if (r->rd_rel->relkind == RELKIND_INDEX)
1171 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1172 errmsg("\"%s\" is an index",
1173 RelationGetRelationName(r))));
1174 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1176 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1177 errmsg("\"%s\" is a composite type",
1178 RelationGetRelationName(r))));
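
/*
 * Illustrative sketch: caller-side use of try_heap_openrv(), tolerating a
 * missing table instead of erroring out.  The function name is hypothetical;
 * the RangeVar is assumed to have been built elsewhere (e.g. from parser
 * output).
 */
static Relation
example_open_if_exists(const RangeVar *rv)
{
	Relation	rel = try_heap_openrv(rv, AccessShareLock);

	if (rel == NULL)
		ereport(NOTICE,
				(errmsg("relation \"%s\" does not exist, skipping",
						rv->relname)));
	return rel;
}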
1186 * heap_beginscan - begin relation scan
1188 * heap_beginscan_strat offers an extended API that lets the caller control
1189 * whether a nondefault buffer access strategy can be used, and whether
1190 * syncscan can be chosen (possibly resulting in the scan not starting from
1191 * block zero). Both of these default to TRUE with plain heap_beginscan.
1193 * heap_beginscan_bm is an alternative entry point for setting up a
1194 * HeapScanDesc for a bitmap heap scan. Although that scan technology is
1195 * really quite unlike a standard seqscan, there is just enough commonality
1196 * to make it worth using the same data structure.
1200 heap_beginscan(Relation relation, Snapshot snapshot,
1201 int nkeys, ScanKey key)
1203 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1208 heap_beginscan_strat(Relation relation, Snapshot snapshot,
1209 int nkeys, ScanKey key,
1210 bool allow_strat, bool allow_sync)
1212 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1213 allow_strat, allow_sync, false);
1217 heap_beginscan_bm(Relation relation, Snapshot snapshot,
1218 int nkeys, ScanKey key)
1220 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1221 false, false, true);
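
/*
 * Illustrative sketch: using the extended entry point to force a scan that
 * starts at block zero and uses the default buffer strategy, e.g. when the
 * caller depends on physical ordering.  The function name is hypothetical.
 */
static HeapScanDesc
example_ordered_scan(Relation rel, Snapshot snapshot)
{
	/* allow_strat = false, allow_sync = false */
	return heap_beginscan_strat(rel, snapshot, 0, NULL, false, false);
}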
1225 heap_beginscan_internal(Relation relation, Snapshot snapshot,
1226 int nkeys, ScanKey key,
1227 bool allow_strat, bool allow_sync,
1233 * increment relation ref count while scanning relation
1235 * This is just to make really sure the relcache entry won't go away while
1236 * the scan has a pointer to it. Caller should be holding the rel open
1237 * anyway, so this is redundant in all normal scenarios...
1239 RelationIncrementReferenceCount(relation);
1242 * allocate and initialize scan descriptor
1244 scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1246 scan->rs_rd = relation;
1247 scan->rs_snapshot = snapshot;
1248 scan->rs_nkeys = nkeys;
1249 scan->rs_bitmapscan = is_bitmapscan;
1250 scan->rs_strategy = NULL; /* set in initscan */
1251 scan->rs_allow_strat = allow_strat;
1252 scan->rs_allow_sync = allow_sync;
1255 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1257 scan->rs_pageatatime = IsMVCCSnapshot(snapshot);
1259 /* we only need to set this up once */
1260 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1263 * we do this here instead of in initscan() because heap_rescan also calls
1264 * initscan() and we don't want to allocate memory again
1267 scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1269 scan->rs_key = NULL;
1271 initscan(scan, key);
1277 * heap_rescan - restart a relation scan
1281 heap_rescan(HeapScanDesc scan,
1285 * unpin scan buffers
1287 if (BufferIsValid(scan->rs_cbuf))
1288 ReleaseBuffer(scan->rs_cbuf);
1291 * reinitialize scan descriptor
1293 initscan(scan, key);
1297 * heap_endscan - end relation scan
1299 * See how to integrate with index scans.
1300 * Check handling of reldesc caching.
1304 heap_endscan(HeapScanDesc scan)
1306 /* Note: no locking manipulations needed */
1309 * unpin scan buffers
1311 if (BufferIsValid(scan->rs_cbuf))
1312 ReleaseBuffer(scan->rs_cbuf);
1315 * decrement relation reference count and free scan descriptor storage
1317 RelationDecrementReferenceCount(scan->rs_rd);
1320 pfree(scan->rs_key);
1322 if (scan->rs_strategy != NULL)
1323 FreeAccessStrategy(scan->rs_strategy);
1329 * heap_getnext - retrieve next tuple in scan
1331 * Fix to work with index relations.
1332 * We don't return the buffer anymore, but you can get it from the
1333 * returned HeapTuple.
1338 #define HEAPDEBUG_1 \
1339 elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
1340 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
1341 #define HEAPDEBUG_2 \
1342 elog(DEBUG2, "heap_getnext returning EOS")
1343 #define HEAPDEBUG_3 \
1344 elog(DEBUG2, "heap_getnext returning tuple")
1349 #endif /* !defined(HEAPDEBUGALL) */
1353 heap_getnext(HeapScanDesc scan, ScanDirection direction)
1355 /* Note: no locking manipulations needed */
1357 HEAPDEBUG_1; /* heap_getnext( info ) */
1359 if (scan->rs_pageatatime)
1360 heapgettup_pagemode(scan, direction,
1361 scan->rs_nkeys, scan->rs_key);
1363 heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1365 if (scan->rs_ctup.t_data == NULL)
1367 HEAPDEBUG_2; /* heap_getnext returning EOS */
1372 * if we get here it means we have a new current scan tuple, so point to
1373 * the proper return buffer and return the tuple.
1375 HEAPDEBUG_3; /* heap_getnext returning tuple */
1377 pgstat_count_heap_getnext(scan->rs_rd);
1379 return &(scan->rs_ctup);
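
/*
 * Illustrative sketch: the typical caller-side loop built on heap_beginscan
 * and heap_getnext.  The function name is hypothetical; the relation OID and
 * an MVCC snapshot are assumed to be supplied by the caller.
 */
static void
example_sequential_scan(Oid relid, Snapshot snapshot)
{
	Relation	rel = heap_open(relid, AccessShareLock);
	HeapScanDesc scan = heap_beginscan(rel, snapshot, 0, NULL);
	HeapTuple	tuple;

	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* process tuple; it is only valid until the next heap_getnext call */
	}

	heap_endscan(scan);
	heap_close(rel, AccessShareLock);
}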
1383 * heap_fetch - retrieve tuple with given tid
1385 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1386 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1387 * against the specified snapshot.
1389 * If successful (tuple found and passes snapshot time qual), then *userbuf
1390 * is set to the buffer holding the tuple and TRUE is returned. The caller
1391 * must unpin the buffer when done with the tuple.
1393 * If the tuple is not found (ie, item number references a deleted slot),
1394 * then tuple->t_data is set to NULL and FALSE is returned.
1396 * If the tuple is found but fails the time qual check, then FALSE is returned
1397 * but tuple->t_data is left pointing to the tuple.
1399 * keep_buf determines what is done with the buffer in the FALSE-result cases.
1400 * When the caller specifies keep_buf = true, we retain the pin on the buffer
1401 * and return it in *userbuf (so the caller must eventually unpin it); when
1402 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
1404 * stats_relation is the relation to charge the heap_fetch operation against
1405 * for statistical purposes. (This could be the heap rel itself, an
1406 * associated index, or NULL to not count the fetch at all.)
1408 * heap_fetch does not follow HOT chains: only the exact TID requested will be fetched.
1411 * It is somewhat inconsistent that we ereport() on invalid block number but
1412 * return false on invalid item number. There are a couple of reasons though.
1413 * One is that the caller can relatively easily check the block number for
1414 * validity, but cannot check the item number without reading the page
1415 * himself. Another is that when we are following a t_ctid link, we can be
1416 * reasonably confident that the page number is valid (since VACUUM shouldn't
1417 * truncate off the destination page without having killed the referencing
1418 * tuple first), but the item number might well not be good.
1421 heap_fetch(Relation relation,
1426 Relation stats_relation)
1428 ItemPointer tid = &(tuple->t_self);
1432 OffsetNumber offnum;
1436 * Fetch and pin the appropriate page of the relation.
1438 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1441 * Need share lock on buffer to examine tuple commit status.
1443 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1444 page = BufferGetPage(buffer);
1447 * We'd better check for out-of-range offnum in case of VACUUM since the TID was obtained.
1450 offnum = ItemPointerGetOffsetNumber(tid);
1451 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1453 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1458 ReleaseBuffer(buffer);
1459 *userbuf = InvalidBuffer;
1461 tuple->t_data = NULL;
1466 * get the item line pointer corresponding to the requested tid
1468 lp = PageGetItemId(page, offnum);
1471 * Must check for deleted tuple.
1473 if (!ItemIdIsNormal(lp))
1475 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1480 ReleaseBuffer(buffer);
1481 *userbuf = InvalidBuffer;
1483 tuple->t_data = NULL;
1488 * fill in *tuple fields
1490 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1491 tuple->t_len = ItemIdGetLength(lp);
1492 tuple->t_tableOid = RelationGetRelid(relation);
1495 * check time qualification of tuple, then release lock
1497 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1499 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1504 * All checks passed, so return the tuple as valid. Caller is now
1505 * responsible for releasing the buffer.
1509 /* Count the successful fetch against appropriate rel, if any */
1510 if (stats_relation != NULL)
1511 pgstat_count_heap_fetch(stats_relation);
1516 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1521 ReleaseBuffer(buffer);
1522 *userbuf = InvalidBuffer;
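
/*
 * Illustrative sketch: fetching a single tuple by TID with heap_fetch().  The
 * function name is hypothetical; on success the caller owns the buffer pin
 * and must release it, as described above.
 */
static bool
example_fetch_by_tid(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	HeapTupleData tuple;
	Buffer		buffer;

	tuple.t_self = *tid;
	if (!heap_fetch(rel, snapshot, &tuple, &buffer, false, NULL))
		return false;

	/* ... examine tuple.t_data while the buffer pin is held ... */
	ReleaseBuffer(buffer);
	return true;
}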
1529 * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1531 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1532 * of a HOT chain), and buffer is the buffer holding this tuple. We search
1533 * for the first chain member satisfying the given snapshot. If one is
1534 * found, we update *tid to reference that tuple's offset number, and
1535 * return TRUE. If no match, return FALSE without modifying *tid.
1537 * If all_dead is not NULL, we check non-visible tuples to see if they are
1538 * globally dead; *all_dead is set TRUE if all members of the HOT chain
1539 * are vacuumable, FALSE if not.
1541 * Unlike heap_fetch, the caller must already have pin and (at least) share
1542 * lock on the buffer; it is still pinned/locked at exit. Also unlike
1543 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
1546 heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
1549 Page dp = (Page) BufferGetPage(buffer);
1550 TransactionId prev_xmax = InvalidTransactionId;
1551 OffsetNumber offnum;
1552 bool at_chain_start;
1557 Assert(TransactionIdIsValid(RecentGlobalXmin));
1559 Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
1560 offnum = ItemPointerGetOffsetNumber(tid);
1561 at_chain_start = true;
1563 /* Scan through possible multiple members of HOT-chain */
1567 HeapTupleData heapTuple;
1569 /* check for bogus TID */
1570 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1573 lp = PageGetItemId(dp, offnum);
1575 /* check for unused, dead, or redirected items */
1576 if (!ItemIdIsNormal(lp))
1578 /* We should only see a redirect at start of chain */
1579 if (ItemIdIsRedirected(lp) && at_chain_start)
1581 /* Follow the redirect */
1582 offnum = ItemIdGetRedirect(lp);
1583 at_chain_start = false;
1586 /* else must be end of chain */
1590 heapTuple.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1591 heapTuple.t_len = ItemIdGetLength(lp);
1594 * Shouldn't see a HEAP_ONLY tuple at chain start.
1596 if (at_chain_start && HeapTupleIsHeapOnly(&heapTuple))
1600 * The xmin should match the previous xmax value, else chain is broken.
1603 if (TransactionIdIsValid(prev_xmax) &&
1604 !TransactionIdEquals(prev_xmax,
1605 HeapTupleHeaderGetXmin(heapTuple.t_data)))
1608 /* If it's visible per the snapshot, we must return it */
1609 if (HeapTupleSatisfiesVisibility(&heapTuple, snapshot, buffer))
1611 ItemPointerSetOffsetNumber(tid, offnum);
1618 * If we can't see it, maybe no one else can either. At caller
1619 * request, check whether all chain members are dead to all transactions.
1622 if (all_dead && *all_dead &&
1623 HeapTupleSatisfiesVacuum(heapTuple.t_data, RecentGlobalXmin,
1624 buffer) != HEAPTUPLE_DEAD)
1628 * Check to see if HOT chain continues past this tuple; if so fetch
1629 * the next offnum and loop around.
1631 if (HeapTupleIsHotUpdated(&heapTuple))
1633 Assert(ItemPointerGetBlockNumber(&heapTuple.t_data->t_ctid) ==
1634 ItemPointerGetBlockNumber(tid));
1635 offnum = ItemPointerGetOffsetNumber(&heapTuple.t_data->t_ctid);
1636 at_chain_start = false;
1637 prev_xmax = HeapTupleHeaderGetXmax(heapTuple.t_data);
1640 break; /* end of chain */
1647 * heap_hot_search - search HOT chain for tuple satisfying snapshot
1649 * This has the same API as heap_hot_search_buffer, except that the caller
1650 * does not provide the buffer containing the page, rather we access it
1654 heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
1660 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1661 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1662 result = heap_hot_search_buffer(tid, buffer, snapshot, all_dead);
1663 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1664 ReleaseBuffer(buffer);
1669 * heap_get_latest_tid - get the latest tid of a specified tuple
1671 * Actually, this gets the latest version that is visible according to
1672 * the passed snapshot. You can pass SnapshotDirty to get the very latest,
1673 * possibly uncommitted version.
1675 * *tid is both an input and an output parameter: it is updated to
1676 * show the latest version of the row. Note that it will not be changed
1677 * if no version of the row passes the snapshot test.
1680 heap_get_latest_tid(Relation relation,
1685 ItemPointerData ctid;
1686 TransactionId priorXmax;
1688 /* this is to avoid Assert failures on bad input */
1689 if (!ItemPointerIsValid(tid))
1693 * Since this can be called with user-supplied TID, don't trust the input
1694 * too much. (RelationGetNumberOfBlocks is an expensive check, so we
1695 * don't repeat it for the t_ctid links we follow. Note that it would not do to
1696 * call it just once and save the result, either.)
1698 blk = ItemPointerGetBlockNumber(tid);
1699 if (blk >= RelationGetNumberOfBlocks(relation))
1700 elog(ERROR, "block number %u is out of range for relation \"%s\"",
1701 blk, RelationGetRelationName(relation));
1704 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1705 * need to examine, and *tid is the TID we will return if ctid turns out to be bogus.
1708 * Note that we will loop until we reach the end of the t_ctid chain.
1709 * Depending on the snapshot passed, there might be at most one visible
1710 * version of the row, but we don't try to optimize for that.
1713 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1718 OffsetNumber offnum;
1724 * Read, pin, and lock the page.
1726 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1727 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1728 page = BufferGetPage(buffer);
1731 * Check for bogus item number. This is not treated as an error
1732 * condition because it can happen while following a t_ctid link. We
1733 * just assume that the prior tid is OK and return it unchanged.
1735 offnum = ItemPointerGetOffsetNumber(&ctid);
1736 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1738 UnlockReleaseBuffer(buffer);
1741 lp = PageGetItemId(page, offnum);
1742 if (!ItemIdIsNormal(lp))
1744 UnlockReleaseBuffer(buffer);
1748 /* OK to access the tuple */
1750 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1751 tp.t_len = ItemIdGetLength(lp);
1754 * After following a t_ctid link, we might arrive at an unrelated
1755 * tuple. Check for XMIN match.
1757 if (TransactionIdIsValid(priorXmax) &&
1758 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1760 UnlockReleaseBuffer(buffer);
1765 * Check time qualification of tuple; if visible, set it as the new result candidate.
1768 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1773 * If there's a valid t_ctid link, follow it, else we're done.
1775 if ((tp.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) ||
1776 ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1778 UnlockReleaseBuffer(buffer);
1782 ctid = tp.t_data->t_ctid;
1783 priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
1784 UnlockReleaseBuffer(buffer);
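
/*
 * Illustrative sketch: following a row's update chain to its newest visible
 * version with heap_get_latest_tid().  The function name is hypothetical;
 * *tid is updated in place only if a visible version is found.
 */
static bool
example_follow_updates(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	ItemPointerData orig_tid = *tid;

	heap_get_latest_tid(rel, snapshot, tid);

	/* true if the latest visible version lives at a different TID */
	return !ItemPointerEquals(&orig_tid, tid);
}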
1790 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
1792 * This is called after we have waited for the XMAX transaction to terminate.
1793 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
1794 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
1795 * hint bit if possible --- but beware that that may not yet be possible,
1796 * if the transaction committed asynchronously. Hence callers should look
1797 * only at XMAX_INVALID.
1800 UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
1802 Assert(TransactionIdEquals(HeapTupleHeaderGetXmax(tuple), xid));
1804 if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
1806 if (TransactionIdDidCommit(xid))
1807 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
1810 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
1811 InvalidTransactionId);
1817 * GetBulkInsertState - prepare status object for a bulk insert
1820 GetBulkInsertState(void)
1822 BulkInsertState bistate;
1824 bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
1825 bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
1826 bistate->current_buf = InvalidBuffer;
1831 * FreeBulkInsertState - clean up after finishing a bulk insert
1834 FreeBulkInsertState(BulkInsertState bistate)
1836 if (bistate->current_buf != InvalidBuffer)
1837 ReleaseBuffer(bistate->current_buf);
1838 FreeAccessStrategy(bistate->strategy);
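
/*
 * Illustrative sketch: a bulk-load loop pairing GetBulkInsertState with
 * heap_insert, roughly as a COPY-like caller would.  The function name and
 * the tuples array are hypothetical; options of 0 keep default WAL and FSM
 * behavior.
 */
static void
example_bulk_load(Relation rel, HeapTuple *tuples, int ntuples, CommandId cid)
{
	BulkInsertState bistate = GetBulkInsertState();
	int			i;

	for (i = 0; i < ntuples; i++)
		(void) heap_insert(rel, tuples[i], cid, 0, bistate);

	FreeBulkInsertState(bistate);
}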
1844 * heap_insert - insert tuple into a heap
1846 * The new tuple is stamped with current transaction ID and the specified command ID.
1849 * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
1850 * logged in WAL, even for a non-temp relation. Safe usage of this behavior
1851 * requires that we arrange that all new tuples go into new pages not
1852 * containing any tuples from other transactions, and that the relation gets
1853 * fsync'd before commit. (See also heap_sync() comments)
1855 * The HEAP_INSERT_SKIP_FSM option is passed directly to
1856 * RelationGetBufferForTuple, which see for more info.
1858 * Note that these options will be applied when inserting into the heap's
1859 * TOAST table, too, if the tuple requires any out-of-line data.
1861 * The BulkInsertState object (if any; bistate can be NULL for default
1862 * behavior) is also just passed through to RelationGetBufferForTuple.
1864 * The return value is the OID assigned to the tuple (either here or by the
1865 * caller), or InvalidOid if no OID. The header fields of *tup are updated
1866 * to match the stored tuple; in particular tup->t_self receives the actual
1867 * TID where the tuple was stored. But note that any toasting of fields
1868 * within the tuple data is NOT reflected into *tup.
1871 heap_insert(Relation relation, HeapTuple tup, CommandId cid,
1872 int options, BulkInsertState bistate)
1874 TransactionId xid = GetCurrentTransactionId();
1877 bool all_visible_cleared = false;
1879 if (relation->rd_rel->relhasoids)
1882 /* this is redundant with an Assert in HeapTupleSetOid */
1883 Assert(tup->t_data->t_infomask & HEAP_HASOID);
1887 * If the object id of this tuple has already been assigned, trust the
1888 * caller. There are a couple of ways this can happen. At initial db
1889 * creation, the backend program sets oids for tuples. When we define
1890 * an index, we set the oid. Finally, in the future, we may allow
1891 * users to set their own object ids in order to support a persistent
1892 * object store (objects need to contain pointers to one another).
1894 if (!OidIsValid(HeapTupleGetOid(tup)))
1895 HeapTupleSetOid(tup, GetNewOid(relation));
1899 /* check there is no space for an OID */
1900 Assert(!(tup->t_data->t_infomask & HEAP_HASOID));
1903 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
1904 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
1905 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
1906 HeapTupleHeaderSetXmin(tup->t_data, xid);
1907 HeapTupleHeaderSetCmin(tup->t_data, cid);
1908 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
1909 tup->t_tableOid = RelationGetRelid(relation);
1912 * If the new tuple is too big for storage or contains already toasted
1913 * out-of-line attributes from some other relation, invoke the toaster.
1915 * Note: below this point, heaptup is the data we actually intend to store
1916 * into the relation; tup is the caller's original untoasted data.
1918 if (relation->rd_rel->relkind != RELKIND_RELATION)
1920 /* toast table entries should never be recursively toasted */
1921 Assert(!HeapTupleHasExternal(tup));
1924 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
1925 heaptup = toast_insert_or_update(relation, tup, NULL, options);
1929 /* Find buffer to insert this tuple into */
1930 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
1931 InvalidBuffer, options, bistate);
1933 /* NO EREPORT(ERROR) from here till changes are logged */
1934 START_CRIT_SECTION();
1936 RelationPutHeapTuple(relation, buffer, heaptup);
1938 if (PageIsAllVisible(BufferGetPage(buffer)))
1940 all_visible_cleared = true;
1941 PageClearAllVisible(BufferGetPage(buffer));
1945 * XXX Should we set PageSetPrunable on this page ?
1947 * The inserting transaction may eventually abort thus making this tuple
1948 * DEAD and hence available for pruning. Though we don't want to optimize
1949 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
1950 * aborted tuple will never be pruned until next vacuum is triggered.
1952 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
1955 MarkBufferDirty(buffer);
1958 if (!(options & HEAP_INSERT_SKIP_WAL) && !relation->rd_istemp)
1960 xl_heap_insert xlrec;
1961 xl_heap_header xlhdr;
1963 XLogRecData rdata[3];
1964 Page page = BufferGetPage(buffer);
1965 uint8 info = XLOG_HEAP_INSERT;
1967 xlrec.all_visible_cleared = all_visible_cleared;
1968 xlrec.target.node = relation->rd_node;
1969 xlrec.target.tid = heaptup->t_self;
1970 rdata[0].data = (char *) &xlrec;
1971 rdata[0].len = SizeOfHeapInsert;
1972 rdata[0].buffer = InvalidBuffer;
1973 rdata[0].next = &(rdata[1]);
1975 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
1976 xlhdr.t_infomask = heaptup->t_data->t_infomask;
1977 xlhdr.t_hoff = heaptup->t_data->t_hoff;
1980 * note we mark rdata[1] as belonging to buffer; if XLogInsert decides
1981 * to write the whole page to the xlog, we don't need to store
1982 * xl_heap_header in the xlog.
1984 rdata[1].data = (char *) &xlhdr;
1985 rdata[1].len = SizeOfHeapHeader;
1986 rdata[1].buffer = buffer;
1987 rdata[1].buffer_std = true;
1988 rdata[1].next = &(rdata[2]);
1990 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
1991 rdata[2].data = (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits);
1992 rdata[2].len = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
1993 rdata[2].buffer = buffer;
1994 rdata[2].buffer_std = true;
1995 rdata[2].next = NULL;
1998 * If this is the single and first tuple on page, we can reinit the
1999 * page instead of restoring the whole thing. Set flag, and hide
2000 * buffer references from XLogInsert.
2002 if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2003 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2005 info |= XLOG_HEAP_INIT_PAGE;
2006 rdata[1].buffer = rdata[2].buffer = InvalidBuffer;
2009 recptr = XLogInsert(RM_HEAP_ID, info, rdata);
2011 PageSetLSN(page, recptr);
2012 PageSetTLI(page, ThisTimeLineID);
2017 UnlockReleaseBuffer(buffer);
2019 /* Clear the bit in the visibility map if necessary */
2020 if (all_visible_cleared)
2021 visibilitymap_clear(relation,
2022 ItemPointerGetBlockNumber(&(heaptup->t_self)));
2025 * If tuple is cachable, mark it for invalidation from the caches in case
2026 * we abort. Note it is OK to do this after releasing the buffer, because
2027 * the heaptup data structure is all in local memory, not in the shared buffer.
2030 CacheInvalidateHeapTuple(relation, heaptup);
2032 pgstat_count_heap_insert(relation);
2035 * If heaptup is a private copy, release it. Don't forget to copy t_self
2036 * back to the caller's image, too.
2040 tup->t_self = heaptup->t_self;
2041 heap_freetuple(heaptup);
2044 return HeapTupleGetOid(tup);
2048 * simple_heap_insert - insert a tuple
2050 * Currently, this routine differs from heap_insert only in supplying
2051 * a default command ID and not allowing access to the speedup options.
2053 * This should be used rather than using heap_insert directly in most places
2054 * where we are modifying system catalogs.
2057 simple_heap_insert(Relation relation, HeapTuple tup)
2059 return heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
2063 * heap_delete - delete a tuple
2065 * NB: do not call this directly unless you are prepared to deal with
2066 * concurrent-update conditions. Use simple_heap_delete instead.
2068 * relation - table to be modified (caller must hold suitable lock)
2069 * tid - TID of tuple to be deleted
2070 * ctid - output parameter, used only for failure case (see below)
2071 * update_xmax - output parameter, used only for failure case (see below)
2072 * cid - delete command ID (used for visibility test, and stored into
2073 * cmax if successful)
2074 * crosscheck - if not InvalidSnapshot, also check tuple against this
2075 * wait - true if should wait for any conflicting update to commit/abort
2077 * Normal, successful return value is HeapTupleMayBeUpdated, which
2078 * actually means we did delete it. Failure return codes are
2079 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2080 * (the last only possible if wait == false).
2082 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
2083 * If t_ctid is the same as tid, the tuple was deleted; if different, the
2084 * tuple was updated, and t_ctid is the location of the replacement tuple.
2085 * (t_xmax is needed to verify that the replacement tuple matches.)
2088 heap_delete(Relation relation, ItemPointer tid,
2089 ItemPointer ctid, TransactionId *update_xmax,
2090 CommandId cid, Snapshot crosscheck, bool wait)
2093 TransactionId xid = GetCurrentTransactionId();
2098 bool have_tuple_lock = false;
2100 bool all_visible_cleared = false;
2102 Assert(ItemPointerIsValid(tid));
2104 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2105 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2107 page = BufferGetPage(buffer);
2108 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2109 Assert(ItemIdIsNormal(lp));
2111 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2112 tp.t_len = ItemIdGetLength(lp);
2116 result = HeapTupleSatisfiesUpdate(tp.t_data, cid, buffer);
2118 if (result == HeapTupleInvisible)
2120 UnlockReleaseBuffer(buffer);
2121 elog(ERROR, "attempted to delete invisible tuple");
2123 else if (result == HeapTupleBeingUpdated && wait)
2125 TransactionId xwait;
2128 /* must copy state data before unlocking buffer */
2129 xwait = HeapTupleHeaderGetXmax(tp.t_data);
2130 infomask = tp.t_data->t_infomask;
2132 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2135 * Acquire tuple lock to establish our priority for the tuple (see
2136 * heap_lock_tuple). LockTuple will release us when we are
2137 * next-in-line for the tuple.
2139 * If we are forced to "start over" below, we keep the tuple lock;
2140 * this arranges that we stay at the head of the line while rechecking tuple state.
2143 if (!have_tuple_lock)
2145 LockTuple(relation, &(tp.t_self), ExclusiveLock);
2146 have_tuple_lock = true;
2150 * Sleep until concurrent transaction ends. Note that we don't care
2151 * if the locker has an exclusive or shared lock, because we need exclusive.
2155 if (infomask & HEAP_XMAX_IS_MULTI)
2157 /* wait for multixact */
2158 MultiXactIdWait((MultiXactId) xwait);
2159 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2162 * If xwait had just locked the tuple then some other xact could
2163 * update this tuple before we get to this point. Check for xmax
2164 * change, and start over if so.
2166 if (!(tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2167 !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
2172 * You might think the multixact is necessarily done here, but not
2173 * so: it could have surviving members, namely our own xact or
2174 * other subxacts of this backend. It is legal for us to delete
2175 * the tuple in either case, however (the latter case is
2176 * essentially a situation of upgrading our former shared lock to
2177 * exclusive). We don't bother changing the on-disk hint bits
2178 * since we are about to overwrite the xmax altogether.
2183 /* wait for regular transaction to end */
2184 XactLockTableWait(xwait);
2185 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2188 * xwait is done, but if xwait had just locked the tuple then some
2189 * other xact could update this tuple before we get to this point.
2190 * Check for xmax change, and start over if so.
2192 if ((tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2193 !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
2197 /* Otherwise check if it committed or aborted */
2198 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2202 * We may overwrite if previous xmax aborted, or if it committed but
2203 * only locked the tuple without updating it.
2205 if (tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
2207 result = HeapTupleMayBeUpdated;
2209 result = HeapTupleUpdated;
2212 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
2214 /* Perform additional check for serializable RI updates */
2215 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2216 result = HeapTupleUpdated;
2219 if (result != HeapTupleMayBeUpdated)
2221 Assert(result == HeapTupleSelfUpdated ||
2222 result == HeapTupleUpdated ||
2223 result == HeapTupleBeingUpdated);
2224 Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2225 *ctid = tp.t_data->t_ctid;
2226 *update_xmax = HeapTupleHeaderGetXmax(tp.t_data);
2227 UnlockReleaseBuffer(buffer);
2228 if (have_tuple_lock)
2229 UnlockTuple(relation, &(tp.t_self), ExclusiveLock);
2233 /* replace cid with a combo cid if necessary */
2234 HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2236 START_CRIT_SECTION();
2239 * If this transaction commits, the tuple will become DEAD sooner or
2240 * later. Set flag that this page is a candidate for pruning once our xid
2241 * falls below the OldestXmin horizon. If the transaction finally aborts,
2242 * the subsequent page pruning will be a no-op and the hint will be cleared.
2245 PageSetPrunable(page, xid);
2247 if (PageIsAllVisible(page))
2249 all_visible_cleared = true;
2250 PageClearAllVisible(page);
2253 /* store transaction information of xact deleting the tuple */
2254 tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2256 HEAP_XMAX_IS_MULTI |
2259 HeapTupleHeaderClearHotUpdated(tp.t_data);
2260 HeapTupleHeaderSetXmax(tp.t_data, xid);
2261 HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2262 /* Make sure there is no forward chain link in t_ctid */
2263 tp.t_data->t_ctid = tp.t_self;
2265 MarkBufferDirty(buffer);
2268 if (!relation->rd_istemp)
2270 xl_heap_delete xlrec;
2272 XLogRecData rdata[2];
2274 xlrec.all_visible_cleared = all_visible_cleared;
2275 xlrec.target.node = relation->rd_node;
2276 xlrec.target.tid = tp.t_self;
2277 rdata[0].data = (char *) &xlrec;
2278 rdata[0].len = SizeOfHeapDelete;
2279 rdata[0].buffer = InvalidBuffer;
2280 rdata[0].next = &(rdata[1]);
2282 rdata[1].data = NULL;
2284 rdata[1].buffer = buffer;
2285 rdata[1].buffer_std = true;
2286 rdata[1].next = NULL;
2288 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);
2290 PageSetLSN(page, recptr);
2291 PageSetTLI(page, ThisTimeLineID);
2296 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2299 * If the tuple has toasted out-of-line attributes, we need to delete
2300 * those items too. We have to do this before releasing the buffer
2301 * because we need to look at the contents of the tuple, but it's OK to
2302 * release the content lock on the buffer first.
2304 if (relation->rd_rel->relkind != RELKIND_RELATION)
2306 /* toast table entries should never be recursively toasted */
2307 Assert(!HeapTupleHasExternal(&tp));
2309 else if (HeapTupleHasExternal(&tp))
2310 toast_delete(relation, &tp);
2313 * Mark tuple for invalidation from system caches at next command
2314 * boundary. We have to do this before releasing the buffer because we
2315 * need to look at the contents of the tuple.
2317 CacheInvalidateHeapTuple(relation, &tp);
2319 /* Clear the bit in the visibility map if necessary */
2320 if (all_visible_cleared)
2321 visibilitymap_clear(relation, BufferGetBlockNumber(buffer));
2323 /* Now we can release the buffer */
2324 ReleaseBuffer(buffer);
2327 * Release the lmgr tuple lock, if we had it.
2329 if (have_tuple_lock)
2330 UnlockTuple(relation, &(tp.t_self), ExclusiveLock);
2332 pgstat_count_heap_delete(relation);
2334 return HeapTupleMayBeUpdated;
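/*
 * Illustrative sketch (not actual code from this module): a caller prepared
 * for concurrent deletions typically handles the result codes along these
 * lines; on HeapTupleUpdated it can compare the returned ctid with the
 * original TID to tell a concurrent delete from a concurrent update, and in
 * the latter case chase update_ctid/update_xmax to the replacement version.
 * The names "estate", "tuple", and follow_update_chain() are hypothetical
 * stand-ins for the caller's own state and logic.
 *
 *		result = heap_delete(relation, &tuple.t_self,
 *							 &update_ctid, &update_xmax,
 *							 estate->es_output_cid,
 *							 estate->es_crosscheck_snapshot,
 *							 true);
 *		switch (result)
 *		{
 *			case HeapTupleMayBeUpdated:
 *			case HeapTupleSelfUpdated:
 *				break;
 *			case HeapTupleUpdated:
 *				if (!ItemPointerEquals(&tuple.t_self, &update_ctid))
 *					follow_update_chain(&update_ctid, update_xmax);
 *				break;
 *			default:
 *				elog(ERROR, "unrecognized heap_delete status: %u", result);
 *		}
 */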
2338 * simple_heap_delete - delete a tuple
2340 * This routine may be used to delete a tuple when concurrent updates of
2341 * the target tuple are not expected (for example, because we have a lock
2342 * on the relation associated with the tuple). Any failure is reported via ereport().
2346 simple_heap_delete(Relation relation, ItemPointer tid)
2349 ItemPointerData update_ctid;
2350 TransactionId update_xmax;
2352 result = heap_delete(relation, tid,
2353 &update_ctid, &update_xmax,
2354 GetCurrentCommandId(true), InvalidSnapshot,
2355 true /* wait for commit */ );
2358 case HeapTupleSelfUpdated:
2359 /* Tuple was already updated in current command? */
2360 elog(ERROR, "tuple already updated by self");
2363 case HeapTupleMayBeUpdated:
2364 /* done successfully */
2367 case HeapTupleUpdated:
2368 elog(ERROR, "tuple concurrently updated");
2372 elog(ERROR, "unrecognized heap_delete status: %u", result);
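/*
 * Illustrative sketch (not actual code from this module): the usual caller
 * pattern for simple_heap_delete is removing a system-catalog row found
 * through the syscache.  "SomeCatalogRelationId", "SOMECACHEID" and
 * "objectId" are placeholders, not real identifiers.
 *
 *		Relation	catrel = heap_open(SomeCatalogRelationId, RowExclusiveLock);
 *		HeapTuple	tup = SearchSysCache(SOMECACHEID,
 *										 ObjectIdGetDatum(objectId),
 *										 0, 0, 0);
 *
 *		if (!HeapTupleIsValid(tup))
 *			elog(ERROR, "cache lookup failed for object %u", objectId);
 *		simple_heap_delete(catrel, &tup->t_self);
 *		ReleaseSysCache(tup);
 *		heap_close(catrel, RowExclusiveLock);
 */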
2378 * heap_update - replace a tuple
2380 * NB: do not call this directly unless you are prepared to deal with
2381 * concurrent-update conditions. Use simple_heap_update instead.
2383 * relation - table to be modified (caller must hold suitable lock)
2384 * otid - TID of old tuple to be replaced
2385 * newtup - newly constructed tuple data to store
2386 * ctid - output parameter, used only for failure case (see below)
2387 * update_xmax - output parameter, used only for failure case (see below)
2388 * cid - update command ID (used for visibility test, and stored into
2389 * cmax/cmin if successful)
2390 * crosscheck - if not InvalidSnapshot, also check old tuple against this
2391 * wait - true if should wait for any conflicting update to commit/abort
2393 * Normal, successful return value is HeapTupleMayBeUpdated, which
2394 * actually means we *did* update it. Failure return codes are
2395 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2396 * (the last only possible if wait == false).
2398 * On success, the header fields of *newtup are updated to match the new
2399 * stored tuple; in particular, newtup->t_self is set to the TID where the
2400 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
2401 * update was done. However, any TOAST changes in the new tuple's
2402 * data are not reflected into *newtup.
2404 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
2405 * If t_ctid is the same as otid, the tuple was deleted; if different, the
2406 * tuple was updated, and t_ctid is the location of the replacement tuple.
2407 * (t_xmax is needed to verify that the replacement tuple matches.)
2410 heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
2411 ItemPointer ctid, TransactionId *update_xmax,
2412 CommandId cid, Snapshot crosscheck, bool wait)
2415 TransactionId xid = GetCurrentTransactionId();
2416 Bitmapset *hot_attrs;
2418 HeapTupleData oldtup;
2427 bool have_tuple_lock = false;
2429 bool use_hot_update = false;
2430 bool all_visible_cleared = false;
2431 bool all_visible_cleared_new = false;
2433 Assert(ItemPointerIsValid(otid));
2436 * Fetch the list of attributes to be checked for HOT update. This is
2437 * wasted effort if we fail to update or have to put the new tuple on a
2438 * different page. But we must compute the list before obtaining buffer
2439 * lock --- in the worst case, if we are doing an update on one of the
2440 * relevant system catalogs, we could deadlock if we try to fetch the list
2441 * later. In any case, the relcache caches the data so this is usually pretty cheap.
2444 * Note that we get a copy here, so we need not worry about relcache flush
2445 * happening midway through.
2447 hot_attrs = RelationGetIndexAttrBitmap(relation);
2449 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));
2450 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2452 page = BufferGetPage(buffer);
2453 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
2454 Assert(ItemIdIsNormal(lp));
2456 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2457 oldtup.t_len = ItemIdGetLength(lp);
2458 oldtup.t_self = *otid;
2461 * Note: beyond this point, use oldtup not otid to refer to old tuple.
2462 * otid may very well point at newtup->t_self, which we will overwrite
2463 * with the new tuple's location, so there's great risk of confusion if we
2468 result = HeapTupleSatisfiesUpdate(oldtup.t_data, cid, buffer);
2470 if (result == HeapTupleInvisible)
2472 UnlockReleaseBuffer(buffer);
2473 elog(ERROR, "attempted to update invisible tuple");
2475 else if (result == HeapTupleBeingUpdated && wait)
2477 TransactionId xwait;
2480 /* must copy state data before unlocking buffer */
2481 xwait = HeapTupleHeaderGetXmax(oldtup.t_data);
2482 infomask = oldtup.t_data->t_infomask;
2484 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2487 * Acquire tuple lock to establish our priority for the tuple (see
2488 * heap_lock_tuple). LockTuple will release us when we are
2489 * next-in-line for the tuple.
2491 * If we are forced to "start over" below, we keep the tuple lock;
2492 * this arranges that we stay at the head of the line while rechecking tuple state.
2495 if (!have_tuple_lock)
2497 LockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2498 have_tuple_lock = true;
2502 * Sleep until concurrent transaction ends. Note that we don't care
2503 * if the locker has an exclusive or shared lock, because we need exclusive.
2507 if (infomask & HEAP_XMAX_IS_MULTI)
2509 /* wait for multixact */
2510 MultiXactIdWait((MultiXactId) xwait);
2511 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2514 * If xwait had just locked the tuple then some other xact could
2515 * update this tuple before we get to this point. Check for xmax
2516 * change, and start over if so.
2518 if (!(oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2519 !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
2524 * You might think the multixact is necessarily done here, but not
2525 * so: it could have surviving members, namely our own xact or
2526 * other subxacts of this backend. It is legal for us to update
2527 * the tuple in either case, however (the latter case is
2528 * essentially a situation of upgrading our former shared lock to
2529 * exclusive). We don't bother changing the on-disk hint bits
2530 * since we are about to overwrite the xmax altogether.
2535 /* wait for regular transaction to end */
2536 XactLockTableWait(xwait);
2537 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2540 * xwait is done, but if xwait had just locked the tuple then some
2541 * other xact could update this tuple before we get to this point.
2542 * Check for xmax change, and start over if so.
2544 if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2545 !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
2549 /* Otherwise check if it committed or aborted */
2550 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
2554 * We may overwrite if previous xmax aborted, or if it committed but
2555 * only locked the tuple without updating it.
2557 if (oldtup.t_data->t_infomask & (HEAP_XMAX_INVALID |
2559 result = HeapTupleMayBeUpdated;
2561 result = HeapTupleUpdated;
2564 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
2566 /* Perform additional check for serializable RI updates */
2567 if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
2568 result = HeapTupleUpdated;
2571 if (result != HeapTupleMayBeUpdated)
2573 Assert(result == HeapTupleSelfUpdated ||
2574 result == HeapTupleUpdated ||
2575 result == HeapTupleBeingUpdated);
2576 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
2577 *ctid = oldtup.t_data->t_ctid;
2578 *update_xmax = HeapTupleHeaderGetXmax(oldtup.t_data);
2579 UnlockReleaseBuffer(buffer);
2580 if (have_tuple_lock)
2581 UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2582 bms_free(hot_attrs);
2586 /* Fill in OID and transaction status data for newtup */
2587 if (relation->rd_rel->relhasoids)
2590 /* this is redundant with an Assert in HeapTupleSetOid */
2591 Assert(newtup->t_data->t_infomask & HEAP_HASOID);
2593 HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
2597 /* check there is no space for an OID */
2598 Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
2601 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2602 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2603 newtup->t_data->t_infomask |= (HEAP_XMAX_INVALID | HEAP_UPDATED);
2604 HeapTupleHeaderSetXmin(newtup->t_data, xid);
2605 HeapTupleHeaderSetCmin(newtup->t_data, cid);
2606 HeapTupleHeaderSetXmax(newtup->t_data, 0); /* for cleanliness */
2607 newtup->t_tableOid = RelationGetRelid(relation);
2610 * Replace cid with a combo cid if necessary. Note that we already put
2611 * the plain cid into the new tuple.
2613 HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
2616 * If the toaster needs to be activated, OR if the new tuple will not fit
2617 * on the same page as the old, then we need to release the content lock
2618 * (but not the pin!) on the old tuple's buffer while we are off doing
2619 * TOAST and/or table-file-extension work. We must mark the old tuple to
2620 * show that it's already being updated, else other processes may try to
2621 * update it themselves.
2623 * We need to invoke the toaster if there are already any out-of-line
2624 * toasted values present, or if the new tuple is over-threshold.
2626 if (relation->rd_rel->relkind != RELKIND_RELATION)
2628 /* toast table entries should never be recursively toasted */
2629 Assert(!HeapTupleHasExternal(&oldtup));
2630 Assert(!HeapTupleHasExternal(newtup));
2634 need_toast = (HeapTupleHasExternal(&oldtup) ||
2635 HeapTupleHasExternal(newtup) ||
2636 newtup->t_len > TOAST_TUPLE_THRESHOLD);
2638 pagefree = PageGetHeapFreeSpace(page);
2640 newtupsize = MAXALIGN(newtup->t_len);
2642 if (need_toast || newtupsize > pagefree)
2644 /* Clear obsolete visibility flags ... */
2645 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2647 HEAP_XMAX_IS_MULTI |
2650 HeapTupleClearHotUpdated(&oldtup);
2651 /* ... and store info about transaction updating this tuple */
2652 HeapTupleHeaderSetXmax(oldtup.t_data, xid);
2653 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
2654 /* temporarily make it look not-updated */
2655 oldtup.t_data->t_ctid = oldtup.t_self;
2656 already_marked = true;
2657 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2660 * Let the toaster do its thing, if needed.
2662 * Note: below this point, heaptup is the data we actually intend to
2663 * store into the relation; newtup is the caller's original untoasted data.
2668 /* Note we always use WAL and FSM during updates */
2669 heaptup = toast_insert_or_update(relation, newtup, &oldtup, 0);
2670 newtupsize = MAXALIGN(heaptup->t_len);
2676 * Now, do we need a new page for the tuple, or not? This is a bit
2677 * tricky since someone else could have added tuples to the page while
2678 * we weren't looking. We have to recheck the available space after
2679 * reacquiring the buffer lock. But don't bother to do that if the
2680 * former amount of free space is still not enough; it's unlikely
2681 * there's more free now than before.
2683 * What's more, if we need to get a new page, we will need to acquire
2684 * buffer locks on both old and new pages. To avoid deadlock against
2685 * some other backend trying to get the same two locks in the other
2686 * order, we must be consistent about the order we get the locks in.
2687 * We use the rule "lock the lower-numbered page of the relation
2688 * first". To implement this, we must do RelationGetBufferForTuple
2689 * while not holding the lock on the old page, and we must rely on it
2690 * to get the locks on both pages in the correct order.
2692 if (newtupsize > pagefree)
2694 /* Assume there's no chance to put heaptup on same page. */
2695 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
2700 /* Re-acquire the lock on the old tuple's page. */
2701 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2702 /* Re-check using the up-to-date free space */
2703 pagefree = PageGetHeapFreeSpace(page);
2704 if (newtupsize > pagefree)
2707 * Rats, it doesn't fit anymore. We must now unlock and
2708 * relock to avoid deadlock. Fortunately, this path should seldom be taken.
2711 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2712 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
2717 /* OK, it fits here, so we're done. */
2724 /* No TOAST work needed, and it'll fit on same page */
2725 already_marked = false;
2731 * At this point newbuf and buffer are both pinned and locked, and newbuf
2732 * has enough space for the new tuple. If they are the same buffer, only one pin is held.
2736 if (newbuf == buffer)
2739 * Since the new tuple is going into the same page, we might be able
2740 * to do a HOT update. Check if any of the index columns have been
2741 * changed. If not, then HOT update is possible.
2743 if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
2744 use_hot_update = true;
2748 /* Set a hint that the old page could use prune/defrag */
2752 /* NO EREPORT(ERROR) from here till changes are logged */
2753 START_CRIT_SECTION();
2756 * If this transaction commits, the old tuple will become DEAD sooner or
2757 * later. Set flag that this page is a candidate for pruning once our xid
2758 * falls below the OldestXmin horizon. If the transaction finally aborts,
2759 * the subsequent page pruning will be a no-op and the hint will be cleared.
2762 * XXX Should we set hint on newbuf as well? If the transaction aborts,
2763 * there would be a prunable tuple in the newbuf; but for now we choose
2764 * not to optimize for aborts. Note that heap_xlog_update must be kept in
2765 * sync if this decision changes.
2767 PageSetPrunable(page, xid);
2771 /* Mark the old tuple as HOT-updated */
2772 HeapTupleSetHotUpdated(&oldtup);
2773 /* And mark the new tuple as heap-only */
2774 HeapTupleSetHeapOnly(heaptup);
2775 /* Mark the caller's copy too, in case different from heaptup */
2776 HeapTupleSetHeapOnly(newtup);
2780 /* Make sure tuples are correctly marked as not-HOT */
2781 HeapTupleClearHotUpdated(&oldtup);
2782 HeapTupleClearHeapOnly(heaptup);
2783 HeapTupleClearHeapOnly(newtup);
2786 RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
2788 if (!already_marked)
2790 /* Clear obsolete visibility flags ... */
2791 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2793 HEAP_XMAX_IS_MULTI |
2796 /* ... and store info about transaction updating this tuple */
2797 HeapTupleHeaderSetXmax(oldtup.t_data, xid);
2798 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
2801 /* record address of new tuple in t_ctid of old one */
2802 oldtup.t_data->t_ctid = heaptup->t_self;
2804 if (newbuf != buffer)
2805 MarkBufferDirty(newbuf);
2806 MarkBufferDirty(buffer);
2809 * Note: we mustn't clear PD_ALL_VISIBLE flags before writing the WAL
2810 * record, because log_heap_update looks at those flags to set the
2811 * corresponding flags in the WAL record.
2815 if (!relation->rd_istemp)
2817 XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
2818 newbuf, heaptup, false);
2820 if (newbuf != buffer)
2822 PageSetLSN(BufferGetPage(newbuf), recptr);
2823 PageSetTLI(BufferGetPage(newbuf), ThisTimeLineID);
2825 PageSetLSN(BufferGetPage(buffer), recptr);
2826 PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
2829 /* Clear PD_ALL_VISIBLE flags */
2830 if (PageIsAllVisible(BufferGetPage(buffer)))
2832 all_visible_cleared = true;
2833 PageClearAllVisible(BufferGetPage(buffer));
2835 if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
2837 all_visible_cleared_new = true;
2838 PageClearAllVisible(BufferGetPage(newbuf));
2843 if (newbuf != buffer)
2844 LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
2845 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2848 * Mark old tuple for invalidation from system caches at next command
2849 * boundary. We have to do this before releasing the buffer because we
2850 * need to look at the contents of the tuple.
2852 CacheInvalidateHeapTuple(relation, &oldtup);
2854 /* Clear bits in visibility map */
2855 if (all_visible_cleared)
2856 visibilitymap_clear(relation, BufferGetBlockNumber(buffer));
2857 if (all_visible_cleared_new)
2858 visibilitymap_clear(relation, BufferGetBlockNumber(newbuf));
2860 /* Now we can release the buffer(s) */
2861 if (newbuf != buffer)
2862 ReleaseBuffer(newbuf);
2863 ReleaseBuffer(buffer);
2866 * If new tuple is cachable, mark it for invalidation from the caches in
2867 * case we abort. Note it is OK to do this after releasing the buffer,
2868 * because the heaptup data structure is all in local memory, not in the shared buffer.
2871 CacheInvalidateHeapTuple(relation, heaptup);
2874 * Release the lmgr tuple lock, if we had it.
2876 if (have_tuple_lock)
2877 UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2879 pgstat_count_heap_update(relation, use_hot_update);
2882 * If heaptup is a private copy, release it. Don't forget to copy t_self
2883 * back to the caller's image, too.
2885 if (heaptup != newtup)
2887 newtup->t_self = heaptup->t_self;
2888 heap_freetuple(heaptup);
2891 bms_free(hot_attrs);
2893 return HeapTupleMayBeUpdated;
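/*
 * Illustrative sketch (not actual code from this module): heap_update does
 * not maintain indexes, so after a successful non-HOT update the caller is
 * expected to make new index entries for the stored tuple; after a HOT
 * update no index work is needed.  insert_index_entries_for() below is a
 * hypothetical stand-in for the caller's own index maintenance (the executor
 * uses ExecInsertIndexTuples for this).
 *
 *		result = heap_update(relation, otid, newtup,
 *							 &update_ctid, &update_xmax,
 *							 GetCurrentCommandId(true), InvalidSnapshot,
 *							 true);
 *		if (result == HeapTupleMayBeUpdated &&
 *			!HeapTupleIsHeapOnly(newtup))
 *			insert_index_entries_for(newtup);
 */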
2897 * Check if the specified attribute's value is same in both given tuples.
2898 * Subroutine for HeapSatisfiesHOTUpdate.
2901 heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
2902 HeapTuple tup1, HeapTuple tup2)
2908 Form_pg_attribute att;
2911 * If it's a whole-tuple reference, say "not equal". It's not really
2912 * worth supporting this case, since it could only succeed after a no-op
2913 * update, which is hardly a case worth optimizing for.
2919 * Likewise, automatically say "not equal" for any system attribute other
2920 * than OID and tableOID; we cannot expect these to be consistent in a HOT
2921 * chain, or even to be set correctly yet in the new tuple.
2925 if (attrnum != ObjectIdAttributeNumber &&
2926 attrnum != TableOidAttributeNumber)
2931 * Extract the corresponding values. XXX this is pretty inefficient if
2932 * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
2933 * single heap_deform_tuple call on each tuple, instead? But that doesn't
2934 * work for system columns ...
2936 value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
2937 value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
2940 * If one value is NULL and the other is not, then they are certainly not equal.
2943 if (isnull1 != isnull2)
2947 * If both are NULL, they can be considered equal.
2953 * We do simple binary comparison of the two datums. This may be overly
2954 * strict because there can be multiple binary representations for the
2955 * same logical value. But we should be OK as long as there are no false
2956 * positives. Using a type-specific equality operator is messy because
2957 * there could be multiple notions of equality in different operator
2958 * classes; furthermore, we cannot safely invoke user-defined functions
2959 * while holding exclusive buffer lock.
2963 /* The only allowed system columns are OIDs, so do this */
2964 return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
2968 Assert(attrnum <= tupdesc->natts);
2969 att = tupdesc->attrs[attrnum - 1];
2970 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
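/*
 * Illustrative sketch (not actual code from this module): datumIsEqual does
 * a purely binary comparison, so for a pass-by-value type it just compares
 * the Datum words, while for a pass-by-reference type it compares the
 * pointed-to bytes.  For example:
 *
 *		Datum	d1 = Int32GetDatum(42);
 *		Datum	d2 = Int32GetDatum(42);
 *		bool	same = datumIsEqual(d1, d2, true, sizeof(int32));
 *
 * yields true, whereas two different physical representations of the same
 * logical value (say, a compressed and an uncompressed copy of one varlena)
 * would compare as unequal.  That is exactly the "no false positives,
 * possible false negatives" behavior relied on above.
 */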
2975 * Check if the old and new tuples represent a HOT-safe update. To be able
2976 * to do a HOT update, we must not have changed any columns used in index definitions.
2979 * The set of attributes to be checked is passed in (we dare not try to
2980 * compute it while holding exclusive buffer lock...) NOTE that hot_attrs
2981 * is destructively modified! That is OK since this is invoked at most once by heap_update.
2984 * Returns true if safe to do HOT update.
2987 HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
2988 HeapTuple oldtup, HeapTuple newtup)
2992 while ((attrnum = bms_first_member(hot_attrs)) >= 0)
2994 /* Adjust for system attributes */
2995 attrnum += FirstLowInvalidHeapAttributeNumber;
2997 /* If the attribute value has changed, we can't do HOT update */
2998 if (!heap_tuple_attr_equals(RelationGetDescr(relation), attrnum,
3007 * simple_heap_update - replace a tuple
3009 * This routine may be used to update a tuple when concurrent updates of
3010 * the target tuple are not expected (for example, because we have a lock
3011 * on the relation associated with the tuple). Any failure is reported via ereport().
3015 simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
3018 ItemPointerData update_ctid;
3019 TransactionId update_xmax;
3021 result = heap_update(relation, otid, tup,
3022 &update_ctid, &update_xmax,
3023 GetCurrentCommandId(true), InvalidSnapshot,
3024 true /* wait for commit */ );
3027 case HeapTupleSelfUpdated:
3028 /* Tuple was already updated in current command? */
3029 elog(ERROR, "tuple already updated by self");
3032 case HeapTupleMayBeUpdated:
3033 /* done successfully */
3036 case HeapTupleUpdated:
3037 elog(ERROR, "tuple concurrently updated");
3041 elog(ERROR, "unrecognized heap_update status: %u", result);
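/*
 * Illustrative sketch (not actual code from this module): the common
 * catalog-update idiom built on simple_heap_update copies the cached tuple,
 * modifies it, stores it, and then updates the catalog's indexes.
 * "SOMECACHEID", "Form_pg_something", "catrel", "objectId" and "new_value"
 * are placeholders.
 *
 *		HeapTuple	tup = SearchSysCacheCopy(SOMECACHEID,
 *											 ObjectIdGetDatum(objectId),
 *											 0, 0, 0);
 *		Form_pg_something form;
 *
 *		if (!HeapTupleIsValid(tup))
 *			elog(ERROR, "cache lookup failed for object %u", objectId);
 *		form = (Form_pg_something) GETSTRUCT(tup);
 *		form->some_field = new_value;
 *		simple_heap_update(catrel, &tup->t_self, tup);
 *		CatalogUpdateIndexes(catrel, tup);
 *		heap_freetuple(tup);
 */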
3047 * heap_lock_tuple - lock a tuple in shared or exclusive mode
3049 * Note that this acquires a buffer pin, which the caller must release.
3052 * relation: relation containing tuple (caller must hold suitable lock)
3053 * tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
3054 * cid: current command ID (used for visibility test, and stored into
3055 * tuple's cmax if lock is successful)
3056 * mode: indicates if shared or exclusive tuple lock is desired
3057 * nowait: if true, ereport rather than blocking if lock not available
3059 * Output parameters:
3060 * *tuple: all fields filled in
3061 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
3062 * *ctid: set to tuple's t_ctid, but only in failure cases
3063 * *update_xmax: set to tuple's xmax, but only in failure cases
3065 * Function result may be:
3066 * HeapTupleMayBeUpdated: lock was successfully acquired
3067 * HeapTupleSelfUpdated: lock failed because tuple updated by self
3068 * HeapTupleUpdated: lock failed because tuple updated by other xact
3070 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
3071 * If t_ctid is the same as t_self, the tuple was deleted; if different, the
3072 * tuple was updated, and t_ctid is the location of the replacement tuple.
3073 * (t_xmax is needed to verify that the replacement tuple matches.)
3076 * NOTES: because the shared-memory lock table is of finite size, but users
3077 * could reasonably want to lock large numbers of tuples, we do not rely on
3078 * the standard lock manager to store tuple-level locks over the long term.
3079 * Instead, a tuple is marked as locked by setting the current transaction's
3080 * XID as its XMAX, and setting additional infomask bits to distinguish this
3081 * usage from the more normal case of having deleted the tuple. When
3082 * multiple transactions concurrently share-lock a tuple, the first locker's
3083 * XID is replaced in XMAX with a MultiTransactionId representing the set of
3084 * XIDs currently holding share-locks.
3086 * When it is necessary to wait for a tuple-level lock to be released, the
3087 * basic delay is provided by XactLockTableWait or MultiXactIdWait on the
3088 * contents of the tuple's XMAX. However, that mechanism will release all
3089 * waiters concurrently, so there would be a race condition as to which
3090 * waiter gets the tuple, potentially leading to indefinite starvation of
3091 * some waiters. The possibility of share-locking makes the problem much
3092 * worse --- a steady stream of share-lockers can easily block an exclusive
3093 * locker forever. To provide more reliable semantics about who gets a
3094 * tuple-level lock first, we use the standard lock manager. The protocol
3095 * for waiting for a tuple-level lock is really
 *		LockTuple()
3097 *		XactLockTableWait()
3098 *		mark tuple as locked by me
 *		UnlockTuple()
3100 * When there are multiple waiters, arbitration of who is to get the lock next
3101 * is provided by LockTuple(). However, at most one tuple-level lock will
3102 * be held or awaited per backend at any time, so we don't risk overflow
3103 * of the lock table. Note that incoming share-lockers are required to
3104 * do LockTuple as well, if there is any conflict, to ensure that they don't
3105 * starve out waiting exclusive-lockers. However, if there is not any active
3106 * conflict for a tuple, we don't incur any extra overhead.
3109 heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer,
3110 ItemPointer ctid, TransactionId *update_xmax,
3111 CommandId cid, LockTupleMode mode, bool nowait)
3114 ItemPointer tid = &(tuple->t_self);
3119 uint16 old_infomask;
3120 uint16 new_infomask;
3121 LOCKMODE tuple_lock_type;
3122 bool have_tuple_lock = false;
3124 tuple_lock_type = (mode == LockTupleShared) ? ShareLock : ExclusiveLock;
3126 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3127 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3129 page = BufferGetPage(*buffer);
3130 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3131 Assert(ItemIdIsNormal(lp));
3133 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
3134 tuple->t_len = ItemIdGetLength(lp);
3135 tuple->t_tableOid = RelationGetRelid(relation);
3138 result = HeapTupleSatisfiesUpdate(tuple->t_data, cid, *buffer);
3140 if (result == HeapTupleInvisible)
3142 UnlockReleaseBuffer(*buffer);
3143 elog(ERROR, "attempted to lock invisible tuple");
3145 else if (result == HeapTupleBeingUpdated)
3147 TransactionId xwait;
3150 /* must copy state data before unlocking buffer */
3151 xwait = HeapTupleHeaderGetXmax(tuple->t_data);
3152 infomask = tuple->t_data->t_infomask;
3154 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3157 * If we wish to acquire share lock, and the tuple is already
3158 * share-locked by a multixact that includes any subtransaction of the
3159 * current top transaction, then we effectively hold the desired lock
3160 * already. We *must* succeed without trying to take the tuple lock,
3161 * else we will deadlock against anyone waiting to acquire exclusive
3162 * lock. We don't need to make any state changes in this case.
3164 if (mode == LockTupleShared &&
3165 (infomask & HEAP_XMAX_IS_MULTI) &&
3166 MultiXactIdIsCurrent((MultiXactId) xwait))
3168 Assert(infomask & HEAP_XMAX_SHARED_LOCK);
3169 /* Probably can't hold tuple lock here, but may as well check */
3170 if (have_tuple_lock)
3171 UnlockTuple(relation, tid, tuple_lock_type);
3172 return HeapTupleMayBeUpdated;
3176 * Acquire tuple lock to establish our priority for the tuple.
3177 * LockTuple will release us when we are next-in-line for the tuple.
3178 * We must do this even if we are share-locking.
3180 * If we are forced to "start over" below, we keep the tuple lock;
3181 * this arranges that we stay at the head of the line while rechecking tuple state.
3184 if (!have_tuple_lock)
3188 if (!ConditionalLockTuple(relation, tid, tuple_lock_type))
3190 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3191 errmsg("could not obtain lock on row in relation \"%s\"",
3192 RelationGetRelationName(relation))));
3195 LockTuple(relation, tid, tuple_lock_type);
3196 have_tuple_lock = true;
3199 if (mode == LockTupleShared && (infomask & HEAP_XMAX_SHARED_LOCK))
3202 * Acquiring sharelock when there's at least one sharelocker
3203 * already. We need not wait for him/them to complete.
3205 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3208 * Make sure it's still a shared lock, else start over. (It's OK
3209 * if the ownership of the shared lock has changed, though.)
3211 if (!(tuple->t_data->t_infomask & HEAP_XMAX_SHARED_LOCK))
3214 else if (infomask & HEAP_XMAX_IS_MULTI)
3216 /* wait for multixact to end */
3219 if (!ConditionalMultiXactIdWait((MultiXactId) xwait))
3221 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3222 errmsg("could not obtain lock on row in relation \"%s\"",
3223 RelationGetRelationName(relation))));
3226 MultiXactIdWait((MultiXactId) xwait);
3228 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3231 * If xwait had just locked the tuple then some other xact could
3232 * update this tuple before we get to this point. Check for xmax
3233 * change, and start over if so.
3235 if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
3236 !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
3241 * You might think the multixact is necessarily done here, but not
3242 * so: it could have surviving members, namely our own xact or
3243 * other subxacts of this backend. It is legal for us to lock the
3244 * tuple in either case, however. We don't bother changing the
3245 * on-disk hint bits since we are about to overwrite the xmax altogether.
3251 /* wait for regular transaction to end */
3254 if (!ConditionalXactLockTableWait(xwait))
3256 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3257 errmsg("could not obtain lock on row in relation \"%s\"",
3258 RelationGetRelationName(relation))));
3261 XactLockTableWait(xwait);
3263 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3266 * xwait is done, but if xwait had just locked the tuple then some
3267 * other xact could update this tuple before we get to this point.
3268 * Check for xmax change, and start over if so.
3270 if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
3271 !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
3275 /* Otherwise check if it committed or aborted */
3276 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
3280 * We may lock if previous xmax aborted, or if it committed but only
3281 * locked the tuple without updating it. The case where we didn't
3282 * wait because we are joining an existing shared lock is correctly handled, too.
3285 if (tuple->t_data->t_infomask & (HEAP_XMAX_INVALID |
3287 result = HeapTupleMayBeUpdated;
3289 result = HeapTupleUpdated;
3292 if (result != HeapTupleMayBeUpdated)
3294 Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated);
3295 Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
3296 *ctid = tuple->t_data->t_ctid;
3297 *update_xmax = HeapTupleHeaderGetXmax(tuple->t_data);
3298 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3299 if (have_tuple_lock)
3300 UnlockTuple(relation, tid, tuple_lock_type);
3305 * We might already hold the desired lock (or stronger), possibly under a
3306 * different subtransaction of the current top transaction. If so, there
3307 * is no need to change state or issue a WAL record. We already handled
3308 * the case where this is true for xmax being a MultiXactId, so now check
3309 * for cases where it is a plain TransactionId.
3311 * Note in particular that this covers the case where we already hold
3312 * exclusive lock on the tuple and the caller only wants shared lock. It
3313 * would certainly not do to give up the exclusive lock.
3315 xmax = HeapTupleHeaderGetXmax(tuple->t_data);
3316 old_infomask = tuple->t_data->t_infomask;
3318 if (!(old_infomask & (HEAP_XMAX_INVALID |
3319 HEAP_XMAX_COMMITTED |
3320 HEAP_XMAX_IS_MULTI)) &&
3321 (mode == LockTupleShared ?
3322 (old_infomask & HEAP_IS_LOCKED) :
3323 (old_infomask & HEAP_XMAX_EXCL_LOCK)) &&
3324 TransactionIdIsCurrentTransactionId(xmax))
3326 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3327 /* Probably can't hold tuple lock here, but may as well check */
3328 if (have_tuple_lock)
3329 UnlockTuple(relation, tid, tuple_lock_type);
3330 return HeapTupleMayBeUpdated;
3334 * Compute the new xmax and infomask to store into the tuple. Note we do
3335 * not modify the tuple just yet, because that would leave it in the wrong
3336 * state if multixact.c elogs.
3338 xid = GetCurrentTransactionId();
3340 new_infomask = old_infomask & ~(HEAP_XMAX_COMMITTED |
3342 HEAP_XMAX_IS_MULTI |
3346 if (mode == LockTupleShared)
3349 * If this is the first acquisition of a shared lock in the current
3350 * transaction, set my per-backend OldestMemberMXactId setting. We can
3351 * be certain that the transaction will never become a member of any
3352 * older MultiXactIds than that. (We have to do this even if we end
3353 * up just using our own TransactionId below, since some other backend
3354 * could incorporate our XID into a MultiXact immediately afterwards.)
3356 MultiXactIdSetOldestMember();
3358 new_infomask |= HEAP_XMAX_SHARED_LOCK;
3361 * Check to see if we need a MultiXactId because there are multiple lockers.
3364 * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID bit if
3365 * the xmax was a MultiXactId but it was not running anymore. There is
3366 * a race condition, which is that the MultiXactId may have finished
3367 * since then, but that uncommon case is handled within
3368 * MultiXactIdExpand.
3370 * There is a similar race condition possible when the old xmax was a
3371 * regular TransactionId. We test TransactionIdIsInProgress again
3372 * just to narrow the window, but it's still possible to end up
3373 * creating an unnecessary MultiXactId. Fortunately this is harmless.
3375 if (!(old_infomask & (HEAP_XMAX_INVALID | HEAP_XMAX_COMMITTED)))
3377 if (old_infomask & HEAP_XMAX_IS_MULTI)
3380 * If the XMAX is already a MultiXactId, then we need to
3381 * expand it to include our own TransactionId.
3383 xid = MultiXactIdExpand((MultiXactId) xmax, xid);
3384 new_infomask |= HEAP_XMAX_IS_MULTI;
3386 else if (TransactionIdIsInProgress(xmax))
3389 * If the XMAX is a valid TransactionId, then we need to
3390 * create a new MultiXactId that includes both the old locker
3391 * and our own TransactionId.
3393 xid = MultiXactIdCreate(xmax, xid);
3394 new_infomask |= HEAP_XMAX_IS_MULTI;
3399 * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
3400 * as running, but it finished before
3401 * TransactionIdIsInProgress() got to run. Treat it like
3402 * there's no locker in the tuple.
3409 * There was no previous locker, so just insert our own TransactionId.
3416 /* We want an exclusive lock on the tuple */
3417 new_infomask |= HEAP_XMAX_EXCL_LOCK;
3420 START_CRIT_SECTION();
3423 * Store transaction information of xact locking the tuple.
3425 * Note: Cmax is meaningless in this context, so don't set it; this avoids
3426 * possibly generating a useless combo CID.
3428 tuple->t_data->t_infomask = new_infomask;
3429 HeapTupleHeaderClearHotUpdated(tuple->t_data);
3430 HeapTupleHeaderSetXmax(tuple->t_data, xid);
3431 /* Make sure there is no forward chain link in t_ctid */
3432 tuple->t_data->t_ctid = *tid;
3434 MarkBufferDirty(*buffer);
3437 * XLOG stuff. You might think that we don't need an XLOG record because
3438 * there is no state change worth restoring after a crash. You would be
3439 * wrong however: we have just written either a TransactionId or a
3440 * MultiXactId that may never have been seen on disk before, and we need
3441 * to make sure that there are XLOG entries covering those ID numbers.
3442 * Else the same IDs might be re-used after a crash, which would be
3443 * disastrous if this page made it to disk before the crash. Essentially
3444 * we have to enforce the WAL log-before-data rule even in this case.
3445 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
3446 * entries for everything anyway.)
3448 if (!relation->rd_istemp)
3452 XLogRecData rdata[2];
3454 xlrec.target.node = relation->rd_node;
3455 xlrec.target.tid = tuple->t_self;
3456 xlrec.locking_xid = xid;
3457 xlrec.xid_is_mxact = ((new_infomask & HEAP_XMAX_IS_MULTI) != 0);
3458 xlrec.shared_lock = (mode == LockTupleShared);
3459 rdata[0].data = (char *) &xlrec;
3460 rdata[0].len = SizeOfHeapLock;
3461 rdata[0].buffer = InvalidBuffer;
3462 rdata[0].next = &(rdata[1]);
3464 rdata[1].data = NULL;
3466 rdata[1].buffer = *buffer;
3467 rdata[1].buffer_std = true;
3468 rdata[1].next = NULL;
3470 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK, rdata);
3472 PageSetLSN(page, recptr);
3473 PageSetTLI(page, ThisTimeLineID);
3478 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3481 * Don't update the visibility map here. Locking a tuple doesn't
3482 * change visibility info.
3486 * Now that we have successfully marked the tuple as locked, we can
3487 * release the lmgr tuple lock, if we had it.
3489 if (have_tuple_lock)
3490 UnlockTuple(relation, tid, tuple_lock_type);
3492 return HeapTupleMayBeUpdated;
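/*
 * Illustrative sketch (not actual code from this module): a SELECT ... FOR
 * UPDATE style caller uses heap_lock_tuple roughly as below.  The buffer pin
 * is always returned to the caller and must be released; on HeapTupleUpdated
 * the returned update_ctid/update_xmax identify the replacement version.
 * The names and command-ID choice are illustrative, and handle_updated_row()
 * stands in for the caller's own recheck logic.
 *
 *		test = heap_lock_tuple(relation, &tuple, &buffer,
 *							   &update_ctid, &update_xmax,
 *							   GetCurrentCommandId(false),
 *							   LockTupleExclusive, false);
 *		ReleaseBuffer(buffer);
 *		switch (test)
 *		{
 *			case HeapTupleMayBeUpdated:
 *			case HeapTupleSelfUpdated:
 *				break;
 *			case HeapTupleUpdated:
 *				handle_updated_row(&update_ctid, update_xmax);
 *				break;
 *			default:
 *				elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
 *		}
 */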
3497 * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
3499 * Overwriting violates both MVCC and transactional safety, so the uses
3500 * of this function in Postgres are extremely limited. Nonetheless we
3501 * find some places to use it.
3503 * The tuple cannot change size, and therefore it's reasonable to assume
3504 * that its null bitmap (if any) doesn't change either. So we just
3505 * overwrite the data portion of the tuple without touching the null
3506 * bitmap or any of the header fields.
3508 * tuple is an in-memory tuple structure containing the data to be written
3509 * over the target tuple. Also, tuple->t_self identifies the target tuple.
3512 heap_inplace_update(Relation relation, HeapTuple tuple)
3516 OffsetNumber offnum;
3518 HeapTupleHeader htup;
3522 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
3523 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3524 page = (Page) BufferGetPage(buffer);
3526 offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
3527 if (PageGetMaxOffsetNumber(page) >= offnum)
3528 lp = PageGetItemId(page, offnum);
3530 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
3531 elog(ERROR, "heap_inplace_update: invalid lp");
3533 htup = (HeapTupleHeader) PageGetItem(page, lp);
3535 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
3536 newlen = tuple->t_len - tuple->t_data->t_hoff;
3537 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
3538 elog(ERROR, "heap_inplace_update: wrong tuple length");
3540 /* NO EREPORT(ERROR) from here till changes are logged */
3541 START_CRIT_SECTION();
3543 memcpy((char *) htup + htup->t_hoff,
3544 (char *) tuple->t_data + tuple->t_data->t_hoff,
3547 MarkBufferDirty(buffer);
3550 if (!relation->rd_istemp)
3552 xl_heap_inplace xlrec;
3554 XLogRecData rdata[2];
3556 xlrec.target.node = relation->rd_node;
3557 xlrec.target.tid = tuple->t_self;
3559 rdata[0].data = (char *) &xlrec;
3560 rdata[0].len = SizeOfHeapInplace;
3561 rdata[0].buffer = InvalidBuffer;
3562 rdata[0].next = &(rdata[1]);
3564 rdata[1].data = (char *) htup + htup->t_hoff;
3565 rdata[1].len = newlen;
3566 rdata[1].buffer = buffer;
3567 rdata[1].buffer_std = true;
3568 rdata[1].next = NULL;
3570 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE, rdata);
3572 PageSetLSN(page, recptr);
3573 PageSetTLI(page, ThisTimeLineID);
3578 UnlockReleaseBuffer(buffer);
3580 /* Send out shared cache inval if necessary */
3581 if (!IsBootstrapProcessingMode())
3582 CacheInvalidateHeapTuple(relation, tuple);
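/*
 * Illustrative sketch (not actual code from this module): the classic user
 * of heap_inplace_update is VACUUM's non-transactional update of pg_class
 * statistics, roughly along these lines (error handling omitted; "relid",
 * "num_pages" and "num_tuples" are assumed inputs):
 *
 *		Relation	rd = heap_open(RelationRelationId, RowExclusiveLock);
 *		HeapTuple	ctup = SearchSysCacheCopy(RELOID,
 *											  ObjectIdGetDatum(relid),
 *											  0, 0, 0);
 *		Form_pg_class pgcform = (Form_pg_class) GETSTRUCT(ctup);
 *
 *		pgcform->relpages = (int32) num_pages;
 *		pgcform->reltuples = (float4) num_tuples;
 *		heap_inplace_update(rd, ctup);
 *		heap_close(rd, RowExclusiveLock);
 */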
3589 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
3590 * are older than the specified cutoff XID. If so, replace them with
3591 * FrozenTransactionId or InvalidTransactionId as appropriate, and return
3592 * TRUE. Return FALSE if nothing was changed.
3594 * It is assumed that the caller has checked the tuple with
3595 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
3596 * (else we should be removing the tuple, not freezing it).
3598 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
3599 * XID older than it could neither be running nor seen as running by any
3600 * open transaction. This ensures that the replacement will not change
3601 * anyone's idea of the tuple state. Also, since we assume the tuple is
3602 * not HEAPTUPLE_DEAD, the fact that an XID is not still running allows us
3603 * to assume that it is either committed good or aborted, as appropriate;
3604 * so we need no external state checks to decide what to do. (This is good
3605 * because this function is applied during WAL recovery, when we don't have
3606 * access to any such state, and can't depend on the hint bits to be set.)
3608 * In lazy VACUUM, we call this while initially holding only a shared lock
3609 * on the tuple's buffer. If any change is needed, we trade that in for an
3610 * exclusive lock before making the change. Caller should pass the buffer ID
3611 * if shared lock is held, InvalidBuffer if exclusive lock is already held.
3613 * Note: it might seem we could make the changes without exclusive lock, since
3614 * TransactionId read/write is assumed atomic anyway. However there is a race
3615 * condition: someone who just fetched an old XID that we overwrite here could
3616 * conceivably not finish checking the XID against pg_clog before we finish
3617 * the VACUUM and perhaps truncate off the part of pg_clog he needs. Getting
3618 * exclusive lock ensures no other backend is in process of checking the
3619 * tuple status. Also, getting exclusive lock makes it safe to adjust the hint bits in the tuple.
3623 heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
3626 bool changed = false;
3629 xid = HeapTupleHeaderGetXmin(tuple);
3630 if (TransactionIdIsNormal(xid) &&
3631 TransactionIdPrecedes(xid, cutoff_xid))
3633 if (buf != InvalidBuffer)
3635 /* trade in share lock for exclusive lock */
3636 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3637 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3638 buf = InvalidBuffer;
3640 HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
3643 * Might as well fix the hint bits too; usually XMIN_COMMITTED will
3644 * already be set here, but there's a small chance not.
3646 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
3647 tuple->t_infomask |= HEAP_XMIN_COMMITTED;
3652 * When we release shared lock, it's possible for someone else to change
3653 * xmax before we get the lock back, so repeat the check after acquiring
3654 * exclusive lock. (We don't need this pushup for xmin, because only
3655 * VACUUM could be interested in changing an existing tuple's xmin, and
3656 * there's only one VACUUM allowed on a table at a time.)
3659 if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
3661 xid = HeapTupleHeaderGetXmax(tuple);
3662 if (TransactionIdIsNormal(xid) &&
3663 TransactionIdPrecedes(xid, cutoff_xid))
3665 if (buf != InvalidBuffer)
3667 /* trade in share lock for exclusive lock */
3668 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3669 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3670 buf = InvalidBuffer;
3671 goto recheck_xmax; /* see comment above */
3673 HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
3676 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
3677 * + LOCKED. Normalize to INVALID just to be sure no one gets confused.
3680 tuple->t_infomask &= ~HEAP_XMAX_COMMITTED;
3681 tuple->t_infomask |= HEAP_XMAX_INVALID;
3682 HeapTupleHeaderClearHotUpdated(tuple);
3689 * XXX perhaps someday we should zero out very old MultiXactIds here?
3691 * The only way a stale MultiXactId could pose a problem is if a
3692 * tuple, having once been multiply-share-locked, is not touched by
3693 * any vacuum or attempted lock or deletion for just over 4G MultiXact
3694 * creations, and then in the probably-narrow window where its xmax
3695 * is again a live MultiXactId, someone tries to lock or delete it.
3696 * Even then, another share-lock attempt would work fine. An
3697 * exclusive-lock or delete attempt would face unexpected delay, or
3698 * in the very worst case get a deadlock error. This seems an
3699 * extremely low-probability scenario with minimal downside even if
3700 * it does happen, so for now we don't do the extra bookkeeping that
3701 * would be needed to clean out MultiXactIds.
3707 * Although xvac per se could only be set by VACUUM, it shares physical
3708 * storage space with cmax, and so could be wiped out by someone setting
3709 * xmax. Hence recheck after changing lock, same as for xmax itself.
3712 if (tuple->t_infomask & HEAP_MOVED)
3714 xid = HeapTupleHeaderGetXvac(tuple);
3715 if (TransactionIdIsNormal(xid) &&
3716 TransactionIdPrecedes(xid, cutoff_xid))
3718 if (buf != InvalidBuffer)
3720 /* trade in share lock for exclusive lock */
3721 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3722 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3723 buf = InvalidBuffer;
3724 goto recheck_xvac; /* see comment above */
3728 * If a MOVED_OFF tuple is not dead, the xvac transaction must
3729 * have failed; whereas a non-dead MOVED_IN tuple must mean the
3730 * xvac transaction succeeded.
3732 if (tuple->t_infomask & HEAP_MOVED_OFF)
3733 HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
3735 HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
3738 * Might as well fix the hint bits too; usually XMIN_COMMITTED
3739 * will already be set here, but there's a small chance not.
3741 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
3742 tuple->t_infomask |= HEAP_XMIN_COMMITTED;
3752 * heap_markpos - mark scan position
3756 heap_markpos(HeapScanDesc scan)
3758 /* Note: no locking manipulations needed */
3760 if (scan->rs_ctup.t_data != NULL)
3762 scan->rs_mctid = scan->rs_ctup.t_self;
3763 if (scan->rs_pageatatime)
3764 scan->rs_mindex = scan->rs_cindex;
3767 ItemPointerSetInvalid(&scan->rs_mctid);
3771 * heap_restrpos - restore position to marked location
3775 heap_restrpos(HeapScanDesc scan)
3777 /* XXX no amrestrpos checking that ammarkpos called */
3779 if (!ItemPointerIsValid(&scan->rs_mctid))
3781 scan->rs_ctup.t_data = NULL;
3784 * unpin scan buffers
3786 if (BufferIsValid(scan->rs_cbuf))
3787 ReleaseBuffer(scan->rs_cbuf);
3788 scan->rs_cbuf = InvalidBuffer;
3789 scan->rs_cblock = InvalidBlockNumber;
3790 scan->rs_inited = false;
3795 * If we reached end of scan, rs_inited will now be false. We must
3796 * reset it to true to keep heapgettup from doing the wrong thing.
3798 scan->rs_inited = true;
3799 scan->rs_ctup.t_self = scan->rs_mctid;
3800 if (scan->rs_pageatatime)
3802 scan->rs_cindex = scan->rs_mindex;
3803 heapgettup_pagemode(scan,
3804 NoMovementScanDirection,
3805 0, /* needn't recheck scan keys */
3810 NoMovementScanDirection,
3811 0, /* needn't recheck scan keys */
3817 * Perform XLogInsert for a heap-clean operation. Caller must already
3818 * have modified the buffer and marked it dirty.
3820 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
3821 * zero-based tuple indexes. Now they are one-based like other uses of OffsetNumber.
3825 log_heap_clean(Relation reln, Buffer buffer,
3826 OffsetNumber *redirected, int nredirected,
3827 OffsetNumber *nowdead, int ndead,
3828 OffsetNumber *nowunused, int nunused,
3831 xl_heap_clean xlrec;
3834 XLogRecData rdata[4];
3836 /* Caller should not call me on a temp relation */
3837 Assert(!reln->rd_istemp);
3839 xlrec.node = reln->rd_node;
3840 xlrec.block = BufferGetBlockNumber(buffer);
3841 xlrec.nredirected = nredirected;
3842 xlrec.ndead = ndead;
3844 rdata[0].data = (char *) &xlrec;
3845 rdata[0].len = SizeOfHeapClean;
3846 rdata[0].buffer = InvalidBuffer;
3847 rdata[0].next = &(rdata[1]);
3850 * The OffsetNumber arrays are not actually in the buffer, but we pretend
3851 * that they are. When XLogInsert stores the whole buffer, the offset
3852 * arrays need not be stored too. Note that even if all three arrays are
3853 * empty, we want to expose the buffer as a candidate for whole-page
3854 * storage, since this record type implies a defragmentation operation
3855 * even if no item pointers changed state.
3857 if (nredirected > 0)
3859 rdata[1].data = (char *) redirected;
3860 rdata[1].len = nredirected * sizeof(OffsetNumber) * 2;
3864 rdata[1].data = NULL;
3867 rdata[1].buffer = buffer;
3868 rdata[1].buffer_std = true;
3869 rdata[1].next = &(rdata[2]);
3873 rdata[2].data = (char *) nowdead;
3874 rdata[2].len = ndead * sizeof(OffsetNumber);
3878 rdata[2].data = NULL;
3881 rdata[2].buffer = buffer;
3882 rdata[2].buffer_std = true;
3883 rdata[2].next = &(rdata[3]);
3887 rdata[3].data = (char *) nowunused;
3888 rdata[3].len = nunused * sizeof(OffsetNumber);
3892 rdata[3].data = NULL;
3895 rdata[3].buffer = buffer;
3896 rdata[3].buffer_std = true;
3897 rdata[3].next = NULL;
3899 info = redirect_move ? XLOG_HEAP2_CLEAN_MOVE : XLOG_HEAP2_CLEAN;
3900 recptr = XLogInsert(RM_HEAP2_ID, info, rdata);
3906 * Perform XLogInsert for a heap-freeze operation. Caller must already
3907 * have modified the buffer and marked it dirty.
3910 log_heap_freeze(Relation reln, Buffer buffer,
3911 TransactionId cutoff_xid,
3912 OffsetNumber *offsets, int offcnt)
3914 xl_heap_freeze xlrec;
3916 XLogRecData rdata[2];
3918 /* Caller should not call me on a temp relation */
3919 Assert(!reln->rd_istemp);
3920 /* nor when there are no tuples to freeze */
3923 xlrec.node = reln->rd_node;
3924 xlrec.block = BufferGetBlockNumber(buffer);
3925 xlrec.cutoff_xid = cutoff_xid;
3927 rdata[0].data = (char *) &xlrec;
3928 rdata[0].len = SizeOfHeapFreeze;
3929 rdata[0].buffer = InvalidBuffer;
3930 rdata[0].next = &(rdata[1]);
3933 * The tuple-offsets array is not actually in the buffer, but pretend that
3934 * it is. When XLogInsert stores the whole buffer, the offsets array need
3935 * not be stored too.
3937 rdata[1].data = (char *) offsets;
3938 rdata[1].len = offcnt * sizeof(OffsetNumber);
3939 rdata[1].buffer = buffer;
3940 rdata[1].buffer_std = true;
3941 rdata[1].next = NULL;
3943 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE, rdata);
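/*
 * Illustrative sketch (not actual code from this module): lazy VACUUM ties
 * heap_freeze_tuple and log_heap_freeze together by remembering the offsets
 * of the tuples it actually changed on a page and then emitting one WAL
 * record for the whole batch.  The names ("frozen", "nfrozen", "FreezeLimit",
 * "onerel") are illustrative.
 *
 *		if (heap_freeze_tuple(tuple.t_data, FreezeLimit, buf))
 *			frozen[nfrozen++] = offnum;
 *
 *		... after scanning all tuples on the page ...
 *
 *		if (nfrozen > 0)
 *		{
 *			MarkBufferDirty(buf);
 *			if (!onerel->rd_istemp)
 *			{
 *				recptr = log_heap_freeze(onerel, buf, FreezeLimit,
 *										 frozen, nfrozen);
 *				PageSetLSN(page, recptr);
 *				PageSetTLI(page, ThisTimeLineID);
 *			}
 *		}
 */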
3949 * Perform XLogInsert for a heap-update operation. Caller must already
3950 * have modified the buffer(s) and marked them dirty.
3953 log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
3954 Buffer newbuf, HeapTuple newtup, bool move)
3957 * Note: xlhdr is declared to have adequate size and correct alignment for
3958 * an xl_heap_header. However the two tids, if present at all, will be
3959 * packed in with no wasted space after the xl_heap_header; they aren't
3960 * necessarily aligned as implied by this struct declaration.
3968 int hsize = SizeOfHeapHeader;
3969 xl_heap_update xlrec;
3972 XLogRecData rdata[4];
3973 Page page = BufferGetPage(newbuf);
3975 /* Caller should not call me on a temp relation */
3976 Assert(!reln->rd_istemp);
3980 Assert(!HeapTupleIsHeapOnly(newtup));
3981 info = XLOG_HEAP_MOVE;
3983 else if (HeapTupleIsHeapOnly(newtup))
3984 info = XLOG_HEAP_HOT_UPDATE;
3986 info = XLOG_HEAP_UPDATE;
3988 xlrec.target.node = reln->rd_node;
3989 xlrec.target.tid = from;
3990 xlrec.all_visible_cleared = PageIsAllVisible(BufferGetPage(oldbuf));
3991 xlrec.newtid = newtup->t_self;
3992 xlrec.new_all_visible_cleared = PageIsAllVisible(BufferGetPage(newbuf));
3994 rdata[0].data = (char *) &xlrec;
3995 rdata[0].len = SizeOfHeapUpdate;
3996 rdata[0].buffer = InvalidBuffer;
3997 rdata[0].next = &(rdata[1]);
3999 rdata[1].data = NULL;
4001 rdata[1].buffer = oldbuf;
4002 rdata[1].buffer_std = true;
4003 rdata[1].next = &(rdata[2]);
4005 xlhdr.hdr.t_infomask2 = newtup->t_data->t_infomask2;
4006 xlhdr.hdr.t_infomask = newtup->t_data->t_infomask;
4007 xlhdr.hdr.t_hoff = newtup->t_data->t_hoff;
4008 if (move) /* remember xmax & xmin */
4010 TransactionId xid[2]; /* xmax, xmin */
4012 if (newtup->t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED))
4013 xid[0] = InvalidTransactionId;
4015 xid[0] = HeapTupleHeaderGetXmax(newtup->t_data);
4016 xid[1] = HeapTupleHeaderGetXmin(newtup->t_data);
4017 memcpy((char *) &xlhdr + hsize,
4019 2 * sizeof(TransactionId));
4020 hsize += 2 * sizeof(TransactionId);
4024 * As with insert records, we need not store the rdata[2] segment if we
4025 * decide to store the whole buffer instead.
4027 rdata[2].data = (char *) &xlhdr;
4028 rdata[2].len = hsize;
4029 rdata[2].buffer = newbuf;
4030 rdata[2].buffer_std = true;
4031 rdata[2].next = &(rdata[3]);
4033 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
4034 rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
4035 rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
4036 rdata[3].buffer = newbuf;
4037 rdata[3].buffer_std = true;
4038 rdata[3].next = NULL;
4040 /* If new tuple is the single and first tuple on page... */
4041 if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
4042 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
4044 info |= XLOG_HEAP_INIT_PAGE;
4045 rdata[2].buffer = rdata[3].buffer = InvalidBuffer;
4048 recptr = XLogInsert(RM_HEAP_ID, info, rdata);
4054 * Perform XLogInsert for a heap-move operation. Caller must already
4055 * have modified the buffers and marked them dirty.
4058 log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
4059 Buffer newbuf, HeapTuple newtup)
4061 return log_heap_update(reln, oldbuf, from, newbuf, newtup, true);
4065 * Perform XLogInsert of a HEAP_NEWPAGE record to WAL. Caller is responsible
4066 * for writing the page to disk after calling this routine.
4068 * Note: all current callers build pages in private memory and write them
4069 * directly to smgr, rather than using bufmgr. Therefore there is no need
4070 * to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
4071 * the critical section.
4073 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
4074 * not do anything that assumes we are touching a heap.
4077 log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
4080 xl_heap_newpage xlrec;
4082 XLogRecData rdata[2];
4084 /* NO ELOG(ERROR) from here till newpage op is logged */
4085 START_CRIT_SECTION();
4087 xlrec.node = *rnode;
4088 xlrec.forknum = forkNum;
4089 xlrec.blkno = blkno;
4091 rdata[0].data = (char *) &xlrec;
4092 rdata[0].len = SizeOfHeapNewpage;
4093 rdata[0].buffer = InvalidBuffer;
4094 rdata[0].next = &(rdata[1]);
4096 rdata[1].data = (char *) page;
4097 rdata[1].len = BLCKSZ;
4098 rdata[1].buffer = InvalidBuffer;
4099 rdata[1].next = NULL;
4101 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);
4103 PageSetLSN(page, recptr);
4104 PageSetTLI(page, ThisTimeLineID);
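/*
 * Illustrative sketch (not actual code from this module): a typical caller
 * builds each page in private memory, WAL-logs it with log_newpage, and
 * writes it straight to smgr, bypassing shared buffers; because of that
 * bypass the caller must also force the data to disk itself before commit,
 * e.g. with smgrimmedsync.  "use_wal" and "dst" are assumed locals here.
 *
 *		if (use_wal)
 *			log_newpage(&dst->smgr_rnode, forkNum, blkno, page);
 *		smgrextend(dst, forkNum, blkno, (char *) page, true);
 *		...
 *		if (use_wal)
 *			smgrimmedsync(dst, forkNum);
 */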
/*
 * Handles CLEAN and CLEAN_MOVE record types
 */
static void
heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
{
    xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber *end;
    OffsetNumber *redirected;
    OffsetNumber *nowdead;
    OffsetNumber *nowunused;
    int         nredirected;
    int         ndead;
    int         nunused;
    Size        freespace;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    nredirected = xlrec->nredirected;
    ndead = xlrec->ndead;
    end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
    redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
    nowdead = redirected + (nredirected * 2);
    nowunused = nowdead + ndead;
    nunused = (end - nowunused);
    Assert(nunused >= 0);

    /* Update all item pointers per the record, and repair fragmentation */
    heap_page_prune_execute(buffer,
                            redirected, nredirected,
                            nowdead, ndead,
                            nowunused, nunused,
                            clean_move);

    freespace = PageGetHeapFreeSpace(page);     /* needed to update FSM below */

    /*
     * Note: we don't worry about updating the page's prunability hints.
     * At worst this will cause an extra prune cycle to occur soon.
     */

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);

    /*
     * Update the FSM as well.
     *
     * XXX: We don't get here if the page was restored from full page image.
     * We don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
}
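
/*
 * Layout of the payload following xl_heap_clean, as decoded by the pointer
 * arithmetic above: first nredirected pairs of OffsetNumbers (redirection
 * from/to), then ndead OffsetNumbers for line pointers to mark dead, then
 * the remainder of the record (implied by xl_len) as OffsetNumbers to mark
 * unused.
 */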
static void
heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
    TransactionId cutoff_xid = xlrec->cutoff_xid;
    Buffer      buffer;
    Page        page;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    if (record->xl_len > SizeOfHeapFreeze)
    {
        OffsetNumber *offsets;
        OffsetNumber *offsets_end;

        offsets = (OffsetNumber *) ((char *) xlrec + SizeOfHeapFreeze);
        offsets_end = (OffsetNumber *) ((char *) xlrec + record->xl_len);

        while (offsets < offsets_end)
        {
            /* offsets[] entries are one-based */
            ItemId      lp = PageGetItemId(page, *offsets);
            HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);

            (void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
            offsets++;
        }
    }

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_newpage *xlrec = (xl_heap_newpage *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;

    /*
     * Note: the NEWPAGE log record is used for both heaps and indexes, so do
     * not do anything that assumes we are touching a heap.
     */
    buffer = XLogReadBuffer(xlrec->node, xlrec->blkno, true);
    Assert(BufferIsValid(buffer));
    page = (Page) BufferGetPage(buffer);

    Assert(record->xl_len == SizeOfHeapNewpage + BLCKSZ);
    memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
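
/*
 * Since a NEWPAGE record carries a complete BLCKSZ page image, replay simply
 * overwrites the buffer; no LSN comparison is needed because re-applying the
 * copy is harmless.
 */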
static void
heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    BlockNumber blkno;

    blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));

    /*
     * The visibility map always needs to be updated, even if the heap page
     * is already up-to-date.
     */
    if (xlrec->all_visible_cleared)
    {
        Relation    reln = CreateFakeRelcacheEntry(xlrec->target.node);

        visibilitymap_clear(reln, blkno);
        FreeFakeRelcacheEntry(reln);
    }

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))        /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_delete_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                          HEAP_XMAX_INVALID |
                          HEAP_XMAX_IS_MULTI |
                          HEAP_IS_LOCKED |
                          HEAP_MOVED);
    HeapTupleHeaderClearHotUpdated(htup);
    HeapTupleHeaderSetXmax(htup, record->xl_xid);
    HeapTupleHeaderSetCmax(htup, FirstCommandId, false);

    /* Mark the page as a candidate for pruning */
    PageSetPrunable(page, record->xl_xid);

    if (xlrec->all_visible_cleared)
        PageClearAllVisible(page);

    /* Make sure there is no forward chain link in t_ctid */
    htup->t_ctid = xlrec->target.tid;
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
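
/*
 * Note that PageSetPrunable above records the deleting XID in the page
 * header (pd_prune_xid), so that once that XID is older than every snapshot
 * a later opportunistic prune can reclaim the dead tuple.
 */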
static void
heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    struct
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    HeapTupleHeader htup;
    xl_heap_header xlhdr;
    uint32      newlen;
    Size        freespace;
    BlockNumber blkno;

    blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));

    /*
     * The visibility map always needs to be updated, even if the heap page
     * is already up-to-date.
     */
    if (xlrec->all_visible_cleared)
    {
        Relation    reln = CreateFakeRelcacheEntry(xlrec->target.node);

        visibilitymap_clear(reln, blkno);
        FreeFakeRelcacheEntry(reln);
    }

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    if (record->xl_info & XLOG_HEAP_INIT_PAGE)
    {
        buffer = XLogReadBuffer(xlrec->target.node, blkno, true);
        Assert(BufferIsValid(buffer));
        page = (Page) BufferGetPage(buffer);

        PageInit(page, BufferGetPageSize(buffer), 0);
    }
    else
    {
        buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
        if (!BufferIsValid(buffer))
            return;
        page = (Page) BufferGetPage(buffer);

        if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
        {
            UnlockReleaseBuffer(buffer);
            return;
        }
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) + 1 < offnum)
        elog(PANIC, "heap_insert_redo: invalid max offset number");

    newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
    Assert(newlen <= MaxHeapTupleSize);
    memcpy((char *) &xlhdr,
           (char *) xlrec + SizeOfHeapInsert,
           SizeOfHeapHeader);
    htup = &tbuf.hdr;
    MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
    /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
           (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
           newlen);
    newlen += offsetof(HeapTupleHeaderData, t_bits);
    htup->t_infomask2 = xlhdr.t_infomask2;
    htup->t_infomask = xlhdr.t_infomask;
    htup->t_hoff = xlhdr.t_hoff;
    HeapTupleHeaderSetXmin(htup, record->xl_xid);
    HeapTupleHeaderSetCmin(htup, FirstCommandId);
    htup->t_ctid = xlrec->target.tid;

    offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    if (offnum == InvalidOffsetNumber)
        elog(PANIC, "heap_insert_redo: failed to add tuple");

    freespace = PageGetHeapFreeSpace(page);     /* needed to update FSM below */

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);

    if (xlrec->all_visible_cleared)
        PageClearAllVisible(page);

    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);

    /*
     * If the page is running low on free space, update the FSM as well.
     * Arbitrarily, our definition of "low" is less than 20%. We can't do
     * much better than that without knowing the fill-factor for the table.
     *
     * XXX: We don't get here if the page was restored from full page image.
     * We don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    if (freespace < BLCKSZ / 5)
        XLogRecordPageWithFreeSpace(xlrec->target.node, blkno, freespace);
}
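
/*
 * The local tbuf struct above serves as an aligned scratch area (a tuple
 * header plus up to MaxHeapTupleSize bytes of data): the tuple is rebuilt
 * there from the WAL payload and only then copied onto the page by
 * PageAddItem.
 */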
/*
 * Handles UPDATE, HOT_UPDATE & MOVE
 */
static void
heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
{
    xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
    Buffer      buffer;
    bool        samepage = (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)));
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    struct
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    xl_heap_header xlhdr;
    int         hsize;
    uint32      newlen;
    Size        freespace;

    /*
     * The visibility map always needs to be updated, even if the heap page
     * is already up-to-date.
     */
    if (xlrec->all_visible_cleared)
    {
        Relation    reln = CreateFakeRelcacheEntry(xlrec->target.node);

        visibilitymap_clear(reln,
                            ItemPointerGetBlockNumber(&xlrec->target.tid));
        FreeFakeRelcacheEntry(reln);
    }

    if (record->xl_info & XLR_BKP_BLOCK_1)
    {
        if (samepage)
            return;             /* backup block covered both changes */
        goto newt;
    }

    /* Deal with old tuple version */

    buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        goto newt;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))        /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        if (samepage)
            return;
        goto newt;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_update_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    if (move)
    {
        htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
                              HEAP_XMIN_INVALID |
                              HEAP_MOVED_IN);
        htup->t_infomask |= HEAP_MOVED_OFF;
        HeapTupleHeaderClearHotUpdated(htup);
        HeapTupleHeaderSetXvac(htup, record->xl_xid);
        /* Make sure there is no forward chain link in t_ctid */
        htup->t_ctid = xlrec->target.tid;
    }
    else
    {
        htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                              HEAP_XMAX_INVALID |
                              HEAP_XMAX_IS_MULTI |
                              HEAP_IS_LOCKED |
                              HEAP_MOVED);
        if (hot_update)
            HeapTupleHeaderSetHotUpdated(htup);
        else
            HeapTupleHeaderClearHotUpdated(htup);
        HeapTupleHeaderSetXmax(htup, record->xl_xid);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
        /* Set forward chain link in t_ctid */
        htup->t_ctid = xlrec->newtid;
    }

    /* Mark the page as a candidate for pruning */
    PageSetPrunable(page, record->xl_xid);

    if (xlrec->all_visible_cleared)
        PageClearAllVisible(page);

    /*
     * this test is ugly, but necessary to avoid thinking that insert change
     * is already applied
     */
    if (samepage)
        goto newsame;
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);

    /* Deal with new tuple */

newt:;

    /*
     * The visibility map always needs to be updated, even if the heap page
     * is already up-to-date.
     */
    if (xlrec->new_all_visible_cleared)
    {
        Relation    reln = CreateFakeRelcacheEntry(xlrec->target.node);

        visibilitymap_clear(reln, ItemPointerGetBlockNumber(&xlrec->newtid));
        FreeFakeRelcacheEntry(reln);
    }

    if (record->xl_info & XLR_BKP_BLOCK_2)
        return;

    if (record->xl_info & XLOG_HEAP_INIT_PAGE)
    {
        buffer = XLogReadBuffer(xlrec->target.node,
                                ItemPointerGetBlockNumber(&(xlrec->newtid)),
                                true);
        Assert(BufferIsValid(buffer));
        page = (Page) BufferGetPage(buffer);

        PageInit(page, BufferGetPageSize(buffer), 0);
    }
    else
    {
        buffer = XLogReadBuffer(xlrec->target.node,
                                ItemPointerGetBlockNumber(&(xlrec->newtid)),
                                false);
        if (!BufferIsValid(buffer))
            return;
        page = (Page) BufferGetPage(buffer);

        if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
        {
            UnlockReleaseBuffer(buffer);
            return;
        }
    }

newsame:;

    offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
    if (PageGetMaxOffsetNumber(page) + 1 < offnum)
        elog(PANIC, "heap_update_redo: invalid max offset number");

    hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
    if (move)
        hsize += (2 * sizeof(TransactionId));

    newlen = record->xl_len - hsize;
    Assert(newlen <= MaxHeapTupleSize);
    memcpy((char *) &xlhdr,
           (char *) xlrec + SizeOfHeapUpdate,
           SizeOfHeapHeader);
    htup = &tbuf.hdr;
    MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
    /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
           (char *) xlrec + hsize,
           newlen);
    newlen += offsetof(HeapTupleHeaderData, t_bits);
    htup->t_infomask2 = xlhdr.t_infomask2;
    htup->t_infomask = xlhdr.t_infomask;
    htup->t_hoff = xlhdr.t_hoff;

    if (move)
    {
        TransactionId xid[2];   /* xmax, xmin */

        memcpy((char *) xid,
               (char *) xlrec + SizeOfHeapUpdate + SizeOfHeapHeader,
               2 * sizeof(TransactionId));
        HeapTupleHeaderSetXmin(htup, xid[1]);
        HeapTupleHeaderSetXmax(htup, xid[0]);
        HeapTupleHeaderSetXvac(htup, record->xl_xid);
    }
    else
    {
        HeapTupleHeaderSetXmin(htup, record->xl_xid);
        HeapTupleHeaderSetCmin(htup, FirstCommandId);
    }
    /* Make sure there is no forward chain link in t_ctid */
    htup->t_ctid = xlrec->newtid;

    offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    if (offnum == InvalidOffsetNumber)
        elog(PANIC, "heap_update_redo: failed to add tuple");

    if (xlrec->new_all_visible_cleared)
        PageClearAllVisible(page);

    freespace = PageGetHeapFreeSpace(page);     /* needed to update FSM below */

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);

    /*
     * If the page is running low on free space, update the FSM as well.
     * Arbitrarily, our definition of "low" is less than 20%. We can't do
     * much better than that without knowing the fill-factor for the table.
     *
     * However, don't update the FSM on HOT updates, because after crash
     * recovery, either the old or the new tuple will certainly be dead and
     * prunable. After pruning, the page will have roughly as much free space
     * as it did before the update, assuming the new tuple is about the same
     * size as the old one.
     *
     * XXX: We don't get here if the page was restored from full page image.
     * We don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    if (!hot_update && freespace < BLCKSZ / 5)
        XLogRecordPageWithFreeSpace(xlrec->target.node,
                    ItemPointerGetBlockNumber(&(xlrec->newtid)), freespace);
}
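
/*
 * heap_xlog_update is split into two halves joined by gotos: the old-tuple
 * page is handled first, then control reaches "newt:" for the new-tuple
 * page; a same-page update jumps straight to "newsame:" so the insert half
 * is applied to the already-open buffer.  Either half is skipped when a
 * full-page backup block (XLR_BKP_BLOCK_1 or _2) already restored the
 * corresponding page.
 */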
static void
heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))        /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_lock_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                          HEAP_XMAX_INVALID |
                          HEAP_XMAX_IS_MULTI |
                          HEAP_IS_LOCKED |
                          HEAP_MOVED);
    if (xlrec->xid_is_mxact)
        htup->t_infomask |= HEAP_XMAX_IS_MULTI;
    if (xlrec->shared_lock)
        htup->t_infomask |= HEAP_XMAX_SHARED_LOCK;
    else
        htup->t_infomask |= HEAP_XMAX_EXCL_LOCK;
    HeapTupleHeaderClearHotUpdated(htup);
    HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
    HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    /* Make sure there is no forward chain link in t_ctid */
    htup->t_ctid = xlrec->target.tid;
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
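
/*
 * Replaying a LOCK record only reconstructs the tuple header (xmax and the
 * lock-related infomask bits); the lock itself need not be re-taken, since
 * the locking transaction cannot still be in progress after a crash.
 */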
static void
heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    uint32      oldlen;
    uint32      newlen;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(xlrec->target.node,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))        /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_inplace_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    oldlen = ItemIdGetLength(lp) - htup->t_hoff;
    newlen = record->xl_len - SizeOfHeapInplace;
    if (oldlen != newlen)
        elog(PANIC, "heap_inplace_redo: wrong tuple length");

    memcpy((char *) htup + htup->t_hoff,
           (char *) xlrec + SizeOfHeapInplace,
           newlen);

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
    uint8       info = record->xl_info & ~XLR_INFO_MASK;

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP_INSERT:
            heap_xlog_insert(lsn, record);
            break;
        case XLOG_HEAP_DELETE:
            heap_xlog_delete(lsn, record);
            break;
        case XLOG_HEAP_UPDATE:
            heap_xlog_update(lsn, record, false, false);
            break;
        case XLOG_HEAP_MOVE:
            heap_xlog_update(lsn, record, true, false);
            break;
        case XLOG_HEAP_HOT_UPDATE:
            heap_xlog_update(lsn, record, false, true);
            break;
        case XLOG_HEAP_NEWPAGE:
            heap_xlog_newpage(lsn, record);
            break;
        case XLOG_HEAP_LOCK:
            heap_xlog_lock(lsn, record);
            break;
        case XLOG_HEAP_INPLACE:
            heap_xlog_inplace(lsn, record);
            break;
        default:
            elog(PANIC, "heap_redo: unknown op code %u", info);
    }
}
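
/*
 * heap_redo and heap2_redo (with heap_desc and heap2_desc below) are the
 * redo and description callbacks for the RM_HEAP_ID and RM_HEAP2_ID
 * resource managers; recovery dispatches each WAL record to the rmgr named
 * in its header, which then switches on the opcode bits as above.
 */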
void
heap2_redo(XLogRecPtr lsn, XLogRecord *record)
{
    uint8       info = record->xl_info & ~XLR_INFO_MASK;

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP2_FREEZE:
            heap_xlog_freeze(lsn, record);
            break;
        case XLOG_HEAP2_CLEAN:
            heap_xlog_clean(lsn, record, false);
            break;
        case XLOG_HEAP2_CLEAN_MOVE:
            heap_xlog_clean(lsn, record, true);
            break;
        default:
            elog(PANIC, "heap2_redo: unknown op code %u", info);
    }
}
static void
out_target(StringInfo buf, xl_heaptid *target)
{
    appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
                     target->node.spcNode, target->node.dbNode, target->node.relNode,
                     ItemPointerGetBlockNumber(&(target->tid)),
                     ItemPointerGetOffsetNumber(&(target->tid)));
}
void
heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
    uint8       info = xl_info & ~XLR_INFO_MASK;

    info &= XLOG_HEAP_OPMASK;
    if (info == XLOG_HEAP_INSERT)
    {
        xl_heap_insert *xlrec = (xl_heap_insert *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "insert(init): ");
        else
            appendStringInfo(buf, "insert: ");
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_DELETE)
    {
        xl_heap_delete *xlrec = (xl_heap_delete *) rec;

        appendStringInfo(buf, "delete: ");
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_UPDATE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "update(init): ");
        else
            appendStringInfo(buf, "update: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_MOVE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "move(init): ");
        else
            appendStringInfo(buf, "move: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_HOT_UPDATE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)      /* can this case happen? */
            appendStringInfo(buf, "hot_update(init): ");
        else
            appendStringInfo(buf, "hot_update: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_NEWPAGE)
    {
        xl_heap_newpage *xlrec = (xl_heap_newpage *) rec;

        appendStringInfo(buf, "newpage: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->blkno);
    }
    else if (info == XLOG_HEAP_LOCK)
    {
        xl_heap_lock *xlrec = (xl_heap_lock *) rec;

        if (xlrec->shared_lock)
            appendStringInfo(buf, "shared_lock: ");
        else
            appendStringInfo(buf, "exclusive_lock: ");
        if (xlrec->xid_is_mxact)
            appendStringInfo(buf, "mxid ");
        else
            appendStringInfo(buf, "xid ");
        appendStringInfo(buf, "%u ", xlrec->locking_xid);
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_INPLACE)
    {
        xl_heap_inplace *xlrec = (xl_heap_inplace *) rec;

        appendStringInfo(buf, "inplace: ");
        out_target(buf, &(xlrec->target));
    }
    else
        appendStringInfo(buf, "UNKNOWN");
}
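
/*
 * For illustration, heap_desc output for a plain insert record would look
 * like (values hypothetical):
 *
 *      insert: rel 1663/16384/16385; tid 3/7
 *
 * i.e. tablespace/database/relfilenode of the target relation followed by
 * the block and offset of the affected tuple, per the format strings above.
 */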
void
heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
{
    uint8       info = xl_info & ~XLR_INFO_MASK;

    info &= XLOG_HEAP_OPMASK;
    if (info == XLOG_HEAP2_FREEZE)
    {
        xl_heap_freeze *xlrec = (xl_heap_freeze *) rec;

        appendStringInfo(buf, "freeze: rel %u/%u/%u; blk %u; cutoff %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block,
                         xlrec->cutoff_xid);
    }
    else if (info == XLOG_HEAP2_CLEAN)
    {
        xl_heap_clean *xlrec = (xl_heap_clean *) rec;

        appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block);
    }
    else if (info == XLOG_HEAP2_CLEAN_MOVE)
    {
        xl_heap_clean *xlrec = (xl_heap_clean *) rec;

        appendStringInfo(buf, "clean_move: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block);
    }
    else
        appendStringInfo(buf, "UNKNOWN");
}
/*
 *  heap_sync       - sync a heap, for use when no WAL has been written
 *
 * This forces the heap contents (including TOAST heap if any) down to disk.
 * If we skipped using WAL, and it's not a temp relation, we must force the
 * relation down to disk before it's safe to commit the transaction.  This
 * requires writing out any dirty buffers and then doing a forced fsync.
 *
 * Indexes are not touched.  (Currently, index operations associated with
 * the commands that use this are WAL-logged and so do not need fsync.
 * That behavior might change someday, but in any case it's likely that
 * any fsync decisions required would be per-index and hence not appropriate
 * to be done here.)
 */
void
heap_sync(Relation rel)
{
    /* temp tables never need fsync */
    if (rel->rd_istemp)
        return;

    /* main heap */
    FlushRelationBuffers(rel);
    /* FlushRelationBuffers will have opened rd_smgr */
    smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);

    /* FSM is not critical, don't bother syncing it */

    /* toast heap, if any */
    if (OidIsValid(rel->rd_rel->reltoastrelid))
    {
        Relation    toastrel;

        toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
        FlushRelationBuffers(toastrel);
        smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
        heap_close(toastrel, AccessShareLock);
    }
}
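
/*
 * Typical use (illustrative): bulk-loading paths such as COPY into a table
 * created in the same transaction may skip WAL-logging the inserted tuples;
 * before committing they must then call heap_sync(rel), since there is no
 * WAL from which the data could be replayed after a crash.
 */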