1 /*-------------------------------------------------------------------------
4 * heap access method code
6 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.275 2009/05/12 16:43:32 tgl Exp $
15 * relation_open - open any relation by relation OID
16 * relation_openrv - open any relation specified by a RangeVar
17 * relation_close - close any relation
18 * heap_open - open a heap relation by relation OID
19 * heap_openrv - open a heap relation specified by a RangeVar
20 * heap_close - (now just a macro for relation_close)
21 * heap_beginscan - begin relation scan
22 * heap_rescan - restart a relation scan
23 * heap_endscan - end relation scan
24 * heap_getnext - retrieve next tuple in scan
25 * heap_fetch - retrieve tuple with given tid
26 * heap_insert - insert tuple into a relation
27 * heap_delete - delete a tuple from a relation
28 * heap_update - replace a tuple in a relation with another tuple
29 * heap_markpos - mark scan position
30 * heap_restrpos - restore position to marked location
31 * heap_sync - sync heap, for when no WAL has been written
34 * This file contains the heap_ routines which implement
35 * the POSTGRES heap access method used for all POSTGRES
38 *-------------------------------------------------------------------------
42 #include "access/heapam.h"
43 #include "access/hio.h"
44 #include "access/multixact.h"
45 #include "access/relscan.h"
46 #include "access/sysattr.h"
47 #include "access/transam.h"
48 #include "access/tuptoaster.h"
49 #include "access/valid.h"
50 #include "access/visibilitymap.h"
51 #include "access/xact.h"
52 #include "access/xlogutils.h"
53 #include "catalog/catalog.h"
54 #include "catalog/namespace.h"
55 #include "miscadmin.h"
57 #include "storage/bufmgr.h"
58 #include "storage/freespace.h"
59 #include "storage/lmgr.h"
60 #include "storage/procarray.h"
61 #include "storage/smgr.h"
62 #include "utils/datum.h"
63 #include "utils/inval.h"
64 #include "utils/lsyscache.h"
65 #include "utils/relcache.h"
66 #include "utils/snapmgr.h"
67 #include "utils/syscache.h"
68 #include "utils/tqual.h"
72 bool synchronize_seqscans = true;
75 static HeapScanDesc heap_beginscan_internal(Relation relation,
77 int nkeys, ScanKey key,
78 bool allow_strat, bool allow_sync,
80 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
81 ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
82 static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
83 HeapTuple oldtup, HeapTuple newtup);
86 /* ----------------------------------------------------------------
87 * heap support routines
88 * ----------------------------------------------------------------
92 * initscan - scan code common to heap_beginscan and heap_rescan
96 initscan(HeapScanDesc scan, ScanKey key)
102 * Determine the number of blocks we have to scan.
104 * It is sufficient to do this once at scan start, since any tuples added
105 * while the scan is in progress will be invisible to my snapshot anyway.
106 * (That is not true when using a non-MVCC snapshot. However, we couldn't
107 * guarantee to return tuples added after scan start anyway, since they
108 * might go into pages we already scanned. To guarantee consistent
109 * results for a non-MVCC snapshot, the caller must hold some higher-level
110 * lock that ensures the interesting tuple(s) won't change.)
112 scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
115 * If the table is large relative to NBuffers, use a bulk-read access
116 * strategy and enable synchronized scanning (see syncscan.c). Although
117 * the thresholds for these features could be different, we make them the
118 * same so that there are only two behaviors to tune rather than four.
119 * (However, some callers need to be able to disable one or both of
120 * these behaviors, independently of the size of the table; also there
121 * is a GUC variable that can disable synchronized scanning.)
123 * During a rescan, don't make a new strategy object if we don't have to.
125 if (!scan->rs_rd->rd_istemp &&
126 scan->rs_nblocks > NBuffers / 4)
128 allow_strat = scan->rs_allow_strat;
129 allow_sync = scan->rs_allow_sync;
132 allow_strat = allow_sync = false;
136 if (scan->rs_strategy == NULL)
137 scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
141 if (scan->rs_strategy != NULL)
142 FreeAccessStrategy(scan->rs_strategy);
143 scan->rs_strategy = NULL;
146 if (allow_sync && synchronize_seqscans)
148 scan->rs_syncscan = true;
149 scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
153 scan->rs_syncscan = false;
154 scan->rs_startblock = 0;
157 scan->rs_inited = false;
158 scan->rs_ctup.t_data = NULL;
159 ItemPointerSetInvalid(&scan->rs_ctup.t_self);
160 scan->rs_cbuf = InvalidBuffer;
161 scan->rs_cblock = InvalidBlockNumber;
163 /* we don't have a marked position... */
164 ItemPointerSetInvalid(&(scan->rs_mctid));
166 /* page-at-a-time fields are always invalid when not rs_inited */
169 * copy the scan key, if appropriate
172 memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));
175 * Currently, we don't have a stats counter for bitmap heap scans (but the
176 * underlying bitmap index scans will be counted).
178 if (!scan->rs_bitmapscan)
179 pgstat_count_heap_scan(scan->rs_rd);
183 * heapgetpage - subroutine for heapgettup()
185 * This routine reads and pins the specified page of the relation.
186 * In page-at-a-time mode it performs additional work, namely determining
187 * which tuples on the page are visible.
190 heapgetpage(HeapScanDesc scan, BlockNumber page)
197 OffsetNumber lineoff;
201 Assert(page < scan->rs_nblocks);
203 /* release previous scan buffer, if any */
204 if (BufferIsValid(scan->rs_cbuf))
206 ReleaseBuffer(scan->rs_cbuf);
207 scan->rs_cbuf = InvalidBuffer;
210 /* read page using selected strategy */
211 scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page,
212 RBM_NORMAL, scan->rs_strategy);
213 scan->rs_cblock = page;
215 if (!scan->rs_pageatatime)
218 buffer = scan->rs_cbuf;
219 snapshot = scan->rs_snapshot;
222 * Prune and repair fragmentation for the whole page, if possible.
224 Assert(TransactionIdIsValid(RecentGlobalXmin));
225 heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);
228 * We must hold share lock on the buffer content while examining tuple
229 * visibility. Afterwards, however, the tuples we have found to be
230 * visible are guaranteed good as long as we hold the buffer pin.
232 LockBuffer(buffer, BUFFER_LOCK_SHARE);
234 dp = (Page) BufferGetPage(buffer);
235 lines = PageGetMaxOffsetNumber(dp);
239 * If the all-visible flag indicates that all tuples on the page are
240 * visible to everyone, we can skip the per-tuple visibility tests.
242 all_visible = PageIsAllVisible(dp);
244 for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
248 if (ItemIdIsNormal(lpp))
256 HeapTupleData loctup;
258 loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
259 loctup.t_len = ItemIdGetLength(lpp);
260 ItemPointerSet(&(loctup.t_self), page, lineoff);
262 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
265 scan->rs_vistuples[ntup++] = lineoff;
269 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
271 Assert(ntup <= MaxHeapTuplesPerPage);
272 scan->rs_ntuples = ntup;
276 * heapgettup - fetch next heap tuple
278 * Initialize the scan if not already done; then advance to the next
279 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
280 * or set scan->rs_ctup.t_data = NULL if no more tuples.
282 * dir == NoMovementScanDirection means "re-fetch the tuple indicated
285 * Note: the reason nkeys/key are passed separately, even though they are
286 * kept in the scan descriptor, is that the caller may not want us to check
289 * Note: when we fall off the end of the scan in either direction, we
290 * reset rs_inited. This means that a further request with the same
291 * scan direction will restart the scan, which is a bit odd, but a
292 * request with the opposite scan direction will start a fresh scan
293 * in the proper direction. The latter is required behavior for cursors,
294 * while the former case is generally undefined behavior in Postgres
295 * so we don't care too much.
299 heapgettup(HeapScanDesc scan,
304 HeapTuple tuple = &(scan->rs_ctup);
305 Snapshot snapshot = scan->rs_snapshot;
306 bool backward = ScanDirectionIsBackward(dir);
311 OffsetNumber lineoff;
316 * calculate next starting lineoff, given scan direction
318 if (ScanDirectionIsForward(dir))
320 if (!scan->rs_inited)
323 * return null immediately if relation is empty
325 if (scan->rs_nblocks == 0)
327 Assert(!BufferIsValid(scan->rs_cbuf));
328 tuple->t_data = NULL;
331 page = scan->rs_startblock; /* first page */
332 heapgetpage(scan, page);
333 lineoff = FirstOffsetNumber; /* first offnum */
334 scan->rs_inited = true;
338 /* continue from previously returned page/tuple */
339 page = scan->rs_cblock; /* current page */
340 lineoff = /* next offnum */
341 OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
344 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
346 dp = (Page) BufferGetPage(scan->rs_cbuf);
347 lines = PageGetMaxOffsetNumber(dp);
348 /* page and lineoff now reference the physically next tid */
350 linesleft = lines - lineoff + 1;
354 if (!scan->rs_inited)
357 * return null immediately if relation is empty
359 if (scan->rs_nblocks == 0)
361 Assert(!BufferIsValid(scan->rs_cbuf));
362 tuple->t_data = NULL;
367 * Disable reporting to syncscan logic in a backwards scan; it's
368 * not very likely anyone else is doing the same thing at the same
369 * time, and much more likely that we'll just bollix things for
372 scan->rs_syncscan = false;
373 /* start from last page of the scan */
374 if (scan->rs_startblock > 0)
375 page = scan->rs_startblock - 1;
377 page = scan->rs_nblocks - 1;
378 heapgetpage(scan, page);
382 /* continue from previously returned page/tuple */
383 page = scan->rs_cblock; /* current page */
386 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
388 dp = (Page) BufferGetPage(scan->rs_cbuf);
389 lines = PageGetMaxOffsetNumber(dp);
391 if (!scan->rs_inited)
393 lineoff = lines; /* final offnum */
394 scan->rs_inited = true;
398 lineoff = /* previous offnum */
399 OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
401 /* page and lineoff now reference the physically previous tid */
408 * ``no movement'' scan direction: refetch prior tuple
410 if (!scan->rs_inited)
412 Assert(!BufferIsValid(scan->rs_cbuf));
413 tuple->t_data = NULL;
417 page = ItemPointerGetBlockNumber(&(tuple->t_self));
418 if (page != scan->rs_cblock)
419 heapgetpage(scan, page);
421 /* Since the tuple was previously fetched, needn't lock page here */
422 dp = (Page) BufferGetPage(scan->rs_cbuf);
423 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
424 lpp = PageGetItemId(dp, lineoff);
425 Assert(ItemIdIsNormal(lpp));
427 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
428 tuple->t_len = ItemIdGetLength(lpp);
434 * advance the scan until we find a qualifying tuple or run out of stuff
437 lpp = PageGetItemId(dp, lineoff);
440 while (linesleft > 0)
442 if (ItemIdIsNormal(lpp))
446 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
447 tuple->t_len = ItemIdGetLength(lpp);
448 ItemPointerSet(&(tuple->t_self), page, lineoff);
451 * if current tuple qualifies, return it.
453 valid = HeapTupleSatisfiesVisibility(tuple,
457 if (valid && key != NULL)
458 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
463 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
469 * otherwise move to the next item on the page
474 --lpp; /* move back in this page's ItemId array */
479 ++lpp; /* move forward in this page's ItemId array */
485 * if we get here, it means we've exhausted the items on this page and
486 * it's time to move to the next.
488 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
491 * advance to next/prior page and detect end of scan
495 finished = (page == scan->rs_startblock);
497 page = scan->rs_nblocks;
503 if (page >= scan->rs_nblocks)
505 finished = (page == scan->rs_startblock);
508 * Report our new scan position for synchronization purposes. We
509 * don't do that when moving backwards, however. That would just
510 * mess up any other forward-moving scanners.
512 * Note: we do this before checking for end of scan so that the
513 * final state of the position hint is back at the start of the
514 * rel. That's not strictly necessary, but otherwise when you run
515 * the same query multiple times the starting position would shift
516 * a little bit backwards on every invocation, which is confusing.
517 * We don't guarantee any specific ordering in general, though.
519 if (scan->rs_syncscan)
520 ss_report_location(scan->rs_rd, page);
524 * return NULL if we've exhausted all the pages
528 if (BufferIsValid(scan->rs_cbuf))
529 ReleaseBuffer(scan->rs_cbuf);
530 scan->rs_cbuf = InvalidBuffer;
531 scan->rs_cblock = InvalidBlockNumber;
532 tuple->t_data = NULL;
533 scan->rs_inited = false;
537 heapgetpage(scan, page);
539 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
541 dp = (Page) BufferGetPage(scan->rs_cbuf);
542 lines = PageGetMaxOffsetNumber((Page) dp);
547 lpp = PageGetItemId(dp, lines);
551 lineoff = FirstOffsetNumber;
552 lpp = PageGetItemId(dp, FirstOffsetNumber);
558 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
560 * Same API as heapgettup, but used in page-at-a-time mode
562 * The internal logic is much the same as heapgettup's too, but there are some
563 * differences: we do not take the buffer content lock (that only needs to
564 * happen inside heapgetpage), and we iterate through just the tuples listed
565 * in rs_vistuples[] rather than all tuples on the page. Notice that
566 * lineindex is 0-based, where the corresponding loop variable lineoff in
567 * heapgettup is 1-based.
571 heapgettup_pagemode(HeapScanDesc scan,
576 HeapTuple tuple = &(scan->rs_ctup);
577 bool backward = ScanDirectionIsBackward(dir);
583 OffsetNumber lineoff;
588 * calculate next starting lineindex, given scan direction
590 if (ScanDirectionIsForward(dir))
592 if (!scan->rs_inited)
595 * return null immediately if relation is empty
597 if (scan->rs_nblocks == 0)
599 Assert(!BufferIsValid(scan->rs_cbuf));
600 tuple->t_data = NULL;
603 page = scan->rs_startblock; /* first page */
604 heapgetpage(scan, page);
606 scan->rs_inited = true;
610 /* continue from previously returned page/tuple */
611 page = scan->rs_cblock; /* current page */
612 lineindex = scan->rs_cindex + 1;
615 dp = (Page) BufferGetPage(scan->rs_cbuf);
616 lines = scan->rs_ntuples;
617 /* page and lineindex now reference the next visible tid */
619 linesleft = lines - lineindex;
623 if (!scan->rs_inited)
626 * return null immediately if relation is empty
628 if (scan->rs_nblocks == 0)
630 Assert(!BufferIsValid(scan->rs_cbuf));
631 tuple->t_data = NULL;
636 * Disable reporting to syncscan logic in a backwards scan; it's
637 * not very likely anyone else is doing the same thing at the same
638 * time, and much more likely that we'll just bollix things for
641 scan->rs_syncscan = false;
642 /* start from last page of the scan */
643 if (scan->rs_startblock > 0)
644 page = scan->rs_startblock - 1;
646 page = scan->rs_nblocks - 1;
647 heapgetpage(scan, page);
651 /* continue from previously returned page/tuple */
652 page = scan->rs_cblock; /* current page */
655 dp = (Page) BufferGetPage(scan->rs_cbuf);
656 lines = scan->rs_ntuples;
658 if (!scan->rs_inited)
660 lineindex = lines - 1;
661 scan->rs_inited = true;
665 lineindex = scan->rs_cindex - 1;
667 /* page and lineindex now reference the previous visible tid */
669 linesleft = lineindex + 1;
674 * ``no movement'' scan direction: refetch prior tuple
676 if (!scan->rs_inited)
678 Assert(!BufferIsValid(scan->rs_cbuf));
679 tuple->t_data = NULL;
683 page = ItemPointerGetBlockNumber(&(tuple->t_self));
684 if (page != scan->rs_cblock)
685 heapgetpage(scan, page);
687 /* Since the tuple was previously fetched, needn't lock page here */
688 dp = (Page) BufferGetPage(scan->rs_cbuf);
689 lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
690 lpp = PageGetItemId(dp, lineoff);
691 Assert(ItemIdIsNormal(lpp));
693 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
694 tuple->t_len = ItemIdGetLength(lpp);
696 /* check that rs_cindex is in sync */
697 Assert(scan->rs_cindex < scan->rs_ntuples);
698 Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
704 * advance the scan until we find a qualifying tuple or run out of stuff
709 while (linesleft > 0)
711 lineoff = scan->rs_vistuples[lineindex];
712 lpp = PageGetItemId(dp, lineoff);
713 Assert(ItemIdIsNormal(lpp));
715 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
716 tuple->t_len = ItemIdGetLength(lpp);
717 ItemPointerSet(&(tuple->t_self), page, lineoff);
720 * if current tuple qualifies, return it.
726 HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
730 scan->rs_cindex = lineindex;
736 scan->rs_cindex = lineindex;
741 * otherwise move to the next item on the page
751 * if we get here, it means we've exhausted the items on this page and
752 * it's time to move to the next.
756 finished = (page == scan->rs_startblock);
758 page = scan->rs_nblocks;
764 if (page >= scan->rs_nblocks)
766 finished = (page == scan->rs_startblock);
769 * Report our new scan position for synchronization purposes. We
770 * don't do that when moving backwards, however. That would just
771 * mess up any other forward-moving scanners.
773 * Note: we do this before checking for end of scan so that the
774 * final state of the position hint is back at the start of the
775 * rel. That's not strictly necessary, but otherwise when you run
776 * the same query multiple times the starting position would shift
777 * a little bit backwards on every invocation, which is confusing.
778 * We don't guarantee any specific ordering in general, though.
780 if (scan->rs_syncscan)
781 ss_report_location(scan->rs_rd, page);
785 * return NULL if we've exhausted all the pages
789 if (BufferIsValid(scan->rs_cbuf))
790 ReleaseBuffer(scan->rs_cbuf);
791 scan->rs_cbuf = InvalidBuffer;
792 scan->rs_cblock = InvalidBlockNumber;
793 tuple->t_data = NULL;
794 scan->rs_inited = false;
798 heapgetpage(scan, page);
800 dp = (Page) BufferGetPage(scan->rs_cbuf);
801 lines = scan->rs_ntuples;
804 lineindex = lines - 1;
811 #if defined(DISABLE_COMPLEX_MACRO)
813 * This is formatted so oddly in order to maintain the correspondence to the
814 * macro definition in access/heapam.h.
817 fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
823 ((isnull) ? (*(isnull) = false) : (dummyret) NULL),
824 HeapTupleNoNulls(tup) ?
826 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
828 fetchatt((tupleDesc)->attrs[(attnum) - 1],
829 (char *) (tup)->t_data + (tup)->t_data->t_hoff +
830 (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
833 nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
837 att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
839 ((isnull) ? (*(isnull) = true) : (dummyret) NULL),
844 nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
854 #endif /* defined(DISABLE_COMPLEX_MACRO) */
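/*
 * Illustrative sketch, not part of the original file: pulling one column out
 * of a heap tuple with fastgetattr().  Assumes a backend context with a valid
 * HeapTuple and the relation's TupleDesc; attribute number 1 is just an
 * example of a user column.
 *
 *		bool	isnull;
 *		Datum	value;
 *
 *		value = fastgetattr(tuple, 1, RelationGetDescr(rel), &isnull);
 *		if (!isnull)
 *			... interpret value with DatumGetInt32(), DatumGetTextP(), etc. ...
 */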
857 /* ----------------------------------------------------------------
858 * heap access method interface
859 * ----------------------------------------------------------------
863 * relation_open - open any relation by relation OID
865 * If lockmode is not "NoLock", the specified kind of lock is
866 * obtained on the relation. (Generally, NoLock should only be
867 * used if the caller knows it has some appropriate lock on the
870 * An error is raised if the relation does not exist.
872 * NB: a "relation" is anything with a pg_class entry. The caller is
873 * expected to check whether the relkind is something it can handle.
877 relation_open(Oid relationId, LOCKMODE lockmode)
881 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
883 /* Get the lock before trying to open the relcache entry */
884 if (lockmode != NoLock)
885 LockRelationOid(relationId, lockmode);
887 /* The relcache does all the real work... */
888 r = RelationIdGetRelation(relationId);
890 if (!RelationIsValid(r))
891 elog(ERROR, "could not open relation with OID %u", relationId);
893 /* Make note that we've accessed a temporary relation */
895 MyXactAccessedTempRel = true;
903 * try_relation_open - open any relation by relation OID
905 * Same as relation_open, except return NULL instead of failing
906 * if the relation does not exist.
910 try_relation_open(Oid relationId, LOCKMODE lockmode)
914 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
916 /* Get the lock first */
917 if (lockmode != NoLock)
918 LockRelationOid(relationId, lockmode);
921 * Now that we have the lock, probe to see if the relation really exists
924 if (!SearchSysCacheExists(RELOID,
925 ObjectIdGetDatum(relationId),
928 /* Release useless lock */
929 if (lockmode != NoLock)
930 UnlockRelationOid(relationId, lockmode);
935 /* Should be safe to do a relcache load */
936 r = RelationIdGetRelation(relationId);
938 if (!RelationIsValid(r))
939 elog(ERROR, "could not open relation with OID %u", relationId);
941 /* Make note that we've accessed a temporary relation */
943 MyXactAccessedTempRel = true;
951 * relation_openrv - open any relation specified by a RangeVar
953 * Same as relation_open, but the relation is specified by a RangeVar.
957 relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
962 * Check for shared-cache-inval messages before trying to open the
963 * relation. This is needed to cover the case where the name identifies a
964 * rel that has been dropped and recreated since the start of our
965 * transaction: if we don't flush the old syscache entry then we'll latch
966 * onto that entry and suffer an error when we do RelationIdGetRelation.
967 * Note that relation_open does not need to do this, since a relation's
970 * We skip this if asked for NoLock, on the assumption that the caller has
971 * already ensured some appropriate lock is held.
973 if (lockmode != NoLock)
974 AcceptInvalidationMessages();
976 /* Look up the appropriate relation using namespace search */
977 relOid = RangeVarGetRelid(relation, false);
979 /* Let relation_open do the rest */
980 return relation_open(relOid, lockmode);
984 * try_relation_openrv - open any relation specified by a RangeVar
986 * Same as relation_openrv, but return NULL instead of failing for
987 * relation-not-found. (Note that some other causes, such as
988 * permissions problems, will still result in an ereport.)
992 try_relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
997 * Check for shared-cache-inval messages before trying to open the
998 * relation. This is needed to cover the case where the name identifies a
999 * rel that has been dropped and recreated since the start of our
1000 * transaction: if we don't flush the old syscache entry then we'll latch
1001 * onto that entry and suffer an error when we do RelationIdGetRelation.
1002 * Note that relation_open does not need to do this, since a relation's
1003 * OID never changes.
1005 * We skip this if asked for NoLock, on the assumption that the caller has
1006 * already ensured some appropriate lock is held.
1008 if (lockmode != NoLock)
1009 AcceptInvalidationMessages();
1011 /* Look up the appropriate relation using namespace search */
1012 relOid = RangeVarGetRelid(relation, true);
1014 /* Return NULL on not-found */
1015 if (!OidIsValid(relOid))
1018 /* Let relation_open do the rest */
1019 return relation_open(relOid, lockmode);
1023 * relation_close - close any relation
1025 * If lockmode is not "NoLock", we then release the specified lock.
1027 * Note that it is often sensible to hold a lock beyond relation_close;
1028 * in that case, the lock is released automatically at xact end.
1032 relation_close(Relation relation, LOCKMODE lockmode)
1034 LockRelId relid = relation->rd_lockInfo.lockRelId;
1036 Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
1038 /* The relcache does the real work... */
1039 RelationClose(relation);
1041 if (lockmode != NoLock)
1042 UnlockRelationId(&relid, lockmode);
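/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * relation_open and relation_close.  "relid" stands for some pg_class OID
 * supplied by the caller; closing with NoLock instead would keep the lock
 * until end of transaction, as noted above.
 *
 *		Relation	rel;
 *
 *		rel = relation_open(relid, AccessShareLock);
 *		... examine rel->rd_rel, scan the relation, etc. ...
 *		relation_close(rel, AccessShareLock);
 */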
1047 * heap_open - open a heap relation by relation OID
1049 * This is essentially relation_open plus check that the relation
1050 * is not an index nor a composite type. (The caller should also
1051 * check that it's not a view before assuming it has storage.)
1055 heap_open(Oid relationId, LOCKMODE lockmode)
1059 r = relation_open(relationId, lockmode);
1061 if (r->rd_rel->relkind == RELKIND_INDEX)
1063 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1064 errmsg("\"%s\" is an index",
1065 RelationGetRelationName(r))));
1066 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1068 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1069 errmsg("\"%s\" is a composite type",
1070 RelationGetRelationName(r))));
1076 * heap_openrv - open a heap relation specified
1077 * by a RangeVar node
1079 * As above, but relation is specified by a RangeVar.
1083 heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
1087 r = relation_openrv(relation, lockmode);
1089 if (r->rd_rel->relkind == RELKIND_INDEX)
1091 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1092 errmsg("\"%s\" is an index",
1093 RelationGetRelationName(r))));
1094 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1096 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1097 errmsg("\"%s\" is a composite type",
1098 RelationGetRelationName(r))));
1104 * try_heap_openrv - open a heap relation specified
1105 * by a RangeVar node
1107 * As above, but return NULL instead of failing for relation-not-found.
1111 try_heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
1115 r = try_relation_openrv(relation, lockmode);
1119 if (r->rd_rel->relkind == RELKIND_INDEX)
1121 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1122 errmsg("\"%s\" is an index",
1123 RelationGetRelationName(r))));
1124 else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
1126 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1127 errmsg("\"%s\" is a composite type",
1128 RelationGetRelationName(r))));
1136 * heap_beginscan - begin relation scan
1138 * heap_beginscan_strat offers an extended API that lets the caller control
1139 * whether a nondefault buffer access strategy can be used, and whether
1140 * syncscan can be chosen (possibly resulting in the scan not starting from
1141 * block zero). Both of these default to TRUE with plain heap_beginscan.
1143 * heap_beginscan_bm is an alternative entry point for setting up a
1144 * HeapScanDesc for a bitmap heap scan. Although that scan technology is
1145 * really quite unlike a standard seqscan, there is just enough commonality
1146 * to make it worth using the same data structure.
1150 heap_beginscan(Relation relation, Snapshot snapshot,
1151 int nkeys, ScanKey key)
1153 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1158 heap_beginscan_strat(Relation relation, Snapshot snapshot,
1159 int nkeys, ScanKey key,
1160 bool allow_strat, bool allow_sync)
1162 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1163 allow_strat, allow_sync, false);
1167 heap_beginscan_bm(Relation relation, Snapshot snapshot,
1168 int nkeys, ScanKey key)
1170 return heap_beginscan_internal(relation, snapshot, nkeys, key,
1171 false, false, true);
1175 heap_beginscan_internal(Relation relation, Snapshot snapshot,
1176 int nkeys, ScanKey key,
1177 bool allow_strat, bool allow_sync,
1183 * increment relation ref count while scanning relation
1185 * This is just to make really sure the relcache entry won't go away while
1186 * the scan has a pointer to it. Caller should be holding the rel open
1187 * anyway, so this is redundant in all normal scenarios...
1189 RelationIncrementReferenceCount(relation);
1192 * allocate and initialize scan descriptor
1194 scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1196 scan->rs_rd = relation;
1197 scan->rs_snapshot = snapshot;
1198 scan->rs_nkeys = nkeys;
1199 scan->rs_bitmapscan = is_bitmapscan;
1200 scan->rs_strategy = NULL; /* set in initscan */
1201 scan->rs_allow_strat = allow_strat;
1202 scan->rs_allow_sync = allow_sync;
1205 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1207 scan->rs_pageatatime = IsMVCCSnapshot(snapshot);
1209 /* we only need to set this up once */
1210 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1213 * we do this here instead of in initscan() because heap_rescan also calls
1214 * initscan() and we don't want to allocate memory again
1217 scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1219 scan->rs_key = NULL;
1221 initscan(scan, key);
1227 * heap_rescan - restart a relation scan
1231 heap_rescan(HeapScanDesc scan,
1235 * unpin scan buffers
1237 if (BufferIsValid(scan->rs_cbuf))
1238 ReleaseBuffer(scan->rs_cbuf);
1241 * reinitialize scan descriptor
1243 initscan(scan, key);
1247 * heap_endscan - end relation scan
1249 * See how to integrate with index scans.
1250 * Check handling of reldesc caching.
1254 heap_endscan(HeapScanDesc scan)
1256 /* Note: no locking manipulations needed */
1259 * unpin scan buffers
1261 if (BufferIsValid(scan->rs_cbuf))
1262 ReleaseBuffer(scan->rs_cbuf);
1265 * decrement relation reference count and free scan descriptor storage
1267 RelationDecrementReferenceCount(scan->rs_rd);
1270 pfree(scan->rs_key);
1272 if (scan->rs_strategy != NULL)
1273 FreeAccessStrategy(scan->rs_strategy);
1279 * heap_getnext - retrieve next tuple in scan
1281 * Fix to work with index relations.
1282 * We don't return the buffer anymore, but you can get it from the
1283 * returned HeapTuple.
1288 #define HEAPDEBUG_1 \
1289 elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
1290 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
1291 #define HEAPDEBUG_2 \
1292 elog(DEBUG2, "heap_getnext returning EOS")
1293 #define HEAPDEBUG_3 \
1294 elog(DEBUG2, "heap_getnext returning tuple")
1299 #endif /* !defined(HEAPDEBUGALL) */
1303 heap_getnext(HeapScanDesc scan, ScanDirection direction)
1305 /* Note: no locking manipulations needed */
1307 HEAPDEBUG_1; /* heap_getnext( info ) */
1309 if (scan->rs_pageatatime)
1310 heapgettup_pagemode(scan, direction,
1311 scan->rs_nkeys, scan->rs_key);
1313 heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1315 if (scan->rs_ctup.t_data == NULL)
1317 HEAPDEBUG_2; /* heap_getnext returning EOS */
1322 * if we get here it means we have a new current scan tuple, so point to
1323 * the proper return buffer and return the tuple.
1325 HEAPDEBUG_3; /* heap_getnext returning tuple */
1327 pgstat_count_heap_getnext(scan->rs_rd);
1329 return &(scan->rs_ctup);
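/*
 * Illustrative sketch, not part of the original file: the canonical
 * sequential-scan loop built from heap_open, heap_beginscan, heap_getnext,
 * heap_endscan and heap_close.  Assumes a backend context inside a
 * transaction; "relid" stands for the table's OID and the active snapshot
 * provides visibility.
 *
 *		Relation		rel;
 *		HeapScanDesc	scan;
 *		HeapTuple		tuple;
 *
 *		rel = heap_open(relid, AccessShareLock);
 *		scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
 *		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *		{
 *			... examine tuple, e.g. with fastgetattr() ...
 *		}
 *		heap_endscan(scan);
 *		heap_close(rel, AccessShareLock);
 */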
1333 * heap_fetch - retrieve tuple with given tid
1335 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1336 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1337 * against the specified snapshot.
1339 * If successful (tuple found and passes snapshot time qual), then *userbuf
1340 * is set to the buffer holding the tuple and TRUE is returned. The caller
1341 * must unpin the buffer when done with the tuple.
1343 * If the tuple is not found (ie, item number references a deleted slot),
1344 * then tuple->t_data is set to NULL and FALSE is returned.
1346 * If the tuple is found but fails the time qual check, then FALSE is returned
1347 * but tuple->t_data is left pointing to the tuple.
1349 * keep_buf determines what is done with the buffer in the FALSE-result cases.
1350 * When the caller specifies keep_buf = true, we retain the pin on the buffer
1351 * and return it in *userbuf (so the caller must eventually unpin it); when
1352 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
1354 * stats_relation is the relation to charge the heap_fetch operation against
1355 * for statistical purposes. (This could be the heap rel itself, an
1356 * associated index, or NULL to not count the fetch at all.)
1358 * heap_fetch does not follow HOT chains: only the exact TID requested will
1361 * It is somewhat inconsistent that we ereport() on invalid block number but
1362 * return false on invalid item number. There are a couple of reasons though.
1363 * One is that the caller can relatively easily check the block number for
1364 * validity, but cannot check the item number without reading the page
1365 * himself. Another is that when we are following a t_ctid link, we can be
1366 * reasonably confident that the page number is valid (since VACUUM shouldn't
1367 * truncate off the destination page without having killed the referencing
1368 * tuple first), but the item number might well not be good.
1371 heap_fetch(Relation relation,
1376 Relation stats_relation)
1378 ItemPointer tid = &(tuple->t_self);
1382 OffsetNumber offnum;
1386 * Fetch and pin the appropriate page of the relation.
1388 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1391 * Need share lock on buffer to examine tuple commit status.
1393 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1394 page = BufferGetPage(buffer);
1397 * We'd better check for out-of-range offnum in case of VACUUM since the
1400 offnum = ItemPointerGetOffsetNumber(tid);
1401 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1403 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1408 ReleaseBuffer(buffer);
1409 *userbuf = InvalidBuffer;
1411 tuple->t_data = NULL;
1416 * get the item line pointer corresponding to the requested tid
1418 lp = PageGetItemId(page, offnum);
1421 * Must check for deleted tuple.
1423 if (!ItemIdIsNormal(lp))
1425 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1430 ReleaseBuffer(buffer);
1431 *userbuf = InvalidBuffer;
1433 tuple->t_data = NULL;
1438 * fill in *tuple fields
1440 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1441 tuple->t_len = ItemIdGetLength(lp);
1442 tuple->t_tableOid = RelationGetRelid(relation);
1445 * check time qualification of tuple, then release lock
1447 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1449 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1454 * All checks passed, so return the tuple as valid. Caller is now
1455 * responsible for releasing the buffer.
1459 /* Count the successful fetch against appropriate rel, if any */
1460 if (stats_relation != NULL)
1461 pgstat_count_heap_fetch(stats_relation);
1466 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1471 ReleaseBuffer(buffer);
1472 *userbuf = InvalidBuffer;
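/*
 * Illustrative sketch, not part of the original file: fetching a single tuple
 * by TID.  The argument order shown here (snapshot, tuple, userbuf, keep_buf,
 * stats_relation) follows the description above; "tid" stands for an
 * ItemPointer supplied by the caller.
 *
 *		HeapTupleData	tup;
 *		Buffer			buf;
 *
 *		tup.t_self = *tid;
 *		if (heap_fetch(rel, GetActiveSnapshot(), &tup, &buf, false, NULL))
 *		{
 *			... use tup.t_data and tup.t_len ...
 *			ReleaseBuffer(buf);			(drop our pin when done)
 *		}
 */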
1479 * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1481 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1482 * of a HOT chain), and buffer is the buffer holding this tuple. We search
1483 * for the first chain member satisfying the given snapshot. If one is
1484 * found, we update *tid to reference that tuple's offset number, and
1485 * return TRUE. If no match, return FALSE without modifying *tid.
1487 * If all_dead is not NULL, we check non-visible tuples to see if they are
1488 * globally dead; *all_dead is set TRUE if all members of the HOT chain
1489 * are vacuumable, FALSE if not.
1491 * Unlike heap_fetch, the caller must already have pin and (at least) share
1492 * lock on the buffer; it is still pinned/locked at exit. Also unlike
1493 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
1496 heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
1499 Page dp = (Page) BufferGetPage(buffer);
1500 TransactionId prev_xmax = InvalidTransactionId;
1501 OffsetNumber offnum;
1502 bool at_chain_start;
1507 Assert(TransactionIdIsValid(RecentGlobalXmin));
1509 Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
1510 offnum = ItemPointerGetOffsetNumber(tid);
1511 at_chain_start = true;
1513 /* Scan through possible multiple members of HOT-chain */
1517 HeapTupleData heapTuple;
1519 /* check for bogus TID */
1520 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1523 lp = PageGetItemId(dp, offnum);
1525 /* check for unused, dead, or redirected items */
1526 if (!ItemIdIsNormal(lp))
1528 /* We should only see a redirect at start of chain */
1529 if (ItemIdIsRedirected(lp) && at_chain_start)
1531 /* Follow the redirect */
1532 offnum = ItemIdGetRedirect(lp);
1533 at_chain_start = false;
1536 /* else must be end of chain */
1540 heapTuple.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1541 heapTuple.t_len = ItemIdGetLength(lp);
1544 * Shouldn't see a HEAP_ONLY tuple at chain start.
1546 if (at_chain_start && HeapTupleIsHeapOnly(&heapTuple))
1550 * The xmin should match the previous xmax value, else chain is
1553 if (TransactionIdIsValid(prev_xmax) &&
1554 !TransactionIdEquals(prev_xmax,
1555 HeapTupleHeaderGetXmin(heapTuple.t_data)))
1558 /* If it's visible per the snapshot, we must return it */
1559 if (HeapTupleSatisfiesVisibility(&heapTuple, snapshot, buffer))
1561 ItemPointerSetOffsetNumber(tid, offnum);
1568 * If we can't see it, maybe no one else can either. At caller
1569 * request, check whether all chain members are dead to all
1572 if (all_dead && *all_dead &&
1573 HeapTupleSatisfiesVacuum(heapTuple.t_data, RecentGlobalXmin,
1574 buffer) != HEAPTUPLE_DEAD)
1578 * Check to see if HOT chain continues past this tuple; if so fetch
1579 * the next offnum and loop around.
1581 if (HeapTupleIsHotUpdated(&heapTuple))
1583 Assert(ItemPointerGetBlockNumber(&heapTuple.t_data->t_ctid) ==
1584 ItemPointerGetBlockNumber(tid));
1585 offnum = ItemPointerGetOffsetNumber(&heapTuple.t_data->t_ctid);
1586 at_chain_start = false;
1587 prev_xmax = HeapTupleHeaderGetXmax(heapTuple.t_data);
1590 break; /* end of chain */
1597 * heap_hot_search - search HOT chain for tuple satisfying snapshot
1599 * This has the same API as heap_hot_search_buffer, except that the caller
1600 * does not provide the buffer containing the page, rather we access it
1604 heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
1610 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1611 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1612 result = heap_hot_search_buffer(tid, buffer, snapshot, all_dead);
1613 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1614 ReleaseBuffer(buffer);
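/*
 * Illustrative sketch, not part of the original file: probing a HOT chain
 * from its root TID, roughly as an index access method would.  Assumes the
 * caller already holds a suitable lock on the relation; "root_tid" stands
 * for the TID stored in the index entry.
 *
 *		ItemPointerData	tid = root_tid;
 *		bool			all_dead;
 *
 *		if (heap_hot_search(&tid, rel, GetActiveSnapshot(), &all_dead))
 *			... tid now names the visible member of the chain ...
 *		else if (all_dead)
 *			... no member is visible to anyone; the index entry can be killed ...
 */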
1619 * heap_get_latest_tid - get the latest tid of a specified tuple
1621 * Actually, this gets the latest version that is visible according to
1622 * the passed snapshot. You can pass SnapshotDirty to get the very latest,
1623 * possibly uncommitted version.
1625 * *tid is both an input and an output parameter: it is updated to
1626 * show the latest version of the row. Note that it will not be changed
1627 * if no version of the row passes the snapshot test.
1630 heap_get_latest_tid(Relation relation,
1635 ItemPointerData ctid;
1636 TransactionId priorXmax;
1638 /* this is to avoid Assert failures on bad input */
1639 if (!ItemPointerIsValid(tid))
1643 * Since this can be called with user-supplied TID, don't trust the input
1644 * too much. (RelationGetNumberOfBlocks is an expensive check, so we
1645 * don't check t_ctid links again this way. Note that it would not do to
1646 * call it just once and save the result, either.)
1648 blk = ItemPointerGetBlockNumber(tid);
1649 if (blk >= RelationGetNumberOfBlocks(relation))
1650 elog(ERROR, "block number %u is out of range for relation \"%s\"",
1651 blk, RelationGetRelationName(relation));
1654 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1655 * need to examine, and *tid is the TID we will return if ctid turns out
1658 * Note that we will loop until we reach the end of the t_ctid chain.
1659 * Depending on the snapshot passed, there might be at most one visible
1660 * version of the row, but we don't try to optimize for that.
1663 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1668 OffsetNumber offnum;
1674 * Read, pin, and lock the page.
1676 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1677 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1678 page = BufferGetPage(buffer);
1681 * Check for bogus item number. This is not treated as an error
1682 * condition because it can happen while following a t_ctid link. We
1683 * just assume that the prior tid is OK and return it unchanged.
1685 offnum = ItemPointerGetOffsetNumber(&ctid);
1686 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1688 UnlockReleaseBuffer(buffer);
1691 lp = PageGetItemId(page, offnum);
1692 if (!ItemIdIsNormal(lp))
1694 UnlockReleaseBuffer(buffer);
1698 /* OK to access the tuple */
1700 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1701 tp.t_len = ItemIdGetLength(lp);
1704 * After following a t_ctid link, we might arrive at an unrelated
1705 * tuple. Check for XMIN match.
1707 if (TransactionIdIsValid(priorXmax) &&
1708 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1710 UnlockReleaseBuffer(buffer);
1715 * Check time qualification of tuple; if visible, set it as the new
1718 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1723 * If there's a valid t_ctid link, follow it, else we're done.
1725 if ((tp.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) ||
1726 ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1728 UnlockReleaseBuffer(buffer);
1732 ctid = tp.t_data->t_ctid;
1733 priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
1734 UnlockReleaseBuffer(buffer);
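/*
 * Illustrative sketch, not part of the original file: chasing a row to its
 * newest version visible to a snapshot.  Per the comments above, "tid" is
 * updated in place only if some version passes the snapshot test; it should
 * start out naming a known version of the row.
 *
 *		heap_get_latest_tid(rel, GetActiveSnapshot(), &tid);
 *		... tid now names the latest visible version, if any ...
 */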
1740 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
1742 * This is called after we have waited for the XMAX transaction to terminate.
1743 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
1744 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
1745 * hint bit if possible --- but beware that that may not yet be possible,
1746 * if the transaction committed asynchronously. Hence callers should look
1747 * only at XMAX_INVALID.
1750 UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
1752 Assert(TransactionIdEquals(HeapTupleHeaderGetXmax(tuple), xid));
1754 if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
1756 if (TransactionIdDidCommit(xid))
1757 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
1760 HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
1761 InvalidTransactionId);
1767 * GetBulkInsertState - prepare status object for a bulk insert
1770 GetBulkInsertState(void)
1772 BulkInsertState bistate;
1774 bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
1775 bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
1776 bistate->current_buf = InvalidBuffer;
1781 * FreeBulkInsertState - clean up after finishing a bulk insert
1784 FreeBulkInsertState(BulkInsertState bistate)
1786 if (bistate->current_buf != InvalidBuffer)
1787 ReleaseBuffer(bistate->current_buf);
1788 FreeAccessStrategy(bistate->strategy);
1794 * heap_insert - insert tuple into a heap
1796 * The new tuple is stamped with current transaction ID and the specified
1799 * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
1800 * logged in WAL, even for a non-temp relation. Safe usage of this behavior
1801 * requires that we arrange that all new tuples go into new pages not
1802 * containing any tuples from other transactions, and that the relation gets
1803 * fsync'd before commit. (See also heap_sync() comments)
1805 * The HEAP_INSERT_SKIP_FSM option is passed directly to
1806 * RelationGetBufferForTuple, which see for more info.
1808 * Note that these options will be applied when inserting into the heap's
1809 * TOAST table, too, if the tuple requires any out-of-line data.
1811 * The BulkInsertState object (if any; bistate can be NULL for default
1812 * behavior) is also just passed through to RelationGetBufferForTuple.
1814 * The return value is the OID assigned to the tuple (either here or by the
1815 * caller), or InvalidOid if no OID. The header fields of *tup are updated
1816 * to match the stored tuple; in particular tup->t_self receives the actual
1817 * TID where the tuple was stored. But note that any toasting of fields
1818 * within the tuple data is NOT reflected into *tup.
1821 heap_insert(Relation relation, HeapTuple tup, CommandId cid,
1822 int options, BulkInsertState bistate)
1824 TransactionId xid = GetCurrentTransactionId();
1827 bool all_visible_cleared = false;
1829 if (relation->rd_rel->relhasoids)
1832 /* this is redundant with an Assert in HeapTupleSetOid */
1833 Assert(tup->t_data->t_infomask & HEAP_HASOID);
1837 * If the object id of this tuple has already been assigned, trust the
1838 * caller. There are a couple of ways this can happen. At initial db
1839 * creation, the backend program sets oids for tuples. When we define
1840 * an index, we set the oid. Finally, in the future, we may allow
1841 * users to set their own object ids in order to support a persistent
1842 * object store (objects need to contain pointers to one another).
1844 if (!OidIsValid(HeapTupleGetOid(tup)))
1845 HeapTupleSetOid(tup, GetNewOid(relation));
1850 /* check there is no space for an OID */
1850 Assert(!(tup->t_data->t_infomask & HEAP_HASOID));
1853 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
1854 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
1855 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
1856 HeapTupleHeaderSetXmin(tup->t_data, xid);
1857 HeapTupleHeaderSetCmin(tup->t_data, cid);
1858 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
1859 tup->t_tableOid = RelationGetRelid(relation);
1862 * If the new tuple is too big for storage or contains already toasted
1863 * out-of-line attributes from some other relation, invoke the toaster.
1865 * Note: below this point, heaptup is the data we actually intend to store
1866 * into the relation; tup is the caller's original untoasted data.
1868 if (relation->rd_rel->relkind != RELKIND_RELATION)
1870 /* toast table entries should never be recursively toasted */
1871 Assert(!HeapTupleHasExternal(tup));
1874 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
1875 heaptup = toast_insert_or_update(relation, tup, NULL, options);
1879 /* Find buffer to insert this tuple into */
1880 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
1881 InvalidBuffer, options, bistate);
1883 /* NO EREPORT(ERROR) from here till changes are logged */
1884 START_CRIT_SECTION();
1886 RelationPutHeapTuple(relation, buffer, heaptup);
1888 if (PageIsAllVisible(BufferGetPage(buffer)))
1890 all_visible_cleared = true;
1891 PageClearAllVisible(BufferGetPage(buffer));
1895 * XXX Should we set PageSetPrunable on this page ?
1897 * The inserting transaction may eventually abort thus making this tuple
1898 * DEAD and hence available for pruning. Though we don't want to optimize
1899 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
1900 * aborted tuple will never be pruned until next vacuum is triggered.
1902 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
1905 MarkBufferDirty(buffer);
1908 if (!(options & HEAP_INSERT_SKIP_WAL) && !relation->rd_istemp)
1910 xl_heap_insert xlrec;
1911 xl_heap_header xlhdr;
1913 XLogRecData rdata[3];
1914 Page page = BufferGetPage(buffer);
1915 uint8 info = XLOG_HEAP_INSERT;
1917 xlrec.all_visible_cleared = all_visible_cleared;
1918 xlrec.target.node = relation->rd_node;
1919 xlrec.target.tid = heaptup->t_self;
1920 rdata[0].data = (char *) &xlrec;
1921 rdata[0].len = SizeOfHeapInsert;
1922 rdata[0].buffer = InvalidBuffer;
1923 rdata[0].next = &(rdata[1]);
1925 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
1926 xlhdr.t_infomask = heaptup->t_data->t_infomask;
1927 xlhdr.t_hoff = heaptup->t_data->t_hoff;
1930 * note we mark rdata[1] as belonging to buffer; if XLogInsert decides
1931 * to write the whole page to the xlog, we don't need to store
1932 * xl_heap_header in the xlog.
1934 rdata[1].data = (char *) &xlhdr;
1935 rdata[1].len = SizeOfHeapHeader;
1936 rdata[1].buffer = buffer;
1937 rdata[1].buffer_std = true;
1938 rdata[1].next = &(rdata[2]);
1940 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
1941 rdata[2].data = (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits);
1942 rdata[2].len = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
1943 rdata[2].buffer = buffer;
1944 rdata[2].buffer_std = true;
1945 rdata[2].next = NULL;
1948 * If this is the first and only tuple on the page, we can reinit the
1949 * page instead of restoring the whole thing. Set flag, and hide
1950 * buffer references from XLogInsert.
1952 if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
1953 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
1955 info |= XLOG_HEAP_INIT_PAGE;
1956 rdata[1].buffer = rdata[2].buffer = InvalidBuffer;
1959 recptr = XLogInsert(RM_HEAP_ID, info, rdata);
1961 PageSetLSN(page, recptr);
1962 PageSetTLI(page, ThisTimeLineID);
1967 UnlockReleaseBuffer(buffer);
1969 /* Clear the bit in the visibility map if necessary */
1970 if (all_visible_cleared)
1971 visibilitymap_clear(relation,
1972 ItemPointerGetBlockNumber(&(heaptup->t_self)));
1975 * If tuple is cachable, mark it for invalidation from the caches in case
1976 * we abort. Note it is OK to do this after releasing the buffer, because
1977 * the heaptup data structure is all in local memory, not in the shared
1980 CacheInvalidateHeapTuple(relation, heaptup);
1982 pgstat_count_heap_insert(relation);
1985 * If heaptup is a private copy, release it. Don't forget to copy t_self
1986 * back to the caller's image, too.
1990 tup->t_self = heaptup->t_self;
1991 heap_freetuple(heaptup);
1994 return HeapTupleGetOid(tup);
1998 * simple_heap_insert - insert a tuple
2000 * Currently, this routine differs from heap_insert only in supplying
2001 * a default command ID and not allowing access to the speedup options.
2003 * This should be used rather than using heap_insert directly in most places
2004 * where we are modifying system catalogs.
2007 simple_heap_insert(Relation relation, HeapTuple tup)
2009 return heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
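/*
 * Illustrative sketch, not part of the original file: a bulk-load loop using
 * a BulkInsertState, per the comments on heap_insert and GetBulkInsertState
 * above.  "next_tuple_to_load" is a hypothetical source of tuples; whether
 * HEAP_INSERT_SKIP_WAL can also be used depends on the caller arranging an
 * fsync before commit (see heap_sync).
 *
 *		BulkInsertState	bistate = GetBulkInsertState();
 *		HeapTuple		tup;
 *
 *		while ((tup = next_tuple_to_load()) != NULL)
 *			heap_insert(rel, tup, GetCurrentCommandId(true),
 *						HEAP_INSERT_SKIP_FSM, bistate);
 *
 *		FreeBulkInsertState(bistate);
 */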
2013 * heap_delete - delete a tuple
2015 * NB: do not call this directly unless you are prepared to deal with
2016 * concurrent-update conditions. Use simple_heap_delete instead.
2018 * relation - table to be modified (caller must hold suitable lock)
2019 * tid - TID of tuple to be deleted
2020 * ctid - output parameter, used only for failure case (see below)
2021 * update_xmax - output parameter, used only for failure case (see below)
2022 * cid - delete command ID (used for visibility test, and stored into
2023 * cmax if successful)
2024 * crosscheck - if not InvalidSnapshot, also check tuple against this
2025 * wait - true if should wait for any conflicting update to commit/abort
2027 * Normal, successful return value is HeapTupleMayBeUpdated, which
2028 * actually means we did delete it. Failure return codes are
2029 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2030 * (the last only possible if wait == false).
2032 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
2033 * If t_ctid is the same as tid, the tuple was deleted; if different, the
2034 * tuple was updated, and t_ctid is the location of the replacement tuple.
2035 * (t_xmax is needed to verify that the replacement tuple matches.)
2038 heap_delete(Relation relation, ItemPointer tid,
2039 ItemPointer ctid, TransactionId *update_xmax,
2040 CommandId cid, Snapshot crosscheck, bool wait)
2043 TransactionId xid = GetCurrentTransactionId();
2048 bool have_tuple_lock = false;
2050 bool all_visible_cleared = false;
2052 Assert(ItemPointerIsValid(tid));
2054 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2055 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2057 page = BufferGetPage(buffer);
2058 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2059 Assert(ItemIdIsNormal(lp));
2061 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2062 tp.t_len = ItemIdGetLength(lp);
2066 result = HeapTupleSatisfiesUpdate(tp.t_data, cid, buffer);
2068 if (result == HeapTupleInvisible)
2070 UnlockReleaseBuffer(buffer);
2071 elog(ERROR, "attempted to delete invisible tuple");
2073 else if (result == HeapTupleBeingUpdated && wait)
2075 TransactionId xwait;
2078 /* must copy state data before unlocking buffer */
2079 xwait = HeapTupleHeaderGetXmax(tp.t_data);
2080 infomask = tp.t_data->t_infomask;
2082 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2085 * Acquire tuple lock to establish our priority for the tuple (see
2086 * heap_lock_tuple). LockTuple will release us when we are
2087 * next-in-line for the tuple.
2089 * If we are forced to "start over" below, we keep the tuple lock;
2090 * this arranges that we stay at the head of the line while rechecking
2093 if (!have_tuple_lock)
2095 LockTuple(relation, &(tp.t_self), ExclusiveLock);
2096 have_tuple_lock = true;
2100 * Sleep until concurrent transaction ends. Note that we don't care
2101 * if the locker has an exclusive or shared lock, because we need
2105 if (infomask & HEAP_XMAX_IS_MULTI)
2107 /* wait for multixact */
2108 MultiXactIdWait((MultiXactId) xwait);
2109 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2112 * If xwait had just locked the tuple then some other xact could
2113 * update this tuple before we get to this point. Check for xmax
2114 * change, and start over if so.
2116 if (!(tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2117 !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
2122 * You might think the multixact is necessarily done here, but not
2123 * so: it could have surviving members, namely our own xact or
2124 * other subxacts of this backend. It is legal for us to delete
2125 * the tuple in either case, however (the latter case is
2126 * essentially a situation of upgrading our former shared lock to
2127 * exclusive). We don't bother changing the on-disk hint bits
2128 * since we are about to overwrite the xmax altogether.
2133 /* wait for regular transaction to end */
2134 XactLockTableWait(xwait);
2135 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2138 * xwait is done, but if xwait had just locked the tuple then some
2139 * other xact could update this tuple before we get to this point.
2140 * Check for xmax change, and start over if so.
2142 if ((tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2143 !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
2147 /* Otherwise check if it committed or aborted */
2148 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2152 * We may overwrite if previous xmax aborted, or if it committed but
2153 * only locked the tuple without updating it.
2155 if (tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
2157 result = HeapTupleMayBeUpdated;
2159 result = HeapTupleUpdated;
2162 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
2164 /* Perform additional check for serializable RI updates */
2165 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2166 result = HeapTupleUpdated;
2169 if (result != HeapTupleMayBeUpdated)
2171 Assert(result == HeapTupleSelfUpdated ||
2172 result == HeapTupleUpdated ||
2173 result == HeapTupleBeingUpdated);
2174 Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2175 *ctid = tp.t_data->t_ctid;
2176 *update_xmax = HeapTupleHeaderGetXmax(tp.t_data);
2177 UnlockReleaseBuffer(buffer);
2178 if (have_tuple_lock)
2179 UnlockTuple(relation, &(tp.t_self), ExclusiveLock);
2183 /* replace cid with a combo cid if necessary */
2184 HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2186 START_CRIT_SECTION();
2189 * If this transaction commits, the tuple will become DEAD sooner or
2190 * later. Set flag that this page is a candidate for pruning once our xid
2191 * falls below the OldestXmin horizon. If the transaction finally aborts,
2192 * the subsequent page pruning will be a no-op and the hint will be
2195 PageSetPrunable(page, xid);
2197 if (PageIsAllVisible(page))
2199 all_visible_cleared = true;
2200 PageClearAllVisible(page);
2203 /* store transaction information of xact deleting the tuple */
2204 tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2206 HEAP_XMAX_IS_MULTI |
2209 HeapTupleHeaderClearHotUpdated(tp.t_data);
2210 HeapTupleHeaderSetXmax(tp.t_data, xid);
2211 HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2212 /* Make sure there is no forward chain link in t_ctid */
2213 tp.t_data->t_ctid = tp.t_self;
2215 MarkBufferDirty(buffer);
2218 if (!relation->rd_istemp)
2220 xl_heap_delete xlrec;
2222 XLogRecData rdata[2];
2224 xlrec.all_visible_cleared = all_visible_cleared;
2225 xlrec.target.node = relation->rd_node;
2226 xlrec.target.tid = tp.t_self;
2227 rdata[0].data = (char *) &xlrec;
2228 rdata[0].len = SizeOfHeapDelete;
2229 rdata[0].buffer = InvalidBuffer;
2230 rdata[0].next = &(rdata[1]);
2232 rdata[1].data = NULL;
2234 rdata[1].buffer = buffer;
2235 rdata[1].buffer_std = true;
2236 rdata[1].next = NULL;
2238 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);
2240 PageSetLSN(page, recptr);
2241 PageSetTLI(page, ThisTimeLineID);
2246 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2249 * If the tuple has toasted out-of-line attributes, we need to delete
2250 * those items too. We have to do this before releasing the buffer
2251 * because we need to look at the contents of the tuple, but it's OK to
2252 * release the content lock on the buffer first.
2254 if (relation->rd_rel->relkind != RELKIND_RELATION)
2256 /* toast table entries should never be recursively toasted */
2257 Assert(!HeapTupleHasExternal(&tp));
2259 else if (HeapTupleHasExternal(&tp))
2260 toast_delete(relation, &tp);
2263 * Mark tuple for invalidation from system caches at next command
2264 * boundary. We have to do this before releasing the buffer because we
2265 * need to look at the contents of the tuple.
2267 CacheInvalidateHeapTuple(relation, &tp);
2269 /* Clear the bit in the visibility map if necessary */
2270 if (all_visible_cleared)
2271 visibilitymap_clear(relation, BufferGetBlockNumber(buffer));
2273 /* Now we can release the buffer */
2274 ReleaseBuffer(buffer);
2277 * Release the lmgr tuple lock, if we had it.
2279 if (have_tuple_lock)
2280 UnlockTuple(relation, &(tp.t_self), ExclusiveLock);
2282 pgstat_count_heap_delete(relation);
2284 return HeapTupleMayBeUpdated;
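/*
 * A minimal sketch of how a caller typically reacts to heap_delete's result
 * codes; "rel", "tid", "cid" and "crosscheck" are assumed to be supplied by
 * the caller, much as the executor supplies them:
 *
 *		result = heap_delete(rel, tid, &update_ctid, &update_xmax,
 *							 cid, crosscheck, true);
 *		switch (result)
 *		{
 *			case HeapTupleMayBeUpdated:
 *				break;				we deleted it
 *			case HeapTupleSelfUpdated:
 *				return;				already deleted by this command
 *			case HeapTupleUpdated:
 *				if (ItemPointerEquals(tid, &update_ctid))
 *					return;			deleted by a concurrent transaction
 *				... else chase update_ctid/update_xmax to the new version ...
 *				break;
 *		}
 */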
2288 * simple_heap_delete - delete a tuple
2290 * This routine may be used to delete a tuple when concurrent updates of
2291 * the target tuple are not expected (for example, because we have a lock
2292 * on the relation associated with the tuple). Any failure is reported
2296 simple_heap_delete(Relation relation, ItemPointer tid)
2299 ItemPointerData update_ctid;
2300 TransactionId update_xmax;
2302 result = heap_delete(relation, tid,
2303 &update_ctid, &update_xmax,
2304 GetCurrentCommandId(true), InvalidSnapshot,
2305 true /* wait for commit */ );
2308 case HeapTupleSelfUpdated:
2309 /* Tuple was already updated in current command? */
2310 elog(ERROR, "tuple already updated by self");
2313 case HeapTupleMayBeUpdated:
2314 /* done successfully */
2317 case HeapTupleUpdated:
2318 elog(ERROR, "tuple concurrently updated");
2322 elog(ERROR, "unrecognized heap_delete status: %u", result);
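/*
 * A minimal sketch of the usual catalog-maintenance pattern around
 * simple_heap_delete; "catalogid" and "tup" (a previously fetched catalog
 * tuple) are assumed to be supplied by the caller:
 *
 *		Relation	rel = heap_open(catalogid, RowExclusiveLock);
 *
 *		simple_heap_delete(rel, &tup->t_self);
 *		heap_close(rel, RowExclusiveLock);
 */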
2328 * heap_update - replace a tuple
2330 * NB: do not call this directly unless you are prepared to deal with
2331 * concurrent-update conditions. Use simple_heap_update instead.
2333 * relation - table to be modified (caller must hold suitable lock)
2334 * otid - TID of old tuple to be replaced
2335 * newtup - newly constructed tuple data to store
2336 * ctid - output parameter, used only for failure case (see below)
2337 * update_xmax - output parameter, used only for failure case (see below)
2338 * cid - update command ID (used for visibility test, and stored into
2339 * cmax/cmin if successful)
2340 * crosscheck - if not InvalidSnapshot, also check old tuple against this
2341 * wait - true if should wait for any conflicting update to commit/abort
2343 * Normal, successful return value is HeapTupleMayBeUpdated, which
2344 * actually means we *did* update it. Failure return codes are
2345 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2346 * (the last only possible if wait == false).
2348 * On success, the header fields of *newtup are updated to match the new
2349 * stored tuple; in particular, newtup->t_self is set to the TID where the
2350 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
2351 * update was done. However, any TOAST changes in the new tuple's
2352 * data are not reflected into *newtup.
2354 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
2355 * If t_ctid is the same as otid, the tuple was deleted; if different, the
2356 * tuple was updated, and t_ctid is the location of the replacement tuple.
2357 * (t_xmax is needed to verify that the replacement tuple matches.)
2360 heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
2361 ItemPointer ctid, TransactionId *update_xmax,
2362 CommandId cid, Snapshot crosscheck, bool wait)
2365 TransactionId xid = GetCurrentTransactionId();
2366 Bitmapset *hot_attrs;
2368 HeapTupleData oldtup;
2377 bool have_tuple_lock = false;
2379 bool use_hot_update = false;
2380 bool all_visible_cleared = false;
2381 bool all_visible_cleared_new = false;
2383 Assert(ItemPointerIsValid(otid));
2386 * Fetch the list of attributes to be checked for HOT update. This is
2387 * wasted effort if we fail to update or have to put the new tuple on a
2388 * different page. But we must compute the list before obtaining buffer
2389 * lock --- in the worst case, if we are doing an update on one of the
2390 * relevant system catalogs, we could deadlock if we try to fetch the list
2391 * later. In any case, the relcache caches the data so this is usually
2394 * Note that we get a copy here, so we need not worry about relcache flush
2395 * happening midway through.
2397 hot_attrs = RelationGetIndexAttrBitmap(relation);
2399 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));
2400 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2402 page = BufferGetPage(buffer);
2403 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
2404 Assert(ItemIdIsNormal(lp));
2406 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2407 oldtup.t_len = ItemIdGetLength(lp);
2408 oldtup.t_self = *otid;
2411 * Note: beyond this point, use oldtup not otid to refer to old tuple.
2412 * otid may very well point at newtup->t_self, which we will overwrite
2413 * with the new tuple's location, so there's great risk of confusion if we
2418 result = HeapTupleSatisfiesUpdate(oldtup.t_data, cid, buffer);
2420 if (result == HeapTupleInvisible)
2422 UnlockReleaseBuffer(buffer);
2423 elog(ERROR, "attempted to update invisible tuple");
2425 else if (result == HeapTupleBeingUpdated && wait)
2427 TransactionId xwait;
2430 /* must copy state data before unlocking buffer */
2431 xwait = HeapTupleHeaderGetXmax(oldtup.t_data);
2432 infomask = oldtup.t_data->t_infomask;
2434 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2437 * Acquire tuple lock to establish our priority for the tuple (see
2438 * heap_lock_tuple). LockTuple will release us when we are
2439 * next-in-line for the tuple.
2441 * If we are forced to "start over" below, we keep the tuple lock;
2442 * this arranges that we stay at the head of the line while rechecking
2445 if (!have_tuple_lock)
2447 LockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2448 have_tuple_lock = true;
2452 * Sleep until concurrent transaction ends. Note that we don't care
2453 * if the locker has an exclusive or shared lock, because we need
2457 if (infomask & HEAP_XMAX_IS_MULTI)
2459 /* wait for multixact */
2460 MultiXactIdWait((MultiXactId) xwait);
2461 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2464 * If xwait had just locked the tuple then some other xact could
2465 * update this tuple before we get to this point. Check for xmax
2466 * change, and start over if so.
2468 if (!(oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2469 !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
2474 * You might think the multixact is necessarily done here, but not
2475 * so: it could have surviving members, namely our own xact or
2476 * other subxacts of this backend. It is legal for us to update
2477 * the tuple in either case, however (the latter case is
2478 * essentially a situation of upgrading our former shared lock to
2479 * exclusive). We don't bother changing the on-disk hint bits
2480 * since we are about to overwrite the xmax altogether.
2485 /* wait for regular transaction to end */
2486 XactLockTableWait(xwait);
2487 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2490 * xwait is done, but if xwait had just locked the tuple then some
2491 * other xact could update this tuple before we get to this point.
2492 * Check for xmax change, and start over if so.
2494 if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2495 !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
2499 /* Otherwise check if it committed or aborted */
2500 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
2504 * We may overwrite if previous xmax aborted, or if it committed but
2505 * only locked the tuple without updating it.
2507 if (oldtup.t_data->t_infomask & (HEAP_XMAX_INVALID |
2509 result = HeapTupleMayBeUpdated;
2511 result = HeapTupleUpdated;
2514 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
2516 /* Perform additional check for serializable RI updates */
2517 if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
2518 result = HeapTupleUpdated;
2521 if (result != HeapTupleMayBeUpdated)
2523 Assert(result == HeapTupleSelfUpdated ||
2524 result == HeapTupleUpdated ||
2525 result == HeapTupleBeingUpdated);
2526 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
2527 *ctid = oldtup.t_data->t_ctid;
2528 *update_xmax = HeapTupleHeaderGetXmax(oldtup.t_data);
2529 UnlockReleaseBuffer(buffer);
2530 if (have_tuple_lock)
2531 UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2532 bms_free(hot_attrs);
2536 /* Fill in OID and transaction status data for newtup */
2537 if (relation->rd_rel->relhasoids)
2540 /* this is redundant with an Assert in HeapTupleSetOid */
2541 Assert(newtup->t_data->t_infomask & HEAP_HASOID);
2543 HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
2547 /* check there is no space for an OID */
2548 Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
2551 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2552 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2553 newtup->t_data->t_infomask |= (HEAP_XMAX_INVALID | HEAP_UPDATED);
2554 HeapTupleHeaderSetXmin(newtup->t_data, xid);
2555 HeapTupleHeaderSetCmin(newtup->t_data, cid);
2556 HeapTupleHeaderSetXmax(newtup->t_data, 0); /* for cleanliness */
2557 newtup->t_tableOid = RelationGetRelid(relation);
2560 * Replace cid with a combo cid if necessary. Note that we already put
2561 * the plain cid into the new tuple.
2563 HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
2566 * If the toaster needs to be activated, OR if the new tuple will not fit
2567 * on the same page as the old, then we need to release the content lock
2568 * (but not the pin!) on the old tuple's buffer while we are off doing
2569 * TOAST and/or table-file-extension work. We must mark the old tuple to
2570 * show that it's already being updated, else other processes may try to
2571 * update it themselves.
2573 * We need to invoke the toaster if there are already any out-of-line
2574 * toasted values present, or if the new tuple is over-threshold.
2576 if (relation->rd_rel->relkind != RELKIND_RELATION)
2578 /* toast table entries should never be recursively toasted */
2579 Assert(!HeapTupleHasExternal(&oldtup));
2580 Assert(!HeapTupleHasExternal(newtup));
2584 need_toast = (HeapTupleHasExternal(&oldtup) ||
2585 HeapTupleHasExternal(newtup) ||
2586 newtup->t_len > TOAST_TUPLE_THRESHOLD);
2588 pagefree = PageGetHeapFreeSpace(page);
2590 newtupsize = MAXALIGN(newtup->t_len);
2592 if (need_toast || newtupsize > pagefree)
2594 /* Clear obsolete visibility flags ... */
2595 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2597 HEAP_XMAX_IS_MULTI |
2600 HeapTupleClearHotUpdated(&oldtup);
2601 /* ... and store info about transaction updating this tuple */
2602 HeapTupleHeaderSetXmax(oldtup.t_data, xid);
2603 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
2604 /* temporarily make it look not-updated */
2605 oldtup.t_data->t_ctid = oldtup.t_self;
2606 already_marked = true;
2607 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2610 * Let the toaster do its thing, if needed.
2612 * Note: below this point, heaptup is the data we actually intend to
2613 * store into the relation; newtup is the caller's original untoasted
2618 /* Note we always use WAL and FSM during updates */
2619 heaptup = toast_insert_or_update(relation, newtup, &oldtup, 0);
2620 newtupsize = MAXALIGN(heaptup->t_len);
2626 * Now, do we need a new page for the tuple, or not? This is a bit
2627 * tricky since someone else could have added tuples to the page while
2628 * we weren't looking. We have to recheck the available space after
2629 * reacquiring the buffer lock. But don't bother to do that if the
2630 * former amount of free space is still not enough; it's unlikely
2631 * there's more free now than before.
2633 * What's more, if we need to get a new page, we will need to acquire
2634 * buffer locks on both old and new pages. To avoid deadlock against
2635 * some other backend trying to get the same two locks in the other
2636 * order, we must be consistent about the order we get the locks in.
2637 * We use the rule "lock the lower-numbered page of the relation
2638 * first". To implement this, we must do RelationGetBufferForTuple
2639 * while not holding the lock on the old page, and we must rely on it
2640 * to get the locks on both pages in the correct order.
2642 if (newtupsize > pagefree)
2644 /* Assume there's no chance to put heaptup on same page. */
2645 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
2650 /* Re-acquire the lock on the old tuple's page. */
2651 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2652 /* Re-check using the up-to-date free space */
2653 pagefree = PageGetHeapFreeSpace(page);
2654 if (newtupsize > pagefree)
2657 * Rats, it doesn't fit anymore. We must now unlock and
2658 * relock to avoid deadlock. Fortunately, this path should
2661 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2662 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
2667 /* OK, it fits here, so we're done. */
2674 /* No TOAST work needed, and it'll fit on same page */
2675 already_marked = false;
2681 * At this point newbuf and buffer are both pinned and locked, and newbuf
2682 * has enough space for the new tuple. If they are the same buffer, only
2686 if (newbuf == buffer)
2689 * Since the new tuple is going into the same page, we might be able
2690 * to do a HOT update. Check if any of the index columns have been
2691 * changed. If not, then HOT update is possible.
2693 if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
2694 use_hot_update = true;
2698 /* Set a hint that the old page could use prune/defrag */
2702 /* NO EREPORT(ERROR) from here till changes are logged */
2703 START_CRIT_SECTION();
2706 * If this transaction commits, the old tuple will become DEAD sooner or
2707 * later. Set flag that this page is a candidate for pruning once our xid
2708 * falls below the OldestXmin horizon. If the transaction finally aborts,
2709 * the subsequent page pruning will be a no-op and the hint will be
2712 * XXX Should we set hint on newbuf as well? If the transaction aborts,
2713 * there would be a prunable tuple in the newbuf; but for now we choose
2714 * not to optimize for aborts. Note that heap_xlog_update must be kept in
2715 * sync if this decision changes.
2717 PageSetPrunable(page, xid);
2721 /* Mark the old tuple as HOT-updated */
2722 HeapTupleSetHotUpdated(&oldtup);
2723 /* And mark the new tuple as heap-only */
2724 HeapTupleSetHeapOnly(heaptup);
2725 /* Mark the caller's copy too, in case different from heaptup */
2726 HeapTupleSetHeapOnly(newtup);
2730 /* Make sure tuples are correctly marked as not-HOT */
2731 HeapTupleClearHotUpdated(&oldtup);
2732 HeapTupleClearHeapOnly(heaptup);
2733 HeapTupleClearHeapOnly(newtup);
2736 RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
2738 if (!already_marked)
2740 /* Clear obsolete visibility flags ... */
2741 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2743 HEAP_XMAX_IS_MULTI |
2746 /* ... and store info about transaction updating this tuple */
2747 HeapTupleHeaderSetXmax(oldtup.t_data, xid);
2748 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
2751 /* record address of new tuple in t_ctid of old one */
2752 oldtup.t_data->t_ctid = heaptup->t_self;
2754 if (newbuf != buffer)
2755 MarkBufferDirty(newbuf);
2756 MarkBufferDirty(buffer);
2759 * Note: we mustn't clear PD_ALL_VISIBLE flags before writing the WAL
2760 * record, because log_heap_update looks at those flags to set the
2761 * corresponding flags in the WAL record.
2765 if (!relation->rd_istemp)
2767 XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
2768 newbuf, heaptup, false);
2770 if (newbuf != buffer)
2772 PageSetLSN(BufferGetPage(newbuf), recptr);
2773 PageSetTLI(BufferGetPage(newbuf), ThisTimeLineID);
2775 PageSetLSN(BufferGetPage(buffer), recptr);
2776 PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
2779 /* Clear PD_ALL_VISIBLE flags */
2780 if (PageIsAllVisible(BufferGetPage(buffer)))
2782 all_visible_cleared = true;
2783 PageClearAllVisible(BufferGetPage(buffer));
2785 if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
2787 all_visible_cleared_new = true;
2788 PageClearAllVisible(BufferGetPage(newbuf));
2793 if (newbuf != buffer)
2794 LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
2795 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2798 * Mark old tuple for invalidation from system caches at next command
2799 * boundary. We have to do this before releasing the buffer because we
2800 * need to look at the contents of the tuple.
2802 CacheInvalidateHeapTuple(relation, &oldtup);
2804 /* Clear bits in visibility map */
2805 if (all_visible_cleared)
2806 visibilitymap_clear(relation, BufferGetBlockNumber(buffer));
2807 if (all_visible_cleared_new)
2808 visibilitymap_clear(relation, BufferGetBlockNumber(newbuf));
2810 /* Now we can release the buffer(s) */
2811 if (newbuf != buffer)
2812 ReleaseBuffer(newbuf);
2813 ReleaseBuffer(buffer);
2816 * If new tuple is cachable, mark it for invalidation from the caches in
2817 * case we abort. Note it is OK to do this after releasing the buffer,
2818 * because the heaptup data structure is all in local memory, not in the
2821 CacheInvalidateHeapTuple(relation, heaptup);
2824 * Release the lmgr tuple lock, if we had it.
2826 if (have_tuple_lock)
2827 UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2829 pgstat_count_heap_update(relation, use_hot_update);
2832 * If heaptup is a private copy, release it. Don't forget to copy t_self
2833 * back to the caller's image, too.
2835 if (heaptup != newtup)
2837 newtup->t_self = heaptup->t_self;
2838 heap_freetuple(heaptup);
2841 bms_free(hot_attrs);
2843 return HeapTupleMayBeUpdated;
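/*
 * A minimal usage sketch: on success the caller can consult newtup->t_self
 * and the HEAP_ONLY_TUPLE flag to decide whether new index entries must be
 * built, much as the executor does.  "rel", "otid", "newtup" and "cid" are
 * assumed to be supplied by the caller:
 *
 *		result = heap_update(rel, otid, newtup, &update_ctid, &update_xmax,
 *							 cid, InvalidSnapshot, true);
 *		if (result == HeapTupleMayBeUpdated && !HeapTupleIsHeapOnly(newtup))
 *		{
 *			... not a HOT update: insert index entries for newtup->t_self ...
 *		}
 *		else if (result == HeapTupleUpdated)
 *		{
 *			... concurrent update/delete: examine update_ctid/update_xmax ...
 *		}
 */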
2847 * Check if the specified attribute's value is same in both given tuples.
2848 * Subroutine for HeapSatisfiesHOTUpdate.
2851 heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
2852 HeapTuple tup1, HeapTuple tup2)
2858 Form_pg_attribute att;
2861 * If it's a whole-tuple reference, say "not equal". It's not really
2862 * worth supporting this case, since it could only succeed after a no-op
2863 * update, which is hardly a case worth optimizing for.
2869 * Likewise, automatically say "not equal" for any system attribute other
2870 * than OID and tableOID; we cannot expect these to be consistent in a HOT
2871 * chain, or even to be set correctly yet in the new tuple.
2875 if (attrnum != ObjectIdAttributeNumber &&
2876 attrnum != TableOidAttributeNumber)
2881 * Extract the corresponding values. XXX this is pretty inefficient if
2882 * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
2883 * single heap_deform_tuple call on each tuple, instead? But that doesn't
2884 * work for system columns ...
2886 value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
2887 value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
2890 * If one value is NULL and other is not, then they are certainly not
2893 if (isnull1 != isnull2)
2897 * If both are NULL, they can be considered equal.
2903 * We do simple binary comparison of the two datums. This may be overly
2904 * strict because there can be multiple binary representations for the
2905 * same logical value. But we should be OK as long as there are no false
2906 * positives. Using a type-specific equality operator is messy because
2907 * there could be multiple notions of equality in different operator
2908 * classes; furthermore, we cannot safely invoke user-defined functions
2909 * while holding exclusive buffer lock.
2913 /* The only allowed system columns are OIDs, so do this */
2914 return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
2918 Assert(attrnum <= tupdesc->natts);
2919 att = tupdesc->attrs[attrnum - 1];
2920 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
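/*
 * Worked example of why binary comparison is "overly strict" but safe: two
 * numeric datums built from "1.0" and "1.00" are equal under the type's own
 * equality operator yet differ byte-for-byte, so we merely forgo a legal HOT
 * update (a false "not equal" costs performance, never correctness).  This
 * is only an illustration, not code used here:
 *
 *		Datum	a = DirectFunctionCall3(numeric_in, CStringGetDatum("1.0"),
 *										ObjectIdGetDatum(InvalidOid),
 *										Int32GetDatum(-1));
 *		Datum	b = DirectFunctionCall3(numeric_in, CStringGetDatum("1.00"),
 *										ObjectIdGetDatum(InvalidOid),
 *										Int32GetDatum(-1));
 *
 *		datumIsEqual(a, b, false, -1)	yields false
 */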
2925 * Check if the old and new tuples represent a HOT-safe update. To be able
2926 * to do a HOT update, we must not have changed any columns used in index
2929 * The set of attributes to be checked is passed in (we dare not try to
2930 * compute it while holding exclusive buffer lock...) NOTE that hot_attrs
2931 * is destructively modified! That is OK since this is invoked at most once
2934 * Returns true if safe to do HOT update.
2937 HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
2938 HeapTuple oldtup, HeapTuple newtup)
2942 while ((attrnum = bms_first_member(hot_attrs)) >= 0)
2944 /* Adjust for system attributes */
2945 attrnum += FirstLowInvalidHeapAttributeNumber;
2947 /* If the attribute value has changed, we can't do HOT update */
2948 if (!heap_tuple_attr_equals(RelationGetDescr(relation), attrnum,
2957 * simple_heap_update - replace a tuple
2959 * This routine may be used to update a tuple when concurrent updates of
2960 * the target tuple are not expected (for example, because we have a lock
2961 * on the relation associated with the tuple). Any failure is reported
2965 simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
2968 ItemPointerData update_ctid;
2969 TransactionId update_xmax;
2971 result = heap_update(relation, otid, tup,
2972 &update_ctid, &update_xmax,
2973 GetCurrentCommandId(true), InvalidSnapshot,
2974 true /* wait for commit */ );
2977 case HeapTupleSelfUpdated:
2978 /* Tuple was already updated in current command? */
2979 elog(ERROR, "tuple already updated by self");
2982 case HeapTupleMayBeUpdated:
2983 /* done successfully */
2986 case HeapTupleUpdated:
2987 elog(ERROR, "tuple concurrently updated");
2991 elog(ERROR, "unrecognized heap_update status: %u", result);
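/*
 * A minimal sketch of the usual catalog-maintenance pattern around
 * simple_heap_update: update the row, then let CatalogUpdateIndexes insert
 * whatever index entries a non-HOT update requires.  "rel" and "newtup" are
 * assumed to be supplied by the caller:
 *
 *		simple_heap_update(rel, &newtup->t_self, newtup);
 *		CatalogUpdateIndexes(rel, newtup);
 */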
2997 * heap_lock_tuple - lock a tuple in shared or exclusive mode
2999 * Note that this acquires a buffer pin, which the caller must release.
3002 * relation: relation containing tuple (caller must hold suitable lock)
3003 * tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
3004 * cid: current command ID (used for visibility test, and stored into
3005 * tuple's cmax if lock is successful)
3006 * mode: indicates if shared or exclusive tuple lock is desired
3007 * nowait: if true, ereport rather than blocking if lock not available
3009 * Output parameters:
3010 * *tuple: all fields filled in
3011 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
3012 * *ctid: set to tuple's t_ctid, but only in failure cases
3013 * *update_xmax: set to tuple's xmax, but only in failure cases
3015 * Function result may be:
3016 * HeapTupleMayBeUpdated: lock was successfully acquired
3017 * HeapTupleSelfUpdated: lock failed because tuple updated by self
3018 * HeapTupleUpdated: lock failed because tuple updated by other xact
3020 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
3021 * If t_ctid is the same as t_self, the tuple was deleted; if different, the
3022 * tuple was updated, and t_ctid is the location of the replacement tuple.
3023 * (t_xmax is needed to verify that the replacement tuple matches.)
3026 * NOTES: because the shared-memory lock table is of finite size, but users
3027 * could reasonably want to lock large numbers of tuples, we do not rely on
3028 * the standard lock manager to store tuple-level locks over the long term.
3029 * Instead, a tuple is marked as locked by setting the current transaction's
3030 * XID as its XMAX, and setting additional infomask bits to distinguish this
3031 * usage from the more normal case of having deleted the tuple. When
3032 * multiple transactions concurrently share-lock a tuple, the first locker's
3033 * XID is replaced in XMAX with a MultiTransactionId representing the set of
3034 * XIDs currently holding share-locks.
3036 * When it is necessary to wait for a tuple-level lock to be released, the
3037 * basic delay is provided by XactLockTableWait or MultiXactIdWait on the
3038 * contents of the tuple's XMAX. However, that mechanism will release all
3039 * waiters concurrently, so there would be a race condition as to which
3040 * waiter gets the tuple, potentially leading to indefinite starvation of
3041 * some waiters. The possibility of share-locking makes the problem much
3042 * worse --- a steady stream of share-lockers can easily block an exclusive
3043 * locker forever. To provide more reliable semantics about who gets a
3044 * tuple-level lock first, we use the standard lock manager. The protocol
3045 * for waiting for a tuple-level lock is really
3047 * XactLockTableWait()
3048 * mark tuple as locked by me
3050 * When there are multiple waiters, arbitration of who is to get the lock next
3051 * is provided by LockTuple(). However, at most one tuple-level lock will
3052 * be held or awaited per backend at any time, so we don't risk overflow
3053 * of the lock table. Note that incoming share-lockers are required to
3054 * do LockTuple as well, if there is any conflict, to ensure that they don't
3055 * starve out waiting exclusive-lockers. However, if there is not any active
3056 * conflict for a tuple, we don't incur any extra overhead.
3059 heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer,
3060 ItemPointer ctid, TransactionId *update_xmax,
3061 CommandId cid, LockTupleMode mode, bool nowait)
3064 ItemPointer tid = &(tuple->t_self);
3069 uint16 old_infomask;
3070 uint16 new_infomask;
3071 LOCKMODE tuple_lock_type;
3072 bool have_tuple_lock = false;
3074 tuple_lock_type = (mode == LockTupleShared) ? ShareLock : ExclusiveLock;
3076 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3077 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3079 page = BufferGetPage(*buffer);
3080 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3081 Assert(ItemIdIsNormal(lp));
3083 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
3084 tuple->t_len = ItemIdGetLength(lp);
3085 tuple->t_tableOid = RelationGetRelid(relation);
3088 result = HeapTupleSatisfiesUpdate(tuple->t_data, cid, *buffer);
3090 if (result == HeapTupleInvisible)
3092 UnlockReleaseBuffer(*buffer);
3093 elog(ERROR, "attempted to lock invisible tuple");
3095 else if (result == HeapTupleBeingUpdated)
3097 TransactionId xwait;
3100 /* must copy state data before unlocking buffer */
3101 xwait = HeapTupleHeaderGetXmax(tuple->t_data);
3102 infomask = tuple->t_data->t_infomask;
3104 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3107 * If we wish to acquire share lock, and the tuple is already
3108 * share-locked by a multixact that includes any subtransaction of the
3109 * current top transaction, then we effectively hold the desired lock
3110 * already. We *must* succeed without trying to take the tuple lock,
3111 * else we will deadlock against anyone waiting to acquire exclusive
3112 * lock. We don't need to make any state changes in this case.
3114 if (mode == LockTupleShared &&
3115 (infomask & HEAP_XMAX_IS_MULTI) &&
3116 MultiXactIdIsCurrent((MultiXactId) xwait))
3118 Assert(infomask & HEAP_XMAX_SHARED_LOCK);
3119 /* Probably can't hold tuple lock here, but may as well check */
3120 if (have_tuple_lock)
3121 UnlockTuple(relation, tid, tuple_lock_type);
3122 return HeapTupleMayBeUpdated;
3126 * Acquire tuple lock to establish our priority for the tuple.
3127 * LockTuple will release us when we are next-in-line for the tuple.
3128 * We must do this even if we are share-locking.
3130 * If we are forced to "start over" below, we keep the tuple lock;
3131 * this arranges that we stay at the head of the line while rechecking
3134 if (!have_tuple_lock)
3138 if (!ConditionalLockTuple(relation, tid, tuple_lock_type))
3140 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3141 errmsg("could not obtain lock on row in relation \"%s\"",
3142 RelationGetRelationName(relation))));
3145 LockTuple(relation, tid, tuple_lock_type);
3146 have_tuple_lock = true;
3149 if (mode == LockTupleShared && (infomask & HEAP_XMAX_SHARED_LOCK))
3152 * Acquiring sharelock when there's at least one sharelocker
3153 * already. We need not wait for them to complete.
3155 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3158 * Make sure it's still a shared lock, else start over. (It's OK
3159 * if the ownership of the shared lock has changed, though.)
3161 if (!(tuple->t_data->t_infomask & HEAP_XMAX_SHARED_LOCK))
3164 else if (infomask & HEAP_XMAX_IS_MULTI)
3166 /* wait for multixact to end */
3169 if (!ConditionalMultiXactIdWait((MultiXactId) xwait))
3171 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3172 errmsg("could not obtain lock on row in relation \"%s\"",
3173 RelationGetRelationName(relation))));
3176 MultiXactIdWait((MultiXactId) xwait);
3178 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3181 * If xwait had just locked the tuple then some other xact could
3182 * update this tuple before we get to this point. Check for xmax
3183 * change, and start over if so.
3185 if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
3186 !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
3191 * You might think the multixact is necessarily done here, but not
3192 * so: it could have surviving members, namely our own xact or
3193 * other subxacts of this backend. It is legal for us to lock the
3194 * tuple in either case, however. We don't bother changing the
3195 * on-disk hint bits since we are about to overwrite the xmax
3201 /* wait for regular transaction to end */
3204 if (!ConditionalXactLockTableWait(xwait))
3206 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3207 errmsg("could not obtain lock on row in relation \"%s\"",
3208 RelationGetRelationName(relation))));
3211 XactLockTableWait(xwait);
3213 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3216 * xwait is done, but if xwait had just locked the tuple then some
3217 * other xact could update this tuple before we get to this point.
3218 * Check for xmax change, and start over if so.
3220 if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
3221 !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
3225 /* Otherwise check if it committed or aborted */
3226 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
3230 * We may lock if previous xmax aborted, or if it committed but only
3231 * locked the tuple without updating it. The case where we didn't
3232 * wait because we are joining an existing shared lock is correctly
3235 if (tuple->t_data->t_infomask & (HEAP_XMAX_INVALID |
3237 result = HeapTupleMayBeUpdated;
3239 result = HeapTupleUpdated;
3242 if (result != HeapTupleMayBeUpdated)
3244 Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated);
3245 Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
3246 *ctid = tuple->t_data->t_ctid;
3247 *update_xmax = HeapTupleHeaderGetXmax(tuple->t_data);
3248 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3249 if (have_tuple_lock)
3250 UnlockTuple(relation, tid, tuple_lock_type);
3255 * We might already hold the desired lock (or stronger), possibly under a
3256 * different subtransaction of the current top transaction. If so, there
3257 * is no need to change state or issue a WAL record. We already handled
3258 * the case where this is true for xmax being a MultiXactId, so now check
3259 * for cases where it is a plain TransactionId.
3261 * Note in particular that this covers the case where we already hold
3262 * exclusive lock on the tuple and the caller only wants shared lock. It
3263 * would certainly not do to give up the exclusive lock.
3265 xmax = HeapTupleHeaderGetXmax(tuple->t_data);
3266 old_infomask = tuple->t_data->t_infomask;
3268 if (!(old_infomask & (HEAP_XMAX_INVALID |
3269 HEAP_XMAX_COMMITTED |
3270 HEAP_XMAX_IS_MULTI)) &&
3271 (mode == LockTupleShared ?
3272 (old_infomask & HEAP_IS_LOCKED) :
3273 (old_infomask & HEAP_XMAX_EXCL_LOCK)) &&
3274 TransactionIdIsCurrentTransactionId(xmax))
3276 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3277 /* Probably can't hold tuple lock here, but may as well check */
3278 if (have_tuple_lock)
3279 UnlockTuple(relation, tid, tuple_lock_type);
3280 return HeapTupleMayBeUpdated;
3284 * Compute the new xmax and infomask to store into the tuple. Note we do
3285 * not modify the tuple just yet, because that would leave it in the wrong
3286 * state if multixact.c elogs.
3288 xid = GetCurrentTransactionId();
3290 new_infomask = old_infomask & ~(HEAP_XMAX_COMMITTED |
3292 HEAP_XMAX_IS_MULTI |
3296 if (mode == LockTupleShared)
3299 * If this is the first acquisition of a shared lock in the current
3300 * transaction, set my per-backend OldestMemberMXactId setting. We can
3301 * be certain that the transaction will never become a member of any
3302 * older MultiXactIds than that. (We have to do this even if we end
3303 * up just using our own TransactionId below, since some other backend
3304 * could incorporate our XID into a MultiXact immediately afterwards.)
3306 MultiXactIdSetOldestMember();
3308 new_infomask |= HEAP_XMAX_SHARED_LOCK;
3311 * Check to see if we need a MultiXactId because there are multiple
3314 * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID bit if
3315 * the xmax was a MultiXactId but it was not running anymore. There is
3316 * a race condition, which is that the MultiXactId may have finished
3317 * since then, but that uncommon case is handled within
3318 * MultiXactIdExpand.
3320 * There is a similar race condition possible when the old xmax was a
3321 * regular TransactionId. We test TransactionIdIsInProgress again
3322 * just to narrow the window, but it's still possible to end up
3323 * creating an unnecessary MultiXactId. Fortunately this is harmless.
3325 if (!(old_infomask & (HEAP_XMAX_INVALID | HEAP_XMAX_COMMITTED)))
3327 if (old_infomask & HEAP_XMAX_IS_MULTI)
3330 * If the XMAX is already a MultiXactId, then we need to
3331 * expand it to include our own TransactionId.
3333 xid = MultiXactIdExpand((MultiXactId) xmax, xid);
3334 new_infomask |= HEAP_XMAX_IS_MULTI;
3336 else if (TransactionIdIsInProgress(xmax))
3339 * If the XMAX is a valid TransactionId, then we need to
3340 * create a new MultiXactId that includes both the old locker
3341 * and our own TransactionId.
3343 xid = MultiXactIdCreate(xmax, xid);
3344 new_infomask |= HEAP_XMAX_IS_MULTI;
3349 * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
3350 * as running, but it finished before
3351 * TransactionIdIsInProgress() got to run. Treat it like
3352 * there's no locker in the tuple.
3359 * There was no previous locker, so just insert our own
3366 /* We want an exclusive lock on the tuple */
3367 new_infomask |= HEAP_XMAX_EXCL_LOCK;
3370 START_CRIT_SECTION();
3373 * Store transaction information of xact locking the tuple.
3375 * Note: Cmax is meaningless in this context, so don't set it; this avoids
3376 * possibly generating a useless combo CID.
3378 tuple->t_data->t_infomask = new_infomask;
3379 HeapTupleHeaderClearHotUpdated(tuple->t_data);
3380 HeapTupleHeaderSetXmax(tuple->t_data, xid);
3381 /* Make sure there is no forward chain link in t_ctid */
3382 tuple->t_data->t_ctid = *tid;
3384 MarkBufferDirty(*buffer);
3387 * XLOG stuff. You might think that we don't need an XLOG record because
3388 * there is no state change worth restoring after a crash. You would be
3389 * wrong, however: we have just written either a TransactionId or a
3390 * MultiXactId that may never have been seen on disk before, and we need
3391 * to make sure that there are XLOG entries covering those ID numbers.
3392 * Else the same IDs might be re-used after a crash, which would be
3393 * disastrous if this page made it to disk before the crash. Essentially
3394 * we have to enforce the WAL log-before-data rule even in this case.
3395 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
3396 * entries for everything anyway.)
3398 if (!relation->rd_istemp)
3402 XLogRecData rdata[2];
3404 xlrec.target.node = relation->rd_node;
3405 xlrec.target.tid = tuple->t_self;
3406 xlrec.locking_xid = xid;
3407 xlrec.xid_is_mxact = ((new_infomask & HEAP_XMAX_IS_MULTI) != 0);
3408 xlrec.shared_lock = (mode == LockTupleShared);
3409 rdata[0].data = (char *) &xlrec;
3410 rdata[0].len = SizeOfHeapLock;
3411 rdata[0].buffer = InvalidBuffer;
3412 rdata[0].next = &(rdata[1]);
3414 rdata[1].data = NULL;
3416 rdata[1].buffer = *buffer;
3417 rdata[1].buffer_std = true;
3418 rdata[1].next = NULL;
3420 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK, rdata);
3422 PageSetLSN(page, recptr);
3423 PageSetTLI(page, ThisTimeLineID);
3428 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3431 * Don't update the visibility map here. Locking a tuple doesn't
3432 * change visibility info.
3436 * Now that we have successfully marked the tuple as locked, we can
3437 * release the lmgr tuple lock, if we had it.
3439 if (have_tuple_lock)
3440 UnlockTuple(relation, tid, tuple_lock_type);
3442 return HeapTupleMayBeUpdated;
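/*
 * A minimal SELECT ... FOR UPDATE style usage sketch; "rel", "tid" and "cid"
 * are assumed to be supplied by the caller.  Note that only the buffer pin,
 * not the content lock, is still held when the call returns:
 *
 *		HeapTupleData tuple;
 *		Buffer		buffer;
 *
 *		tuple.t_self = *tid;
 *		result = heap_lock_tuple(rel, &tuple, &buffer,
 *								 &update_ctid, &update_xmax,
 *								 cid, LockTupleExclusive, false);
 *		ReleaseBuffer(buffer);
 *		if (result == HeapTupleUpdated)
 *		{
 *			... the row moved; follow update_ctid/update_xmax if desired ...
 *		}
 */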
3447 * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
3449 * Overwriting violates both MVCC and transactional safety, so the uses
3450 * of this function in Postgres are extremely limited. Nonetheless we
3451 * find some places to use it.
3453 * The tuple cannot change size, and therefore it's reasonable to assume
3454 * that its null bitmap (if any) doesn't change either. So we just
3455 * overwrite the data portion of the tuple without touching the null
3456 * bitmap or any of the header fields.
3458 * tuple is an in-memory tuple structure containing the data to be written
3459 * over the target tuple. Also, tuple->t_self identifies the target tuple.
3462 heap_inplace_update(Relation relation, HeapTuple tuple)
3466 OffsetNumber offnum;
3468 HeapTupleHeader htup;
3472 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
3473 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3474 page = (Page) BufferGetPage(buffer);
3476 offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
3477 if (PageGetMaxOffsetNumber(page) >= offnum)
3478 lp = PageGetItemId(page, offnum);
3480 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
3481 elog(ERROR, "heap_inplace_update: invalid lp");
3483 htup = (HeapTupleHeader) PageGetItem(page, lp);
3485 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
3486 newlen = tuple->t_len - tuple->t_data->t_hoff;
3487 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
3488 elog(ERROR, "heap_inplace_update: wrong tuple length");
3490 /* NO EREPORT(ERROR) from here till changes are logged */
3491 START_CRIT_SECTION();
3493 memcpy((char *) htup + htup->t_hoff,
3494 (char *) tuple->t_data + tuple->t_data->t_hoff,
3497 MarkBufferDirty(buffer);
3500 if (!relation->rd_istemp)
3502 xl_heap_inplace xlrec;
3504 XLogRecData rdata[2];
3506 xlrec.target.node = relation->rd_node;
3507 xlrec.target.tid = tuple->t_self;
3509 rdata[0].data = (char *) &xlrec;
3510 rdata[0].len = SizeOfHeapInplace;
3511 rdata[0].buffer = InvalidBuffer;
3512 rdata[0].next = &(rdata[1]);
3514 rdata[1].data = (char *) htup + htup->t_hoff;
3515 rdata[1].len = newlen;
3516 rdata[1].buffer = buffer;
3517 rdata[1].buffer_std = true;
3518 rdata[1].next = NULL;
3520 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE, rdata);
3522 PageSetLSN(page, recptr);
3523 PageSetTLI(page, ThisTimeLineID);
3528 UnlockReleaseBuffer(buffer);
3530 /* Send out shared cache inval if necessary */
3531 if (!IsBootstrapProcessingMode())
3532 CacheInvalidateHeapTuple(relation, tuple);
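/*
 * A rough sketch of the classic use of this routine: VACUUM overwriting the
 * statistics columns of a pg_class row without changing its visibility.
 * "rd" (pg_class, opened with RowExclusiveLock), "relid", "num_pages" and
 * "num_tuples" are assumed to come from the caller:
 *
 *		ctup = SearchSysCacheCopy(RELOID, ObjectIdGetDatum(relid), 0, 0, 0);
 *		pgcform = (Form_pg_class) GETSTRUCT(ctup);
 *		pgcform->relpages = (int32) num_pages;
 *		pgcform->reltuples = (float4) num_tuples;
 *		heap_inplace_update(rd, ctup);
 */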
3539 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
3540 * are older than the specified cutoff XID. If so, replace them with
3541 * FrozenTransactionId or InvalidTransactionId as appropriate, and return
3542 * TRUE. Return FALSE if nothing was changed.
3544 * It is assumed that the caller has checked the tuple with
3545 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
3546 * (else we should be removing the tuple, not freezing it).
3548 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
3549 * XID older than it could neither be running nor seen as running by any
3550 * open transaction. This ensures that the replacement will not change
3551 * anyone's idea of the tuple state. Also, since we assume the tuple is
3552 * not HEAPTUPLE_DEAD, the fact that an XID is not still running allows us
3553 * to assume that it is either committed good or aborted, as appropriate;
3554 * so we need no external state checks to decide what to do. (This is good
3555 * because this function is applied during WAL recovery, when we don't have
3556 * access to any such state, and can't depend on the hint bits to be set.)
3558 * In lazy VACUUM, we call this while initially holding only a shared lock
3559 * on the tuple's buffer. If any change is needed, we trade that in for an
3560 * exclusive lock before making the change. Caller should pass the buffer ID
3561 * if shared lock is held, InvalidBuffer if exclusive lock is already held.
3563 * Note: it might seem we could make the changes without exclusive lock, since
3564 * TransactionId read/write is assumed atomic anyway. However there is a race
3565 * condition: someone who just fetched an old XID that we overwrite here could
3566 * conceivably not finish checking the XID against pg_clog before we finish
3567 * the VACUUM and perhaps truncate off the part of pg_clog it needs. Getting
3568 * exclusive lock ensures no other backend is in process of checking the
3569 * tuple status. Also, getting exclusive lock makes it safe to adjust the
3573 heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
3576 bool changed = false;
3579 xid = HeapTupleHeaderGetXmin(tuple);
3580 if (TransactionIdIsNormal(xid) &&
3581 TransactionIdPrecedes(xid, cutoff_xid))
3583 if (buf != InvalidBuffer)
3585 /* trade in share lock for exclusive lock */
3586 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3587 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3588 buf = InvalidBuffer;
3590 HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
3593 * Might as well fix the hint bits too; usually XMIN_COMMITTED will
3594 * already be set here, but there's a small chance not.
3596 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
3597 tuple->t_infomask |= HEAP_XMIN_COMMITTED;
3602 * When we release shared lock, it's possible for someone else to change
3603 * xmax before we get the lock back, so repeat the check after acquiring
3604 * exclusive lock. (We don't need this pushup for xmin, because only
3605 * VACUUM could be interested in changing an existing tuple's xmin, and
3606 * there's only one VACUUM allowed on a table at a time.)
3609 if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
3611 xid = HeapTupleHeaderGetXmax(tuple);
3612 if (TransactionIdIsNormal(xid) &&
3613 TransactionIdPrecedes(xid, cutoff_xid))
3615 if (buf != InvalidBuffer)
3617 /* trade in share lock for exclusive lock */
3618 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3619 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3620 buf = InvalidBuffer;
3621 goto recheck_xmax; /* see comment above */
3623 HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
3626 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
3627 * + LOCKED. Normalize to INVALID just to be sure no one gets
3630 tuple->t_infomask &= ~HEAP_XMAX_COMMITTED;
3631 tuple->t_infomask |= HEAP_XMAX_INVALID;
3632 HeapTupleHeaderClearHotUpdated(tuple);
3639 * XXX perhaps someday we should zero out very old MultiXactIds here?
3641 * The only way a stale MultiXactId could pose a problem is if a
3642 * tuple, having once been multiply-share-locked, is not touched by
3643 * any vacuum or attempted lock or deletion for just over 4G MultiXact
3644 * creations, and then in the probably-narrow window where its xmax
3645 * is again a live MultiXactId, someone tries to lock or delete it.
3646 * Even then, another share-lock attempt would work fine. An
3647 * exclusive-lock or delete attempt would face unexpected delay, or
3648 * in the very worst case get a deadlock error. This seems an
3649 * extremely low-probability scenario with minimal downside even if
3650 * it does happen, so for now we don't do the extra bookkeeping that
3651 * would be needed to clean out MultiXactIds.
3657 * Although xvac per se could only be set by VACUUM, it shares physical
3658 * storage space with cmax, and so could be wiped out by someone setting
3659 * xmax. Hence recheck after changing lock, same as for xmax itself.
3662 if (tuple->t_infomask & HEAP_MOVED)
3664 xid = HeapTupleHeaderGetXvac(tuple);
3665 if (TransactionIdIsNormal(xid) &&
3666 TransactionIdPrecedes(xid, cutoff_xid))
3668 if (buf != InvalidBuffer)
3670 /* trade in share lock for exclusive lock */
3671 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3672 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3673 buf = InvalidBuffer;
3674 goto recheck_xvac; /* see comment above */
3678 * If a MOVED_OFF tuple is not dead, the xvac transaction must
3679 * have failed; whereas a non-dead MOVED_IN tuple must mean the
3680 * xvac transaction succeeded.
3682 if (tuple->t_infomask & HEAP_MOVED_OFF)
3683 HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
3685 HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
3688 * Might as well fix the hint bits too; usually XMIN_COMMITTED
3689 * will already be set here, but there's a small chance not.
3691 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
3692 tuple->t_infomask |= HEAP_XMIN_COMMITTED;
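/*
 * A rough sketch of how lazy VACUUM drives this function: it already holds
 * an exclusive buffer lock (hence InvalidBuffer), remembers which offsets it
 * changed, and emits a single freeze record for the page.  "onerel", "buf",
 * "page", "FreezeLimit", "frozen" and "offnum" are assumed to be the
 * caller's variables:
 *
 *		if (heap_freeze_tuple(tuple.t_data, FreezeLimit, InvalidBuffer))
 *			frozen[nfrozen++] = offnum;
 *		...
 *		if (nfrozen > 0)
 *		{
 *			MarkBufferDirty(buf);
 *			if (!onerel->rd_istemp)
 *			{
 *				recptr = log_heap_freeze(onerel, buf, FreezeLimit,
 *										 frozen, nfrozen);
 *				PageSetLSN(page, recptr);
 *				PageSetTLI(page, ThisTimeLineID);
 *			}
 *		}
 */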
3702 * heap_markpos - mark scan position
3706 heap_markpos(HeapScanDesc scan)
3708 /* Note: no locking manipulations needed */
3710 if (scan->rs_ctup.t_data != NULL)
3712 scan->rs_mctid = scan->rs_ctup.t_self;
3713 if (scan->rs_pageatatime)
3714 scan->rs_mindex = scan->rs_cindex;
3717 ItemPointerSetInvalid(&scan->rs_mctid);
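/*
 * A minimal sketch of how mark/restore is used (for instance underneath
 * merge join): remember the current position, read ahead, then rewind so
 * the marked tuple is current again; heap_restrpos below does the rewind.
 * "scan" and "dir" are assumed to be the caller's:
 *
 *		heap_markpos(scan);
 *		... heap_getnext(scan, dir) as often as needed ...
 *		heap_restrpos(scan);
 */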
3721 * heap_restrpos - restore position to marked location
3725 heap_restrpos(HeapScanDesc scan)
3727 /* XXX no amrestrpos checking that ammarkpos called */
3729 if (!ItemPointerIsValid(&scan->rs_mctid))
3731 scan->rs_ctup.t_data = NULL;
3734 * unpin scan buffers
3736 if (BufferIsValid(scan->rs_cbuf))
3737 ReleaseBuffer(scan->rs_cbuf);
3738 scan->rs_cbuf = InvalidBuffer;
3739 scan->rs_cblock = InvalidBlockNumber;
3740 scan->rs_inited = false;
3745 * If we reached end of scan, rs_inited will now be false. We must
3746 * reset it to true to keep heapgettup from doing the wrong thing.
3748 scan->rs_inited = true;
3749 scan->rs_ctup.t_self = scan->rs_mctid;
3750 if (scan->rs_pageatatime)
3752 scan->rs_cindex = scan->rs_mindex;
3753 heapgettup_pagemode(scan,
3754 NoMovementScanDirection,
3755 0, /* needn't recheck scan keys */
3760 NoMovementScanDirection,
3761 0, /* needn't recheck scan keys */
3767 * Perform XLogInsert for a heap-clean operation. Caller must already
3768 * have modified the buffer and marked it dirty.
3770 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
3771 * zero-based tuple indexes. Now they are one-based like other uses
3775 log_heap_clean(Relation reln, Buffer buffer,
3776 OffsetNumber *redirected, int nredirected,
3777 OffsetNumber *nowdead, int ndead,
3778 OffsetNumber *nowunused, int nunused,
3781 xl_heap_clean xlrec;
3784 XLogRecData rdata[4];
3786 /* Caller should not call me on a temp relation */
3787 Assert(!reln->rd_istemp);
3789 xlrec.node = reln->rd_node;
3790 xlrec.block = BufferGetBlockNumber(buffer);
3791 xlrec.nredirected = nredirected;
3792 xlrec.ndead = ndead;
3794 rdata[0].data = (char *) &xlrec;
3795 rdata[0].len = SizeOfHeapClean;
3796 rdata[0].buffer = InvalidBuffer;
3797 rdata[0].next = &(rdata[1]);
3800 * The OffsetNumber arrays are not actually in the buffer, but we pretend
3801 * that they are. When XLogInsert stores the whole buffer, the offset
3802 * arrays need not be stored too. Note that even if all three arrays are
3803 * empty, we want to expose the buffer as a candidate for whole-page
3804 * storage, since this record type implies a defragmentation operation
3805 * even if no item pointers changed state.
3807 if (nredirected > 0)
3809 rdata[1].data = (char *) redirected;
3810 rdata[1].len = nredirected * sizeof(OffsetNumber) * 2;
3814 rdata[1].data = NULL;
3817 rdata[1].buffer = buffer;
3818 rdata[1].buffer_std = true;
3819 rdata[1].next = &(rdata[2]);
3823 rdata[2].data = (char *) nowdead;
3824 rdata[2].len = ndead * sizeof(OffsetNumber);
3828 rdata[2].data = NULL;
3831 rdata[2].buffer = buffer;
3832 rdata[2].buffer_std = true;
3833 rdata[2].next = &(rdata[3]);
3837 rdata[3].data = (char *) nowunused;
3838 rdata[3].len = nunused * sizeof(OffsetNumber);
3842 rdata[3].data = NULL;
3845 rdata[3].buffer = buffer;
3846 rdata[3].buffer_std = true;
3847 rdata[3].next = NULL;
3849 info = redirect_move ? XLOG_HEAP2_CLEAN_MOVE : XLOG_HEAP2_CLEAN;
3850 recptr = XLogInsert(RM_HEAP2_ID, info, rdata);
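/*
 * A rough sketch of the calling pattern (page pruning): the page is modified
 * and the buffer dirtied inside a critical section, then this record is
 * emitted and the page LSN/TLI advanced.  "relation", "buffer", the offset
 * arrays and their counts are assumed to be the caller's:
 *
 *		START_CRIT_SECTION();
 *		heap_page_prune_execute(buffer, redirected, nredirected,
 *								nowdead, ndead, nowunused, nunused, false);
 *		MarkBufferDirty(buffer);
 *		if (!relation->rd_istemp)
 *		{
 *			recptr = log_heap_clean(relation, buffer,
 *									redirected, nredirected,
 *									nowdead, ndead,
 *									nowunused, nunused, false);
 *			PageSetLSN(BufferGetPage(buffer), recptr);
 *			PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
 *		}
 *		END_CRIT_SECTION();
 */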
3856 * Perform XLogInsert for a heap-freeze operation. Caller must already
3857 * have modified the buffer and marked it dirty.
3860 log_heap_freeze(Relation reln, Buffer buffer,
3861 TransactionId cutoff_xid,
3862 OffsetNumber *offsets, int offcnt)
3864 xl_heap_freeze xlrec;
3866 XLogRecData rdata[2];
3868 /* Caller should not call me on a temp relation */
3869 Assert(!reln->rd_istemp);
3870 /* nor when there are no tuples to freeze */
3873 xlrec.node = reln->rd_node;
3874 xlrec.block = BufferGetBlockNumber(buffer);
3875 xlrec.cutoff_xid = cutoff_xid;
3877 rdata[0].data = (char *) &xlrec;
3878 rdata[0].len = SizeOfHeapFreeze;
3879 rdata[0].buffer = InvalidBuffer;
3880 rdata[0].next = &(rdata[1]);
3883 * The tuple-offsets array is not actually in the buffer, but pretend that
3884 * it is. When XLogInsert stores the whole buffer, the offsets array need
3885 * not be stored too.
3887 rdata[1].data = (char *) offsets;
3888 rdata[1].len = offcnt * sizeof(OffsetNumber);
3889 rdata[1].buffer = buffer;
3890 rdata[1].buffer_std = true;
3891 rdata[1].next = NULL;
3893 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE, rdata);
3899 * Perform XLogInsert for a heap-update operation. Caller must already
3900 * have modified the buffer(s) and marked them dirty.
3903 log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
3904 Buffer newbuf, HeapTuple newtup, bool move)
3907 * Note: xlhdr is declared to have adequate size and correct alignment for
3908 * an xl_heap_header. However the two tids, if present at all, will be
3909 * packed in with no wasted space after the xl_heap_header; they aren't
3910 * necessarily aligned as implied by this struct declaration.
3918 int hsize = SizeOfHeapHeader;
3919 xl_heap_update xlrec;
3922 XLogRecData rdata[4];
3923 Page page = BufferGetPage(newbuf);
3925 /* Caller should not call me on a temp relation */
3926 Assert(!reln->rd_istemp);
3930 Assert(!HeapTupleIsHeapOnly(newtup));
3931 info = XLOG_HEAP_MOVE;
3933 else if (HeapTupleIsHeapOnly(newtup))
3934 info = XLOG_HEAP_HOT_UPDATE;
3936 info = XLOG_HEAP_UPDATE;
3938 xlrec.target.node = reln->rd_node;
3939 xlrec.target.tid = from;
3940 xlrec.all_visible_cleared = PageIsAllVisible(BufferGetPage(oldbuf));
3941 xlrec.newtid = newtup->t_self;
3942 xlrec.new_all_visible_cleared = PageIsAllVisible(BufferGetPage(newbuf));
3944 rdata[0].data = (char *) &xlrec;
3945 rdata[0].len = SizeOfHeapUpdate;
3946 rdata[0].buffer = InvalidBuffer;
3947 rdata[0].next = &(rdata[1]);
3949 rdata[1].data = NULL;
3951 rdata[1].buffer = oldbuf;
3952 rdata[1].buffer_std = true;
3953 rdata[1].next = &(rdata[2]);
3955 xlhdr.hdr.t_infomask2 = newtup->t_data->t_infomask2;
3956 xlhdr.hdr.t_infomask = newtup->t_data->t_infomask;
3957 xlhdr.hdr.t_hoff = newtup->t_data->t_hoff;
3958 if (move) /* remember xmax & xmin */
3960 TransactionId xid[2]; /* xmax, xmin */
3962 if (newtup->t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED))
3963 xid[0] = InvalidTransactionId;
3965 xid[0] = HeapTupleHeaderGetXmax(newtup->t_data);
3966 xid[1] = HeapTupleHeaderGetXmin(newtup->t_data);
3967 memcpy((char *) &xlhdr + hsize,
3969 2 * sizeof(TransactionId));
3970 hsize += 2 * sizeof(TransactionId);
3974 * As with insert records, we need not store the rdata[2] segment if we
3975 * decide to store the whole buffer instead.
3977 rdata[2].data = (char *) &xlhdr;
3978 rdata[2].len = hsize;
3979 rdata[2].buffer = newbuf;
3980 rdata[2].buffer_std = true;
3981 rdata[2].next = &(rdata[3]);
3983 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
3984 rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
3985 rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
3986 rdata[3].buffer = newbuf;
3987 rdata[3].buffer_std = true;
3988 rdata[3].next = NULL;
3990 /* If new tuple is the single and first tuple on page... */
3991 if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
3992 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
3994 info |= XLOG_HEAP_INIT_PAGE;
3995 rdata[2].buffer = rdata[3].buffer = InvalidBuffer;
3998 recptr = XLogInsert(RM_HEAP_ID, info, rdata);
4004 * Perform XLogInsert for a heap-move operation. Caller must already
4005 * have modified the buffers and marked them dirty.
4008 log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
4009 Buffer newbuf, HeapTuple newtup)
4011 return log_heap_update(reln, oldbuf, from, newbuf, newtup, true);
4015 * Perform XLogInsert of a HEAP_NEWPAGE record to WAL. Caller is responsible
4016 * for writing the page to disk after calling this routine.
4018 * Note: all current callers build pages in private memory and write them
4019 * directly to smgr, rather than using bufmgr. Therefore there is no need
4020 * to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
4021 * the critical section.
4023 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
4024 * not do anything that assumes we are touching a heap.
4027 log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
4030 xl_heap_newpage xlrec;
4032 XLogRecData rdata[2];
4034 /* NO ELOG(ERROR) from here till newpage op is logged */
4035 START_CRIT_SECTION();
4037 xlrec.node = *rnode;
4038 xlrec.forknum = forkNum;
4039 xlrec.blkno = blkno;
4041 rdata[0].data = (char *) &xlrec;
4042 rdata[0].len = SizeOfHeapNewpage;
4043 rdata[0].buffer = InvalidBuffer;
4044 rdata[0].next = &(rdata[1]);
4046 rdata[1].data = (char *) page;
4047 rdata[1].len = BLCKSZ;
4048 rdata[1].buffer = InvalidBuffer;
4049 rdata[1].next = NULL;
4051 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);
4053 PageSetLSN(page, recptr);
4054 PageSetTLI(page, ThisTimeLineID);
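/*
 * A rough usage sketch: a caller copying a relation builds each page in
 * local memory, logs it, then writes it straight through smgr.  "dst" (an
 * SMgrRelation), "forkNum", "blkno", "buf"/"page", "use_wal" and "istemp"
 * are assumed to be the caller's:
 *
 *		if (use_wal)
 *			log_newpage(&dst->smgr_rnode, forkNum, blkno, page);
 *		smgrextend(dst, forkNum, blkno, buf, istemp);
 */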
4062 * Handles CLEAN and CLEAN_MOVE record types
4065 heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
4067 xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
4071 OffsetNumber *redirected;
4072 OffsetNumber *nowdead;
4073 OffsetNumber *nowunused;
4079 if (record->xl_info & XLR_BKP_BLOCK_1)
4082 buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
4083 if (!BufferIsValid(buffer))
4085 page = (Page) BufferGetPage(buffer);
4087 if (XLByteLE(lsn, PageGetLSN(page)))
4089 UnlockReleaseBuffer(buffer);
4093 nredirected = xlrec->nredirected;
4094 ndead = xlrec->ndead;
4095 end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
4096 redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
4097 nowdead = redirected + (nredirected * 2);
4098 nowunused = nowdead + ndead;
4099 nunused = (end - nowunused);
4100 Assert(nunused >= 0);
4102 /* Update all item pointers per the record, and repair fragmentation */
4103 heap_page_prune_execute(buffer,
4104 redirected, nredirected,
4109 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
4112 * Note: we don't worry about updating the page's prunability hints.
4113 * At worst this will cause an extra prune cycle to occur soon.
4116 PageSetLSN(page, lsn);
4117 PageSetTLI(page, ThisTimeLineID);
4118 MarkBufferDirty(buffer);
4119 UnlockReleaseBuffer(buffer);
4122 * Update the FSM as well.
4124 * XXX: We don't get here if the page was restored from full page image.
4125 * We don't bother to update the FSM in that case; it doesn't need to be
4126 * totally accurate anyway.
4128 XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
static void
heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
	TransactionId cutoff_xid = xlrec->cutoff_xid;
	Buffer		buffer;
	Page		page;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	if (record->xl_len > SizeOfHeapFreeze)
	{
		OffsetNumber *offsets;
		OffsetNumber *offsets_end;

		offsets = (OffsetNumber *) ((char *) xlrec + SizeOfHeapFreeze);
		offsets_end = (OffsetNumber *) ((char *) xlrec + record->xl_len);

		while (offsets < offsets_end)
		{
			/* offsets[] entries are one-based */
			ItemId		lp = PageGetItemId(page, *offsets);
			HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);

			(void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
			offsets++;
		}
	}

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
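
/*
 * Replay a HEAP_NEWPAGE record by copying the full page image stored in
 * the record over the target page.
 */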
static void
heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_newpage *xlrec = (xl_heap_newpage *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;

	/*
	 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
	 * not do anything that assumes we are touching a heap.
	 */
	buffer = XLogReadBuffer(xlrec->node, xlrec->blkno, true);
	Assert(BufferIsValid(buffer));
	page = (Page) BufferGetPage(buffer);

	Assert(record->xl_len == SizeOfHeapNewpage + BLCKSZ);
	memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
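
/*
 * Replay a HEAP_DELETE record: set xmax/cmax on the target tuple and mark
 * the page as a candidate for pruning.
 */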
static void
heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	BlockNumber blkno;

	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));

	/*
	 * The visibility map always needs to be updated, even if the heap page
	 * is already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);

		visibilitymap_clear(reln, blkno);
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_delete_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_INVALID |
						  HEAP_XMAX_IS_MULTI |
						  HEAP_IS_LOCKED |
						  HEAP_MOVED);
	HeapTupleHeaderClearHotUpdated(htup);
	HeapTupleHeaderSetXmax(htup, record->xl_xid);
	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);

	/* Mark the page as a candidate for pruning */
	PageSetPrunable(page, record->xl_xid);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->target.tid;
	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
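
/*
 * Replay a HEAP_INSERT record: reconstruct the tuple from the record data
 * and re-add it to the target page, initializing the page first if the
 * record carries the INIT_PAGE flag.
 */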
static void
heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	struct
	{
		HeapTupleHeaderData hdr;
		char		data[MaxHeapTupleSize];
	}			tbuf;
	HeapTupleHeader htup;
	xl_heap_header xlhdr;
	uint32		newlen;
	Size		freespace;
	BlockNumber blkno;

	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));

	/*
	 * The visibility map always needs to be updated, even if the heap page
	 * is already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);

		visibilitymap_clear(reln, blkno);
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	if (record->xl_info & XLOG_HEAP_INIT_PAGE)
	{
		buffer = XLogReadBuffer(xlrec->target.node, blkno, true);
		Assert(BufferIsValid(buffer));
		page = (Page) BufferGetPage(buffer);

		PageInit(page, BufferGetPageSize(buffer), 0);
	}
	else
	{
		buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
		if (!BufferIsValid(buffer))
			return;
		page = (Page) BufferGetPage(buffer);

		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
		{
			UnlockReleaseBuffer(buffer);
			return;
		}
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
		elog(PANIC, "heap_insert_redo: invalid max offset number");

	newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
	Assert(newlen <= MaxHeapTupleSize);
	memcpy((char *) &xlhdr,
		   (char *) xlrec + SizeOfHeapInsert,
		   SizeOfHeapHeader);
	htup = &tbuf.hdr;
	MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
	/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
	memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
		   (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
		   newlen);
	newlen += offsetof(HeapTupleHeaderData, t_bits);
	htup->t_infomask2 = xlhdr.t_infomask2;
	htup->t_infomask = xlhdr.t_infomask;
	htup->t_hoff = xlhdr.t_hoff;
	HeapTupleHeaderSetXmin(htup, record->xl_xid);
	HeapTupleHeaderSetCmin(htup, FirstCommandId);
	htup->t_ctid = xlrec->target.tid;

	offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "heap_insert_redo: failed to add tuple");

	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/*
	 * If the page is running low on free space, update the FSM as well.
	 * Arbitrarily, our definition of "low" is less than 20%. We can't do
	 * much better than that without knowing the fill-factor for the table.
	 *
	 * XXX: We don't get here if the page was restored from full page image.
	 * We don't bother to update the FSM in that case, it doesn't need to be
	 * totally accurate anyway.
	 */
	if (freespace < BLCKSZ / 5)
		XLogRecordPageWithFreeSpace(xlrec->target.node, blkno, freespace);
}

/*
 * Handles UPDATE, HOT_UPDATE & MOVE
 */
static void
heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
{
	xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
	Buffer		buffer;
	bool		samepage = (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
							ItemPointerGetBlockNumber(&(xlrec->target.tid)));
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	struct
	{
		HeapTupleHeaderData hdr;
		char		data[MaxHeapTupleSize];
	}			tbuf;
	xl_heap_header xlhdr;
	int			hsize;
	uint32		newlen;
	Size		freespace;

	/*
	 * The visibility map always needs to be updated, even if the heap page
	 * is already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);

		visibilitymap_clear(reln,
							ItemPointerGetBlockNumber(&xlrec->target.tid));
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
	{
		if (samepage)
			return;				/* backup block covered both changes */
		goto newt;
	}

	/* Deal with old tuple version */
	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		goto newt;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		if (samepage)
			return;
		goto newt;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);
	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_update_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	if (move)
	{
		htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
							  HEAP_XMIN_INVALID |
							  HEAP_MOVED_IN);
		htup->t_infomask |= HEAP_MOVED_OFF;
		HeapTupleHeaderClearHotUpdated(htup);
		HeapTupleHeaderSetXvac(htup, record->xl_xid);
		/* Make sure there is no forward chain link in t_ctid */
		htup->t_ctid = xlrec->target.tid;
	}
	else
	{
		htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
							  HEAP_XMAX_INVALID |
							  HEAP_XMAX_IS_MULTI |
							  HEAP_IS_LOCKED |
							  HEAP_MOVED);
		if (hot_update)
			HeapTupleHeaderSetHotUpdated(htup);
		else
			HeapTupleHeaderClearHotUpdated(htup);
		HeapTupleHeaderSetXmax(htup, record->xl_xid);
		HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
		/* Set forward chain link in t_ctid */
		htup->t_ctid = xlrec->newtid;
	}

	/* Mark the page as a candidate for pruning */
	PageSetPrunable(page, record->xl_xid);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	/*
	 * this test is ugly, but necessary to avoid thinking that insert change
	 * is already applied
	 */
	if (samepage)
		goto newsame;
	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/* Deal with new tuple */

newt:;

	/*
	 * The visibility map always needs to be updated, even if the heap page
	 * is already up-to-date.
	 */
	if (xlrec->new_all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);

		visibilitymap_clear(reln, ItemPointerGetBlockNumber(&xlrec->newtid));
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_2)
		return;

	if (record->xl_info & XLOG_HEAP_INIT_PAGE)
	{
		buffer = XLogReadBuffer(xlrec->target.node,
								ItemPointerGetBlockNumber(&(xlrec->newtid)),
								true);
		Assert(BufferIsValid(buffer));
		page = (Page) BufferGetPage(buffer);

		PageInit(page, BufferGetPageSize(buffer), 0);
	}
	else
	{
		buffer = XLogReadBuffer(xlrec->target.node,
								ItemPointerGetBlockNumber(&(xlrec->newtid)),
								false);
		if (!BufferIsValid(buffer))
			return;
		page = (Page) BufferGetPage(buffer);

		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
		{
			UnlockReleaseBuffer(buffer);
			return;
		}
	}

newsame:;

	offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
		elog(PANIC, "heap_update_redo: invalid max offset number");

	hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
	if (move)
		hsize += (2 * sizeof(TransactionId));

	newlen = record->xl_len - hsize;
	Assert(newlen <= MaxHeapTupleSize);
	memcpy((char *) &xlhdr,
		   (char *) xlrec + SizeOfHeapUpdate,
		   SizeOfHeapHeader);
	htup = &tbuf.hdr;
	MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
	/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
	memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
		   (char *) xlrec + hsize,
		   newlen);
	newlen += offsetof(HeapTupleHeaderData, t_bits);
	htup->t_infomask2 = xlhdr.t_infomask2;
	htup->t_infomask = xlhdr.t_infomask;
	htup->t_hoff = xlhdr.t_hoff;

	if (move)
	{
		TransactionId xid[2];	/* xmax, xmin */

		memcpy((char *) xid,
			   (char *) xlrec + SizeOfHeapUpdate + SizeOfHeapHeader,
			   2 * sizeof(TransactionId));
		HeapTupleHeaderSetXmin(htup, xid[1]);
		HeapTupleHeaderSetXmax(htup, xid[0]);
		HeapTupleHeaderSetXvac(htup, record->xl_xid);
	}
	else
	{
		HeapTupleHeaderSetXmin(htup, record->xl_xid);
		HeapTupleHeaderSetCmin(htup, FirstCommandId);
		/* Make sure there is no forward chain link in t_ctid */
		htup->t_ctid = xlrec->newtid;
	}

	offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "heap_update_redo: failed to add tuple");

	if (xlrec->new_all_visible_cleared)
		PageClearAllVisible(page);

	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/*
	 * If the page is running low on free space, update the FSM as well.
	 * Arbitrarily, our definition of "low" is less than 20%. We can't do
	 * much better than that without knowing the fill-factor for the table.
	 *
	 * However, don't update the FSM on HOT updates, because after crash
	 * recovery, either the old or the new tuple will certainly be dead and
	 * prunable. After pruning, the page will have roughly as much free space
	 * as it did before the update, assuming the new tuple is about the same
	 * size as the old one.
	 *
	 * XXX: We don't get here if the page was restored from full page image.
	 * We don't bother to update the FSM in that case, it doesn't need to be
	 * totally accurate anyway.
	 */
	if (!hot_update && freespace < BLCKSZ / 5)
		XLogRecordPageWithFreeSpace(xlrec->target.node,
					 ItemPointerGetBlockNumber(&(xlrec->newtid)), freespace);
}
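
/*
 * Replay a HEAP_LOCK record: restore the tuple-lock infomask bits and the
 * locking xid (or multixact) on the target tuple.
 */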
static void
heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_lock_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_INVALID |
						  HEAP_XMAX_IS_MULTI |
						  HEAP_IS_LOCKED |
						  HEAP_MOVED);
	if (xlrec->xid_is_mxact)
		htup->t_infomask |= HEAP_XMAX_IS_MULTI;
	if (xlrec->shared_lock)
		htup->t_infomask |= HEAP_XMAX_SHARED_LOCK;
	else
		htup->t_infomask |= HEAP_XMAX_EXCL_LOCK;
	HeapTupleHeaderClearHotUpdated(htup);
	HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->target.tid;
	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
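
/*
 * Replay a HEAP_INPLACE record: overwrite the tuple's data in place; the
 * tuple's length and header fields are not changed.
 */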
static void
heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	uint32		oldlen;
	uint32		newlen;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_inplace_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	oldlen = ItemIdGetLength(lp) - htup->t_hoff;
	newlen = record->xl_len - SizeOfHeapInplace;
	if (oldlen != newlen)
		elog(PANIC, "heap_inplace_redo: wrong tuple length");

	memcpy((char *) htup + htup->t_hoff,
		   (char *) xlrec + SizeOfHeapInplace,
		   newlen);

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
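
/*
 * Redo dispatcher for the main heap rmgr: restore any backup blocks, then
 * hand off to the routine matching the record's opcode.
 */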
void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
	uint8		info = record->xl_info & ~XLR_INFO_MASK;

	RestoreBkpBlocks(lsn, record, false);

	switch (info & XLOG_HEAP_OPMASK)
	{
		case XLOG_HEAP_INSERT:
			heap_xlog_insert(lsn, record);
			break;
		case XLOG_HEAP_DELETE:
			heap_xlog_delete(lsn, record);
			break;
		case XLOG_HEAP_UPDATE:
			heap_xlog_update(lsn, record, false, false);
			break;
		case XLOG_HEAP_MOVE:
			heap_xlog_update(lsn, record, true, false);
			break;
		case XLOG_HEAP_HOT_UPDATE:
			heap_xlog_update(lsn, record, false, true);
			break;
		case XLOG_HEAP_NEWPAGE:
			heap_xlog_newpage(lsn, record);
			break;
		case XLOG_HEAP_LOCK:
			heap_xlog_lock(lsn, record);
			break;
		case XLOG_HEAP_INPLACE:
			heap_xlog_inplace(lsn, record);
			break;
		default:
			elog(PANIC, "heap_redo: unknown op code %u", info);
	}
}
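
/*
 * Redo dispatcher for the heap2 rmgr (freeze and clean records).  Backup
 * blocks are restored per opcode here, since the clean records ask for
 * cleanup locks on the restored buffers while freeze does not.
 */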
void
heap2_redo(XLogRecPtr lsn, XLogRecord *record)
{
	uint8		info = record->xl_info & ~XLR_INFO_MASK;

	switch (info & XLOG_HEAP_OPMASK)
	{
		case XLOG_HEAP2_FREEZE:
			RestoreBkpBlocks(lsn, record, false);
			heap_xlog_freeze(lsn, record);
			break;
		case XLOG_HEAP2_CLEAN:
			RestoreBkpBlocks(lsn, record, true);
			heap_xlog_clean(lsn, record, false);
			break;
		case XLOG_HEAP2_CLEAN_MOVE:
			RestoreBkpBlocks(lsn, record, true);
			heap_xlog_clean(lsn, record, true);
			break;
		default:
			elog(PANIC, "heap2_redo: unknown op code %u", info);
	}
}
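
/*
 * Format a relation/tid target for the record-description routines below.
 */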
static void
out_target(StringInfo buf, xl_heaptid *target)
{
	appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
			 target->node.spcNode, target->node.dbNode, target->node.relNode,
					 ItemPointerGetBlockNumber(&(target->tid)),
					 ItemPointerGetOffsetNumber(&(target->tid)));
}
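
/*
 * Produce a human-readable description of a heap rmgr record, used when
 * WAL records are printed for debugging.
 */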
void
heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
	uint8		info = xl_info & ~XLR_INFO_MASK;

	info &= XLOG_HEAP_OPMASK;
	if (info == XLOG_HEAP_INSERT)
	{
		xl_heap_insert *xlrec = (xl_heap_insert *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)
			appendStringInfo(buf, "insert(init): ");
		else
			appendStringInfo(buf, "insert: ");
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_DELETE)
	{
		xl_heap_delete *xlrec = (xl_heap_delete *) rec;

		appendStringInfo(buf, "delete: ");
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_UPDATE)
	{
		xl_heap_update *xlrec = (xl_heap_update *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)
			appendStringInfo(buf, "update(init): ");
		else
			appendStringInfo(buf, "update: ");
		out_target(buf, &(xlrec->target));
		appendStringInfo(buf, "; new %u/%u",
						 ItemPointerGetBlockNumber(&(xlrec->newtid)),
						 ItemPointerGetOffsetNumber(&(xlrec->newtid)));
	}
	else if (info == XLOG_HEAP_MOVE)
	{
		xl_heap_update *xlrec = (xl_heap_update *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)
			appendStringInfo(buf, "move(init): ");
		else
			appendStringInfo(buf, "move: ");
		out_target(buf, &(xlrec->target));
		appendStringInfo(buf, "; new %u/%u",
						 ItemPointerGetBlockNumber(&(xlrec->newtid)),
						 ItemPointerGetOffsetNumber(&(xlrec->newtid)));
	}
	else if (info == XLOG_HEAP_HOT_UPDATE)
	{
		xl_heap_update *xlrec = (xl_heap_update *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)		/* can this case happen? */
			appendStringInfo(buf, "hot_update(init): ");
		else
			appendStringInfo(buf, "hot_update: ");
		out_target(buf, &(xlrec->target));
		appendStringInfo(buf, "; new %u/%u",
						 ItemPointerGetBlockNumber(&(xlrec->newtid)),
						 ItemPointerGetOffsetNumber(&(xlrec->newtid)));
	}
	else if (info == XLOG_HEAP_NEWPAGE)
	{
		xl_heap_newpage *xlrec = (xl_heap_newpage *) rec;

		appendStringInfo(buf, "newpage: rel %u/%u/%u; blk %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->blkno);
	}
	else if (info == XLOG_HEAP_LOCK)
	{
		xl_heap_lock *xlrec = (xl_heap_lock *) rec;

		if (xlrec->shared_lock)
			appendStringInfo(buf, "shared_lock: ");
		else
			appendStringInfo(buf, "exclusive_lock: ");
		if (xlrec->xid_is_mxact)
			appendStringInfo(buf, "mxid ");
		else
			appendStringInfo(buf, "xid ");
		appendStringInfo(buf, "%u ", xlrec->locking_xid);
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_INPLACE)
	{
		xl_heap_inplace *xlrec = (xl_heap_inplace *) rec;

		appendStringInfo(buf, "inplace: ");
		out_target(buf, &(xlrec->target));
	}
	else
		appendStringInfo(buf, "UNKNOWN");
}
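
/*
 * Produce a human-readable description of a heap2 rmgr record (freeze,
 * clean, clean_move).
 */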
void
heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
{
	uint8		info = xl_info & ~XLR_INFO_MASK;

	info &= XLOG_HEAP_OPMASK;
	if (info == XLOG_HEAP2_FREEZE)
	{
		xl_heap_freeze *xlrec = (xl_heap_freeze *) rec;

		appendStringInfo(buf, "freeze: rel %u/%u/%u; blk %u; cutoff %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block,
						 xlrec->cutoff_xid);
	}
	else if (info == XLOG_HEAP2_CLEAN)
	{
		xl_heap_clean *xlrec = (xl_heap_clean *) rec;

		appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block);
	}
	else if (info == XLOG_HEAP2_CLEAN_MOVE)
	{
		xl_heap_clean *xlrec = (xl_heap_clean *) rec;

		appendStringInfo(buf, "clean_move: rel %u/%u/%u; blk %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block);
	}
	else
		appendStringInfo(buf, "UNKNOWN");
}

/*
 *	heap_sync		- sync a heap, for use when no WAL has been written
 *
 * This forces the heap contents (including TOAST heap if any) down to disk.
 * If we skipped using WAL, and it's not a temp relation, we must force the
 * relation down to disk before it's safe to commit the transaction.  This
 * requires writing out any dirty buffers and then doing a forced fsync.
 *
 * Indexes are not touched.  (Currently, index operations associated with
 * the commands that use this are WAL-logged and so do not need fsync.
 * That behavior might change someday, but in any case it's likely that
 * any fsync decisions required would be per-index and hence not appropriate
 * to be done here.)
 */
void
heap_sync(Relation rel)
{
	/* temp tables never need fsync */
	if (rel->rd_istemp)
		return;

	/* main heap */
	FlushRelationBuffers(rel);
	/* FlushRelationBuffers will have opened rd_smgr */
	smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);

	/* FSM is not critical, don't bother syncing it */

	/* toast heap, if any */
	if (OidIsValid(rel->rd_rel->reltoastrelid))
	{
		Relation	toastrel;

		toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
		FlushRelationBuffers(toastrel);
		smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
		heap_close(toastrel, AccessShareLock);