heap_fetch(Relation relation,
Snapshot snapshot,
HeapTuple tuple,
- Buffer *userbuf,
- Relation stats_relation)
+ Buffer *userbuf)
{
ItemPointer tid = &(tuple->t_self);
ItemId lp;
*/
*userbuf = buffer;
- /* Count the successful fetch against appropriate rel, if any */
- if (stats_relation != NULL)
- pgstat_count_heap_fetch(stats_relation);
-
return true;
}
block = ItemPointerGetBlockNumber(&tupid);
ItemPointerCopy(&tupid, &(mytup.t_self));
- if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, NULL))
+ if (!heap_fetch(rel, SnapshotAny, &mytup, &buf))
{
/*
* if we fail to find the updated version of the tuple, it's
* ------------------------------------------------------------------------
*/
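+/*
+ * Heap table AM implementation of the tuple_fetch_row_version callback:
+ * fetch the tuple at `tid` into `slot` after checking its visibility
+ * against `snapshot`.
+ */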
+static bool
+heapam_fetch_row_version(Relation relation,
+ ItemPointer tid,
+ Snapshot snapshot,
+ TupleTableSlot *slot)
+{
+ BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+ Buffer buffer;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
+
+ bslot->base.tupdata.t_self = *tid;
+ if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer))
+ {
+ /* store in slot, transferring existing pin */
+ ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
+ slot->tts_tableOid = RelationGetRelid(relation);
+
+ return true;
+ }
+
+ return false;
+}
+
static bool
heapam_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
Snapshot snapshot)
errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
tuple->t_self = *tid;
- if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer, NULL))
+ if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer))
{
/*
* If xmin isn't what we're expecting, the slot must have
.tuple_update = heapam_tuple_update,
.tuple_lock = heapam_tuple_lock,
+ .tuple_fetch_row_version = heapam_fetch_row_version,
.tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
};
Assert(routine->index_fetch_end != NULL);
Assert(routine->index_fetch_tuple != NULL);
+ Assert(routine->tuple_fetch_row_version != NULL);
Assert(routine->tuple_satisfies_snapshot != NULL);
Assert(routine->tuple_insert != NULL);
#include "postgres.h"
#include "access/genam.h"
-#include "access/heapam.h"
-#include "access/tableam.h"
-#include "access/sysattr.h"
#include "access/htup_details.h"
+#include "access/relation.h"
+#include "access/sysattr.h"
+#include "access/table.h"
+#include "access/tableam.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/dependency.h"
}
else
{
- Page page;
- ItemId lp;
- Buffer buffer;
- BufferHeapTupleTableSlot *boldslot;
- HeapTuple tuple;
-
- Assert(TTS_IS_BUFFERTUPLE(oldslot));
- ExecClearTuple(oldslot);
- boldslot = (BufferHeapTupleTableSlot *) oldslot;
- tuple = &boldslot->base.tupdata;
-
- buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
-
/*
- * Although we already know this tuple is valid, we must lock the
- * buffer to ensure that no one has a buffer cleanup lock; otherwise
- * they might move the tuple while we try to copy it. But we can
- * release the lock before actually doing the heap_copytuple call,
- * since holding pin is sufficient to prevent anyone from getting a
- * cleanup lock they don't already hold.
+ * We expect the tuple to be present, thus very simple error handling
+ * suffices.
*/
- LockBuffer(buffer, BUFFER_LOCK_SHARE);
-
- page = BufferGetPage(buffer);
- lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
-
- Assert(ItemIdIsNormal(lp));
-
- tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
- tuple->t_len = ItemIdGetLength(lp);
- tuple->t_self = *tid;
- tuple->t_tableOid = RelationGetRelid(relation);
-
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-
- ExecStorePinnedBufferHeapTuple(tuple, oldslot, buffer);
+ if (!table_fetch_row_version(relation, tid, SnapshotAny, oldslot))
+ elog(ERROR, "failed to fetch tuple for trigger");
}
return true;
AfterTriggerShared evtshared = GetTriggerSharedData(event);
Oid tgoid = evtshared->ats_tgoid;
TriggerData LocTriggerData;
- HeapTupleData tuple1;
- HeapTupleData tuple2;
HeapTuple rettuple;
int tgindx;
bool should_free_trig = false;
default:
if (ItemPointerIsValid(&(event->ate_ctid1)))
{
- Buffer buffer;
-
LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
- ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
- if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer, NULL))
+ if (!table_fetch_row_version(rel, &(event->ate_ctid1), SnapshotAny, LocTriggerData.tg_trigslot))
elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
- ExecStorePinnedBufferHeapTuple(&tuple1,
- LocTriggerData.tg_trigslot,
- buffer);
LocTriggerData.tg_trigtuple =
- ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false,
- &should_free_trig);
+ ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
}
else
{
AFTER_TRIGGER_2CTID &&
ItemPointerIsValid(&(event->ate_ctid2)))
{
- Buffer buffer;
-
LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
- ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
- if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer, NULL))
+ if (!table_fetch_row_version(rel, &(event->ate_ctid2), SnapshotAny, LocTriggerData.tg_newslot))
elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
- ExecStorePinnedBufferHeapTuple(&tuple2,
- LocTriggerData.tg_newslot,
- buffer);
LocTriggerData.tg_newtuple =
- ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false,
- &should_free_new);
+ ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
}
else
{
else
{
/* ordinary table, fetch the tuple */
- HeapTupleData tuple;
- Buffer buffer;
-
- tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
- if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
- NULL))
+ if (!table_fetch_row_version(erm->relation,
+ (ItemPointer) DatumGetPointer(datum),
+ SnapshotAny, slot))
elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
-
- /* successful, store tuple */
- ExecStorePinnedBufferHeapTuple(&tuple, slot, buffer);
- ExecMaterializeSlot(slot);
}
}
else
TupleTableSlot *tempSlot)
{
Relation rel = relinfo->ri_RelationDesc;
- Buffer buffer;
- HeapTupleData tuple;
/* Redundantly check isolation level */
if (!IsolationUsesXactSnapshot())
return;
- tuple.t_self = *tid;
- if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, NULL))
+ if (!table_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
- ExecStorePinnedBufferHeapTuple(&tuple, tempSlot, buffer);
ExecCheckTupleVisible(estate, rel, tempSlot);
ExecClearTuple(tempSlot);
}
}
else
{
- BufferHeapTupleTableSlot *bslot;
- HeapTuple deltuple;
- Buffer buffer;
-
- Assert(TTS_IS_BUFFERTUPLE(slot));
- ExecClearTuple(slot);
- bslot = (BufferHeapTupleTableSlot *) slot;
- deltuple = &bslot->base.tupdata;
-
- deltuple->t_self = *tupleid;
- if (!heap_fetch(resultRelationDesc, SnapshotAny,
- deltuple, &buffer, NULL))
+ if (!table_fetch_row_version(resultRelationDesc, tupleid,
+ SnapshotAny, slot))
elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
-
- ExecStorePinnedBufferHeapTuple(deltuple, slot, buffer);
}
}
Relation heapRelation;
HeapTuple tuple;
TupleTableSlot *slot;
- Buffer buffer = InvalidBuffer;
ItemPointerData *tidList;
int numTids;
bool bBackward;
if (node->tss_isCurrentOf)
heap_get_latest_tid(heapRelation, snapshot, &tuple->t_self);
- if (heap_fetch(heapRelation, snapshot, tuple, &buffer, NULL))
- {
- /*
- * Store the scanned tuple in the scan tuple slot of the scan
- * state, transferring the pin to the slot.
- */
- ExecStorePinnedBufferHeapTuple(tuple, /* tuple to store */
- slot, /* slot to store in */
- buffer); /* buffer associated with
- * tuple */
-
+ if (table_fetch_row_version(heapRelation, &tuple->t_self, snapshot,
+ slot))
return slot;
- }
+
/* Bad TID or failed snapshot qual; try next */
if (bBackward)
node->tss_TidPtr--;
ScanDirection direction, struct TupleTableSlot *slot);
extern bool heap_fetch(Relation relation, Snapshot snapshot,
- HeapTuple tuple, Buffer *userbuf, Relation stats_relation);
+ HeapTuple tuple, Buffer *userbuf);
extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
bool *all_dead, bool first_call);
* ------------------------------------------------------------------------
*/
+
+ /*
+ * Fetch tuple at `tid` into `slot`, after doing a visibility test
+ * according to `snapshot`. If a tuple was found and passed the visibility
+ * test, returns true, false otherwise.
+ */
+ bool (*tuple_fetch_row_version) (Relation rel,
+ ItemPointer tid,
+ Snapshot snapshot,
+ TupleTableSlot *slot);
+
/*
* Does the tuple in `slot` satisfy `snapshot`? The slot needs to be of
* the appropriate type for the AM.
}
/*
- * Fetches tuple at `tid` into `slot`, after doing a visibility test according
- * to `snapshot`. If a tuple was found and passed the visibility test, returns
- * true, false otherwise.
+ * Fetches, as part of an index scan, tuple at `tid` into `slot`, after doing
+ * a visibility test according to `snapshot`. If a tuple was found and passed
+ * the visibility test, returns true, false otherwise.
*
* *call_again needs to be false on the first call to table_index_fetch_tuple() for
* a tid. If there potentially is another tuple matching the tid, *call_again
* *all_dead will be set to true by table_index_fetch_tuple() iff it is guaranteed
* that no backend needs to see that tuple. Index AMs can use that to avoid
* returning that tid in future searches.
+ *
+ * The difference between this function and table_fetch_row_version is that
+ * this function returns the currently visible version of a row if the AM
+ * supports storing multiple row versions reachable via a single index entry
+ * (like heap's HOT), whereas table_fetch_row_version only evaluates the
+ * tuple exactly at `tid`. Outside of index entry -> table tuple lookups,
+ * table_fetch_row_version is what's usually needed.
*/
static inline bool
table_index_fetch_tuple(struct IndexFetchTableData *scan,
* ------------------------------------------------------------------------
*/
+
+/*
+ * Fetch tuple at `tid` into `slot`, after doing a visibility test according to
+ * `snapshot`. If a tuple was found and passed the visibility test, returns
+ * true, false otherwise.
+ *
+ * See table_index_fetch_tuple's comment for the difference between these
+ * functions. This function is the correct one to use outside of
+ * index entry -> table tuple lookups.
+ */
+static inline bool
+table_fetch_row_version(Relation rel,
+ ItemPointer tid,
+ Snapshot snapshot,
+ TupleTableSlot *slot)
+{
+ return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot);
+}
+
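/*
 * Illustrative sketch only, not part of this patch: a hypothetical helper
 * showing how callers use table_fetch_row_version to re-fetch a row whose
 * TID is already known, much as the trigger and ON CONFLICT call sites above
 * do. The helper name is invented, and SnapshotAny is assumed because the
 * TID is taken to point at the exact version wanted; pass a regular MVCC
 * snapshot to apply normal visibility rules instead. For index entry ->
 * table lookups that may need to follow HOT chains, use
 * table_index_fetch_tuple rather than this function.
 */
static bool
example_refetch_row(Relation rel, ItemPointer tid, TupleTableSlot *slot)
{
	/* slot must be of the slot type expected by rel's table AM */
	if (!table_fetch_row_version(rel, tid, SnapshotAny, slot))
		return false;			/* no tuple at that TID */

	/* slot now holds the fetched row version */
	return true;
}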
/*
* Return true iff tuple in slot satisfies the snapshot.
*