PageClearAllVisible(BufferGetPage(buffer));
visibilitymap_clear(relation,
ItemPointerGetBlockNumber(&(heaptup->t_self)),
- vmbuffer);
+ vmbuffer, VISIBILITYMAP_VALID_BITS);
}
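/*
 * Editorial sketch (not part of the patch): the hunks in this section assume
 * visibilitymap_clear() now takes a bitmask naming which bits to clear and
 * reports whether it actually cleared anything, roughly:
 *
 *     #define VISIBILITYMAP_ALL_VISIBLE   0x01
 *     #define VISIBILITYMAP_ALL_FROZEN    0x02
 *     #define VISIBILITYMAP_VALID_BITS    0x03    (all defined flags)
 *
 *     extern bool visibilitymap_clear(Relation rel, BlockNumber heapBlk,
 *                                     Buffer vmbuf, uint8 flags);
 *
 * Passing VISIBILITYMAP_VALID_BITS preserves the old clear-everything
 * behavior; callers that only want to un-freeze a page pass
 * VISIBILITYMAP_ALL_FROZEN and leave the all-visible bit alone.
 */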
/*
PageClearAllVisible(page);
visibilitymap_clear(relation,
BufferGetBlockNumber(buffer),
- vmbuffer);
+ vmbuffer, VISIBILITYMAP_VALID_BITS);
}
/*
all_visible_cleared = true;
PageClearAllVisible(page);
visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
- vmbuffer);
+ vmbuffer, VISIBILITYMAP_VALID_BITS);
}
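/*
 * Editorial sketch (not part of the patch): with two visibility-map bits per
 * heap page, the flags argument is presumably shifted to the page's bit
 * position within the map byte, and the function reports whether any bit
 * actually changed, along these lines:
 *
 *     uint32  mapByte   = HEAPBLK_TO_MAPBYTE(heapBlk);
 *     uint8   mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
 *     uint8   mask      = flags << mapOffset;
 *     char   *map;
 *     bool    cleared   = false;
 *
 *     LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
 *     map = PageGetContents(BufferGetPage(vmbuf));
 *     if (map[mapByte] & mask)
 *     {
 *         map[mapByte] &= ~mask;
 *         MarkBufferDirty(vmbuf);
 *         cleared = true;
 *     }
 *     LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);
 *     return cleared;
 *
 * HEAPBLK_TO_MAPBYTE/HEAPBLK_TO_OFFSET follow visibilitymap.c's existing
 * naming conventions and are assumptions here, not verbatim patch contents.
 */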
/* store transaction information of xact deleting the tuple */
TransactionId xmax_lock_old_tuple;
uint16 infomask_lock_old_tuple,
infomask2_lock_old_tuple;
+ bool cleared_all_frozen = false;
/*
* To prevent concurrent sessions from updating the tuple, we have to
/* temporarily make it look not-updated, but locked */
oldtup.t_data->t_ctid = oldtup.t_self;
+ /*
+ * Clear the all-frozen bit in the visibility map if needed. We could
+ * immediately reset ALL_VISIBLE, but given that the WAL logging
+ * overhead would be unchanged, that does not seem worthwhile.
+ */
+ if (PageIsAllVisible(BufferGetPage(buffer)) &&
+ visibilitymap_clear(relation, block, vmbuffer,
+ VISIBILITYMAP_ALL_FROZEN))
+ cleared_all_frozen = true;
+
MarkBufferDirty(buffer);
if (RelationNeedsWAL(relation))
xlrec.locking_xid = xmax_lock_old_tuple;
xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
oldtup.t_data->t_infomask2);
+ xlrec.flags =
+ cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
PageSetLSN(page, recptr);
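/*
 * Editorial note (not part of the patch): cleared_all_frozen records whether
 * this backend actually flipped the bit, so the WAL record only claims a
 * clear that really happened.  The flag is assumed to be defined in
 * heapam_xlog.h roughly as
 *
 *     #define XLH_LOCK_ALL_FROZEN_CLEARED     0x01
 *
 * and is consumed by heap_xlog_lock() (see the redo hunks near the end of
 * this patch), which re-clears the all-frozen bit during recovery.
 */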
/* record address of new tuple in t_ctid of old one */
oldtup.t_data->t_ctid = heaptup->t_self;
- /* clear PD_ALL_VISIBLE flags */
+ /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
if (PageIsAllVisible(BufferGetPage(buffer)))
{
all_visible_cleared = true;
PageClearAllVisible(BufferGetPage(buffer));
visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
- vmbuffer);
+ vmbuffer, VISIBILITYMAP_VALID_BITS);
}
if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
{
all_visible_cleared_new = true;
PageClearAllVisible(BufferGetPage(newbuf));
visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
- vmbuffer_new);
+ vmbuffer_new, VISIBILITYMAP_VALID_BITS);
}
if (newbuf != buffer)
ItemPointer tid = &(tuple->t_self);
ItemId lp;
Page page;
+ Buffer vmbuffer = InvalidBuffer;
+ BlockNumber block;
TransactionId xid,
xmax;
uint16 old_infomask,
new_infomask2;
bool first_time = true;
bool have_tuple_lock = false;
+ bool cleared_all_frozen = false;
*buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+ block = ItemPointerGetBlockNumber(tid);
+
+ /*
+ * Before locking the buffer, pin the visibility map page if it appears to
+ * be necessary.  XXX: the page's all-visible status might change after the
+ * lock below is acquired. We don't yet deal with that case.
+ */
+ if (PageIsAllVisible(BufferGetPage(*buffer)))
+ visibilitymap_pin(relation, block, &vmbuffer);
+
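/*
 * Editorial sketch (not part of the patch): pinning the visibility map page
 * may require I/O, which must not happen while the buffer content lock is
 * held; hence the pin is taken first.  The race flagged by the XXX above is
 * handled elsewhere in heapam.c with an unlock-pin-relock retry, which a
 * later revision could adapt here (hypothetical, using this function's
 * variables):
 *
 *     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
 *     {
 *         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
 *         visibilitymap_pin(relation, block, &vmbuffer);
 *         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
 *         -- then recheck the tuple's state, since it may have changed
 *     }
 */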
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
page = BufferGetPage(*buffer);
if (result == HeapTupleInvisible)
{
- LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
-
/*
* This is possible, but only when locking a tuple for ON CONFLICT
* UPDATE. We return this value here rather than throwing an error in
* order to give that case the opportunity to throw a more specific
* error.
*/
- return HeapTupleInvisible;
+ result = HeapTupleInvisible;
+ goto out_locked;
}
else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
{
if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
{
pfree(members);
- return HeapTupleMayBeUpdated;
+ result = HeapTupleMayBeUpdated;
+ goto out_unlocked;
}
}
Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
HEAP_XMAX_IS_EXCL_LOCKED(infomask));
- return HeapTupleMayBeUpdated;
- break;
+ result = HeapTupleMayBeUpdated;
+ goto out_unlocked;
case LockTupleShare:
if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
HEAP_XMAX_IS_EXCL_LOCKED(infomask))
- return HeapTupleMayBeUpdated;
+ {
+ result = HeapTupleMayBeUpdated;
+ goto out_unlocked;
+ }
break;
case LockTupleNoKeyExclusive:
if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
- return HeapTupleMayBeUpdated;
+ {
+ result = HeapTupleMayBeUpdated;
+ goto out_unlocked;
+ }
break;
case LockTupleExclusive:
if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
infomask2 & HEAP_KEYS_UPDATED)
- return HeapTupleMayBeUpdated;
+ {
+ result = HeapTupleMayBeUpdated;
+ goto out_unlocked;
+ }
break;
}
}
hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
else
hufd->cmax = InvalidCommandId;
- LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
- if (have_tuple_lock)
- UnlockTupleTuplock(relation, tid, mode);
- return result;
+ goto out_locked;
}
xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
tuple->t_data->t_ctid = *tid;
+ /* Clear only the all-frozen bit in the visibility map if needed */
+ if (PageIsAllVisible(page) &&
+ visibilitymap_clear(relation, block, vmbuffer,
+ VISIBILITYMAP_ALL_FROZEN))
+ cleared_all_frozen = true;
+
MarkBufferDirty(*buffer);
/*
xlrec.locking_xid = xid;
xlrec.infobits_set = compute_infobits(new_infomask,
tuple->t_data->t_infomask2);
+ xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
/* we don't decode row locks atm, so no need to log the origin */
END_CRIT_SECTION();
+ result = HeapTupleMayBeUpdated;
+
+out_locked:
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+out_unlocked:
+ if (BufferIsValid(vmbuffer))
+ ReleaseBuffer(vmbuffer);
+
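/*
 * Editorial note (not part of the patch): the early returns above are
 * rewritten to funnel through a single exit sequence:
 *
 *     out_locked:     -- reached with the buffer content lock still held
 *         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
 *     out_unlocked:   -- reached holding only the pins
 *         if (BufferIsValid(vmbuffer))
 *             ReleaseBuffer(vmbuffer);
 *
 * which guarantees the visibility map pin taken at function entry is dropped
 * on every path, including the ON CONFLICT HeapTupleInvisible case above.
 */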
/*
* Don't update the visibility map here. Locking a tuple doesn't change
* visibility info.
if (have_tuple_lock)
UnlockTupleTuplock(relation, tid, mode);
- return HeapTupleMayBeUpdated;
+ return result;
}
/*
heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
LockTupleMode mode)
{
+ HTSU_Result result;
ItemPointerData tupid;
HeapTupleData mytup;
Buffer buf;
TransactionId xmax,
new_xmax;
TransactionId priorXmax = InvalidTransactionId;
+ bool cleared_all_frozen = false;
+ Buffer vmbuffer = InvalidBuffer;
+ BlockNumber block;
ItemPointerCopy(tid, &tupid);
{
new_infomask = 0;
new_xmax = InvalidTransactionId;
+ block = ItemPointerGetBlockNumber(&tupid);
ItemPointerCopy(&tupid, &(mytup.t_self));
if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false, NULL))
l4:
CHECK_FOR_INTERRUPTS();
+
+ /*
+ * Before locking the buffer, pin the visibility map page if it appears
+ * to be necessary. XXX: the page's all-visible status might change
+ * after the lock below is acquired. We don't yet deal with that case.
+ */
+ if (PageIsAllVisible(BufferGetPage(buf)))
+ visibilitymap_pin(rel, block, &vmbuffer);
+ else if (BufferIsValid(vmbuffer))
+ {
+ /* release any pin left over from a previous pass through this loop */
+ ReleaseBuffer(vmbuffer);
+ vmbuffer = InvalidBuffer;
+ }
+
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/*
!TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
priorXmax))
{
- UnlockReleaseBuffer(buf);
- return HeapTupleMayBeUpdated;
+ result = HeapTupleMayBeUpdated;
+ goto out_locked;
}
old_infomask = mytup.t_data->t_infomask;
HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
for (i = 0; i < nmembers; i++)
{
- HTSU_Result res;
-
- res = test_lockmode_for_conflict(members[i].status,
- members[i].xid,
- mode, &needwait);
+ result = test_lockmode_for_conflict(members[i].status,
+ members[i].xid,
+ mode, &needwait);
if (needwait)
{
pfree(members);
goto l4;
}
- if (res != HeapTupleMayBeUpdated)
+ if (result != HeapTupleMayBeUpdated)
{
- UnlockReleaseBuffer(buf);
pfree(members);
- return res;
+ goto out_locked;
}
}
if (members)
}
else
{
- HTSU_Result res;
MultiXactStatus status;
/*
status = MultiXactStatusNoKeyUpdate;
}
- res = test_lockmode_for_conflict(status, rawxmax, mode,
- &needwait);
+ result = test_lockmode_for_conflict(status, rawxmax, mode,
+ &needwait);
if (needwait)
{
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
XLTW_LockUpdated);
goto l4;
}
- if (res != HeapTupleMayBeUpdated)
+ if (result != HeapTupleMayBeUpdated)
{
- UnlockReleaseBuffer(buf);
- return res;
+ goto out_locked;
}
}
}
xid, mode, false,
&new_xmax, &new_infomask, &new_infomask2);
+ if (PageIsAllVisible(BufferGetPage(buf)) &&
+ visibilitymap_clear(rel, block, vmbuffer,
+ VISIBILITYMAP_ALL_FROZEN))
+ cleared_all_frozen = true;
+
START_CRIT_SECTION();
/* ... and set them */
xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
xlrec.xmax = new_xmax;
xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
+ xlrec.flags =
+ cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
XLogRegisterData((char *) &xlrec, SizeOfHeapLockUpdated);
ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
HeapTupleHeaderIsOnlyLocked(mytup.t_data))
{
- UnlockReleaseBuffer(buf);
- return HeapTupleMayBeUpdated;
+ result = HeapTupleMayBeUpdated;
+ goto out_locked;
}
/* tail recursion */
priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
UnlockReleaseBuffer(buf);
+ if (vmbuffer != InvalidBuffer)
+ {
+ ReleaseBuffer(vmbuffer);
+ /* forget the pin so the next iteration doesn't reuse a stale buffer */
+ vmbuffer = InvalidBuffer;
+ }
}
+
+ result = HeapTupleMayBeUpdated;
+
+out_locked:
+ UnlockReleaseBuffer(buf);
+
+ if (vmbuffer != InvalidBuffer)
+ ReleaseBuffer(vmbuffer);
+
+ return result;
}
/*
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
- visibilitymap_clear(reln, blkno, vmbuffer);
+ visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
}
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
- visibilitymap_clear(reln, blkno, vmbuffer);
+ visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
}
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
- visibilitymap_clear(reln, blkno, vmbuffer);
+ visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
}
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, oldblk, &vmbuffer);
- visibilitymap_clear(reln, oldblk, vmbuffer);
+ visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
}
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, newblk, &vmbuffer);
- visibilitymap_clear(reln, newblk, vmbuffer);
+ visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
}
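/*
 * Editorial note (not part of the patch): the five redo hunks above mirror
 * the main-path calls -- replaying an insert, delete, or update makes the
 * page not-all-visible, so recovery must drop both bits at once:
 *
 *     visibilitymap_pin(reln, blkno, &vmbuffer);
 *     visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
 *
 * By contrast, replaying a tuple lock (below) only needs to drop the
 * all-frozen bit.
 */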
ItemId lp = NULL;
HeapTupleHeader htup;
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
+ {
+ RelFileNode rnode;
+ Buffer vmbuffer = InvalidBuffer;
+ BlockNumber block;
+ Relation reln;
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
+ reln = CreateFakeRelcacheEntry(rnode);
+
+ visibilitymap_pin(reln, block, &vmbuffer);
+ visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
+
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
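/*
 * Editorial note (not part of the patch): the fixup above runs *before*
 * XLogReadBufferForRedo() on purpose -- if the heap block is restored from a
 * full-page image, BLK_NEEDS_REDO is never returned, yet the visibility map
 * lives in a separate fork that the image does not cover.  Since there is no
 * relcache during recovery, a fake entry supplies just enough state
 * (essentially the RelFileNode) for buffer access:
 *
 *     reln = CreateFakeRelcacheEntry(rnode);
 *     visibilitymap_pin(reln, block, &vmbuffer);
 *     visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
 *     ReleaseBuffer(vmbuffer);
 *     FreeFakeRelcacheEntry(reln);
 */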
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
{
page = (Page) BufferGetPage(buffer);
xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
+ {
+ RelFileNode rnode;
+ Buffer vmbuffer = InvalidBuffer;
+ BlockNumber block;
+ Relation reln;
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
+ reln = CreateFakeRelcacheEntry(rnode);
+
+ visibilitymap_pin(reln, block, &vmbuffer);
+ visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
+
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);