new_xmax;
TransactionId priorXmax = InvalidTransactionId;
bool cleared_all_frozen = false;
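+ /* did we pin the visibility map page for the block we want to lock? */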
+ bool pinned_desired_page;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
* chain, and there's no further tuple to lock: return success to
* caller.
*/
- return HeapTupleMayBeUpdated;
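+ /* no heap buffer is held here, so skip out_locked; just drop any vmbuffer pin */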
+ result = HeapTupleMayBeUpdated;
+ goto out_unlocked;
}
l4:
* to recheck after we have the lock.
*/
if (PageIsAllVisible(BufferGetPage(buf)))
+ {
visibilitymap_pin(rel, block, &vmbuffer);
+ pinned_desired_page = true;
+ }
else
- vmbuffer = InvalidBuffer;
+ pinned_desired_page = false;
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
* all visible while we were busy locking the buffer, we'll have to
* unlock and re-lock, to avoid holding the buffer lock across I/O.
* That's a bit unfortunate, but hopefully shouldn't happen often.
+ *
+ * Note: in some paths through this function, we will reach here
+ * holding a pin on a vm page that may or may not be the one matching
+ * this page. If this page isn't all-visible, we won't use the vm
+ * page, but we hold onto such a pin till the end of the function.
*/
- if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf)))
+ if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
{
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
visibilitymap_pin(rel, block, &vmbuffer);
*/
if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
{
- UnlockReleaseBuffer(buf);
- return HeapTupleMayBeUpdated;
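+ /* the buffer is still locked here; out_locked unlocks and releases it */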
+ result = HeapTupleMayBeUpdated;
+ goto out_locked;
}
old_infomask = mytup.t_data->t_infomask;
priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
UnlockReleaseBuffer(buf);
- if (vmbuffer != InvalidBuffer)
- ReleaseBuffer(vmbuffer);
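+ /* any vmbuffer pin is carried across iterations and dropped at out_unlocked */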
}
result = HeapTupleMayBeUpdated;
out_locked:
UnlockReleaseBuffer(buf);
+out_unlocked:
if (vmbuffer != InvalidBuffer)
ReleaseBuffer(vmbuffer);
return result;
-
}
/*