Fix VM buffer pin management in heap_lock_updated_tuple_rec().
author    Tom Lane <tgl@sss.pgh.pa.us>  Fri, 2 Mar 2018 22:40:48 +0000 (17:40 -0500)
committer Tom Lane <tgl@sss.pgh.pa.us>  Fri, 2 Mar 2018 22:40:48 +0000 (17:40 -0500)
Sloppy coding in this function could lead to leaking a VM buffer pin,
or to attempting to free the same pin twice.  Repair.  While at it,
reduce the code's tendency to free and reacquire the same page pin.

Back-patch to 9.6; before that, this routine did not concern itself
with VM pages.

Amit Kapila and Tom Lane

Discussion: https://postgr.es/m/CAA4eK1KJKwhc=isgTQHjM76CAdVswzNeAuZkh_cx-6QgGkSEgA@mail.gmail.com
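
The shape of the fix, visible in the diff below, is to funnel every exit from the function through two cleanup labels: out_locked for paths that still hold the heap page pin, out_unlocked for the rest. The VM buffer is then released at exactly one place, so no path can leak it or release it twice. A minimal, self-contained C sketch of that single-exit pattern follows; the names (walk_chain, pin_buffer, vm pin) are hypothetical stand-ins, not PostgreSQL code.

#include <stdbool.h>
#include <stdio.h>

#define INVALID_BUFFER (-1)

static int pins_held = 0;                   /* outstanding pin count */

static int  pin_buffer(int blk)   { pins_held++; return blk; }
static void release_buffer(int b) { (void) b; pins_held--; }

/*
 * Walk a chain of blocks, pinning each in turn and sometimes holding a
 * second ("VM") pin across iterations.  Every exit funnels through the
 * two labels at the bottom, so the VM pin is released exactly once.
 */
static bool
walk_chain(const int *blocks, int nblocks)
{
    bool result;
    int  vmbuffer = INVALID_BUFFER;
    int  buf = INVALID_BUFFER;

    for (int i = 0; i < nblocks; i++)
    {
        if (blocks[i] < 0)                  /* end of chain: succeed */
        {
            result = true;
            goto out_unlocked;              /* no page pin held here */
        }

        buf = pin_buffer(blocks[i]);

        if (blocks[i] % 7 == 0)             /* some condition ends the walk */
        {
            result = true;
            goto out_locked;                /* buf is still pinned */
        }

        /* keep a VM pin across iterations; re-pin only when it changes */
        if (blocks[i] % 2 == 0 && vmbuffer != blocks[i])
        {
            if (vmbuffer != INVALID_BUFFER)
                release_buffer(vmbuffer);
            vmbuffer = pin_buffer(blocks[i]);
        }

        release_buffer(buf);                /* page pin dropped; VM pin kept */
    }

    result = true;
    goto out_unlocked;

out_locked:
    release_buffer(buf);

out_unlocked:
    if (vmbuffer != INVALID_BUFFER)
        release_buffer(vmbuffer);           /* the single release point */

    return result;
}

int
main(void)
{
    int chain[] = {2, 3, 4, -1};

    walk_chain(chain, 4);
    printf("pins still held: %d\n", pins_held);  /* expect 0 */
    return 0;
}

In the pre-patch code, the early return converted below had no such funnel: a VM pin carried over from an earlier loop iteration simply escaped.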

src/backend/access/heap/heapam.c

index dc762f913d2503579e875c31d58a82d898b87b0a..302f8c63ad638572e621bec0d470a373ad524d8d 100644
@@ -5677,6 +5677,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
                                new_xmax;
        TransactionId priorXmax = InvalidTransactionId;
        bool            cleared_all_frozen = false;
+       bool            pinned_desired_page;
        Buffer          vmbuffer = InvalidBuffer;
        BlockNumber block;
 
@@ -5698,7 +5699,8 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
                         * chain, and there's no further tuple to lock: return success to
                         * caller.
                         */
-                       return HeapTupleMayBeUpdated;
+                       result = HeapTupleMayBeUpdated;
+                       goto out_unlocked;
                }
 
 l4:
@@ -5711,9 +5713,12 @@ l4:
                 * to recheck after we have the lock.
                 */
                if (PageIsAllVisible(BufferGetPage(buf)))
+               {
                        visibilitymap_pin(rel, block, &vmbuffer);
+                       pinned_desired_page = true;
+               }
                else
-                       vmbuffer = InvalidBuffer;
+                       pinned_desired_page = false;
 
                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
@@ -5722,8 +5727,13 @@ l4:
                 * all visible while we were busy locking the buffer, we'll have to
                 * unlock and re-lock, to avoid holding the buffer lock across I/O.
                 * That's a bit unfortunate, but hopefully shouldn't happen often.
+                *
+                * Note: in some paths through this function, we will reach here
+                * holding a pin on a vm page that may or may not be the one matching
+                * this page.  If this page isn't all-visible, we won't use the vm
+                * page, but we hold onto such a pin till the end of the function.
                 */
-               if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf)))
+               if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
                {
                        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                        visibilitymap_pin(rel, block, &vmbuffer);
@@ -5749,8 +5759,8 @@ l4:
                 */
                if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
                {
-                       UnlockReleaseBuffer(buf);
-                       return HeapTupleMayBeUpdated;
+                       result = HeapTupleMayBeUpdated;
+                       goto out_locked;
                }
 
                old_infomask = mytup.t_data->t_infomask;
@@ -5957,8 +5967,6 @@ next:
                priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
                ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
                UnlockReleaseBuffer(buf);
-               if (vmbuffer != InvalidBuffer)
-                       ReleaseBuffer(vmbuffer);
        }
 
        result = HeapTupleMayBeUpdated;
@@ -5966,11 +5974,11 @@ next:
 out_locked:
        UnlockReleaseBuffer(buf);
 
+out_unlocked:
        if (vmbuffer != InvalidBuffer)
                ReleaseBuffer(vmbuffer);
 
        return result;
-
 }
 
 /*
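
The other half of the bug is the removed line "vmbuffer = InvalidBuffer;" at the top of the loop: it clobbered, and thereby leaked, a pin carried over from a previous iteration. The patch instead records in the new pinned_desired_page flag whether the held pin matches the current page, and relies on visibilitymap_pin() to swap pins when it is handed a stale one. A runnable toy model of that flag; vm_pin() is a hypothetical stand-in for visibilitymap_pin(), which (as I understand it) releases a non-matching pin before acquiring the new one.

#include <stdbool.h>
#include <stdio.h>

#define INVALID_BUFFER (-1)

static int pins_held = 0;

/* Stand-in for visibilitymap_pin(): drops any pin on a non-matching
 * page before pinning the requested one, reuses a matching pin. */
static void
vm_pin(int block, int *vmbuffer)
{
    if (*vmbuffer != INVALID_BUFFER && *vmbuffer != block)
        pins_held--;                /* release the stale pin */
    if (*vmbuffer != block)
        pins_held++;                /* acquire the new pin */
    *vmbuffer = block;
}

int
main(void)
{
    bool all_visible[] = {true, false, true};
    int  vmbuffer = INVALID_BUFFER;

    for (int block = 0; block < 3; block++)
    {
        bool pinned_desired_page;   /* the flag the patch introduces */

        if (all_visible[block])
        {
            vm_pin(block, &vmbuffer);
            pinned_desired_page = true;
        }
        else
            pinned_desired_page = false;
        /* pre-patch code did "vmbuffer = INVALID_BUFFER;" in the else
         * branch, leaking whatever pin was still held */

        (void) pinned_desired_page;
    }

    if (vmbuffer != INVALID_BUFFER)
        pins_held--;                /* single release at function exit */

    printf("pins still held: %d\n", pins_held);  /* expect 0 */
    return 0;
}

Keeping the possibly-mismatched pin until the end of the function, rather than releasing and re-pinning each iteration, is also what the new comment in the diff describes and what the commit message means by reducing the tendency to free and reacquire the same page pin.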