Fix VM buffer pin management in heap_lock_updated_tuple_rec().
author     Tom Lane <tgl@sss.pgh.pa.us>
           Fri, 2 Mar 2018 22:40:48 +0000 (17:40 -0500)
committer  Tom Lane <tgl@sss.pgh.pa.us>
           Fri, 2 Mar 2018 22:40:48 +0000 (17:40 -0500)
Sloppy coding in this function could lead to leaking a VM buffer pin,
or to attempting to free the same pin twice.  Repair.  While at it,
reduce the code's tendency to free and reacquire the same page pin.

Back-patch to 9.6; before that, this routine did not concern itself
with VM pages.

Amit Kapila and Tom Lane

Discussion: https://postgr.es/m/CAA4eK1KJKwhc=isgTQHjM76CAdVswzNeAuZkh_cx-6QgGkSEgA@mail.gmail.com
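For readers skimming the diff below, the shape of the repair may be easier to see in
isolation.  The following standalone C sketch is not PostgreSQL source: pin_vm_page(),
release_buffer(), walk_chain(), and the mock predicates are invented stand-ins for
visibilitymap_pin(), ReleaseBuffer(), and the update-chain loop.  It models the three
elements of the fix: a pinned_desired_page flag instead of the old pin-clobbering
"vmbuffer = InvalidBuffer" assignment, a pin that survives across loop iterations,
and early exits that funnel through a single cleanup point.

/* pin_sketch.c — build: cc -std=c99 -o pin_sketch pin_sketch.c */
#include <stdbool.h>
#include <stdio.h>

#define InvalidBuffer 0

static int pins_held = 0;               /* mock pin refcount, not PG's */

/* Stand-in for visibilitymap_pin(): keeps the pin if it already matches,
 * otherwise swaps the stale pin for the desired one. */
static void pin_vm_page(int block, int *vmbuffer)
{
    int     want = block + 1;           /* fake per-block vm buffer id */

    if (*vmbuffer == want)
        return;
    if (*vmbuffer != InvalidBuffer)
        pins_held--;                    /* drop the stale pin */
    pins_held++;
    *vmbuffer = want;
}

static void release_buffer(int buf)     /* stand-in for ReleaseBuffer() */
{
    (void) buf;
    pins_held--;
}

static bool block_is_all_visible(int block)
{
    return (block % 2) == 0;            /* arbitrary mock predicate */
}

/* Walks a mock update chain, mirroring the fixed control flow. */
static bool walk_chain(int chain_len)
{
    int     vmbuffer = InvalidBuffer;
    bool    result;

    for (int block = 0; block < chain_len; block++)
    {
        bool    pinned_desired_page;

        if (block_is_all_visible(block))
        {
            pin_vm_page(block, &vmbuffer);
            pinned_desired_page = true;
        }
        else
            pinned_desired_page = false;    /* keep any stale pin for now */

        /* ... lock the heap buffer, then recheck ... */
        if (!pinned_desired_page && block_is_all_visible(block))
            pin_vm_page(block, &vmbuffer);  /* became all-visible meanwhile */

        if (block == chain_len - 1)         /* mock "end of chain" exit */
        {
            result = true;
            goto out;
        }
    }
    result = true;

out:
    if (vmbuffer != InvalidBuffer)
        release_buffer(vmbuffer);           /* exactly one release, one place */
    return result;
}

int main(void)
{
    walk_chain(5);
    printf("pins still held: %d\n", pins_held);     /* prints 0: no leak */
    return 0;
}

The single exit is what guarantees the pin is dropped exactly once regardless of
which branch leaves the loop: as the diff shows, the old code paired an in-loop
ReleaseBuffer(vmbuffer) with another release at the end of the function, which is
the opening for the double-free the commit message describes, while the early
"return HeapTupleMayBeUpdated" paths released nothing at all.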

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f4a55d0688d9fc96a21f2597ea6cdc9ec0a5cd2f..ad4cdedcffa7013091dd6ab02bde85a8dbd2a577 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -5661,6 +5661,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
                                new_xmax;
        TransactionId priorXmax = InvalidTransactionId;
        bool            cleared_all_frozen = false;
+       bool            pinned_desired_page;
        Buffer          vmbuffer = InvalidBuffer;
        BlockNumber block;
 
@@ -5682,7 +5683,8 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
                         * chain, and there's no further tuple to lock: return success to
                         * caller.
                         */
-                       return HeapTupleMayBeUpdated;
+                       result = HeapTupleMayBeUpdated;
+                       goto out_unlocked;
                }
 
 l4:
@@ -5695,9 +5697,12 @@ l4:
                 * to recheck after we have the lock.
                 */
                if (PageIsAllVisible(BufferGetPage(buf)))
+               {
                        visibilitymap_pin(rel, block, &vmbuffer);
+                       pinned_desired_page = true;
+               }
                else
-                       vmbuffer = InvalidBuffer;
+                       pinned_desired_page = false;
 
                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
@@ -5706,8 +5711,13 @@ l4:
                 * all visible while we were busy locking the buffer, we'll have to
                 * unlock and re-lock, to avoid holding the buffer lock across I/O.
                 * That's a bit unfortunate, but hopefully shouldn't happen often.
+                *
+                * Note: in some paths through this function, we will reach here
+                * holding a pin on a vm page that may or may not be the one matching
+                * this page.  If this page isn't all-visible, we won't use the vm
+                * page, but we hold onto such a pin till the end of the function.
                 */
-               if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf)))
+               if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
                {
                        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                        visibilitymap_pin(rel, block, &vmbuffer);
@@ -5733,8 +5743,8 @@ l4:
                 */
                if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
                {
-                       UnlockReleaseBuffer(buf);
-                       return HeapTupleMayBeUpdated;
+                       result = HeapTupleMayBeUpdated;
+                       goto out_locked;
                }
 
                old_infomask = mytup.t_data->t_infomask;
@@ -5941,8 +5951,6 @@ next:
                priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
                ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
                UnlockReleaseBuffer(buf);
-               if (vmbuffer != InvalidBuffer)
-                       ReleaseBuffer(vmbuffer);
        }
 
        result = HeapTupleMayBeUpdated;
@@ -5950,11 +5958,11 @@ next:
 out_locked:
        UnlockReleaseBuffer(buf);
 
+out_unlocked:
        if (vmbuffer != InvalidBuffer)
                ReleaseBuffer(vmbuffer);
 
        return result;
-
 }
 
 /*