author     Tom Lane <tgl@sss.pgh.pa.us>   2018-03-02 17:40:48 -0500
committer  Tom Lane <tgl@sss.pgh.pa.us>   2018-03-02 17:40:48 -0500
commit     76ec45756644e5d46301ee292f8f951f9f835fae (patch)
tree       06773ed376da9bb91223958dbcb7b308d6f82b00
parent     ccd650430db6168aaaae0b28702e11caf7781bf4 (diff)
Fix VM buffer pin management in heap_lock_updated_tuple_rec().
Sloppy coding in this function could lead to leaking a VM buffer pin, or to attempting to free the same pin twice.  Repair.  While at it, reduce the code's tendency to free and reacquire the same page pin.

Back-patch to 9.6; before that, this routine did not concern itself with VM pages.

Amit Kapila and Tom Lane

Discussion: https://postgr.es/m/CAA4eK1KJKwhc=isgTQHjM76CAdVswzNeAuZkh_cx-6QgGkSEgA@mail.gmail.com
-rw-r--r--  src/backend/access/heap/heapam.c | 24
1 file changed, 16 insertions(+), 8 deletions(-)
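
Before reading the diff, here is a minimal, self-contained sketch of the single-exit cleanup pattern the patch moves this routine toward.  It is plain C with made-up acquire_pin/release_pin helpers standing in for PostgreSQL's buffer-pin calls, not the actual heapam.c code: every early return becomes a goto to one exit point where the pin, if held, is released exactly once, so no path can leak it or release it twice.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for buffer pins; not PostgreSQL's real API. */
    typedef int Pin;
    #define INVALID_PIN (-1)

    static Pin  acquire_pin(int block) { return block; }            /* pretend pin */
    static void release_pin(Pin p)     { printf("released pin %d\n", p); }

    /*
     * Single-exit pattern: early exits jump to "out" instead of returning
     * directly, so the optional pin is released exactly once, whichever
     * path runs.
     */
    static int
    process_block(int block, bool need_pin, bool early_exit)
    {
        int  result;
        Pin  pin = INVALID_PIN;

        if (need_pin)
            pin = acquire_pin(block);

        if (early_exit)
        {
            /* Previously this might have been a bare "return 0;", leaking the pin. */
            result = 0;
            goto out;
        }

        /* ... main work would go here ... */
        result = 1;

    out:
        if (pin != INVALID_PIN)
            release_pin(pin);
        return result;
    }

    int
    main(void)
    {
        process_block(42, true, true);      /* early exit still releases the pin */
        process_block(7, true, false);      /* normal path releases it once */
        return 0;
    }

In the patch itself this shows up as the new out_unlocked label and the conversion of the early "return HeapTupleMayBeUpdated;" paths into goto out_locked / goto out_unlocked.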
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 94180c8733c..fee062924a9 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -5635,6 +5635,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
new_xmax;
TransactionId priorXmax = InvalidTransactionId;
bool cleared_all_frozen = false;
+ bool pinned_desired_page;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
@@ -5656,7 +5657,8 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
* chain, and there's no further tuple to lock: return success to
* caller.
*/
- return HeapTupleMayBeUpdated;
+ result = HeapTupleMayBeUpdated;
+ goto out_unlocked;
}
l4:
@@ -5669,9 +5671,12 @@ l4:
* to recheck after we have the lock.
*/
if (PageIsAllVisible(BufferGetPage(buf)))
+ {
visibilitymap_pin(rel, block, &vmbuffer);
+ pinned_desired_page = true;
+ }
else
- vmbuffer = InvalidBuffer;
+ pinned_desired_page = false;
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -5680,8 +5685,13 @@ l4:
* all visible while we were busy locking the buffer, we'll have to
* unlock and re-lock, to avoid holding the buffer lock across I/O.
* That's a bit unfortunate, but hopefully shouldn't happen often.
+ *
+ * Note: in some paths through this function, we will reach here
+ * holding a pin on a vm page that may or may not be the one matching
+ * this page. If this page isn't all-visible, we won't use the vm
+ * page, but we hold onto such a pin till the end of the function.
*/
- if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf)))
+ if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
{
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
visibilitymap_pin(rel, block, &vmbuffer);
@@ -5707,8 +5717,8 @@ l4:
*/
if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
{
- UnlockReleaseBuffer(buf);
- return HeapTupleMayBeUpdated;
+ result = HeapTupleMayBeUpdated;
+ goto out_locked;
}
old_infomask = mytup.t_data->t_infomask;
@@ -5915,8 +5925,6 @@ next:
priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
UnlockReleaseBuffer(buf);
- if (vmbuffer != InvalidBuffer)
- ReleaseBuffer(vmbuffer);
}
result = HeapTupleMayBeUpdated;
@@ -5924,11 +5932,11 @@ next:
out_locked:
UnlockReleaseBuffer(buf);
+out_unlocked:
if (vmbuffer != InvalidBuffer)
ReleaseBuffer(vmbuffer);
return result;
-
}
/*
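
The other half of the change, visible in the hunks above, is keeping the visibility-map pin across iterations of the update-chain loop instead of releasing and re-pinning it each time, with the new pinned_desired_page flag recording whether the pin currently held was taken for the page being processed.  The sketch below illustrates that idea under the same caveat: vm_pin, page_is_all_visible, and the block-to-VM-page mapping are invented for the example and only mimic visibilitymap_pin()'s behavior of swapping pins when the covered page changes.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical helpers; stand-ins for visibilitymap_pin()/ReleaseBuffer(). */
    typedef int VmPin;
    #define INVALID_VMPIN (-1)

    static bool page_is_all_visible(int block) { return block % 2 == 0; }  /* fake check */

    static VmPin
    vm_pin(int block, VmPin cur)
    {
        int  page = block / 100;        /* many heap blocks share one VM page */

        if (cur == page)
            return cur;                 /* already hold the right VM page */
        if (cur != INVALID_VMPIN)
            printf("release vm pin %d\n", cur);
        printf("pin vm page %d\n", page);
        return page;
    }

    /*
     * Walk a chain of blocks.  The VM pin, once taken, is kept until the end
     * rather than being released and re-acquired each iteration; a
     * per-iteration flag records whether the pin we hold was taken for the
     * current block.
     */
    static void
    walk_chain(const int *blocks, int nblocks)
    {
        VmPin   vmpin = INVALID_VMPIN;

        for (int i = 0; i < nblocks; i++)
        {
            int     block = blocks[i];
            bool    pinned_desired_page;

            if (page_is_all_visible(block))
            {
                vmpin = vm_pin(block, vmpin);
                pinned_desired_page = true;
            }
            else
                pinned_desired_page = false;    /* any old pin is simply kept */

            /* Re-check (as the real code does after locking the buffer). */
            if (!pinned_desired_page && page_is_all_visible(block))
                vmpin = vm_pin(block, vmpin);

            /* ... per-block work ... */
        }

        if (vmpin != INVALID_VMPIN)
            printf("release vm pin %d once, at the end\n", vmpin);
    }

    int
    main(void)
    {
        int chain[] = {4, 6, 250};      /* blocks 4 and 6 reuse one VM page pin */

        walk_chain(chain, 3);
        return 0;
    }

This is why the patch deletes the per-iteration ReleaseBuffer(vmbuffer) call and releases the pin once at out_unlocked, accepting (per the new comment) that the pin held there may cover a VM page other than the one matching the current heap page.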