Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/heap/README.tuplock | 10
-rw-r--r--  src/backend/access/heap/heapam.c       | 83
2 files changed, 72 insertions(+), 21 deletions(-)
diff --git a/src/backend/access/heap/README.tuplock b/src/backend/access/heap/README.tuplock
index b2f3a4ce909..d03ddf6cdcc 100644
--- a/src/backend/access/heap/README.tuplock
+++ b/src/backend/access/heap/README.tuplock
@@ -36,6 +36,16 @@ do LockTuple as well, if there is any conflict, to ensure that they don't
starve out waiting exclusive-lockers. However, if there is not any active
conflict for a tuple, we don't incur any extra overhead.
+We make an exception to the above rule for those lockers that already hold
+some lock on a tuple and attempt to acquire a stronger one on it. In that
+case, we skip the LockTuple() call even when there are conflicts, provided
+that the target tuple is being locked, updated or deleted by multiple sessions
+concurrently. Failing to skip the lock would risk a deadlock, e.g., between a
+session that was first to record its weaker lock in the tuple header and would
+be waiting on the LockTuple() call to upgrade to the stronger lock level, and
+another session that has already done LockTuple() and is waiting for the first
+session transaction to release its tuple header-level lock.
+
We provide four levels of tuple locking strength: SELECT FOR UPDATE obtains an
exclusive lock which prevents any kind of modification of the tuple. This is
the lock level that is implicitly taken by DELETE operations, and also by
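
The README rule added above corresponds to the calling pattern that the heapam.c
changes below apply in heap_delete() and heap_update(). The following is a
condensed sketch, not part of the patch: the function name is made up, error
handling is omitted, it assumes the static helpers of heapam.c
(DoesMultiXactIdConflict, heap_acquire_tuplock, MultiXactIdWait) are in scope,
and the status/oper arguments follow the heap_delete hunk (heap_update passes
its own).

/*
 * Sketch: wait out a conflicting multixact, skipping the heavyweight
 * tuple lock when our transaction already holds a weaker lock on the
 * tuple (i.e. it is already a member of the multixact).
 */
static void
wait_for_conflicting_multixact_sketch(Relation relation, HeapTuple tup,
                                      Buffer buffer, MultiXactId xwait,
                                      uint16 infomask, LockTupleMode mode,
                                      bool *have_tuple_lock)
{
    bool        current_is_member = false;

    if (DoesMultiXactIdConflict(xwait, infomask, mode, &current_is_member))
    {
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

        /*
         * Queue on the tuple lock so we don't starve other waiters, unless
         * we already hold a weaker lock recorded in the tuple header; in
         * that case LockTuple() could deadlock against a session that holds
         * the tuple lock and is waiting for our transaction to finish.
         */
        if (!current_is_member)
            heap_acquire_tuplock(relation, &(tup->t_self), mode,
                                 LockWaitBlock, have_tuple_lock);

        /* Either way, still wait for the conflicting multixact members. */
        MultiXactIdWait(xwait, MultiXactStatusUpdate, infomask,
                        relation, &(tup->t_self), XLTW_Delete, NULL);
    }
}
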
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 8ac0f8a5134..d768b9b061c 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -95,7 +95,7 @@ static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
uint16 t_infomask);
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
- LockTupleMode lockmode);
+ LockTupleMode lockmode, bool *current_is_member);
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
Relation rel, ItemPointer ctid, XLTW_Oper oper,
int *remaining);
@@ -2547,15 +2547,20 @@ l1:
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- /* wait for multixact */
+ bool current_is_member = false;
+
if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
- LockTupleExclusive))
+ LockTupleExclusive, &current_is_member))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- /* acquire tuple lock, if necessary */
- heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
- LockWaitBlock, &have_tuple_lock);
+ /*
+ * Acquire the lock, if necessary (but skip it when we're
+ * requesting a lock and already have one; avoids deadlock).
+ */
+ if (!current_is_member)
+ heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
+ LockWaitBlock, &have_tuple_lock);
/* wait for multixact */
MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
@@ -3126,15 +3131,20 @@ l2:
{
TransactionId update_xact;
int remain;
+ bool current_is_member = false;
if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
- *lockmode))
+ *lockmode, &current_is_member))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- /* acquire tuple lock, if necessary */
- heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
- LockWaitBlock, &have_tuple_lock);
+ /*
+ * Acquire the lock, if necessary (but skip it when we're
+ * requesting a lock and already have one; avoids deadlock).
+ */
+ if (!current_is_member)
+ heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+ LockWaitBlock, &have_tuple_lock);
/* wait for multixact */
MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
@@ -3981,6 +3991,7 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
new_infomask,
new_infomask2;
bool first_time = true;
+ bool skip_tuple_lock = false;
bool have_tuple_lock = false;
bool cleared_all_frozen = false;
@@ -4081,6 +4092,21 @@ l3:
result = TM_Ok;
goto out_unlocked;
}
+ else
+ {
+ /*
+ * Disable acquisition of the heavyweight tuple lock.
+ * Otherwise, when promoting a weaker lock, we might
+ * deadlock with another locker that has acquired the
+ * heavyweight tuple lock and is waiting for our
+ * transaction to finish.
+ *
+ * Note that in this case we still need to wait for
+ * the multixact if required, to avoid acquiring
+ * conflicting locks.
+ */
+ skip_tuple_lock = true;
+ }
}
if (members)
@@ -4235,7 +4261,7 @@ l3:
if (infomask & HEAP_XMAX_IS_MULTI)
{
if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
- mode))
+ mode, NULL))
{
/*
* No conflict, but if the xmax changed under us in the
@@ -4312,13 +4338,15 @@ l3:
/*
* Acquire tuple lock to establish our priority for the tuple, or
* die trying. LockTuple will release us when we are next-in-line
- * for the tuple. We must do this even if we are share-locking.
+ * for the tuple. We must do this even if we are share-locking,
+ * but not if we already have a weaker lock on the tuple.
*
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while
* rechecking tuple state.
*/
- if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
+ if (!skip_tuple_lock &&
+ !heap_acquire_tuplock(relation, tid, mode, wait_policy,
&have_tuple_lock))
{
/*
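
heap_lock_tuple() reaches the same decision differently: during its first pass
over the multixact members (the l3 hunk further up) it notes whether the
current transaction is already a member and remembers that in skip_tuple_lock,
which then gates the heap_acquire_tuplock() call in the hunk just above. The
helper below is hypothetical and only illustrates that membership test; the
real code performs the scan inline, combined with the fast path that returns
TM_Ok when the held lock is already strong enough.

/*
 * Hypothetical helper: is the current transaction already a member of
 * the locking multixact?  heap_lock_tuple() does this scan inline on its
 * first pass and sets skip_tuple_lock when the answer is true.
 */
static bool
CurrentXactIsMemberSketch(MultiXactId multi, uint16 infomask)
{
    MultiXactMember *members;
    int         nmembers;
    int         i;
    bool        found = false;

    nmembers = GetMultiXactIdMembers(multi, &members, false,
                                     HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    for (i = 0; i < nmembers; i++)
    {
        if (TransactionIdIsCurrentTransactionId(members[i].xid))
        {
            found = true;
            break;
        }
    }
    if (nmembers > 0)
        pfree(members);

    return found;
}

With skip_tuple_lock set, the heavyweight LockTuple() queueing is bypassed,
while the MultiXactIdWait() further down is still reached, so genuinely
conflicting lockers are still waited for.
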
@@ -6516,10 +6544,13 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
* tuple lock of the given strength?
*
* The passed infomask pairs up with the given multixact in the tuple header.
+ *
+ * If current_is_member is not NULL, it is set to 'true' if the current
+ * transaction is a member of the given multixact.
*/
static bool
DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
- LockTupleMode lockmode)
+ LockTupleMode lockmode, bool *current_is_member)
{
int nmembers;
MultiXactMember *members;
@@ -6540,15 +6571,24 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
TransactionId memxid;
LOCKMODE memlockmode;
- memlockmode = LOCKMODE_from_mxstatus(members[i].status);
+ if (result && (current_is_member == NULL || *current_is_member))
+ break;
- /* ignore members that don't conflict with the lock we want */
- if (!DoLockModesConflict(memlockmode, wanted))
- continue;
+ memlockmode = LOCKMODE_from_mxstatus(members[i].status);
- /* ignore members from current xact */
+ /* ignore members from current xact (but track their presence) */
memxid = members[i].xid;
if (TransactionIdIsCurrentTransactionId(memxid))
+ {
+ if (current_is_member != NULL)
+ *current_is_member = true;
+ continue;
+ }
+ else if (result)
+ continue;
+
+ /* ignore members that don't conflict with the lock we want */
+ if (!DoLockModesConflict(memlockmode, wanted))
continue;
if (ISUPDATE_from_mxstatus(members[i].status))
@@ -6567,10 +6607,11 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
/*
* Whatever remains are either live lockers that conflict with our
* wanted lock, and updaters that are not aborted. Those conflict
- * with what we want, so return true.
+ * with what we want. Set up to return true, but keep going to
+ * look for the current transaction among the multixact members,
+ * if needed.
*/
result = true;
- break;
}
pfree(members);
}
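
Because this last hunk removes lines and re-adds them in a different order, the
member-scan loop is easier to follow reassembled. The view below puts the
post-patch loop body back together from the hunk; the surrounding declarations
(members, nmembers, wanted, result) are as in DoesMultiXactIdConflict, the
unchanged middle of the loop that the diff does not show is condensed into a
placeholder comment, and the extra annotations are mine, not the source's.

    for (i = 0; i < nmembers; i++)
    {
        TransactionId memxid;
        LOCKMODE    memlockmode;

        /*
         * Stop once both answers are known: a conflict has been found and,
         * if the caller asked for it, our own membership has been settled.
         */
        if (result && (current_is_member == NULL || *current_is_member))
            break;

        memlockmode = LOCKMODE_from_mxstatus(members[i].status);

        /* ignore members from current xact (but track their presence) */
        memxid = members[i].xid;
        if (TransactionIdIsCurrentTransactionId(memxid))
        {
            if (current_is_member != NULL)
                *current_is_member = true;
            continue;
        }
        else if (result)
            continue;       /* conflict already known; only membership left */

        /* ignore members that don't conflict with the lock we want */
        if (!DoLockModesConflict(memlockmode, wanted))
            continue;

        /*
         * ... unchanged checks elided: aborted updaters and lockers that
         * are no longer in progress are skipped here ...
         */

        /* A live conflicting locker or a non-aborted updater remains. */
        result = true;      /* no break: keep scanning for our own xact */
    }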