Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r--  src/backend/access/heap/heapam.c  83
1 file changed, 21 insertions, 62 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index d0e39b1f8db..698d08d1d39 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -120,7 +120,7 @@ static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
uint16 t_infomask);
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
- LockTupleMode lockmode, bool *current_is_member);
+ LockTupleMode lockmode);
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
Relation rel, ItemPointer ctid, XLTW_Oper oper,
int *remaining);
@@ -3161,20 +3161,15 @@ l1:
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- bool current_is_member = false;
-
+ /* wait for multixact */
if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
- LockTupleExclusive, &current_is_member))
+ LockTupleExclusive))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- /*
- * Acquire the lock, if necessary (but skip it when we're
- * requesting a lock and already have one; avoids deadlock).
- */
- if (!current_is_member)
- heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
- LockWaitBlock, &have_tuple_lock);
+ /* acquire tuple lock, if necessary */
+ heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
+ LockWaitBlock, &have_tuple_lock);
/* wait for multixact */
MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
@@ -3773,20 +3768,15 @@ l2:
{
TransactionId update_xact;
int remain;
- bool current_is_member = false;
if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
- *lockmode, &current_is_member))
+ *lockmode))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- /*
- * Acquire the lock, if necessary (but skip it when we're
- * requesting a lock and already have one; avoids deadlock).
- */
- if (!current_is_member)
- heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
- LockWaitBlock, &have_tuple_lock);
+ /* acquire tuple lock, if necessary */
+ heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+ LockWaitBlock, &have_tuple_lock);
/* wait for multixact */
MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
@@ -4756,7 +4746,6 @@ l3:
uint16 infomask;
uint16 infomask2;
bool require_sleep;
- bool skip_tuple_lock = false;
ItemPointerData t_ctid;
/* must copy state data before unlocking buffer */
@@ -4810,21 +4799,6 @@ l3:
result = HeapTupleMayBeUpdated;
goto out_unlocked;
}
- else
- {
- /*
- * Disable acquisition of the heavyweight tuple lock.
- * Otherwise, when promoting a weaker lock, we might
- * deadlock with another locker that has acquired the
- * heavyweight tuple lock and is waiting for our
- * transaction to finish.
- *
- * Note that in this case we still need to wait for
- * the multixact if required, to avoid acquiring
- * conflicting locks.
- */
- skip_tuple_lock = true;
- }
}
if (members)
@@ -4979,7 +4953,7 @@ l3:
if (infomask & HEAP_XMAX_IS_MULTI)
{
if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
- mode, NULL))
+ mode))
{
/*
* No conflict, but if the xmax changed under us in the
@@ -5056,15 +5030,13 @@ l3:
/*
* Acquire tuple lock to establish our priority for the tuple, or
* die trying. LockTuple will release us when we are next-in-line
- * for the tuple. We must do this even if we are share-locking,
- * but not if we already have a weaker lock on the tuple.
+ * for the tuple. We must do this even if we are share-locking.
*
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while
* rechecking tuple state.
*/
- if (!skip_tuple_lock &&
- !heap_acquire_tuplock(relation, tid, mode, wait_policy,
+ if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
&have_tuple_lock))
{
/*
@@ -7242,13 +7214,10 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
* tuple lock of the given strength?
*
* The passed infomask pairs up with the given multixact in the tuple header.
- *
- * If current_is_member is not NULL, it is set to 'true' if the current
- * transaction is a member of the given multixact.
*/
static bool
DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
- LockTupleMode lockmode, bool *current_is_member)
+ LockTupleMode lockmode)
{
int nmembers;
MultiXactMember *members;
@@ -7269,26 +7238,17 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
TransactionId memxid;
LOCKMODE memlockmode;
- if (result && (current_is_member == NULL || *current_is_member))
- break;
-
memlockmode = LOCKMODE_from_mxstatus(members[i].status);
- /* ignore members from current xact (but track their presence) */
- memxid = members[i].xid;
- if (TransactionIdIsCurrentTransactionId(memxid))
- {
- if (current_is_member != NULL)
- *current_is_member = true;
- continue;
- }
- else if (result)
- continue;
-
/* ignore members that don't conflict with the lock we want */
if (!DoLockModesConflict(memlockmode, wanted))
continue;
+ /* ignore members from current xact */
+ memxid = members[i].xid;
+ if (TransactionIdIsCurrentTransactionId(memxid))
+ continue;
+
if (ISUPDATE_from_mxstatus(members[i].status))
{
/* ignore aborted updaters */
@@ -7305,11 +7265,10 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
/*
* Whatever remains are either live lockers that conflict with our
* wanted lock, and updaters that are not aborted. Those conflict
- * with what we want. Set up to return true, but keep going to
- * look for the current transaction among the multixact members,
- * if needed.
+ * with what we want, so return true.
*/
result = true;
+ break;
}
pfree(members);
}