Diffstat (limited to 'src/backend/executor/nodeLockRows.c')
-rw-r--r--  src/backend/executor/nodeLockRows.c  161
1 files changed, 90 insertions, 71 deletions
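
The diff below factors the table_tuple_lock() call and its TM_Result handling out of ExecLockRows into a new helper, ExecLockTableTuple. A minimal sketch of the intended calling pattern, assuming an executor context with a resolved TID, destination slot, lock mode and wait policy already in hand (the surrounding variable names are illustrative; only ExecLockTableTuple's signature is taken from the patch):

    bool        epq_needed = false;

    /* Try to lock the tuple; if it cannot be locked (SKIP LOCKED, already
     * modified by this command, or concurrently deleted), skip the row. */
    if (!ExecLockTableTuple(relation, &tid, slot,
                            estate->es_snapshot, estate->es_output_cid,
                            lockmode, waitPolicy, &epq_needed))
        goto lnext;             /* as ExecLockRows does below */

    /* epq_needed is set when the lock had to chase an update chain to a
     * newer tuple version, meaning the plan quals must be rechecked with
     * EvalPlanQual before the row is returned. */
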
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 1a9dab25dd6..bbccafb2cfd 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -79,10 +79,7 @@ lnext:
Datum datum;
bool isNull;
ItemPointerData tid;
- TM_FailureData tmfd;
LockTupleMode lockmode;
- int lockflags = 0;
- TM_Result test;
TupleTableSlot *markSlot;
/* clear any leftover test tuple for this rel */
@@ -179,74 +176,11 @@ lnext:
break;
}
- lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;
- if (!IsolationUsesXactSnapshot())
- lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
-
- test = table_tuple_lock(erm->relation, &tid, estate->es_snapshot,
- markSlot, estate->es_output_cid,
- lockmode, erm->waitPolicy,
- lockflags,
- &tmfd);
-
- switch (test)
- {
- case TM_WouldBlock:
- /* couldn't lock tuple in SKIP LOCKED mode */
- goto lnext;
-
- case TM_SelfModified:
-
- /*
- * The target tuple was already updated or deleted by the
- * current command, or by a later command in the current
- * transaction. We *must* ignore the tuple in the former
- * case, so as to avoid the "Halloween problem" of repeated
- * update attempts. In the latter case it might be sensible
- * to fetch the updated tuple instead, but doing so would
- * require changing heap_update and heap_delete to not
- * complain about updating "invisible" tuples, which seems
- * pretty scary (table_tuple_lock will not complain, but few
- * callers expect TM_Invisible, and we're not one of them). So
- * for now, treat the tuple as deleted and do not process.
- */
- goto lnext;
-
- case TM_Ok:
-
- /*
- * Got the lock successfully, the locked tuple saved in
- * markSlot for, if needed, EvalPlanQual testing below.
- */
- if (tmfd.traversed)
- epq_needed = true;
- break;
-
- case TM_Updated:
- if (IsolationUsesXactSnapshot())
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- elog(ERROR, "unexpected table_tuple_lock status: %u",
- test);
- break;
-
- case TM_Deleted:
- if (IsolationUsesXactSnapshot())
- ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
- /* tuple was deleted so don't return it */
- goto lnext;
-
- case TM_Invisible:
- elog(ERROR, "attempted to lock invisible tuple");
- break;
-
- default:
- elog(ERROR, "unrecognized table_tuple_lock status: %u",
- test);
- }
+ /* skip tuple if it couldn't be locked */
+ if (!ExecLockTableTuple(erm->relation, &tid, markSlot,
+ estate->es_snapshot, estate->es_output_cid,
+ lockmode, erm->waitPolicy, &epq_needed))
+ goto lnext;
/* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
erm->curCtid = tid;
@@ -281,6 +215,91 @@ lnext:
return slot;
}
+/*
+ * ExecLockTableTuple
+ * Locks the tuple with the specified TID in the given lock mode, following
+ * the given wait policy.
+ *
+ * Returns true if the tuple was successfully locked.  The locked tuple is
+ * loaded into the provided slot.
+ */
+bool
+ExecLockTableTuple(Relation relation, ItemPointer tid, TupleTableSlot *slot,
+ Snapshot snapshot, CommandId cid,
+ LockTupleMode lockmode, LockWaitPolicy waitPolicy,
+ bool *epq_needed)
+{
+ TM_FailureData tmfd;
+ int lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;
+ TM_Result test;
+
+ if (!IsolationUsesXactSnapshot())
+ lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
+
+ test = table_tuple_lock(relation, tid, snapshot, slot, cid, lockmode,
+ waitPolicy, lockflags, &tmfd);
+
+ switch (test)
+ {
+ case TM_WouldBlock:
+ /* couldn't lock tuple in SKIP LOCKED mode */
+ return false;
+
+ case TM_SelfModified:
+
+ /*
+ * The target tuple was already updated or deleted by the current
+ * command, or by a later command in the current transaction. We
+ * *must* ignore the tuple in the former case, so as to avoid the
+ * "Halloween problem" of repeated update attempts. In the latter
+ * case it might be sensible to fetch the updated tuple instead,
+ * but doing so would require changing heap_update and heap_delete
+ * to not complain about updating "invisible" tuples, which seems
+ * pretty scary (table_tuple_lock will not complain, but few
+ * callers expect TM_Invisible, and we're not one of them). So for
+ * now, treat the tuple as deleted and do not process.
+ */
+ return false;
+
+ case TM_Ok:
+
+ /*
+ * Got the lock successfully.  The locked tuple is saved in slot for
+ * EvalPlanQual testing, if the caller asked for it.
+ */
+ if (tmfd.traversed && epq_needed)
+ *epq_needed = true;
+ break;
+
+ case TM_Updated:
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+ elog(ERROR, "unexpected table_tuple_lock status: %u",
+ test);
+ break;
+
+ case TM_Deleted:
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+ /* tuple was deleted so don't return it */
+ return false;
+
+ case TM_Invisible:
+ elog(ERROR, "attempted to lock invisible tuple");
+ return false;
+
+ default:
+ elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
+ return false;
+ }
+
+ return true;
+}
+
/* ----------------------------------------------------------------
* ExecInitLockRows
*