about | summary | refs | log | tree | commit | diff
path: root/src/backend/executor/nodeLockRows.c
diff options
context:
space:
mode:
author: Tom Lane <tgl@sss.pgh.pa.us> 2016-03-22 17:56:06 -0400
committer: Tom Lane <tgl@sss.pgh.pa.us> 2016-03-22 17:56:20 -0400
commit: 71404af2a29ce4a3a5907cdc8b893ec2bc0285b4 (patch)
tree: 875e6859c394a4f32c770bddab3d3069e457a12e /src/backend/executor/nodeLockRows.c
parent: f6bd0da63bf40bc324eec6fd47f3d3d301f44dab (diff)
download: postgresql-71404af2a29ce4a3a5907cdc8b893ec2bc0285b4.tar.gz
postgresql-71404af2a29ce4a3a5907cdc8b893ec2bc0285b4.zip
Fix EvalPlanQual bug when query contains both locked and not-locked rels.
In commit afb9249d06f47d7a, we (probably I) made ExecLockRows assign null test tuples to all relations of the query while setting up to do an EvalPlanQual recheck for a newly-updated locked row. This was sheerest brain fade: we should only set test tuples for relations that are lockable by the LockRows node, and in particular empty test tuples are only sensible for inheritance child relations that weren't the source of the current tuple from their inheritance tree. Setting a null test tuple for an unrelated table causes it to return NULLs when it should not, as exhibited in bug #14034 from Bronislav Houdek. To add insult to injury, doing it the wrong way required two loops where one would suffice; so the corrected code is even a bit shorter and faster. Add a regression test case based on his example, and back-patch to 9.5 where the bug was introduced.
Diffstat (limited to 'src/backend/executor/nodeLockRows.c')
-rw-r--r-- src/backend/executor/nodeLockRows.c | 40
1 file changed, 18 insertions(+), 22 deletions(-)
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 68208d5c9a7..4ebcaffe699 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -262,29 +262,15 @@ lnext:
*/
if (epq_needed)
{
- int i;
-
/* Initialize EPQ machinery */
EvalPlanQualBegin(&node->lr_epqstate, estate);
/*
- * Transfer already-fetched tuples into the EPQ state, and make sure
- * its test tuples for other tables are reset to NULL.
- */
- for (i = 0; i < node->lr_ntables; i++)
- {
- EvalPlanQualSetTuple(&node->lr_epqstate,
- i + 1,
- node->lr_curtuples[i]);
- /* freeing this tuple is now the responsibility of EPQ */
- node->lr_curtuples[i] = NULL;
- }
-
- /*
- * Next, fetch a copy of any rows that were successfully locked
- * without any update having occurred. (We do this in a separate pass
- * so as to avoid overhead in the common case where there are no
- * concurrent updates.)
+ * Transfer any already-fetched tuples into the EPQ state, and fetch a
+ * copy of any rows that were successfully locked without any update
+ * having occurred. (We do this in a separate pass so as to avoid
+ * overhead in the common case where there are no concurrent updates.)
+ * Make sure any inactive child rels have NULL test tuples in EPQ.
*/
foreach(lc, node->lr_arowMarks)
{
@@ -293,15 +279,25 @@ lnext:
HeapTupleData tuple;
Buffer buffer;
- /* ignore non-active child tables */
+ /* skip non-active child tables, but clear their test tuples */
if (!erm->ermActive)
{
Assert(erm->rti != erm->prti); /* check it's child table */
+ EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, NULL);
continue;
}
- if (EvalPlanQualGetTuple(&node->lr_epqstate, erm->rti) != NULL)
- continue; /* it was updated and fetched above */
+ /* was tuple updated and fetched above? */
+ if (node->lr_curtuples[erm->rti - 1] != NULL)
+ {
+ /* yes, so set it as the EPQ test tuple for this rel */
+ EvalPlanQualSetTuple(&node->lr_epqstate,
+ erm->rti,
+ node->lr_curtuples[erm->rti - 1]);
+ /* freeing this tuple is now the responsibility of EPQ */
+ node->lr_curtuples[erm->rti - 1] = NULL;
+ continue;
+ }
/* foreign tables should have been fetched above */
Assert(erm->relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE);