author    Amit Kapila <akapila@postgresql.org>  2025-04-08 15:35:42 +0530
committer Amit Kapila <akapila@postgresql.org>  2025-04-08 15:35:42 +0530
commit    12eece5fd54c2aa3dbdefb6de7f18566f5c00357 (patch)
tree      005de6ab36fb26933fd67cc01d1c74cc461b07dc
parent    7ea21f4ee2721be4239657e74c2ae8f88fb5a3ef (diff)
download  postgresql-12eece5fd54c2aa3dbdefb6de7f18566f5c00357.tar.gz
          postgresql-12eece5fd54c2aa3dbdefb6de7f18566f5c00357.zip
Fix uninitialized index information access during apply.
The issue happens when building conflict information during apply of
INSERT or UPDATE operations that violate unique constraints on leaf
partitions.

The problem was introduced in commit 9ff68679b5, which removed the
redundant calls to ExecOpenIndices/ExecCloseIndices. The previous code
relied on the redundant ExecOpenIndices call in
apply_handle_tuple_routing() to build the index information required for
unique key conflict detection.

The fix is to delay building the index information until a conflict is
detected, instead of relying on ExecOpenIndices to do the same. An
additional benefit of this approach is that it avoids building index
information when there is no conflict.

Author: Hou Zhijie <houzj.fnst@fujitsu.com>
Reviewed-by: Amit Kapila <amit.kapila16@gmail.com>
Discussion: https://postgr.es/m/TYAPR01MB57244ADA33DDA57119B9D26494A62@TYAPR01MB5724.jpnprd01.prod.outlook.com
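The following is only a toy, self-contained C sketch of the deferred pattern the message describes (build the unique-key support data lazily, once a conflict has been reported, and only for the index that reported it). Every name in it (toy_index, build_unique_support, build_conflict_index_info) is invented for illustration; the actual backend changes are in the execReplication.c and worker.c hunks below.

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct toy_index
    {
        int     oid;                    /* stands in for the index relation OID */
        bool    unique_support_built;   /* stands in for IndexInfo->ii_UniqueOps != NULL */
    } toy_index;

    /* The comparatively expensive setup we no longer do for every opened index. */
    static void
    build_unique_support(toy_index *idx)
    {
        idx->unique_support_built = true;
        printf("built unique-key support for index %d\n", idx->oid);
    }

    /* Run only once a unique violation has been reported for conflict_oid. */
    static void
    build_conflict_index_info(toy_index *indexes, int nindexes, int conflict_oid)
    {
        for (int i = 0; i < nindexes; i++)
        {
            if (indexes[i].oid != conflict_oid)
                continue;               /* other indexes never pay the cost */
            if (!indexes[i].unique_support_built)
                build_unique_support(&indexes[i]);  /* build at most once */
        }
    }

    int
    main(void)
    {
        toy_index   indexes[] = {{1001, false}, {1002, false}, {1003, false}};

        /*
         * Opening the indexes builds nothing extra; only a detected conflict
         * on (say) index 1002 triggers the build, and only for that index.
         */
        build_conflict_index_info(indexes, 3, 1002);
        return 0;
    }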
-rw-r--r--  src/backend/executor/execIndexing.c       |  5
-rw-r--r--  src/backend/executor/execReplication.c    | 30
-rw-r--r--  src/backend/replication/logical/worker.c  |  4
-rw-r--r--  src/test/subscription/t/035_conflicts.pl  | 37
4 files changed, 70 insertions, 6 deletions
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index e3fe9b78bb5..bdf862b2406 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -214,9 +214,8 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
ii = BuildIndexInfo(indexDesc);
/*
- * If the indexes are to be used for speculative insertion or conflict
- * detection in logical replication, add extra information required by
- * unique index entries.
+ * If the indexes are to be used for speculative insertion, add extra
+ * information required by unique index entries.
*/
if (speculative && ii->ii_Unique && !indexDesc->rd_index->indisexclusion)
BuildSpeculativeIndexInfo(indexDesc, ii);
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index ede89ea3cf9..53ddd25c42d 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -432,6 +432,30 @@ retry:
}
/*
+ * Build additional index information necessary for conflict detection.
+ */
+static void
+BuildConflictIndexInfo(ResultRelInfo *resultRelInfo, Oid conflictindex)
+{
+ for (int i = 0; i < resultRelInfo->ri_NumIndices; i++)
+ {
+ Relation indexRelation = resultRelInfo->ri_IndexRelationDescs[i];
+ IndexInfo *indexRelationInfo = resultRelInfo->ri_IndexRelationInfo[i];
+
+ if (conflictindex != RelationGetRelid(indexRelation))
+ continue;
+
+ /*
+ * This Assert will fail if BuildSpeculativeIndexInfo() is called
+ * twice for the given index.
+ */
+ Assert(indexRelationInfo->ii_UniqueOps == NULL);
+
+ BuildSpeculativeIndexInfo(indexRelation, indexRelationInfo);
+ }
+}
+
+/*
* Find the tuple that violates the passed unique index (conflictindex).
*
* If the conflicting tuple is found return true, otherwise false.
@@ -452,6 +476,12 @@ FindConflictTuple(ResultRelInfo *resultRelInfo, EState *estate,
*conflictslot = NULL;
+ /*
+ * Build additional information required to check constraints violations.
+ * See check_exclusion_or_unique_constraint().
+ */
+ BuildConflictIndexInfo(resultRelInfo, conflictindex);
+
retry:
if (ExecCheckIndexConstraints(resultRelInfo, slot, estate,
&conflictTid, &slot->tts_tid,
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index e3b2b144942..5ce596f4576 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -2457,7 +2457,7 @@ apply_handle_insert(StringInfo s)
{
ResultRelInfo *relinfo = edata->targetRelInfo;
- ExecOpenIndices(relinfo, true);
+ ExecOpenIndices(relinfo, false);
apply_handle_insert_internal(edata, relinfo, remoteslot);
ExecCloseIndices(relinfo);
}
@@ -2680,7 +2680,7 @@ apply_handle_update_internal(ApplyExecutionData *edata,
MemoryContext oldctx;
EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
- ExecOpenIndices(relinfo, true);
+ ExecOpenIndices(relinfo, false);
found = FindReplTupleInLocalRel(edata, localrel,
&relmapentry->remoterel,
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl
index 3a4d44e1d0e..2a7a8239a29 100644
--- a/src/test/subscription/t/035_conflicts.pl
+++ b/src/test/subscription/t/035_conflicts.pl
@@ -25,14 +25,23 @@ $node_subscriber->start;
$node_publisher->safe_psql('postgres',
"CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
+
# Create same table on subscriber
$node_subscriber->safe_psql('postgres',
"CREATE TABLE conf_tab (a int PRIMARY key, b int UNIQUE, c int UNIQUE);");
+$node_subscriber->safe_psql(
+ 'postgres', qq[
+ CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int, c int, unique(a,b)) PARTITION BY RANGE (a);
+ CREATE TABLE conf_tab_2_p1 PARTITION OF conf_tab_2 FOR VALUES FROM (MINVALUE) TO (100);
+]);
+
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
$node_publisher->safe_psql('postgres',
- "CREATE PUBLICATION pub_tab FOR TABLE conf_tab");
+ "CREATE PUBLICATION pub_tab FOR TABLE conf_tab, conf_tab_2");
# Create the subscription
my $appname = 'sub_tab';
@@ -110,4 +119,30 @@ $node_subscriber->wait_for_log(
pass('multiple_unique_conflicts detected during update');
+# Truncate table to get rid of the error
+$node_subscriber->safe_psql('postgres', "TRUNCATE conf_tab;");
+
+
+##################################################
+# Test multiple_unique_conflicts due to INSERT on a leaf partition
+##################################################
+
+# Insert data in the subscriber table
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO conf_tab_2 VALUES (55,2,3);");
+
+# Insert data in the publisher table
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO conf_tab_2 VALUES (55,2,3);");
+
+$node_subscriber->wait_for_log(
+ qr/conflict detected on relation \"public.conf_tab_2_p1\": conflict=multiple_unique_conflicts.*
+.*Key already exists in unique index \"conf_tab_2_p1_pkey\".*
+.*Key \(a\)=\(55\); existing local tuple \(55, 2, 3\); remote tuple \(55, 2, 3\).*
+.*Key already exists in unique index \"conf_tab_2_p1_a_b_key\".*
+.*Key \(a, b\)=\(55, 2\); existing local tuple \(55, 2, 3\); remote tuple \(55, 2, 3\)./,
+ $log_offset);
+
+pass('multiple_unique_conflicts detected on a leaf partition during insert');
+
done_testing();